* Bump CI to Go 1.19 * Update golang/go edwards25519 test * Apply formatting to printer stubs output (to get correct comment formatting) * Bump gofumpt version
84070 lines
2.4 MiB
84070 lines
2.4 MiB
// Code generated by command: avogen -output zinstructions.go build. DO NOT EDIT.
|
|
|
|
package build
|
|
|
|
import (
|
|
"github.com/mmcloughlin/avo/ir"
|
|
"github.com/mmcloughlin/avo/operand"
|
|
"github.com/mmcloughlin/avo/x86"
|
|
)
|
|
|
|
func (c *Context) addinstruction(i *ir.Instruction, err error) {
|
|
if err == nil {
|
|
c.Instruction(i)
|
|
} else {
|
|
c.adderror(err)
|
|
}
|
|
}
|
|
|
|
// ADCB: Add with Carry.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ADCB imm8 al
|
|
// ADCB imm8 m8
|
|
// ADCB imm8 r8
|
|
// ADCB m8 r8
|
|
// ADCB r8 m8
|
|
// ADCB r8 r8
|
|
//
|
|
// Construct and append a ADCB instruction to the active function.
|
|
func (c *Context) ADCB(imr, amr operand.Op) {
|
|
c.addinstruction(x86.ADCB(imr, amr))
|
|
}
|
|
|
|
// ADCB: Add with Carry.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ADCB imm8 al
|
|
// ADCB imm8 m8
|
|
// ADCB imm8 r8
|
|
// ADCB m8 r8
|
|
// ADCB r8 m8
|
|
// ADCB r8 r8
|
|
//
|
|
// Construct and append a ADCB instruction to the active function.
|
|
// Operates on the global context.
|
|
func ADCB(imr, amr operand.Op) { ctx.ADCB(imr, amr) }
|
|
|
|
// ADCL: Add with Carry.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ADCL imm32 eax
|
|
// ADCL imm32 m32
|
|
// ADCL imm32 r32
|
|
// ADCL imm8 m32
|
|
// ADCL imm8 r32
|
|
// ADCL m32 r32
|
|
// ADCL r32 m32
|
|
// ADCL r32 r32
|
|
//
|
|
// Construct and append a ADCL instruction to the active function.
|
|
func (c *Context) ADCL(imr, emr operand.Op) {
|
|
c.addinstruction(x86.ADCL(imr, emr))
|
|
}
|
|
|
|
// ADCL: Add with Carry.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ADCL imm32 eax
|
|
// ADCL imm32 m32
|
|
// ADCL imm32 r32
|
|
// ADCL imm8 m32
|
|
// ADCL imm8 r32
|
|
// ADCL m32 r32
|
|
// ADCL r32 m32
|
|
// ADCL r32 r32
|
|
//
|
|
// Construct and append a ADCL instruction to the active function.
|
|
// Operates on the global context.
|
|
func ADCL(imr, emr operand.Op) { ctx.ADCL(imr, emr) }
|
|
|
|
// ADCQ: Add with Carry.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ADCQ imm32 m64
|
|
// ADCQ imm32 r64
|
|
// ADCQ imm32 rax
|
|
// ADCQ imm8 m64
|
|
// ADCQ imm8 r64
|
|
// ADCQ m64 r64
|
|
// ADCQ r64 m64
|
|
// ADCQ r64 r64
|
|
//
|
|
// Construct and append a ADCQ instruction to the active function.
|
|
func (c *Context) ADCQ(imr, mr operand.Op) {
|
|
c.addinstruction(x86.ADCQ(imr, mr))
|
|
}
|
|
|
|
// ADCQ: Add with Carry.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ADCQ imm32 m64
|
|
// ADCQ imm32 r64
|
|
// ADCQ imm32 rax
|
|
// ADCQ imm8 m64
|
|
// ADCQ imm8 r64
|
|
// ADCQ m64 r64
|
|
// ADCQ r64 m64
|
|
// ADCQ r64 r64
|
|
//
|
|
// Construct and append a ADCQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func ADCQ(imr, mr operand.Op) { ctx.ADCQ(imr, mr) }
|
|
|
|
// ADCW: Add with Carry.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ADCW imm16 ax
|
|
// ADCW imm16 m16
|
|
// ADCW imm16 r16
|
|
// ADCW imm8 m16
|
|
// ADCW imm8 r16
|
|
// ADCW m16 r16
|
|
// ADCW r16 m16
|
|
// ADCW r16 r16
|
|
//
|
|
// Construct and append a ADCW instruction to the active function.
|
|
func (c *Context) ADCW(imr, amr operand.Op) {
|
|
c.addinstruction(x86.ADCW(imr, amr))
|
|
}
|
|
|
|
// ADCW: Add with Carry.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ADCW imm16 ax
|
|
// ADCW imm16 m16
|
|
// ADCW imm16 r16
|
|
// ADCW imm8 m16
|
|
// ADCW imm8 r16
|
|
// ADCW m16 r16
|
|
// ADCW r16 m16
|
|
// ADCW r16 r16
|
|
//
|
|
// Construct and append a ADCW instruction to the active function.
|
|
// Operates on the global context.
|
|
func ADCW(imr, amr operand.Op) { ctx.ADCW(imr, amr) }
|
|
|
|
// ADCXL: Unsigned Integer Addition of Two Operands with Carry Flag.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ADCXL m32 r32
|
|
// ADCXL r32 r32
|
|
//
|
|
// Construct and append a ADCXL instruction to the active function.
|
|
func (c *Context) ADCXL(mr, r operand.Op) {
|
|
c.addinstruction(x86.ADCXL(mr, r))
|
|
}
|
|
|
|
// ADCXL: Unsigned Integer Addition of Two Operands with Carry Flag.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ADCXL m32 r32
|
|
// ADCXL r32 r32
|
|
//
|
|
// Construct and append a ADCXL instruction to the active function.
|
|
// Operates on the global context.
|
|
func ADCXL(mr, r operand.Op) { ctx.ADCXL(mr, r) }
|
|
|
|
// ADCXQ: Unsigned Integer Addition of Two Operands with Carry Flag.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ADCXQ m64 r64
|
|
// ADCXQ r64 r64
|
|
//
|
|
// Construct and append a ADCXQ instruction to the active function.
|
|
func (c *Context) ADCXQ(mr, r operand.Op) {
|
|
c.addinstruction(x86.ADCXQ(mr, r))
|
|
}
|
|
|
|
// ADCXQ: Unsigned Integer Addition of Two Operands with Carry Flag.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ADCXQ m64 r64
|
|
// ADCXQ r64 r64
|
|
//
|
|
// Construct and append a ADCXQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func ADCXQ(mr, r operand.Op) { ctx.ADCXQ(mr, r) }
|
|
|
|
// ADDB: Add.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ADDB imm8 al
|
|
// ADDB imm8 m8
|
|
// ADDB imm8 r8
|
|
// ADDB m8 r8
|
|
// ADDB r8 m8
|
|
// ADDB r8 r8
|
|
//
|
|
// Construct and append a ADDB instruction to the active function.
|
|
func (c *Context) ADDB(imr, amr operand.Op) {
|
|
c.addinstruction(x86.ADDB(imr, amr))
|
|
}
|
|
|
|
// ADDB: Add.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ADDB imm8 al
|
|
// ADDB imm8 m8
|
|
// ADDB imm8 r8
|
|
// ADDB m8 r8
|
|
// ADDB r8 m8
|
|
// ADDB r8 r8
|
|
//
|
|
// Construct and append a ADDB instruction to the active function.
|
|
// Operates on the global context.
|
|
func ADDB(imr, amr operand.Op) { ctx.ADDB(imr, amr) }
|
|
|
|
// ADDL: Add.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ADDL imm32 eax
|
|
// ADDL imm32 m32
|
|
// ADDL imm32 r32
|
|
// ADDL imm8 m32
|
|
// ADDL imm8 r32
|
|
// ADDL m32 r32
|
|
// ADDL r32 m32
|
|
// ADDL r32 r32
|
|
//
|
|
// Construct and append a ADDL instruction to the active function.
|
|
func (c *Context) ADDL(imr, emr operand.Op) {
|
|
c.addinstruction(x86.ADDL(imr, emr))
|
|
}
|
|
|
|
// ADDL: Add.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ADDL imm32 eax
|
|
// ADDL imm32 m32
|
|
// ADDL imm32 r32
|
|
// ADDL imm8 m32
|
|
// ADDL imm8 r32
|
|
// ADDL m32 r32
|
|
// ADDL r32 m32
|
|
// ADDL r32 r32
|
|
//
|
|
// Construct and append a ADDL instruction to the active function.
|
|
// Operates on the global context.
|
|
func ADDL(imr, emr operand.Op) { ctx.ADDL(imr, emr) }
|
|
|
|
// ADDPD: Add Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ADDPD m128 xmm
|
|
// ADDPD xmm xmm
|
|
//
|
|
// Construct and append a ADDPD instruction to the active function.
|
|
func (c *Context) ADDPD(mx, x operand.Op) {
|
|
c.addinstruction(x86.ADDPD(mx, x))
|
|
}
|
|
|
|
// ADDPD: Add Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ADDPD m128 xmm
|
|
// ADDPD xmm xmm
|
|
//
|
|
// Construct and append a ADDPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func ADDPD(mx, x operand.Op) { ctx.ADDPD(mx, x) }
|
|
|
|
// ADDPS: Add Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ADDPS m128 xmm
|
|
// ADDPS xmm xmm
|
|
//
|
|
// Construct and append a ADDPS instruction to the active function.
|
|
func (c *Context) ADDPS(mx, x operand.Op) {
|
|
c.addinstruction(x86.ADDPS(mx, x))
|
|
}
|
|
|
|
// ADDPS: Add Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ADDPS m128 xmm
|
|
// ADDPS xmm xmm
|
|
//
|
|
// Construct and append a ADDPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func ADDPS(mx, x operand.Op) { ctx.ADDPS(mx, x) }
|
|
|
|
// ADDQ: Add.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ADDQ imm32 m64
|
|
// ADDQ imm32 r64
|
|
// ADDQ imm32 rax
|
|
// ADDQ imm8 m64
|
|
// ADDQ imm8 r64
|
|
// ADDQ m64 r64
|
|
// ADDQ r64 m64
|
|
// ADDQ r64 r64
|
|
//
|
|
// Construct and append a ADDQ instruction to the active function.
|
|
func (c *Context) ADDQ(imr, mr operand.Op) {
|
|
c.addinstruction(x86.ADDQ(imr, mr))
|
|
}
|
|
|
|
// ADDQ: Add.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ADDQ imm32 m64
|
|
// ADDQ imm32 r64
|
|
// ADDQ imm32 rax
|
|
// ADDQ imm8 m64
|
|
// ADDQ imm8 r64
|
|
// ADDQ m64 r64
|
|
// ADDQ r64 m64
|
|
// ADDQ r64 r64
|
|
//
|
|
// Construct and append a ADDQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func ADDQ(imr, mr operand.Op) { ctx.ADDQ(imr, mr) }
|
|
|
|
// ADDSD: Add Scalar Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ADDSD m64 xmm
|
|
// ADDSD xmm xmm
|
|
//
|
|
// Construct and append a ADDSD instruction to the active function.
|
|
func (c *Context) ADDSD(mx, x operand.Op) {
|
|
c.addinstruction(x86.ADDSD(mx, x))
|
|
}
|
|
|
|
// ADDSD: Add Scalar Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ADDSD m64 xmm
|
|
// ADDSD xmm xmm
|
|
//
|
|
// Construct and append a ADDSD instruction to the active function.
|
|
// Operates on the global context.
|
|
func ADDSD(mx, x operand.Op) { ctx.ADDSD(mx, x) }
|
|
|
|
// ADDSS: Add Scalar Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ADDSS m32 xmm
|
|
// ADDSS xmm xmm
|
|
//
|
|
// Construct and append a ADDSS instruction to the active function.
|
|
func (c *Context) ADDSS(mx, x operand.Op) {
|
|
c.addinstruction(x86.ADDSS(mx, x))
|
|
}
|
|
|
|
// ADDSS: Add Scalar Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ADDSS m32 xmm
|
|
// ADDSS xmm xmm
|
|
//
|
|
// Construct and append a ADDSS instruction to the active function.
|
|
// Operates on the global context.
|
|
func ADDSS(mx, x operand.Op) { ctx.ADDSS(mx, x) }
|
|
|
|
// ADDSUBPD: Packed Double-FP Add/Subtract.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ADDSUBPD m128 xmm
|
|
// ADDSUBPD xmm xmm
|
|
//
|
|
// Construct and append a ADDSUBPD instruction to the active function.
|
|
func (c *Context) ADDSUBPD(mx, x operand.Op) {
|
|
c.addinstruction(x86.ADDSUBPD(mx, x))
|
|
}
|
|
|
|
// ADDSUBPD: Packed Double-FP Add/Subtract.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ADDSUBPD m128 xmm
|
|
// ADDSUBPD xmm xmm
|
|
//
|
|
// Construct and append a ADDSUBPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func ADDSUBPD(mx, x operand.Op) { ctx.ADDSUBPD(mx, x) }
|
|
|
|
// ADDSUBPS: Packed Single-FP Add/Subtract.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ADDSUBPS m128 xmm
|
|
// ADDSUBPS xmm xmm
|
|
//
|
|
// Construct and append a ADDSUBPS instruction to the active function.
|
|
func (c *Context) ADDSUBPS(mx, x operand.Op) {
|
|
c.addinstruction(x86.ADDSUBPS(mx, x))
|
|
}
|
|
|
|
// ADDSUBPS: Packed Single-FP Add/Subtract.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ADDSUBPS m128 xmm
|
|
// ADDSUBPS xmm xmm
|
|
//
|
|
// Construct and append a ADDSUBPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func ADDSUBPS(mx, x operand.Op) { ctx.ADDSUBPS(mx, x) }
|
|
|
|
// ADDW: Add.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ADDW imm16 ax
|
|
// ADDW imm16 m16
|
|
// ADDW imm16 r16
|
|
// ADDW imm8 m16
|
|
// ADDW imm8 r16
|
|
// ADDW m16 r16
|
|
// ADDW r16 m16
|
|
// ADDW r16 r16
|
|
//
|
|
// Construct and append a ADDW instruction to the active function.
|
|
func (c *Context) ADDW(imr, amr operand.Op) {
|
|
c.addinstruction(x86.ADDW(imr, amr))
|
|
}
|
|
|
|
// ADDW: Add.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ADDW imm16 ax
|
|
// ADDW imm16 m16
|
|
// ADDW imm16 r16
|
|
// ADDW imm8 m16
|
|
// ADDW imm8 r16
|
|
// ADDW m16 r16
|
|
// ADDW r16 m16
|
|
// ADDW r16 r16
|
|
//
|
|
// Construct and append a ADDW instruction to the active function.
|
|
// Operates on the global context.
|
|
func ADDW(imr, amr operand.Op) { ctx.ADDW(imr, amr) }
|
|
|
|
// ADOXL: Unsigned Integer Addition of Two Operands with Overflow Flag.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ADOXL m32 r32
|
|
// ADOXL r32 r32
|
|
//
|
|
// Construct and append a ADOXL instruction to the active function.
|
|
func (c *Context) ADOXL(mr, r operand.Op) {
|
|
c.addinstruction(x86.ADOXL(mr, r))
|
|
}
|
|
|
|
// ADOXL: Unsigned Integer Addition of Two Operands with Overflow Flag.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ADOXL m32 r32
|
|
// ADOXL r32 r32
|
|
//
|
|
// Construct and append a ADOXL instruction to the active function.
|
|
// Operates on the global context.
|
|
func ADOXL(mr, r operand.Op) { ctx.ADOXL(mr, r) }
|
|
|
|
// ADOXQ: Unsigned Integer Addition of Two Operands with Overflow Flag.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ADOXQ m64 r64
|
|
// ADOXQ r64 r64
|
|
//
|
|
// Construct and append a ADOXQ instruction to the active function.
|
|
func (c *Context) ADOXQ(mr, r operand.Op) {
|
|
c.addinstruction(x86.ADOXQ(mr, r))
|
|
}
|
|
|
|
// ADOXQ: Unsigned Integer Addition of Two Operands with Overflow Flag.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ADOXQ m64 r64
|
|
// ADOXQ r64 r64
|
|
//
|
|
// Construct and append a ADOXQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func ADOXQ(mr, r operand.Op) { ctx.ADOXQ(mr, r) }
|
|
|
|
// AESDEC: Perform One Round of an AES Decryption Flow.
|
|
//
|
|
// Forms:
|
|
//
|
|
// AESDEC m128 xmm
|
|
// AESDEC xmm xmm
|
|
//
|
|
// Construct and append a AESDEC instruction to the active function.
|
|
func (c *Context) AESDEC(mx, x operand.Op) {
|
|
c.addinstruction(x86.AESDEC(mx, x))
|
|
}
|
|
|
|
// AESDEC: Perform One Round of an AES Decryption Flow.
|
|
//
|
|
// Forms:
|
|
//
|
|
// AESDEC m128 xmm
|
|
// AESDEC xmm xmm
|
|
//
|
|
// Construct and append a AESDEC instruction to the active function.
|
|
// Operates on the global context.
|
|
func AESDEC(mx, x operand.Op) { ctx.AESDEC(mx, x) }
|
|
|
|
// AESDECLAST: Perform Last Round of an AES Decryption Flow.
|
|
//
|
|
// Forms:
|
|
//
|
|
// AESDECLAST m128 xmm
|
|
// AESDECLAST xmm xmm
|
|
//
|
|
// Construct and append a AESDECLAST instruction to the active function.
|
|
func (c *Context) AESDECLAST(mx, x operand.Op) {
|
|
c.addinstruction(x86.AESDECLAST(mx, x))
|
|
}
|
|
|
|
// AESDECLAST: Perform Last Round of an AES Decryption Flow.
|
|
//
|
|
// Forms:
|
|
//
|
|
// AESDECLAST m128 xmm
|
|
// AESDECLAST xmm xmm
|
|
//
|
|
// Construct and append a AESDECLAST instruction to the active function.
|
|
// Operates on the global context.
|
|
func AESDECLAST(mx, x operand.Op) { ctx.AESDECLAST(mx, x) }
|
|
|
|
// AESENC: Perform One Round of an AES Encryption Flow.
|
|
//
|
|
// Forms:
|
|
//
|
|
// AESENC m128 xmm
|
|
// AESENC xmm xmm
|
|
//
|
|
// Construct and append a AESENC instruction to the active function.
|
|
func (c *Context) AESENC(mx, x operand.Op) {
|
|
c.addinstruction(x86.AESENC(mx, x))
|
|
}
|
|
|
|
// AESENC: Perform One Round of an AES Encryption Flow.
|
|
//
|
|
// Forms:
|
|
//
|
|
// AESENC m128 xmm
|
|
// AESENC xmm xmm
|
|
//
|
|
// Construct and append a AESENC instruction to the active function.
|
|
// Operates on the global context.
|
|
func AESENC(mx, x operand.Op) { ctx.AESENC(mx, x) }
|
|
|
|
// AESENCLAST: Perform Last Round of an AES Encryption Flow.
|
|
//
|
|
// Forms:
|
|
//
|
|
// AESENCLAST m128 xmm
|
|
// AESENCLAST xmm xmm
|
|
//
|
|
// Construct and append a AESENCLAST instruction to the active function.
|
|
func (c *Context) AESENCLAST(mx, x operand.Op) {
|
|
c.addinstruction(x86.AESENCLAST(mx, x))
|
|
}
|
|
|
|
// AESENCLAST: Perform Last Round of an AES Encryption Flow.
|
|
//
|
|
// Forms:
|
|
//
|
|
// AESENCLAST m128 xmm
|
|
// AESENCLAST xmm xmm
|
|
//
|
|
// Construct and append a AESENCLAST instruction to the active function.
|
|
// Operates on the global context.
|
|
func AESENCLAST(mx, x operand.Op) { ctx.AESENCLAST(mx, x) }
|
|
|
|
// AESIMC: Perform the AES InvMixColumn Transformation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// AESIMC m128 xmm
|
|
// AESIMC xmm xmm
|
|
//
|
|
// Construct and append a AESIMC instruction to the active function.
|
|
func (c *Context) AESIMC(mx, x operand.Op) {
|
|
c.addinstruction(x86.AESIMC(mx, x))
|
|
}
|
|
|
|
// AESIMC: Perform the AES InvMixColumn Transformation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// AESIMC m128 xmm
|
|
// AESIMC xmm xmm
|
|
//
|
|
// Construct and append a AESIMC instruction to the active function.
|
|
// Operates on the global context.
|
|
func AESIMC(mx, x operand.Op) { ctx.AESIMC(mx, x) }
|
|
|
|
// AESKEYGENASSIST: AES Round Key Generation Assist.
|
|
//
|
|
// Forms:
|
|
//
|
|
// AESKEYGENASSIST imm8 m128 xmm
|
|
// AESKEYGENASSIST imm8 xmm xmm
|
|
//
|
|
// Construct and append a AESKEYGENASSIST instruction to the active function.
|
|
func (c *Context) AESKEYGENASSIST(i, mx, x operand.Op) {
|
|
c.addinstruction(x86.AESKEYGENASSIST(i, mx, x))
|
|
}
|
|
|
|
// AESKEYGENASSIST: AES Round Key Generation Assist.
|
|
//
|
|
// Forms:
|
|
//
|
|
// AESKEYGENASSIST imm8 m128 xmm
|
|
// AESKEYGENASSIST imm8 xmm xmm
|
|
//
|
|
// Construct and append a AESKEYGENASSIST instruction to the active function.
|
|
// Operates on the global context.
|
|
func AESKEYGENASSIST(i, mx, x operand.Op) { ctx.AESKEYGENASSIST(i, mx, x) }
|
|
|
|
// ANDB: Logical AND.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ANDB imm8 al
|
|
// ANDB imm8 m8
|
|
// ANDB imm8 r8
|
|
// ANDB m8 r8
|
|
// ANDB r8 m8
|
|
// ANDB r8 r8
|
|
//
|
|
// Construct and append a ANDB instruction to the active function.
|
|
func (c *Context) ANDB(imr, amr operand.Op) {
|
|
c.addinstruction(x86.ANDB(imr, amr))
|
|
}
|
|
|
|
// ANDB: Logical AND.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ANDB imm8 al
|
|
// ANDB imm8 m8
|
|
// ANDB imm8 r8
|
|
// ANDB m8 r8
|
|
// ANDB r8 m8
|
|
// ANDB r8 r8
|
|
//
|
|
// Construct and append a ANDB instruction to the active function.
|
|
// Operates on the global context.
|
|
func ANDB(imr, amr operand.Op) { ctx.ANDB(imr, amr) }
|
|
|
|
// ANDL: Logical AND.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ANDL imm32 eax
|
|
// ANDL imm32 m32
|
|
// ANDL imm32 r32
|
|
// ANDL imm8 m32
|
|
// ANDL imm8 r32
|
|
// ANDL m32 r32
|
|
// ANDL r32 m32
|
|
// ANDL r32 r32
|
|
//
|
|
// Construct and append a ANDL instruction to the active function.
|
|
func (c *Context) ANDL(imr, emr operand.Op) {
|
|
c.addinstruction(x86.ANDL(imr, emr))
|
|
}
|
|
|
|
// ANDL: Logical AND.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ANDL imm32 eax
|
|
// ANDL imm32 m32
|
|
// ANDL imm32 r32
|
|
// ANDL imm8 m32
|
|
// ANDL imm8 r32
|
|
// ANDL m32 r32
|
|
// ANDL r32 m32
|
|
// ANDL r32 r32
|
|
//
|
|
// Construct and append a ANDL instruction to the active function.
|
|
// Operates on the global context.
|
|
func ANDL(imr, emr operand.Op) { ctx.ANDL(imr, emr) }
|
|
|
|
// ANDNL: Logical AND NOT.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ANDNL m32 r32 r32
|
|
// ANDNL r32 r32 r32
|
|
//
|
|
// Construct and append a ANDNL instruction to the active function.
|
|
func (c *Context) ANDNL(mr, r, r1 operand.Op) {
|
|
c.addinstruction(x86.ANDNL(mr, r, r1))
|
|
}
|
|
|
|
// ANDNL: Logical AND NOT.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ANDNL m32 r32 r32
|
|
// ANDNL r32 r32 r32
|
|
//
|
|
// Construct and append a ANDNL instruction to the active function.
|
|
// Operates on the global context.
|
|
func ANDNL(mr, r, r1 operand.Op) { ctx.ANDNL(mr, r, r1) }
|
|
|
|
// ANDNPD: Bitwise Logical AND NOT of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ANDNPD m128 xmm
|
|
// ANDNPD xmm xmm
|
|
//
|
|
// Construct and append a ANDNPD instruction to the active function.
|
|
func (c *Context) ANDNPD(mx, x operand.Op) {
|
|
c.addinstruction(x86.ANDNPD(mx, x))
|
|
}
|
|
|
|
// ANDNPD: Bitwise Logical AND NOT of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ANDNPD m128 xmm
|
|
// ANDNPD xmm xmm
|
|
//
|
|
// Construct and append a ANDNPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func ANDNPD(mx, x operand.Op) { ctx.ANDNPD(mx, x) }
|
|
|
|
// ANDNPS: Bitwise Logical AND NOT of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ANDNPS m128 xmm
|
|
// ANDNPS xmm xmm
|
|
//
|
|
// Construct and append a ANDNPS instruction to the active function.
|
|
func (c *Context) ANDNPS(mx, x operand.Op) {
|
|
c.addinstruction(x86.ANDNPS(mx, x))
|
|
}
|
|
|
|
// ANDNPS: Bitwise Logical AND NOT of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ANDNPS m128 xmm
|
|
// ANDNPS xmm xmm
|
|
//
|
|
// Construct and append a ANDNPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func ANDNPS(mx, x operand.Op) { ctx.ANDNPS(mx, x) }
|
|
|
|
// ANDNQ: Logical AND NOT.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ANDNQ m64 r64 r64
|
|
// ANDNQ r64 r64 r64
|
|
//
|
|
// Construct and append a ANDNQ instruction to the active function.
|
|
func (c *Context) ANDNQ(mr, r, r1 operand.Op) {
|
|
c.addinstruction(x86.ANDNQ(mr, r, r1))
|
|
}
|
|
|
|
// ANDNQ: Logical AND NOT.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ANDNQ m64 r64 r64
|
|
// ANDNQ r64 r64 r64
|
|
//
|
|
// Construct and append a ANDNQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func ANDNQ(mr, r, r1 operand.Op) { ctx.ANDNQ(mr, r, r1) }
|
|
|
|
// ANDPD: Bitwise Logical AND of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ANDPD m128 xmm
|
|
// ANDPD xmm xmm
|
|
//
|
|
// Construct and append a ANDPD instruction to the active function.
|
|
func (c *Context) ANDPD(mx, x operand.Op) {
|
|
c.addinstruction(x86.ANDPD(mx, x))
|
|
}
|
|
|
|
// ANDPD: Bitwise Logical AND of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ANDPD m128 xmm
|
|
// ANDPD xmm xmm
|
|
//
|
|
// Construct and append a ANDPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func ANDPD(mx, x operand.Op) { ctx.ANDPD(mx, x) }
|
|
|
|
// ANDPS: Bitwise Logical AND of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ANDPS m128 xmm
|
|
// ANDPS xmm xmm
|
|
//
|
|
// Construct and append a ANDPS instruction to the active function.
|
|
func (c *Context) ANDPS(mx, x operand.Op) {
|
|
c.addinstruction(x86.ANDPS(mx, x))
|
|
}
|
|
|
|
// ANDPS: Bitwise Logical AND of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ANDPS m128 xmm
|
|
// ANDPS xmm xmm
|
|
//
|
|
// Construct and append a ANDPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func ANDPS(mx, x operand.Op) { ctx.ANDPS(mx, x) }
|
|
|
|
// ANDQ: Logical AND.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ANDQ imm32 m64
|
|
// ANDQ imm32 r64
|
|
// ANDQ imm32 rax
|
|
// ANDQ imm8 m64
|
|
// ANDQ imm8 r64
|
|
// ANDQ m64 r64
|
|
// ANDQ r64 m64
|
|
// ANDQ r64 r64
|
|
//
|
|
// Construct and append a ANDQ instruction to the active function.
|
|
func (c *Context) ANDQ(imr, mr operand.Op) {
|
|
c.addinstruction(x86.ANDQ(imr, mr))
|
|
}
|
|
|
|
// ANDQ: Logical AND.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ANDQ imm32 m64
|
|
// ANDQ imm32 r64
|
|
// ANDQ imm32 rax
|
|
// ANDQ imm8 m64
|
|
// ANDQ imm8 r64
|
|
// ANDQ m64 r64
|
|
// ANDQ r64 m64
|
|
// ANDQ r64 r64
|
|
//
|
|
// Construct and append a ANDQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func ANDQ(imr, mr operand.Op) { ctx.ANDQ(imr, mr) }
|
|
|
|
// ANDW: Logical AND.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ANDW imm16 ax
|
|
// ANDW imm16 m16
|
|
// ANDW imm16 r16
|
|
// ANDW imm8 m16
|
|
// ANDW imm8 r16
|
|
// ANDW m16 r16
|
|
// ANDW r16 m16
|
|
// ANDW r16 r16
|
|
//
|
|
// Construct and append a ANDW instruction to the active function.
|
|
func (c *Context) ANDW(imr, amr operand.Op) {
|
|
c.addinstruction(x86.ANDW(imr, amr))
|
|
}
|
|
|
|
// ANDW: Logical AND.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ANDW imm16 ax
|
|
// ANDW imm16 m16
|
|
// ANDW imm16 r16
|
|
// ANDW imm8 m16
|
|
// ANDW imm8 r16
|
|
// ANDW m16 r16
|
|
// ANDW r16 m16
|
|
// ANDW r16 r16
|
|
//
|
|
// Construct and append a ANDW instruction to the active function.
|
|
// Operates on the global context.
|
|
func ANDW(imr, amr operand.Op) { ctx.ANDW(imr, amr) }
|
|
|
|
// BEXTRL: Bit Field Extract.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BEXTRL r32 m32 r32
|
|
// BEXTRL r32 r32 r32
|
|
//
|
|
// Construct and append a BEXTRL instruction to the active function.
|
|
func (c *Context) BEXTRL(r, mr, r1 operand.Op) {
|
|
c.addinstruction(x86.BEXTRL(r, mr, r1))
|
|
}
|
|
|
|
// BEXTRL: Bit Field Extract.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BEXTRL r32 m32 r32
|
|
// BEXTRL r32 r32 r32
|
|
//
|
|
// Construct and append a BEXTRL instruction to the active function.
|
|
// Operates on the global context.
|
|
func BEXTRL(r, mr, r1 operand.Op) { ctx.BEXTRL(r, mr, r1) }
|
|
|
|
// BEXTRQ: Bit Field Extract.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BEXTRQ r64 m64 r64
|
|
// BEXTRQ r64 r64 r64
|
|
//
|
|
// Construct and append a BEXTRQ instruction to the active function.
|
|
func (c *Context) BEXTRQ(r, mr, r1 operand.Op) {
|
|
c.addinstruction(x86.BEXTRQ(r, mr, r1))
|
|
}
|
|
|
|
// BEXTRQ: Bit Field Extract.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BEXTRQ r64 m64 r64
|
|
// BEXTRQ r64 r64 r64
|
|
//
|
|
// Construct and append a BEXTRQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func BEXTRQ(r, mr, r1 operand.Op) { ctx.BEXTRQ(r, mr, r1) }
|
|
|
|
// BLENDPD: Blend Packed Double Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BLENDPD imm8 m128 xmm
|
|
// BLENDPD imm8 xmm xmm
|
|
//
|
|
// Construct and append a BLENDPD instruction to the active function.
|
|
func (c *Context) BLENDPD(i, mx, x operand.Op) {
|
|
c.addinstruction(x86.BLENDPD(i, mx, x))
|
|
}
|
|
|
|
// BLENDPD: Blend Packed Double Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BLENDPD imm8 m128 xmm
|
|
// BLENDPD imm8 xmm xmm
|
|
//
|
|
// Construct and append a BLENDPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func BLENDPD(i, mx, x operand.Op) { ctx.BLENDPD(i, mx, x) }
|
|
|
|
// BLENDPS: Blend Packed Single Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BLENDPS imm8 m128 xmm
|
|
// BLENDPS imm8 xmm xmm
|
|
//
|
|
// Construct and append a BLENDPS instruction to the active function.
|
|
func (c *Context) BLENDPS(i, mx, x operand.Op) {
|
|
c.addinstruction(x86.BLENDPS(i, mx, x))
|
|
}
|
|
|
|
// BLENDPS: Blend Packed Single Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BLENDPS imm8 m128 xmm
|
|
// BLENDPS imm8 xmm xmm
|
|
//
|
|
// Construct and append a BLENDPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func BLENDPS(i, mx, x operand.Op) { ctx.BLENDPS(i, mx, x) }
|
|
|
|
// BLENDVPD: Variable Blend Packed Double Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BLENDVPD xmm0 m128 xmm
|
|
// BLENDVPD xmm0 xmm xmm
|
|
//
|
|
// Construct and append a BLENDVPD instruction to the active function.
|
|
func (c *Context) BLENDVPD(x, mx, x1 operand.Op) {
|
|
c.addinstruction(x86.BLENDVPD(x, mx, x1))
|
|
}
|
|
|
|
// BLENDVPD: Variable Blend Packed Double Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BLENDVPD xmm0 m128 xmm
|
|
// BLENDVPD xmm0 xmm xmm
|
|
//
|
|
// Construct and append a BLENDVPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func BLENDVPD(x, mx, x1 operand.Op) { ctx.BLENDVPD(x, mx, x1) }
|
|
|
|
// BLENDVPS: Variable Blend Packed Single Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BLENDVPS xmm0 m128 xmm
|
|
// BLENDVPS xmm0 xmm xmm
|
|
//
|
|
// Construct and append a BLENDVPS instruction to the active function.
|
|
func (c *Context) BLENDVPS(x, mx, x1 operand.Op) {
|
|
c.addinstruction(x86.BLENDVPS(x, mx, x1))
|
|
}
|
|
|
|
// BLENDVPS: Variable Blend Packed Single Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BLENDVPS xmm0 m128 xmm
|
|
// BLENDVPS xmm0 xmm xmm
|
|
//
|
|
// Construct and append a BLENDVPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func BLENDVPS(x, mx, x1 operand.Op) { ctx.BLENDVPS(x, mx, x1) }
|
|
|
|
// BLSIL: Isolate Lowest Set Bit.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BLSIL m32 r32
|
|
// BLSIL r32 r32
|
|
//
|
|
// Construct and append a BLSIL instruction to the active function.
|
|
func (c *Context) BLSIL(mr, r operand.Op) {
|
|
c.addinstruction(x86.BLSIL(mr, r))
|
|
}
|
|
|
|
// BLSIL: Isolate Lowest Set Bit.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BLSIL m32 r32
|
|
// BLSIL r32 r32
|
|
//
|
|
// Construct and append a BLSIL instruction to the active function.
|
|
// Operates on the global context.
|
|
func BLSIL(mr, r operand.Op) { ctx.BLSIL(mr, r) }
|
|
|
|
// BLSIQ: Isolate Lowest Set Bit.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BLSIQ m64 r64
|
|
// BLSIQ r64 r64
|
|
//
|
|
// Construct and append a BLSIQ instruction to the active function.
|
|
func (c *Context) BLSIQ(mr, r operand.Op) {
|
|
c.addinstruction(x86.BLSIQ(mr, r))
|
|
}
|
|
|
|
// BLSIQ: Isolate Lowest Set Bit.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BLSIQ m64 r64
|
|
// BLSIQ r64 r64
|
|
//
|
|
// Construct and append a BLSIQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func BLSIQ(mr, r operand.Op) { ctx.BLSIQ(mr, r) }
|
|
|
|
// BLSMSKL: Mask From Lowest Set Bit.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BLSMSKL m32 r32
|
|
// BLSMSKL r32 r32
|
|
//
|
|
// Construct and append a BLSMSKL instruction to the active function.
|
|
func (c *Context) BLSMSKL(mr, r operand.Op) {
|
|
c.addinstruction(x86.BLSMSKL(mr, r))
|
|
}
|
|
|
|
// BLSMSKL: Mask From Lowest Set Bit.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BLSMSKL m32 r32
|
|
// BLSMSKL r32 r32
|
|
//
|
|
// Construct and append a BLSMSKL instruction to the active function.
|
|
// Operates on the global context.
|
|
func BLSMSKL(mr, r operand.Op) { ctx.BLSMSKL(mr, r) }
|
|
|
|
// BLSMSKQ: Mask From Lowest Set Bit.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BLSMSKQ m64 r64
|
|
// BLSMSKQ r64 r64
|
|
//
|
|
// Construct and append a BLSMSKQ instruction to the active function.
|
|
func (c *Context) BLSMSKQ(mr, r operand.Op) {
|
|
c.addinstruction(x86.BLSMSKQ(mr, r))
|
|
}
|
|
|
|
// BLSMSKQ: Mask From Lowest Set Bit.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BLSMSKQ m64 r64
|
|
// BLSMSKQ r64 r64
|
|
//
|
|
// Construct and append a BLSMSKQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func BLSMSKQ(mr, r operand.Op) { ctx.BLSMSKQ(mr, r) }
|
|
|
|
// BLSRL: Reset Lowest Set Bit.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BLSRL m32 r32
|
|
// BLSRL r32 r32
|
|
//
|
|
// Construct and append a BLSRL instruction to the active function.
|
|
func (c *Context) BLSRL(mr, r operand.Op) {
|
|
c.addinstruction(x86.BLSRL(mr, r))
|
|
}
|
|
|
|
// BLSRL: Reset Lowest Set Bit.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BLSRL m32 r32
|
|
// BLSRL r32 r32
|
|
//
|
|
// Construct and append a BLSRL instruction to the active function.
|
|
// Operates on the global context.
|
|
func BLSRL(mr, r operand.Op) { ctx.BLSRL(mr, r) }
|
|
|
|
// BLSRQ: Reset Lowest Set Bit.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BLSRQ m64 r64
|
|
// BLSRQ r64 r64
|
|
//
|
|
// Construct and append a BLSRQ instruction to the active function.
|
|
func (c *Context) BLSRQ(mr, r operand.Op) {
|
|
c.addinstruction(x86.BLSRQ(mr, r))
|
|
}
|
|
|
|
// BLSRQ: Reset Lowest Set Bit.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BLSRQ m64 r64
|
|
// BLSRQ r64 r64
|
|
//
|
|
// Construct and append a BLSRQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func BLSRQ(mr, r operand.Op) { ctx.BLSRQ(mr, r) }
|
|
|
|
// BSFL: Bit Scan Forward.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BSFL m32 r32
|
|
// BSFL r32 r32
|
|
//
|
|
// Construct and append a BSFL instruction to the active function.
|
|
func (c *Context) BSFL(mr, r operand.Op) {
|
|
c.addinstruction(x86.BSFL(mr, r))
|
|
}
|
|
|
|
// BSFL: Bit Scan Forward.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BSFL m32 r32
|
|
// BSFL r32 r32
|
|
//
|
|
// Construct and append a BSFL instruction to the active function.
|
|
// Operates on the global context.
|
|
func BSFL(mr, r operand.Op) { ctx.BSFL(mr, r) }
|
|
|
|
// BSFQ: Bit Scan Forward.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BSFQ m64 r64
|
|
// BSFQ r64 r64
|
|
//
|
|
// Construct and append a BSFQ instruction to the active function.
|
|
func (c *Context) BSFQ(mr, r operand.Op) {
|
|
c.addinstruction(x86.BSFQ(mr, r))
|
|
}
|
|
|
|
// BSFQ: Bit Scan Forward.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BSFQ m64 r64
|
|
// BSFQ r64 r64
|
|
//
|
|
// Construct and append a BSFQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func BSFQ(mr, r operand.Op) { ctx.BSFQ(mr, r) }
|
|
|
|
// BSFW: Bit Scan Forward.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BSFW m16 r16
|
|
// BSFW r16 r16
|
|
//
|
|
// Construct and append a BSFW instruction to the active function.
|
|
func (c *Context) BSFW(mr, r operand.Op) {
|
|
c.addinstruction(x86.BSFW(mr, r))
|
|
}
|
|
|
|
// BSFW: Bit Scan Forward.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BSFW m16 r16
|
|
// BSFW r16 r16
|
|
//
|
|
// Construct and append a BSFW instruction to the active function.
|
|
// Operates on the global context.
|
|
func BSFW(mr, r operand.Op) { ctx.BSFW(mr, r) }
|
|
|
|
// BSRL: Bit Scan Reverse.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BSRL m32 r32
|
|
// BSRL r32 r32
|
|
//
|
|
// Construct and append a BSRL instruction to the active function.
|
|
func (c *Context) BSRL(mr, r operand.Op) {
|
|
c.addinstruction(x86.BSRL(mr, r))
|
|
}
|
|
|
|
// BSRL: Bit Scan Reverse.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BSRL m32 r32
|
|
// BSRL r32 r32
|
|
//
|
|
// Construct and append a BSRL instruction to the active function.
|
|
// Operates on the global context.
|
|
func BSRL(mr, r operand.Op) { ctx.BSRL(mr, r) }
|
|
|
|
// BSRQ: Bit Scan Reverse.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BSRQ m64 r64
|
|
// BSRQ r64 r64
|
|
//
|
|
// Construct and append a BSRQ instruction to the active function.
|
|
func (c *Context) BSRQ(mr, r operand.Op) {
|
|
c.addinstruction(x86.BSRQ(mr, r))
|
|
}
|
|
|
|
// BSRQ: Bit Scan Reverse.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BSRQ m64 r64
|
|
// BSRQ r64 r64
|
|
//
|
|
// Construct and append a BSRQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func BSRQ(mr, r operand.Op) { ctx.BSRQ(mr, r) }
|
|
|
|
// BSRW: Bit Scan Reverse.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BSRW m16 r16
|
|
// BSRW r16 r16
|
|
//
|
|
// Construct and append a BSRW instruction to the active function.
|
|
func (c *Context) BSRW(mr, r operand.Op) {
|
|
c.addinstruction(x86.BSRW(mr, r))
|
|
}
|
|
|
|
// BSRW: Bit Scan Reverse.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BSRW m16 r16
|
|
// BSRW r16 r16
|
|
//
|
|
// Construct and append a BSRW instruction to the active function.
|
|
// Operates on the global context.
|
|
func BSRW(mr, r operand.Op) { ctx.BSRW(mr, r) }
|
|
|
|
// BSWAPL: Byte Swap.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BSWAPL r32
|
|
//
|
|
// Construct and append a BSWAPL instruction to the active function.
|
|
func (c *Context) BSWAPL(r operand.Op) {
|
|
c.addinstruction(x86.BSWAPL(r))
|
|
}
|
|
|
|
// BSWAPL: Byte Swap.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BSWAPL r32
|
|
//
|
|
// Construct and append a BSWAPL instruction to the active function.
|
|
// Operates on the global context.
|
|
func BSWAPL(r operand.Op) { ctx.BSWAPL(r) }
|
|
|
|
// BSWAPQ: Byte Swap.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BSWAPQ r64
|
|
//
|
|
// Construct and append a BSWAPQ instruction to the active function.
|
|
func (c *Context) BSWAPQ(r operand.Op) {
|
|
c.addinstruction(x86.BSWAPQ(r))
|
|
}
|
|
|
|
// BSWAPQ: Byte Swap.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BSWAPQ r64
|
|
//
|
|
// Construct and append a BSWAPQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func BSWAPQ(r operand.Op) { ctx.BSWAPQ(r) }
|
|
|
|
// BTCL: Bit Test and Complement.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BTCL imm8 m32
|
|
// BTCL imm8 r32
|
|
// BTCL r32 m32
|
|
// BTCL r32 r32
|
|
//
|
|
// Construct and append a BTCL instruction to the active function.
|
|
func (c *Context) BTCL(ir, mr operand.Op) {
|
|
c.addinstruction(x86.BTCL(ir, mr))
|
|
}
|
|
|
|
// BTCL: Bit Test and Complement.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BTCL imm8 m32
|
|
// BTCL imm8 r32
|
|
// BTCL r32 m32
|
|
// BTCL r32 r32
|
|
//
|
|
// Construct and append a BTCL instruction to the active function.
|
|
// Operates on the global context.
|
|
func BTCL(ir, mr operand.Op) { ctx.BTCL(ir, mr) }
|
|
|
|
// BTCQ: Bit Test and Complement.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BTCQ imm8 m64
|
|
// BTCQ imm8 r64
|
|
// BTCQ r64 m64
|
|
// BTCQ r64 r64
|
|
//
|
|
// Construct and append a BTCQ instruction to the active function.
|
|
func (c *Context) BTCQ(ir, mr operand.Op) {
|
|
c.addinstruction(x86.BTCQ(ir, mr))
|
|
}
|
|
|
|
// BTCQ: Bit Test and Complement.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BTCQ imm8 m64
|
|
// BTCQ imm8 r64
|
|
// BTCQ r64 m64
|
|
// BTCQ r64 r64
|
|
//
|
|
// Construct and append a BTCQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func BTCQ(ir, mr operand.Op) { ctx.BTCQ(ir, mr) }
|
|
|
|
// BTCW: Bit Test and Complement.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BTCW imm8 m16
|
|
// BTCW imm8 r16
|
|
// BTCW r16 m16
|
|
// BTCW r16 r16
|
|
//
|
|
// Construct and append a BTCW instruction to the active function.
|
|
func (c *Context) BTCW(ir, mr operand.Op) {
|
|
c.addinstruction(x86.BTCW(ir, mr))
|
|
}
|
|
|
|
// BTCW: Bit Test and Complement.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BTCW imm8 m16
|
|
// BTCW imm8 r16
|
|
// BTCW r16 m16
|
|
// BTCW r16 r16
|
|
//
|
|
// Construct and append a BTCW instruction to the active function.
|
|
// Operates on the global context.
|
|
func BTCW(ir, mr operand.Op) { ctx.BTCW(ir, mr) }
|
|
|
|
// BTL: Bit Test.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BTL imm8 m32
|
|
// BTL imm8 r32
|
|
// BTL r32 m32
|
|
// BTL r32 r32
|
|
//
|
|
// Construct and append a BTL instruction to the active function.
|
|
func (c *Context) BTL(ir, mr operand.Op) {
|
|
c.addinstruction(x86.BTL(ir, mr))
|
|
}
|
|
|
|
// BTL: Bit Test.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BTL imm8 m32
|
|
// BTL imm8 r32
|
|
// BTL r32 m32
|
|
// BTL r32 r32
|
|
//
|
|
// Construct and append a BTL instruction to the active function.
|
|
// Operates on the global context.
|
|
func BTL(ir, mr operand.Op) { ctx.BTL(ir, mr) }
|
|
|
|
// BTQ: Bit Test.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BTQ imm8 m64
|
|
// BTQ imm8 r64
|
|
// BTQ r64 m64
|
|
// BTQ r64 r64
|
|
//
|
|
// Construct and append a BTQ instruction to the active function.
|
|
func (c *Context) BTQ(ir, mr operand.Op) {
|
|
c.addinstruction(x86.BTQ(ir, mr))
|
|
}
|
|
|
|
// BTQ: Bit Test.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BTQ imm8 m64
|
|
// BTQ imm8 r64
|
|
// BTQ r64 m64
|
|
// BTQ r64 r64
|
|
//
|
|
// Construct and append a BTQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func BTQ(ir, mr operand.Op) { ctx.BTQ(ir, mr) }
|
|
|
|
// BTRL: Bit Test and Reset.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BTRL imm8 m32
|
|
// BTRL imm8 r32
|
|
// BTRL r32 m32
|
|
// BTRL r32 r32
|
|
//
|
|
// Construct and append a BTRL instruction to the active function.
|
|
func (c *Context) BTRL(ir, mr operand.Op) {
|
|
c.addinstruction(x86.BTRL(ir, mr))
|
|
}
|
|
|
|
// BTRL: Bit Test and Reset.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BTRL imm8 m32
|
|
// BTRL imm8 r32
|
|
// BTRL r32 m32
|
|
// BTRL r32 r32
|
|
//
|
|
// Construct and append a BTRL instruction to the active function.
|
|
// Operates on the global context.
|
|
func BTRL(ir, mr operand.Op) { ctx.BTRL(ir, mr) }
|
|
|
|
// BTRQ: Bit Test and Reset.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BTRQ imm8 m64
|
|
// BTRQ imm8 r64
|
|
// BTRQ r64 m64
|
|
// BTRQ r64 r64
|
|
//
|
|
// Construct and append a BTRQ instruction to the active function.
|
|
func (c *Context) BTRQ(ir, mr operand.Op) {
|
|
c.addinstruction(x86.BTRQ(ir, mr))
|
|
}
|
|
|
|
// BTRQ: Bit Test and Reset.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BTRQ imm8 m64
|
|
// BTRQ imm8 r64
|
|
// BTRQ r64 m64
|
|
// BTRQ r64 r64
|
|
//
|
|
// Construct and append a BTRQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func BTRQ(ir, mr operand.Op) { ctx.BTRQ(ir, mr) }
|
|
|
|
// BTRW: Bit Test and Reset.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BTRW imm8 m16
|
|
// BTRW imm8 r16
|
|
// BTRW r16 m16
|
|
// BTRW r16 r16
|
|
//
|
|
// Construct and append a BTRW instruction to the active function.
|
|
func (c *Context) BTRW(ir, mr operand.Op) {
|
|
c.addinstruction(x86.BTRW(ir, mr))
|
|
}
|
|
|
|
// BTRW: Bit Test and Reset.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BTRW imm8 m16
|
|
// BTRW imm8 r16
|
|
// BTRW r16 m16
|
|
// BTRW r16 r16
|
|
//
|
|
// Construct and append a BTRW instruction to the active function.
|
|
// Operates on the global context.
|
|
func BTRW(ir, mr operand.Op) { ctx.BTRW(ir, mr) }
|
|
|
|
// BTSL: Bit Test and Set.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BTSL imm8 m32
|
|
// BTSL imm8 r32
|
|
// BTSL r32 m32
|
|
// BTSL r32 r32
|
|
//
|
|
// Construct and append a BTSL instruction to the active function.
|
|
func (c *Context) BTSL(ir, mr operand.Op) {
|
|
c.addinstruction(x86.BTSL(ir, mr))
|
|
}
|
|
|
|
// BTSL: Bit Test and Set.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BTSL imm8 m32
|
|
// BTSL imm8 r32
|
|
// BTSL r32 m32
|
|
// BTSL r32 r32
|
|
//
|
|
// Construct and append a BTSL instruction to the active function.
|
|
// Operates on the global context.
|
|
func BTSL(ir, mr operand.Op) { ctx.BTSL(ir, mr) }
|
|
|
|
// BTSQ: Bit Test and Set.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BTSQ imm8 m64
|
|
// BTSQ imm8 r64
|
|
// BTSQ r64 m64
|
|
// BTSQ r64 r64
|
|
//
|
|
// Construct and append a BTSQ instruction to the active function.
|
|
func (c *Context) BTSQ(ir, mr operand.Op) {
|
|
c.addinstruction(x86.BTSQ(ir, mr))
|
|
}
|
|
|
|
// BTSQ: Bit Test and Set.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BTSQ imm8 m64
|
|
// BTSQ imm8 r64
|
|
// BTSQ r64 m64
|
|
// BTSQ r64 r64
|
|
//
|
|
// Construct and append a BTSQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func BTSQ(ir, mr operand.Op) { ctx.BTSQ(ir, mr) }
|
|
|
|
// BTSW: Bit Test and Set.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BTSW imm8 m16
|
|
// BTSW imm8 r16
|
|
// BTSW r16 m16
|
|
// BTSW r16 r16
|
|
//
|
|
// Construct and append a BTSW instruction to the active function.
|
|
func (c *Context) BTSW(ir, mr operand.Op) {
|
|
c.addinstruction(x86.BTSW(ir, mr))
|
|
}
|
|
|
|
// BTSW: Bit Test and Set.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BTSW imm8 m16
|
|
// BTSW imm8 r16
|
|
// BTSW r16 m16
|
|
// BTSW r16 r16
|
|
//
|
|
// Construct and append a BTSW instruction to the active function.
|
|
// Operates on the global context.
|
|
func BTSW(ir, mr operand.Op) { ctx.BTSW(ir, mr) }
|
|
|
|
// BTW: Bit Test.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BTW imm8 m16
|
|
// BTW imm8 r16
|
|
// BTW r16 m16
|
|
// BTW r16 r16
|
|
//
|
|
// Construct and append a BTW instruction to the active function.
|
|
func (c *Context) BTW(ir, mr operand.Op) {
|
|
c.addinstruction(x86.BTW(ir, mr))
|
|
}
|
|
|
|
// BTW: Bit Test.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BTW imm8 m16
|
|
// BTW imm8 r16
|
|
// BTW r16 m16
|
|
// BTW r16 r16
|
|
//
|
|
// Construct and append a BTW instruction to the active function.
|
|
// Operates on the global context.
|
|
func BTW(ir, mr operand.Op) { ctx.BTW(ir, mr) }
|
|
|
|
// BZHIL: Zero High Bits Starting with Specified Bit Position.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BZHIL r32 m32 r32
|
|
// BZHIL r32 r32 r32
|
|
//
|
|
// Construct and append a BZHIL instruction to the active function.
|
|
func (c *Context) BZHIL(r, mr, r1 operand.Op) {
|
|
c.addinstruction(x86.BZHIL(r, mr, r1))
|
|
}
|
|
|
|
// BZHIL: Zero High Bits Starting with Specified Bit Position.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BZHIL r32 m32 r32
|
|
// BZHIL r32 r32 r32
|
|
//
|
|
// Construct and append a BZHIL instruction to the active function.
|
|
// Operates on the global context.
|
|
func BZHIL(r, mr, r1 operand.Op) { ctx.BZHIL(r, mr, r1) }
|
|
|
|
// BZHIQ: Zero High Bits Starting with Specified Bit Position.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BZHIQ r64 m64 r64
|
|
// BZHIQ r64 r64 r64
|
|
//
|
|
// Construct and append a BZHIQ instruction to the active function.
|
|
func (c *Context) BZHIQ(r, mr, r1 operand.Op) {
|
|
c.addinstruction(x86.BZHIQ(r, mr, r1))
|
|
}
|
|
|
|
// BZHIQ: Zero High Bits Starting with Specified Bit Position.
|
|
//
|
|
// Forms:
|
|
//
|
|
// BZHIQ r64 m64 r64
|
|
// BZHIQ r64 r64 r64
|
|
//
|
|
// Construct and append a BZHIQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func BZHIQ(r, mr, r1 operand.Op) { ctx.BZHIQ(r, mr, r1) }
|
|
|
|
// CALL: Call Procedure.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CALL rel32
|
|
//
|
|
// Construct and append a CALL instruction to the active function.
|
|
func (c *Context) CALL(r operand.Op) {
|
|
c.addinstruction(x86.CALL(r))
|
|
}
|
|
|
|
// CALL: Call Procedure.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CALL rel32
|
|
//
|
|
// Construct and append a CALL instruction to the active function.
|
|
// Operates on the global context.
|
|
func CALL(r operand.Op) { ctx.CALL(r) }
|
|
|
|
// CBW: Convert Byte to Word.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CBW
|
|
//
|
|
// Construct and append a CBW instruction to the active function.
|
|
func (c *Context) CBW() {
|
|
c.addinstruction(x86.CBW())
|
|
}
|
|
|
|
// CBW: Convert Byte to Word.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CBW
|
|
//
|
|
// Construct and append a CBW instruction to the active function.
|
|
// Operates on the global context.
|
|
func CBW() { ctx.CBW() }
|
|
|
|
// CDQ: Convert Doubleword to Quadword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CDQ
|
|
//
|
|
// Construct and append a CDQ instruction to the active function.
|
|
func (c *Context) CDQ() {
|
|
c.addinstruction(x86.CDQ())
|
|
}
|
|
|
|
// CDQ: Convert Doubleword to Quadword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CDQ
|
|
//
|
|
// Construct and append a CDQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func CDQ() { ctx.CDQ() }
|
|
|
|
// CDQE: Convert Doubleword to Quadword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CDQE
|
|
//
|
|
// Construct and append a CDQE instruction to the active function.
|
|
func (c *Context) CDQE() {
|
|
c.addinstruction(x86.CDQE())
|
|
}
|
|
|
|
// CDQE: Convert Doubleword to Quadword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CDQE
|
|
//
|
|
// Construct and append a CDQE instruction to the active function.
|
|
// Operates on the global context.
|
|
func CDQE() { ctx.CDQE() }
|
|
|
|
// CLC: Clear Carry Flag.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CLC
|
|
//
|
|
// Construct and append a CLC instruction to the active function.
|
|
func (c *Context) CLC() {
|
|
c.addinstruction(x86.CLC())
|
|
}
|
|
|
|
// CLC: Clear Carry Flag.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CLC
|
|
//
|
|
// Construct and append a CLC instruction to the active function.
|
|
// Operates on the global context.
|
|
func CLC() { ctx.CLC() }
|
|
|
|
// CLD: Clear Direction Flag.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CLD
|
|
//
|
|
// Construct and append a CLD instruction to the active function.
|
|
func (c *Context) CLD() {
|
|
c.addinstruction(x86.CLD())
|
|
}
|
|
|
|
// CLD: Clear Direction Flag.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CLD
|
|
//
|
|
// Construct and append a CLD instruction to the active function.
|
|
// Operates on the global context.
|
|
func CLD() { ctx.CLD() }
|
|
|
|
// CLFLUSH: Flush Cache Line.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CLFLUSH m8
|
|
//
|
|
// Construct and append a CLFLUSH instruction to the active function.
|
|
func (c *Context) CLFLUSH(m operand.Op) {
|
|
c.addinstruction(x86.CLFLUSH(m))
|
|
}
|
|
|
|
// CLFLUSH: Flush Cache Line.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CLFLUSH m8
|
|
//
|
|
// Construct and append a CLFLUSH instruction to the active function.
|
|
// Operates on the global context.
|
|
func CLFLUSH(m operand.Op) { ctx.CLFLUSH(m) }
|
|
|
|
// CLFLUSHOPT: Flush Cache Line Optimized.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CLFLUSHOPT m8
|
|
//
|
|
// Construct and append a CLFLUSHOPT instruction to the active function.
|
|
func (c *Context) CLFLUSHOPT(m operand.Op) {
|
|
c.addinstruction(x86.CLFLUSHOPT(m))
|
|
}
|
|
|
|
// CLFLUSHOPT: Flush Cache Line Optimized.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CLFLUSHOPT m8
|
|
//
|
|
// Construct and append a CLFLUSHOPT instruction to the active function.
|
|
// Operates on the global context.
|
|
func CLFLUSHOPT(m operand.Op) { ctx.CLFLUSHOPT(m) }
|
|
|
|
// CMC: Complement Carry Flag.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMC
|
|
//
|
|
// Construct and append a CMC instruction to the active function.
|
|
func (c *Context) CMC() {
|
|
c.addinstruction(x86.CMC())
|
|
}
|
|
|
|
// CMC: Complement Carry Flag.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMC
|
|
//
|
|
// Construct and append a CMC instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMC() { ctx.CMC() }
|
|
|
|
// CMOVLCC: Move if above or equal (CF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVLCC m32 r32
|
|
// CMOVLCC r32 r32
|
|
//
|
|
// Construct and append a CMOVLCC instruction to the active function.
|
|
func (c *Context) CMOVLCC(mr, r operand.Op) {
|
|
c.addinstruction(x86.CMOVLCC(mr, r))
|
|
}
|
|
|
|
// CMOVLCC: Move if above or equal (CF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVLCC m32 r32
|
|
// CMOVLCC r32 r32
|
|
//
|
|
// Construct and append a CMOVLCC instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMOVLCC(mr, r operand.Op) { ctx.CMOVLCC(mr, r) }
|
|
|
|
// CMOVLCS: Move if below (CF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVLCS m32 r32
|
|
// CMOVLCS r32 r32
|
|
//
|
|
// Construct and append a CMOVLCS instruction to the active function.
|
|
func (c *Context) CMOVLCS(mr, r operand.Op) {
|
|
c.addinstruction(x86.CMOVLCS(mr, r))
|
|
}
|
|
|
|
// CMOVLCS: Move if below (CF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVLCS m32 r32
|
|
// CMOVLCS r32 r32
|
|
//
|
|
// Construct and append a CMOVLCS instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMOVLCS(mr, r operand.Op) { ctx.CMOVLCS(mr, r) }
|
|
|
|
// CMOVLEQ: Move if equal (ZF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVLEQ m32 r32
|
|
// CMOVLEQ r32 r32
|
|
//
|
|
// Construct and append a CMOVLEQ instruction to the active function.
|
|
func (c *Context) CMOVLEQ(mr, r operand.Op) {
|
|
c.addinstruction(x86.CMOVLEQ(mr, r))
|
|
}
|
|
|
|
// CMOVLEQ: Move if equal (ZF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVLEQ m32 r32
|
|
// CMOVLEQ r32 r32
|
|
//
|
|
// Construct and append a CMOVLEQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMOVLEQ(mr, r operand.Op) { ctx.CMOVLEQ(mr, r) }
|
|
|
|
// CMOVLGE: Move if greater or equal (SF == OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVLGE m32 r32
|
|
// CMOVLGE r32 r32
|
|
//
|
|
// Construct and append a CMOVLGE instruction to the active function.
|
|
func (c *Context) CMOVLGE(mr, r operand.Op) {
|
|
c.addinstruction(x86.CMOVLGE(mr, r))
|
|
}
|
|
|
|
// CMOVLGE: Move if greater or equal (SF == OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVLGE m32 r32
|
|
// CMOVLGE r32 r32
|
|
//
|
|
// Construct and append a CMOVLGE instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMOVLGE(mr, r operand.Op) { ctx.CMOVLGE(mr, r) }
|
|
|
|
// CMOVLGT: Move if greater (ZF == 0 and SF == OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVLGT m32 r32
|
|
// CMOVLGT r32 r32
|
|
//
|
|
// Construct and append a CMOVLGT instruction to the active function.
|
|
func (c *Context) CMOVLGT(mr, r operand.Op) {
|
|
c.addinstruction(x86.CMOVLGT(mr, r))
|
|
}
|
|
|
|
// CMOVLGT: Move if greater (ZF == 0 and SF == OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVLGT m32 r32
|
|
// CMOVLGT r32 r32
|
|
//
|
|
// Construct and append a CMOVLGT instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMOVLGT(mr, r operand.Op) { ctx.CMOVLGT(mr, r) }
|
|
|
|
// CMOVLHI: Move if above (CF == 0 and ZF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVLHI m32 r32
|
|
// CMOVLHI r32 r32
|
|
//
|
|
// Construct and append a CMOVLHI instruction to the active function.
|
|
func (c *Context) CMOVLHI(mr, r operand.Op) {
|
|
c.addinstruction(x86.CMOVLHI(mr, r))
|
|
}
|
|
|
|
// CMOVLHI: Move if above (CF == 0 and ZF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVLHI m32 r32
|
|
// CMOVLHI r32 r32
|
|
//
|
|
// Construct and append a CMOVLHI instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMOVLHI(mr, r operand.Op) { ctx.CMOVLHI(mr, r) }
|
|
|
|
// CMOVLLE: Move if less or equal (ZF == 1 or SF != OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVLLE m32 r32
|
|
// CMOVLLE r32 r32
|
|
//
|
|
// Construct and append a CMOVLLE instruction to the active function.
|
|
func (c *Context) CMOVLLE(mr, r operand.Op) {
|
|
c.addinstruction(x86.CMOVLLE(mr, r))
|
|
}
|
|
|
|
// CMOVLLE: Move if less or equal (ZF == 1 or SF != OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVLLE m32 r32
|
|
// CMOVLLE r32 r32
|
|
//
|
|
// Construct and append a CMOVLLE instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMOVLLE(mr, r operand.Op) { ctx.CMOVLLE(mr, r) }
|
|
|
|
// CMOVLLS: Move if below or equal (CF == 1 or ZF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVLLS m32 r32
|
|
// CMOVLLS r32 r32
|
|
//
|
|
// Construct and append a CMOVLLS instruction to the active function.
|
|
func (c *Context) CMOVLLS(mr, r operand.Op) {
|
|
c.addinstruction(x86.CMOVLLS(mr, r))
|
|
}
|
|
|
|
// CMOVLLS: Move if below or equal (CF == 1 or ZF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVLLS m32 r32
|
|
// CMOVLLS r32 r32
|
|
//
|
|
// Construct and append a CMOVLLS instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMOVLLS(mr, r operand.Op) { ctx.CMOVLLS(mr, r) }
|
|
|
|
// CMOVLLT: Move if less (SF != OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVLLT m32 r32
|
|
// CMOVLLT r32 r32
|
|
//
|
|
// Construct and append a CMOVLLT instruction to the active function.
|
|
func (c *Context) CMOVLLT(mr, r operand.Op) {
|
|
c.addinstruction(x86.CMOVLLT(mr, r))
|
|
}
|
|
|
|
// CMOVLLT: Move if less (SF != OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVLLT m32 r32
|
|
// CMOVLLT r32 r32
|
|
//
|
|
// Construct and append a CMOVLLT instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMOVLLT(mr, r operand.Op) { ctx.CMOVLLT(mr, r) }
|
|
|
|
// CMOVLMI: Move if sign (SF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVLMI m32 r32
|
|
// CMOVLMI r32 r32
|
|
//
|
|
// Construct and append a CMOVLMI instruction to the active function.
|
|
func (c *Context) CMOVLMI(mr, r operand.Op) {
|
|
c.addinstruction(x86.CMOVLMI(mr, r))
|
|
}
|
|
|
|
// CMOVLMI: Move if sign (SF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVLMI m32 r32
|
|
// CMOVLMI r32 r32
|
|
//
|
|
// Construct and append a CMOVLMI instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMOVLMI(mr, r operand.Op) { ctx.CMOVLMI(mr, r) }
|
|
|
|
// CMOVLNE: Move if not equal (ZF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVLNE m32 r32
|
|
// CMOVLNE r32 r32
|
|
//
|
|
// Construct and append a CMOVLNE instruction to the active function.
|
|
func (c *Context) CMOVLNE(mr, r operand.Op) {
|
|
c.addinstruction(x86.CMOVLNE(mr, r))
|
|
}
|
|
|
|
// CMOVLNE: Move if not equal (ZF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVLNE m32 r32
|
|
// CMOVLNE r32 r32
|
|
//
|
|
// Construct and append a CMOVLNE instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMOVLNE(mr, r operand.Op) { ctx.CMOVLNE(mr, r) }
|
|
|
|
// CMOVLOC: Move if not overflow (OF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVLOC m32 r32
|
|
// CMOVLOC r32 r32
|
|
//
|
|
// Construct and append a CMOVLOC instruction to the active function.
|
|
func (c *Context) CMOVLOC(mr, r operand.Op) {
|
|
c.addinstruction(x86.CMOVLOC(mr, r))
|
|
}
|
|
|
|
// CMOVLOC: Move if not overflow (OF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVLOC m32 r32
|
|
// CMOVLOC r32 r32
|
|
//
|
|
// Construct and append a CMOVLOC instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMOVLOC(mr, r operand.Op) { ctx.CMOVLOC(mr, r) }
|
|
|
|
// CMOVLOS: Move if overflow (OF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVLOS m32 r32
|
|
// CMOVLOS r32 r32
|
|
//
|
|
// Construct and append a CMOVLOS instruction to the active function.
|
|
func (c *Context) CMOVLOS(mr, r operand.Op) {
|
|
c.addinstruction(x86.CMOVLOS(mr, r))
|
|
}
|
|
|
|
// CMOVLOS: Move if overflow (OF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVLOS m32 r32
|
|
// CMOVLOS r32 r32
|
|
//
|
|
// Construct and append a CMOVLOS instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMOVLOS(mr, r operand.Op) { ctx.CMOVLOS(mr, r) }
|
|
|
|
// CMOVLPC: Move if not parity (PF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVLPC m32 r32
|
|
// CMOVLPC r32 r32
|
|
//
|
|
// Construct and append a CMOVLPC instruction to the active function.
|
|
func (c *Context) CMOVLPC(mr, r operand.Op) {
|
|
c.addinstruction(x86.CMOVLPC(mr, r))
|
|
}
|
|
|
|
// CMOVLPC: Move if not parity (PF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVLPC m32 r32
|
|
// CMOVLPC r32 r32
|
|
//
|
|
// Construct and append a CMOVLPC instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMOVLPC(mr, r operand.Op) { ctx.CMOVLPC(mr, r) }
|
|
|
|
// CMOVLPL: Move if not sign (SF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVLPL m32 r32
|
|
// CMOVLPL r32 r32
|
|
//
|
|
// Construct and append a CMOVLPL instruction to the active function.
|
|
func (c *Context) CMOVLPL(mr, r operand.Op) {
|
|
c.addinstruction(x86.CMOVLPL(mr, r))
|
|
}
|
|
|
|
// CMOVLPL: Move if not sign (SF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVLPL m32 r32
|
|
// CMOVLPL r32 r32
|
|
//
|
|
// Construct and append a CMOVLPL instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMOVLPL(mr, r operand.Op) { ctx.CMOVLPL(mr, r) }
|
|
|
|
// CMOVLPS: Move if parity (PF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVLPS m32 r32
|
|
// CMOVLPS r32 r32
|
|
//
|
|
// Construct and append a CMOVLPS instruction to the active function.
|
|
func (c *Context) CMOVLPS(mr, r operand.Op) {
|
|
c.addinstruction(x86.CMOVLPS(mr, r))
|
|
}
|
|
|
|
// CMOVLPS: Move if parity (PF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVLPS m32 r32
|
|
// CMOVLPS r32 r32
|
|
//
|
|
// Construct and append a CMOVLPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMOVLPS(mr, r operand.Op) { ctx.CMOVLPS(mr, r) }
|
|
|
|
// CMOVQCC: Move if above or equal (CF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVQCC m64 r64
|
|
// CMOVQCC r64 r64
|
|
//
|
|
// Construct and append a CMOVQCC instruction to the active function.
|
|
func (c *Context) CMOVQCC(mr, r operand.Op) {
|
|
c.addinstruction(x86.CMOVQCC(mr, r))
|
|
}
|
|
|
|
// CMOVQCC: Move if above or equal (CF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVQCC m64 r64
|
|
// CMOVQCC r64 r64
|
|
//
|
|
// Construct and append a CMOVQCC instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMOVQCC(mr, r operand.Op) { ctx.CMOVQCC(mr, r) }
|
|
|
|
// CMOVQCS: Move if below (CF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVQCS m64 r64
|
|
// CMOVQCS r64 r64
|
|
//
|
|
// Construct and append a CMOVQCS instruction to the active function.
|
|
func (c *Context) CMOVQCS(mr, r operand.Op) {
|
|
c.addinstruction(x86.CMOVQCS(mr, r))
|
|
}
|
|
|
|
// CMOVQCS: Move if below (CF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVQCS m64 r64
|
|
// CMOVQCS r64 r64
|
|
//
|
|
// Construct and append a CMOVQCS instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMOVQCS(mr, r operand.Op) { ctx.CMOVQCS(mr, r) }
|
|
|
|
// CMOVQEQ: Move if equal (ZF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVQEQ m64 r64
|
|
// CMOVQEQ r64 r64
|
|
//
|
|
// Construct and append a CMOVQEQ instruction to the active function.
|
|
func (c *Context) CMOVQEQ(mr, r operand.Op) {
|
|
c.addinstruction(x86.CMOVQEQ(mr, r))
|
|
}
|
|
|
|
// CMOVQEQ: Move if equal (ZF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVQEQ m64 r64
|
|
// CMOVQEQ r64 r64
|
|
//
|
|
// Construct and append a CMOVQEQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMOVQEQ(mr, r operand.Op) { ctx.CMOVQEQ(mr, r) }
|
|
|
|
// CMOVQGE: Move if greater or equal (SF == OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVQGE m64 r64
|
|
// CMOVQGE r64 r64
|
|
//
|
|
// Construct and append a CMOVQGE instruction to the active function.
|
|
func (c *Context) CMOVQGE(mr, r operand.Op) {
|
|
c.addinstruction(x86.CMOVQGE(mr, r))
|
|
}
|
|
|
|
// CMOVQGE: Move if greater or equal (SF == OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVQGE m64 r64
|
|
// CMOVQGE r64 r64
|
|
//
|
|
// Construct and append a CMOVQGE instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMOVQGE(mr, r operand.Op) { ctx.CMOVQGE(mr, r) }
|
|
|
|
// CMOVQGT: Move if greater (ZF == 0 and SF == OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVQGT m64 r64
|
|
// CMOVQGT r64 r64
|
|
//
|
|
// Construct and append a CMOVQGT instruction to the active function.
|
|
func (c *Context) CMOVQGT(mr, r operand.Op) {
|
|
c.addinstruction(x86.CMOVQGT(mr, r))
|
|
}
|
|
|
|
// CMOVQGT: Move if greater (ZF == 0 and SF == OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVQGT m64 r64
|
|
// CMOVQGT r64 r64
|
|
//
|
|
// Construct and append a CMOVQGT instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMOVQGT(mr, r operand.Op) { ctx.CMOVQGT(mr, r) }
|
|
|
|
// CMOVQHI: Move if above (CF == 0 and ZF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVQHI m64 r64
|
|
// CMOVQHI r64 r64
|
|
//
|
|
// Construct and append a CMOVQHI instruction to the active function.
|
|
func (c *Context) CMOVQHI(mr, r operand.Op) {
|
|
c.addinstruction(x86.CMOVQHI(mr, r))
|
|
}
|
|
|
|
// CMOVQHI: Move if above (CF == 0 and ZF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// CMOVQHI m64 r64
|
|
// CMOVQHI r64 r64
|
|
//
|
|
// Construct and append a CMOVQHI instruction to the active function.
|
|
// Operates on the global context.
|
|
func CMOVQHI(mr, r operand.Op) { ctx.CMOVQHI(mr, r) }
|
|
|
|
// CMOVQLE: Move if less or equal (ZF == 1 or SF != OF).
//
// Forms:
//
//	CMOVQLE m64 r64
//	CMOVQLE r64 r64
//
// Construct and append a CMOVQLE instruction to the active function.
func (c *Context) CMOVQLE(mr, r operand.Op) {
	c.addinstruction(x86.CMOVQLE(mr, r))
}

// CMOVQLE: Move if less or equal (ZF == 1 or SF != OF).
//
// Forms:
//
//	CMOVQLE m64 r64
//	CMOVQLE r64 r64
//
// Construct and append a CMOVQLE instruction to the active function.
// Operates on the global context.
func CMOVQLE(mr, r operand.Op) { ctx.CMOVQLE(mr, r) }

// CMOVQLS: Move if below or equal (CF == 1 or ZF == 1).
//
// Forms:
//
//	CMOVQLS m64 r64
//	CMOVQLS r64 r64
//
// Construct and append a CMOVQLS instruction to the active function.
func (c *Context) CMOVQLS(mr, r operand.Op) {
	c.addinstruction(x86.CMOVQLS(mr, r))
}

// CMOVQLS: Move if below or equal (CF == 1 or ZF == 1).
//
// Forms:
//
//	CMOVQLS m64 r64
//	CMOVQLS r64 r64
//
// Construct and append a CMOVQLS instruction to the active function.
// Operates on the global context.
func CMOVQLS(mr, r operand.Op) { ctx.CMOVQLS(mr, r) }

// CMOVQLT: Move if less (SF != OF).
//
// Forms:
//
//	CMOVQLT m64 r64
//	CMOVQLT r64 r64
//
// Construct and append a CMOVQLT instruction to the active function.
func (c *Context) CMOVQLT(mr, r operand.Op) {
	c.addinstruction(x86.CMOVQLT(mr, r))
}

// CMOVQLT: Move if less (SF != OF).
//
// Forms:
//
//	CMOVQLT m64 r64
//	CMOVQLT r64 r64
//
// Construct and append a CMOVQLT instruction to the active function.
// Operates on the global context.
func CMOVQLT(mr, r operand.Op) { ctx.CMOVQLT(mr, r) }

// CMOVQMI: Move if sign (SF == 1).
//
// Forms:
//
//	CMOVQMI m64 r64
//	CMOVQMI r64 r64
//
// Construct and append a CMOVQMI instruction to the active function.
func (c *Context) CMOVQMI(mr, r operand.Op) {
	c.addinstruction(x86.CMOVQMI(mr, r))
}

// CMOVQMI: Move if sign (SF == 1).
//
// Forms:
//
//	CMOVQMI m64 r64
//	CMOVQMI r64 r64
//
// Construct and append a CMOVQMI instruction to the active function.
// Operates on the global context.
func CMOVQMI(mr, r operand.Op) { ctx.CMOVQMI(mr, r) }

// CMOVQNE: Move if not equal (ZF == 0).
//
// Forms:
//
//	CMOVQNE m64 r64
//	CMOVQNE r64 r64
//
// Construct and append a CMOVQNE instruction to the active function.
func (c *Context) CMOVQNE(mr, r operand.Op) {
	c.addinstruction(x86.CMOVQNE(mr, r))
}

// CMOVQNE: Move if not equal (ZF == 0).
//
// Forms:
//
//	CMOVQNE m64 r64
//	CMOVQNE r64 r64
//
// Construct and append a CMOVQNE instruction to the active function.
// Operates on the global context.
func CMOVQNE(mr, r operand.Op) { ctx.CMOVQNE(mr, r) }

// CMOVQOC: Move if not overflow (OF == 0).
//
// Forms:
//
//	CMOVQOC m64 r64
//	CMOVQOC r64 r64
//
// Construct and append a CMOVQOC instruction to the active function.
func (c *Context) CMOVQOC(mr, r operand.Op) {
	c.addinstruction(x86.CMOVQOC(mr, r))
}

// CMOVQOC: Move if not overflow (OF == 0).
//
// Forms:
//
//	CMOVQOC m64 r64
//	CMOVQOC r64 r64
//
// Construct and append a CMOVQOC instruction to the active function.
// Operates on the global context.
func CMOVQOC(mr, r operand.Op) { ctx.CMOVQOC(mr, r) }

// CMOVQOS: Move if overflow (OF == 1).
//
// Forms:
//
//	CMOVQOS m64 r64
//	CMOVQOS r64 r64
//
// Construct and append a CMOVQOS instruction to the active function.
func (c *Context) CMOVQOS(mr, r operand.Op) {
	c.addinstruction(x86.CMOVQOS(mr, r))
}

// CMOVQOS: Move if overflow (OF == 1).
//
// Forms:
//
//	CMOVQOS m64 r64
//	CMOVQOS r64 r64
//
// Construct and append a CMOVQOS instruction to the active function.
// Operates on the global context.
func CMOVQOS(mr, r operand.Op) { ctx.CMOVQOS(mr, r) }

// CMOVQPC: Move if not parity (PF == 0).
//
// Forms:
//
//	CMOVQPC m64 r64
//	CMOVQPC r64 r64
//
// Construct and append a CMOVQPC instruction to the active function.
func (c *Context) CMOVQPC(mr, r operand.Op) {
	c.addinstruction(x86.CMOVQPC(mr, r))
}

// CMOVQPC: Move if not parity (PF == 0).
//
// Forms:
//
//	CMOVQPC m64 r64
//	CMOVQPC r64 r64
//
// Construct and append a CMOVQPC instruction to the active function.
// Operates on the global context.
func CMOVQPC(mr, r operand.Op) { ctx.CMOVQPC(mr, r) }

// CMOVQPL: Move if not sign (SF == 0).
//
// Forms:
//
//	CMOVQPL m64 r64
//	CMOVQPL r64 r64
//
// Construct and append a CMOVQPL instruction to the active function.
func (c *Context) CMOVQPL(mr, r operand.Op) {
	c.addinstruction(x86.CMOVQPL(mr, r))
}

// CMOVQPL: Move if not sign (SF == 0).
//
// Forms:
//
//	CMOVQPL m64 r64
//	CMOVQPL r64 r64
//
// Construct and append a CMOVQPL instruction to the active function.
// Operates on the global context.
func CMOVQPL(mr, r operand.Op) { ctx.CMOVQPL(mr, r) }

// CMOVQPS: Move if parity (PF == 1).
//
// Forms:
//
//	CMOVQPS m64 r64
//	CMOVQPS r64 r64
//
// Construct and append a CMOVQPS instruction to the active function.
func (c *Context) CMOVQPS(mr, r operand.Op) {
	c.addinstruction(x86.CMOVQPS(mr, r))
}

// CMOVQPS: Move if parity (PF == 1).
//
// Forms:
//
//	CMOVQPS m64 r64
//	CMOVQPS r64 r64
//
// Construct and append a CMOVQPS instruction to the active function.
// Operates on the global context.
func CMOVQPS(mr, r operand.Op) { ctx.CMOVQPS(mr, r) }
|
|
|
|
// CMOVWCC: Move if above or equal (CF == 0).
//
// Forms:
//
//	CMOVWCC m16 r16
//	CMOVWCC r16 r16
//
// Construct and append a CMOVWCC instruction to the active function.
func (c *Context) CMOVWCC(mr, r operand.Op) {
	c.addinstruction(x86.CMOVWCC(mr, r))
}

// CMOVWCC: Move if above or equal (CF == 0).
//
// Forms:
//
//	CMOVWCC m16 r16
//	CMOVWCC r16 r16
//
// Construct and append a CMOVWCC instruction to the active function.
// Operates on the global context.
func CMOVWCC(mr, r operand.Op) { ctx.CMOVWCC(mr, r) }

// CMOVWCS: Move if below (CF == 1).
//
// Forms:
//
//	CMOVWCS m16 r16
//	CMOVWCS r16 r16
//
// Construct and append a CMOVWCS instruction to the active function.
func (c *Context) CMOVWCS(mr, r operand.Op) {
	c.addinstruction(x86.CMOVWCS(mr, r))
}

// CMOVWCS: Move if below (CF == 1).
//
// Forms:
//
//	CMOVWCS m16 r16
//	CMOVWCS r16 r16
//
// Construct and append a CMOVWCS instruction to the active function.
// Operates on the global context.
func CMOVWCS(mr, r operand.Op) { ctx.CMOVWCS(mr, r) }

// CMOVWEQ: Move if equal (ZF == 1).
//
// Forms:
//
//	CMOVWEQ m16 r16
//	CMOVWEQ r16 r16
//
// Construct and append a CMOVWEQ instruction to the active function.
func (c *Context) CMOVWEQ(mr, r operand.Op) {
	c.addinstruction(x86.CMOVWEQ(mr, r))
}

// CMOVWEQ: Move if equal (ZF == 1).
//
// Forms:
//
//	CMOVWEQ m16 r16
//	CMOVWEQ r16 r16
//
// Construct and append a CMOVWEQ instruction to the active function.
// Operates on the global context.
func CMOVWEQ(mr, r operand.Op) { ctx.CMOVWEQ(mr, r) }

// CMOVWGE: Move if greater or equal (SF == OF).
//
// Forms:
//
//	CMOVWGE m16 r16
//	CMOVWGE r16 r16
//
// Construct and append a CMOVWGE instruction to the active function.
func (c *Context) CMOVWGE(mr, r operand.Op) {
	c.addinstruction(x86.CMOVWGE(mr, r))
}

// CMOVWGE: Move if greater or equal (SF == OF).
//
// Forms:
//
//	CMOVWGE m16 r16
//	CMOVWGE r16 r16
//
// Construct and append a CMOVWGE instruction to the active function.
// Operates on the global context.
func CMOVWGE(mr, r operand.Op) { ctx.CMOVWGE(mr, r) }

// CMOVWGT: Move if greater (ZF == 0 and SF == OF).
//
// Forms:
//
//	CMOVWGT m16 r16
//	CMOVWGT r16 r16
//
// Construct and append a CMOVWGT instruction to the active function.
func (c *Context) CMOVWGT(mr, r operand.Op) {
	c.addinstruction(x86.CMOVWGT(mr, r))
}

// CMOVWGT: Move if greater (ZF == 0 and SF == OF).
//
// Forms:
//
//	CMOVWGT m16 r16
//	CMOVWGT r16 r16
//
// Construct and append a CMOVWGT instruction to the active function.
// Operates on the global context.
func CMOVWGT(mr, r operand.Op) { ctx.CMOVWGT(mr, r) }

// CMOVWHI: Move if above (CF == 0 and ZF == 0).
//
// Forms:
//
//	CMOVWHI m16 r16
//	CMOVWHI r16 r16
//
// Construct and append a CMOVWHI instruction to the active function.
func (c *Context) CMOVWHI(mr, r operand.Op) {
	c.addinstruction(x86.CMOVWHI(mr, r))
}

// CMOVWHI: Move if above (CF == 0 and ZF == 0).
//
// Forms:
//
//	CMOVWHI m16 r16
//	CMOVWHI r16 r16
//
// Construct and append a CMOVWHI instruction to the active function.
// Operates on the global context.
func CMOVWHI(mr, r operand.Op) { ctx.CMOVWHI(mr, r) }

// CMOVWLE: Move if less or equal (ZF == 1 or SF != OF).
//
// Forms:
//
//	CMOVWLE m16 r16
//	CMOVWLE r16 r16
//
// Construct and append a CMOVWLE instruction to the active function.
func (c *Context) CMOVWLE(mr, r operand.Op) {
	c.addinstruction(x86.CMOVWLE(mr, r))
}

// CMOVWLE: Move if less or equal (ZF == 1 or SF != OF).
//
// Forms:
//
//	CMOVWLE m16 r16
//	CMOVWLE r16 r16
//
// Construct and append a CMOVWLE instruction to the active function.
// Operates on the global context.
func CMOVWLE(mr, r operand.Op) { ctx.CMOVWLE(mr, r) }

// CMOVWLS: Move if below or equal (CF == 1 or ZF == 1).
//
// Forms:
//
//	CMOVWLS m16 r16
//	CMOVWLS r16 r16
//
// Construct and append a CMOVWLS instruction to the active function.
func (c *Context) CMOVWLS(mr, r operand.Op) {
	c.addinstruction(x86.CMOVWLS(mr, r))
}

// CMOVWLS: Move if below or equal (CF == 1 or ZF == 1).
//
// Forms:
//
//	CMOVWLS m16 r16
//	CMOVWLS r16 r16
//
// Construct and append a CMOVWLS instruction to the active function.
// Operates on the global context.
func CMOVWLS(mr, r operand.Op) { ctx.CMOVWLS(mr, r) }

// CMOVWLT: Move if less (SF != OF).
//
// Forms:
//
//	CMOVWLT m16 r16
//	CMOVWLT r16 r16
//
// Construct and append a CMOVWLT instruction to the active function.
func (c *Context) CMOVWLT(mr, r operand.Op) {
	c.addinstruction(x86.CMOVWLT(mr, r))
}

// CMOVWLT: Move if less (SF != OF).
//
// Forms:
//
//	CMOVWLT m16 r16
//	CMOVWLT r16 r16
//
// Construct and append a CMOVWLT instruction to the active function.
// Operates on the global context.
func CMOVWLT(mr, r operand.Op) { ctx.CMOVWLT(mr, r) }

// CMOVWMI: Move if sign (SF == 1).
//
// Forms:
//
//	CMOVWMI m16 r16
//	CMOVWMI r16 r16
//
// Construct and append a CMOVWMI instruction to the active function.
func (c *Context) CMOVWMI(mr, r operand.Op) {
	c.addinstruction(x86.CMOVWMI(mr, r))
}

// CMOVWMI: Move if sign (SF == 1).
//
// Forms:
//
//	CMOVWMI m16 r16
//	CMOVWMI r16 r16
//
// Construct and append a CMOVWMI instruction to the active function.
// Operates on the global context.
func CMOVWMI(mr, r operand.Op) { ctx.CMOVWMI(mr, r) }

// CMOVWNE: Move if not equal (ZF == 0).
//
// Forms:
//
//	CMOVWNE m16 r16
//	CMOVWNE r16 r16
//
// Construct and append a CMOVWNE instruction to the active function.
func (c *Context) CMOVWNE(mr, r operand.Op) {
	c.addinstruction(x86.CMOVWNE(mr, r))
}

// CMOVWNE: Move if not equal (ZF == 0).
//
// Forms:
//
//	CMOVWNE m16 r16
//	CMOVWNE r16 r16
//
// Construct and append a CMOVWNE instruction to the active function.
// Operates on the global context.
func CMOVWNE(mr, r operand.Op) { ctx.CMOVWNE(mr, r) }

// CMOVWOC: Move if not overflow (OF == 0).
//
// Forms:
//
//	CMOVWOC m16 r16
//	CMOVWOC r16 r16
//
// Construct and append a CMOVWOC instruction to the active function.
func (c *Context) CMOVWOC(mr, r operand.Op) {
	c.addinstruction(x86.CMOVWOC(mr, r))
}

// CMOVWOC: Move if not overflow (OF == 0).
//
// Forms:
//
//	CMOVWOC m16 r16
//	CMOVWOC r16 r16
//
// Construct and append a CMOVWOC instruction to the active function.
// Operates on the global context.
func CMOVWOC(mr, r operand.Op) { ctx.CMOVWOC(mr, r) }

// CMOVWOS: Move if overflow (OF == 1).
//
// Forms:
//
//	CMOVWOS m16 r16
//	CMOVWOS r16 r16
//
// Construct and append a CMOVWOS instruction to the active function.
func (c *Context) CMOVWOS(mr, r operand.Op) {
	c.addinstruction(x86.CMOVWOS(mr, r))
}

// CMOVWOS: Move if overflow (OF == 1).
//
// Forms:
//
//	CMOVWOS m16 r16
//	CMOVWOS r16 r16
//
// Construct and append a CMOVWOS instruction to the active function.
// Operates on the global context.
func CMOVWOS(mr, r operand.Op) { ctx.CMOVWOS(mr, r) }

// CMOVWPC: Move if not parity (PF == 0).
//
// Forms:
//
//	CMOVWPC m16 r16
//	CMOVWPC r16 r16
//
// Construct and append a CMOVWPC instruction to the active function.
func (c *Context) CMOVWPC(mr, r operand.Op) {
	c.addinstruction(x86.CMOVWPC(mr, r))
}

// CMOVWPC: Move if not parity (PF == 0).
//
// Forms:
//
//	CMOVWPC m16 r16
//	CMOVWPC r16 r16
//
// Construct and append a CMOVWPC instruction to the active function.
// Operates on the global context.
func CMOVWPC(mr, r operand.Op) { ctx.CMOVWPC(mr, r) }

// CMOVWPL: Move if not sign (SF == 0).
//
// Forms:
//
//	CMOVWPL m16 r16
//	CMOVWPL r16 r16
//
// Construct and append a CMOVWPL instruction to the active function.
func (c *Context) CMOVWPL(mr, r operand.Op) {
	c.addinstruction(x86.CMOVWPL(mr, r))
}

// CMOVWPL: Move if not sign (SF == 0).
//
// Forms:
//
//	CMOVWPL m16 r16
//	CMOVWPL r16 r16
//
// Construct and append a CMOVWPL instruction to the active function.
// Operates on the global context.
func CMOVWPL(mr, r operand.Op) { ctx.CMOVWPL(mr, r) }

// CMOVWPS: Move if parity (PF == 1).
//
// Forms:
//
//	CMOVWPS m16 r16
//	CMOVWPS r16 r16
//
// Construct and append a CMOVWPS instruction to the active function.
func (c *Context) CMOVWPS(mr, r operand.Op) {
	c.addinstruction(x86.CMOVWPS(mr, r))
}

// CMOVWPS: Move if parity (PF == 1).
//
// Forms:
//
//	CMOVWPS m16 r16
//	CMOVWPS r16 r16
//
// Construct and append a CMOVWPS instruction to the active function.
// Operates on the global context.
func CMOVWPS(mr, r operand.Op) { ctx.CMOVWPS(mr, r) }
|
|
|
|
// CMPB: Compare Two Operands.
//
// Forms:
//
//	CMPB al imm8
//	CMPB m8 imm8
//	CMPB m8 r8
//	CMPB r8 imm8
//	CMPB r8 m8
//	CMPB r8 r8
//
// Construct and append a CMPB instruction to the active function.
func (c *Context) CMPB(amr, imr operand.Op) {
	c.addinstruction(x86.CMPB(amr, imr))
}

// CMPB: Compare Two Operands.
//
// Forms:
//
//	CMPB al imm8
//	CMPB m8 imm8
//	CMPB m8 r8
//	CMPB r8 imm8
//	CMPB r8 m8
//	CMPB r8 r8
//
// Construct and append a CMPB instruction to the active function.
// Operates on the global context.
func CMPB(amr, imr operand.Op) { ctx.CMPB(amr, imr) }

// CMPL: Compare Two Operands.
//
// Forms:
//
//	CMPL eax imm32
//	CMPL m32 imm32
//	CMPL m32 imm8
//	CMPL m32 r32
//	CMPL r32 imm32
//	CMPL r32 imm8
//	CMPL r32 m32
//	CMPL r32 r32
//
// Construct and append a CMPL instruction to the active function.
func (c *Context) CMPL(emr, imr operand.Op) {
	c.addinstruction(x86.CMPL(emr, imr))
}

// CMPL: Compare Two Operands.
//
// Forms:
//
//	CMPL eax imm32
//	CMPL m32 imm32
//	CMPL m32 imm8
//	CMPL m32 r32
//	CMPL r32 imm32
//	CMPL r32 imm8
//	CMPL r32 m32
//	CMPL r32 r32
//
// Construct and append a CMPL instruction to the active function.
// Operates on the global context.
func CMPL(emr, imr operand.Op) { ctx.CMPL(emr, imr) }

// CMPPD: Compare Packed Double-Precision Floating-Point Values.
//
// Forms:
//
//	CMPPD m128 xmm imm8
//	CMPPD xmm xmm imm8
//
// Construct and append a CMPPD instruction to the active function.
func (c *Context) CMPPD(mx, x, i operand.Op) {
	c.addinstruction(x86.CMPPD(mx, x, i))
}

// CMPPD: Compare Packed Double-Precision Floating-Point Values.
//
// Forms:
//
//	CMPPD m128 xmm imm8
//	CMPPD xmm xmm imm8
//
// Construct and append a CMPPD instruction to the active function.
// Operates on the global context.
func CMPPD(mx, x, i operand.Op) { ctx.CMPPD(mx, x, i) }

// CMPPS: Compare Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	CMPPS m128 xmm imm8
//	CMPPS xmm xmm imm8
//
// Construct and append a CMPPS instruction to the active function.
func (c *Context) CMPPS(mx, x, i operand.Op) {
	c.addinstruction(x86.CMPPS(mx, x, i))
}

// CMPPS: Compare Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	CMPPS m128 xmm imm8
//	CMPPS xmm xmm imm8
//
// Construct and append a CMPPS instruction to the active function.
// Operates on the global context.
func CMPPS(mx, x, i operand.Op) { ctx.CMPPS(mx, x, i) }

// CMPQ: Compare Two Operands.
//
// Forms:
//
//	CMPQ m64 imm32
//	CMPQ m64 imm8
//	CMPQ m64 r64
//	CMPQ r64 imm32
//	CMPQ r64 imm8
//	CMPQ r64 m64
//	CMPQ r64 r64
//	CMPQ rax imm32
//
// Construct and append a CMPQ instruction to the active function.
func (c *Context) CMPQ(mr, imr operand.Op) {
	c.addinstruction(x86.CMPQ(mr, imr))
}

// CMPQ: Compare Two Operands.
//
// Forms:
//
//	CMPQ m64 imm32
//	CMPQ m64 imm8
//	CMPQ m64 r64
//	CMPQ r64 imm32
//	CMPQ r64 imm8
//	CMPQ r64 m64
//	CMPQ r64 r64
//	CMPQ rax imm32
//
// Construct and append a CMPQ instruction to the active function.
// Operates on the global context.
func CMPQ(mr, imr operand.Op) { ctx.CMPQ(mr, imr) }

// CMPSD: Compare Scalar Double-Precision Floating-Point Values.
//
// Forms:
//
//	CMPSD m64 xmm imm8
//	CMPSD xmm xmm imm8
//
// Construct and append a CMPSD instruction to the active function.
func (c *Context) CMPSD(mx, x, i operand.Op) {
	c.addinstruction(x86.CMPSD(mx, x, i))
}

// CMPSD: Compare Scalar Double-Precision Floating-Point Values.
//
// Forms:
//
//	CMPSD m64 xmm imm8
//	CMPSD xmm xmm imm8
//
// Construct and append a CMPSD instruction to the active function.
// Operates on the global context.
func CMPSD(mx, x, i operand.Op) { ctx.CMPSD(mx, x, i) }

// CMPSS: Compare Scalar Single-Precision Floating-Point Values.
//
// Forms:
//
//	CMPSS m32 xmm imm8
//	CMPSS xmm xmm imm8
//
// Construct and append a CMPSS instruction to the active function.
func (c *Context) CMPSS(mx, x, i operand.Op) {
	c.addinstruction(x86.CMPSS(mx, x, i))
}

// CMPSS: Compare Scalar Single-Precision Floating-Point Values.
//
// Forms:
//
//	CMPSS m32 xmm imm8
//	CMPSS xmm xmm imm8
//
// Construct and append a CMPSS instruction to the active function.
// Operates on the global context.
func CMPSS(mx, x, i operand.Op) { ctx.CMPSS(mx, x, i) }

// CMPW: Compare Two Operands.
//
// Forms:
//
//	CMPW ax imm16
//	CMPW m16 imm16
//	CMPW m16 imm8
//	CMPW m16 r16
//	CMPW r16 imm16
//	CMPW r16 imm8
//	CMPW r16 m16
//	CMPW r16 r16
//
// Construct and append a CMPW instruction to the active function.
func (c *Context) CMPW(amr, imr operand.Op) {
	c.addinstruction(x86.CMPW(amr, imr))
}

// CMPW: Compare Two Operands.
//
// Forms:
//
//	CMPW ax imm16
//	CMPW m16 imm16
//	CMPW m16 imm8
//	CMPW m16 r16
//	CMPW r16 imm16
//	CMPW r16 imm8
//	CMPW r16 m16
//	CMPW r16 r16
//
// Construct and append a CMPW instruction to the active function.
// Operates on the global context.
func CMPW(amr, imr operand.Op) { ctx.CMPW(amr, imr) }
|
|
|
|
// CMPXCHG16B: Compare and Exchange 16 Bytes.
//
// Forms:
//
//	CMPXCHG16B m128
//
// Construct and append a CMPXCHG16B instruction to the active function.
func (c *Context) CMPXCHG16B(m operand.Op) {
	c.addinstruction(x86.CMPXCHG16B(m))
}

// CMPXCHG16B: Compare and Exchange 16 Bytes.
//
// Forms:
//
//	CMPXCHG16B m128
//
// Construct and append a CMPXCHG16B instruction to the active function.
// Operates on the global context.
func CMPXCHG16B(m operand.Op) { ctx.CMPXCHG16B(m) }

// CMPXCHG8B: Compare and Exchange 8 Bytes.
//
// Forms:
//
//	CMPXCHG8B m64
//
// Construct and append a CMPXCHG8B instruction to the active function.
func (c *Context) CMPXCHG8B(m operand.Op) {
	c.addinstruction(x86.CMPXCHG8B(m))
}

// CMPXCHG8B: Compare and Exchange 8 Bytes.
//
// Forms:
//
//	CMPXCHG8B m64
//
// Construct and append a CMPXCHG8B instruction to the active function.
// Operates on the global context.
func CMPXCHG8B(m operand.Op) { ctx.CMPXCHG8B(m) }

// CMPXCHGB: Compare and Exchange.
//
// Forms:
//
//	CMPXCHGB r8 m8
//	CMPXCHGB r8 r8
//
// Construct and append a CMPXCHGB instruction to the active function.
func (c *Context) CMPXCHGB(r, mr operand.Op) {
	c.addinstruction(x86.CMPXCHGB(r, mr))
}

// CMPXCHGB: Compare and Exchange.
//
// Forms:
//
//	CMPXCHGB r8 m8
//	CMPXCHGB r8 r8
//
// Construct and append a CMPXCHGB instruction to the active function.
// Operates on the global context.
func CMPXCHGB(r, mr operand.Op) { ctx.CMPXCHGB(r, mr) }

// CMPXCHGL: Compare and Exchange.
//
// Forms:
//
//	CMPXCHGL r32 m32
//	CMPXCHGL r32 r32
//
// Construct and append a CMPXCHGL instruction to the active function.
func (c *Context) CMPXCHGL(r, mr operand.Op) {
	c.addinstruction(x86.CMPXCHGL(r, mr))
}

// CMPXCHGL: Compare and Exchange.
//
// Forms:
//
//	CMPXCHGL r32 m32
//	CMPXCHGL r32 r32
//
// Construct and append a CMPXCHGL instruction to the active function.
// Operates on the global context.
func CMPXCHGL(r, mr operand.Op) { ctx.CMPXCHGL(r, mr) }

// CMPXCHGQ: Compare and Exchange.
//
// Forms:
//
//	CMPXCHGQ r64 m64
//	CMPXCHGQ r64 r64
//
// Construct and append a CMPXCHGQ instruction to the active function.
func (c *Context) CMPXCHGQ(r, mr operand.Op) {
	c.addinstruction(x86.CMPXCHGQ(r, mr))
}

// CMPXCHGQ: Compare and Exchange.
//
// Forms:
//
//	CMPXCHGQ r64 m64
//	CMPXCHGQ r64 r64
//
// Construct and append a CMPXCHGQ instruction to the active function.
// Operates on the global context.
func CMPXCHGQ(r, mr operand.Op) { ctx.CMPXCHGQ(r, mr) }

// CMPXCHGW: Compare and Exchange.
//
// Forms:
//
//	CMPXCHGW r16 m16
//	CMPXCHGW r16 r16
//
// Construct and append a CMPXCHGW instruction to the active function.
func (c *Context) CMPXCHGW(r, mr operand.Op) {
	c.addinstruction(x86.CMPXCHGW(r, mr))
}

// CMPXCHGW: Compare and Exchange.
//
// Forms:
//
//	CMPXCHGW r16 m16
//	CMPXCHGW r16 r16
//
// Construct and append a CMPXCHGW instruction to the active function.
// Operates on the global context.
func CMPXCHGW(r, mr operand.Op) { ctx.CMPXCHGW(r, mr) }
|
|
|
|
// COMISD: Compare Scalar Ordered Double-Precision Floating-Point Values and Set EFLAGS.
//
// Forms:
//
//	COMISD m64 xmm
//	COMISD xmm xmm
//
// Construct and append a COMISD instruction to the active function.
func (c *Context) COMISD(mx, x operand.Op) {
	c.addinstruction(x86.COMISD(mx, x))
}

// COMISD: Compare Scalar Ordered Double-Precision Floating-Point Values and Set EFLAGS.
//
// Forms:
//
//	COMISD m64 xmm
//	COMISD xmm xmm
//
// Construct and append a COMISD instruction to the active function.
// Operates on the global context.
func COMISD(mx, x operand.Op) { ctx.COMISD(mx, x) }

// COMISS: Compare Scalar Ordered Single-Precision Floating-Point Values and Set EFLAGS.
//
// Forms:
//
//	COMISS m32 xmm
//	COMISS xmm xmm
//
// Construct and append a COMISS instruction to the active function.
func (c *Context) COMISS(mx, x operand.Op) {
	c.addinstruction(x86.COMISS(mx, x))
}

// COMISS: Compare Scalar Ordered Single-Precision Floating-Point Values and Set EFLAGS.
//
// Forms:
//
//	COMISS m32 xmm
//	COMISS xmm xmm
//
// Construct and append a COMISS instruction to the active function.
// Operates on the global context.
func COMISS(mx, x operand.Op) { ctx.COMISS(mx, x) }

// CPUID: CPU Identification.
//
// Forms:
//
//	CPUID
//
// Construct and append a CPUID instruction to the active function.
func (c *Context) CPUID() {
	c.addinstruction(x86.CPUID())
}

// CPUID: CPU Identification.
//
// Forms:
//
//	CPUID
//
// Construct and append a CPUID instruction to the active function.
// Operates on the global context.
func CPUID() { ctx.CPUID() }

// CQO: Convert Quadword to Octaword.
//
// Forms:
//
//	CQO
//
// Construct and append a CQO instruction to the active function.
func (c *Context) CQO() {
	c.addinstruction(x86.CQO())
}

// CQO: Convert Quadword to Octaword.
//
// Forms:
//
//	CQO
//
// Construct and append a CQO instruction to the active function.
// Operates on the global context.
func CQO() { ctx.CQO() }
|
|
|
|
// CRC32B: Accumulate CRC32 Value.
//
// Forms:
//
//	CRC32B m8 r32
//	CRC32B m8 r64
//	CRC32B r8 r32
//	CRC32B r8 r64
//
// Construct and append a CRC32B instruction to the active function.
func (c *Context) CRC32B(mr, r operand.Op) {
	c.addinstruction(x86.CRC32B(mr, r))
}

// CRC32B: Accumulate CRC32 Value.
//
// Forms:
//
//	CRC32B m8 r32
//	CRC32B m8 r64
//	CRC32B r8 r32
//	CRC32B r8 r64
//
// Construct and append a CRC32B instruction to the active function.
// Operates on the global context.
func CRC32B(mr, r operand.Op) { ctx.CRC32B(mr, r) }

// CRC32L: Accumulate CRC32 Value.
//
// Forms:
//
//	CRC32L m32 r32
//	CRC32L r32 r32
//
// Construct and append a CRC32L instruction to the active function.
func (c *Context) CRC32L(mr, r operand.Op) {
	c.addinstruction(x86.CRC32L(mr, r))
}

// CRC32L: Accumulate CRC32 Value.
//
// Forms:
//
//	CRC32L m32 r32
//	CRC32L r32 r32
//
// Construct and append a CRC32L instruction to the active function.
// Operates on the global context.
func CRC32L(mr, r operand.Op) { ctx.CRC32L(mr, r) }

// CRC32Q: Accumulate CRC32 Value.
//
// Forms:
//
//	CRC32Q m64 r64
//	CRC32Q r64 r64
//
// Construct and append a CRC32Q instruction to the active function.
func (c *Context) CRC32Q(mr, r operand.Op) {
	c.addinstruction(x86.CRC32Q(mr, r))
}

// CRC32Q: Accumulate CRC32 Value.
//
// Forms:
//
//	CRC32Q m64 r64
//	CRC32Q r64 r64
//
// Construct and append a CRC32Q instruction to the active function.
// Operates on the global context.
func CRC32Q(mr, r operand.Op) { ctx.CRC32Q(mr, r) }

// CRC32W: Accumulate CRC32 Value.
//
// Forms:
//
//	CRC32W m16 r32
//	CRC32W r16 r32
//
// Construct and append a CRC32W instruction to the active function.
func (c *Context) CRC32W(mr, r operand.Op) {
	c.addinstruction(x86.CRC32W(mr, r))
}

// CRC32W: Accumulate CRC32 Value.
//
// Forms:
//
//	CRC32W m16 r32
//	CRC32W r16 r32
//
// Construct and append a CRC32W instruction to the active function.
// Operates on the global context.
func CRC32W(mr, r operand.Op) { ctx.CRC32W(mr, r) }
|
|
|
|
// CVTPD2PL: Convert Packed Double-Precision FP Values to Packed Dword Integers.
//
// Forms:
//
//	CVTPD2PL m128 xmm
//	CVTPD2PL xmm xmm
//
// Construct and append a CVTPD2PL instruction to the active function.
func (c *Context) CVTPD2PL(mx, x operand.Op) {
	c.addinstruction(x86.CVTPD2PL(mx, x))
}

// CVTPD2PL: Convert Packed Double-Precision FP Values to Packed Dword Integers.
//
// Forms:
//
//	CVTPD2PL m128 xmm
//	CVTPD2PL xmm xmm
//
// Construct and append a CVTPD2PL instruction to the active function.
// Operates on the global context.
func CVTPD2PL(mx, x operand.Op) { ctx.CVTPD2PL(mx, x) }

// CVTPD2PS: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values.
//
// Forms:
//
//	CVTPD2PS m128 xmm
//	CVTPD2PS xmm xmm
//
// Construct and append a CVTPD2PS instruction to the active function.
func (c *Context) CVTPD2PS(mx, x operand.Op) {
	c.addinstruction(x86.CVTPD2PS(mx, x))
}

// CVTPD2PS: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values.
//
// Forms:
//
//	CVTPD2PS m128 xmm
//	CVTPD2PS xmm xmm
//
// Construct and append a CVTPD2PS instruction to the active function.
// Operates on the global context.
func CVTPD2PS(mx, x operand.Op) { ctx.CVTPD2PS(mx, x) }

// CVTPL2PD: Convert Packed Dword Integers to Packed Double-Precision FP Values.
//
// Forms:
//
//	CVTPL2PD m64 xmm
//	CVTPL2PD xmm xmm
//
// Construct and append a CVTPL2PD instruction to the active function.
func (c *Context) CVTPL2PD(mx, x operand.Op) {
	c.addinstruction(x86.CVTPL2PD(mx, x))
}

// CVTPL2PD: Convert Packed Dword Integers to Packed Double-Precision FP Values.
//
// Forms:
//
//	CVTPL2PD m64 xmm
//	CVTPL2PD xmm xmm
//
// Construct and append a CVTPL2PD instruction to the active function.
// Operates on the global context.
func CVTPL2PD(mx, x operand.Op) { ctx.CVTPL2PD(mx, x) }

// CVTPL2PS: Convert Packed Dword Integers to Packed Single-Precision FP Values.
//
// Forms:
//
//	CVTPL2PS m128 xmm
//	CVTPL2PS xmm xmm
//
// Construct and append a CVTPL2PS instruction to the active function.
func (c *Context) CVTPL2PS(mx, x operand.Op) {
	c.addinstruction(x86.CVTPL2PS(mx, x))
}

// CVTPL2PS: Convert Packed Dword Integers to Packed Single-Precision FP Values.
//
// Forms:
//
//	CVTPL2PS m128 xmm
//	CVTPL2PS xmm xmm
//
// Construct and append a CVTPL2PS instruction to the active function.
// Operates on the global context.
func CVTPL2PS(mx, x operand.Op) { ctx.CVTPL2PS(mx, x) }

// CVTPS2PD: Convert Packed Single-Precision FP Values to Packed Double-Precision FP Values.
//
// Forms:
//
//	CVTPS2PD m64 xmm
//	CVTPS2PD xmm xmm
//
// Construct and append a CVTPS2PD instruction to the active function.
func (c *Context) CVTPS2PD(mx, x operand.Op) {
	c.addinstruction(x86.CVTPS2PD(mx, x))
}

// CVTPS2PD: Convert Packed Single-Precision FP Values to Packed Double-Precision FP Values.
//
// Forms:
//
//	CVTPS2PD m64 xmm
//	CVTPS2PD xmm xmm
//
// Construct and append a CVTPS2PD instruction to the active function.
// Operates on the global context.
func CVTPS2PD(mx, x operand.Op) { ctx.CVTPS2PD(mx, x) }

// CVTPS2PL: Convert Packed Single-Precision FP Values to Packed Dword Integers.
//
// Forms:
//
//	CVTPS2PL m128 xmm
//	CVTPS2PL xmm xmm
//
// Construct and append a CVTPS2PL instruction to the active function.
func (c *Context) CVTPS2PL(mx, x operand.Op) {
	c.addinstruction(x86.CVTPS2PL(mx, x))
}

// CVTPS2PL: Convert Packed Single-Precision FP Values to Packed Dword Integers.
//
// Forms:
//
//	CVTPS2PL m128 xmm
//	CVTPS2PL xmm xmm
//
// Construct and append a CVTPS2PL instruction to the active function.
// Operates on the global context.
func CVTPS2PL(mx, x operand.Op) { ctx.CVTPS2PL(mx, x) }

// CVTSD2SL: Convert Scalar Double-Precision FP Value to Integer.
//
// Forms:
//
//	CVTSD2SL m64 r32
//	CVTSD2SL m64 r64
//	CVTSD2SL xmm r32
//	CVTSD2SL xmm r64
//
// Construct and append a CVTSD2SL instruction to the active function.
func (c *Context) CVTSD2SL(mx, r operand.Op) {
	c.addinstruction(x86.CVTSD2SL(mx, r))
}

// CVTSD2SL: Convert Scalar Double-Precision FP Value to Integer.
//
// Forms:
//
//	CVTSD2SL m64 r32
//	CVTSD2SL m64 r64
//	CVTSD2SL xmm r32
//	CVTSD2SL xmm r64
//
// Construct and append a CVTSD2SL instruction to the active function.
// Operates on the global context.
func CVTSD2SL(mx, r operand.Op) { ctx.CVTSD2SL(mx, r) }

// CVTSD2SS: Convert Scalar Double-Precision FP Value to Scalar Single-Precision FP Value.
//
// Forms:
//
//	CVTSD2SS m64 xmm
//	CVTSD2SS xmm xmm
//
// Construct and append a CVTSD2SS instruction to the active function.
func (c *Context) CVTSD2SS(mx, x operand.Op) {
	c.addinstruction(x86.CVTSD2SS(mx, x))
}

// CVTSD2SS: Convert Scalar Double-Precision FP Value to Scalar Single-Precision FP Value.
//
// Forms:
//
//	CVTSD2SS m64 xmm
//	CVTSD2SS xmm xmm
//
// Construct and append a CVTSD2SS instruction to the active function.
// Operates on the global context.
func CVTSD2SS(mx, x operand.Op) { ctx.CVTSD2SS(mx, x) }
|
|
|
|
// CVTSL2SD: Convert Dword Integer to Scalar Double-Precision FP Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTSL2SD m32 xmm
|
|
// CVTSL2SD r32 xmm
|
|
//
|
|
// Construct and append a CVTSL2SD instruction to the active function.
|
|
func (c *Context) CVTSL2SD(mr, x operand.Op) {
|
|
c.addinstruction(x86.CVTSL2SD(mr, x))
|
|
}
|
|
|
|
// CVTSL2SD: Convert Dword Integer to Scalar Double-Precision FP Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTSL2SD m32 xmm
|
|
// CVTSL2SD r32 xmm
|
|
//
|
|
// Construct and append a CVTSL2SD instruction to the active function.
|
|
// Operates on the global context.
|
|
func CVTSL2SD(mr, x operand.Op) { ctx.CVTSL2SD(mr, x) }
|
|
|
|
// CVTSL2SS: Convert Dword Integer to Scalar Single-Precision FP Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTSL2SS m32 xmm
|
|
// CVTSL2SS r32 xmm
|
|
//
|
|
// Construct and append a CVTSL2SS instruction to the active function.
|
|
func (c *Context) CVTSL2SS(mr, x operand.Op) {
|
|
c.addinstruction(x86.CVTSL2SS(mr, x))
|
|
}
|
|
|
|
// CVTSL2SS: Convert Dword Integer to Scalar Single-Precision FP Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTSL2SS m32 xmm
|
|
// CVTSL2SS r32 xmm
|
|
//
|
|
// Construct and append a CVTSL2SS instruction to the active function.
|
|
// Operates on the global context.
|
|
func CVTSL2SS(mr, x operand.Op) { ctx.CVTSL2SS(mr, x) }
|
|
|
|
// CVTSQ2SD: Convert Dword Integer to Scalar Double-Precision FP Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTSQ2SD m64 xmm
|
|
// CVTSQ2SD r64 xmm
|
|
//
|
|
// Construct and append a CVTSQ2SD instruction to the active function.
|
|
func (c *Context) CVTSQ2SD(mr, x operand.Op) {
|
|
c.addinstruction(x86.CVTSQ2SD(mr, x))
|
|
}
|
|
|
|
// CVTSQ2SD: Convert Dword Integer to Scalar Double-Precision FP Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTSQ2SD m64 xmm
|
|
// CVTSQ2SD r64 xmm
|
|
//
|
|
// Construct and append a CVTSQ2SD instruction to the active function.
|
|
// Operates on the global context.
|
|
func CVTSQ2SD(mr, x operand.Op) { ctx.CVTSQ2SD(mr, x) }
|
|
|
|
// CVTSQ2SS: Convert Dword Integer to Scalar Single-Precision FP Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTSQ2SS m64 xmm
|
|
// CVTSQ2SS r64 xmm
|
|
//
|
|
// Construct and append a CVTSQ2SS instruction to the active function.
|
|
func (c *Context) CVTSQ2SS(mr, x operand.Op) {
|
|
c.addinstruction(x86.CVTSQ2SS(mr, x))
|
|
}
|
|
|
|
// CVTSQ2SS: Convert Dword Integer to Scalar Single-Precision FP Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTSQ2SS m64 xmm
|
|
// CVTSQ2SS r64 xmm
|
|
//
|
|
// Construct and append a CVTSQ2SS instruction to the active function.
|
|
// Operates on the global context.
|
|
func CVTSQ2SS(mr, x operand.Op) { ctx.CVTSQ2SS(mr, x) }
|
|
|
|
// CVTSS2SD: Convert Scalar Single-Precision FP Value to Scalar Double-Precision FP Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTSS2SD m32 xmm
|
|
// CVTSS2SD xmm xmm
|
|
//
|
|
// Construct and append a CVTSS2SD instruction to the active function.
|
|
func (c *Context) CVTSS2SD(mx, x operand.Op) {
|
|
c.addinstruction(x86.CVTSS2SD(mx, x))
|
|
}
|
|
|
|
// CVTSS2SD: Convert Scalar Single-Precision FP Value to Scalar Double-Precision FP Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTSS2SD m32 xmm
|
|
// CVTSS2SD xmm xmm
|
|
//
|
|
// Construct and append a CVTSS2SD instruction to the active function.
|
|
// Operates on the global context.
|
|
func CVTSS2SD(mx, x operand.Op) { ctx.CVTSS2SD(mx, x) }
|
|
|
|
// CVTSS2SL: Convert Scalar Single-Precision FP Value to Dword Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTSS2SL m32 r32
|
|
// CVTSS2SL m32 r64
|
|
// CVTSS2SL xmm r32
|
|
// CVTSS2SL xmm r64
|
|
//
|
|
// Construct and append a CVTSS2SL instruction to the active function.
|
|
func (c *Context) CVTSS2SL(mx, r operand.Op) {
|
|
c.addinstruction(x86.CVTSS2SL(mx, r))
|
|
}
|
|
|
|
// CVTSS2SL: Convert Scalar Single-Precision FP Value to Dword Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTSS2SL m32 r32
|
|
// CVTSS2SL m32 r64
|
|
// CVTSS2SL xmm r32
|
|
// CVTSS2SL xmm r64
|
|
//
|
|
// Construct and append a CVTSS2SL instruction to the active function.
|
|
// Operates on the global context.
|
|
func CVTSS2SL(mx, r operand.Op) { ctx.CVTSS2SL(mx, r) }
|
|
|
|
// CVTTPD2PL: Convert with Truncation Packed Double-Precision FP Values to Packed Dword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTTPD2PL m128 xmm
|
|
// CVTTPD2PL xmm xmm
|
|
//
|
|
// Construct and append a CVTTPD2PL instruction to the active function.
|
|
func (c *Context) CVTTPD2PL(mx, x operand.Op) {
|
|
c.addinstruction(x86.CVTTPD2PL(mx, x))
|
|
}
|
|
|
|
// CVTTPD2PL: Convert with Truncation Packed Double-Precision FP Values to Packed Dword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTTPD2PL m128 xmm
|
|
// CVTTPD2PL xmm xmm
|
|
//
|
|
// Construct and append a CVTTPD2PL instruction to the active function.
|
|
// Operates on the global context.
|
|
func CVTTPD2PL(mx, x operand.Op) { ctx.CVTTPD2PL(mx, x) }
|
|
|
|
// CVTTPS2PL: Convert with Truncation Packed Single-Precision FP Values to Packed Dword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTTPS2PL m128 xmm
|
|
// CVTTPS2PL xmm xmm
|
|
//
|
|
// Construct and append a CVTTPS2PL instruction to the active function.
|
|
func (c *Context) CVTTPS2PL(mx, x operand.Op) {
|
|
c.addinstruction(x86.CVTTPS2PL(mx, x))
|
|
}
|
|
|
|
// CVTTPS2PL: Convert with Truncation Packed Single-Precision FP Values to Packed Dword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTTPS2PL m128 xmm
|
|
// CVTTPS2PL xmm xmm
|
|
//
|
|
// Construct and append a CVTTPS2PL instruction to the active function.
|
|
// Operates on the global context.
|
|
func CVTTPS2PL(mx, x operand.Op) { ctx.CVTTPS2PL(mx, x) }
|
|
|
|
// CVTTSD2SL: Convert with Truncation Scalar Double-Precision FP Value to Signed Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTTSD2SL m64 r32
|
|
// CVTTSD2SL xmm r32
|
|
//
|
|
// Construct and append a CVTTSD2SL instruction to the active function.
|
|
func (c *Context) CVTTSD2SL(mx, r operand.Op) {
|
|
c.addinstruction(x86.CVTTSD2SL(mx, r))
|
|
}
|
|
|
|
// CVTTSD2SL: Convert with Truncation Scalar Double-Precision FP Value to Signed Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTTSD2SL m64 r32
|
|
// CVTTSD2SL xmm r32
|
|
//
|
|
// Construct and append a CVTTSD2SL instruction to the active function.
|
|
// Operates on the global context.
|
|
func CVTTSD2SL(mx, r operand.Op) { ctx.CVTTSD2SL(mx, r) }
|
|
|
|
// CVTTSD2SQ: Convert with Truncation Scalar Double-Precision FP Value to Signed Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTTSD2SQ m64 r64
|
|
// CVTTSD2SQ xmm r64
|
|
//
|
|
// Construct and append a CVTTSD2SQ instruction to the active function.
|
|
func (c *Context) CVTTSD2SQ(mx, r operand.Op) {
|
|
c.addinstruction(x86.CVTTSD2SQ(mx, r))
|
|
}
|
|
|
|
// CVTTSD2SQ: Convert with Truncation Scalar Double-Precision FP Value to Signed Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTTSD2SQ m64 r64
|
|
// CVTTSD2SQ xmm r64
|
|
//
|
|
// Construct and append a CVTTSD2SQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func CVTTSD2SQ(mx, r operand.Op) { ctx.CVTTSD2SQ(mx, r) }
|
|
|
|
// CVTTSS2SL: Convert with Truncation Scalar Single-Precision FP Value to Dword Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTTSS2SL m32 r32
|
|
// CVTTSS2SL m32 r64
|
|
// CVTTSS2SL xmm r32
|
|
// CVTTSS2SL xmm r64
|
|
//
|
|
// Construct and append a CVTTSS2SL instruction to the active function.
|
|
func (c *Context) CVTTSS2SL(mx, r operand.Op) {
|
|
c.addinstruction(x86.CVTTSS2SL(mx, r))
|
|
}
|
|
|
|
// CVTTSS2SL: Convert with Truncation Scalar Single-Precision FP Value to Dword Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CVTTSS2SL m32 r32
|
|
// CVTTSS2SL m32 r64
|
|
// CVTTSS2SL xmm r32
|
|
// CVTTSS2SL xmm r64
|
|
//
|
|
// Construct and append a CVTTSS2SL instruction to the active function.
|
|
// Operates on the global context.
|
|
func CVTTSS2SL(mx, r operand.Op) { ctx.CVTTSS2SL(mx, r) }
|
|
|
|
// CWD: Convert Word to Doubleword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CWD
|
|
//
|
|
// Construct and append a CWD instruction to the active function.
|
|
func (c *Context) CWD() {
|
|
c.addinstruction(x86.CWD())
|
|
}
|
|
|
|
// CWD: Convert Word to Doubleword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CWD
|
|
//
|
|
// Construct and append a CWD instruction to the active function.
|
|
// Operates on the global context.
|
|
func CWD() { ctx.CWD() }
|
|
|
|
// CWDE: Convert Word to Doubleword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CWDE
|
|
//
|
|
// Construct and append a CWDE instruction to the active function.
|
|
func (c *Context) CWDE() {
|
|
c.addinstruction(x86.CWDE())
|
|
}
|
|
|
|
// CWDE: Convert Word to Doubleword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// CWDE
|
|
//
|
|
// Construct and append a CWDE instruction to the active function.
|
|
// Operates on the global context.
|
|
func CWDE() { ctx.CWDE() }
|
|
|
|
// DECB: Decrement by 1.
|
|
//
|
|
// Forms:
|
|
//
|
|
// DECB m8
|
|
// DECB r8
|
|
//
|
|
// Construct and append a DECB instruction to the active function.
|
|
func (c *Context) DECB(mr operand.Op) {
|
|
c.addinstruction(x86.DECB(mr))
|
|
}
|
|
|
|
// DECB: Decrement by 1.
|
|
//
|
|
// Forms:
|
|
//
|
|
// DECB m8
|
|
// DECB r8
|
|
//
|
|
// Construct and append a DECB instruction to the active function.
|
|
// Operates on the global context.
|
|
func DECB(mr operand.Op) { ctx.DECB(mr) }
|
|
|
|
// DECL: Decrement by 1.
|
|
//
|
|
// Forms:
|
|
//
|
|
// DECL m32
|
|
// DECL r32
|
|
//
|
|
// Construct and append a DECL instruction to the active function.
|
|
func (c *Context) DECL(mr operand.Op) {
|
|
c.addinstruction(x86.DECL(mr))
|
|
}
|
|
|
|
// DECL: Decrement by 1.
|
|
//
|
|
// Forms:
|
|
//
|
|
// DECL m32
|
|
// DECL r32
|
|
//
|
|
// Construct and append a DECL instruction to the active function.
|
|
// Operates on the global context.
|
|
func DECL(mr operand.Op) { ctx.DECL(mr) }
|
|
|
|
// DECQ: Decrement by 1.
|
|
//
|
|
// Forms:
|
|
//
|
|
// DECQ m64
|
|
// DECQ r64
|
|
//
|
|
// Construct and append a DECQ instruction to the active function.
|
|
func (c *Context) DECQ(mr operand.Op) {
|
|
c.addinstruction(x86.DECQ(mr))
|
|
}
|
|
|
|
// DECQ: Decrement by 1.
|
|
//
|
|
// Forms:
|
|
//
|
|
// DECQ m64
|
|
// DECQ r64
|
|
//
|
|
// Construct and append a DECQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func DECQ(mr operand.Op) { ctx.DECQ(mr) }
|
|
|
|
// DECW: Decrement by 1.
|
|
//
|
|
// Forms:
|
|
//
|
|
// DECW m16
|
|
// DECW r16
|
|
//
|
|
// Construct and append a DECW instruction to the active function.
|
|
func (c *Context) DECW(mr operand.Op) {
|
|
c.addinstruction(x86.DECW(mr))
|
|
}
|
|
|
|
// DECW: Decrement by 1.
|
|
//
|
|
// Forms:
|
|
//
|
|
// DECW m16
|
|
// DECW r16
|
|
//
|
|
// Construct and append a DECW instruction to the active function.
|
|
// Operates on the global context.
|
|
func DECW(mr operand.Op) { ctx.DECW(mr) }
|
|
|
|
// DIVB: Unsigned Divide.
|
|
//
|
|
// Forms:
|
|
//
|
|
// DIVB m8
|
|
// DIVB r8
|
|
//
|
|
// Construct and append a DIVB instruction to the active function.
|
|
func (c *Context) DIVB(mr operand.Op) {
|
|
c.addinstruction(x86.DIVB(mr))
|
|
}
|
|
|
|
// DIVB: Unsigned Divide.
|
|
//
|
|
// Forms:
|
|
//
|
|
// DIVB m8
|
|
// DIVB r8
|
|
//
|
|
// Construct and append a DIVB instruction to the active function.
|
|
// Operates on the global context.
|
|
func DIVB(mr operand.Op) { ctx.DIVB(mr) }
|
|
|
|
// DIVL: Unsigned Divide.
|
|
//
|
|
// Forms:
|
|
//
|
|
// DIVL m32
|
|
// DIVL r32
|
|
//
|
|
// Construct and append a DIVL instruction to the active function.
|
|
func (c *Context) DIVL(mr operand.Op) {
|
|
c.addinstruction(x86.DIVL(mr))
|
|
}
|
|
|
|
// DIVL: Unsigned Divide.
|
|
//
|
|
// Forms:
|
|
//
|
|
// DIVL m32
|
|
// DIVL r32
|
|
//
|
|
// Construct and append a DIVL instruction to the active function.
|
|
// Operates on the global context.
|
|
func DIVL(mr operand.Op) { ctx.DIVL(mr) }
|
|
|
|
// DIVPD: Divide Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// DIVPD m128 xmm
|
|
// DIVPD xmm xmm
|
|
//
|
|
// Construct and append a DIVPD instruction to the active function.
|
|
func (c *Context) DIVPD(mx, x operand.Op) {
|
|
c.addinstruction(x86.DIVPD(mx, x))
|
|
}
|
|
|
|
// DIVPD: Divide Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// DIVPD m128 xmm
|
|
// DIVPD xmm xmm
|
|
//
|
|
// Construct and append a DIVPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func DIVPD(mx, x operand.Op) { ctx.DIVPD(mx, x) }
|
|
|
|
// DIVPS: Divide Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// DIVPS m128 xmm
|
|
// DIVPS xmm xmm
|
|
//
|
|
// Construct and append a DIVPS instruction to the active function.
|
|
func (c *Context) DIVPS(mx, x operand.Op) {
|
|
c.addinstruction(x86.DIVPS(mx, x))
|
|
}
|
|
|
|
// DIVPS: Divide Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// DIVPS m128 xmm
|
|
// DIVPS xmm xmm
|
|
//
|
|
// Construct and append a DIVPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func DIVPS(mx, x operand.Op) { ctx.DIVPS(mx, x) }
|
|
|
|
// DIVQ: Unsigned Divide.
|
|
//
|
|
// Forms:
|
|
//
|
|
// DIVQ m64
|
|
// DIVQ r64
|
|
//
|
|
// Construct and append a DIVQ instruction to the active function.
|
|
func (c *Context) DIVQ(mr operand.Op) {
|
|
c.addinstruction(x86.DIVQ(mr))
|
|
}
|
|
|
|
// DIVQ: Unsigned Divide.
|
|
//
|
|
// Forms:
|
|
//
|
|
// DIVQ m64
|
|
// DIVQ r64
|
|
//
|
|
// Construct and append a DIVQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func DIVQ(mr operand.Op) { ctx.DIVQ(mr) }
|
|
|
|
// DIVSD: Divide Scalar Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// DIVSD m64 xmm
|
|
// DIVSD xmm xmm
|
|
//
|
|
// Construct and append a DIVSD instruction to the active function.
|
|
func (c *Context) DIVSD(mx, x operand.Op) {
|
|
c.addinstruction(x86.DIVSD(mx, x))
|
|
}
|
|
|
|
// DIVSD: Divide Scalar Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// DIVSD m64 xmm
|
|
// DIVSD xmm xmm
|
|
//
|
|
// Construct and append a DIVSD instruction to the active function.
|
|
// Operates on the global context.
|
|
func DIVSD(mx, x operand.Op) { ctx.DIVSD(mx, x) }
|
|
|
|
// DIVSS: Divide Scalar Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// DIVSS m32 xmm
|
|
// DIVSS xmm xmm
|
|
//
|
|
// Construct and append a DIVSS instruction to the active function.
|
|
func (c *Context) DIVSS(mx, x operand.Op) {
|
|
c.addinstruction(x86.DIVSS(mx, x))
|
|
}
|
|
|
|
// DIVSS: Divide Scalar Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// DIVSS m32 xmm
|
|
// DIVSS xmm xmm
|
|
//
|
|
// Construct and append a DIVSS instruction to the active function.
|
|
// Operates on the global context.
|
|
func DIVSS(mx, x operand.Op) { ctx.DIVSS(mx, x) }
|
|
|
|
// DIVW: Unsigned Divide.
|
|
//
|
|
// Forms:
|
|
//
|
|
// DIVW m16
|
|
// DIVW r16
|
|
//
|
|
// Construct and append a DIVW instruction to the active function.
|
|
func (c *Context) DIVW(mr operand.Op) {
|
|
c.addinstruction(x86.DIVW(mr))
|
|
}
|
|
|
|
// DIVW: Unsigned Divide.
|
|
//
|
|
// Forms:
|
|
//
|
|
// DIVW m16
|
|
// DIVW r16
|
|
//
|
|
// Construct and append a DIVW instruction to the active function.
|
|
// Operates on the global context.
|
|
func DIVW(mr operand.Op) { ctx.DIVW(mr) }
|
|
|
|
// DPPD: Dot Product of Packed Double Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// DPPD imm8 m128 xmm
|
|
// DPPD imm8 xmm xmm
|
|
//
|
|
// Construct and append a DPPD instruction to the active function.
|
|
func (c *Context) DPPD(i, mx, x operand.Op) {
|
|
c.addinstruction(x86.DPPD(i, mx, x))
|
|
}
|
|
|
|
// DPPD: Dot Product of Packed Double Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// DPPD imm8 m128 xmm
|
|
// DPPD imm8 xmm xmm
|
|
//
|
|
// Construct and append a DPPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func DPPD(i, mx, x operand.Op) { ctx.DPPD(i, mx, x) }
|
|
|
|
// DPPS: Dot Product of Packed Single Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// DPPS imm8 m128 xmm
|
|
// DPPS imm8 xmm xmm
|
|
//
|
|
// Construct and append a DPPS instruction to the active function.
|
|
func (c *Context) DPPS(i, mx, x operand.Op) {
|
|
c.addinstruction(x86.DPPS(i, mx, x))
|
|
}
|
|
|
|
// DPPS: Dot Product of Packed Single Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// DPPS imm8 m128 xmm
|
|
// DPPS imm8 xmm xmm
|
|
//
|
|
// Construct and append a DPPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func DPPS(i, mx, x operand.Op) { ctx.DPPS(i, mx, x) }
|
|
|
|
// EXTRACTPS: Extract Packed Single Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// EXTRACTPS imm2u xmm m32
|
|
// EXTRACTPS imm2u xmm r32
|
|
//
|
|
// Construct and append a EXTRACTPS instruction to the active function.
|
|
func (c *Context) EXTRACTPS(i, x, mr operand.Op) {
|
|
c.addinstruction(x86.EXTRACTPS(i, x, mr))
|
|
}
|
|
|
|
// EXTRACTPS: Extract Packed Single Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// EXTRACTPS imm2u xmm m32
|
|
// EXTRACTPS imm2u xmm r32
|
|
//
|
|
// Construct and append a EXTRACTPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func EXTRACTPS(i, x, mr operand.Op) { ctx.EXTRACTPS(i, x, mr) }
|
|
|
|
// HADDPD: Packed Double-FP Horizontal Add.
|
|
//
|
|
// Forms:
|
|
//
|
|
// HADDPD m128 xmm
|
|
// HADDPD xmm xmm
|
|
//
|
|
// Construct and append a HADDPD instruction to the active function.
|
|
func (c *Context) HADDPD(mx, x operand.Op) {
|
|
c.addinstruction(x86.HADDPD(mx, x))
|
|
}
|
|
|
|
// HADDPD: Packed Double-FP Horizontal Add.
|
|
//
|
|
// Forms:
|
|
//
|
|
// HADDPD m128 xmm
|
|
// HADDPD xmm xmm
|
|
//
|
|
// Construct and append a HADDPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func HADDPD(mx, x operand.Op) { ctx.HADDPD(mx, x) }
|
|
|
|
// HADDPS: Packed Single-FP Horizontal Add.
|
|
//
|
|
// Forms:
|
|
//
|
|
// HADDPS m128 xmm
|
|
// HADDPS xmm xmm
|
|
//
|
|
// Construct and append a HADDPS instruction to the active function.
|
|
func (c *Context) HADDPS(mx, x operand.Op) {
|
|
c.addinstruction(x86.HADDPS(mx, x))
|
|
}
|
|
|
|
// HADDPS: Packed Single-FP Horizontal Add.
|
|
//
|
|
// Forms:
|
|
//
|
|
// HADDPS m128 xmm
|
|
// HADDPS xmm xmm
|
|
//
|
|
// Construct and append a HADDPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func HADDPS(mx, x operand.Op) { ctx.HADDPS(mx, x) }
|
|
|
|
// HSUBPD: Packed Double-FP Horizontal Subtract.
|
|
//
|
|
// Forms:
|
|
//
|
|
// HSUBPD m128 xmm
|
|
// HSUBPD xmm xmm
|
|
//
|
|
// Construct and append a HSUBPD instruction to the active function.
|
|
func (c *Context) HSUBPD(mx, x operand.Op) {
|
|
c.addinstruction(x86.HSUBPD(mx, x))
|
|
}
|
|
|
|
// HSUBPD: Packed Double-FP Horizontal Subtract.
|
|
//
|
|
// Forms:
|
|
//
|
|
// HSUBPD m128 xmm
|
|
// HSUBPD xmm xmm
|
|
//
|
|
// Construct and append a HSUBPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func HSUBPD(mx, x operand.Op) { ctx.HSUBPD(mx, x) }
|
|
|
|
// HSUBPS: Packed Single-FP Horizontal Subtract.
|
|
//
|
|
// Forms:
|
|
//
|
|
// HSUBPS m128 xmm
|
|
// HSUBPS xmm xmm
|
|
//
|
|
// Construct and append a HSUBPS instruction to the active function.
|
|
func (c *Context) HSUBPS(mx, x operand.Op) {
|
|
c.addinstruction(x86.HSUBPS(mx, x))
|
|
}
|
|
|
|
// HSUBPS: Packed Single-FP Horizontal Subtract.
|
|
//
|
|
// Forms:
|
|
//
|
|
// HSUBPS m128 xmm
|
|
// HSUBPS xmm xmm
|
|
//
|
|
// Construct and append a HSUBPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func HSUBPS(mx, x operand.Op) { ctx.HSUBPS(mx, x) }
|
|
|
|
// IDIVB: Signed Divide.
|
|
//
|
|
// Forms:
|
|
//
|
|
// IDIVB m8
|
|
// IDIVB r8
|
|
//
|
|
// Construct and append a IDIVB instruction to the active function.
|
|
func (c *Context) IDIVB(mr operand.Op) {
|
|
c.addinstruction(x86.IDIVB(mr))
|
|
}
|
|
|
|
// IDIVB: Signed Divide.
|
|
//
|
|
// Forms:
|
|
//
|
|
// IDIVB m8
|
|
// IDIVB r8
|
|
//
|
|
// Construct and append a IDIVB instruction to the active function.
|
|
// Operates on the global context.
|
|
func IDIVB(mr operand.Op) { ctx.IDIVB(mr) }
|
|
|
|
// IDIVL: Signed Divide.
|
|
//
|
|
// Forms:
|
|
//
|
|
// IDIVL m32
|
|
// IDIVL r32
|
|
//
|
|
// Construct and append a IDIVL instruction to the active function.
|
|
func (c *Context) IDIVL(mr operand.Op) {
|
|
c.addinstruction(x86.IDIVL(mr))
|
|
}
|
|
|
|
// IDIVL: Signed Divide.
|
|
//
|
|
// Forms:
|
|
//
|
|
// IDIVL m32
|
|
// IDIVL r32
|
|
//
|
|
// Construct and append a IDIVL instruction to the active function.
|
|
// Operates on the global context.
|
|
func IDIVL(mr operand.Op) { ctx.IDIVL(mr) }
|
|
|
|
// IDIVQ: Signed Divide.
|
|
//
|
|
// Forms:
|
|
//
|
|
// IDIVQ m64
|
|
// IDIVQ r64
|
|
//
|
|
// Construct and append a IDIVQ instruction to the active function.
|
|
func (c *Context) IDIVQ(mr operand.Op) {
|
|
c.addinstruction(x86.IDIVQ(mr))
|
|
}
|
|
|
|
// IDIVQ: Signed Divide.
|
|
//
|
|
// Forms:
|
|
//
|
|
// IDIVQ m64
|
|
// IDIVQ r64
|
|
//
|
|
// Construct and append a IDIVQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func IDIVQ(mr operand.Op) { ctx.IDIVQ(mr) }
|
|
|
|
// IDIVW: Signed Divide.
|
|
//
|
|
// Forms:
|
|
//
|
|
// IDIVW m16
|
|
// IDIVW r16
|
|
//
|
|
// Construct and append a IDIVW instruction to the active function.
|
|
func (c *Context) IDIVW(mr operand.Op) {
|
|
c.addinstruction(x86.IDIVW(mr))
|
|
}
|
|
|
|
// IDIVW: Signed Divide.
|
|
//
|
|
// Forms:
|
|
//
|
|
// IDIVW m16
|
|
// IDIVW r16
|
|
//
|
|
// Construct and append a IDIVW instruction to the active function.
|
|
// Operates on the global context.
|
|
func IDIVW(mr operand.Op) { ctx.IDIVW(mr) }
|
|
|
|
// IMUL3L: Signed Multiply.
|
|
//
|
|
// Forms:
|
|
//
|
|
// IMUL3L imm32 m32 r32
|
|
// IMUL3L imm32 r32 r32
|
|
// IMUL3L imm8 m32 r32
|
|
// IMUL3L imm8 r32 r32
|
|
//
|
|
// Construct and append a IMUL3L instruction to the active function.
|
|
func (c *Context) IMUL3L(i, mr, r operand.Op) {
|
|
c.addinstruction(x86.IMUL3L(i, mr, r))
|
|
}
|
|
|
|
// IMUL3L: Signed Multiply.
|
|
//
|
|
// Forms:
|
|
//
|
|
// IMUL3L imm32 m32 r32
|
|
// IMUL3L imm32 r32 r32
|
|
// IMUL3L imm8 m32 r32
|
|
// IMUL3L imm8 r32 r32
|
|
//
|
|
// Construct and append a IMUL3L instruction to the active function.
|
|
// Operates on the global context.
|
|
func IMUL3L(i, mr, r operand.Op) { ctx.IMUL3L(i, mr, r) }
|
|
|
|
// IMUL3Q: Signed Multiply.
|
|
//
|
|
// Forms:
|
|
//
|
|
// IMUL3Q imm32 m64 r64
|
|
// IMUL3Q imm32 r64 r64
|
|
// IMUL3Q imm8 m64 r64
|
|
// IMUL3Q imm8 r64 r64
|
|
//
|
|
// Construct and append a IMUL3Q instruction to the active function.
|
|
func (c *Context) IMUL3Q(i, mr, r operand.Op) {
|
|
c.addinstruction(x86.IMUL3Q(i, mr, r))
|
|
}
|
|
|
|
// IMUL3Q: Signed Multiply.
|
|
//
|
|
// Forms:
|
|
//
|
|
// IMUL3Q imm32 m64 r64
|
|
// IMUL3Q imm32 r64 r64
|
|
// IMUL3Q imm8 m64 r64
|
|
// IMUL3Q imm8 r64 r64
|
|
//
|
|
// Construct and append a IMUL3Q instruction to the active function.
|
|
// Operates on the global context.
|
|
func IMUL3Q(i, mr, r operand.Op) { ctx.IMUL3Q(i, mr, r) }
|
|
|
|
// IMUL3W: Signed Multiply.
|
|
//
|
|
// Forms:
|
|
//
|
|
// IMUL3W imm16 m16 r16
|
|
// IMUL3W imm16 r16 r16
|
|
// IMUL3W imm8 m16 r16
|
|
// IMUL3W imm8 r16 r16
|
|
//
|
|
// Construct and append a IMUL3W instruction to the active function.
|
|
func (c *Context) IMUL3W(i, mr, r operand.Op) {
|
|
c.addinstruction(x86.IMUL3W(i, mr, r))
|
|
}
|
|
|
|
// IMUL3W: Signed Multiply.
|
|
//
|
|
// Forms:
|
|
//
|
|
// IMUL3W imm16 m16 r16
|
|
// IMUL3W imm16 r16 r16
|
|
// IMUL3W imm8 m16 r16
|
|
// IMUL3W imm8 r16 r16
|
|
//
|
|
// Construct and append a IMUL3W instruction to the active function.
|
|
// Operates on the global context.
|
|
func IMUL3W(i, mr, r operand.Op) { ctx.IMUL3W(i, mr, r) }
|
|
|
|
// IMULB: Signed Multiply.
|
|
//
|
|
// Forms:
|
|
//
|
|
// IMULB m8
|
|
// IMULB r8
|
|
//
|
|
// Construct and append a IMULB instruction to the active function.
|
|
func (c *Context) IMULB(mr operand.Op) {
|
|
c.addinstruction(x86.IMULB(mr))
|
|
}
|
|
|
|
// IMULB: Signed Multiply.
|
|
//
|
|
// Forms:
|
|
//
|
|
// IMULB m8
|
|
// IMULB r8
|
|
//
|
|
// Construct and append a IMULB instruction to the active function.
|
|
// Operates on the global context.
|
|
func IMULB(mr operand.Op) { ctx.IMULB(mr) }
|
|
|
|
// IMULL: Signed Multiply.
|
|
//
|
|
// Forms:
|
|
//
|
|
// IMULL m32 r32
|
|
// IMULL m32
|
|
// IMULL r32 r32
|
|
// IMULL r32
|
|
//
|
|
// Construct and append a IMULL instruction to the active function.
|
|
func (c *Context) IMULL(ops ...operand.Op) {
|
|
c.addinstruction(x86.IMULL(ops...))
|
|
}
|
|
|
|
// IMULL: Signed Multiply.
|
|
//
|
|
// Forms:
|
|
//
|
|
// IMULL m32 r32
|
|
// IMULL m32
|
|
// IMULL r32 r32
|
|
// IMULL r32
|
|
//
|
|
// Construct and append a IMULL instruction to the active function.
|
|
// Operates on the global context.
|
|
func IMULL(ops ...operand.Op) { ctx.IMULL(ops...) }
|
|
|
|
// IMULQ: Signed Multiply.
|
|
//
|
|
// Forms:
|
|
//
|
|
// IMULQ m64 r64
|
|
// IMULQ m64
|
|
// IMULQ r64 r64
|
|
// IMULQ r64
|
|
//
|
|
// Construct and append a IMULQ instruction to the active function.
|
|
func (c *Context) IMULQ(ops ...operand.Op) {
|
|
c.addinstruction(x86.IMULQ(ops...))
|
|
}
|
|
|
|
// IMULQ: Signed Multiply.
|
|
//
|
|
// Forms:
|
|
//
|
|
// IMULQ m64 r64
|
|
// IMULQ m64
|
|
// IMULQ r64 r64
|
|
// IMULQ r64
|
|
//
|
|
// Construct and append a IMULQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func IMULQ(ops ...operand.Op) { ctx.IMULQ(ops...) }
|
|
|
|
// IMULW: Signed Multiply.
|
|
//
|
|
// Forms:
|
|
//
|
|
// IMULW m16 r16
|
|
// IMULW m16
|
|
// IMULW r16 r16
|
|
// IMULW r16
|
|
//
|
|
// Construct and append a IMULW instruction to the active function.
|
|
func (c *Context) IMULW(ops ...operand.Op) {
|
|
c.addinstruction(x86.IMULW(ops...))
|
|
}
|
|
|
|
// IMULW: Signed Multiply.
|
|
//
|
|
// Forms:
|
|
//
|
|
// IMULW m16 r16
|
|
// IMULW m16
|
|
// IMULW r16 r16
|
|
// IMULW r16
|
|
//
|
|
// Construct and append a IMULW instruction to the active function.
|
|
// Operates on the global context.
|
|
func IMULW(ops ...operand.Op) { ctx.IMULW(ops...) }
|
|
|
|
// INCB: Increment by 1.
|
|
//
|
|
// Forms:
|
|
//
|
|
// INCB m8
|
|
// INCB r8
|
|
//
|
|
// Construct and append a INCB instruction to the active function.
|
|
func (c *Context) INCB(mr operand.Op) {
|
|
c.addinstruction(x86.INCB(mr))
|
|
}
|
|
|
|
// INCB: Increment by 1.
|
|
//
|
|
// Forms:
|
|
//
|
|
// INCB m8
|
|
// INCB r8
|
|
//
|
|
// Construct and append a INCB instruction to the active function.
|
|
// Operates on the global context.
|
|
func INCB(mr operand.Op) { ctx.INCB(mr) }
|
|
|
|
// INCL: Increment by 1.
|
|
//
|
|
// Forms:
|
|
//
|
|
// INCL m32
|
|
// INCL r32
|
|
//
|
|
// Construct and append a INCL instruction to the active function.
|
|
func (c *Context) INCL(mr operand.Op) {
|
|
c.addinstruction(x86.INCL(mr))
|
|
}
|
|
|
|
// INCL: Increment by 1.
|
|
//
|
|
// Forms:
|
|
//
|
|
// INCL m32
|
|
// INCL r32
|
|
//
|
|
// Construct and append a INCL instruction to the active function.
|
|
// Operates on the global context.
|
|
func INCL(mr operand.Op) { ctx.INCL(mr) }
|
|
|
|
// INCQ: Increment by 1.
|
|
//
|
|
// Forms:
|
|
//
|
|
// INCQ m64
|
|
// INCQ r64
|
|
//
|
|
// Construct and append a INCQ instruction to the active function.
|
|
func (c *Context) INCQ(mr operand.Op) {
|
|
c.addinstruction(x86.INCQ(mr))
|
|
}
|
|
|
|
// INCQ: Increment by 1.
|
|
//
|
|
// Forms:
|
|
//
|
|
// INCQ m64
|
|
// INCQ r64
|
|
//
|
|
// Construct and append a INCQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func INCQ(mr operand.Op) { ctx.INCQ(mr) }
|
|
|
|
// INCW: Increment by 1.
|
|
//
|
|
// Forms:
|
|
//
|
|
// INCW m16
|
|
// INCW r16
|
|
//
|
|
// Construct and append a INCW instruction to the active function.
|
|
func (c *Context) INCW(mr operand.Op) {
|
|
c.addinstruction(x86.INCW(mr))
|
|
}
|
|
|
|
// INCW: Increment by 1.
|
|
//
|
|
// Forms:
|
|
//
|
|
// INCW m16
|
|
// INCW r16
|
|
//
|
|
// Construct and append a INCW instruction to the active function.
|
|
// Operates on the global context.
|
|
func INCW(mr operand.Op) { ctx.INCW(mr) }
|
|
|
|
// INSERTPS: Insert Packed Single Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// INSERTPS imm8 m32 xmm
|
|
// INSERTPS imm8 xmm xmm
|
|
//
|
|
// Construct and append a INSERTPS instruction to the active function.
|
|
func (c *Context) INSERTPS(i, mx, x operand.Op) {
|
|
c.addinstruction(x86.INSERTPS(i, mx, x))
|
|
}
|
|
|
|
// INSERTPS: Insert Packed Single Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// INSERTPS imm8 m32 xmm
|
|
// INSERTPS imm8 xmm xmm
|
|
//
|
|
// Construct and append a INSERTPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func INSERTPS(i, mx, x operand.Op) { ctx.INSERTPS(i, mx, x) }
|
|
|
|
// INT: Call to Interrupt Procedure.
|
|
//
|
|
// Forms:
|
|
//
|
|
// INT 3
|
|
// INT imm8
|
|
//
|
|
// Construct and append a INT instruction to the active function.
|
|
func (c *Context) INT(i operand.Op) {
|
|
c.addinstruction(x86.INT(i))
|
|
}
|
|
|
|
// INT: Call to Interrupt Procedure.
|
|
//
|
|
// Forms:
|
|
//
|
|
// INT 3
|
|
// INT imm8
|
|
//
|
|
// Construct and append a INT instruction to the active function.
|
|
// Operates on the global context.
|
|
func INT(i operand.Op) { ctx.INT(i) }
|
|
|
|
// JA: Jump if above (CF == 0 and ZF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JA rel32
|
|
// JA rel8
|
|
//
|
|
// Construct and append a JA instruction to the active function.
|
|
func (c *Context) JA(r operand.Op) {
|
|
c.addinstruction(x86.JA(r))
|
|
}
|
|
|
|
// JA: Jump if above (CF == 0 and ZF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JA rel32
|
|
// JA rel8
|
|
//
|
|
// Construct and append a JA instruction to the active function.
|
|
// Operates on the global context.
|
|
func JA(r operand.Op) { ctx.JA(r) }
|
|
|
|
// JAE: Jump if above or equal (CF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JAE rel32
|
|
// JAE rel8
|
|
//
|
|
// Construct and append a JAE instruction to the active function.
|
|
func (c *Context) JAE(r operand.Op) {
|
|
c.addinstruction(x86.JAE(r))
|
|
}
|
|
|
|
// JAE: Jump if above or equal (CF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JAE rel32
|
|
// JAE rel8
|
|
//
|
|
// Construct and append a JAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func JAE(r operand.Op) { ctx.JAE(r) }
|
|
|
|
// JB: Jump if below (CF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JB rel32
|
|
// JB rel8
|
|
//
|
|
// Construct and append a JB instruction to the active function.
|
|
func (c *Context) JB(r operand.Op) {
|
|
c.addinstruction(x86.JB(r))
|
|
}
|
|
|
|
// JB: Jump if below (CF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JB rel32
|
|
// JB rel8
|
|
//
|
|
// Construct and append a JB instruction to the active function.
|
|
// Operates on the global context.
|
|
func JB(r operand.Op) { ctx.JB(r) }
|
|
|
|
// JBE: Jump if below or equal (CF == 1 or ZF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JBE rel32
|
|
// JBE rel8
|
|
//
|
|
// Construct and append a JBE instruction to the active function.
|
|
func (c *Context) JBE(r operand.Op) {
|
|
c.addinstruction(x86.JBE(r))
|
|
}
|
|
|
|
// JBE: Jump if below or equal (CF == 1 or ZF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JBE rel32
|
|
// JBE rel8
|
|
//
|
|
// Construct and append a JBE instruction to the active function.
|
|
// Operates on the global context.
|
|
func JBE(r operand.Op) { ctx.JBE(r) }
|
|
|
|
// JC: Jump if below (CF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JC rel32
|
|
// JC rel8
|
|
//
|
|
// Construct and append a JC instruction to the active function.
|
|
func (c *Context) JC(r operand.Op) {
|
|
c.addinstruction(x86.JC(r))
|
|
}
|
|
|
|
// JC: Jump if below (CF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JC rel32
|
|
// JC rel8
|
|
//
|
|
// Construct and append a JC instruction to the active function.
|
|
// Operates on the global context.
|
|
func JC(r operand.Op) { ctx.JC(r) }
|
|
|
|
// JCC: Jump if above or equal (CF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JCC rel32
|
|
// JCC rel8
|
|
//
|
|
// Construct and append a JCC instruction to the active function.
|
|
func (c *Context) JCC(r operand.Op) {
|
|
c.addinstruction(x86.JCC(r))
|
|
}
|
|
|
|
// JCC: Jump if above or equal (CF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JCC rel32
|
|
// JCC rel8
|
|
//
|
|
// Construct and append a JCC instruction to the active function.
|
|
// Operates on the global context.
|
|
func JCC(r operand.Op) { ctx.JCC(r) }
|
|
|
|
// JCS: Jump if below (CF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JCS rel32
|
|
// JCS rel8
|
|
//
|
|
// Construct and append a JCS instruction to the active function.
|
|
func (c *Context) JCS(r operand.Op) {
|
|
c.addinstruction(x86.JCS(r))
|
|
}
|
|
|
|
// JCS: Jump if below (CF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JCS rel32
|
|
// JCS rel8
|
|
//
|
|
// Construct and append a JCS instruction to the active function.
|
|
// Operates on the global context.
|
|
func JCS(r operand.Op) { ctx.JCS(r) }
|
|
|
|
// JCXZL: Jump if ECX register is 0.
|
|
//
|
|
// Forms:
|
|
//
|
|
// JCXZL rel8
|
|
//
|
|
// Construct and append a JCXZL instruction to the active function.
|
|
func (c *Context) JCXZL(r operand.Op) {
|
|
c.addinstruction(x86.JCXZL(r))
|
|
}
|
|
|
|
// JCXZL: Jump if ECX register is 0.
|
|
//
|
|
// Forms:
|
|
//
|
|
// JCXZL rel8
|
|
//
|
|
// Construct and append a JCXZL instruction to the active function.
|
|
// Operates on the global context.
|
|
func JCXZL(r operand.Op) { ctx.JCXZL(r) }
|
|
|
|
// JCXZQ: Jump if RCX register is 0.
|
|
//
|
|
// Forms:
|
|
//
|
|
// JCXZQ rel8
|
|
//
|
|
// Construct and append a JCXZQ instruction to the active function.
|
|
func (c *Context) JCXZQ(r operand.Op) {
|
|
c.addinstruction(x86.JCXZQ(r))
|
|
}
|
|
|
|
// JCXZQ: Jump if RCX register is 0.
|
|
//
|
|
// Forms:
|
|
//
|
|
// JCXZQ rel8
|
|
//
|
|
// Construct and append a JCXZQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func JCXZQ(r operand.Op) { ctx.JCXZQ(r) }
|
|
|
|
// JE: Jump if equal (ZF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JE rel32
|
|
// JE rel8
|
|
//
|
|
// Construct and append a JE instruction to the active function.
|
|
func (c *Context) JE(r operand.Op) {
|
|
c.addinstruction(x86.JE(r))
|
|
}
|
|
|
|
// JE: Jump if equal (ZF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JE rel32
|
|
// JE rel8
|
|
//
|
|
// Construct and append a JE instruction to the active function.
|
|
// Operates on the global context.
|
|
func JE(r operand.Op) { ctx.JE(r) }
|
|
|
|
// JEQ: Jump if equal (ZF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JEQ rel32
|
|
// JEQ rel8
|
|
//
|
|
// Construct and append a JEQ instruction to the active function.
|
|
func (c *Context) JEQ(r operand.Op) {
|
|
c.addinstruction(x86.JEQ(r))
|
|
}
|
|
|
|
// JEQ: Jump if equal (ZF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JEQ rel32
|
|
// JEQ rel8
|
|
//
|
|
// Construct and append a JEQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func JEQ(r operand.Op) { ctx.JEQ(r) }
|
|
|
|
// JG: Jump if greater (ZF == 0 and SF == OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JG rel32
|
|
// JG rel8
|
|
//
|
|
// Construct and append a JG instruction to the active function.
|
|
func (c *Context) JG(r operand.Op) {
|
|
c.addinstruction(x86.JG(r))
|
|
}
|
|
|
|
// JG: Jump if greater (ZF == 0 and SF == OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JG rel32
|
|
// JG rel8
|
|
//
|
|
// Construct and append a JG instruction to the active function.
|
|
// Operates on the global context.
|
|
func JG(r operand.Op) { ctx.JG(r) }
|
|
|
|
// JGE: Jump if greater or equal (SF == OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JGE rel32
|
|
// JGE rel8
|
|
//
|
|
// Construct and append a JGE instruction to the active function.
|
|
func (c *Context) JGE(r operand.Op) {
|
|
c.addinstruction(x86.JGE(r))
|
|
}
|
|
|
|
// JGE: Jump if greater or equal (SF == OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JGE rel32
|
|
// JGE rel8
|
|
//
|
|
// Construct and append a JGE instruction to the active function.
|
|
// Operates on the global context.
|
|
func JGE(r operand.Op) { ctx.JGE(r) }
|
|
|
|
// JGT: Jump if greater (ZF == 0 and SF == OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JGT rel32
|
|
// JGT rel8
|
|
//
|
|
// Construct and append a JGT instruction to the active function.
|
|
func (c *Context) JGT(r operand.Op) {
|
|
c.addinstruction(x86.JGT(r))
|
|
}
|
|
|
|
// JGT: Jump if greater (ZF == 0 and SF == OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JGT rel32
|
|
// JGT rel8
|
|
//
|
|
// Construct and append a JGT instruction to the active function.
|
|
// Operates on the global context.
|
|
func JGT(r operand.Op) { ctx.JGT(r) }
|
|
|
|
// JHI: Jump if above (CF == 0 and ZF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JHI rel32
|
|
// JHI rel8
|
|
//
|
|
// Construct and append a JHI instruction to the active function.
|
|
func (c *Context) JHI(r operand.Op) {
|
|
c.addinstruction(x86.JHI(r))
|
|
}
|
|
|
|
// JHI: Jump if above (CF == 0 and ZF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JHI rel32
|
|
// JHI rel8
|
|
//
|
|
// Construct and append a JHI instruction to the active function.
|
|
// Operates on the global context.
|
|
func JHI(r operand.Op) { ctx.JHI(r) }
|
|
|
|
// JHS: Jump if above or equal (CF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JHS rel32
|
|
// JHS rel8
|
|
//
|
|
// Construct and append a JHS instruction to the active function.
|
|
func (c *Context) JHS(r operand.Op) {
|
|
c.addinstruction(x86.JHS(r))
|
|
}
|
|
|
|
// JHS: Jump if above or equal (CF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JHS rel32
|
|
// JHS rel8
|
|
//
|
|
// Construct and append a JHS instruction to the active function.
|
|
// Operates on the global context.
|
|
func JHS(r operand.Op) { ctx.JHS(r) }
|
|
|
|
// JL: Jump if less (SF != OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JL rel32
|
|
// JL rel8
|
|
//
|
|
// Construct and append a JL instruction to the active function.
|
|
func (c *Context) JL(r operand.Op) {
|
|
c.addinstruction(x86.JL(r))
|
|
}
|
|
|
|
// JL: Jump if less (SF != OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JL rel32
|
|
// JL rel8
|
|
//
|
|
// Construct and append a JL instruction to the active function.
|
|
// Operates on the global context.
|
|
func JL(r operand.Op) { ctx.JL(r) }
|
|
|
|
// JLE: Jump if less or equal (ZF == 1 or SF != OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JLE rel32
|
|
// JLE rel8
|
|
//
|
|
// Construct and append a JLE instruction to the active function.
|
|
func (c *Context) JLE(r operand.Op) {
|
|
c.addinstruction(x86.JLE(r))
|
|
}
|
|
|
|
// JLE: Jump if less or equal (ZF == 1 or SF != OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JLE rel32
|
|
// JLE rel8
|
|
//
|
|
// Construct and append a JLE instruction to the active function.
|
|
// Operates on the global context.
|
|
func JLE(r operand.Op) { ctx.JLE(r) }
|
|
|
|
// JLO: Jump if below (CF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JLO rel32
|
|
// JLO rel8
|
|
//
|
|
// Construct and append a JLO instruction to the active function.
|
|
func (c *Context) JLO(r operand.Op) {
|
|
c.addinstruction(x86.JLO(r))
|
|
}
|
|
|
|
// JLO: Jump if below (CF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JLO rel32
|
|
// JLO rel8
|
|
//
|
|
// Construct and append a JLO instruction to the active function.
|
|
// Operates on the global context.
|
|
func JLO(r operand.Op) { ctx.JLO(r) }
|
|
|
|
// JLS: Jump if below or equal (CF == 1 or ZF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JLS rel32
|
|
// JLS rel8
|
|
//
|
|
// Construct and append a JLS instruction to the active function.
|
|
func (c *Context) JLS(r operand.Op) {
|
|
c.addinstruction(x86.JLS(r))
|
|
}
|
|
|
|
// JLS: Jump if below or equal (CF == 1 or ZF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JLS rel32
|
|
// JLS rel8
|
|
//
|
|
// Construct and append a JLS instruction to the active function.
|
|
// Operates on the global context.
|
|
func JLS(r operand.Op) { ctx.JLS(r) }
|
|
|
|
// JLT: Jump if less (SF != OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JLT rel32
|
|
// JLT rel8
|
|
//
|
|
// Construct and append a JLT instruction to the active function.
|
|
func (c *Context) JLT(r operand.Op) {
|
|
c.addinstruction(x86.JLT(r))
|
|
}
|
|
|
|
// JLT: Jump if less (SF != OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JLT rel32
|
|
// JLT rel8
|
|
//
|
|
// Construct and append a JLT instruction to the active function.
|
|
// Operates on the global context.
|
|
func JLT(r operand.Op) { ctx.JLT(r) }
|
|
|
|
// JMI: Jump if sign (SF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JMI rel32
|
|
// JMI rel8
|
|
//
|
|
// Construct and append a JMI instruction to the active function.
|
|
func (c *Context) JMI(r operand.Op) {
|
|
c.addinstruction(x86.JMI(r))
|
|
}
|
|
|
|
// JMI: Jump if sign (SF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JMI rel32
|
|
// JMI rel8
|
|
//
|
|
// Construct and append a JMI instruction to the active function.
|
|
// Operates on the global context.
|
|
func JMI(r operand.Op) { ctx.JMI(r) }
|
|
|
|
// JMP: Jump Unconditionally.
|
|
//
|
|
// Forms:
|
|
//
|
|
// JMP rel32
|
|
// JMP rel8
|
|
// JMP m64
|
|
// JMP r64
|
|
//
|
|
// Construct and append a JMP instruction to the active function.
|
|
func (c *Context) JMP(mr operand.Op) {
|
|
c.addinstruction(x86.JMP(mr))
|
|
}
|
|
|
|
// JMP: Jump Unconditionally.
|
|
//
|
|
// Forms:
|
|
//
|
|
// JMP rel32
|
|
// JMP rel8
|
|
// JMP m64
|
|
// JMP r64
|
|
//
|
|
// Construct and append a JMP instruction to the active function.
|
|
// Operates on the global context.
|
|
func JMP(mr operand.Op) { ctx.JMP(mr) }
|
|
|
|
// JNA: Jump if below or equal (CF == 1 or ZF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNA rel32
|
|
// JNA rel8
|
|
//
|
|
// Construct and append a JNA instruction to the active function.
|
|
func (c *Context) JNA(r operand.Op) {
|
|
c.addinstruction(x86.JNA(r))
|
|
}
|
|
|
|
// JNA: Jump if below or equal (CF == 1 or ZF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNA rel32
|
|
// JNA rel8
|
|
//
|
|
// Construct and append a JNA instruction to the active function.
|
|
// Operates on the global context.
|
|
func JNA(r operand.Op) { ctx.JNA(r) }
|
|
|
|
// JNAE: Jump if below (CF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNAE rel32
|
|
// JNAE rel8
|
|
//
|
|
// Construct and append a JNAE instruction to the active function.
|
|
func (c *Context) JNAE(r operand.Op) {
|
|
c.addinstruction(x86.JNAE(r))
|
|
}
|
|
|
|
// JNAE: Jump if below (CF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNAE rel32
|
|
// JNAE rel8
|
|
//
|
|
// Construct and append a JNAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func JNAE(r operand.Op) { ctx.JNAE(r) }
|
|
|
|
// JNB: Jump if above or equal (CF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNB rel32
|
|
// JNB rel8
|
|
//
|
|
// Construct and append a JNB instruction to the active function.
|
|
func (c *Context) JNB(r operand.Op) {
|
|
c.addinstruction(x86.JNB(r))
|
|
}
|
|
|
|
// JNB: Jump if above or equal (CF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNB rel32
|
|
// JNB rel8
|
|
//
|
|
// Construct and append a JNB instruction to the active function.
|
|
// Operates on the global context.
|
|
func JNB(r operand.Op) { ctx.JNB(r) }
|
|
|
|
// JNBE: Jump if above (CF == 0 and ZF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNBE rel32
|
|
// JNBE rel8
|
|
//
|
|
// Construct and append a JNBE instruction to the active function.
|
|
func (c *Context) JNBE(r operand.Op) {
|
|
c.addinstruction(x86.JNBE(r))
|
|
}
|
|
|
|
// JNBE: Jump if above (CF == 0 and ZF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNBE rel32
|
|
// JNBE rel8
|
|
//
|
|
// Construct and append a JNBE instruction to the active function.
|
|
// Operates on the global context.
|
|
func JNBE(r operand.Op) { ctx.JNBE(r) }
|
|
|
|
// JNC: Jump if above or equal (CF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNC rel32
|
|
// JNC rel8
|
|
//
|
|
// Construct and append a JNC instruction to the active function.
|
|
func (c *Context) JNC(r operand.Op) {
|
|
c.addinstruction(x86.JNC(r))
|
|
}
|
|
|
|
// JNC: Jump if above or equal (CF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNC rel32
|
|
// JNC rel8
|
|
//
|
|
// Construct and append a JNC instruction to the active function.
|
|
// Operates on the global context.
|
|
func JNC(r operand.Op) { ctx.JNC(r) }
|
|
|
|
// JNE: Jump if not equal (ZF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNE rel32
|
|
// JNE rel8
|
|
//
|
|
// Construct and append a JNE instruction to the active function.
|
|
func (c *Context) JNE(r operand.Op) {
|
|
c.addinstruction(x86.JNE(r))
|
|
}
|
|
|
|
// JNE: Jump if not equal (ZF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNE rel32
|
|
// JNE rel8
|
|
//
|
|
// Construct and append a JNE instruction to the active function.
|
|
// Operates on the global context.
|
|
func JNE(r operand.Op) { ctx.JNE(r) }
|
|
|
|
// JNG: Jump if less or equal (ZF == 1 or SF != OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNG rel32
|
|
// JNG rel8
|
|
//
|
|
// Construct and append a JNG instruction to the active function.
|
|
func (c *Context) JNG(r operand.Op) {
|
|
c.addinstruction(x86.JNG(r))
|
|
}
|
|
|
|
// JNG: Jump if less or equal (ZF == 1 or SF != OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNG rel32
|
|
// JNG rel8
|
|
//
|
|
// Construct and append a JNG instruction to the active function.
|
|
// Operates on the global context.
|
|
func JNG(r operand.Op) { ctx.JNG(r) }
|
|
|
|
// JNGE: Jump if less (SF != OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNGE rel32
|
|
// JNGE rel8
|
|
//
|
|
// Construct and append a JNGE instruction to the active function.
|
|
func (c *Context) JNGE(r operand.Op) {
|
|
c.addinstruction(x86.JNGE(r))
|
|
}
|
|
|
|
// JNGE: Jump if less (SF != OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNGE rel32
|
|
// JNGE rel8
|
|
//
|
|
// Construct and append a JNGE instruction to the active function.
|
|
// Operates on the global context.
|
|
func JNGE(r operand.Op) { ctx.JNGE(r) }
|
|
|
|
// JNL: Jump if greater or equal (SF == OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNL rel32
|
|
// JNL rel8
|
|
//
|
|
// Construct and append a JNL instruction to the active function.
|
|
func (c *Context) JNL(r operand.Op) {
|
|
c.addinstruction(x86.JNL(r))
|
|
}
|
|
|
|
// JNL: Jump if greater or equal (SF == OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNL rel32
|
|
// JNL rel8
|
|
//
|
|
// Construct and append a JNL instruction to the active function.
|
|
// Operates on the global context.
|
|
func JNL(r operand.Op) { ctx.JNL(r) }
|
|
|
|
// JNLE: Jump if greater (ZF == 0 and SF == OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNLE rel32
|
|
// JNLE rel8
|
|
//
|
|
// Construct and append a JNLE instruction to the active function.
|
|
func (c *Context) JNLE(r operand.Op) {
|
|
c.addinstruction(x86.JNLE(r))
|
|
}
|
|
|
|
// JNLE: Jump if greater (ZF == 0 and SF == OF).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNLE rel32
|
|
// JNLE rel8
|
|
//
|
|
// Construct and append a JNLE instruction to the active function.
|
|
// Operates on the global context.
|
|
func JNLE(r operand.Op) { ctx.JNLE(r) }
|
|
|
|
// JNO: Jump if not overflow (OF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNO rel32
|
|
// JNO rel8
|
|
//
|
|
// Construct and append a JNO instruction to the active function.
|
|
func (c *Context) JNO(r operand.Op) {
|
|
c.addinstruction(x86.JNO(r))
|
|
}
|
|
|
|
// JNO: Jump if not overflow (OF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNO rel32
|
|
// JNO rel8
|
|
//
|
|
// Construct and append a JNO instruction to the active function.
|
|
// Operates on the global context.
|
|
func JNO(r operand.Op) { ctx.JNO(r) }
|
|
|
|
// JNP: Jump if not parity (PF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNP rel32
|
|
// JNP rel8
|
|
//
|
|
// Construct and append a JNP instruction to the active function.
|
|
func (c *Context) JNP(r operand.Op) {
|
|
c.addinstruction(x86.JNP(r))
|
|
}
|
|
|
|
// JNP: Jump if not parity (PF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNP rel32
|
|
// JNP rel8
|
|
//
|
|
// Construct and append a JNP instruction to the active function.
|
|
// Operates on the global context.
|
|
func JNP(r operand.Op) { ctx.JNP(r) }
|
|
|
|
// JNS: Jump if not sign (SF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNS rel32
|
|
// JNS rel8
|
|
//
|
|
// Construct and append a JNS instruction to the active function.
|
|
func (c *Context) JNS(r operand.Op) {
|
|
c.addinstruction(x86.JNS(r))
|
|
}
|
|
|
|
// JNS: Jump if not sign (SF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNS rel32
|
|
// JNS rel8
|
|
//
|
|
// Construct and append a JNS instruction to the active function.
|
|
// Operates on the global context.
|
|
func JNS(r operand.Op) { ctx.JNS(r) }
|
|
|
|
// JNZ: Jump if not equal (ZF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNZ rel32
|
|
// JNZ rel8
|
|
//
|
|
// Construct and append a JNZ instruction to the active function.
|
|
func (c *Context) JNZ(r operand.Op) {
|
|
c.addinstruction(x86.JNZ(r))
|
|
}
|
|
|
|
// JNZ: Jump if not equal (ZF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JNZ rel32
|
|
// JNZ rel8
|
|
//
|
|
// Construct and append a JNZ instruction to the active function.
|
|
// Operates on the global context.
|
|
func JNZ(r operand.Op) { ctx.JNZ(r) }
|
|
|
|
// JO: Jump if overflow (OF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JO rel32
|
|
// JO rel8
|
|
//
|
|
// Construct and append a JO instruction to the active function.
|
|
func (c *Context) JO(r operand.Op) {
|
|
c.addinstruction(x86.JO(r))
|
|
}
|
|
|
|
// JO: Jump if overflow (OF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JO rel32
|
|
// JO rel8
|
|
//
|
|
// Construct and append a JO instruction to the active function.
|
|
// Operates on the global context.
|
|
func JO(r operand.Op) { ctx.JO(r) }
|
|
|
|
// JOC: Jump if not overflow (OF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JOC rel32
|
|
// JOC rel8
|
|
//
|
|
// Construct and append a JOC instruction to the active function.
|
|
func (c *Context) JOC(r operand.Op) {
|
|
c.addinstruction(x86.JOC(r))
|
|
}
|
|
|
|
// JOC: Jump if not overflow (OF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JOC rel32
|
|
// JOC rel8
|
|
//
|
|
// Construct and append a JOC instruction to the active function.
|
|
// Operates on the global context.
|
|
func JOC(r operand.Op) { ctx.JOC(r) }
|
|
|
|
// JOS: Jump if overflow (OF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JOS rel32
|
|
// JOS rel8
|
|
//
|
|
// Construct and append a JOS instruction to the active function.
|
|
func (c *Context) JOS(r operand.Op) {
|
|
c.addinstruction(x86.JOS(r))
|
|
}
|
|
|
|
// JOS: Jump if overflow (OF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JOS rel32
|
|
// JOS rel8
|
|
//
|
|
// Construct and append a JOS instruction to the active function.
|
|
// Operates on the global context.
|
|
func JOS(r operand.Op) { ctx.JOS(r) }
|
|
|
|
// JP: Jump if parity (PF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JP rel32
|
|
// JP rel8
|
|
//
|
|
// Construct and append a JP instruction to the active function.
|
|
func (c *Context) JP(r operand.Op) {
|
|
c.addinstruction(x86.JP(r))
|
|
}
|
|
|
|
// JP: Jump if parity (PF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JP rel32
|
|
// JP rel8
|
|
//
|
|
// Construct and append a JP instruction to the active function.
|
|
// Operates on the global context.
|
|
func JP(r operand.Op) { ctx.JP(r) }
|
|
|
|
// JPC: Jump if not parity (PF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JPC rel32
|
|
// JPC rel8
|
|
//
|
|
// Construct and append a JPC instruction to the active function.
|
|
func (c *Context) JPC(r operand.Op) {
|
|
c.addinstruction(x86.JPC(r))
|
|
}
|
|
|
|
// JPC: Jump if not parity (PF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JPC rel32
|
|
// JPC rel8
|
|
//
|
|
// Construct and append a JPC instruction to the active function.
|
|
// Operates on the global context.
|
|
func JPC(r operand.Op) { ctx.JPC(r) }
|
|
|
|
// JPE: Jump if parity (PF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JPE rel32
|
|
// JPE rel8
|
|
//
|
|
// Construct and append a JPE instruction to the active function.
|
|
func (c *Context) JPE(r operand.Op) {
|
|
c.addinstruction(x86.JPE(r))
|
|
}
|
|
|
|
// JPE: Jump if parity (PF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JPE rel32
|
|
// JPE rel8
|
|
//
|
|
// Construct and append a JPE instruction to the active function.
|
|
// Operates on the global context.
|
|
func JPE(r operand.Op) { ctx.JPE(r) }
|
|
|
|
// JPL: Jump if not sign (SF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JPL rel32
|
|
// JPL rel8
|
|
//
|
|
// Construct and append a JPL instruction to the active function.
|
|
func (c *Context) JPL(r operand.Op) {
|
|
c.addinstruction(x86.JPL(r))
|
|
}
|
|
|
|
// JPL: Jump if not sign (SF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JPL rel32
|
|
// JPL rel8
|
|
//
|
|
// Construct and append a JPL instruction to the active function.
|
|
// Operates on the global context.
|
|
func JPL(r operand.Op) { ctx.JPL(r) }
|
|
|
|
// JPO: Jump if not parity (PF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JPO rel32
|
|
// JPO rel8
|
|
//
|
|
// Construct and append a JPO instruction to the active function.
|
|
func (c *Context) JPO(r operand.Op) {
|
|
c.addinstruction(x86.JPO(r))
|
|
}
|
|
|
|
// JPO: Jump if not parity (PF == 0).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JPO rel32
|
|
// JPO rel8
|
|
//
|
|
// Construct and append a JPO instruction to the active function.
|
|
// Operates on the global context.
|
|
func JPO(r operand.Op) { ctx.JPO(r) }
|
|
|
|
// JPS: Jump if parity (PF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JPS rel32
|
|
// JPS rel8
|
|
//
|
|
// Construct and append a JPS instruction to the active function.
|
|
func (c *Context) JPS(r operand.Op) {
|
|
c.addinstruction(x86.JPS(r))
|
|
}
|
|
|
|
// JPS: Jump if parity (PF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JPS rel32
|
|
// JPS rel8
|
|
//
|
|
// Construct and append a JPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func JPS(r operand.Op) { ctx.JPS(r) }
|
|
|
|
// JS: Jump if sign (SF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JS rel32
|
|
// JS rel8
|
|
//
|
|
// Construct and append a JS instruction to the active function.
|
|
func (c *Context) JS(r operand.Op) {
|
|
c.addinstruction(x86.JS(r))
|
|
}
|
|
|
|
// JS: Jump if sign (SF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JS rel32
|
|
// JS rel8
|
|
//
|
|
// Construct and append a JS instruction to the active function.
|
|
// Operates on the global context.
|
|
func JS(r operand.Op) { ctx.JS(r) }
|
|
|
|
// JZ: Jump if equal (ZF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JZ rel32
|
|
// JZ rel8
|
|
//
|
|
// Construct and append a JZ instruction to the active function.
|
|
func (c *Context) JZ(r operand.Op) {
|
|
c.addinstruction(x86.JZ(r))
|
|
}
|
|
|
|
// JZ: Jump if equal (ZF == 1).
|
|
//
|
|
// Forms:
|
|
//
|
|
// JZ rel32
|
|
// JZ rel8
|
|
//
|
|
// Construct and append a JZ instruction to the active function.
|
|
// Operates on the global context.
|
|
func JZ(r operand.Op) { ctx.JZ(r) }
|
|
|
|
// KADDB: ADD Two 8-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KADDB k k k
|
|
//
|
|
// Construct and append a KADDB instruction to the active function.
|
|
func (c *Context) KADDB(k, k1, k2 operand.Op) {
|
|
c.addinstruction(x86.KADDB(k, k1, k2))
|
|
}
|
|
|
|
// KADDB: ADD Two 8-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KADDB k k k
|
|
//
|
|
// Construct and append a KADDB instruction to the active function.
|
|
// Operates on the global context.
|
|
func KADDB(k, k1, k2 operand.Op) { ctx.KADDB(k, k1, k2) }
|
|
|
|
// KADDD: ADD Two 32-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KADDD k k k
|
|
//
|
|
// Construct and append a KADDD instruction to the active function.
|
|
func (c *Context) KADDD(k, k1, k2 operand.Op) {
|
|
c.addinstruction(x86.KADDD(k, k1, k2))
|
|
}
|
|
|
|
// KADDD: ADD Two 32-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KADDD k k k
|
|
//
|
|
// Construct and append a KADDD instruction to the active function.
|
|
// Operates on the global context.
|
|
func KADDD(k, k1, k2 operand.Op) { ctx.KADDD(k, k1, k2) }
|
|
|
|
// KADDQ: ADD Two 64-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KADDQ k k k
|
|
//
|
|
// Construct and append a KADDQ instruction to the active function.
|
|
func (c *Context) KADDQ(k, k1, k2 operand.Op) {
|
|
c.addinstruction(x86.KADDQ(k, k1, k2))
|
|
}
|
|
|
|
// KADDQ: ADD Two 64-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KADDQ k k k
|
|
//
|
|
// Construct and append a KADDQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func KADDQ(k, k1, k2 operand.Op) { ctx.KADDQ(k, k1, k2) }
|
|
|
|
// KADDW: ADD Two 16-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KADDW k k k
|
|
//
|
|
// Construct and append a KADDW instruction to the active function.
|
|
func (c *Context) KADDW(k, k1, k2 operand.Op) {
|
|
c.addinstruction(x86.KADDW(k, k1, k2))
|
|
}
|
|
|
|
// KADDW: ADD Two 16-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KADDW k k k
|
|
//
|
|
// Construct and append a KADDW instruction to the active function.
|
|
// Operates on the global context.
|
|
func KADDW(k, k1, k2 operand.Op) { ctx.KADDW(k, k1, k2) }
|
|
|
|
// KANDB: Bitwise Logical AND 8-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KANDB k k k
|
|
//
|
|
// Construct and append a KANDB instruction to the active function.
|
|
func (c *Context) KANDB(k, k1, k2 operand.Op) {
|
|
c.addinstruction(x86.KANDB(k, k1, k2))
|
|
}
|
|
|
|
// KANDB: Bitwise Logical AND 8-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KANDB k k k
|
|
//
|
|
// Construct and append a KANDB instruction to the active function.
|
|
// Operates on the global context.
|
|
func KANDB(k, k1, k2 operand.Op) { ctx.KANDB(k, k1, k2) }
|
|
|
|
// KANDD: Bitwise Logical AND 32-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KANDD k k k
|
|
//
|
|
// Construct and append a KANDD instruction to the active function.
|
|
func (c *Context) KANDD(k, k1, k2 operand.Op) {
|
|
c.addinstruction(x86.KANDD(k, k1, k2))
|
|
}
|
|
|
|
// KANDD: Bitwise Logical AND 32-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KANDD k k k
|
|
//
|
|
// Construct and append a KANDD instruction to the active function.
|
|
// Operates on the global context.
|
|
func KANDD(k, k1, k2 operand.Op) { ctx.KANDD(k, k1, k2) }
|
|
|
|
// KANDNB: Bitwise Logical AND NOT 8-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KANDNB k k k
|
|
//
|
|
// Construct and append a KANDNB instruction to the active function.
|
|
func (c *Context) KANDNB(k, k1, k2 operand.Op) {
|
|
c.addinstruction(x86.KANDNB(k, k1, k2))
|
|
}
|
|
|
|
// KANDNB: Bitwise Logical AND NOT 8-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KANDNB k k k
|
|
//
|
|
// Construct and append a KANDNB instruction to the active function.
|
|
// Operates on the global context.
|
|
func KANDNB(k, k1, k2 operand.Op) { ctx.KANDNB(k, k1, k2) }
|
|
|
|
// KANDND: Bitwise Logical AND NOT 32-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KANDND k k k
|
|
//
|
|
// Construct and append a KANDND instruction to the active function.
|
|
func (c *Context) KANDND(k, k1, k2 operand.Op) {
|
|
c.addinstruction(x86.KANDND(k, k1, k2))
|
|
}
|
|
|
|
// KANDND: Bitwise Logical AND NOT 32-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KANDND k k k
|
|
//
|
|
// Construct and append a KANDND instruction to the active function.
|
|
// Operates on the global context.
|
|
func KANDND(k, k1, k2 operand.Op) { ctx.KANDND(k, k1, k2) }
|
|
|
|
// KANDNQ: Bitwise Logical AND NOT 64-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KANDNQ k k k
|
|
//
|
|
// Construct and append a KANDNQ instruction to the active function.
|
|
func (c *Context) KANDNQ(k, k1, k2 operand.Op) {
|
|
c.addinstruction(x86.KANDNQ(k, k1, k2))
|
|
}
|
|
|
|
// KANDNQ: Bitwise Logical AND NOT 64-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KANDNQ k k k
|
|
//
|
|
// Construct and append a KANDNQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func KANDNQ(k, k1, k2 operand.Op) { ctx.KANDNQ(k, k1, k2) }
|
|
|
|
// KANDNW: Bitwise Logical AND NOT 16-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KANDNW k k k
|
|
//
|
|
// Construct and append a KANDNW instruction to the active function.
|
|
func (c *Context) KANDNW(k, k1, k2 operand.Op) {
|
|
c.addinstruction(x86.KANDNW(k, k1, k2))
|
|
}
|
|
|
|
// KANDNW: Bitwise Logical AND NOT 16-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KANDNW k k k
|
|
//
|
|
// Construct and append a KANDNW instruction to the active function.
|
|
// Operates on the global context.
|
|
func KANDNW(k, k1, k2 operand.Op) { ctx.KANDNW(k, k1, k2) }
|
|
|
|
// KANDQ: Bitwise Logical AND 64-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KANDQ k k k
|
|
//
|
|
// Construct and append a KANDQ instruction to the active function.
|
|
func (c *Context) KANDQ(k, k1, k2 operand.Op) {
|
|
c.addinstruction(x86.KANDQ(k, k1, k2))
|
|
}
|
|
|
|
// KANDQ: Bitwise Logical AND 64-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KANDQ k k k
|
|
//
|
|
// Construct and append a KANDQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func KANDQ(k, k1, k2 operand.Op) { ctx.KANDQ(k, k1, k2) }
|
|
|
|
// KANDW: Bitwise Logical AND 16-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KANDW k k k
|
|
//
|
|
// Construct and append a KANDW instruction to the active function.
|
|
func (c *Context) KANDW(k, k1, k2 operand.Op) {
|
|
c.addinstruction(x86.KANDW(k, k1, k2))
|
|
}
|
|
|
|
// KANDW: Bitwise Logical AND 16-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KANDW k k k
|
|
//
|
|
// Construct and append a KANDW instruction to the active function.
|
|
// Operates on the global context.
|
|
func KANDW(k, k1, k2 operand.Op) { ctx.KANDW(k, k1, k2) }
|
|
|
|
// KMOVB: Move 8-bit Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KMOVB k k
|
|
// KMOVB k m8
|
|
// KMOVB k r32
|
|
// KMOVB m8 k
|
|
// KMOVB r32 k
|
|
//
|
|
// Construct and append a KMOVB instruction to the active function.
|
|
func (c *Context) KMOVB(kmr, kmr1 operand.Op) {
|
|
c.addinstruction(x86.KMOVB(kmr, kmr1))
|
|
}
|
|
|
|
// KMOVB: Move 8-bit Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KMOVB k k
|
|
// KMOVB k m8
|
|
// KMOVB k r32
|
|
// KMOVB m8 k
|
|
// KMOVB r32 k
|
|
//
|
|
// Construct and append a KMOVB instruction to the active function.
|
|
// Operates on the global context.
|
|
func KMOVB(kmr, kmr1 operand.Op) { ctx.KMOVB(kmr, kmr1) }
|
|
|
|
// KMOVD: Move 32-bit Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KMOVD k k
|
|
// KMOVD k m32
|
|
// KMOVD k r32
|
|
// KMOVD m32 k
|
|
// KMOVD r32 k
|
|
//
|
|
// Construct and append a KMOVD instruction to the active function.
|
|
func (c *Context) KMOVD(kmr, kmr1 operand.Op) {
|
|
c.addinstruction(x86.KMOVD(kmr, kmr1))
|
|
}
|
|
|
|
// KMOVD: Move 32-bit Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KMOVD k k
|
|
// KMOVD k m32
|
|
// KMOVD k r32
|
|
// KMOVD m32 k
|
|
// KMOVD r32 k
|
|
//
|
|
// Construct and append a KMOVD instruction to the active function.
|
|
// Operates on the global context.
|
|
func KMOVD(kmr, kmr1 operand.Op) { ctx.KMOVD(kmr, kmr1) }
|
|
|
|
// KMOVQ: Move 64-bit Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KMOVQ k k
|
|
// KMOVQ k m64
|
|
// KMOVQ k r64
|
|
// KMOVQ m64 k
|
|
// KMOVQ r64 k
|
|
//
|
|
// Construct and append a KMOVQ instruction to the active function.
|
|
func (c *Context) KMOVQ(kmr, kmr1 operand.Op) {
|
|
c.addinstruction(x86.KMOVQ(kmr, kmr1))
|
|
}
|
|
|
|
// KMOVQ: Move 64-bit Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KMOVQ k k
|
|
// KMOVQ k m64
|
|
// KMOVQ k r64
|
|
// KMOVQ m64 k
|
|
// KMOVQ r64 k
|
|
//
|
|
// Construct and append a KMOVQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func KMOVQ(kmr, kmr1 operand.Op) { ctx.KMOVQ(kmr, kmr1) }
|
|
|
|
// KMOVW: Move 16-bit Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KMOVW k k
|
|
// KMOVW k m16
|
|
// KMOVW k r32
|
|
// KMOVW m16 k
|
|
// KMOVW r32 k
|
|
//
|
|
// Construct and append a KMOVW instruction to the active function.
|
|
func (c *Context) KMOVW(kmr, kmr1 operand.Op) {
|
|
c.addinstruction(x86.KMOVW(kmr, kmr1))
|
|
}
|
|
|
|
// KMOVW: Move 16-bit Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KMOVW k k
|
|
// KMOVW k m16
|
|
// KMOVW k r32
|
|
// KMOVW m16 k
|
|
// KMOVW r32 k
|
|
//
|
|
// Construct and append a KMOVW instruction to the active function.
|
|
// Operates on the global context.
|
|
func KMOVW(kmr, kmr1 operand.Op) { ctx.KMOVW(kmr, kmr1) }
|
|
|
|
// KNOTB: NOT 8-bit Mask Register.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KNOTB k k
|
|
//
|
|
// Construct and append a KNOTB instruction to the active function.
|
|
func (c *Context) KNOTB(k, k1 operand.Op) {
|
|
c.addinstruction(x86.KNOTB(k, k1))
|
|
}
|
|
|
|
// KNOTB: NOT 8-bit Mask Register.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KNOTB k k
|
|
//
|
|
// Construct and append a KNOTB instruction to the active function.
|
|
// Operates on the global context.
|
|
func KNOTB(k, k1 operand.Op) { ctx.KNOTB(k, k1) }
|
|
|
|
// KNOTD: NOT 32-bit Mask Register.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KNOTD k k
|
|
//
|
|
// Construct and append a KNOTD instruction to the active function.
|
|
func (c *Context) KNOTD(k, k1 operand.Op) {
|
|
c.addinstruction(x86.KNOTD(k, k1))
|
|
}
|
|
|
|
// KNOTD: NOT 32-bit Mask Register.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KNOTD k k
|
|
//
|
|
// Construct and append a KNOTD instruction to the active function.
|
|
// Operates on the global context.
|
|
func KNOTD(k, k1 operand.Op) { ctx.KNOTD(k, k1) }
|
|
|
|
// KNOTQ: NOT 64-bit Mask Register.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KNOTQ k k
|
|
//
|
|
// Construct and append a KNOTQ instruction to the active function.
|
|
func (c *Context) KNOTQ(k, k1 operand.Op) {
|
|
c.addinstruction(x86.KNOTQ(k, k1))
|
|
}
|
|
|
|
// KNOTQ: NOT 64-bit Mask Register.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KNOTQ k k
|
|
//
|
|
// Construct and append a KNOTQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func KNOTQ(k, k1 operand.Op) { ctx.KNOTQ(k, k1) }
|
|
|
|
// KNOTW: NOT 16-bit Mask Register.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KNOTW k k
|
|
//
|
|
// Construct and append a KNOTW instruction to the active function.
|
|
func (c *Context) KNOTW(k, k1 operand.Op) {
|
|
c.addinstruction(x86.KNOTW(k, k1))
|
|
}
|
|
|
|
// KNOTW: NOT 16-bit Mask Register.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KNOTW k k
|
|
//
|
|
// Construct and append a KNOTW instruction to the active function.
|
|
// Operates on the global context.
|
|
func KNOTW(k, k1 operand.Op) { ctx.KNOTW(k, k1) }
|
|
|
|
// KORB: Bitwise Logical OR 8-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KORB k k k
|
|
//
|
|
// Construct and append a KORB instruction to the active function.
|
|
func (c *Context) KORB(k, k1, k2 operand.Op) {
|
|
c.addinstruction(x86.KORB(k, k1, k2))
|
|
}
|
|
|
|
// KORB: Bitwise Logical OR 8-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KORB k k k
|
|
//
|
|
// Construct and append a KORB instruction to the active function.
|
|
// Operates on the global context.
|
|
func KORB(k, k1, k2 operand.Op) { ctx.KORB(k, k1, k2) }
|
|
|
|
// KORD: Bitwise Logical OR 32-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KORD k k k
|
|
//
|
|
// Construct and append a KORD instruction to the active function.
|
|
func (c *Context) KORD(k, k1, k2 operand.Op) {
|
|
c.addinstruction(x86.KORD(k, k1, k2))
|
|
}
|
|
|
|
// KORD: Bitwise Logical OR 32-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KORD k k k
|
|
//
|
|
// Construct and append a KORD instruction to the active function.
|
|
// Operates on the global context.
|
|
func KORD(k, k1, k2 operand.Op) { ctx.KORD(k, k1, k2) }
|
|
|
|
// KORQ: Bitwise Logical OR 64-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KORQ k k k
|
|
//
|
|
// Construct and append a KORQ instruction to the active function.
|
|
func (c *Context) KORQ(k, k1, k2 operand.Op) {
|
|
c.addinstruction(x86.KORQ(k, k1, k2))
|
|
}
|
|
|
|
// KORQ: Bitwise Logical OR 64-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KORQ k k k
|
|
//
|
|
// Construct and append a KORQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func KORQ(k, k1, k2 operand.Op) { ctx.KORQ(k, k1, k2) }
|
|
|
|
// KORTESTB: OR 8-bit Masks and Set Flags.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KORTESTB k k
|
|
//
|
|
// Construct and append a KORTESTB instruction to the active function.
|
|
func (c *Context) KORTESTB(k, k1 operand.Op) {
|
|
c.addinstruction(x86.KORTESTB(k, k1))
|
|
}
|
|
|
|
// KORTESTB: OR 8-bit Masks and Set Flags.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KORTESTB k k
|
|
//
|
|
// Construct and append a KORTESTB instruction to the active function.
|
|
// Operates on the global context.
|
|
func KORTESTB(k, k1 operand.Op) { ctx.KORTESTB(k, k1) }
|
|
|
|
// KORTESTD: OR 32-bit Masks and Set Flags.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KORTESTD k k
|
|
//
|
|
// Construct and append a KORTESTD instruction to the active function.
|
|
func (c *Context) KORTESTD(k, k1 operand.Op) {
|
|
c.addinstruction(x86.KORTESTD(k, k1))
|
|
}
|
|
|
|
// KORTESTD: OR 32-bit Masks and Set Flags.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KORTESTD k k
|
|
//
|
|
// Construct and append a KORTESTD instruction to the active function.
|
|
// Operates on the global context.
|
|
func KORTESTD(k, k1 operand.Op) { ctx.KORTESTD(k, k1) }
|
|
|
|
// KORTESTQ: OR 64-bit Masks and Set Flags.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KORTESTQ k k
|
|
//
|
|
// Construct and append a KORTESTQ instruction to the active function.
|
|
func (c *Context) KORTESTQ(k, k1 operand.Op) {
|
|
c.addinstruction(x86.KORTESTQ(k, k1))
|
|
}
|
|
|
|
// KORTESTQ: OR 64-bit Masks and Set Flags.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KORTESTQ k k
|
|
//
|
|
// Construct and append a KORTESTQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func KORTESTQ(k, k1 operand.Op) { ctx.KORTESTQ(k, k1) }
|
|
|
|
// KORTESTW: OR 16-bit Masks and Set Flags.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KORTESTW k k
|
|
//
|
|
// Construct and append a KORTESTW instruction to the active function.
|
|
func (c *Context) KORTESTW(k, k1 operand.Op) {
|
|
c.addinstruction(x86.KORTESTW(k, k1))
|
|
}
|
|
|
|
// KORTESTW: OR 16-bit Masks and Set Flags.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KORTESTW k k
|
|
//
|
|
// Construct and append a KORTESTW instruction to the active function.
|
|
// Operates on the global context.
|
|
func KORTESTW(k, k1 operand.Op) { ctx.KORTESTW(k, k1) }
|
|
|
|
// KORW: Bitwise Logical OR 16-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KORW k k k
|
|
//
|
|
// Construct and append a KORW instruction to the active function.
|
|
func (c *Context) KORW(k, k1, k2 operand.Op) {
|
|
c.addinstruction(x86.KORW(k, k1, k2))
|
|
}
|
|
|
|
// KORW: Bitwise Logical OR 16-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KORW k k k
|
|
//
|
|
// Construct and append a KORW instruction to the active function.
|
|
// Operates on the global context.
|
|
func KORW(k, k1, k2 operand.Op) { ctx.KORW(k, k1, k2) }
|
|
|
|
// KSHIFTLB: Shift Left 8-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KSHIFTLB imm8 k k
|
|
//
|
|
// Construct and append a KSHIFTLB instruction to the active function.
|
|
func (c *Context) KSHIFTLB(i, k, k1 operand.Op) {
|
|
c.addinstruction(x86.KSHIFTLB(i, k, k1))
|
|
}
|
|
|
|
// KSHIFTLB: Shift Left 8-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KSHIFTLB imm8 k k
|
|
//
|
|
// Construct and append a KSHIFTLB instruction to the active function.
|
|
// Operates on the global context.
|
|
func KSHIFTLB(i, k, k1 operand.Op) { ctx.KSHIFTLB(i, k, k1) }
|
|
|
|
// KSHIFTLD: Shift Left 32-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KSHIFTLD imm8 k k
|
|
//
|
|
// Construct and append a KSHIFTLD instruction to the active function.
|
|
func (c *Context) KSHIFTLD(i, k, k1 operand.Op) {
|
|
c.addinstruction(x86.KSHIFTLD(i, k, k1))
|
|
}
|
|
|
|
// KSHIFTLD: Shift Left 32-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KSHIFTLD imm8 k k
|
|
//
|
|
// Construct and append a KSHIFTLD instruction to the active function.
|
|
// Operates on the global context.
|
|
func KSHIFTLD(i, k, k1 operand.Op) { ctx.KSHIFTLD(i, k, k1) }
|
|
|
|
// KSHIFTLQ: Shift Left 64-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KSHIFTLQ imm8 k k
|
|
//
|
|
// Construct and append a KSHIFTLQ instruction to the active function.
|
|
func (c *Context) KSHIFTLQ(i, k, k1 operand.Op) {
|
|
c.addinstruction(x86.KSHIFTLQ(i, k, k1))
|
|
}
|
|
|
|
// KSHIFTLQ: Shift Left 64-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KSHIFTLQ imm8 k k
|
|
//
|
|
// Construct and append a KSHIFTLQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func KSHIFTLQ(i, k, k1 operand.Op) { ctx.KSHIFTLQ(i, k, k1) }
|
|
|
|
// KSHIFTLW: Shift Left 16-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KSHIFTLW imm8 k k
|
|
//
|
|
// Construct and append a KSHIFTLW instruction to the active function.
|
|
func (c *Context) KSHIFTLW(i, k, k1 operand.Op) {
|
|
c.addinstruction(x86.KSHIFTLW(i, k, k1))
|
|
}
|
|
|
|
// KSHIFTLW: Shift Left 16-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KSHIFTLW imm8 k k
|
|
//
|
|
// Construct and append a KSHIFTLW instruction to the active function.
|
|
// Operates on the global context.
|
|
func KSHIFTLW(i, k, k1 operand.Op) { ctx.KSHIFTLW(i, k, k1) }
|
|
|
|
// KSHIFTRB: Shift Right 8-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KSHIFTRB imm8 k k
|
|
//
|
|
// Construct and append a KSHIFTRB instruction to the active function.
|
|
func (c *Context) KSHIFTRB(i, k, k1 operand.Op) {
|
|
c.addinstruction(x86.KSHIFTRB(i, k, k1))
|
|
}
|
|
|
|
// KSHIFTRB: Shift Right 8-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KSHIFTRB imm8 k k
|
|
//
|
|
// Construct and append a KSHIFTRB instruction to the active function.
|
|
// Operates on the global context.
|
|
func KSHIFTRB(i, k, k1 operand.Op) { ctx.KSHIFTRB(i, k, k1) }
|
|
|
|
// KSHIFTRD: Shift Right 32-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KSHIFTRD imm8 k k
|
|
//
|
|
// Construct and append a KSHIFTRD instruction to the active function.
|
|
func (c *Context) KSHIFTRD(i, k, k1 operand.Op) {
|
|
c.addinstruction(x86.KSHIFTRD(i, k, k1))
|
|
}
|
|
|
|
// KSHIFTRD: Shift Right 32-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KSHIFTRD imm8 k k
|
|
//
|
|
// Construct and append a KSHIFTRD instruction to the active function.
|
|
// Operates on the global context.
|
|
func KSHIFTRD(i, k, k1 operand.Op) { ctx.KSHIFTRD(i, k, k1) }
|
|
|
|
// KSHIFTRQ: Shift Right 64-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KSHIFTRQ imm8 k k
|
|
//
|
|
// Construct and append a KSHIFTRQ instruction to the active function.
|
|
func (c *Context) KSHIFTRQ(i, k, k1 operand.Op) {
|
|
c.addinstruction(x86.KSHIFTRQ(i, k, k1))
|
|
}
|
|
|
|
// KSHIFTRQ: Shift Right 64-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KSHIFTRQ imm8 k k
|
|
//
|
|
// Construct and append a KSHIFTRQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func KSHIFTRQ(i, k, k1 operand.Op) { ctx.KSHIFTRQ(i, k, k1) }
|
|
|
|
// KSHIFTRW: Shift Right 16-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KSHIFTRW imm8 k k
|
|
//
|
|
// Construct and append a KSHIFTRW instruction to the active function.
|
|
func (c *Context) KSHIFTRW(i, k, k1 operand.Op) {
|
|
c.addinstruction(x86.KSHIFTRW(i, k, k1))
|
|
}
|
|
|
|
// KSHIFTRW: Shift Right 16-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KSHIFTRW imm8 k k
|
|
//
|
|
// Construct and append a KSHIFTRW instruction to the active function.
|
|
// Operates on the global context.
|
|
func KSHIFTRW(i, k, k1 operand.Op) { ctx.KSHIFTRW(i, k, k1) }
|
|
|
|
// KTESTB: Bit Test 8-bit Masks and Set Flags.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KTESTB k k
|
|
//
|
|
// Construct and append a KTESTB instruction to the active function.
|
|
func (c *Context) KTESTB(k, k1 operand.Op) {
|
|
c.addinstruction(x86.KTESTB(k, k1))
|
|
}
|
|
|
|
// KTESTB: Bit Test 8-bit Masks and Set Flags.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KTESTB k k
|
|
//
|
|
// Construct and append a KTESTB instruction to the active function.
|
|
// Operates on the global context.
|
|
func KTESTB(k, k1 operand.Op) { ctx.KTESTB(k, k1) }
|
|
|
|
// KTESTD: Bit Test 32-bit Masks and Set Flags.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KTESTD k k
|
|
//
|
|
// Construct and append a KTESTD instruction to the active function.
|
|
func (c *Context) KTESTD(k, k1 operand.Op) {
|
|
c.addinstruction(x86.KTESTD(k, k1))
|
|
}
|
|
|
|
// KTESTD: Bit Test 32-bit Masks and Set Flags.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KTESTD k k
|
|
//
|
|
// Construct and append a KTESTD instruction to the active function.
|
|
// Operates on the global context.
|
|
func KTESTD(k, k1 operand.Op) { ctx.KTESTD(k, k1) }
|
|
|
|
// KTESTQ: Bit Test 64-bit Masks and Set Flags.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KTESTQ k k
|
|
//
|
|
// Construct and append a KTESTQ instruction to the active function.
|
|
func (c *Context) KTESTQ(k, k1 operand.Op) {
|
|
c.addinstruction(x86.KTESTQ(k, k1))
|
|
}
|
|
|
|
// KTESTQ: Bit Test 64-bit Masks and Set Flags.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KTESTQ k k
|
|
//
|
|
// Construct and append a KTESTQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func KTESTQ(k, k1 operand.Op) { ctx.KTESTQ(k, k1) }
|
|
|
|
// KTESTW: Bit Test 16-bit Masks and Set Flags.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KTESTW k k
|
|
//
|
|
// Construct and append a KTESTW instruction to the active function.
|
|
func (c *Context) KTESTW(k, k1 operand.Op) {
|
|
c.addinstruction(x86.KTESTW(k, k1))
|
|
}
|
|
|
|
// KTESTW: Bit Test 16-bit Masks and Set Flags.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KTESTW k k
|
|
//
|
|
// Construct and append a KTESTW instruction to the active function.
|
|
// Operates on the global context.
|
|
func KTESTW(k, k1 operand.Op) { ctx.KTESTW(k, k1) }
|
|
|
|
// KUNPCKBW: Unpack and Interleave 8-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KUNPCKBW k k k
|
|
//
|
|
// Construct and append a KUNPCKBW instruction to the active function.
|
|
func (c *Context) KUNPCKBW(k, k1, k2 operand.Op) {
|
|
c.addinstruction(x86.KUNPCKBW(k, k1, k2))
|
|
}
|
|
|
|
// KUNPCKBW: Unpack and Interleave 8-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KUNPCKBW k k k
|
|
//
|
|
// Construct and append a KUNPCKBW instruction to the active function.
|
|
// Operates on the global context.
|
|
func KUNPCKBW(k, k1, k2 operand.Op) { ctx.KUNPCKBW(k, k1, k2) }
|
|
|
|
// KUNPCKDQ: Unpack and Interleave 32-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KUNPCKDQ k k k
|
|
//
|
|
// Construct and append a KUNPCKDQ instruction to the active function.
|
|
func (c *Context) KUNPCKDQ(k, k1, k2 operand.Op) {
|
|
c.addinstruction(x86.KUNPCKDQ(k, k1, k2))
|
|
}
|
|
|
|
// KUNPCKDQ: Unpack and Interleave 32-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KUNPCKDQ k k k
|
|
//
|
|
// Construct and append a KUNPCKDQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func KUNPCKDQ(k, k1, k2 operand.Op) { ctx.KUNPCKDQ(k, k1, k2) }
|
|
|
|
// KUNPCKWD: Unpack and Interleave 16-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KUNPCKWD k k k
|
|
//
|
|
// Construct and append a KUNPCKWD instruction to the active function.
|
|
func (c *Context) KUNPCKWD(k, k1, k2 operand.Op) {
|
|
c.addinstruction(x86.KUNPCKWD(k, k1, k2))
|
|
}
|
|
|
|
// KUNPCKWD: Unpack and Interleave 16-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KUNPCKWD k k k
|
|
//
|
|
// Construct and append a KUNPCKWD instruction to the active function.
|
|
// Operates on the global context.
|
|
func KUNPCKWD(k, k1, k2 operand.Op) { ctx.KUNPCKWD(k, k1, k2) }
|
|
|
|
// KXNORB: Bitwise Logical XNOR 8-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KXNORB k k k
|
|
//
|
|
// Construct and append a KXNORB instruction to the active function.
|
|
func (c *Context) KXNORB(k, k1, k2 operand.Op) {
|
|
c.addinstruction(x86.KXNORB(k, k1, k2))
|
|
}
|
|
|
|
// KXNORB: Bitwise Logical XNOR 8-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KXNORB k k k
|
|
//
|
|
// Construct and append a KXNORB instruction to the active function.
|
|
// Operates on the global context.
|
|
func KXNORB(k, k1, k2 operand.Op) { ctx.KXNORB(k, k1, k2) }
|
|
|
|
// KXNORD: Bitwise Logical XNOR 32-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KXNORD k k k
|
|
//
|
|
// Construct and append a KXNORD instruction to the active function.
|
|
func (c *Context) KXNORD(k, k1, k2 operand.Op) {
|
|
c.addinstruction(x86.KXNORD(k, k1, k2))
|
|
}
|
|
|
|
// KXNORD: Bitwise Logical XNOR 32-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KXNORD k k k
|
|
//
|
|
// Construct and append a KXNORD instruction to the active function.
|
|
// Operates on the global context.
|
|
func KXNORD(k, k1, k2 operand.Op) { ctx.KXNORD(k, k1, k2) }
|
|
|
|
// KXNORQ: Bitwise Logical XNOR 64-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KXNORQ k k k
|
|
//
|
|
// Construct and append a KXNORQ instruction to the active function.
|
|
func (c *Context) KXNORQ(k, k1, k2 operand.Op) {
|
|
c.addinstruction(x86.KXNORQ(k, k1, k2))
|
|
}
|
|
|
|
// KXNORQ: Bitwise Logical XNOR 64-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KXNORQ k k k
|
|
//
|
|
// Construct and append a KXNORQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func KXNORQ(k, k1, k2 operand.Op) { ctx.KXNORQ(k, k1, k2) }
|
|
|
|
// KXNORW: Bitwise Logical XNOR 16-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KXNORW k k k
|
|
//
|
|
// Construct and append a KXNORW instruction to the active function.
|
|
func (c *Context) KXNORW(k, k1, k2 operand.Op) {
|
|
c.addinstruction(x86.KXNORW(k, k1, k2))
|
|
}
|
|
|
|
// KXNORW: Bitwise Logical XNOR 16-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KXNORW k k k
|
|
//
|
|
// Construct and append a KXNORW instruction to the active function.
|
|
// Operates on the global context.
|
|
func KXNORW(k, k1, k2 operand.Op) { ctx.KXNORW(k, k1, k2) }
|
|
|
|
// KXORB: Bitwise Logical XOR 8-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KXORB k k k
|
|
//
|
|
// Construct and append a KXORB instruction to the active function.
|
|
func (c *Context) KXORB(k, k1, k2 operand.Op) {
|
|
c.addinstruction(x86.KXORB(k, k1, k2))
|
|
}
|
|
|
|
// KXORB: Bitwise Logical XOR 8-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KXORB k k k
|
|
//
|
|
// Construct and append a KXORB instruction to the active function.
|
|
// Operates on the global context.
|
|
func KXORB(k, k1, k2 operand.Op) { ctx.KXORB(k, k1, k2) }
|
|
|
|
// KXORD: Bitwise Logical XOR 32-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KXORD k k k
|
|
//
|
|
// Construct and append a KXORD instruction to the active function.
|
|
func (c *Context) KXORD(k, k1, k2 operand.Op) {
|
|
c.addinstruction(x86.KXORD(k, k1, k2))
|
|
}
|
|
|
|
// KXORD: Bitwise Logical XOR 32-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KXORD k k k
|
|
//
|
|
// Construct and append a KXORD instruction to the active function.
|
|
// Operates on the global context.
|
|
func KXORD(k, k1, k2 operand.Op) { ctx.KXORD(k, k1, k2) }
|
|
|
|
// KXORQ: Bitwise Logical XOR 64-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KXORQ k k k
|
|
//
|
|
// Construct and append a KXORQ instruction to the active function.
|
|
func (c *Context) KXORQ(k, k1, k2 operand.Op) {
|
|
c.addinstruction(x86.KXORQ(k, k1, k2))
|
|
}
|
|
|
|
// KXORQ: Bitwise Logical XOR 64-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KXORQ k k k
|
|
//
|
|
// Construct and append a KXORQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func KXORQ(k, k1, k2 operand.Op) { ctx.KXORQ(k, k1, k2) }
|
|
|
|
// KXORW: Bitwise Logical XOR 16-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KXORW k k k
|
|
//
|
|
// Construct and append a KXORW instruction to the active function.
|
|
func (c *Context) KXORW(k, k1, k2 operand.Op) {
|
|
c.addinstruction(x86.KXORW(k, k1, k2))
|
|
}
|
|
|
|
// KXORW: Bitwise Logical XOR 16-bit Masks.
|
|
//
|
|
// Forms:
|
|
//
|
|
// KXORW k k k
|
|
//
|
|
// Construct and append a KXORW instruction to the active function.
|
|
// Operates on the global context.
|
|
func KXORW(k, k1, k2 operand.Op) { ctx.KXORW(k, k1, k2) }
|
|
|
|
// LDDQU: Load Unaligned Integer 128 Bits.
|
|
//
|
|
// Forms:
|
|
//
|
|
// LDDQU m128 xmm
|
|
//
|
|
// Construct and append a LDDQU instruction to the active function.
|
|
func (c *Context) LDDQU(m, x operand.Op) {
|
|
c.addinstruction(x86.LDDQU(m, x))
|
|
}
|
|
|
|
// LDDQU: Load Unaligned Integer 128 Bits.
|
|
//
|
|
// Forms:
|
|
//
|
|
// LDDQU m128 xmm
|
|
//
|
|
// Construct and append a LDDQU instruction to the active function.
|
|
// Operates on the global context.
|
|
func LDDQU(m, x operand.Op) { ctx.LDDQU(m, x) }
|
|
|
|
// LDMXCSR: Load MXCSR Register.
|
|
//
|
|
// Forms:
|
|
//
|
|
// LDMXCSR m32
|
|
//
|
|
// Construct and append a LDMXCSR instruction to the active function.
|
|
func (c *Context) LDMXCSR(m operand.Op) {
|
|
c.addinstruction(x86.LDMXCSR(m))
|
|
}
|
|
|
|
// LDMXCSR: Load MXCSR Register.
|
|
//
|
|
// Forms:
|
|
//
|
|
// LDMXCSR m32
|
|
//
|
|
// Construct and append a LDMXCSR instruction to the active function.
|
|
// Operates on the global context.
|
|
func LDMXCSR(m operand.Op) { ctx.LDMXCSR(m) }
|
|
|
|
// LEAL: Load Effective Address.
|
|
//
|
|
// Forms:
|
|
//
|
|
// LEAL m r32
|
|
//
|
|
// Construct and append a LEAL instruction to the active function.
|
|
func (c *Context) LEAL(m, r operand.Op) {
|
|
c.addinstruction(x86.LEAL(m, r))
|
|
}
|
|
|
|
// LEAL: Load Effective Address.
|
|
//
|
|
// Forms:
|
|
//
|
|
// LEAL m r32
|
|
//
|
|
// Construct and append a LEAL instruction to the active function.
|
|
// Operates on the global context.
|
|
func LEAL(m, r operand.Op) { ctx.LEAL(m, r) }
|
|
|
|
// LEAQ: Load Effective Address.
|
|
//
|
|
// Forms:
|
|
//
|
|
// LEAQ m r64
|
|
//
|
|
// Construct and append a LEAQ instruction to the active function.
|
|
func (c *Context) LEAQ(m, r operand.Op) {
|
|
c.addinstruction(x86.LEAQ(m, r))
|
|
}
|
|
|
|
// LEAQ: Load Effective Address.
|
|
//
|
|
// Forms:
|
|
//
|
|
// LEAQ m r64
|
|
//
|
|
// Construct and append a LEAQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func LEAQ(m, r operand.Op) { ctx.LEAQ(m, r) }
|
|
|
|
// LEAW: Load Effective Address.
|
|
//
|
|
// Forms:
|
|
//
|
|
// LEAW m r16
|
|
//
|
|
// Construct and append a LEAW instruction to the active function.
|
|
func (c *Context) LEAW(m, r operand.Op) {
|
|
c.addinstruction(x86.LEAW(m, r))
|
|
}
|
|
|
|
// LEAW: Load Effective Address.
|
|
//
|
|
// Forms:
|
|
//
|
|
// LEAW m r16
|
|
//
|
|
// Construct and append a LEAW instruction to the active function.
|
|
// Operates on the global context.
|
|
func LEAW(m, r operand.Op) { ctx.LEAW(m, r) }
|
|
|
|
// LFENCE: Load Fence.
|
|
//
|
|
// Forms:
|
|
//
|
|
// LFENCE
|
|
//
|
|
// Construct and append a LFENCE instruction to the active function.
|
|
func (c *Context) LFENCE() {
|
|
c.addinstruction(x86.LFENCE())
|
|
}
|
|
|
|
// LFENCE: Load Fence.
|
|
//
|
|
// Forms:
|
|
//
|
|
// LFENCE
|
|
//
|
|
// Construct and append a LFENCE instruction to the active function.
|
|
// Operates on the global context.
|
|
func LFENCE() { ctx.LFENCE() }
|
|
|
|
// LZCNTL: Count the Number of Leading Zero Bits.
|
|
//
|
|
// Forms:
|
|
//
|
|
// LZCNTL m32 r32
|
|
// LZCNTL r32 r32
|
|
//
|
|
// Construct and append a LZCNTL instruction to the active function.
|
|
func (c *Context) LZCNTL(mr, r operand.Op) {
|
|
c.addinstruction(x86.LZCNTL(mr, r))
|
|
}
|
|
|
|
// LZCNTL: Count the Number of Leading Zero Bits.
|
|
//
|
|
// Forms:
|
|
//
|
|
// LZCNTL m32 r32
|
|
// LZCNTL r32 r32
|
|
//
|
|
// Construct and append a LZCNTL instruction to the active function.
|
|
// Operates on the global context.
|
|
func LZCNTL(mr, r operand.Op) { ctx.LZCNTL(mr, r) }
|
|
|
|
// LZCNTQ: Count the Number of Leading Zero Bits.
|
|
//
|
|
// Forms:
|
|
//
|
|
// LZCNTQ m64 r64
|
|
// LZCNTQ r64 r64
|
|
//
|
|
// Construct and append a LZCNTQ instruction to the active function.
|
|
func (c *Context) LZCNTQ(mr, r operand.Op) {
|
|
c.addinstruction(x86.LZCNTQ(mr, r))
|
|
}
|
|
|
|
// LZCNTQ: Count the Number of Leading Zero Bits.
|
|
//
|
|
// Forms:
|
|
//
|
|
// LZCNTQ m64 r64
|
|
// LZCNTQ r64 r64
|
|
//
|
|
// Construct and append a LZCNTQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func LZCNTQ(mr, r operand.Op) { ctx.LZCNTQ(mr, r) }
|
|
|
|
// LZCNTW: Count the Number of Leading Zero Bits.
|
|
//
|
|
// Forms:
|
|
//
|
|
// LZCNTW m16 r16
|
|
// LZCNTW r16 r16
|
|
//
|
|
// Construct and append a LZCNTW instruction to the active function.
|
|
func (c *Context) LZCNTW(mr, r operand.Op) {
|
|
c.addinstruction(x86.LZCNTW(mr, r))
|
|
}
|
|
|
|
// LZCNTW: Count the Number of Leading Zero Bits.
|
|
//
|
|
// Forms:
|
|
//
|
|
// LZCNTW m16 r16
|
|
// LZCNTW r16 r16
|
|
//
|
|
// Construct and append a LZCNTW instruction to the active function.
|
|
// Operates on the global context.
|
|
func LZCNTW(mr, r operand.Op) { ctx.LZCNTW(mr, r) }
|
|
|
|
// MASKMOVDQU: Store Selected Bytes of Double Quadword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MASKMOVDQU xmm xmm
|
|
//
|
|
// Construct and append a MASKMOVDQU instruction to the active function.
|
|
func (c *Context) MASKMOVDQU(x, x1 operand.Op) {
|
|
c.addinstruction(x86.MASKMOVDQU(x, x1))
|
|
}
|
|
|
|
// MASKMOVDQU: Store Selected Bytes of Double Quadword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MASKMOVDQU xmm xmm
|
|
//
|
|
// Construct and append a MASKMOVDQU instruction to the active function.
|
|
// Operates on the global context.
|
|
func MASKMOVDQU(x, x1 operand.Op) { ctx.MASKMOVDQU(x, x1) }
|
|
|
|
// MASKMOVOU: Store Selected Bytes of Double Quadword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MASKMOVOU xmm xmm
|
|
//
|
|
// Construct and append a MASKMOVOU instruction to the active function.
|
|
func (c *Context) MASKMOVOU(x, x1 operand.Op) {
|
|
c.addinstruction(x86.MASKMOVOU(x, x1))
|
|
}
|
|
|
|
// MASKMOVOU: Store Selected Bytes of Double Quadword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MASKMOVOU xmm xmm
|
|
//
|
|
// Construct and append a MASKMOVOU instruction to the active function.
|
|
// Operates on the global context.
|
|
func MASKMOVOU(x, x1 operand.Op) { ctx.MASKMOVOU(x, x1) }
|
|
|
|
// MAXPD: Return Maximum Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MAXPD m128 xmm
|
|
// MAXPD xmm xmm
|
|
//
|
|
// Construct and append a MAXPD instruction to the active function.
|
|
func (c *Context) MAXPD(mx, x operand.Op) {
|
|
c.addinstruction(x86.MAXPD(mx, x))
|
|
}
|
|
|
|
// MAXPD: Return Maximum Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MAXPD m128 xmm
|
|
// MAXPD xmm xmm
|
|
//
|
|
// Construct and append a MAXPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func MAXPD(mx, x operand.Op) { ctx.MAXPD(mx, x) }
|
|
|
|
// MAXPS: Return Maximum Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MAXPS m128 xmm
|
|
// MAXPS xmm xmm
|
|
//
|
|
// Construct and append a MAXPS instruction to the active function.
|
|
func (c *Context) MAXPS(mx, x operand.Op) {
|
|
c.addinstruction(x86.MAXPS(mx, x))
|
|
}
|
|
|
|
// MAXPS: Return Maximum Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MAXPS m128 xmm
|
|
// MAXPS xmm xmm
|
|
//
|
|
// Construct and append a MAXPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func MAXPS(mx, x operand.Op) { ctx.MAXPS(mx, x) }
|
|
|
|
// MAXSD: Return Maximum Scalar Double-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MAXSD m64 xmm
|
|
// MAXSD xmm xmm
|
|
//
|
|
// Construct and append a MAXSD instruction to the active function.
|
|
func (c *Context) MAXSD(mx, x operand.Op) {
|
|
c.addinstruction(x86.MAXSD(mx, x))
|
|
}
|
|
|
|
// MAXSD: Return Maximum Scalar Double-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MAXSD m64 xmm
|
|
// MAXSD xmm xmm
|
|
//
|
|
// Construct and append a MAXSD instruction to the active function.
|
|
// Operates on the global context.
|
|
func MAXSD(mx, x operand.Op) { ctx.MAXSD(mx, x) }
|
|
|
|
// MAXSS: Return Maximum Scalar Single-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MAXSS m32 xmm
|
|
// MAXSS xmm xmm
|
|
//
|
|
// Construct and append a MAXSS instruction to the active function.
|
|
func (c *Context) MAXSS(mx, x operand.Op) {
|
|
c.addinstruction(x86.MAXSS(mx, x))
|
|
}
|
|
|
|
// MAXSS: Return Maximum Scalar Single-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MAXSS m32 xmm
|
|
// MAXSS xmm xmm
|
|
//
|
|
// Construct and append a MAXSS instruction to the active function.
|
|
// Operates on the global context.
|
|
func MAXSS(mx, x operand.Op) { ctx.MAXSS(mx, x) }
|
|
|
|
// MFENCE: Memory Fence.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MFENCE
|
|
//
|
|
// Construct and append a MFENCE instruction to the active function.
|
|
func (c *Context) MFENCE() {
|
|
c.addinstruction(x86.MFENCE())
|
|
}
|
|
|
|
// MFENCE: Memory Fence.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MFENCE
|
|
//
|
|
// Construct and append a MFENCE instruction to the active function.
|
|
// Operates on the global context.
|
|
func MFENCE() { ctx.MFENCE() }
|
|
|
|
// MINPD: Return Minimum Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MINPD m128 xmm
|
|
// MINPD xmm xmm
|
|
//
|
|
// Construct and append a MINPD instruction to the active function.
|
|
func (c *Context) MINPD(mx, x operand.Op) {
|
|
c.addinstruction(x86.MINPD(mx, x))
|
|
}
|
|
|
|
// MINPD: Return Minimum Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MINPD m128 xmm
|
|
// MINPD xmm xmm
|
|
//
|
|
// Construct and append a MINPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func MINPD(mx, x operand.Op) { ctx.MINPD(mx, x) }
|
|
|
|
// MINPS: Return Minimum Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MINPS m128 xmm
|
|
// MINPS xmm xmm
|
|
//
|
|
// Construct and append a MINPS instruction to the active function.
|
|
func (c *Context) MINPS(mx, x operand.Op) {
|
|
c.addinstruction(x86.MINPS(mx, x))
|
|
}
|
|
|
|
// MINPS: Return Minimum Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MINPS m128 xmm
|
|
// MINPS xmm xmm
|
|
//
|
|
// Construct and append a MINPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func MINPS(mx, x operand.Op) { ctx.MINPS(mx, x) }
|
|
|
|
// MINSD: Return Minimum Scalar Double-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MINSD m64 xmm
|
|
// MINSD xmm xmm
|
|
//
|
|
// Construct and append a MINSD instruction to the active function.
|
|
func (c *Context) MINSD(mx, x operand.Op) {
|
|
c.addinstruction(x86.MINSD(mx, x))
|
|
}
|
|
|
|
// MINSD: Return Minimum Scalar Double-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MINSD m64 xmm
|
|
// MINSD xmm xmm
|
|
//
|
|
// Construct and append a MINSD instruction to the active function.
|
|
// Operates on the global context.
|
|
func MINSD(mx, x operand.Op) { ctx.MINSD(mx, x) }
|
|
|
|
// MINSS: Return Minimum Scalar Single-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MINSS m32 xmm
|
|
// MINSS xmm xmm
|
|
//
|
|
// Construct and append a MINSS instruction to the active function.
|
|
func (c *Context) MINSS(mx, x operand.Op) {
|
|
c.addinstruction(x86.MINSS(mx, x))
|
|
}
|
|
|
|
// MINSS: Return Minimum Scalar Single-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MINSS m32 xmm
|
|
// MINSS xmm xmm
|
|
//
|
|
// Construct and append a MINSS instruction to the active function.
|
|
// Operates on the global context.
|
|
func MINSS(mx, x operand.Op) { ctx.MINSS(mx, x) }
|
|
|
|
// MONITOR: Monitor a Linear Address Range.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MONITOR
|
|
//
|
|
// Construct and append a MONITOR instruction to the active function.
|
|
func (c *Context) MONITOR() {
|
|
c.addinstruction(x86.MONITOR())
|
|
}
|
|
|
|
// MONITOR: Monitor a Linear Address Range.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MONITOR
|
|
//
|
|
// Construct and append a MONITOR instruction to the active function.
|
|
// Operates on the global context.
|
|
func MONITOR() { ctx.MONITOR() }
|
|
|
|
// MOVAPD: Move Aligned Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVAPD m128 xmm
|
|
// MOVAPD xmm m128
|
|
// MOVAPD xmm xmm
|
|
//
|
|
// Construct and append a MOVAPD instruction to the active function.
|
|
func (c *Context) MOVAPD(mx, mx1 operand.Op) {
|
|
c.addinstruction(x86.MOVAPD(mx, mx1))
|
|
}
|
|
|
|
// MOVAPD: Move Aligned Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVAPD m128 xmm
|
|
// MOVAPD xmm m128
|
|
// MOVAPD xmm xmm
|
|
//
|
|
// Construct and append a MOVAPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVAPD(mx, mx1 operand.Op) { ctx.MOVAPD(mx, mx1) }
|
|
|
|
// MOVAPS: Move Aligned Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVAPS m128 xmm
|
|
// MOVAPS xmm m128
|
|
// MOVAPS xmm xmm
|
|
//
|
|
// Construct and append a MOVAPS instruction to the active function.
|
|
func (c *Context) MOVAPS(mx, mx1 operand.Op) {
|
|
c.addinstruction(x86.MOVAPS(mx, mx1))
|
|
}
|
|
|
|
// MOVAPS: Move Aligned Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVAPS m128 xmm
|
|
// MOVAPS xmm m128
|
|
// MOVAPS xmm xmm
|
|
//
|
|
// Construct and append a MOVAPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVAPS(mx, mx1 operand.Op) { ctx.MOVAPS(mx, mx1) }
|
|
|
|
// MOVB: Move.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVB imm8 m8
|
|
// MOVB imm8 r8
|
|
// MOVB m8 r8
|
|
// MOVB r8 m8
|
|
// MOVB r8 r8
|
|
//
|
|
// Construct and append a MOVB instruction to the active function.
|
|
func (c *Context) MOVB(imr, mr operand.Op) {
|
|
c.addinstruction(x86.MOVB(imr, mr))
|
|
}
|
|
|
|
// MOVB: Move.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVB imm8 m8
|
|
// MOVB imm8 r8
|
|
// MOVB m8 r8
|
|
// MOVB r8 m8
|
|
// MOVB r8 r8
|
|
//
|
|
// Construct and append a MOVB instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVB(imr, mr operand.Op) { ctx.MOVB(imr, mr) }
|
|
|
|
// MOVBELL: Move Data After Swapping Bytes.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVBELL m32 r32
|
|
// MOVBELL r32 m32
|
|
//
|
|
// Construct and append a MOVBELL instruction to the active function.
|
|
func (c *Context) MOVBELL(mr, mr1 operand.Op) {
|
|
c.addinstruction(x86.MOVBELL(mr, mr1))
|
|
}
|
|
|
|
// MOVBELL: Move Data After Swapping Bytes.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVBELL m32 r32
|
|
// MOVBELL r32 m32
|
|
//
|
|
// Construct and append a MOVBELL instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVBELL(mr, mr1 operand.Op) { ctx.MOVBELL(mr, mr1) }
|
|
|
|
// MOVBEQQ: Move Data After Swapping Bytes.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVBEQQ m64 r64
|
|
// MOVBEQQ r64 m64
|
|
//
|
|
// Construct and append a MOVBEQQ instruction to the active function.
|
|
func (c *Context) MOVBEQQ(mr, mr1 operand.Op) {
|
|
c.addinstruction(x86.MOVBEQQ(mr, mr1))
|
|
}
|
|
|
|
// MOVBEQQ: Move Data After Swapping Bytes.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVBEQQ m64 r64
|
|
// MOVBEQQ r64 m64
|
|
//
|
|
// Construct and append a MOVBEQQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVBEQQ(mr, mr1 operand.Op) { ctx.MOVBEQQ(mr, mr1) }
|
|
|
|
// MOVBEWW: Move Data After Swapping Bytes.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVBEWW m16 r16
|
|
// MOVBEWW r16 m16
|
|
//
|
|
// Construct and append a MOVBEWW instruction to the active function.
|
|
func (c *Context) MOVBEWW(mr, mr1 operand.Op) {
|
|
c.addinstruction(x86.MOVBEWW(mr, mr1))
|
|
}
|
|
|
|
// MOVBEWW: Move Data After Swapping Bytes.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVBEWW m16 r16
|
|
// MOVBEWW r16 m16
|
|
//
|
|
// Construct and append a MOVBEWW instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVBEWW(mr, mr1 operand.Op) { ctx.MOVBEWW(mr, mr1) }
|
|
|
|
// MOVBLSX: Move with Sign-Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVBLSX m8 r32
|
|
// MOVBLSX r8 r32
|
|
//
|
|
// Construct and append a MOVBLSX instruction to the active function.
|
|
func (c *Context) MOVBLSX(mr, r operand.Op) {
|
|
c.addinstruction(x86.MOVBLSX(mr, r))
|
|
}
|
|
|
|
// MOVBLSX: Move with Sign-Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVBLSX m8 r32
|
|
// MOVBLSX r8 r32
|
|
//
|
|
// Construct and append a MOVBLSX instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVBLSX(mr, r operand.Op) { ctx.MOVBLSX(mr, r) }
|
|
|
|
// MOVBLZX: Move with Zero-Extend.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVBLZX m8 r32
|
|
// MOVBLZX r8 r32
|
|
//
|
|
// Construct and append a MOVBLZX instruction to the active function.
|
|
func (c *Context) MOVBLZX(mr, r operand.Op) {
|
|
c.addinstruction(x86.MOVBLZX(mr, r))
|
|
}
|
|
|
|
// MOVBLZX: Move with Zero-Extend.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVBLZX m8 r32
|
|
// MOVBLZX r8 r32
|
|
//
|
|
// Construct and append a MOVBLZX instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVBLZX(mr, r operand.Op) { ctx.MOVBLZX(mr, r) }
|
|
|
|
// MOVBQSX: Move with Sign-Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVBQSX m8 r64
|
|
// MOVBQSX r8 r64
|
|
//
|
|
// Construct and append a MOVBQSX instruction to the active function.
|
|
func (c *Context) MOVBQSX(mr, r operand.Op) {
|
|
c.addinstruction(x86.MOVBQSX(mr, r))
|
|
}
|
|
|
|
// MOVBQSX: Move with Sign-Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVBQSX m8 r64
|
|
// MOVBQSX r8 r64
|
|
//
|
|
// Construct and append a MOVBQSX instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVBQSX(mr, r operand.Op) { ctx.MOVBQSX(mr, r) }
|
|
|
|
// MOVBQZX: Move with Zero-Extend.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVBQZX m8 r64
|
|
// MOVBQZX r8 r64
|
|
//
|
|
// Construct and append a MOVBQZX instruction to the active function.
|
|
func (c *Context) MOVBQZX(mr, r operand.Op) {
|
|
c.addinstruction(x86.MOVBQZX(mr, r))
|
|
}
|
|
|
|
// MOVBQZX: Move with Zero-Extend.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVBQZX m8 r64
|
|
// MOVBQZX r8 r64
|
|
//
|
|
// Construct and append a MOVBQZX instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVBQZX(mr, r operand.Op) { ctx.MOVBQZX(mr, r) }
|
|
|
|
// MOVBWSX: Move with Sign-Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVBWSX m8 r16
|
|
// MOVBWSX r8 r16
|
|
//
|
|
// Construct and append a MOVBWSX instruction to the active function.
|
|
func (c *Context) MOVBWSX(mr, r operand.Op) {
|
|
c.addinstruction(x86.MOVBWSX(mr, r))
|
|
}
|
|
|
|
// MOVBWSX: Move with Sign-Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVBWSX m8 r16
|
|
// MOVBWSX r8 r16
|
|
//
|
|
// Construct and append a MOVBWSX instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVBWSX(mr, r operand.Op) { ctx.MOVBWSX(mr, r) }
|
|
|
|
// MOVBWZX: Move with Zero-Extend.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVBWZX m8 r16
|
|
// MOVBWZX r8 r16
|
|
//
|
|
// Construct and append a MOVBWZX instruction to the active function.
|
|
func (c *Context) MOVBWZX(mr, r operand.Op) {
|
|
c.addinstruction(x86.MOVBWZX(mr, r))
|
|
}
|
|
|
|
// MOVBWZX: Move with Zero-Extend.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVBWZX m8 r16
|
|
// MOVBWZX r8 r16
|
|
//
|
|
// Construct and append a MOVBWZX instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVBWZX(mr, r operand.Op) { ctx.MOVBWZX(mr, r) }
|
|
|
|
// MOVD: Move.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVD m32 xmm
|
|
// MOVD m64 xmm
|
|
// MOVD r32 xmm
|
|
// MOVD r64 xmm
|
|
// MOVD xmm m32
|
|
// MOVD xmm m64
|
|
// MOVD xmm r32
|
|
// MOVD xmm r64
|
|
// MOVD xmm xmm
|
|
// MOVD imm32 m64
|
|
// MOVD imm32 r64
|
|
// MOVD imm64 r64
|
|
// MOVD m64 r64
|
|
// MOVD r64 m64
|
|
// MOVD r64 r64
|
|
//
|
|
// Construct and append a MOVD instruction to the active function.
|
|
func (c *Context) MOVD(imrx, mrx operand.Op) {
|
|
c.addinstruction(x86.MOVD(imrx, mrx))
|
|
}
|
|
|
|
// MOVD: Move.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVD m32 xmm
|
|
// MOVD m64 xmm
|
|
// MOVD r32 xmm
|
|
// MOVD r64 xmm
|
|
// MOVD xmm m32
|
|
// MOVD xmm m64
|
|
// MOVD xmm r32
|
|
// MOVD xmm r64
|
|
// MOVD xmm xmm
|
|
// MOVD imm32 m64
|
|
// MOVD imm32 r64
|
|
// MOVD imm64 r64
|
|
// MOVD m64 r64
|
|
// MOVD r64 m64
|
|
// MOVD r64 r64
|
|
//
|
|
// Construct and append a MOVD instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVD(imrx, mrx operand.Op) { ctx.MOVD(imrx, mrx) }
|
|
|
|
// MOVDDUP: Move One Double-FP and Duplicate.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVDDUP m64 xmm
|
|
// MOVDDUP xmm xmm
|
|
//
|
|
// Construct and append a MOVDDUP instruction to the active function.
|
|
func (c *Context) MOVDDUP(mx, x operand.Op) {
|
|
c.addinstruction(x86.MOVDDUP(mx, x))
|
|
}
|
|
|
|
// MOVDDUP: Move One Double-FP and Duplicate.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVDDUP m64 xmm
|
|
// MOVDDUP xmm xmm
|
|
//
|
|
// Construct and append a MOVDDUP instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVDDUP(mx, x operand.Op) { ctx.MOVDDUP(mx, x) }
|
|
|
|
// MOVDQ2Q: Move.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVDQ2Q m32 xmm
|
|
// MOVDQ2Q m64 xmm
|
|
// MOVDQ2Q r32 xmm
|
|
// MOVDQ2Q r64 xmm
|
|
// MOVDQ2Q xmm m32
|
|
// MOVDQ2Q xmm m64
|
|
// MOVDQ2Q xmm r32
|
|
// MOVDQ2Q xmm r64
|
|
// MOVDQ2Q xmm xmm
|
|
// MOVDQ2Q imm32 m64
|
|
// MOVDQ2Q imm32 r64
|
|
// MOVDQ2Q imm64 r64
|
|
// MOVDQ2Q m64 r64
|
|
// MOVDQ2Q r64 m64
|
|
// MOVDQ2Q r64 r64
|
|
//
|
|
// Construct and append a MOVDQ2Q instruction to the active function.
|
|
func (c *Context) MOVDQ2Q(imrx, mrx operand.Op) {
|
|
c.addinstruction(x86.MOVDQ2Q(imrx, mrx))
|
|
}
|
|
|
|
// MOVDQ2Q: Move.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVDQ2Q m32 xmm
|
|
// MOVDQ2Q m64 xmm
|
|
// MOVDQ2Q r32 xmm
|
|
// MOVDQ2Q r64 xmm
|
|
// MOVDQ2Q xmm m32
|
|
// MOVDQ2Q xmm m64
|
|
// MOVDQ2Q xmm r32
|
|
// MOVDQ2Q xmm r64
|
|
// MOVDQ2Q xmm xmm
|
|
// MOVDQ2Q imm32 m64
|
|
// MOVDQ2Q imm32 r64
|
|
// MOVDQ2Q imm64 r64
|
|
// MOVDQ2Q m64 r64
|
|
// MOVDQ2Q r64 m64
|
|
// MOVDQ2Q r64 r64
|
|
//
|
|
// Construct and append a MOVDQ2Q instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVDQ2Q(imrx, mrx operand.Op) { ctx.MOVDQ2Q(imrx, mrx) }
|
|
|
|
// MOVHLPS: Move Packed Single-Precision Floating-Point Values High to Low.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVHLPS xmm xmm
|
|
//
|
|
// Construct and append a MOVHLPS instruction to the active function.
|
|
func (c *Context) MOVHLPS(x, x1 operand.Op) {
|
|
c.addinstruction(x86.MOVHLPS(x, x1))
|
|
}
|
|
|
|
// MOVHLPS: Move Packed Single-Precision Floating-Point Values High to Low.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVHLPS xmm xmm
|
|
//
|
|
// Construct and append a MOVHLPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVHLPS(x, x1 operand.Op) { ctx.MOVHLPS(x, x1) }
|
|
|
|
// MOVHPD: Move High Packed Double-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVHPD m64 xmm
|
|
// MOVHPD xmm m64
|
|
//
|
|
// Construct and append a MOVHPD instruction to the active function.
|
|
func (c *Context) MOVHPD(mx, mx1 operand.Op) {
|
|
c.addinstruction(x86.MOVHPD(mx, mx1))
|
|
}
|
|
|
|
// MOVHPD: Move High Packed Double-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVHPD m64 xmm
|
|
// MOVHPD xmm m64
|
|
//
|
|
// Construct and append a MOVHPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVHPD(mx, mx1 operand.Op) { ctx.MOVHPD(mx, mx1) }
|
|
|
|
// MOVHPS: Move High Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVHPS m64 xmm
|
|
// MOVHPS xmm m64
|
|
//
|
|
// Construct and append a MOVHPS instruction to the active function.
|
|
func (c *Context) MOVHPS(mx, mx1 operand.Op) {
|
|
c.addinstruction(x86.MOVHPS(mx, mx1))
|
|
}
|
|
|
|
// MOVHPS: Move High Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVHPS m64 xmm
|
|
// MOVHPS xmm m64
|
|
//
|
|
// Construct and append a MOVHPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVHPS(mx, mx1 operand.Op) { ctx.MOVHPS(mx, mx1) }
|
|
|
|
// MOVL: Move.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVL imm32 m32
|
|
// MOVL imm32 r32
|
|
// MOVL m32 r32
|
|
// MOVL r32 m32
|
|
// MOVL r32 r32
|
|
//
|
|
// Construct and append a MOVL instruction to the active function.
|
|
func (c *Context) MOVL(imr, mr operand.Op) {
|
|
c.addinstruction(x86.MOVL(imr, mr))
|
|
}
|
|
|
|
// MOVL: Move.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVL imm32 m32
|
|
// MOVL imm32 r32
|
|
// MOVL m32 r32
|
|
// MOVL r32 m32
|
|
// MOVL r32 r32
|
|
//
|
|
// Construct and append a MOVL instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVL(imr, mr operand.Op) { ctx.MOVL(imr, mr) }
|
|
|
|
// MOVLHPS: Move Packed Single-Precision Floating-Point Values Low to High.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVLHPS xmm xmm
|
|
//
|
|
// Construct and append a MOVLHPS instruction to the active function.
|
|
func (c *Context) MOVLHPS(x, x1 operand.Op) {
|
|
c.addinstruction(x86.MOVLHPS(x, x1))
|
|
}
|
|
|
|
// MOVLHPS: Move Packed Single-Precision Floating-Point Values Low to High.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVLHPS xmm xmm
|
|
//
|
|
// Construct and append a MOVLHPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVLHPS(x, x1 operand.Op) { ctx.MOVLHPS(x, x1) }
|
|
|
|
// MOVLPD: Move Low Packed Double-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVLPD m64 xmm
|
|
// MOVLPD xmm m64
|
|
//
|
|
// Construct and append a MOVLPD instruction to the active function.
|
|
func (c *Context) MOVLPD(mx, mx1 operand.Op) {
|
|
c.addinstruction(x86.MOVLPD(mx, mx1))
|
|
}
|
|
|
|
// MOVLPD: Move Low Packed Double-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVLPD m64 xmm
|
|
// MOVLPD xmm m64
|
|
//
|
|
// Construct and append a MOVLPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVLPD(mx, mx1 operand.Op) { ctx.MOVLPD(mx, mx1) }
|
|
|
|
// MOVLPS: Move Low Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVLPS m64 xmm
|
|
// MOVLPS xmm m64
|
|
//
|
|
// Construct and append a MOVLPS instruction to the active function.
|
|
func (c *Context) MOVLPS(mx, mx1 operand.Op) {
|
|
c.addinstruction(x86.MOVLPS(mx, mx1))
|
|
}
|
|
|
|
// MOVLPS: Move Low Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVLPS m64 xmm
|
|
// MOVLPS xmm m64
|
|
//
|
|
// Construct and append a MOVLPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVLPS(mx, mx1 operand.Op) { ctx.MOVLPS(mx, mx1) }
|
|
|
|
// MOVLQSX: Move Doubleword to Quadword with Sign-Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVLQSX m32 r64
|
|
// MOVLQSX r32 r64
|
|
//
|
|
// Construct and append a MOVLQSX instruction to the active function.
|
|
func (c *Context) MOVLQSX(mr, r operand.Op) {
|
|
c.addinstruction(x86.MOVLQSX(mr, r))
|
|
}
|
|
|
|
// MOVLQSX: Move Doubleword to Quadword with Sign-Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVLQSX m32 r64
|
|
// MOVLQSX r32 r64
|
|
//
|
|
// Construct and append a MOVLQSX instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVLQSX(mr, r operand.Op) { ctx.MOVLQSX(mr, r) }
|
|
|
|
// MOVLQZX: Move with Zero-Extend.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVLQZX m32 r64
|
|
//
|
|
// Construct and append a MOVLQZX instruction to the active function.
|
|
func (c *Context) MOVLQZX(m, r operand.Op) {
|
|
c.addinstruction(x86.MOVLQZX(m, r))
|
|
}
|
|
|
|
// MOVLQZX: Move with Zero-Extend.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVLQZX m32 r64
|
|
//
|
|
// Construct and append a MOVLQZX instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVLQZX(m, r operand.Op) { ctx.MOVLQZX(m, r) }
|
|
|
|
// MOVMSKPD: Extract Packed Double-Precision Floating-Point Sign Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVMSKPD xmm r32
|
|
//
|
|
// Construct and append a MOVMSKPD instruction to the active function.
|
|
func (c *Context) MOVMSKPD(x, r operand.Op) {
|
|
c.addinstruction(x86.MOVMSKPD(x, r))
|
|
}
|
|
|
|
// MOVMSKPD: Extract Packed Double-Precision Floating-Point Sign Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVMSKPD xmm r32
|
|
//
|
|
// Construct and append a MOVMSKPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVMSKPD(x, r operand.Op) { ctx.MOVMSKPD(x, r) }
|
|
|
|
// MOVMSKPS: Extract Packed Single-Precision Floating-Point Sign Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVMSKPS xmm r32
|
|
//
|
|
// Construct and append a MOVMSKPS instruction to the active function.
|
|
func (c *Context) MOVMSKPS(x, r operand.Op) {
|
|
c.addinstruction(x86.MOVMSKPS(x, r))
|
|
}
|
|
|
|
// MOVMSKPS: Extract Packed Single-Precision Floating-Point Sign Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVMSKPS xmm r32
|
|
//
|
|
// Construct and append a MOVMSKPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVMSKPS(x, r operand.Op) { ctx.MOVMSKPS(x, r) }
|
|
|
|
// MOVNTDQ: Store Double Quadword Using Non-Temporal Hint.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVNTDQ xmm m128
|
|
//
|
|
// Construct and append a MOVNTDQ instruction to the active function.
|
|
func (c *Context) MOVNTDQ(x, m operand.Op) {
|
|
c.addinstruction(x86.MOVNTDQ(x, m))
|
|
}
|
|
|
|
// MOVNTDQ: Store Double Quadword Using Non-Temporal Hint.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVNTDQ xmm m128
|
|
//
|
|
// Construct and append a MOVNTDQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVNTDQ(x, m operand.Op) { ctx.MOVNTDQ(x, m) }
|
|
|
|
// MOVNTDQA: Load Double Quadword Non-Temporal Aligned Hint.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVNTDQA m128 xmm
|
|
//
|
|
// Construct and append a MOVNTDQA instruction to the active function.
|
|
func (c *Context) MOVNTDQA(m, x operand.Op) {
|
|
c.addinstruction(x86.MOVNTDQA(m, x))
|
|
}
|
|
|
|
// MOVNTDQA: Load Double Quadword Non-Temporal Aligned Hint.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVNTDQA m128 xmm
|
|
//
|
|
// Construct and append a MOVNTDQA instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVNTDQA(m, x operand.Op) { ctx.MOVNTDQA(m, x) }
|
|
|
|
// MOVNTIL: Store Doubleword Using Non-Temporal Hint.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVNTIL r32 m32
|
|
//
|
|
// Construct and append a MOVNTIL instruction to the active function.
|
|
func (c *Context) MOVNTIL(r, m operand.Op) {
|
|
c.addinstruction(x86.MOVNTIL(r, m))
|
|
}
|
|
|
|
// MOVNTIL: Store Doubleword Using Non-Temporal Hint.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVNTIL r32 m32
|
|
//
|
|
// Construct and append a MOVNTIL instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVNTIL(r, m operand.Op) { ctx.MOVNTIL(r, m) }
|
|
|
|
// MOVNTIQ: Store Doubleword Using Non-Temporal Hint.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVNTIQ r64 m64
|
|
//
|
|
// Construct and append a MOVNTIQ instruction to the active function.
|
|
func (c *Context) MOVNTIQ(r, m operand.Op) {
|
|
c.addinstruction(x86.MOVNTIQ(r, m))
|
|
}
|
|
|
|
// MOVNTIQ: Store Doubleword Using Non-Temporal Hint.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVNTIQ r64 m64
|
|
//
|
|
// Construct and append a MOVNTIQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVNTIQ(r, m operand.Op) { ctx.MOVNTIQ(r, m) }
|
|
|
|
// MOVNTO: Store Double Quadword Using Non-Temporal Hint.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVNTO xmm m128
|
|
//
|
|
// Construct and append a MOVNTO instruction to the active function.
|
|
func (c *Context) MOVNTO(x, m operand.Op) {
|
|
c.addinstruction(x86.MOVNTO(x, m))
|
|
}
|
|
|
|
// MOVNTO: Store Double Quadword Using Non-Temporal Hint.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVNTO xmm m128
|
|
//
|
|
// Construct and append a MOVNTO instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVNTO(x, m operand.Op) { ctx.MOVNTO(x, m) }
|
|
|
|
// MOVNTPD: Store Packed Double-Precision Floating-Point Values Using Non-Temporal Hint.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVNTPD xmm m128
|
|
//
|
|
// Construct and append a MOVNTPD instruction to the active function.
|
|
func (c *Context) MOVNTPD(x, m operand.Op) {
|
|
c.addinstruction(x86.MOVNTPD(x, m))
|
|
}
|
|
|
|
// MOVNTPD: Store Packed Double-Precision Floating-Point Values Using Non-Temporal Hint.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVNTPD xmm m128
|
|
//
|
|
// Construct and append a MOVNTPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVNTPD(x, m operand.Op) { ctx.MOVNTPD(x, m) }
|
|
|
|
// MOVNTPS: Store Packed Single-Precision Floating-Point Values Using Non-Temporal Hint.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVNTPS xmm m128
|
|
//
|
|
// Construct and append a MOVNTPS instruction to the active function.
|
|
func (c *Context) MOVNTPS(x, m operand.Op) {
|
|
c.addinstruction(x86.MOVNTPS(x, m))
|
|
}
|
|
|
|
// MOVNTPS: Store Packed Single-Precision Floating-Point Values Using Non-Temporal Hint.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVNTPS xmm m128
|
|
//
|
|
// Construct and append a MOVNTPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVNTPS(x, m operand.Op) { ctx.MOVNTPS(x, m) }
|
|
|
|
// MOVO: Move Aligned Double Quadword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVO m128 xmm
|
|
// MOVO xmm m128
|
|
// MOVO xmm xmm
|
|
//
|
|
// Construct and append a MOVO instruction to the active function.
|
|
func (c *Context) MOVO(mx, mx1 operand.Op) {
|
|
c.addinstruction(x86.MOVO(mx, mx1))
|
|
}
|
|
|
|
// MOVO: Move Aligned Double Quadword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVO m128 xmm
|
|
// MOVO xmm m128
|
|
// MOVO xmm xmm
|
|
//
|
|
// Construct and append a MOVO instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVO(mx, mx1 operand.Op) { ctx.MOVO(mx, mx1) }
|
|
|
|
// MOVOA: Move Aligned Double Quadword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVOA m128 xmm
|
|
// MOVOA xmm m128
|
|
// MOVOA xmm xmm
|
|
//
|
|
// Construct and append a MOVOA instruction to the active function.
|
|
func (c *Context) MOVOA(mx, mx1 operand.Op) {
|
|
c.addinstruction(x86.MOVOA(mx, mx1))
|
|
}
|
|
|
|
// MOVOA: Move Aligned Double Quadword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVOA m128 xmm
|
|
// MOVOA xmm m128
|
|
// MOVOA xmm xmm
|
|
//
|
|
// Construct and append a MOVOA instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVOA(mx, mx1 operand.Op) { ctx.MOVOA(mx, mx1) }
|
|
|
|
// MOVOU: Move Unaligned Double Quadword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVOU m128 xmm
|
|
// MOVOU xmm m128
|
|
// MOVOU xmm xmm
|
|
//
|
|
// Construct and append a MOVOU instruction to the active function.
|
|
func (c *Context) MOVOU(mx, mx1 operand.Op) {
|
|
c.addinstruction(x86.MOVOU(mx, mx1))
|
|
}
|
|
|
|
// MOVOU: Move Unaligned Double Quadword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVOU m128 xmm
|
|
// MOVOU xmm m128
|
|
// MOVOU xmm xmm
|
|
//
|
|
// Construct and append a MOVOU instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVOU(mx, mx1 operand.Op) { ctx.MOVOU(mx, mx1) }
|
|
|
|
// MOVQ: Move.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVQ m32 xmm
|
|
// MOVQ m64 xmm
|
|
// MOVQ r32 xmm
|
|
// MOVQ r64 xmm
|
|
// MOVQ xmm m32
|
|
// MOVQ xmm m64
|
|
// MOVQ xmm r32
|
|
// MOVQ xmm r64
|
|
// MOVQ xmm xmm
|
|
// MOVQ imm32 m64
|
|
// MOVQ imm32 r64
|
|
// MOVQ imm64 r64
|
|
// MOVQ m64 r64
|
|
// MOVQ r64 m64
|
|
// MOVQ r64 r64
|
|
//
|
|
// Construct and append a MOVQ instruction to the active function.
|
|
func (c *Context) MOVQ(imrx, mrx operand.Op) {
|
|
c.addinstruction(x86.MOVQ(imrx, mrx))
|
|
}
|
|
|
|
// MOVQ: Move.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVQ m32 xmm
|
|
// MOVQ m64 xmm
|
|
// MOVQ r32 xmm
|
|
// MOVQ r64 xmm
|
|
// MOVQ xmm m32
|
|
// MOVQ xmm m64
|
|
// MOVQ xmm r32
|
|
// MOVQ xmm r64
|
|
// MOVQ xmm xmm
|
|
// MOVQ imm32 m64
|
|
// MOVQ imm32 r64
|
|
// MOVQ imm64 r64
|
|
// MOVQ m64 r64
|
|
// MOVQ r64 m64
|
|
// MOVQ r64 r64
|
|
//
|
|
// Construct and append a MOVQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVQ(imrx, mrx operand.Op) { ctx.MOVQ(imrx, mrx) }
|
|
|
|
// MOVSD: Move Scalar Double-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVSD m64 xmm
|
|
// MOVSD xmm m64
|
|
// MOVSD xmm xmm
|
|
//
|
|
// Construct and append a MOVSD instruction to the active function.
|
|
func (c *Context) MOVSD(mx, mx1 operand.Op) {
|
|
c.addinstruction(x86.MOVSD(mx, mx1))
|
|
}
|
|
|
|
// MOVSD: Move Scalar Double-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVSD m64 xmm
|
|
// MOVSD xmm m64
|
|
// MOVSD xmm xmm
|
|
//
|
|
// Construct and append a MOVSD instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVSD(mx, mx1 operand.Op) { ctx.MOVSD(mx, mx1) }
|
|
|
|
// MOVSHDUP: Move Packed Single-FP High and Duplicate.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVSHDUP m128 xmm
|
|
// MOVSHDUP xmm xmm
|
|
//
|
|
// Construct and append a MOVSHDUP instruction to the active function.
|
|
func (c *Context) MOVSHDUP(mx, x operand.Op) {
|
|
c.addinstruction(x86.MOVSHDUP(mx, x))
|
|
}
|
|
|
|
// MOVSHDUP: Move Packed Single-FP High and Duplicate.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVSHDUP m128 xmm
|
|
// MOVSHDUP xmm xmm
|
|
//
|
|
// Construct and append a MOVSHDUP instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVSHDUP(mx, x operand.Op) { ctx.MOVSHDUP(mx, x) }
|
|
|
|
// MOVSLDUP: Move Packed Single-FP Low and Duplicate.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVSLDUP m128 xmm
|
|
// MOVSLDUP xmm xmm
|
|
//
|
|
// Construct and append a MOVSLDUP instruction to the active function.
|
|
func (c *Context) MOVSLDUP(mx, x operand.Op) {
|
|
c.addinstruction(x86.MOVSLDUP(mx, x))
|
|
}
|
|
|
|
// MOVSLDUP: Move Packed Single-FP Low and Duplicate.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVSLDUP m128 xmm
|
|
// MOVSLDUP xmm xmm
|
|
//
|
|
// Construct and append a MOVSLDUP instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVSLDUP(mx, x operand.Op) { ctx.MOVSLDUP(mx, x) }
|
|
|
|
// MOVSS: Move Scalar Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVSS m32 xmm
|
|
// MOVSS xmm m32
|
|
// MOVSS xmm xmm
|
|
//
|
|
// Construct and append a MOVSS instruction to the active function.
|
|
func (c *Context) MOVSS(mx, mx1 operand.Op) {
|
|
c.addinstruction(x86.MOVSS(mx, mx1))
|
|
}
|
|
|
|
// MOVSS: Move Scalar Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVSS m32 xmm
|
|
// MOVSS xmm m32
|
|
// MOVSS xmm xmm
|
|
//
|
|
// Construct and append a MOVSS instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVSS(mx, mx1 operand.Op) { ctx.MOVSS(mx, mx1) }
|
|
|
|
// MOVUPD: Move Unaligned Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVUPD m128 xmm
|
|
// MOVUPD xmm m128
|
|
// MOVUPD xmm xmm
|
|
//
|
|
// Construct and append a MOVUPD instruction to the active function.
|
|
func (c *Context) MOVUPD(mx, mx1 operand.Op) {
|
|
c.addinstruction(x86.MOVUPD(mx, mx1))
|
|
}
|
|
|
|
// MOVUPD: Move Unaligned Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVUPD m128 xmm
|
|
// MOVUPD xmm m128
|
|
// MOVUPD xmm xmm
|
|
//
|
|
// Construct and append a MOVUPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVUPD(mx, mx1 operand.Op) { ctx.MOVUPD(mx, mx1) }
|
|
|
|
// MOVUPS: Move Unaligned Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVUPS m128 xmm
|
|
// MOVUPS xmm m128
|
|
// MOVUPS xmm xmm
|
|
//
|
|
// Construct and append a MOVUPS instruction to the active function.
|
|
func (c *Context) MOVUPS(mx, mx1 operand.Op) {
|
|
c.addinstruction(x86.MOVUPS(mx, mx1))
|
|
}
|
|
|
|
// MOVUPS: Move Unaligned Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVUPS m128 xmm
|
|
// MOVUPS xmm m128
|
|
// MOVUPS xmm xmm
|
|
//
|
|
// Construct and append a MOVUPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVUPS(mx, mx1 operand.Op) { ctx.MOVUPS(mx, mx1) }
|
|
|
|
// MOVW: Move.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVW imm16 m16
|
|
// MOVW imm16 r16
|
|
// MOVW m16 r16
|
|
// MOVW r16 m16
|
|
// MOVW r16 r16
|
|
//
|
|
// Construct and append a MOVW instruction to the active function.
|
|
func (c *Context) MOVW(imr, mr operand.Op) {
|
|
c.addinstruction(x86.MOVW(imr, mr))
|
|
}
|
|
|
|
// MOVW: Move.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVW imm16 m16
|
|
// MOVW imm16 r16
|
|
// MOVW m16 r16
|
|
// MOVW r16 m16
|
|
// MOVW r16 r16
|
|
//
|
|
// Construct and append a MOVW instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVW(imr, mr operand.Op) { ctx.MOVW(imr, mr) }
|
|
|
|
// MOVWLSX: Move with Sign-Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVWLSX m16 r32
|
|
// MOVWLSX r16 r32
|
|
//
|
|
// Construct and append a MOVWLSX instruction to the active function.
|
|
func (c *Context) MOVWLSX(mr, r operand.Op) {
|
|
c.addinstruction(x86.MOVWLSX(mr, r))
|
|
}
|
|
|
|
// MOVWLSX: Move with Sign-Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVWLSX m16 r32
|
|
// MOVWLSX r16 r32
|
|
//
|
|
// Construct and append a MOVWLSX instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVWLSX(mr, r operand.Op) { ctx.MOVWLSX(mr, r) }
|
|
|
|
// MOVWLZX: Move with Zero-Extend.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVWLZX m16 r32
|
|
// MOVWLZX r16 r32
|
|
//
|
|
// Construct and append a MOVWLZX instruction to the active function.
|
|
func (c *Context) MOVWLZX(mr, r operand.Op) {
|
|
c.addinstruction(x86.MOVWLZX(mr, r))
|
|
}
|
|
|
|
// MOVWLZX: Move with Zero-Extend.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVWLZX m16 r32
|
|
// MOVWLZX r16 r32
|
|
//
|
|
// Construct and append a MOVWLZX instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVWLZX(mr, r operand.Op) { ctx.MOVWLZX(mr, r) }
|
|
|
|
// MOVWQSX: Move with Sign-Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVWQSX m16 r64
|
|
// MOVWQSX r16 r64
|
|
//
|
|
// Construct and append a MOVWQSX instruction to the active function.
|
|
func (c *Context) MOVWQSX(mr, r operand.Op) {
|
|
c.addinstruction(x86.MOVWQSX(mr, r))
|
|
}
|
|
|
|
// MOVWQSX: Move with Sign-Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVWQSX m16 r64
|
|
// MOVWQSX r16 r64
|
|
//
|
|
// Construct and append a MOVWQSX instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVWQSX(mr, r operand.Op) { ctx.MOVWQSX(mr, r) }
|
|
|
|
// MOVWQZX: Move with Zero-Extend.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVWQZX m16 r64
|
|
// MOVWQZX r16 r64
|
|
//
|
|
// Construct and append a MOVWQZX instruction to the active function.
|
|
func (c *Context) MOVWQZX(mr, r operand.Op) {
|
|
c.addinstruction(x86.MOVWQZX(mr, r))
|
|
}
|
|
|
|
// MOVWQZX: Move with Zero-Extend.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MOVWQZX m16 r64
|
|
// MOVWQZX r16 r64
|
|
//
|
|
// Construct and append a MOVWQZX instruction to the active function.
|
|
// Operates on the global context.
|
|
func MOVWQZX(mr, r operand.Op) { ctx.MOVWQZX(mr, r) }
|
|
|
|
// MPSADBW: Compute Multiple Packed Sums of Absolute Difference.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MPSADBW imm8 m128 xmm
|
|
// MPSADBW imm8 xmm xmm
|
|
//
|
|
// Construct and append a MPSADBW instruction to the active function.
|
|
func (c *Context) MPSADBW(i, mx, x operand.Op) {
|
|
c.addinstruction(x86.MPSADBW(i, mx, x))
|
|
}
|
|
|
|
// MPSADBW: Compute Multiple Packed Sums of Absolute Difference.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MPSADBW imm8 m128 xmm
|
|
// MPSADBW imm8 xmm xmm
|
|
//
|
|
// Construct and append a MPSADBW instruction to the active function.
|
|
// Operates on the global context.
|
|
func MPSADBW(i, mx, x operand.Op) { ctx.MPSADBW(i, mx, x) }
|
|
|
|
// MULB: Unsigned Multiply.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MULB m8
|
|
// MULB r8
|
|
//
|
|
// Construct and append a MULB instruction to the active function.
|
|
func (c *Context) MULB(mr operand.Op) {
|
|
c.addinstruction(x86.MULB(mr))
|
|
}
|
|
|
|
// MULB: Unsigned Multiply.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MULB m8
|
|
// MULB r8
|
|
//
|
|
// Construct and append a MULB instruction to the active function.
|
|
// Operates on the global context.
|
|
func MULB(mr operand.Op) { ctx.MULB(mr) }
|
|
|
|
// MULL: Unsigned Multiply.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MULL m32
|
|
// MULL r32
|
|
//
|
|
// Construct and append a MULL instruction to the active function.
|
|
func (c *Context) MULL(mr operand.Op) {
|
|
c.addinstruction(x86.MULL(mr))
|
|
}
|
|
|
|
// MULL: Unsigned Multiply.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MULL m32
|
|
// MULL r32
|
|
//
|
|
// Construct and append a MULL instruction to the active function.
|
|
// Operates on the global context.
|
|
func MULL(mr operand.Op) { ctx.MULL(mr) }
|
|
|
|
// MULPD: Multiply Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MULPD m128 xmm
|
|
// MULPD xmm xmm
|
|
//
|
|
// Construct and append a MULPD instruction to the active function.
|
|
func (c *Context) MULPD(mx, x operand.Op) {
|
|
c.addinstruction(x86.MULPD(mx, x))
|
|
}
|
|
|
|
// MULPD: Multiply Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MULPD m128 xmm
|
|
// MULPD xmm xmm
|
|
//
|
|
// Construct and append a MULPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func MULPD(mx, x operand.Op) { ctx.MULPD(mx, x) }
|
|
|
|
// MULPS: Multiply Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MULPS m128 xmm
|
|
// MULPS xmm xmm
|
|
//
|
|
// Construct and append a MULPS instruction to the active function.
|
|
func (c *Context) MULPS(mx, x operand.Op) {
|
|
c.addinstruction(x86.MULPS(mx, x))
|
|
}
|
|
|
|
// MULPS: Multiply Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MULPS m128 xmm
|
|
// MULPS xmm xmm
|
|
//
|
|
// Construct and append a MULPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func MULPS(mx, x operand.Op) { ctx.MULPS(mx, x) }
|
|
|
|
// MULQ: Unsigned Multiply.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MULQ m64
|
|
// MULQ r64
|
|
//
|
|
// Construct and append a MULQ instruction to the active function.
|
|
func (c *Context) MULQ(mr operand.Op) {
|
|
c.addinstruction(x86.MULQ(mr))
|
|
}
|
|
|
|
// MULQ: Unsigned Multiply.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MULQ m64
|
|
// MULQ r64
|
|
//
|
|
// Construct and append a MULQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func MULQ(mr operand.Op) { ctx.MULQ(mr) }
|
|
|
|
// MULSD: Multiply Scalar Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MULSD m64 xmm
|
|
// MULSD xmm xmm
|
|
//
|
|
// Construct and append a MULSD instruction to the active function.
|
|
func (c *Context) MULSD(mx, x operand.Op) {
|
|
c.addinstruction(x86.MULSD(mx, x))
|
|
}
|
|
|
|
// MULSD: Multiply Scalar Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MULSD m64 xmm
|
|
// MULSD xmm xmm
|
|
//
|
|
// Construct and append a MULSD instruction to the active function.
|
|
// Operates on the global context.
|
|
func MULSD(mx, x operand.Op) { ctx.MULSD(mx, x) }
|
|
|
|
// MULSS: Multiply Scalar Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MULSS m32 xmm
|
|
// MULSS xmm xmm
|
|
//
|
|
// Construct and append a MULSS instruction to the active function.
|
|
func (c *Context) MULSS(mx, x operand.Op) {
|
|
c.addinstruction(x86.MULSS(mx, x))
|
|
}
|
|
|
|
// MULSS: Multiply Scalar Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MULSS m32 xmm
|
|
// MULSS xmm xmm
|
|
//
|
|
// Construct and append a MULSS instruction to the active function.
|
|
// Operates on the global context.
|
|
func MULSS(mx, x operand.Op) { ctx.MULSS(mx, x) }
|
|
|
|
// MULW: Unsigned Multiply.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MULW m16
|
|
// MULW r16
|
|
//
|
|
// Construct and append a MULW instruction to the active function.
|
|
func (c *Context) MULW(mr operand.Op) {
|
|
c.addinstruction(x86.MULW(mr))
|
|
}
|
|
|
|
// MULW: Unsigned Multiply.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MULW m16
|
|
// MULW r16
|
|
//
|
|
// Construct and append a MULW instruction to the active function.
|
|
// Operates on the global context.
|
|
func MULW(mr operand.Op) { ctx.MULW(mr) }
|
|
|
|
// MULXL: Unsigned Multiply Without Affecting Flags.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MULXL m32 r32 r32
|
|
// MULXL r32 r32 r32
|
|
//
|
|
// Construct and append a MULXL instruction to the active function.
|
|
func (c *Context) MULXL(mr, r, r1 operand.Op) {
|
|
c.addinstruction(x86.MULXL(mr, r, r1))
|
|
}
|
|
|
|
// MULXL: Unsigned Multiply Without Affecting Flags.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MULXL m32 r32 r32
|
|
// MULXL r32 r32 r32
|
|
//
|
|
// Construct and append a MULXL instruction to the active function.
|
|
// Operates on the global context.
|
|
func MULXL(mr, r, r1 operand.Op) { ctx.MULXL(mr, r, r1) }
|
|
|
|
// MULXQ: Unsigned Multiply Without Affecting Flags.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MULXQ m64 r64 r64
|
|
// MULXQ r64 r64 r64
|
|
//
|
|
// Construct and append a MULXQ instruction to the active function.
|
|
func (c *Context) MULXQ(mr, r, r1 operand.Op) {
|
|
c.addinstruction(x86.MULXQ(mr, r, r1))
|
|
}
|
|
|
|
// MULXQ: Unsigned Multiply Without Affecting Flags.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MULXQ m64 r64 r64
|
|
// MULXQ r64 r64 r64
|
|
//
|
|
// Construct and append a MULXQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func MULXQ(mr, r, r1 operand.Op) { ctx.MULXQ(mr, r, r1) }
|
|
|
|
// MWAIT: Monitor Wait.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MWAIT
|
|
//
|
|
// Construct and append a MWAIT instruction to the active function.
|
|
func (c *Context) MWAIT() {
|
|
c.addinstruction(x86.MWAIT())
|
|
}
|
|
|
|
// MWAIT: Monitor Wait.
|
|
//
|
|
// Forms:
|
|
//
|
|
// MWAIT
|
|
//
|
|
// Construct and append a MWAIT instruction to the active function.
|
|
// Operates on the global context.
|
|
func MWAIT() { ctx.MWAIT() }
|
|
|
|
// NEGB: Two's Complement Negation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// NEGB m8
|
|
// NEGB r8
|
|
//
|
|
// Construct and append a NEGB instruction to the active function.
|
|
func (c *Context) NEGB(mr operand.Op) {
|
|
c.addinstruction(x86.NEGB(mr))
|
|
}
|
|
|
|
// NEGB: Two's Complement Negation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// NEGB m8
|
|
// NEGB r8
|
|
//
|
|
// Construct and append a NEGB instruction to the active function.
|
|
// Operates on the global context.
|
|
func NEGB(mr operand.Op) { ctx.NEGB(mr) }
|
|
|
|
// NEGL: Two's Complement Negation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// NEGL m32
|
|
// NEGL r32
|
|
//
|
|
// Construct and append a NEGL instruction to the active function.
|
|
func (c *Context) NEGL(mr operand.Op) {
|
|
c.addinstruction(x86.NEGL(mr))
|
|
}
|
|
|
|
// NEGL: Two's Complement Negation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// NEGL m32
|
|
// NEGL r32
|
|
//
|
|
// Construct and append a NEGL instruction to the active function.
|
|
// Operates on the global context.
|
|
func NEGL(mr operand.Op) { ctx.NEGL(mr) }
|
|
|
|
// NEGQ: Two's Complement Negation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// NEGQ m64
|
|
// NEGQ r64
|
|
//
|
|
// Construct and append a NEGQ instruction to the active function.
|
|
func (c *Context) NEGQ(mr operand.Op) {
|
|
c.addinstruction(x86.NEGQ(mr))
|
|
}
|
|
|
|
// NEGQ: Two's Complement Negation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// NEGQ m64
|
|
// NEGQ r64
|
|
//
|
|
// Construct and append a NEGQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func NEGQ(mr operand.Op) { ctx.NEGQ(mr) }
|
|
|
|
// NEGW: Two's Complement Negation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// NEGW m16
|
|
// NEGW r16
|
|
//
|
|
// Construct and append a NEGW instruction to the active function.
|
|
func (c *Context) NEGW(mr operand.Op) {
|
|
c.addinstruction(x86.NEGW(mr))
|
|
}
|
|
|
|
// NEGW: Two's Complement Negation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// NEGW m16
|
|
// NEGW r16
|
|
//
|
|
// Construct and append a NEGW instruction to the active function.
|
|
// Operates on the global context.
|
|
func NEGW(mr operand.Op) { ctx.NEGW(mr) }
|
|
|
|
// NOP: No Operation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// NOP
|
|
//
|
|
// Construct and append a NOP instruction to the active function.
|
|
func (c *Context) NOP() {
|
|
c.addinstruction(x86.NOP())
|
|
}
|
|
|
|
// NOP: No Operation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// NOP
|
|
//
|
|
// Construct and append a NOP instruction to the active function.
|
|
// Operates on the global context.
|
|
func NOP() { ctx.NOP() }
|
|
|
|
// NOTB: One's Complement Negation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// NOTB m8
|
|
// NOTB r8
|
|
//
|
|
// Construct and append a NOTB instruction to the active function.
|
|
func (c *Context) NOTB(mr operand.Op) {
|
|
c.addinstruction(x86.NOTB(mr))
|
|
}
|
|
|
|
// NOTB: One's Complement Negation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// NOTB m8
|
|
// NOTB r8
|
|
//
|
|
// Construct and append a NOTB instruction to the active function.
|
|
// Operates on the global context.
|
|
func NOTB(mr operand.Op) { ctx.NOTB(mr) }
|
|
|
|
// NOTL: One's Complement Negation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// NOTL m32
|
|
// NOTL r32
|
|
//
|
|
// Construct and append a NOTL instruction to the active function.
|
|
func (c *Context) NOTL(mr operand.Op) {
|
|
c.addinstruction(x86.NOTL(mr))
|
|
}
|
|
|
|
// NOTL: One's Complement Negation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// NOTL m32
|
|
// NOTL r32
|
|
//
|
|
// Construct and append a NOTL instruction to the active function.
|
|
// Operates on the global context.
|
|
func NOTL(mr operand.Op) { ctx.NOTL(mr) }
|
|
|
|
// NOTQ: One's Complement Negation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// NOTQ m64
|
|
// NOTQ r64
|
|
//
|
|
// Construct and append a NOTQ instruction to the active function.
|
|
func (c *Context) NOTQ(mr operand.Op) {
|
|
c.addinstruction(x86.NOTQ(mr))
|
|
}
|
|
|
|
// NOTQ: One's Complement Negation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// NOTQ m64
|
|
// NOTQ r64
|
|
//
|
|
// Construct and append a NOTQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func NOTQ(mr operand.Op) { ctx.NOTQ(mr) }
|
|
|
|
// NOTW: One's Complement Negation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// NOTW m16
|
|
// NOTW r16
|
|
//
|
|
// Construct and append a NOTW instruction to the active function.
|
|
func (c *Context) NOTW(mr operand.Op) {
|
|
c.addinstruction(x86.NOTW(mr))
|
|
}
|
|
|
|
// NOTW: One's Complement Negation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// NOTW m16
|
|
// NOTW r16
|
|
//
|
|
// Construct and append a NOTW instruction to the active function.
|
|
// Operates on the global context.
|
|
func NOTW(mr operand.Op) { ctx.NOTW(mr) }
|
|
|
|
// ORB: Logical Inclusive OR.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ORB imm8 al
|
|
// ORB imm8 m8
|
|
// ORB imm8 r8
|
|
// ORB m8 r8
|
|
// ORB r8 m8
|
|
// ORB r8 r8
|
|
//
|
|
// Construct and append a ORB instruction to the active function.
|
|
func (c *Context) ORB(imr, amr operand.Op) {
|
|
c.addinstruction(x86.ORB(imr, amr))
|
|
}
|
|
|
|
// ORB: Logical Inclusive OR.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ORB imm8 al
|
|
// ORB imm8 m8
|
|
// ORB imm8 r8
|
|
// ORB m8 r8
|
|
// ORB r8 m8
|
|
// ORB r8 r8
|
|
//
|
|
// Construct and append a ORB instruction to the active function.
|
|
// Operates on the global context.
|
|
func ORB(imr, amr operand.Op) { ctx.ORB(imr, amr) }
|
|
|
|
// ORL: Logical Inclusive OR.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ORL imm32 eax
|
|
// ORL imm32 m32
|
|
// ORL imm32 r32
|
|
// ORL imm8 m32
|
|
// ORL imm8 r32
|
|
// ORL m32 r32
|
|
// ORL r32 m32
|
|
// ORL r32 r32
|
|
//
|
|
// Construct and append a ORL instruction to the active function.
|
|
func (c *Context) ORL(imr, emr operand.Op) {
|
|
c.addinstruction(x86.ORL(imr, emr))
|
|
}
|
|
|
|
// ORL: Logical Inclusive OR.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ORL imm32 eax
|
|
// ORL imm32 m32
|
|
// ORL imm32 r32
|
|
// ORL imm8 m32
|
|
// ORL imm8 r32
|
|
// ORL m32 r32
|
|
// ORL r32 m32
|
|
// ORL r32 r32
|
|
//
|
|
// Construct and append a ORL instruction to the active function.
|
|
// Operates on the global context.
|
|
func ORL(imr, emr operand.Op) { ctx.ORL(imr, emr) }
|
|
|
|
// ORPD: Bitwise Logical OR of Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ORPD m128 xmm
|
|
// ORPD xmm xmm
|
|
//
|
|
// Construct and append a ORPD instruction to the active function.
|
|
func (c *Context) ORPD(mx, x operand.Op) {
|
|
c.addinstruction(x86.ORPD(mx, x))
|
|
}
|
|
|
|
// ORPD: Bitwise Logical OR of Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ORPD m128 xmm
|
|
// ORPD xmm xmm
|
|
//
|
|
// Construct and append a ORPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func ORPD(mx, x operand.Op) { ctx.ORPD(mx, x) }
|
|
|
|
// ORPS: Bitwise Logical OR of Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ORPS m128 xmm
|
|
// ORPS xmm xmm
|
|
//
|
|
// Construct and append a ORPS instruction to the active function.
|
|
func (c *Context) ORPS(mx, x operand.Op) {
|
|
c.addinstruction(x86.ORPS(mx, x))
|
|
}
|
|
|
|
// ORPS: Bitwise Logical OR of Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ORPS m128 xmm
|
|
// ORPS xmm xmm
|
|
//
|
|
// Construct and append a ORPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func ORPS(mx, x operand.Op) { ctx.ORPS(mx, x) }
|
|
|
|
// ORQ: Logical Inclusive OR.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ORQ imm32 m64
|
|
// ORQ imm32 r64
|
|
// ORQ imm32 rax
|
|
// ORQ imm8 m64
|
|
// ORQ imm8 r64
|
|
// ORQ m64 r64
|
|
// ORQ r64 m64
|
|
// ORQ r64 r64
|
|
//
|
|
// Construct and append a ORQ instruction to the active function.
|
|
func (c *Context) ORQ(imr, mr operand.Op) {
|
|
c.addinstruction(x86.ORQ(imr, mr))
|
|
}
|
|
|
|
// ORQ: Logical Inclusive OR.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ORQ imm32 m64
|
|
// ORQ imm32 r64
|
|
// ORQ imm32 rax
|
|
// ORQ imm8 m64
|
|
// ORQ imm8 r64
|
|
// ORQ m64 r64
|
|
// ORQ r64 m64
|
|
// ORQ r64 r64
|
|
//
|
|
// Construct and append a ORQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func ORQ(imr, mr operand.Op) { ctx.ORQ(imr, mr) }
|
|
|
|
// ORW: Logical Inclusive OR.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ORW imm16 ax
|
|
// ORW imm16 m16
|
|
// ORW imm16 r16
|
|
// ORW imm8 m16
|
|
// ORW imm8 r16
|
|
// ORW m16 r16
|
|
// ORW r16 m16
|
|
// ORW r16 r16
|
|
//
|
|
// Construct and append a ORW instruction to the active function.
|
|
func (c *Context) ORW(imr, amr operand.Op) {
|
|
c.addinstruction(x86.ORW(imr, amr))
|
|
}
|
|
|
|
// ORW: Logical Inclusive OR.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ORW imm16 ax
|
|
// ORW imm16 m16
|
|
// ORW imm16 r16
|
|
// ORW imm8 m16
|
|
// ORW imm8 r16
|
|
// ORW m16 r16
|
|
// ORW r16 m16
|
|
// ORW r16 r16
|
|
//
|
|
// Construct and append a ORW instruction to the active function.
|
|
// Operates on the global context.
|
|
func ORW(imr, amr operand.Op) { ctx.ORW(imr, amr) }
|
|
|
|
// PABSB: Packed Absolute Value of Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PABSB m128 xmm
|
|
// PABSB xmm xmm
|
|
//
|
|
// Construct and append a PABSB instruction to the active function.
|
|
func (c *Context) PABSB(mx, x operand.Op) {
|
|
c.addinstruction(x86.PABSB(mx, x))
|
|
}
|
|
|
|
// PABSB: Packed Absolute Value of Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PABSB m128 xmm
|
|
// PABSB xmm xmm
|
|
//
|
|
// Construct and append a PABSB instruction to the active function.
|
|
// Operates on the global context.
|
|
func PABSB(mx, x operand.Op) { ctx.PABSB(mx, x) }
|
|
|
|
// PABSD: Packed Absolute Value of Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PABSD m128 xmm
|
|
// PABSD xmm xmm
|
|
//
|
|
// Construct and append a PABSD instruction to the active function.
|
|
func (c *Context) PABSD(mx, x operand.Op) {
|
|
c.addinstruction(x86.PABSD(mx, x))
|
|
}
|
|
|
|
// PABSD: Packed Absolute Value of Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PABSD m128 xmm
|
|
// PABSD xmm xmm
|
|
//
|
|
// Construct and append a PABSD instruction to the active function.
|
|
// Operates on the global context.
|
|
func PABSD(mx, x operand.Op) { ctx.PABSD(mx, x) }
|
|
|
|
// PABSW: Packed Absolute Value of Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PABSW m128 xmm
|
|
// PABSW xmm xmm
|
|
//
|
|
// Construct and append a PABSW instruction to the active function.
|
|
func (c *Context) PABSW(mx, x operand.Op) {
|
|
c.addinstruction(x86.PABSW(mx, x))
|
|
}
|
|
|
|
// PABSW: Packed Absolute Value of Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PABSW m128 xmm
|
|
// PABSW xmm xmm
|
|
//
|
|
// Construct and append a PABSW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PABSW(mx, x operand.Op) { ctx.PABSW(mx, x) }
|
|
|
|
// PACKSSLW: Pack Doublewords into Words with Signed Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PACKSSLW m128 xmm
|
|
// PACKSSLW xmm xmm
|
|
//
|
|
// Construct and append a PACKSSLW instruction to the active function.
|
|
func (c *Context) PACKSSLW(mx, x operand.Op) {
|
|
c.addinstruction(x86.PACKSSLW(mx, x))
|
|
}
|
|
|
|
// PACKSSLW: Pack Doublewords into Words with Signed Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PACKSSLW m128 xmm
|
|
// PACKSSLW xmm xmm
|
|
//
|
|
// Construct and append a PACKSSLW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PACKSSLW(mx, x operand.Op) { ctx.PACKSSLW(mx, x) }
|
|
|
|
// PACKSSWB: Pack Words into Bytes with Signed Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PACKSSWB m128 xmm
|
|
// PACKSSWB xmm xmm
|
|
//
|
|
// Construct and append a PACKSSWB instruction to the active function.
|
|
func (c *Context) PACKSSWB(mx, x operand.Op) {
|
|
c.addinstruction(x86.PACKSSWB(mx, x))
|
|
}
|
|
|
|
// PACKSSWB: Pack Words into Bytes with Signed Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PACKSSWB m128 xmm
|
|
// PACKSSWB xmm xmm
|
|
//
|
|
// Construct and append a PACKSSWB instruction to the active function.
|
|
// Operates on the global context.
|
|
func PACKSSWB(mx, x operand.Op) { ctx.PACKSSWB(mx, x) }
|
|
|
|
// PACKUSDW: Pack Doublewords into Words with Unsigned Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PACKUSDW m128 xmm
|
|
// PACKUSDW xmm xmm
|
|
//
|
|
// Construct and append a PACKUSDW instruction to the active function.
|
|
func (c *Context) PACKUSDW(mx, x operand.Op) {
|
|
c.addinstruction(x86.PACKUSDW(mx, x))
|
|
}
|
|
|
|
// PACKUSDW: Pack Doublewords into Words with Unsigned Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PACKUSDW m128 xmm
|
|
// PACKUSDW xmm xmm
|
|
//
|
|
// Construct and append a PACKUSDW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PACKUSDW(mx, x operand.Op) { ctx.PACKUSDW(mx, x) }
|
|
|
|
// PACKUSWB: Pack Words into Bytes with Unsigned Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PACKUSWB m128 xmm
|
|
// PACKUSWB xmm xmm
|
|
//
|
|
// Construct and append a PACKUSWB instruction to the active function.
|
|
func (c *Context) PACKUSWB(mx, x operand.Op) {
|
|
c.addinstruction(x86.PACKUSWB(mx, x))
|
|
}
|
|
|
|
// PACKUSWB: Pack Words into Bytes with Unsigned Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PACKUSWB m128 xmm
|
|
// PACKUSWB xmm xmm
|
|
//
|
|
// Construct and append a PACKUSWB instruction to the active function.
|
|
// Operates on the global context.
|
|
func PACKUSWB(mx, x operand.Op) { ctx.PACKUSWB(mx, x) }
|
|
|
|
// PADDB: Add Packed Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PADDB m128 xmm
|
|
// PADDB xmm xmm
|
|
//
|
|
// Construct and append a PADDB instruction to the active function.
|
|
func (c *Context) PADDB(mx, x operand.Op) {
|
|
c.addinstruction(x86.PADDB(mx, x))
|
|
}
|
|
|
|
// PADDB: Add Packed Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PADDB m128 xmm
|
|
// PADDB xmm xmm
|
|
//
|
|
// Construct and append a PADDB instruction to the active function.
|
|
// Operates on the global context.
|
|
func PADDB(mx, x operand.Op) { ctx.PADDB(mx, x) }
|
|
|
|
// PADDD: Add Packed Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PADDD m128 xmm
|
|
// PADDD xmm xmm
|
|
//
|
|
// Construct and append a PADDD instruction to the active function.
|
|
func (c *Context) PADDD(mx, x operand.Op) {
|
|
c.addinstruction(x86.PADDD(mx, x))
|
|
}
|
|
|
|
// PADDD: Add Packed Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PADDD m128 xmm
|
|
// PADDD xmm xmm
|
|
//
|
|
// Construct and append a PADDD instruction to the active function.
|
|
// Operates on the global context.
|
|
func PADDD(mx, x operand.Op) { ctx.PADDD(mx, x) }
|
|
|
|
// PADDL: Add Packed Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PADDL m128 xmm
|
|
// PADDL xmm xmm
|
|
//
|
|
// Construct and append a PADDL instruction to the active function.
|
|
func (c *Context) PADDL(mx, x operand.Op) {
|
|
c.addinstruction(x86.PADDL(mx, x))
|
|
}
|
|
|
|
// PADDL: Add Packed Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PADDL m128 xmm
|
|
// PADDL xmm xmm
|
|
//
|
|
// Construct and append a PADDL instruction to the active function.
|
|
// Operates on the global context.
|
|
func PADDL(mx, x operand.Op) { ctx.PADDL(mx, x) }
|
|
|
|
// PADDQ: Add Packed Quadword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PADDQ m128 xmm
|
|
// PADDQ xmm xmm
|
|
//
|
|
// Construct and append a PADDQ instruction to the active function.
|
|
func (c *Context) PADDQ(mx, x operand.Op) {
|
|
c.addinstruction(x86.PADDQ(mx, x))
|
|
}
|
|
|
|
// PADDQ: Add Packed Quadword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PADDQ m128 xmm
|
|
// PADDQ xmm xmm
|
|
//
|
|
// Construct and append a PADDQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func PADDQ(mx, x operand.Op) { ctx.PADDQ(mx, x) }
|
|
|
|
// PADDSB: Add Packed Signed Byte Integers with Signed Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PADDSB m128 xmm
|
|
// PADDSB xmm xmm
|
|
//
|
|
// Construct and append a PADDSB instruction to the active function.
|
|
func (c *Context) PADDSB(mx, x operand.Op) {
|
|
c.addinstruction(x86.PADDSB(mx, x))
|
|
}
|
|
|
|
// PADDSB: Add Packed Signed Byte Integers with Signed Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PADDSB m128 xmm
|
|
// PADDSB xmm xmm
|
|
//
|
|
// Construct and append a PADDSB instruction to the active function.
|
|
// Operates on the global context.
|
|
func PADDSB(mx, x operand.Op) { ctx.PADDSB(mx, x) }
|
|
|
|
// PADDSW: Add Packed Signed Word Integers with Signed Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PADDSW m128 xmm
|
|
// PADDSW xmm xmm
|
|
//
|
|
// Construct and append a PADDSW instruction to the active function.
|
|
func (c *Context) PADDSW(mx, x operand.Op) {
|
|
c.addinstruction(x86.PADDSW(mx, x))
|
|
}
|
|
|
|
// PADDSW: Add Packed Signed Word Integers with Signed Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PADDSW m128 xmm
|
|
// PADDSW xmm xmm
|
|
//
|
|
// Construct and append a PADDSW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PADDSW(mx, x operand.Op) { ctx.PADDSW(mx, x) }
|
|
|
|
// PADDUSB: Add Packed Unsigned Byte Integers with Unsigned Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PADDUSB m128 xmm
|
|
// PADDUSB xmm xmm
|
|
//
|
|
// Construct and append a PADDUSB instruction to the active function.
|
|
func (c *Context) PADDUSB(mx, x operand.Op) {
|
|
c.addinstruction(x86.PADDUSB(mx, x))
|
|
}
|
|
|
|
// PADDUSB: Add Packed Unsigned Byte Integers with Unsigned Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PADDUSB m128 xmm
|
|
// PADDUSB xmm xmm
|
|
//
|
|
// Construct and append a PADDUSB instruction to the active function.
|
|
// Operates on the global context.
|
|
func PADDUSB(mx, x operand.Op) { ctx.PADDUSB(mx, x) }
|
|
|
|
// PADDUSW: Add Packed Unsigned Word Integers with Unsigned Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PADDUSW m128 xmm
|
|
// PADDUSW xmm xmm
|
|
//
|
|
// Construct and append a PADDUSW instruction to the active function.
|
|
func (c *Context) PADDUSW(mx, x operand.Op) {
|
|
c.addinstruction(x86.PADDUSW(mx, x))
|
|
}
|
|
|
|
// PADDUSW: Add Packed Unsigned Word Integers with Unsigned Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PADDUSW m128 xmm
|
|
// PADDUSW xmm xmm
|
|
//
|
|
// Construct and append a PADDUSW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PADDUSW(mx, x operand.Op) { ctx.PADDUSW(mx, x) }
|
|
|
|
// PADDW: Add Packed Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PADDW m128 xmm
|
|
// PADDW xmm xmm
|
|
//
|
|
// Construct and append a PADDW instruction to the active function.
|
|
func (c *Context) PADDW(mx, x operand.Op) {
|
|
c.addinstruction(x86.PADDW(mx, x))
|
|
}
|
|
|
|
// PADDW: Add Packed Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PADDW m128 xmm
|
|
// PADDW xmm xmm
|
|
//
|
|
// Construct and append a PADDW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PADDW(mx, x operand.Op) { ctx.PADDW(mx, x) }
|
|
|
|
// PALIGNR: Packed Align Right.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PALIGNR imm8 m128 xmm
|
|
// PALIGNR imm8 xmm xmm
|
|
//
|
|
// Construct and append a PALIGNR instruction to the active function.
|
|
func (c *Context) PALIGNR(i, mx, x operand.Op) {
|
|
c.addinstruction(x86.PALIGNR(i, mx, x))
|
|
}
|
|
|
|
// PALIGNR: Packed Align Right.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PALIGNR imm8 m128 xmm
|
|
// PALIGNR imm8 xmm xmm
|
|
//
|
|
// Construct and append a PALIGNR instruction to the active function.
|
|
// Operates on the global context.
|
|
func PALIGNR(i, mx, x operand.Op) { ctx.PALIGNR(i, mx, x) }
|
|
|
|
// PAND: Packed Bitwise Logical AND.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PAND m128 xmm
|
|
// PAND xmm xmm
|
|
//
|
|
// Construct and append a PAND instruction to the active function.
|
|
func (c *Context) PAND(mx, x operand.Op) {
|
|
c.addinstruction(x86.PAND(mx, x))
|
|
}
|
|
|
|
// PAND: Packed Bitwise Logical AND.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PAND m128 xmm
|
|
// PAND xmm xmm
|
|
//
|
|
// Construct and append a PAND instruction to the active function.
|
|
// Operates on the global context.
|
|
func PAND(mx, x operand.Op) { ctx.PAND(mx, x) }
|
|
|
|
// PANDN: Packed Bitwise Logical AND NOT.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PANDN m128 xmm
|
|
// PANDN xmm xmm
|
|
//
|
|
// Construct and append a PANDN instruction to the active function.
|
|
func (c *Context) PANDN(mx, x operand.Op) {
|
|
c.addinstruction(x86.PANDN(mx, x))
|
|
}
|
|
|
|
// PANDN: Packed Bitwise Logical AND NOT.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PANDN m128 xmm
|
|
// PANDN xmm xmm
|
|
//
|
|
// Construct and append a PANDN instruction to the active function.
|
|
// Operates on the global context.
|
|
func PANDN(mx, x operand.Op) { ctx.PANDN(mx, x) }
|
|
|
|
// PAUSE: Spin Loop Hint.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PAUSE
|
|
//
|
|
// Construct and append a PAUSE instruction to the active function.
|
|
func (c *Context) PAUSE() {
|
|
c.addinstruction(x86.PAUSE())
|
|
}
|
|
|
|
// PAUSE: Spin Loop Hint.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PAUSE
|
|
//
|
|
// Construct and append a PAUSE instruction to the active function.
|
|
// Operates on the global context.
|
|
func PAUSE() { ctx.PAUSE() }
|
|
|
|
// PAVGB: Average Packed Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PAVGB m128 xmm
|
|
// PAVGB xmm xmm
|
|
//
|
|
// Construct and append a PAVGB instruction to the active function.
|
|
func (c *Context) PAVGB(mx, x operand.Op) {
|
|
c.addinstruction(x86.PAVGB(mx, x))
|
|
}
|
|
|
|
// PAVGB: Average Packed Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PAVGB m128 xmm
|
|
// PAVGB xmm xmm
|
|
//
|
|
// Construct and append a PAVGB instruction to the active function.
|
|
// Operates on the global context.
|
|
func PAVGB(mx, x operand.Op) { ctx.PAVGB(mx, x) }
|
|
|
|
// PAVGW: Average Packed Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PAVGW m128 xmm
|
|
// PAVGW xmm xmm
|
|
//
|
|
// Construct and append a PAVGW instruction to the active function.
|
|
func (c *Context) PAVGW(mx, x operand.Op) {
|
|
c.addinstruction(x86.PAVGW(mx, x))
|
|
}
|
|
|
|
// PAVGW: Average Packed Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PAVGW m128 xmm
|
|
// PAVGW xmm xmm
|
|
//
|
|
// Construct and append a PAVGW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PAVGW(mx, x operand.Op) { ctx.PAVGW(mx, x) }
|
|
|
|
// PBLENDVB: Variable Blend Packed Bytes.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PBLENDVB xmm0 m128 xmm
|
|
// PBLENDVB xmm0 xmm xmm
|
|
//
|
|
// Construct and append a PBLENDVB instruction to the active function.
|
|
func (c *Context) PBLENDVB(x, mx, x1 operand.Op) {
|
|
c.addinstruction(x86.PBLENDVB(x, mx, x1))
|
|
}
|
|
|
|
// PBLENDVB: Variable Blend Packed Bytes.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PBLENDVB xmm0 m128 xmm
|
|
// PBLENDVB xmm0 xmm xmm
|
|
//
|
|
// Construct and append a PBLENDVB instruction to the active function.
|
|
// Operates on the global context.
|
|
func PBLENDVB(x, mx, x1 operand.Op) { ctx.PBLENDVB(x, mx, x1) }
|
|
|
|
// PBLENDW: Blend Packed Words.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PBLENDW imm8 m128 xmm
|
|
// PBLENDW imm8 xmm xmm
|
|
//
|
|
// Construct and append a PBLENDW instruction to the active function.
|
|
func (c *Context) PBLENDW(i, mx, x operand.Op) {
|
|
c.addinstruction(x86.PBLENDW(i, mx, x))
|
|
}
|
|
|
|
// PBLENDW: Blend Packed Words.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PBLENDW imm8 m128 xmm
|
|
// PBLENDW imm8 xmm xmm
|
|
//
|
|
// Construct and append a PBLENDW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PBLENDW(i, mx, x operand.Op) { ctx.PBLENDW(i, mx, x) }
|
|
|
|
// PCLMULQDQ: Carry-Less Quadword Multiplication.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PCLMULQDQ imm8 m128 xmm
|
|
// PCLMULQDQ imm8 xmm xmm
|
|
//
|
|
// Construct and append a PCLMULQDQ instruction to the active function.
|
|
func (c *Context) PCLMULQDQ(i, mx, x operand.Op) {
|
|
c.addinstruction(x86.PCLMULQDQ(i, mx, x))
|
|
}
|
|
|
|
// PCLMULQDQ: Carry-Less Quadword Multiplication.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PCLMULQDQ imm8 m128 xmm
|
|
// PCLMULQDQ imm8 xmm xmm
|
|
//
|
|
// Construct and append a PCLMULQDQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func PCLMULQDQ(i, mx, x operand.Op) { ctx.PCLMULQDQ(i, mx, x) }
|
|
|
|
// PCMPEQB: Compare Packed Byte Data for Equality.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PCMPEQB m128 xmm
|
|
// PCMPEQB xmm xmm
|
|
//
|
|
// Construct and append a PCMPEQB instruction to the active function.
|
|
func (c *Context) PCMPEQB(mx, x operand.Op) {
|
|
c.addinstruction(x86.PCMPEQB(mx, x))
|
|
}
|
|
|
|
// PCMPEQB: Compare Packed Byte Data for Equality.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PCMPEQB m128 xmm
|
|
// PCMPEQB xmm xmm
|
|
//
|
|
// Construct and append a PCMPEQB instruction to the active function.
|
|
// Operates on the global context.
|
|
func PCMPEQB(mx, x operand.Op) { ctx.PCMPEQB(mx, x) }
|
|
|
|
// PCMPEQL: Compare Packed Doubleword Data for Equality.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PCMPEQL m128 xmm
|
|
// PCMPEQL xmm xmm
|
|
//
|
|
// Construct and append a PCMPEQL instruction to the active function.
|
|
func (c *Context) PCMPEQL(mx, x operand.Op) {
|
|
c.addinstruction(x86.PCMPEQL(mx, x))
|
|
}
|
|
|
|
// PCMPEQL: Compare Packed Doubleword Data for Equality.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PCMPEQL m128 xmm
|
|
// PCMPEQL xmm xmm
|
|
//
|
|
// Construct and append a PCMPEQL instruction to the active function.
|
|
// Operates on the global context.
|
|
func PCMPEQL(mx, x operand.Op) { ctx.PCMPEQL(mx, x) }
|
|
|
|
// PCMPEQQ: Compare Packed Quadword Data for Equality.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PCMPEQQ m128 xmm
|
|
// PCMPEQQ xmm xmm
|
|
//
|
|
// Construct and append a PCMPEQQ instruction to the active function.
|
|
func (c *Context) PCMPEQQ(mx, x operand.Op) {
|
|
c.addinstruction(x86.PCMPEQQ(mx, x))
|
|
}
|
|
|
|
// PCMPEQQ: Compare Packed Quadword Data for Equality.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PCMPEQQ m128 xmm
|
|
// PCMPEQQ xmm xmm
|
|
//
|
|
// Construct and append a PCMPEQQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func PCMPEQQ(mx, x operand.Op) { ctx.PCMPEQQ(mx, x) }
|
|
|
|
// PCMPEQW: Compare Packed Word Data for Equality.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PCMPEQW m128 xmm
|
|
// PCMPEQW xmm xmm
|
|
//
|
|
// Construct and append a PCMPEQW instruction to the active function.
|
|
func (c *Context) PCMPEQW(mx, x operand.Op) {
|
|
c.addinstruction(x86.PCMPEQW(mx, x))
|
|
}
|
|
|
|
// PCMPEQW: Compare Packed Word Data for Equality.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PCMPEQW m128 xmm
|
|
// PCMPEQW xmm xmm
|
|
//
|
|
// Construct and append a PCMPEQW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PCMPEQW(mx, x operand.Op) { ctx.PCMPEQW(mx, x) }
|
|
|
|
// PCMPESTRI: Packed Compare Explicit Length Strings, Return Index.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PCMPESTRI imm8 m128 xmm
|
|
// PCMPESTRI imm8 xmm xmm
|
|
//
|
|
// Construct and append a PCMPESTRI instruction to the active function.
|
|
func (c *Context) PCMPESTRI(i, mx, x operand.Op) {
|
|
c.addinstruction(x86.PCMPESTRI(i, mx, x))
|
|
}
|
|
|
|
// PCMPESTRI: Packed Compare Explicit Length Strings, Return Index.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PCMPESTRI imm8 m128 xmm
|
|
// PCMPESTRI imm8 xmm xmm
|
|
//
|
|
// Construct and append a PCMPESTRI instruction to the active function.
|
|
// Operates on the global context.
|
|
func PCMPESTRI(i, mx, x operand.Op) { ctx.PCMPESTRI(i, mx, x) }
|
|
|
|
// PCMPESTRM: Packed Compare Explicit Length Strings, Return Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PCMPESTRM imm8 m128 xmm
|
|
// PCMPESTRM imm8 xmm xmm
|
|
//
|
|
// Construct and append a PCMPESTRM instruction to the active function.
|
|
func (c *Context) PCMPESTRM(i, mx, x operand.Op) {
|
|
c.addinstruction(x86.PCMPESTRM(i, mx, x))
|
|
}
|
|
|
|
// PCMPESTRM: Packed Compare Explicit Length Strings, Return Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PCMPESTRM imm8 m128 xmm
|
|
// PCMPESTRM imm8 xmm xmm
|
|
//
|
|
// Construct and append a PCMPESTRM instruction to the active function.
|
|
// Operates on the global context.
|
|
func PCMPESTRM(i, mx, x operand.Op) { ctx.PCMPESTRM(i, mx, x) }
|
|
|
|
// PCMPGTB: Compare Packed Signed Byte Integers for Greater Than.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PCMPGTB m128 xmm
|
|
// PCMPGTB xmm xmm
|
|
//
|
|
// Construct and append a PCMPGTB instruction to the active function.
|
|
func (c *Context) PCMPGTB(mx, x operand.Op) {
|
|
c.addinstruction(x86.PCMPGTB(mx, x))
|
|
}
|
|
|
|
// PCMPGTB: Compare Packed Signed Byte Integers for Greater Than.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PCMPGTB m128 xmm
|
|
// PCMPGTB xmm xmm
|
|
//
|
|
// Construct and append a PCMPGTB instruction to the active function.
|
|
// Operates on the global context.
|
|
func PCMPGTB(mx, x operand.Op) { ctx.PCMPGTB(mx, x) }
|
|
|
|
// PCMPGTL: Compare Packed Signed Doubleword Integers for Greater Than.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PCMPGTL m128 xmm
|
|
// PCMPGTL xmm xmm
|
|
//
|
|
// Construct and append a PCMPGTL instruction to the active function.
|
|
func (c *Context) PCMPGTL(mx, x operand.Op) {
|
|
c.addinstruction(x86.PCMPGTL(mx, x))
|
|
}
|
|
|
|
// PCMPGTL: Compare Packed Signed Doubleword Integers for Greater Than.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PCMPGTL m128 xmm
|
|
// PCMPGTL xmm xmm
|
|
//
|
|
// Construct and append a PCMPGTL instruction to the active function.
|
|
// Operates on the global context.
|
|
func PCMPGTL(mx, x operand.Op) { ctx.PCMPGTL(mx, x) }
|
|
|
|
// PCMPGTQ: Compare Packed Data for Greater Than.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PCMPGTQ m128 xmm
|
|
// PCMPGTQ xmm xmm
|
|
//
|
|
// Construct and append a PCMPGTQ instruction to the active function.
|
|
func (c *Context) PCMPGTQ(mx, x operand.Op) {
|
|
c.addinstruction(x86.PCMPGTQ(mx, x))
|
|
}
|
|
|
|
// PCMPGTQ: Compare Packed Data for Greater Than.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PCMPGTQ m128 xmm
|
|
// PCMPGTQ xmm xmm
|
|
//
|
|
// Construct and append a PCMPGTQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func PCMPGTQ(mx, x operand.Op) { ctx.PCMPGTQ(mx, x) }
|
|
|
|
// PCMPGTW: Compare Packed Signed Word Integers for Greater Than.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PCMPGTW m128 xmm
|
|
// PCMPGTW xmm xmm
|
|
//
|
|
// Construct and append a PCMPGTW instruction to the active function.
|
|
func (c *Context) PCMPGTW(mx, x operand.Op) {
|
|
c.addinstruction(x86.PCMPGTW(mx, x))
|
|
}
|
|
|
|
// PCMPGTW: Compare Packed Signed Word Integers for Greater Than.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PCMPGTW m128 xmm
|
|
// PCMPGTW xmm xmm
|
|
//
|
|
// Construct and append a PCMPGTW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PCMPGTW(mx, x operand.Op) { ctx.PCMPGTW(mx, x) }
|
|
|
|
// PCMPISTRI: Packed Compare Implicit Length Strings, Return Index.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PCMPISTRI imm8 m128 xmm
|
|
// PCMPISTRI imm8 xmm xmm
|
|
//
|
|
// Construct and append a PCMPISTRI instruction to the active function.
|
|
func (c *Context) PCMPISTRI(i, mx, x operand.Op) {
|
|
c.addinstruction(x86.PCMPISTRI(i, mx, x))
|
|
}
|
|
|
|
// PCMPISTRI: Packed Compare Implicit Length Strings, Return Index.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PCMPISTRI imm8 m128 xmm
|
|
// PCMPISTRI imm8 xmm xmm
|
|
//
|
|
// Construct and append a PCMPISTRI instruction to the active function.
|
|
// Operates on the global context.
|
|
func PCMPISTRI(i, mx, x operand.Op) { ctx.PCMPISTRI(i, mx, x) }
|
|
|
|
// PCMPISTRM: Packed Compare Implicit Length Strings, Return Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PCMPISTRM imm8 m128 xmm
|
|
// PCMPISTRM imm8 xmm xmm
|
|
//
|
|
// Construct and append a PCMPISTRM instruction to the active function.
|
|
func (c *Context) PCMPISTRM(i, mx, x operand.Op) {
|
|
c.addinstruction(x86.PCMPISTRM(i, mx, x))
|
|
}
|
|
|
|
// PCMPISTRM: Packed Compare Implicit Length Strings, Return Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PCMPISTRM imm8 m128 xmm
|
|
// PCMPISTRM imm8 xmm xmm
|
|
//
|
|
// Construct and append a PCMPISTRM instruction to the active function.
|
|
// Operates on the global context.
|
|
func PCMPISTRM(i, mx, x operand.Op) { ctx.PCMPISTRM(i, mx, x) }
|
|
|
|
// PDEPL: Parallel Bits Deposit.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PDEPL m32 r32 r32
|
|
// PDEPL r32 r32 r32
|
|
//
|
|
// Construct and append a PDEPL instruction to the active function.
|
|
func (c *Context) PDEPL(mr, r, r1 operand.Op) {
|
|
c.addinstruction(x86.PDEPL(mr, r, r1))
|
|
}
|
|
|
|
// PDEPL: Parallel Bits Deposit.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PDEPL m32 r32 r32
|
|
// PDEPL r32 r32 r32
|
|
//
|
|
// Construct and append a PDEPL instruction to the active function.
|
|
// Operates on the global context.
|
|
func PDEPL(mr, r, r1 operand.Op) { ctx.PDEPL(mr, r, r1) }
|
|
|
|
// PDEPQ: Parallel Bits Deposit.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PDEPQ m64 r64 r64
|
|
// PDEPQ r64 r64 r64
|
|
//
|
|
// Construct and append a PDEPQ instruction to the active function.
|
|
func (c *Context) PDEPQ(mr, r, r1 operand.Op) {
|
|
c.addinstruction(x86.PDEPQ(mr, r, r1))
|
|
}
|
|
|
|
// PDEPQ: Parallel Bits Deposit.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PDEPQ m64 r64 r64
|
|
// PDEPQ r64 r64 r64
|
|
//
|
|
// Construct and append a PDEPQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func PDEPQ(mr, r, r1 operand.Op) { ctx.PDEPQ(mr, r, r1) }
|
|
|
|
// PEXTL: Parallel Bits Extract.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PEXTL m32 r32 r32
|
|
// PEXTL r32 r32 r32
|
|
//
|
|
// Construct and append a PEXTL instruction to the active function.
|
|
func (c *Context) PEXTL(mr, r, r1 operand.Op) {
|
|
c.addinstruction(x86.PEXTL(mr, r, r1))
|
|
}
|
|
|
|
// PEXTL: Parallel Bits Extract.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PEXTL m32 r32 r32
|
|
// PEXTL r32 r32 r32
|
|
//
|
|
// Construct and append a PEXTL instruction to the active function.
|
|
// Operates on the global context.
|
|
func PEXTL(mr, r, r1 operand.Op) { ctx.PEXTL(mr, r, r1) }
|
|
|
|
// PEXTQ: Parallel Bits Extract.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PEXTQ m64 r64 r64
|
|
// PEXTQ r64 r64 r64
|
|
//
|
|
// Construct and append a PEXTQ instruction to the active function.
|
|
func (c *Context) PEXTQ(mr, r, r1 operand.Op) {
|
|
c.addinstruction(x86.PEXTQ(mr, r, r1))
|
|
}
|
|
|
|
// PEXTQ: Parallel Bits Extract.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PEXTQ m64 r64 r64
|
|
// PEXTQ r64 r64 r64
|
|
//
|
|
// Construct and append a PEXTQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func PEXTQ(mr, r, r1 operand.Op) { ctx.PEXTQ(mr, r, r1) }
|
|
|
|
// PEXTRB: Extract Byte.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PEXTRB imm8 xmm m8
|
|
// PEXTRB imm8 xmm r32
|
|
//
|
|
// Construct and append a PEXTRB instruction to the active function.
|
|
func (c *Context) PEXTRB(i, x, mr operand.Op) {
|
|
c.addinstruction(x86.PEXTRB(i, x, mr))
|
|
}
|
|
|
|
// PEXTRB: Extract Byte.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PEXTRB imm8 xmm m8
|
|
// PEXTRB imm8 xmm r32
|
|
//
|
|
// Construct and append a PEXTRB instruction to the active function.
|
|
// Operates on the global context.
|
|
func PEXTRB(i, x, mr operand.Op) { ctx.PEXTRB(i, x, mr) }
|
|
|
|
// PEXTRD: Extract Doubleword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PEXTRD imm8 xmm m32
|
|
// PEXTRD imm8 xmm r32
|
|
//
|
|
// Construct and append a PEXTRD instruction to the active function.
|
|
func (c *Context) PEXTRD(i, x, mr operand.Op) {
|
|
c.addinstruction(x86.PEXTRD(i, x, mr))
|
|
}
|
|
|
|
// PEXTRD: Extract Doubleword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PEXTRD imm8 xmm m32
|
|
// PEXTRD imm8 xmm r32
|
|
//
|
|
// Construct and append a PEXTRD instruction to the active function.
|
|
// Operates on the global context.
|
|
func PEXTRD(i, x, mr operand.Op) { ctx.PEXTRD(i, x, mr) }
|
|
|
|
// PEXTRQ: Extract Quadword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PEXTRQ imm8 xmm m64
|
|
// PEXTRQ imm8 xmm r64
|
|
//
|
|
// Construct and append a PEXTRQ instruction to the active function.
|
|
func (c *Context) PEXTRQ(i, x, mr operand.Op) {
|
|
c.addinstruction(x86.PEXTRQ(i, x, mr))
|
|
}
|
|
|
|
// PEXTRQ: Extract Quadword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PEXTRQ imm8 xmm m64
|
|
// PEXTRQ imm8 xmm r64
|
|
//
|
|
// Construct and append a PEXTRQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func PEXTRQ(i, x, mr operand.Op) { ctx.PEXTRQ(i, x, mr) }
|
|
|
|
// PEXTRW: Extract Word.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PEXTRW imm8 xmm m16
|
|
// PEXTRW imm8 xmm r32
|
|
//
|
|
// Construct and append a PEXTRW instruction to the active function.
|
|
func (c *Context) PEXTRW(i, x, mr operand.Op) {
|
|
c.addinstruction(x86.PEXTRW(i, x, mr))
|
|
}
|
|
|
|
// PEXTRW: Extract Word.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PEXTRW imm8 xmm m16
|
|
// PEXTRW imm8 xmm r32
|
|
//
|
|
// Construct and append a PEXTRW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PEXTRW(i, x, mr operand.Op) { ctx.PEXTRW(i, x, mr) }
|
|
|
|
// PHADDD: Packed Horizontal Add Doubleword Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PHADDD m128 xmm
|
|
// PHADDD xmm xmm
|
|
//
|
|
// Construct and append a PHADDD instruction to the active function.
|
|
func (c *Context) PHADDD(mx, x operand.Op) {
|
|
c.addinstruction(x86.PHADDD(mx, x))
|
|
}
|
|
|
|
// PHADDD: Packed Horizontal Add Doubleword Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PHADDD m128 xmm
|
|
// PHADDD xmm xmm
|
|
//
|
|
// Construct and append a PHADDD instruction to the active function.
|
|
// Operates on the global context.
|
|
func PHADDD(mx, x operand.Op) { ctx.PHADDD(mx, x) }
|
|
|
|
// PHADDSW: Packed Horizontal Add Signed Word Integers with Signed Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PHADDSW m128 xmm
|
|
// PHADDSW xmm xmm
|
|
//
|
|
// Construct and append a PHADDSW instruction to the active function.
|
|
func (c *Context) PHADDSW(mx, x operand.Op) {
|
|
c.addinstruction(x86.PHADDSW(mx, x))
|
|
}
|
|
|
|
// PHADDSW: Packed Horizontal Add Signed Word Integers with Signed Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PHADDSW m128 xmm
|
|
// PHADDSW xmm xmm
|
|
//
|
|
// Construct and append a PHADDSW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PHADDSW(mx, x operand.Op) { ctx.PHADDSW(mx, x) }
|
|
|
|
// PHADDW: Packed Horizontal Add Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PHADDW m128 xmm
|
|
// PHADDW xmm xmm
|
|
//
|
|
// Construct and append a PHADDW instruction to the active function.
|
|
func (c *Context) PHADDW(mx, x operand.Op) {
|
|
c.addinstruction(x86.PHADDW(mx, x))
|
|
}
|
|
|
|
// PHADDW: Packed Horizontal Add Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PHADDW m128 xmm
|
|
// PHADDW xmm xmm
|
|
//
|
|
// Construct and append a PHADDW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PHADDW(mx, x operand.Op) { ctx.PHADDW(mx, x) }
|
|
|
|
// PHMINPOSUW: Packed Horizontal Minimum of Unsigned Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PHMINPOSUW m128 xmm
|
|
// PHMINPOSUW xmm xmm
|
|
//
|
|
// Construct and append a PHMINPOSUW instruction to the active function.
|
|
func (c *Context) PHMINPOSUW(mx, x operand.Op) {
|
|
c.addinstruction(x86.PHMINPOSUW(mx, x))
|
|
}
|
|
|
|
// PHMINPOSUW: Packed Horizontal Minimum of Unsigned Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PHMINPOSUW m128 xmm
|
|
// PHMINPOSUW xmm xmm
|
|
//
|
|
// Construct and append a PHMINPOSUW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PHMINPOSUW(mx, x operand.Op) { ctx.PHMINPOSUW(mx, x) }
|
|
|
|
// PHSUBD: Packed Horizontal Subtract Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PHSUBD m128 xmm
|
|
// PHSUBD xmm xmm
|
|
//
|
|
// Construct and append a PHSUBD instruction to the active function.
|
|
func (c *Context) PHSUBD(mx, x operand.Op) {
|
|
c.addinstruction(x86.PHSUBD(mx, x))
|
|
}
|
|
|
|
// PHSUBD: Packed Horizontal Subtract Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PHSUBD m128 xmm
|
|
// PHSUBD xmm xmm
|
|
//
|
|
// Construct and append a PHSUBD instruction to the active function.
|
|
// Operates on the global context.
|
|
func PHSUBD(mx, x operand.Op) { ctx.PHSUBD(mx, x) }
|
|
|
|
// PHSUBSW: Packed Horizontal Subtract Signed Word Integers with Signed Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PHSUBSW m128 xmm
|
|
// PHSUBSW xmm xmm
|
|
//
|
|
// Construct and append a PHSUBSW instruction to the active function.
|
|
func (c *Context) PHSUBSW(mx, x operand.Op) {
|
|
c.addinstruction(x86.PHSUBSW(mx, x))
|
|
}
|
|
|
|
// PHSUBSW: Packed Horizontal Subtract Signed Word Integers with Signed Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PHSUBSW m128 xmm
|
|
// PHSUBSW xmm xmm
|
|
//
|
|
// Construct and append a PHSUBSW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PHSUBSW(mx, x operand.Op) { ctx.PHSUBSW(mx, x) }
|
|
|
|
// PHSUBW: Packed Horizontal Subtract Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PHSUBW m128 xmm
|
|
// PHSUBW xmm xmm
|
|
//
|
|
// Construct and append a PHSUBW instruction to the active function.
|
|
func (c *Context) PHSUBW(mx, x operand.Op) {
|
|
c.addinstruction(x86.PHSUBW(mx, x))
|
|
}
|
|
|
|
// PHSUBW: Packed Horizontal Subtract Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PHSUBW m128 xmm
|
|
// PHSUBW xmm xmm
|
|
//
|
|
// Construct and append a PHSUBW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PHSUBW(mx, x operand.Op) { ctx.PHSUBW(mx, x) }
|
|
|
|
// PINSRB: Insert Byte.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PINSRB imm8 m8 xmm
|
|
// PINSRB imm8 r32 xmm
|
|
//
|
|
// Construct and append a PINSRB instruction to the active function.
|
|
func (c *Context) PINSRB(i, mr, x operand.Op) {
|
|
c.addinstruction(x86.PINSRB(i, mr, x))
|
|
}
|
|
|
|
// PINSRB: Insert Byte.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PINSRB imm8 m8 xmm
|
|
// PINSRB imm8 r32 xmm
|
|
//
|
|
// Construct and append a PINSRB instruction to the active function.
|
|
// Operates on the global context.
|
|
func PINSRB(i, mr, x operand.Op) { ctx.PINSRB(i, mr, x) }
|
|
|
|
// PINSRD: Insert Doubleword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PINSRD imm8 m32 xmm
|
|
// PINSRD imm8 r32 xmm
|
|
//
|
|
// Construct and append a PINSRD instruction to the active function.
|
|
func (c *Context) PINSRD(i, mr, x operand.Op) {
|
|
c.addinstruction(x86.PINSRD(i, mr, x))
|
|
}
|
|
|
|
// PINSRD: Insert Doubleword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PINSRD imm8 m32 xmm
|
|
// PINSRD imm8 r32 xmm
|
|
//
|
|
// Construct and append a PINSRD instruction to the active function.
|
|
// Operates on the global context.
|
|
func PINSRD(i, mr, x operand.Op) { ctx.PINSRD(i, mr, x) }
|
|
|
|
// PINSRQ: Insert Quadword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PINSRQ imm8 m64 xmm
|
|
// PINSRQ imm8 r64 xmm
|
|
//
|
|
// Construct and append a PINSRQ instruction to the active function.
|
|
func (c *Context) PINSRQ(i, mr, x operand.Op) {
|
|
c.addinstruction(x86.PINSRQ(i, mr, x))
|
|
}
|
|
|
|
// PINSRQ: Insert Quadword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PINSRQ imm8 m64 xmm
|
|
// PINSRQ imm8 r64 xmm
|
|
//
|
|
// Construct and append a PINSRQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func PINSRQ(i, mr, x operand.Op) { ctx.PINSRQ(i, mr, x) }
|
|
|
|
// PINSRW: Insert Word.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PINSRW imm8 m16 xmm
|
|
// PINSRW imm8 r32 xmm
|
|
//
|
|
// Construct and append a PINSRW instruction to the active function.
|
|
func (c *Context) PINSRW(i, mr, x operand.Op) {
|
|
c.addinstruction(x86.PINSRW(i, mr, x))
|
|
}
|
|
|
|
// PINSRW: Insert Word.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PINSRW imm8 m16 xmm
|
|
// PINSRW imm8 r32 xmm
|
|
//
|
|
// Construct and append a PINSRW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PINSRW(i, mr, x operand.Op) { ctx.PINSRW(i, mr, x) }
|
|
|
|
// PMADDUBSW: Multiply and Add Packed Signed and Unsigned Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMADDUBSW m128 xmm
|
|
// PMADDUBSW xmm xmm
|
|
//
|
|
// Construct and append a PMADDUBSW instruction to the active function.
|
|
func (c *Context) PMADDUBSW(mx, x operand.Op) {
|
|
c.addinstruction(x86.PMADDUBSW(mx, x))
|
|
}
|
|
|
|
// PMADDUBSW: Multiply and Add Packed Signed and Unsigned Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMADDUBSW m128 xmm
|
|
// PMADDUBSW xmm xmm
|
|
//
|
|
// Construct and append a PMADDUBSW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PMADDUBSW(mx, x operand.Op) { ctx.PMADDUBSW(mx, x) }
|
|
|
|
// PMADDWL: Multiply and Add Packed Signed Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMADDWL m128 xmm
|
|
// PMADDWL xmm xmm
|
|
//
|
|
// Construct and append a PMADDWL instruction to the active function.
|
|
func (c *Context) PMADDWL(mx, x operand.Op) {
|
|
c.addinstruction(x86.PMADDWL(mx, x))
|
|
}
|
|
|
|
// PMADDWL: Multiply and Add Packed Signed Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMADDWL m128 xmm
|
|
// PMADDWL xmm xmm
|
|
//
|
|
// Construct and append a PMADDWL instruction to the active function.
|
|
// Operates on the global context.
|
|
func PMADDWL(mx, x operand.Op) { ctx.PMADDWL(mx, x) }
|
|
|
|
// PMAXSB: Maximum of Packed Signed Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMAXSB m128 xmm
|
|
// PMAXSB xmm xmm
|
|
//
|
|
// Construct and append a PMAXSB instruction to the active function.
|
|
func (c *Context) PMAXSB(mx, x operand.Op) {
|
|
c.addinstruction(x86.PMAXSB(mx, x))
|
|
}
|
|
|
|
// PMAXSB: Maximum of Packed Signed Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMAXSB m128 xmm
|
|
// PMAXSB xmm xmm
|
|
//
|
|
// Construct and append a PMAXSB instruction to the active function.
|
|
// Operates on the global context.
|
|
func PMAXSB(mx, x operand.Op) { ctx.PMAXSB(mx, x) }
|
|
|
|
// PMAXSD: Maximum of Packed Signed Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMAXSD m128 xmm
|
|
// PMAXSD xmm xmm
|
|
//
|
|
// Construct and append a PMAXSD instruction to the active function.
|
|
func (c *Context) PMAXSD(mx, x operand.Op) {
|
|
c.addinstruction(x86.PMAXSD(mx, x))
|
|
}
|
|
|
|
// PMAXSD: Maximum of Packed Signed Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMAXSD m128 xmm
|
|
// PMAXSD xmm xmm
|
|
//
|
|
// Construct and append a PMAXSD instruction to the active function.
|
|
// Operates on the global context.
|
|
func PMAXSD(mx, x operand.Op) { ctx.PMAXSD(mx, x) }
|
|
|
|
// PMAXSW: Maximum of Packed Signed Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMAXSW m128 xmm
|
|
// PMAXSW xmm xmm
|
|
//
|
|
// Construct and append a PMAXSW instruction to the active function.
|
|
func (c *Context) PMAXSW(mx, x operand.Op) {
|
|
c.addinstruction(x86.PMAXSW(mx, x))
|
|
}
|
|
|
|
// PMAXSW: Maximum of Packed Signed Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMAXSW m128 xmm
|
|
// PMAXSW xmm xmm
|
|
//
|
|
// Construct and append a PMAXSW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PMAXSW(mx, x operand.Op) { ctx.PMAXSW(mx, x) }
|
|
|
|
// PMAXUB: Maximum of Packed Unsigned Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMAXUB m128 xmm
|
|
// PMAXUB xmm xmm
|
|
//
|
|
// Construct and append a PMAXUB instruction to the active function.
|
|
func (c *Context) PMAXUB(mx, x operand.Op) {
|
|
c.addinstruction(x86.PMAXUB(mx, x))
|
|
}
|
|
|
|
// PMAXUB: Maximum of Packed Unsigned Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMAXUB m128 xmm
|
|
// PMAXUB xmm xmm
|
|
//
|
|
// Construct and append a PMAXUB instruction to the active function.
|
|
// Operates on the global context.
|
|
func PMAXUB(mx, x operand.Op) { ctx.PMAXUB(mx, x) }
|
|
|
|
// PMAXUD: Maximum of Packed Unsigned Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMAXUD m128 xmm
|
|
// PMAXUD xmm xmm
|
|
//
|
|
// Construct and append a PMAXUD instruction to the active function.
|
|
func (c *Context) PMAXUD(mx, x operand.Op) {
|
|
c.addinstruction(x86.PMAXUD(mx, x))
|
|
}
|
|
|
|
// PMAXUD: Maximum of Packed Unsigned Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMAXUD m128 xmm
|
|
// PMAXUD xmm xmm
|
|
//
|
|
// Construct and append a PMAXUD instruction to the active function.
|
|
// Operates on the global context.
|
|
func PMAXUD(mx, x operand.Op) { ctx.PMAXUD(mx, x) }
|
|
|
|
// PMAXUW: Maximum of Packed Unsigned Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMAXUW m128 xmm
|
|
// PMAXUW xmm xmm
|
|
//
|
|
// Construct and append a PMAXUW instruction to the active function.
|
|
func (c *Context) PMAXUW(mx, x operand.Op) {
|
|
c.addinstruction(x86.PMAXUW(mx, x))
|
|
}
|
|
|
|
// PMAXUW: Maximum of Packed Unsigned Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMAXUW m128 xmm
|
|
// PMAXUW xmm xmm
|
|
//
|
|
// Construct and append a PMAXUW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PMAXUW(mx, x operand.Op) { ctx.PMAXUW(mx, x) }
|
|
|
|
// PMINSB: Minimum of Packed Signed Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMINSB m128 xmm
|
|
// PMINSB xmm xmm
|
|
//
|
|
// Construct and append a PMINSB instruction to the active function.
|
|
func (c *Context) PMINSB(mx, x operand.Op) {
|
|
c.addinstruction(x86.PMINSB(mx, x))
|
|
}
|
|
|
|
// PMINSB: Minimum of Packed Signed Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMINSB m128 xmm
|
|
// PMINSB xmm xmm
|
|
//
|
|
// Construct and append a PMINSB instruction to the active function.
|
|
// Operates on the global context.
|
|
func PMINSB(mx, x operand.Op) { ctx.PMINSB(mx, x) }
|
|
|
|
// PMINSD: Minimum of Packed Signed Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMINSD m128 xmm
|
|
// PMINSD xmm xmm
|
|
//
|
|
// Construct and append a PMINSD instruction to the active function.
|
|
func (c *Context) PMINSD(mx, x operand.Op) {
|
|
c.addinstruction(x86.PMINSD(mx, x))
|
|
}
|
|
|
|
// PMINSD: Minimum of Packed Signed Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMINSD m128 xmm
|
|
// PMINSD xmm xmm
|
|
//
|
|
// Construct and append a PMINSD instruction to the active function.
|
|
// Operates on the global context.
|
|
func PMINSD(mx, x operand.Op) { ctx.PMINSD(mx, x) }
|
|
|
|
// PMINSW: Minimum of Packed Signed Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMINSW m128 xmm
|
|
// PMINSW xmm xmm
|
|
//
|
|
// Construct and append a PMINSW instruction to the active function.
|
|
func (c *Context) PMINSW(mx, x operand.Op) {
|
|
c.addinstruction(x86.PMINSW(mx, x))
|
|
}
|
|
|
|
// PMINSW: Minimum of Packed Signed Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMINSW m128 xmm
|
|
// PMINSW xmm xmm
|
|
//
|
|
// Construct and append a PMINSW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PMINSW(mx, x operand.Op) { ctx.PMINSW(mx, x) }
|
|
|
|
// PMINUB: Minimum of Packed Unsigned Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMINUB m128 xmm
|
|
// PMINUB xmm xmm
|
|
//
|
|
// Construct and append a PMINUB instruction to the active function.
|
|
func (c *Context) PMINUB(mx, x operand.Op) {
|
|
c.addinstruction(x86.PMINUB(mx, x))
|
|
}
|
|
|
|
// PMINUB: Minimum of Packed Unsigned Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMINUB m128 xmm
|
|
// PMINUB xmm xmm
|
|
//
|
|
// Construct and append a PMINUB instruction to the active function.
|
|
// Operates on the global context.
|
|
func PMINUB(mx, x operand.Op) { ctx.PMINUB(mx, x) }
|
|
|
|
// PMINUD: Minimum of Packed Unsigned Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMINUD m128 xmm
|
|
// PMINUD xmm xmm
|
|
//
|
|
// Construct and append a PMINUD instruction to the active function.
|
|
func (c *Context) PMINUD(mx, x operand.Op) {
|
|
c.addinstruction(x86.PMINUD(mx, x))
|
|
}
|
|
|
|
// PMINUD: Minimum of Packed Unsigned Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMINUD m128 xmm
|
|
// PMINUD xmm xmm
|
|
//
|
|
// Construct and append a PMINUD instruction to the active function.
|
|
// Operates on the global context.
|
|
func PMINUD(mx, x operand.Op) { ctx.PMINUD(mx, x) }
|
|
|
|
// PMINUW: Minimum of Packed Unsigned Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMINUW m128 xmm
|
|
// PMINUW xmm xmm
|
|
//
|
|
// Construct and append a PMINUW instruction to the active function.
|
|
func (c *Context) PMINUW(mx, x operand.Op) {
|
|
c.addinstruction(x86.PMINUW(mx, x))
|
|
}
|
|
|
|
// PMINUW: Minimum of Packed Unsigned Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMINUW m128 xmm
|
|
// PMINUW xmm xmm
|
|
//
|
|
// Construct and append a PMINUW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PMINUW(mx, x operand.Op) { ctx.PMINUW(mx, x) }
|
|
|
|
// PMOVMSKB: Move Byte Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMOVMSKB xmm r32
|
|
//
|
|
// Construct and append a PMOVMSKB instruction to the active function.
|
|
func (c *Context) PMOVMSKB(x, r operand.Op) {
|
|
c.addinstruction(x86.PMOVMSKB(x, r))
|
|
}
|
|
|
|
// PMOVMSKB: Move Byte Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PMOVMSKB xmm r32
|
|
//
|
|
// Construct and append a PMOVMSKB instruction to the active function.
|
|
// Operates on the global context.
|
|
func PMOVMSKB(x, r operand.Op) { ctx.PMOVMSKB(x, r) }
|
|
|
|
// PMOVSXBD: Move Packed Byte Integers to Doubleword Integers with Sign Extension.
//
// Forms:
//
//	PMOVSXBD m32 xmm
//	PMOVSXBD xmm xmm
//
// Construct and append a PMOVSXBD instruction to the active function.
func (c *Context) PMOVSXBD(mx, x operand.Op) {
	c.addinstruction(x86.PMOVSXBD(mx, x))
}

// PMOVSXBD: Move Packed Byte Integers to Doubleword Integers with Sign Extension.
//
// Forms:
//
//	PMOVSXBD m32 xmm
//	PMOVSXBD xmm xmm
//
// Construct and append a PMOVSXBD instruction to the active function.
// Operates on the global context.
func PMOVSXBD(mx, x operand.Op) { ctx.PMOVSXBD(mx, x) }

// PMOVSXBQ: Move Packed Byte Integers to Quadword Integers with Sign Extension.
//
// Forms:
//
//	PMOVSXBQ m16 xmm
//	PMOVSXBQ xmm xmm
//
// Construct and append a PMOVSXBQ instruction to the active function.
func (c *Context) PMOVSXBQ(mx, x operand.Op) {
	c.addinstruction(x86.PMOVSXBQ(mx, x))
}

// PMOVSXBQ: Move Packed Byte Integers to Quadword Integers with Sign Extension.
//
// Forms:
//
//	PMOVSXBQ m16 xmm
//	PMOVSXBQ xmm xmm
//
// Construct and append a PMOVSXBQ instruction to the active function.
// Operates on the global context.
func PMOVSXBQ(mx, x operand.Op) { ctx.PMOVSXBQ(mx, x) }

// PMOVSXBW: Move Packed Byte Integers to Word Integers with Sign Extension.
//
// Forms:
//
//	PMOVSXBW m64 xmm
//	PMOVSXBW xmm xmm
//
// Construct and append a PMOVSXBW instruction to the active function.
func (c *Context) PMOVSXBW(mx, x operand.Op) {
	c.addinstruction(x86.PMOVSXBW(mx, x))
}

// PMOVSXBW: Move Packed Byte Integers to Word Integers with Sign Extension.
//
// Forms:
//
//	PMOVSXBW m64 xmm
//	PMOVSXBW xmm xmm
//
// Construct and append a PMOVSXBW instruction to the active function.
// Operates on the global context.
func PMOVSXBW(mx, x operand.Op) { ctx.PMOVSXBW(mx, x) }

// PMOVSXDQ: Move Packed Doubleword Integers to Quadword Integers with Sign Extension.
//
// Forms:
//
//	PMOVSXDQ m64 xmm
//	PMOVSXDQ xmm xmm
//
// Construct and append a PMOVSXDQ instruction to the active function.
func (c *Context) PMOVSXDQ(mx, x operand.Op) {
	c.addinstruction(x86.PMOVSXDQ(mx, x))
}

// PMOVSXDQ: Move Packed Doubleword Integers to Quadword Integers with Sign Extension.
//
// Forms:
//
//	PMOVSXDQ m64 xmm
//	PMOVSXDQ xmm xmm
//
// Construct and append a PMOVSXDQ instruction to the active function.
// Operates on the global context.
func PMOVSXDQ(mx, x operand.Op) { ctx.PMOVSXDQ(mx, x) }

// PMOVSXWD: Move Packed Word Integers to Doubleword Integers with Sign Extension.
//
// Forms:
//
//	PMOVSXWD m64 xmm
//	PMOVSXWD xmm xmm
//
// Construct and append a PMOVSXWD instruction to the active function.
func (c *Context) PMOVSXWD(mx, x operand.Op) {
	c.addinstruction(x86.PMOVSXWD(mx, x))
}

// PMOVSXWD: Move Packed Word Integers to Doubleword Integers with Sign Extension.
//
// Forms:
//
//	PMOVSXWD m64 xmm
//	PMOVSXWD xmm xmm
//
// Construct and append a PMOVSXWD instruction to the active function.
// Operates on the global context.
func PMOVSXWD(mx, x operand.Op) { ctx.PMOVSXWD(mx, x) }

// PMOVSXWQ: Move Packed Word Integers to Quadword Integers with Sign Extension.
//
// Forms:
//
//	PMOVSXWQ m32 xmm
//	PMOVSXWQ xmm xmm
//
// Construct and append a PMOVSXWQ instruction to the active function.
func (c *Context) PMOVSXWQ(mx, x operand.Op) {
	c.addinstruction(x86.PMOVSXWQ(mx, x))
}

// PMOVSXWQ: Move Packed Word Integers to Quadword Integers with Sign Extension.
//
// Forms:
//
//	PMOVSXWQ m32 xmm
//	PMOVSXWQ xmm xmm
//
// Construct and append a PMOVSXWQ instruction to the active function.
// Operates on the global context.
func PMOVSXWQ(mx, x operand.Op) { ctx.PMOVSXWQ(mx, x) }
|
|
|
|
// PMOVZXBD: Move Packed Byte Integers to Doubleword Integers with Zero Extension.
//
// Forms:
//
//	PMOVZXBD m32 xmm
//	PMOVZXBD xmm xmm
//
// Construct and append a PMOVZXBD instruction to the active function.
func (c *Context) PMOVZXBD(mx, x operand.Op) {
	c.addinstruction(x86.PMOVZXBD(mx, x))
}

// PMOVZXBD: Move Packed Byte Integers to Doubleword Integers with Zero Extension.
//
// Forms:
//
//	PMOVZXBD m32 xmm
//	PMOVZXBD xmm xmm
//
// Construct and append a PMOVZXBD instruction to the active function.
// Operates on the global context.
func PMOVZXBD(mx, x operand.Op) { ctx.PMOVZXBD(mx, x) }

// PMOVZXBQ: Move Packed Byte Integers to Quadword Integers with Zero Extension.
//
// Forms:
//
//	PMOVZXBQ m16 xmm
//	PMOVZXBQ xmm xmm
//
// Construct and append a PMOVZXBQ instruction to the active function.
func (c *Context) PMOVZXBQ(mx, x operand.Op) {
	c.addinstruction(x86.PMOVZXBQ(mx, x))
}

// PMOVZXBQ: Move Packed Byte Integers to Quadword Integers with Zero Extension.
//
// Forms:
//
//	PMOVZXBQ m16 xmm
//	PMOVZXBQ xmm xmm
//
// Construct and append a PMOVZXBQ instruction to the active function.
// Operates on the global context.
func PMOVZXBQ(mx, x operand.Op) { ctx.PMOVZXBQ(mx, x) }

// PMOVZXBW: Move Packed Byte Integers to Word Integers with Zero Extension.
//
// Forms:
//
//	PMOVZXBW m64 xmm
//	PMOVZXBW xmm xmm
//
// Construct and append a PMOVZXBW instruction to the active function.
func (c *Context) PMOVZXBW(mx, x operand.Op) {
	c.addinstruction(x86.PMOVZXBW(mx, x))
}

// PMOVZXBW: Move Packed Byte Integers to Word Integers with Zero Extension.
//
// Forms:
//
//	PMOVZXBW m64 xmm
//	PMOVZXBW xmm xmm
//
// Construct and append a PMOVZXBW instruction to the active function.
// Operates on the global context.
func PMOVZXBW(mx, x operand.Op) { ctx.PMOVZXBW(mx, x) }

// PMOVZXDQ: Move Packed Doubleword Integers to Quadword Integers with Zero Extension.
//
// Forms:
//
//	PMOVZXDQ m64 xmm
//	PMOVZXDQ xmm xmm
//
// Construct and append a PMOVZXDQ instruction to the active function.
func (c *Context) PMOVZXDQ(mx, x operand.Op) {
	c.addinstruction(x86.PMOVZXDQ(mx, x))
}

// PMOVZXDQ: Move Packed Doubleword Integers to Quadword Integers with Zero Extension.
//
// Forms:
//
//	PMOVZXDQ m64 xmm
//	PMOVZXDQ xmm xmm
//
// Construct and append a PMOVZXDQ instruction to the active function.
// Operates on the global context.
func PMOVZXDQ(mx, x operand.Op) { ctx.PMOVZXDQ(mx, x) }

// PMOVZXWD: Move Packed Word Integers to Doubleword Integers with Zero Extension.
//
// Forms:
//
//	PMOVZXWD m64 xmm
//	PMOVZXWD xmm xmm
//
// Construct and append a PMOVZXWD instruction to the active function.
func (c *Context) PMOVZXWD(mx, x operand.Op) {
	c.addinstruction(x86.PMOVZXWD(mx, x))
}

// PMOVZXWD: Move Packed Word Integers to Doubleword Integers with Zero Extension.
//
// Forms:
//
//	PMOVZXWD m64 xmm
//	PMOVZXWD xmm xmm
//
// Construct and append a PMOVZXWD instruction to the active function.
// Operates on the global context.
func PMOVZXWD(mx, x operand.Op) { ctx.PMOVZXWD(mx, x) }

// PMOVZXWQ: Move Packed Word Integers to Quadword Integers with Zero Extension.
//
// Forms:
//
//	PMOVZXWQ m32 xmm
//	PMOVZXWQ xmm xmm
//
// Construct and append a PMOVZXWQ instruction to the active function.
func (c *Context) PMOVZXWQ(mx, x operand.Op) {
	c.addinstruction(x86.PMOVZXWQ(mx, x))
}

// PMOVZXWQ: Move Packed Word Integers to Quadword Integers with Zero Extension.
//
// Forms:
//
//	PMOVZXWQ m32 xmm
//	PMOVZXWQ xmm xmm
//
// Construct and append a PMOVZXWQ instruction to the active function.
// Operates on the global context.
func PMOVZXWQ(mx, x operand.Op) { ctx.PMOVZXWQ(mx, x) }
|
|
|
|
// PMULDQ: Multiply Packed Signed Doubleword Integers and Store Quadword Result.
//
// Forms:
//
//	PMULDQ m128 xmm
//	PMULDQ xmm xmm
//
// Construct and append a PMULDQ instruction to the active function.
func (c *Context) PMULDQ(mx, x operand.Op) {
	c.addinstruction(x86.PMULDQ(mx, x))
}

// PMULDQ: Multiply Packed Signed Doubleword Integers and Store Quadword Result.
//
// Forms:
//
//	PMULDQ m128 xmm
//	PMULDQ xmm xmm
//
// Construct and append a PMULDQ instruction to the active function.
// Operates on the global context.
func PMULDQ(mx, x operand.Op) { ctx.PMULDQ(mx, x) }

// PMULHRSW: Packed Multiply Signed Word Integers and Store High Result with Round and Scale.
//
// Forms:
//
//	PMULHRSW m128 xmm
//	PMULHRSW xmm xmm
//
// Construct and append a PMULHRSW instruction to the active function.
func (c *Context) PMULHRSW(mx, x operand.Op) {
	c.addinstruction(x86.PMULHRSW(mx, x))
}

// PMULHRSW: Packed Multiply Signed Word Integers and Store High Result with Round and Scale.
//
// Forms:
//
//	PMULHRSW m128 xmm
//	PMULHRSW xmm xmm
//
// Construct and append a PMULHRSW instruction to the active function.
// Operates on the global context.
func PMULHRSW(mx, x operand.Op) { ctx.PMULHRSW(mx, x) }

// PMULHUW: Multiply Packed Unsigned Word Integers and Store High Result.
//
// Forms:
//
//	PMULHUW m128 xmm
//	PMULHUW xmm xmm
//
// Construct and append a PMULHUW instruction to the active function.
func (c *Context) PMULHUW(mx, x operand.Op) {
	c.addinstruction(x86.PMULHUW(mx, x))
}

// PMULHUW: Multiply Packed Unsigned Word Integers and Store High Result.
//
// Forms:
//
//	PMULHUW m128 xmm
//	PMULHUW xmm xmm
//
// Construct and append a PMULHUW instruction to the active function.
// Operates on the global context.
func PMULHUW(mx, x operand.Op) { ctx.PMULHUW(mx, x) }

// PMULHW: Multiply Packed Signed Word Integers and Store High Result.
//
// Forms:
//
//	PMULHW m128 xmm
//	PMULHW xmm xmm
//
// Construct and append a PMULHW instruction to the active function.
func (c *Context) PMULHW(mx, x operand.Op) {
	c.addinstruction(x86.PMULHW(mx, x))
}

// PMULHW: Multiply Packed Signed Word Integers and Store High Result.
//
// Forms:
//
//	PMULHW m128 xmm
//	PMULHW xmm xmm
//
// Construct and append a PMULHW instruction to the active function.
// Operates on the global context.
func PMULHW(mx, x operand.Op) { ctx.PMULHW(mx, x) }

// PMULLD: Multiply Packed Signed Doubleword Integers and Store Low Result.
//
// Forms:
//
//	PMULLD m128 xmm
//	PMULLD xmm xmm
//
// Construct and append a PMULLD instruction to the active function.
func (c *Context) PMULLD(mx, x operand.Op) {
	c.addinstruction(x86.PMULLD(mx, x))
}

// PMULLD: Multiply Packed Signed Doubleword Integers and Store Low Result.
//
// Forms:
//
//	PMULLD m128 xmm
//	PMULLD xmm xmm
//
// Construct and append a PMULLD instruction to the active function.
// Operates on the global context.
func PMULLD(mx, x operand.Op) { ctx.PMULLD(mx, x) }

// PMULLW: Multiply Packed Signed Word Integers and Store Low Result.
//
// Forms:
//
//	PMULLW m128 xmm
//	PMULLW xmm xmm
//
// Construct and append a PMULLW instruction to the active function.
func (c *Context) PMULLW(mx, x operand.Op) {
	c.addinstruction(x86.PMULLW(mx, x))
}

// PMULLW: Multiply Packed Signed Word Integers and Store Low Result.
//
// Forms:
//
//	PMULLW m128 xmm
//	PMULLW xmm xmm
//
// Construct and append a PMULLW instruction to the active function.
// Operates on the global context.
func PMULLW(mx, x operand.Op) { ctx.PMULLW(mx, x) }

// PMULULQ: Multiply Packed Unsigned Doubleword Integers.
//
// Note: PMULULQ is the Go assembler's spelling of the x86 PMULUDQ instruction.
//
// Forms:
//
//	PMULULQ m128 xmm
//	PMULULQ xmm xmm
//
// Construct and append a PMULULQ instruction to the active function.
func (c *Context) PMULULQ(mx, x operand.Op) {
	c.addinstruction(x86.PMULULQ(mx, x))
}

// PMULULQ: Multiply Packed Unsigned Doubleword Integers.
//
// Note: PMULULQ is the Go assembler's spelling of the x86 PMULUDQ instruction.
//
// Forms:
//
//	PMULULQ m128 xmm
//	PMULULQ xmm xmm
//
// Construct and append a PMULULQ instruction to the active function.
// Operates on the global context.
func PMULULQ(mx, x operand.Op) { ctx.PMULULQ(mx, x) }
|
|
|
|
// POPCNTL: Count of Number of Bits Set to 1.
//
// Forms:
//
//	POPCNTL m32 r32
//	POPCNTL r32 r32
//
// Construct and append a POPCNTL instruction to the active function.
func (c *Context) POPCNTL(mr, r operand.Op) {
	c.addinstruction(x86.POPCNTL(mr, r))
}

// POPCNTL: Count of Number of Bits Set to 1.
//
// Forms:
//
//	POPCNTL m32 r32
//	POPCNTL r32 r32
//
// Construct and append a POPCNTL instruction to the active function.
// Operates on the global context.
func POPCNTL(mr, r operand.Op) { ctx.POPCNTL(mr, r) }

// POPCNTQ: Count of Number of Bits Set to 1.
//
// Forms:
//
//	POPCNTQ m64 r64
//	POPCNTQ r64 r64
//
// Construct and append a POPCNTQ instruction to the active function.
func (c *Context) POPCNTQ(mr, r operand.Op) {
	c.addinstruction(x86.POPCNTQ(mr, r))
}

// POPCNTQ: Count of Number of Bits Set to 1.
//
// Forms:
//
//	POPCNTQ m64 r64
//	POPCNTQ r64 r64
//
// Construct and append a POPCNTQ instruction to the active function.
// Operates on the global context.
func POPCNTQ(mr, r operand.Op) { ctx.POPCNTQ(mr, r) }

// POPCNTW: Count of Number of Bits Set to 1.
//
// Forms:
//
//	POPCNTW m16 r16
//	POPCNTW r16 r16
//
// Construct and append a POPCNTW instruction to the active function.
func (c *Context) POPCNTW(mr, r operand.Op) {
	c.addinstruction(x86.POPCNTW(mr, r))
}

// POPCNTW: Count of Number of Bits Set to 1.
//
// Forms:
//
//	POPCNTW m16 r16
//	POPCNTW r16 r16
//
// Construct and append a POPCNTW instruction to the active function.
// Operates on the global context.
func POPCNTW(mr, r operand.Op) { ctx.POPCNTW(mr, r) }
|
|
|
|
// POPQ: Pop a Value from the Stack.
//
// Forms:
//
//	POPQ m64
//	POPQ r64
//
// Construct and append a POPQ instruction to the active function.
func (c *Context) POPQ(mr operand.Op) {
	c.addinstruction(x86.POPQ(mr))
}

// POPQ: Pop a Value from the Stack.
//
// Forms:
//
//	POPQ m64
//	POPQ r64
//
// Construct and append a POPQ instruction to the active function.
// Operates on the global context.
func POPQ(mr operand.Op) { ctx.POPQ(mr) }

// POPW: Pop a Value from the Stack.
//
// Forms:
//
//	POPW m16
//	POPW r16
//
// Construct and append a POPW instruction to the active function.
func (c *Context) POPW(mr operand.Op) {
	c.addinstruction(x86.POPW(mr))
}

// POPW: Pop a Value from the Stack.
//
// Forms:
//
//	POPW m16
//	POPW r16
//
// Construct and append a POPW instruction to the active function.
// Operates on the global context.
func POPW(mr operand.Op) { ctx.POPW(mr) }
|
|
|
|
// POR: Packed Bitwise Logical OR.
//
// Forms:
//
//	POR m128 xmm
//	POR xmm xmm
//
// Construct and append a POR instruction to the active function.
func (c *Context) POR(mx, x operand.Op) {
	c.addinstruction(x86.POR(mx, x))
}

// POR: Packed Bitwise Logical OR.
//
// Forms:
//
//	POR m128 xmm
//	POR xmm xmm
//
// Construct and append a POR instruction to the active function.
// Operates on the global context.
func POR(mx, x operand.Op) { ctx.POR(mx, x) }
|
|
|
|
// PREFETCHNTA: Prefetch Data Into Caches using NTA Hint.
//
// Forms:
//
//	PREFETCHNTA m8
//
// Construct and append a PREFETCHNTA instruction to the active function.
func (c *Context) PREFETCHNTA(m operand.Op) {
	c.addinstruction(x86.PREFETCHNTA(m))
}

// PREFETCHNTA: Prefetch Data Into Caches using NTA Hint.
//
// Forms:
//
//	PREFETCHNTA m8
//
// Construct and append a PREFETCHNTA instruction to the active function.
// Operates on the global context.
func PREFETCHNTA(m operand.Op) { ctx.PREFETCHNTA(m) }

// PREFETCHT0: Prefetch Data Into Caches using T0 Hint.
//
// Forms:
//
//	PREFETCHT0 m8
//
// Construct and append a PREFETCHT0 instruction to the active function.
func (c *Context) PREFETCHT0(m operand.Op) {
	c.addinstruction(x86.PREFETCHT0(m))
}

// PREFETCHT0: Prefetch Data Into Caches using T0 Hint.
//
// Forms:
//
//	PREFETCHT0 m8
//
// Construct and append a PREFETCHT0 instruction to the active function.
// Operates on the global context.
func PREFETCHT0(m operand.Op) { ctx.PREFETCHT0(m) }

// PREFETCHT1: Prefetch Data Into Caches using T1 Hint.
//
// Forms:
//
//	PREFETCHT1 m8
//
// Construct and append a PREFETCHT1 instruction to the active function.
func (c *Context) PREFETCHT1(m operand.Op) {
	c.addinstruction(x86.PREFETCHT1(m))
}

// PREFETCHT1: Prefetch Data Into Caches using T1 Hint.
//
// Forms:
//
//	PREFETCHT1 m8
//
// Construct and append a PREFETCHT1 instruction to the active function.
// Operates on the global context.
func PREFETCHT1(m operand.Op) { ctx.PREFETCHT1(m) }

// PREFETCHT2: Prefetch Data Into Caches using T2 Hint.
//
// Forms:
//
//	PREFETCHT2 m8
//
// Construct and append a PREFETCHT2 instruction to the active function.
func (c *Context) PREFETCHT2(m operand.Op) {
	c.addinstruction(x86.PREFETCHT2(m))
}

// PREFETCHT2: Prefetch Data Into Caches using T2 Hint.
//
// Forms:
//
//	PREFETCHT2 m8
//
// Construct and append a PREFETCHT2 instruction to the active function.
// Operates on the global context.
func PREFETCHT2(m operand.Op) { ctx.PREFETCHT2(m) }
|
|
|
|
// PSADBW: Compute Sum of Absolute Differences.
//
// Forms:
//
//	PSADBW m128 xmm
//	PSADBW xmm xmm
//
// Construct and append a PSADBW instruction to the active function.
func (c *Context) PSADBW(mx, x operand.Op) {
	c.addinstruction(x86.PSADBW(mx, x))
}

// PSADBW: Compute Sum of Absolute Differences.
//
// Forms:
//
//	PSADBW m128 xmm
//	PSADBW xmm xmm
//
// Construct and append a PSADBW instruction to the active function.
// Operates on the global context.
func PSADBW(mx, x operand.Op) { ctx.PSADBW(mx, x) }
|
|
|
|
// PSHUFB: Packed Shuffle Bytes.
//
// Forms:
//
//	PSHUFB m128 xmm
//	PSHUFB xmm xmm
//
// Construct and append a PSHUFB instruction to the active function.
func (c *Context) PSHUFB(mx, x operand.Op) {
	c.addinstruction(x86.PSHUFB(mx, x))
}

// PSHUFB: Packed Shuffle Bytes.
//
// Forms:
//
//	PSHUFB m128 xmm
//	PSHUFB xmm xmm
//
// Construct and append a PSHUFB instruction to the active function.
// Operates on the global context.
func PSHUFB(mx, x operand.Op) { ctx.PSHUFB(mx, x) }

// PSHUFD: Shuffle Packed Doublewords.
//
// Forms:
//
//	PSHUFD imm8 m128 xmm
//	PSHUFD imm8 xmm xmm
//
// Construct and append a PSHUFD instruction to the active function.
func (c *Context) PSHUFD(i, mx, x operand.Op) {
	c.addinstruction(x86.PSHUFD(i, mx, x))
}

// PSHUFD: Shuffle Packed Doublewords.
//
// Forms:
//
//	PSHUFD imm8 m128 xmm
//	PSHUFD imm8 xmm xmm
//
// Construct and append a PSHUFD instruction to the active function.
// Operates on the global context.
func PSHUFD(i, mx, x operand.Op) { ctx.PSHUFD(i, mx, x) }

// PSHUFHW: Shuffle Packed High Words.
//
// Forms:
//
//	PSHUFHW imm8 m128 xmm
//	PSHUFHW imm8 xmm xmm
//
// Construct and append a PSHUFHW instruction to the active function.
func (c *Context) PSHUFHW(i, mx, x operand.Op) {
	c.addinstruction(x86.PSHUFHW(i, mx, x))
}

// PSHUFHW: Shuffle Packed High Words.
//
// Forms:
//
//	PSHUFHW imm8 m128 xmm
//	PSHUFHW imm8 xmm xmm
//
// Construct and append a PSHUFHW instruction to the active function.
// Operates on the global context.
func PSHUFHW(i, mx, x operand.Op) { ctx.PSHUFHW(i, mx, x) }

// PSHUFL: Shuffle Packed Doublewords.
//
// Forms:
//
//	PSHUFL imm8 m128 xmm
//	PSHUFL imm8 xmm xmm
//
// Construct and append a PSHUFL instruction to the active function.
func (c *Context) PSHUFL(i, mx, x operand.Op) {
	c.addinstruction(x86.PSHUFL(i, mx, x))
}

// PSHUFL: Shuffle Packed Doublewords.
//
// Forms:
//
//	PSHUFL imm8 m128 xmm
//	PSHUFL imm8 xmm xmm
//
// Construct and append a PSHUFL instruction to the active function.
// Operates on the global context.
func PSHUFL(i, mx, x operand.Op) { ctx.PSHUFL(i, mx, x) }

// PSHUFLW: Shuffle Packed Low Words.
//
// Forms:
//
//	PSHUFLW imm8 m128 xmm
//	PSHUFLW imm8 xmm xmm
//
// Construct and append a PSHUFLW instruction to the active function.
func (c *Context) PSHUFLW(i, mx, x operand.Op) {
	c.addinstruction(x86.PSHUFLW(i, mx, x))
}

// PSHUFLW: Shuffle Packed Low Words.
//
// Forms:
//
//	PSHUFLW imm8 m128 xmm
//	PSHUFLW imm8 xmm xmm
//
// Construct and append a PSHUFLW instruction to the active function.
// Operates on the global context.
func PSHUFLW(i, mx, x operand.Op) { ctx.PSHUFLW(i, mx, x) }
|
|
|
|
// PSIGNB: Packed Sign of Byte Integers.
//
// Forms:
//
//	PSIGNB m128 xmm
//	PSIGNB xmm xmm
//
// Construct and append a PSIGNB instruction to the active function.
func (c *Context) PSIGNB(mx, x operand.Op) {
	c.addinstruction(x86.PSIGNB(mx, x))
}

// PSIGNB: Packed Sign of Byte Integers.
//
// Forms:
//
//	PSIGNB m128 xmm
//	PSIGNB xmm xmm
//
// Construct and append a PSIGNB instruction to the active function.
// Operates on the global context.
func PSIGNB(mx, x operand.Op) { ctx.PSIGNB(mx, x) }

// PSIGND: Packed Sign of Doubleword Integers.
//
// Forms:
//
//	PSIGND m128 xmm
//	PSIGND xmm xmm
//
// Construct and append a PSIGND instruction to the active function.
func (c *Context) PSIGND(mx, x operand.Op) {
	c.addinstruction(x86.PSIGND(mx, x))
}

// PSIGND: Packed Sign of Doubleword Integers.
//
// Forms:
//
//	PSIGND m128 xmm
//	PSIGND xmm xmm
//
// Construct and append a PSIGND instruction to the active function.
// Operates on the global context.
func PSIGND(mx, x operand.Op) { ctx.PSIGND(mx, x) }

// PSIGNW: Packed Sign of Word Integers.
//
// Forms:
//
//	PSIGNW m128 xmm
//	PSIGNW xmm xmm
//
// Construct and append a PSIGNW instruction to the active function.
func (c *Context) PSIGNW(mx, x operand.Op) {
	c.addinstruction(x86.PSIGNW(mx, x))
}

// PSIGNW: Packed Sign of Word Integers.
//
// Forms:
//
//	PSIGNW m128 xmm
//	PSIGNW xmm xmm
//
// Construct and append a PSIGNW instruction to the active function.
// Operates on the global context.
func PSIGNW(mx, x operand.Op) { ctx.PSIGNW(mx, x) }
|
|
|
|
// PSLLDQ: Shift Packed Double Quadword Left Logical.
//
// Forms:
//
//	PSLLDQ imm8 xmm
//
// Construct and append a PSLLDQ instruction to the active function.
func (c *Context) PSLLDQ(i, x operand.Op) {
	c.addinstruction(x86.PSLLDQ(i, x))
}

// PSLLDQ: Shift Packed Double Quadword Left Logical.
//
// Forms:
//
//	PSLLDQ imm8 xmm
//
// Construct and append a PSLLDQ instruction to the active function.
// Operates on the global context.
func PSLLDQ(i, x operand.Op) { ctx.PSLLDQ(i, x) }

// PSLLL: Shift Packed Doubleword Data Left Logical.
//
// Forms:
//
//	PSLLL imm8 xmm
//	PSLLL m128 xmm
//	PSLLL xmm xmm
//
// Construct and append a PSLLL instruction to the active function.
func (c *Context) PSLLL(imx, x operand.Op) {
	c.addinstruction(x86.PSLLL(imx, x))
}

// PSLLL: Shift Packed Doubleword Data Left Logical.
//
// Forms:
//
//	PSLLL imm8 xmm
//	PSLLL m128 xmm
//	PSLLL xmm xmm
//
// Construct and append a PSLLL instruction to the active function.
// Operates on the global context.
func PSLLL(imx, x operand.Op) { ctx.PSLLL(imx, x) }

// PSLLO: Shift Packed Double Quadword Left Logical.
//
// Forms:
//
//	PSLLO imm8 xmm
//
// Construct and append a PSLLO instruction to the active function.
func (c *Context) PSLLO(i, x operand.Op) {
	c.addinstruction(x86.PSLLO(i, x))
}

// PSLLO: Shift Packed Double Quadword Left Logical.
//
// Forms:
//
//	PSLLO imm8 xmm
//
// Construct and append a PSLLO instruction to the active function.
// Operates on the global context.
func PSLLO(i, x operand.Op) { ctx.PSLLO(i, x) }

// PSLLQ: Shift Packed Quadword Data Left Logical.
//
// Forms:
//
//	PSLLQ imm8 xmm
//	PSLLQ m128 xmm
//	PSLLQ xmm xmm
//
// Construct and append a PSLLQ instruction to the active function.
func (c *Context) PSLLQ(imx, x operand.Op) {
	c.addinstruction(x86.PSLLQ(imx, x))
}

// PSLLQ: Shift Packed Quadword Data Left Logical.
//
// Forms:
//
//	PSLLQ imm8 xmm
//	PSLLQ m128 xmm
//	PSLLQ xmm xmm
//
// Construct and append a PSLLQ instruction to the active function.
// Operates on the global context.
func PSLLQ(imx, x operand.Op) { ctx.PSLLQ(imx, x) }

// PSLLW: Shift Packed Word Data Left Logical.
//
// Forms:
//
//	PSLLW imm8 xmm
//	PSLLW m128 xmm
//	PSLLW xmm xmm
//
// Construct and append a PSLLW instruction to the active function.
func (c *Context) PSLLW(imx, x operand.Op) {
	c.addinstruction(x86.PSLLW(imx, x))
}

// PSLLW: Shift Packed Word Data Left Logical.
//
// Forms:
//
//	PSLLW imm8 xmm
//	PSLLW m128 xmm
//	PSLLW xmm xmm
//
// Construct and append a PSLLW instruction to the active function.
// Operates on the global context.
func PSLLW(imx, x operand.Op) { ctx.PSLLW(imx, x) }
|
|
|
|
// PSRAL: Shift Packed Doubleword Data Right Arithmetic.
//
// Forms:
//
//	PSRAL imm8 xmm
//	PSRAL m128 xmm
//	PSRAL xmm xmm
//
// Construct and append a PSRAL instruction to the active function.
func (c *Context) PSRAL(imx, x operand.Op) {
	c.addinstruction(x86.PSRAL(imx, x))
}

// PSRAL: Shift Packed Doubleword Data Right Arithmetic.
//
// Forms:
//
//	PSRAL imm8 xmm
//	PSRAL m128 xmm
//	PSRAL xmm xmm
//
// Construct and append a PSRAL instruction to the active function.
// Operates on the global context.
func PSRAL(imx, x operand.Op) { ctx.PSRAL(imx, x) }

// PSRAW: Shift Packed Word Data Right Arithmetic.
//
// Forms:
//
//	PSRAW imm8 xmm
//	PSRAW m128 xmm
//	PSRAW xmm xmm
//
// Construct and append a PSRAW instruction to the active function.
func (c *Context) PSRAW(imx, x operand.Op) {
	c.addinstruction(x86.PSRAW(imx, x))
}

// PSRAW: Shift Packed Word Data Right Arithmetic.
//
// Forms:
//
//	PSRAW imm8 xmm
//	PSRAW m128 xmm
//	PSRAW xmm xmm
//
// Construct and append a PSRAW instruction to the active function.
// Operates on the global context.
func PSRAW(imx, x operand.Op) { ctx.PSRAW(imx, x) }
|
|
|
|
// PSRLDQ: Shift Packed Double Quadword Right Logical.
//
// Forms:
//
//	PSRLDQ imm8 xmm
//
// Construct and append a PSRLDQ instruction to the active function.
func (c *Context) PSRLDQ(i, x operand.Op) {
	c.addinstruction(x86.PSRLDQ(i, x))
}

// PSRLDQ: Shift Packed Double Quadword Right Logical.
//
// Forms:
//
//	PSRLDQ imm8 xmm
//
// Construct and append a PSRLDQ instruction to the active function.
// Operates on the global context.
func PSRLDQ(i, x operand.Op) { ctx.PSRLDQ(i, x) }

// PSRLL: Shift Packed Doubleword Data Right Logical.
//
// Forms:
//
//	PSRLL imm8 xmm
//	PSRLL m128 xmm
//	PSRLL xmm xmm
//
// Construct and append a PSRLL instruction to the active function.
func (c *Context) PSRLL(imx, x operand.Op) {
	c.addinstruction(x86.PSRLL(imx, x))
}

// PSRLL: Shift Packed Doubleword Data Right Logical.
//
// Forms:
//
//	PSRLL imm8 xmm
//	PSRLL m128 xmm
//	PSRLL xmm xmm
//
// Construct and append a PSRLL instruction to the active function.
// Operates on the global context.
func PSRLL(imx, x operand.Op) { ctx.PSRLL(imx, x) }

// PSRLO: Shift Packed Double Quadword Right Logical.
//
// Forms:
//
//	PSRLO imm8 xmm
//
// Construct and append a PSRLO instruction to the active function.
func (c *Context) PSRLO(i, x operand.Op) {
	c.addinstruction(x86.PSRLO(i, x))
}

// PSRLO: Shift Packed Double Quadword Right Logical.
//
// Forms:
//
//	PSRLO imm8 xmm
//
// Construct and append a PSRLO instruction to the active function.
// Operates on the global context.
func PSRLO(i, x operand.Op) { ctx.PSRLO(i, x) }

// PSRLQ: Shift Packed Quadword Data Right Logical.
//
// Forms:
//
//	PSRLQ imm8 xmm
//	PSRLQ m128 xmm
//	PSRLQ xmm xmm
//
// Construct and append a PSRLQ instruction to the active function.
func (c *Context) PSRLQ(imx, x operand.Op) {
	c.addinstruction(x86.PSRLQ(imx, x))
}

// PSRLQ: Shift Packed Quadword Data Right Logical.
//
// Forms:
//
//	PSRLQ imm8 xmm
//	PSRLQ m128 xmm
//	PSRLQ xmm xmm
//
// Construct and append a PSRLQ instruction to the active function.
// Operates on the global context.
func PSRLQ(imx, x operand.Op) { ctx.PSRLQ(imx, x) }

// PSRLW: Shift Packed Word Data Right Logical.
//
// Forms:
//
//	PSRLW imm8 xmm
//	PSRLW m128 xmm
//	PSRLW xmm xmm
//
// Construct and append a PSRLW instruction to the active function.
func (c *Context) PSRLW(imx, x operand.Op) {
	c.addinstruction(x86.PSRLW(imx, x))
}

// PSRLW: Shift Packed Word Data Right Logical.
//
// Forms:
//
//	PSRLW imm8 xmm
//	PSRLW m128 xmm
//	PSRLW xmm xmm
//
// Construct and append a PSRLW instruction to the active function.
// Operates on the global context.
func PSRLW(imx, x operand.Op) { ctx.PSRLW(imx, x) }
|
|
|
|
// PSUBB: Subtract Packed Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSUBB m128 xmm
|
|
// PSUBB xmm xmm
|
|
//
|
|
// Construct and append a PSUBB instruction to the active function.
|
|
func (c *Context) PSUBB(mx, x operand.Op) {
|
|
c.addinstruction(x86.PSUBB(mx, x))
|
|
}
|
|
|
|
// PSUBB: Subtract Packed Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSUBB m128 xmm
|
|
// PSUBB xmm xmm
|
|
//
|
|
// Construct and append a PSUBB instruction to the active function.
|
|
// Operates on the global context.
|
|
func PSUBB(mx, x operand.Op) { ctx.PSUBB(mx, x) }
|
|
|
|
// PSUBL: Subtract Packed Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSUBL m128 xmm
|
|
// PSUBL xmm xmm
|
|
//
|
|
// Construct and append a PSUBL instruction to the active function.
|
|
func (c *Context) PSUBL(mx, x operand.Op) {
|
|
c.addinstruction(x86.PSUBL(mx, x))
|
|
}
|
|
|
|
// PSUBL: Subtract Packed Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSUBL m128 xmm
|
|
// PSUBL xmm xmm
|
|
//
|
|
// Construct and append a PSUBL instruction to the active function.
|
|
// Operates on the global context.
|
|
func PSUBL(mx, x operand.Op) { ctx.PSUBL(mx, x) }
|
|
|
|
// PSUBQ: Subtract Packed Quadword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSUBQ m128 xmm
|
|
// PSUBQ xmm xmm
|
|
//
|
|
// Construct and append a PSUBQ instruction to the active function.
|
|
func (c *Context) PSUBQ(mx, x operand.Op) {
|
|
c.addinstruction(x86.PSUBQ(mx, x))
|
|
}
|
|
|
|
// PSUBQ: Subtract Packed Quadword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSUBQ m128 xmm
|
|
// PSUBQ xmm xmm
|
|
//
|
|
// Construct and append a PSUBQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func PSUBQ(mx, x operand.Op) { ctx.PSUBQ(mx, x) }
|
|
|
|
// PSUBSB: Subtract Packed Signed Byte Integers with Signed Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSUBSB m128 xmm
|
|
// PSUBSB xmm xmm
|
|
//
|
|
// Construct and append a PSUBSB instruction to the active function.
|
|
func (c *Context) PSUBSB(mx, x operand.Op) {
|
|
c.addinstruction(x86.PSUBSB(mx, x))
|
|
}
|
|
|
|
// PSUBSB: Subtract Packed Signed Byte Integers with Signed Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSUBSB m128 xmm
|
|
// PSUBSB xmm xmm
|
|
//
|
|
// Construct and append a PSUBSB instruction to the active function.
|
|
// Operates on the global context.
|
|
func PSUBSB(mx, x operand.Op) {
	ctx.PSUBSB(mx, x)
}
|
|
|
|
// PSUBSW: Subtract Packed Signed Word Integers with Signed Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSUBSW m128 xmm
|
|
// PSUBSW xmm xmm
|
|
//
|
|
// Construct and append a PSUBSW instruction to the active function.
|
|
func (c *Context) PSUBSW(mx, x operand.Op) {
|
|
c.addinstruction(x86.PSUBSW(mx, x))
|
|
}
|
|
|
|
// PSUBSW: Subtract Packed Signed Word Integers with Signed Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSUBSW m128 xmm
|
|
// PSUBSW xmm xmm
|
|
//
|
|
// Construct and append a PSUBSW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PSUBSW(mx, x operand.Op) {
	ctx.PSUBSW(mx, x)
}
|
|
|
|
// PSUBUSB: Subtract Packed Unsigned Byte Integers with Unsigned Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSUBUSB m128 xmm
|
|
// PSUBUSB xmm xmm
|
|
//
|
|
// Construct and append a PSUBUSB instruction to the active function.
|
|
func (c *Context) PSUBUSB(mx, x operand.Op) {
|
|
c.addinstruction(x86.PSUBUSB(mx, x))
|
|
}
|
|
|
|
// PSUBUSB: Subtract Packed Unsigned Byte Integers with Unsigned Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSUBUSB m128 xmm
|
|
// PSUBUSB xmm xmm
|
|
//
|
|
// Construct and append a PSUBUSB instruction to the active function.
|
|
// Operates on the global context.
|
|
func PSUBUSB(mx, x operand.Op) {
	ctx.PSUBUSB(mx, x)
}
|
|
|
|
// PSUBUSW: Subtract Packed Unsigned Word Integers with Unsigned Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSUBUSW m128 xmm
|
|
// PSUBUSW xmm xmm
|
|
//
|
|
// Construct and append a PSUBUSW instruction to the active function.
|
|
func (c *Context) PSUBUSW(mx, x operand.Op) {
|
|
c.addinstruction(x86.PSUBUSW(mx, x))
|
|
}
|
|
|
|
// PSUBUSW: Subtract Packed Unsigned Word Integers with Unsigned Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSUBUSW m128 xmm
|
|
// PSUBUSW xmm xmm
|
|
//
|
|
// Construct and append a PSUBUSW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PSUBUSW(mx, x operand.Op) {
	ctx.PSUBUSW(mx, x)
}
|
|
|
|
// PSUBW: Subtract Packed Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSUBW m128 xmm
|
|
// PSUBW xmm xmm
|
|
//
|
|
// Construct and append a PSUBW instruction to the active function.
|
|
func (c *Context) PSUBW(mx, x operand.Op) {
|
|
c.addinstruction(x86.PSUBW(mx, x))
|
|
}
|
|
|
|
// PSUBW: Subtract Packed Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PSUBW m128 xmm
|
|
// PSUBW xmm xmm
|
|
//
|
|
// Construct and append a PSUBW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PSUBW(mx, x operand.Op) {
	ctx.PSUBW(mx, x)
}
|
|
|
|
// PTEST: Packed Logical Compare.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PTEST m128 xmm
|
|
// PTEST xmm xmm
|
|
//
|
|
// Construct and append a PTEST instruction to the active function.
|
|
func (c *Context) PTEST(mx, x operand.Op) {
|
|
c.addinstruction(x86.PTEST(mx, x))
|
|
}
|
|
|
|
// PTEST: Packed Logical Compare.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PTEST m128 xmm
|
|
// PTEST xmm xmm
|
|
//
|
|
// Construct and append a PTEST instruction to the active function.
|
|
// Operates on the global context.
|
|
func PTEST(mx, x operand.Op) {
	ctx.PTEST(mx, x)
}
|
|
|
|
// PUNPCKHBW: Unpack and Interleave High-Order Bytes into Words.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PUNPCKHBW m128 xmm
|
|
// PUNPCKHBW xmm xmm
|
|
//
|
|
// Construct and append a PUNPCKHBW instruction to the active function.
|
|
func (c *Context) PUNPCKHBW(mx, x operand.Op) {
|
|
c.addinstruction(x86.PUNPCKHBW(mx, x))
|
|
}
|
|
|
|
// PUNPCKHBW: Unpack and Interleave High-Order Bytes into Words.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PUNPCKHBW m128 xmm
|
|
// PUNPCKHBW xmm xmm
|
|
//
|
|
// Construct and append a PUNPCKHBW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PUNPCKHBW(mx, x operand.Op) {
	ctx.PUNPCKHBW(mx, x)
}
|
|
|
|
// PUNPCKHLQ: Unpack and Interleave High-Order Doublewords into Quadwords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PUNPCKHLQ m128 xmm
|
|
// PUNPCKHLQ xmm xmm
|
|
//
|
|
// Construct and append a PUNPCKHLQ instruction to the active function.
|
|
func (c *Context) PUNPCKHLQ(mx, x operand.Op) {
|
|
c.addinstruction(x86.PUNPCKHLQ(mx, x))
|
|
}
|
|
|
|
// PUNPCKHLQ: Unpack and Interleave High-Order Doublewords into Quadwords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PUNPCKHLQ m128 xmm
|
|
// PUNPCKHLQ xmm xmm
|
|
//
|
|
// Construct and append a PUNPCKHLQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func PUNPCKHLQ(mx, x operand.Op) {
	ctx.PUNPCKHLQ(mx, x)
}
|
|
|
|
// PUNPCKHQDQ: Unpack and Interleave High-Order Quadwords into Double Quadwords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PUNPCKHQDQ m128 xmm
|
|
// PUNPCKHQDQ xmm xmm
|
|
//
|
|
// Construct and append a PUNPCKHQDQ instruction to the active function.
|
|
func (c *Context) PUNPCKHQDQ(mx, x operand.Op) {
|
|
c.addinstruction(x86.PUNPCKHQDQ(mx, x))
|
|
}
|
|
|
|
// PUNPCKHQDQ: Unpack and Interleave High-Order Quadwords into Double Quadwords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PUNPCKHQDQ m128 xmm
|
|
// PUNPCKHQDQ xmm xmm
|
|
//
|
|
// Construct and append a PUNPCKHQDQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func PUNPCKHQDQ(mx, x operand.Op) {
	ctx.PUNPCKHQDQ(mx, x)
}
|
|
|
|
// PUNPCKHWL: Unpack and Interleave High-Order Words into Doublewords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PUNPCKHWL m128 xmm
|
|
// PUNPCKHWL xmm xmm
|
|
//
|
|
// Construct and append a PUNPCKHWL instruction to the active function.
|
|
func (c *Context) PUNPCKHWL(mx, x operand.Op) {
|
|
c.addinstruction(x86.PUNPCKHWL(mx, x))
|
|
}
|
|
|
|
// PUNPCKHWL: Unpack and Interleave High-Order Words into Doublewords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PUNPCKHWL m128 xmm
|
|
// PUNPCKHWL xmm xmm
|
|
//
|
|
// Construct and append a PUNPCKHWL instruction to the active function.
|
|
// Operates on the global context.
|
|
func PUNPCKHWL(mx, x operand.Op) {
	ctx.PUNPCKHWL(mx, x)
}
|
|
|
|
// PUNPCKLBW: Unpack and Interleave Low-Order Bytes into Words.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PUNPCKLBW m128 xmm
|
|
// PUNPCKLBW xmm xmm
|
|
//
|
|
// Construct and append a PUNPCKLBW instruction to the active function.
|
|
func (c *Context) PUNPCKLBW(mx, x operand.Op) {
|
|
c.addinstruction(x86.PUNPCKLBW(mx, x))
|
|
}
|
|
|
|
// PUNPCKLBW: Unpack and Interleave Low-Order Bytes into Words.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PUNPCKLBW m128 xmm
|
|
// PUNPCKLBW xmm xmm
|
|
//
|
|
// Construct and append a PUNPCKLBW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PUNPCKLBW(mx, x operand.Op) {
	ctx.PUNPCKLBW(mx, x)
}
|
|
|
|
// PUNPCKLLQ: Unpack and Interleave Low-Order Doublewords into Quadwords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PUNPCKLLQ m128 xmm
|
|
// PUNPCKLLQ xmm xmm
|
|
//
|
|
// Construct and append a PUNPCKLLQ instruction to the active function.
|
|
func (c *Context) PUNPCKLLQ(mx, x operand.Op) {
|
|
c.addinstruction(x86.PUNPCKLLQ(mx, x))
|
|
}
|
|
|
|
// PUNPCKLLQ: Unpack and Interleave Low-Order Doublewords into Quadwords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PUNPCKLLQ m128 xmm
|
|
// PUNPCKLLQ xmm xmm
|
|
//
|
|
// Construct and append a PUNPCKLLQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func PUNPCKLLQ(mx, x operand.Op) {
	ctx.PUNPCKLLQ(mx, x)
}
|
|
|
|
// PUNPCKLQDQ: Unpack and Interleave Low-Order Quadwords into Double Quadwords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PUNPCKLQDQ m128 xmm
|
|
// PUNPCKLQDQ xmm xmm
|
|
//
|
|
// Construct and append a PUNPCKLQDQ instruction to the active function.
|
|
func (c *Context) PUNPCKLQDQ(mx, x operand.Op) {
|
|
c.addinstruction(x86.PUNPCKLQDQ(mx, x))
|
|
}
|
|
|
|
// PUNPCKLQDQ: Unpack and Interleave Low-Order Quadwords into Double Quadwords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PUNPCKLQDQ m128 xmm
|
|
// PUNPCKLQDQ xmm xmm
|
|
//
|
|
// Construct and append a PUNPCKLQDQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func PUNPCKLQDQ(mx, x operand.Op) {
	ctx.PUNPCKLQDQ(mx, x)
}
|
|
|
|
// PUNPCKLWL: Unpack and Interleave Low-Order Words into Doublewords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PUNPCKLWL m128 xmm
|
|
// PUNPCKLWL xmm xmm
|
|
//
|
|
// Construct and append a PUNPCKLWL instruction to the active function.
|
|
func (c *Context) PUNPCKLWL(mx, x operand.Op) {
|
|
c.addinstruction(x86.PUNPCKLWL(mx, x))
|
|
}
|
|
|
|
// PUNPCKLWL: Unpack and Interleave Low-Order Words into Doublewords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PUNPCKLWL m128 xmm
|
|
// PUNPCKLWL xmm xmm
|
|
//
|
|
// Construct and append a PUNPCKLWL instruction to the active function.
|
|
// Operates on the global context.
|
|
func PUNPCKLWL(mx, x operand.Op) {
	ctx.PUNPCKLWL(mx, x)
}
|
|
|
|
// PUSHQ: Push Value Onto the Stack.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PUSHQ imm32
|
|
// PUSHQ imm8
|
|
// PUSHQ m64
|
|
// PUSHQ r64
|
|
//
|
|
// Construct and append a PUSHQ instruction to the active function.
|
|
func (c *Context) PUSHQ(imr operand.Op) {
|
|
c.addinstruction(x86.PUSHQ(imr))
|
|
}
|
|
|
|
// PUSHQ: Push Value Onto the Stack.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PUSHQ imm32
|
|
// PUSHQ imm8
|
|
// PUSHQ m64
|
|
// PUSHQ r64
|
|
//
|
|
// Construct and append a PUSHQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func PUSHQ(imr operand.Op) {
	ctx.PUSHQ(imr)
}
|
|
|
|
// PUSHW: Push Value Onto the Stack.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PUSHW m16
|
|
// PUSHW r16
|
|
//
|
|
// Construct and append a PUSHW instruction to the active function.
|
|
func (c *Context) PUSHW(mr operand.Op) {
|
|
c.addinstruction(x86.PUSHW(mr))
|
|
}
|
|
|
|
// PUSHW: Push Value Onto the Stack.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PUSHW m16
|
|
// PUSHW r16
|
|
//
|
|
// Construct and append a PUSHW instruction to the active function.
|
|
// Operates on the global context.
|
|
func PUSHW(mr operand.Op) {
	ctx.PUSHW(mr)
}
|
|
|
|
// PXOR: Packed Bitwise Logical Exclusive OR.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PXOR m128 xmm
|
|
// PXOR xmm xmm
|
|
//
|
|
// Construct and append a PXOR instruction to the active function.
|
|
func (c *Context) PXOR(mx, x operand.Op) {
|
|
c.addinstruction(x86.PXOR(mx, x))
|
|
}
|
|
|
|
// PXOR: Packed Bitwise Logical Exclusive OR.
|
|
//
|
|
// Forms:
|
|
//
|
|
// PXOR m128 xmm
|
|
// PXOR xmm xmm
|
|
//
|
|
// Construct and append a PXOR instruction to the active function.
|
|
// Operates on the global context.
|
|
func PXOR(mx, x operand.Op) {
	ctx.PXOR(mx, x)
}
|
|
|
|
// RCLB: Rotate Left through Carry Flag.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RCLB 1 m8
|
|
// RCLB 1 r8
|
|
// RCLB cl m8
|
|
// RCLB cl r8
|
|
// RCLB imm8 m8
|
|
// RCLB imm8 r8
|
|
//
|
|
// Construct and append a RCLB instruction to the active function.
|
|
func (c *Context) RCLB(ci, mr operand.Op) {
|
|
c.addinstruction(x86.RCLB(ci, mr))
|
|
}
|
|
|
|
// RCLB: Rotate Left through Carry Flag.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RCLB 1 m8
|
|
// RCLB 1 r8
|
|
// RCLB cl m8
|
|
// RCLB cl r8
|
|
// RCLB imm8 m8
|
|
// RCLB imm8 r8
|
|
//
|
|
// Construct and append a RCLB instruction to the active function.
|
|
// Operates on the global context.
|
|
func RCLB(ci, mr operand.Op) {
	ctx.RCLB(ci, mr)
}
|
|
|
|
// RCLL: Rotate Left through Carry Flag.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RCLL 1 m32
|
|
// RCLL 1 r32
|
|
// RCLL cl m32
|
|
// RCLL cl r32
|
|
// RCLL imm8 m32
|
|
// RCLL imm8 r32
|
|
//
|
|
// Construct and append a RCLL instruction to the active function.
|
|
func (c *Context) RCLL(ci, mr operand.Op) {
|
|
c.addinstruction(x86.RCLL(ci, mr))
|
|
}
|
|
|
|
// RCLL: Rotate Left through Carry Flag.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RCLL 1 m32
|
|
// RCLL 1 r32
|
|
// RCLL cl m32
|
|
// RCLL cl r32
|
|
// RCLL imm8 m32
|
|
// RCLL imm8 r32
|
|
//
|
|
// Construct and append a RCLL instruction to the active function.
|
|
// Operates on the global context.
|
|
func RCLL(ci, mr operand.Op) {
	ctx.RCLL(ci, mr)
}
|
|
|
|
// RCLQ: Rotate Left through Carry Flag.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RCLQ 1 m64
|
|
// RCLQ 1 r64
|
|
// RCLQ cl m64
|
|
// RCLQ cl r64
|
|
// RCLQ imm8 m64
|
|
// RCLQ imm8 r64
|
|
//
|
|
// Construct and append a RCLQ instruction to the active function.
|
|
func (c *Context) RCLQ(ci, mr operand.Op) {
|
|
c.addinstruction(x86.RCLQ(ci, mr))
|
|
}
|
|
|
|
// RCLQ: Rotate Left through Carry Flag.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RCLQ 1 m64
|
|
// RCLQ 1 r64
|
|
// RCLQ cl m64
|
|
// RCLQ cl r64
|
|
// RCLQ imm8 m64
|
|
// RCLQ imm8 r64
|
|
//
|
|
// Construct and append a RCLQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func RCLQ(ci, mr operand.Op) {
	ctx.RCLQ(ci, mr)
}
|
|
|
|
// RCLW: Rotate Left through Carry Flag.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RCLW 1 m16
|
|
// RCLW 1 r16
|
|
// RCLW cl m16
|
|
// RCLW cl r16
|
|
// RCLW imm8 m16
|
|
// RCLW imm8 r16
|
|
//
|
|
// Construct and append a RCLW instruction to the active function.
|
|
func (c *Context) RCLW(ci, mr operand.Op) {
|
|
c.addinstruction(x86.RCLW(ci, mr))
|
|
}
|
|
|
|
// RCLW: Rotate Left through Carry Flag.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RCLW 1 m16
|
|
// RCLW 1 r16
|
|
// RCLW cl m16
|
|
// RCLW cl r16
|
|
// RCLW imm8 m16
|
|
// RCLW imm8 r16
|
|
//
|
|
// Construct and append a RCLW instruction to the active function.
|
|
// Operates on the global context.
|
|
func RCLW(ci, mr operand.Op) {
	ctx.RCLW(ci, mr)
}
|
|
|
|
// RCPPS: Compute Approximate Reciprocals of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RCPPS m128 xmm
|
|
// RCPPS xmm xmm
|
|
//
|
|
// Construct and append a RCPPS instruction to the active function.
|
|
func (c *Context) RCPPS(mx, x operand.Op) {
|
|
c.addinstruction(x86.RCPPS(mx, x))
|
|
}
|
|
|
|
// RCPPS: Compute Approximate Reciprocals of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RCPPS m128 xmm
|
|
// RCPPS xmm xmm
|
|
//
|
|
// Construct and append a RCPPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func RCPPS(mx, x operand.Op) {
	ctx.RCPPS(mx, x)
}
|
|
|
|
// RCPSS: Compute Approximate Reciprocal of Scalar Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RCPSS m32 xmm
|
|
// RCPSS xmm xmm
|
|
//
|
|
// Construct and append a RCPSS instruction to the active function.
|
|
func (c *Context) RCPSS(mx, x operand.Op) {
|
|
c.addinstruction(x86.RCPSS(mx, x))
|
|
}
|
|
|
|
// RCPSS: Compute Approximate Reciprocal of Scalar Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RCPSS m32 xmm
|
|
// RCPSS xmm xmm
|
|
//
|
|
// Construct and append a RCPSS instruction to the active function.
|
|
// Operates on the global context.
|
|
func RCPSS(mx, x operand.Op) {
	ctx.RCPSS(mx, x)
}
|
|
|
|
// RCRB: Rotate Right through Carry Flag.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RCRB 1 m8
|
|
// RCRB 1 r8
|
|
// RCRB cl m8
|
|
// RCRB cl r8
|
|
// RCRB imm8 m8
|
|
// RCRB imm8 r8
|
|
//
|
|
// Construct and append a RCRB instruction to the active function.
|
|
func (c *Context) RCRB(ci, mr operand.Op) {
|
|
c.addinstruction(x86.RCRB(ci, mr))
|
|
}
|
|
|
|
// RCRB: Rotate Right through Carry Flag.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RCRB 1 m8
|
|
// RCRB 1 r8
|
|
// RCRB cl m8
|
|
// RCRB cl r8
|
|
// RCRB imm8 m8
|
|
// RCRB imm8 r8
|
|
//
|
|
// Construct and append a RCRB instruction to the active function.
|
|
// Operates on the global context.
|
|
func RCRB(ci, mr operand.Op) {
	ctx.RCRB(ci, mr)
}
|
|
|
|
// RCRL: Rotate Right through Carry Flag.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RCRL 1 m32
|
|
// RCRL 1 r32
|
|
// RCRL cl m32
|
|
// RCRL cl r32
|
|
// RCRL imm8 m32
|
|
// RCRL imm8 r32
|
|
//
|
|
// Construct and append a RCRL instruction to the active function.
|
|
func (c *Context) RCRL(ci, mr operand.Op) {
|
|
c.addinstruction(x86.RCRL(ci, mr))
|
|
}
|
|
|
|
// RCRL: Rotate Right through Carry Flag.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RCRL 1 m32
|
|
// RCRL 1 r32
|
|
// RCRL cl m32
|
|
// RCRL cl r32
|
|
// RCRL imm8 m32
|
|
// RCRL imm8 r32
|
|
//
|
|
// Construct and append a RCRL instruction to the active function.
|
|
// Operates on the global context.
|
|
func RCRL(ci, mr operand.Op) {
	ctx.RCRL(ci, mr)
}
|
|
|
|
// RCRQ: Rotate Right through Carry Flag.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RCRQ 1 m64
|
|
// RCRQ 1 r64
|
|
// RCRQ cl m64
|
|
// RCRQ cl r64
|
|
// RCRQ imm8 m64
|
|
// RCRQ imm8 r64
|
|
//
|
|
// Construct and append a RCRQ instruction to the active function.
|
|
func (c *Context) RCRQ(ci, mr operand.Op) {
|
|
c.addinstruction(x86.RCRQ(ci, mr))
|
|
}
|
|
|
|
// RCRQ: Rotate Right through Carry Flag.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RCRQ 1 m64
|
|
// RCRQ 1 r64
|
|
// RCRQ cl m64
|
|
// RCRQ cl r64
|
|
// RCRQ imm8 m64
|
|
// RCRQ imm8 r64
|
|
//
|
|
// Construct and append a RCRQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func RCRQ(ci, mr operand.Op) {
	ctx.RCRQ(ci, mr)
}
|
|
|
|
// RCRW: Rotate Right through Carry Flag.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RCRW 1 m16
|
|
// RCRW 1 r16
|
|
// RCRW cl m16
|
|
// RCRW cl r16
|
|
// RCRW imm8 m16
|
|
// RCRW imm8 r16
|
|
//
|
|
// Construct and append a RCRW instruction to the active function.
|
|
func (c *Context) RCRW(ci, mr operand.Op) {
|
|
c.addinstruction(x86.RCRW(ci, mr))
|
|
}
|
|
|
|
// RCRW: Rotate Right through Carry Flag.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RCRW 1 m16
|
|
// RCRW 1 r16
|
|
// RCRW cl m16
|
|
// RCRW cl r16
|
|
// RCRW imm8 m16
|
|
// RCRW imm8 r16
|
|
//
|
|
// Construct and append a RCRW instruction to the active function.
|
|
// Operates on the global context.
|
|
func RCRW(ci, mr operand.Op) {
	ctx.RCRW(ci, mr)
}
|
|
|
|
// RDRANDL: Read Random Number.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RDRANDL r16
|
|
// RDRANDL r32
|
|
// RDRANDL r64
|
|
//
|
|
// Construct and append a RDRANDL instruction to the active function.
|
|
func (c *Context) RDRANDL(r operand.Op) {
|
|
c.addinstruction(x86.RDRANDL(r))
|
|
}
|
|
|
|
// RDRANDL: Read Random Number.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RDRANDL r16
|
|
// RDRANDL r32
|
|
// RDRANDL r64
|
|
//
|
|
// Construct and append a RDRANDL instruction to the active function.
|
|
// Operates on the global context.
|
|
func RDRANDL(r operand.Op) {
	ctx.RDRANDL(r)
}
|
|
|
|
// RDSEEDL: Read Random SEED.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RDSEEDL r16
|
|
// RDSEEDL r32
|
|
// RDSEEDL r64
|
|
//
|
|
// Construct and append a RDSEEDL instruction to the active function.
|
|
func (c *Context) RDSEEDL(r operand.Op) {
|
|
c.addinstruction(x86.RDSEEDL(r))
|
|
}
|
|
|
|
// RDSEEDL: Read Random SEED.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RDSEEDL r16
|
|
// RDSEEDL r32
|
|
// RDSEEDL r64
|
|
//
|
|
// Construct and append a RDSEEDL instruction to the active function.
|
|
// Operates on the global context.
|
|
func RDSEEDL(r operand.Op) {
	ctx.RDSEEDL(r)
}
|
|
|
|
// RDTSC: Read Time-Stamp Counter.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RDTSC
|
|
//
|
|
// Construct and append a RDTSC instruction to the active function.
|
|
func (c *Context) RDTSC() {
|
|
c.addinstruction(x86.RDTSC())
|
|
}
|
|
|
|
// RDTSC: Read Time-Stamp Counter.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RDTSC
|
|
//
|
|
// Construct and append a RDTSC instruction to the active function.
|
|
// Operates on the global context.
|
|
func RDTSC() {
	ctx.RDTSC()
}
|
|
|
|
// RDTSCP: Read Time-Stamp Counter and Processor ID.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RDTSCP
|
|
//
|
|
// Construct and append a RDTSCP instruction to the active function.
|
|
func (c *Context) RDTSCP() {
|
|
c.addinstruction(x86.RDTSCP())
|
|
}
|
|
|
|
// RDTSCP: Read Time-Stamp Counter and Processor ID.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RDTSCP
|
|
//
|
|
// Construct and append a RDTSCP instruction to the active function.
|
|
// Operates on the global context.
|
|
func RDTSCP() {
	ctx.RDTSCP()
}
|
|
|
|
// RET: Return from Procedure.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RET
|
|
//
|
|
// Construct and append a RET instruction to the active function.
|
|
func (c *Context) RET() {
|
|
c.addinstruction(x86.RET())
|
|
}
|
|
|
|
// RET: Return from Procedure.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RET
|
|
//
|
|
// Construct and append a RET instruction to the active function.
|
|
// Operates on the global context.
|
|
func RET() {
	ctx.RET()
}
|
|
|
|
// RETFL: Return from Procedure.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RETFL imm16
|
|
//
|
|
// Construct and append a RETFL instruction to the active function.
|
|
func (c *Context) RETFL(i operand.Op) {
|
|
c.addinstruction(x86.RETFL(i))
|
|
}
|
|
|
|
// RETFL: Return from Procedure.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RETFL imm16
|
|
//
|
|
// Construct and append a RETFL instruction to the active function.
|
|
// Operates on the global context.
|
|
func RETFL(i operand.Op) {
	ctx.RETFL(i)
}
|
|
|
|
// RETFQ: Return from Procedure.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RETFQ imm16
|
|
//
|
|
// Construct and append a RETFQ instruction to the active function.
|
|
func (c *Context) RETFQ(i operand.Op) {
|
|
c.addinstruction(x86.RETFQ(i))
|
|
}
|
|
|
|
// RETFQ: Return from Procedure.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RETFQ imm16
|
|
//
|
|
// Construct and append a RETFQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func RETFQ(i operand.Op) {
	ctx.RETFQ(i)
}
|
|
|
|
// RETFW: Return from Procedure.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RETFW imm16
|
|
//
|
|
// Construct and append a RETFW instruction to the active function.
|
|
func (c *Context) RETFW(i operand.Op) {
|
|
c.addinstruction(x86.RETFW(i))
|
|
}
|
|
|
|
// RETFW: Return from Procedure.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RETFW imm16
|
|
//
|
|
// Construct and append a RETFW instruction to the active function.
|
|
// Operates on the global context.
|
|
func RETFW(i operand.Op) {
	ctx.RETFW(i)
}
|
|
|
|
// ROLB: Rotate Left.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ROLB 1 m8
|
|
// ROLB 1 r8
|
|
// ROLB cl m8
|
|
// ROLB cl r8
|
|
// ROLB imm8 m8
|
|
// ROLB imm8 r8
|
|
//
|
|
// Construct and append a ROLB instruction to the active function.
|
|
func (c *Context) ROLB(ci, mr operand.Op) {
|
|
c.addinstruction(x86.ROLB(ci, mr))
|
|
}
|
|
|
|
// ROLB: Rotate Left.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ROLB 1 m8
|
|
// ROLB 1 r8
|
|
// ROLB cl m8
|
|
// ROLB cl r8
|
|
// ROLB imm8 m8
|
|
// ROLB imm8 r8
|
|
//
|
|
// Construct and append a ROLB instruction to the active function.
|
|
// Operates on the global context.
|
|
func ROLB(ci, mr operand.Op) {
	ctx.ROLB(ci, mr)
}
|
|
|
|
// ROLL: Rotate Left.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ROLL 1 m32
|
|
// ROLL 1 r32
|
|
// ROLL cl m32
|
|
// ROLL cl r32
|
|
// ROLL imm8 m32
|
|
// ROLL imm8 r32
|
|
//
|
|
// Construct and append a ROLL instruction to the active function.
|
|
func (c *Context) ROLL(ci, mr operand.Op) {
|
|
c.addinstruction(x86.ROLL(ci, mr))
|
|
}
|
|
|
|
// ROLL: Rotate Left.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ROLL 1 m32
|
|
// ROLL 1 r32
|
|
// ROLL cl m32
|
|
// ROLL cl r32
|
|
// ROLL imm8 m32
|
|
// ROLL imm8 r32
|
|
//
|
|
// Construct and append a ROLL instruction to the active function.
|
|
// Operates on the global context.
|
|
func ROLL(ci, mr operand.Op) {
	ctx.ROLL(ci, mr)
}
|
|
|
|
// ROLQ: Rotate Left.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ROLQ 1 m64
|
|
// ROLQ 1 r64
|
|
// ROLQ cl m64
|
|
// ROLQ cl r64
|
|
// ROLQ imm8 m64
|
|
// ROLQ imm8 r64
|
|
//
|
|
// Construct and append a ROLQ instruction to the active function.
|
|
func (c *Context) ROLQ(ci, mr operand.Op) {
|
|
c.addinstruction(x86.ROLQ(ci, mr))
|
|
}
|
|
|
|
// ROLQ: Rotate Left.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ROLQ 1 m64
|
|
// ROLQ 1 r64
|
|
// ROLQ cl m64
|
|
// ROLQ cl r64
|
|
// ROLQ imm8 m64
|
|
// ROLQ imm8 r64
|
|
//
|
|
// Construct and append a ROLQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func ROLQ(ci, mr operand.Op) {
	ctx.ROLQ(ci, mr)
}
|
|
|
|
// ROLW: Rotate Left.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ROLW 1 m16
|
|
// ROLW 1 r16
|
|
// ROLW cl m16
|
|
// ROLW cl r16
|
|
// ROLW imm8 m16
|
|
// ROLW imm8 r16
|
|
//
|
|
// Construct and append a ROLW instruction to the active function.
|
|
func (c *Context) ROLW(ci, mr operand.Op) {
|
|
c.addinstruction(x86.ROLW(ci, mr))
|
|
}
|
|
|
|
// ROLW: Rotate Left.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ROLW 1 m16
|
|
// ROLW 1 r16
|
|
// ROLW cl m16
|
|
// ROLW cl r16
|
|
// ROLW imm8 m16
|
|
// ROLW imm8 r16
|
|
//
|
|
// Construct and append a ROLW instruction to the active function.
|
|
// Operates on the global context.
|
|
func ROLW(ci, mr operand.Op) {
	ctx.ROLW(ci, mr)
}
|
|
|
|
// RORB: Rotate Right.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RORB 1 m8
|
|
// RORB 1 r8
|
|
// RORB cl m8
|
|
// RORB cl r8
|
|
// RORB imm8 m8
|
|
// RORB imm8 r8
|
|
//
|
|
// Construct and append a RORB instruction to the active function.
|
|
func (c *Context) RORB(ci, mr operand.Op) {
|
|
c.addinstruction(x86.RORB(ci, mr))
|
|
}
|
|
|
|
// RORB: Rotate Right.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RORB 1 m8
|
|
// RORB 1 r8
|
|
// RORB cl m8
|
|
// RORB cl r8
|
|
// RORB imm8 m8
|
|
// RORB imm8 r8
|
|
//
|
|
// Construct and append a RORB instruction to the active function.
|
|
// Operates on the global context.
|
|
func RORB(ci, mr operand.Op) {
	ctx.RORB(ci, mr)
}
|
|
|
|
// RORL: Rotate Right.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RORL 1 m32
|
|
// RORL 1 r32
|
|
// RORL cl m32
|
|
// RORL cl r32
|
|
// RORL imm8 m32
|
|
// RORL imm8 r32
|
|
//
|
|
// Construct and append a RORL instruction to the active function.
|
|
func (c *Context) RORL(ci, mr operand.Op) {
|
|
c.addinstruction(x86.RORL(ci, mr))
|
|
}
|
|
|
|
// RORL: Rotate Right.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RORL 1 m32
|
|
// RORL 1 r32
|
|
// RORL cl m32
|
|
// RORL cl r32
|
|
// RORL imm8 m32
|
|
// RORL imm8 r32
|
|
//
|
|
// Construct and append a RORL instruction to the active function.
|
|
// Operates on the global context.
|
|
func RORL(ci, mr operand.Op) {
	ctx.RORL(ci, mr)
}
|
|
|
|
// RORQ: Rotate Right.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RORQ 1 m64
|
|
// RORQ 1 r64
|
|
// RORQ cl m64
|
|
// RORQ cl r64
|
|
// RORQ imm8 m64
|
|
// RORQ imm8 r64
|
|
//
|
|
// Construct and append a RORQ instruction to the active function.
|
|
func (c *Context) RORQ(ci, mr operand.Op) {
|
|
c.addinstruction(x86.RORQ(ci, mr))
|
|
}
|
|
|
|
// RORQ: Rotate Right.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RORQ 1 m64
|
|
// RORQ 1 r64
|
|
// RORQ cl m64
|
|
// RORQ cl r64
|
|
// RORQ imm8 m64
|
|
// RORQ imm8 r64
|
|
//
|
|
// Construct and append a RORQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func RORQ(ci, mr operand.Op) {
	ctx.RORQ(ci, mr)
}
|
|
|
|
// RORW: Rotate Right.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RORW 1 m16
|
|
// RORW 1 r16
|
|
// RORW cl m16
|
|
// RORW cl r16
|
|
// RORW imm8 m16
|
|
// RORW imm8 r16
|
|
//
|
|
// Construct and append a RORW instruction to the active function.
|
|
func (c *Context) RORW(ci, mr operand.Op) {
|
|
c.addinstruction(x86.RORW(ci, mr))
|
|
}
|
|
|
|
// RORW: Rotate Right.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RORW 1 m16
|
|
// RORW 1 r16
|
|
// RORW cl m16
|
|
// RORW cl r16
|
|
// RORW imm8 m16
|
|
// RORW imm8 r16
|
|
//
|
|
// Construct and append a RORW instruction to the active function.
|
|
// Operates on the global context.
|
|
func RORW(ci, mr operand.Op) {
	ctx.RORW(ci, mr)
}
|
|
|
|
// RORXL: Rotate Right Logical Without Affecting Flags.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RORXL imm8 m32 r32
|
|
// RORXL imm8 r32 r32
|
|
//
|
|
// Construct and append a RORXL instruction to the active function.
|
|
func (c *Context) RORXL(i, mr, r operand.Op) {
|
|
c.addinstruction(x86.RORXL(i, mr, r))
|
|
}
|
|
|
|
// RORXL: Rotate Right Logical Without Affecting Flags.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RORXL imm8 m32 r32
|
|
// RORXL imm8 r32 r32
|
|
//
|
|
// Construct and append a RORXL instruction to the active function.
|
|
// Operates on the global context.
|
|
func RORXL(i, mr, r operand.Op) {
	ctx.RORXL(i, mr, r)
}
|
|
|
|
// RORXQ: Rotate Right Logical Without Affecting Flags.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RORXQ imm8 m64 r64
|
|
// RORXQ imm8 r64 r64
|
|
//
|
|
// Construct and append a RORXQ instruction to the active function.
|
|
func (c *Context) RORXQ(i, mr, r operand.Op) {
|
|
c.addinstruction(x86.RORXQ(i, mr, r))
|
|
}
|
|
|
|
// RORXQ: Rotate Right Logical Without Affecting Flags.
|
|
//
|
|
// Forms:
|
|
//
|
|
// RORXQ imm8 m64 r64
|
|
// RORXQ imm8 r64 r64
|
|
//
|
|
// Construct and append a RORXQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func RORXQ(i, mr, r operand.Op) {
	ctx.RORXQ(i, mr, r)
}
|
|
|
|
// ROUNDPD: Round Packed Double Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ROUNDPD imm8 m128 xmm
|
|
// ROUNDPD imm8 xmm xmm
|
|
//
|
|
// Construct and append a ROUNDPD instruction to the active function.
|
|
func (c *Context) ROUNDPD(i, mx, x operand.Op) {
|
|
c.addinstruction(x86.ROUNDPD(i, mx, x))
|
|
}
|
|
|
|
// ROUNDPD: Round Packed Double Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ROUNDPD imm8 m128 xmm
|
|
// ROUNDPD imm8 xmm xmm
|
|
//
|
|
// Construct and append a ROUNDPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func ROUNDPD(i, mx, x operand.Op) {
	ctx.ROUNDPD(i, mx, x)
}
|
|
|
|
// ROUNDPS: Round Packed Single Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ROUNDPS imm8 m128 xmm
|
|
// ROUNDPS imm8 xmm xmm
|
|
//
|
|
// Construct and append a ROUNDPS instruction to the active function.
|
|
func (c *Context) ROUNDPS(i, mx, x operand.Op) {
|
|
c.addinstruction(x86.ROUNDPS(i, mx, x))
|
|
}
|
|
|
|
// ROUNDPS: Round Packed Single Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ROUNDPS imm8 m128 xmm
|
|
// ROUNDPS imm8 xmm xmm
|
|
//
|
|
// Construct and append a ROUNDPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func ROUNDPS(i, mx, x operand.Op) {
	ctx.ROUNDPS(i, mx, x)
}
|
|
|
|
// ROUNDSD: Round Scalar Double Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ROUNDSD imm8 m64 xmm
|
|
// ROUNDSD imm8 xmm xmm
|
|
//
|
|
// Construct and append a ROUNDSD instruction to the active function.
|
|
func (c *Context) ROUNDSD(i, mx, x operand.Op) {
|
|
c.addinstruction(x86.ROUNDSD(i, mx, x))
|
|
}
|
|
|
|
// ROUNDSD: Round Scalar Double Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ROUNDSD imm8 m64 xmm
|
|
// ROUNDSD imm8 xmm xmm
|
|
//
|
|
// Construct and append a ROUNDSD instruction to the active function.
|
|
// Operates on the global context.
|
|
func ROUNDSD(i, mx, x operand.Op) {
	ctx.ROUNDSD(i, mx, x)
}
|
|
|
|
// ROUNDSS: Round Scalar Single Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ROUNDSS imm8 m32 xmm
|
|
// ROUNDSS imm8 xmm xmm
|
|
//
|
|
// Construct and append a ROUNDSS instruction to the active function.
|
|
func (c *Context) ROUNDSS(i, mx, x operand.Op) {
|
|
c.addinstruction(x86.ROUNDSS(i, mx, x))
|
|
}
|
|
|
|
// ROUNDSS: Round Scalar Single Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// ROUNDSS imm8 m32 xmm
|
|
// ROUNDSS imm8 xmm xmm
|
|
//
|
|
// Construct and append a ROUNDSS instruction to the active function.
|
|
// Operates on the global context.
|
|
func ROUNDSS(i, mx, x operand.Op) {
	ctx.ROUNDSS(i, mx, x)
}
|
|
|
|
// RSQRTPS: Compute Reciprocals of Square Roots of Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	RSQRTPS m128 xmm
//	RSQRTPS xmm xmm
//
// Construct and append a RSQRTPS instruction to the active function.
func (c *Context) RSQRTPS(mx, x operand.Op) {
	c.addinstruction(x86.RSQRTPS(mx, x))
}

// RSQRTPS: Compute Reciprocals of Square Roots of Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	RSQRTPS m128 xmm
//	RSQRTPS xmm xmm
//
// Construct and append a RSQRTPS instruction to the active function.
// Operates on the global context.
func RSQRTPS(mx, x operand.Op) { ctx.RSQRTPS(mx, x) }

// RSQRTSS: Compute Reciprocal of Square Root of Scalar Single-Precision Floating-Point Value.
//
// Forms:
//
//	RSQRTSS m32 xmm
//	RSQRTSS xmm xmm
//
// Construct and append a RSQRTSS instruction to the active function.
func (c *Context) RSQRTSS(mx, x operand.Op) {
	c.addinstruction(x86.RSQRTSS(mx, x))
}

// RSQRTSS: Compute Reciprocal of Square Root of Scalar Single-Precision Floating-Point Value.
//
// Forms:
//
//	RSQRTSS m32 xmm
//	RSQRTSS xmm xmm
//
// Construct and append a RSQRTSS instruction to the active function.
// Operates on the global context.
func RSQRTSS(mx, x operand.Op) { ctx.RSQRTSS(mx, x) }
|
|
|
|
// SALB: Arithmetic Shift Left.
//
// Forms:
//
//	SALB 1 m8
//	SALB 1 r8
//	SALB cl m8
//	SALB cl r8
//	SALB imm8 m8
//	SALB imm8 r8
//
// Construct and append a SALB instruction to the active function.
func (c *Context) SALB(ci, mr operand.Op) {
	c.addinstruction(x86.SALB(ci, mr))
}

// SALB: Arithmetic Shift Left.
//
// Forms:
//
//	SALB 1 m8
//	SALB 1 r8
//	SALB cl m8
//	SALB cl r8
//	SALB imm8 m8
//	SALB imm8 r8
//
// Construct and append a SALB instruction to the active function.
// Operates on the global context.
func SALB(ci, mr operand.Op) { ctx.SALB(ci, mr) }

// SALL: Arithmetic Shift Left.
//
// Forms:
//
//	SALL 1 m32
//	SALL 1 r32
//	SALL cl m32
//	SALL cl r32
//	SALL imm8 m32
//	SALL imm8 r32
//
// Construct and append a SALL instruction to the active function.
func (c *Context) SALL(ci, mr operand.Op) {
	c.addinstruction(x86.SALL(ci, mr))
}

// SALL: Arithmetic Shift Left.
//
// Forms:
//
//	SALL 1 m32
//	SALL 1 r32
//	SALL cl m32
//	SALL cl r32
//	SALL imm8 m32
//	SALL imm8 r32
//
// Construct and append a SALL instruction to the active function.
// Operates on the global context.
func SALL(ci, mr operand.Op) { ctx.SALL(ci, mr) }

// SALQ: Arithmetic Shift Left.
//
// Forms:
//
//	SALQ 1 m64
//	SALQ 1 r64
//	SALQ cl m64
//	SALQ cl r64
//	SALQ imm8 m64
//	SALQ imm8 r64
//
// Construct and append a SALQ instruction to the active function.
func (c *Context) SALQ(ci, mr operand.Op) {
	c.addinstruction(x86.SALQ(ci, mr))
}

// SALQ: Arithmetic Shift Left.
//
// Forms:
//
//	SALQ 1 m64
//	SALQ 1 r64
//	SALQ cl m64
//	SALQ cl r64
//	SALQ imm8 m64
//	SALQ imm8 r64
//
// Construct and append a SALQ instruction to the active function.
// Operates on the global context.
func SALQ(ci, mr operand.Op) { ctx.SALQ(ci, mr) }

// SALW: Arithmetic Shift Left.
//
// Forms:
//
//	SALW 1 m16
//	SALW 1 r16
//	SALW cl m16
//	SALW cl r16
//	SALW imm8 m16
//	SALW imm8 r16
//
// Construct and append a SALW instruction to the active function.
func (c *Context) SALW(ci, mr operand.Op) {
	c.addinstruction(x86.SALW(ci, mr))
}

// SALW: Arithmetic Shift Left.
//
// Forms:
//
//	SALW 1 m16
//	SALW 1 r16
//	SALW cl m16
//	SALW cl r16
//	SALW imm8 m16
//	SALW imm8 r16
//
// Construct and append a SALW instruction to the active function.
// Operates on the global context.
func SALW(ci, mr operand.Op) { ctx.SALW(ci, mr) }
|
|
|
|
// SARB: Arithmetic Shift Right.
//
// Forms:
//
//	SARB 1 m8
//	SARB 1 r8
//	SARB cl m8
//	SARB cl r8
//	SARB imm8 m8
//	SARB imm8 r8
//
// Construct and append a SARB instruction to the active function.
func (c *Context) SARB(ci, mr operand.Op) {
	c.addinstruction(x86.SARB(ci, mr))
}

// SARB: Arithmetic Shift Right.
//
// Forms:
//
//	SARB 1 m8
//	SARB 1 r8
//	SARB cl m8
//	SARB cl r8
//	SARB imm8 m8
//	SARB imm8 r8
//
// Construct and append a SARB instruction to the active function.
// Operates on the global context.
func SARB(ci, mr operand.Op) { ctx.SARB(ci, mr) }

// SARL: Arithmetic Shift Right.
//
// Forms:
//
//	SARL 1 m32
//	SARL 1 r32
//	SARL cl m32
//	SARL cl r32
//	SARL imm8 m32
//	SARL imm8 r32
//
// Construct and append a SARL instruction to the active function.
func (c *Context) SARL(ci, mr operand.Op) {
	c.addinstruction(x86.SARL(ci, mr))
}

// SARL: Arithmetic Shift Right.
//
// Forms:
//
//	SARL 1 m32
//	SARL 1 r32
//	SARL cl m32
//	SARL cl r32
//	SARL imm8 m32
//	SARL imm8 r32
//
// Construct and append a SARL instruction to the active function.
// Operates on the global context.
func SARL(ci, mr operand.Op) { ctx.SARL(ci, mr) }

// SARQ: Arithmetic Shift Right.
//
// Forms:
//
//	SARQ 1 m64
//	SARQ 1 r64
//	SARQ cl m64
//	SARQ cl r64
//	SARQ imm8 m64
//	SARQ imm8 r64
//
// Construct and append a SARQ instruction to the active function.
func (c *Context) SARQ(ci, mr operand.Op) {
	c.addinstruction(x86.SARQ(ci, mr))
}

// SARQ: Arithmetic Shift Right.
//
// Forms:
//
//	SARQ 1 m64
//	SARQ 1 r64
//	SARQ cl m64
//	SARQ cl r64
//	SARQ imm8 m64
//	SARQ imm8 r64
//
// Construct and append a SARQ instruction to the active function.
// Operates on the global context.
func SARQ(ci, mr operand.Op) { ctx.SARQ(ci, mr) }

// SARW: Arithmetic Shift Right.
//
// Forms:
//
//	SARW 1 m16
//	SARW 1 r16
//	SARW cl m16
//	SARW cl r16
//	SARW imm8 m16
//	SARW imm8 r16
//
// Construct and append a SARW instruction to the active function.
func (c *Context) SARW(ci, mr operand.Op) {
	c.addinstruction(x86.SARW(ci, mr))
}

// SARW: Arithmetic Shift Right.
//
// Forms:
//
//	SARW 1 m16
//	SARW 1 r16
//	SARW cl m16
//	SARW cl r16
//	SARW imm8 m16
//	SARW imm8 r16
//
// Construct and append a SARW instruction to the active function.
// Operates on the global context.
func SARW(ci, mr operand.Op) { ctx.SARW(ci, mr) }
|
|
|
|
// SARXL: Arithmetic Shift Right Without Affecting Flags.
//
// Forms:
//
//	SARXL r32 m32 r32
//	SARXL r32 r32 r32
//
// Construct and append a SARXL instruction to the active function.
func (c *Context) SARXL(r, mr, r1 operand.Op) {
	c.addinstruction(x86.SARXL(r, mr, r1))
}

// SARXL: Arithmetic Shift Right Without Affecting Flags.
//
// Forms:
//
//	SARXL r32 m32 r32
//	SARXL r32 r32 r32
//
// Construct and append a SARXL instruction to the active function.
// Operates on the global context.
func SARXL(r, mr, r1 operand.Op) { ctx.SARXL(r, mr, r1) }

// SARXQ: Arithmetic Shift Right Without Affecting Flags.
//
// Forms:
//
//	SARXQ r64 m64 r64
//	SARXQ r64 r64 r64
//
// Construct and append a SARXQ instruction to the active function.
func (c *Context) SARXQ(r, mr, r1 operand.Op) {
	c.addinstruction(x86.SARXQ(r, mr, r1))
}

// SARXQ: Arithmetic Shift Right Without Affecting Flags.
//
// Forms:
//
//	SARXQ r64 m64 r64
//	SARXQ r64 r64 r64
//
// Construct and append a SARXQ instruction to the active function.
// Operates on the global context.
func SARXQ(r, mr, r1 operand.Op) { ctx.SARXQ(r, mr, r1) }
|
|
|
|
// SBBB: Subtract with Borrow.
//
// Forms:
//
//	SBBB imm8 al
//	SBBB imm8 m8
//	SBBB imm8 r8
//	SBBB m8 r8
//	SBBB r8 m8
//	SBBB r8 r8
//
// Construct and append a SBBB instruction to the active function.
func (c *Context) SBBB(imr, amr operand.Op) {
	c.addinstruction(x86.SBBB(imr, amr))
}

// SBBB: Subtract with Borrow.
//
// Forms:
//
//	SBBB imm8 al
//	SBBB imm8 m8
//	SBBB imm8 r8
//	SBBB m8 r8
//	SBBB r8 m8
//	SBBB r8 r8
//
// Construct and append a SBBB instruction to the active function.
// Operates on the global context.
func SBBB(imr, amr operand.Op) { ctx.SBBB(imr, amr) }

// SBBL: Subtract with Borrow.
//
// Forms:
//
//	SBBL imm32 eax
//	SBBL imm32 m32
//	SBBL imm32 r32
//	SBBL imm8 m32
//	SBBL imm8 r32
//	SBBL m32 r32
//	SBBL r32 m32
//	SBBL r32 r32
//
// Construct and append a SBBL instruction to the active function.
func (c *Context) SBBL(imr, emr operand.Op) {
	c.addinstruction(x86.SBBL(imr, emr))
}

// SBBL: Subtract with Borrow.
//
// Forms:
//
//	SBBL imm32 eax
//	SBBL imm32 m32
//	SBBL imm32 r32
//	SBBL imm8 m32
//	SBBL imm8 r32
//	SBBL m32 r32
//	SBBL r32 m32
//	SBBL r32 r32
//
// Construct and append a SBBL instruction to the active function.
// Operates on the global context.
func SBBL(imr, emr operand.Op) { ctx.SBBL(imr, emr) }

// SBBQ: Subtract with Borrow.
//
// Forms:
//
//	SBBQ imm32 m64
//	SBBQ imm32 r64
//	SBBQ imm32 rax
//	SBBQ imm8 m64
//	SBBQ imm8 r64
//	SBBQ m64 r64
//	SBBQ r64 m64
//	SBBQ r64 r64
//
// Construct and append a SBBQ instruction to the active function.
func (c *Context) SBBQ(imr, mr operand.Op) {
	c.addinstruction(x86.SBBQ(imr, mr))
}

// SBBQ: Subtract with Borrow.
//
// Forms:
//
//	SBBQ imm32 m64
//	SBBQ imm32 r64
//	SBBQ imm32 rax
//	SBBQ imm8 m64
//	SBBQ imm8 r64
//	SBBQ m64 r64
//	SBBQ r64 m64
//	SBBQ r64 r64
//
// Construct and append a SBBQ instruction to the active function.
// Operates on the global context.
func SBBQ(imr, mr operand.Op) { ctx.SBBQ(imr, mr) }

// SBBW: Subtract with Borrow.
//
// Forms:
//
//	SBBW imm16 ax
//	SBBW imm16 m16
//	SBBW imm16 r16
//	SBBW imm8 m16
//	SBBW imm8 r16
//	SBBW m16 r16
//	SBBW r16 m16
//	SBBW r16 r16
//
// Construct and append a SBBW instruction to the active function.
func (c *Context) SBBW(imr, amr operand.Op) {
	c.addinstruction(x86.SBBW(imr, amr))
}

// SBBW: Subtract with Borrow.
//
// Forms:
//
//	SBBW imm16 ax
//	SBBW imm16 m16
//	SBBW imm16 r16
//	SBBW imm8 m16
//	SBBW imm8 r16
//	SBBW m16 r16
//	SBBW r16 m16
//	SBBW r16 r16
//
// Construct and append a SBBW instruction to the active function.
// Operates on the global context.
func SBBW(imr, amr operand.Op) { ctx.SBBW(imr, amr) }
|
|
|
|
// SETCC: Set byte if above or equal (CF == 0).
//
// Forms:
//
//	SETCC m8
//	SETCC r8
//
// Construct and append a SETCC instruction to the active function.
func (c *Context) SETCC(mr operand.Op) {
	c.addinstruction(x86.SETCC(mr))
}

// SETCC: Set byte if above or equal (CF == 0).
//
// Forms:
//
//	SETCC m8
//	SETCC r8
//
// Construct and append a SETCC instruction to the active function.
// Operates on the global context.
func SETCC(mr operand.Op) { ctx.SETCC(mr) }

// SETCS: Set byte if below (CF == 1).
//
// Forms:
//
//	SETCS m8
//	SETCS r8
//
// Construct and append a SETCS instruction to the active function.
func (c *Context) SETCS(mr operand.Op) {
	c.addinstruction(x86.SETCS(mr))
}

// SETCS: Set byte if below (CF == 1).
//
// Forms:
//
//	SETCS m8
//	SETCS r8
//
// Construct and append a SETCS instruction to the active function.
// Operates on the global context.
func SETCS(mr operand.Op) { ctx.SETCS(mr) }

// SETEQ: Set byte if equal (ZF == 1).
//
// Forms:
//
//	SETEQ m8
//	SETEQ r8
//
// Construct and append a SETEQ instruction to the active function.
func (c *Context) SETEQ(mr operand.Op) {
	c.addinstruction(x86.SETEQ(mr))
}

// SETEQ: Set byte if equal (ZF == 1).
//
// Forms:
//
//	SETEQ m8
//	SETEQ r8
//
// Construct and append a SETEQ instruction to the active function.
// Operates on the global context.
func SETEQ(mr operand.Op) { ctx.SETEQ(mr) }

// SETGE: Set byte if greater or equal (SF == OF).
//
// Forms:
//
//	SETGE m8
//	SETGE r8
//
// Construct and append a SETGE instruction to the active function.
func (c *Context) SETGE(mr operand.Op) {
	c.addinstruction(x86.SETGE(mr))
}

// SETGE: Set byte if greater or equal (SF == OF).
//
// Forms:
//
//	SETGE m8
//	SETGE r8
//
// Construct and append a SETGE instruction to the active function.
// Operates on the global context.
func SETGE(mr operand.Op) { ctx.SETGE(mr) }

// SETGT: Set byte if greater (ZF == 0 and SF == OF).
//
// Forms:
//
//	SETGT m8
//	SETGT r8
//
// Construct and append a SETGT instruction to the active function.
func (c *Context) SETGT(mr operand.Op) {
	c.addinstruction(x86.SETGT(mr))
}

// SETGT: Set byte if greater (ZF == 0 and SF == OF).
//
// Forms:
//
//	SETGT m8
//	SETGT r8
//
// Construct and append a SETGT instruction to the active function.
// Operates on the global context.
func SETGT(mr operand.Op) { ctx.SETGT(mr) }

// SETHI: Set byte if above (CF == 0 and ZF == 0).
//
// Forms:
//
//	SETHI m8
//	SETHI r8
//
// Construct and append a SETHI instruction to the active function.
func (c *Context) SETHI(mr operand.Op) {
	c.addinstruction(x86.SETHI(mr))
}

// SETHI: Set byte if above (CF == 0 and ZF == 0).
//
// Forms:
//
//	SETHI m8
//	SETHI r8
//
// Construct and append a SETHI instruction to the active function.
// Operates on the global context.
func SETHI(mr operand.Op) { ctx.SETHI(mr) }

// SETLE: Set byte if less or equal (ZF == 1 or SF != OF).
//
// Forms:
//
//	SETLE m8
//	SETLE r8
//
// Construct and append a SETLE instruction to the active function.
func (c *Context) SETLE(mr operand.Op) {
	c.addinstruction(x86.SETLE(mr))
}

// SETLE: Set byte if less or equal (ZF == 1 or SF != OF).
//
// Forms:
//
//	SETLE m8
//	SETLE r8
//
// Construct and append a SETLE instruction to the active function.
// Operates on the global context.
func SETLE(mr operand.Op) { ctx.SETLE(mr) }

// SETLS: Set byte if below or equal (CF == 1 or ZF == 1).
//
// Forms:
//
//	SETLS m8
//	SETLS r8
//
// Construct and append a SETLS instruction to the active function.
func (c *Context) SETLS(mr operand.Op) {
	c.addinstruction(x86.SETLS(mr))
}

// SETLS: Set byte if below or equal (CF == 1 or ZF == 1).
//
// Forms:
//
//	SETLS m8
//	SETLS r8
//
// Construct and append a SETLS instruction to the active function.
// Operates on the global context.
func SETLS(mr operand.Op) { ctx.SETLS(mr) }

// SETLT: Set byte if less (SF != OF).
//
// Forms:
//
//	SETLT m8
//	SETLT r8
//
// Construct and append a SETLT instruction to the active function.
func (c *Context) SETLT(mr operand.Op) {
	c.addinstruction(x86.SETLT(mr))
}

// SETLT: Set byte if less (SF != OF).
//
// Forms:
//
//	SETLT m8
//	SETLT r8
//
// Construct and append a SETLT instruction to the active function.
// Operates on the global context.
func SETLT(mr operand.Op) { ctx.SETLT(mr) }

// SETMI: Set byte if sign (SF == 1).
//
// Forms:
//
//	SETMI m8
//	SETMI r8
//
// Construct and append a SETMI instruction to the active function.
func (c *Context) SETMI(mr operand.Op) {
	c.addinstruction(x86.SETMI(mr))
}

// SETMI: Set byte if sign (SF == 1).
//
// Forms:
//
//	SETMI m8
//	SETMI r8
//
// Construct and append a SETMI instruction to the active function.
// Operates on the global context.
func SETMI(mr operand.Op) { ctx.SETMI(mr) }

// SETNE: Set byte if not equal (ZF == 0).
//
// Forms:
//
//	SETNE m8
//	SETNE r8
//
// Construct and append a SETNE instruction to the active function.
func (c *Context) SETNE(mr operand.Op) {
	c.addinstruction(x86.SETNE(mr))
}

// SETNE: Set byte if not equal (ZF == 0).
//
// Forms:
//
//	SETNE m8
//	SETNE r8
//
// Construct and append a SETNE instruction to the active function.
// Operates on the global context.
func SETNE(mr operand.Op) { ctx.SETNE(mr) }

// SETOC: Set byte if not overflow (OF == 0).
//
// Forms:
//
//	SETOC m8
//	SETOC r8
//
// Construct and append a SETOC instruction to the active function.
func (c *Context) SETOC(mr operand.Op) {
	c.addinstruction(x86.SETOC(mr))
}

// SETOC: Set byte if not overflow (OF == 0).
//
// Forms:
//
//	SETOC m8
//	SETOC r8
//
// Construct and append a SETOC instruction to the active function.
// Operates on the global context.
func SETOC(mr operand.Op) { ctx.SETOC(mr) }

// SETOS: Set byte if overflow (OF == 1).
//
// Forms:
//
//	SETOS m8
//	SETOS r8
//
// Construct and append a SETOS instruction to the active function.
func (c *Context) SETOS(mr operand.Op) {
	c.addinstruction(x86.SETOS(mr))
}

// SETOS: Set byte if overflow (OF == 1).
//
// Forms:
//
//	SETOS m8
//	SETOS r8
//
// Construct and append a SETOS instruction to the active function.
// Operates on the global context.
func SETOS(mr operand.Op) { ctx.SETOS(mr) }

// SETPC: Set byte if not parity (PF == 0).
//
// Forms:
//
//	SETPC m8
//	SETPC r8
//
// Construct and append a SETPC instruction to the active function.
func (c *Context) SETPC(mr operand.Op) {
	c.addinstruction(x86.SETPC(mr))
}

// SETPC: Set byte if not parity (PF == 0).
//
// Forms:
//
//	SETPC m8
//	SETPC r8
//
// Construct and append a SETPC instruction to the active function.
// Operates on the global context.
func SETPC(mr operand.Op) { ctx.SETPC(mr) }

// SETPL: Set byte if not sign (SF == 0).
//
// Forms:
//
//	SETPL m8
//	SETPL r8
//
// Construct and append a SETPL instruction to the active function.
func (c *Context) SETPL(mr operand.Op) {
	c.addinstruction(x86.SETPL(mr))
}

// SETPL: Set byte if not sign (SF == 0).
//
// Forms:
//
//	SETPL m8
//	SETPL r8
//
// Construct and append a SETPL instruction to the active function.
// Operates on the global context.
func SETPL(mr operand.Op) { ctx.SETPL(mr) }

// SETPS: Set byte if parity (PF == 1).
//
// Forms:
//
//	SETPS m8
//	SETPS r8
//
// Construct and append a SETPS instruction to the active function.
func (c *Context) SETPS(mr operand.Op) {
	c.addinstruction(x86.SETPS(mr))
}

// SETPS: Set byte if parity (PF == 1).
//
// Forms:
//
//	SETPS m8
//	SETPS r8
//
// Construct and append a SETPS instruction to the active function.
// Operates on the global context.
func SETPS(mr operand.Op) { ctx.SETPS(mr) }
|
|
|
|
// SFENCE: Store Fence.
//
// Forms:
//
//	SFENCE
//
// Construct and append a SFENCE instruction to the active function.
func (c *Context) SFENCE() {
	c.addinstruction(x86.SFENCE())
}

// SFENCE: Store Fence.
//
// Forms:
//
//	SFENCE
//
// Construct and append a SFENCE instruction to the active function.
// Operates on the global context.
func SFENCE() { ctx.SFENCE() }
|
|
|
|
// SHA1MSG1: Perform an Intermediate Calculation for the Next Four SHA1 Message Doublewords.
//
// Forms:
//
//	SHA1MSG1 m128 xmm
//	SHA1MSG1 xmm xmm
//
// Construct and append a SHA1MSG1 instruction to the active function.
func (c *Context) SHA1MSG1(mx, x operand.Op) {
	c.addinstruction(x86.SHA1MSG1(mx, x))
}

// SHA1MSG1: Perform an Intermediate Calculation for the Next Four SHA1 Message Doublewords.
//
// Forms:
//
//	SHA1MSG1 m128 xmm
//	SHA1MSG1 xmm xmm
//
// Construct and append a SHA1MSG1 instruction to the active function.
// Operates on the global context.
func SHA1MSG1(mx, x operand.Op) { ctx.SHA1MSG1(mx, x) }

// SHA1MSG2: Perform a Final Calculation for the Next Four SHA1 Message Doublewords.
//
// Forms:
//
//	SHA1MSG2 m128 xmm
//	SHA1MSG2 xmm xmm
//
// Construct and append a SHA1MSG2 instruction to the active function.
func (c *Context) SHA1MSG2(mx, x operand.Op) {
	c.addinstruction(x86.SHA1MSG2(mx, x))
}

// SHA1MSG2: Perform a Final Calculation for the Next Four SHA1 Message Doublewords.
//
// Forms:
//
//	SHA1MSG2 m128 xmm
//	SHA1MSG2 xmm xmm
//
// Construct and append a SHA1MSG2 instruction to the active function.
// Operates on the global context.
func SHA1MSG2(mx, x operand.Op) { ctx.SHA1MSG2(mx, x) }

// SHA1NEXTE: Calculate SHA1 State Variable E after Four Rounds.
//
// Forms:
//
//	SHA1NEXTE m128 xmm
//	SHA1NEXTE xmm xmm
//
// Construct and append a SHA1NEXTE instruction to the active function.
func (c *Context) SHA1NEXTE(mx, x operand.Op) {
	c.addinstruction(x86.SHA1NEXTE(mx, x))
}

// SHA1NEXTE: Calculate SHA1 State Variable E after Four Rounds.
//
// Forms:
//
//	SHA1NEXTE m128 xmm
//	SHA1NEXTE xmm xmm
//
// Construct and append a SHA1NEXTE instruction to the active function.
// Operates on the global context.
func SHA1NEXTE(mx, x operand.Op) { ctx.SHA1NEXTE(mx, x) }

// SHA1RNDS4: Perform Four Rounds of SHA1 Operation.
//
// Forms:
//
//	SHA1RNDS4 imm2u m128 xmm
//	SHA1RNDS4 imm2u xmm xmm
//
// Construct and append a SHA1RNDS4 instruction to the active function.
func (c *Context) SHA1RNDS4(i, mx, x operand.Op) {
	c.addinstruction(x86.SHA1RNDS4(i, mx, x))
}

// SHA1RNDS4: Perform Four Rounds of SHA1 Operation.
//
// Forms:
//
//	SHA1RNDS4 imm2u m128 xmm
//	SHA1RNDS4 imm2u xmm xmm
//
// Construct and append a SHA1RNDS4 instruction to the active function.
// Operates on the global context.
func SHA1RNDS4(i, mx, x operand.Op) { ctx.SHA1RNDS4(i, mx, x) }

// SHA256MSG1: Perform an Intermediate Calculation for the Next Four SHA256 Message Doublewords.
//
// Forms:
//
//	SHA256MSG1 m128 xmm
//	SHA256MSG1 xmm xmm
//
// Construct and append a SHA256MSG1 instruction to the active function.
func (c *Context) SHA256MSG1(mx, x operand.Op) {
	c.addinstruction(x86.SHA256MSG1(mx, x))
}

// SHA256MSG1: Perform an Intermediate Calculation for the Next Four SHA256 Message Doublewords.
//
// Forms:
//
//	SHA256MSG1 m128 xmm
//	SHA256MSG1 xmm xmm
//
// Construct and append a SHA256MSG1 instruction to the active function.
// Operates on the global context.
func SHA256MSG1(mx, x operand.Op) { ctx.SHA256MSG1(mx, x) }

// SHA256MSG2: Perform a Final Calculation for the Next Four SHA256 Message Doublewords.
//
// Forms:
//
//	SHA256MSG2 m128 xmm
//	SHA256MSG2 xmm xmm
//
// Construct and append a SHA256MSG2 instruction to the active function.
func (c *Context) SHA256MSG2(mx, x operand.Op) {
	c.addinstruction(x86.SHA256MSG2(mx, x))
}

// SHA256MSG2: Perform a Final Calculation for the Next Four SHA256 Message Doublewords.
//
// Forms:
//
//	SHA256MSG2 m128 xmm
//	SHA256MSG2 xmm xmm
//
// Construct and append a SHA256MSG2 instruction to the active function.
// Operates on the global context.
func SHA256MSG2(mx, x operand.Op) { ctx.SHA256MSG2(mx, x) }

// SHA256RNDS2: Perform Two Rounds of SHA256 Operation.
//
// Forms:
//
//	SHA256RNDS2 xmm0 m128 xmm
//	SHA256RNDS2 xmm0 xmm xmm
//
// Construct and append a SHA256RNDS2 instruction to the active function.
func (c *Context) SHA256RNDS2(x, mx, x1 operand.Op) {
	c.addinstruction(x86.SHA256RNDS2(x, mx, x1))
}

// SHA256RNDS2: Perform Two Rounds of SHA256 Operation.
//
// Forms:
//
//	SHA256RNDS2 xmm0 m128 xmm
//	SHA256RNDS2 xmm0 xmm xmm
//
// Construct and append a SHA256RNDS2 instruction to the active function.
// Operates on the global context.
func SHA256RNDS2(x, mx, x1 operand.Op) { ctx.SHA256RNDS2(x, mx, x1) }
|
|
|
|
// SHLB: Logical Shift Left.
//
// Forms:
//
//	SHLB 1 m8
//	SHLB 1 r8
//	SHLB cl m8
//	SHLB cl r8
//	SHLB imm8 m8
//	SHLB imm8 r8
//
// Construct and append a SHLB instruction to the active function.
func (c *Context) SHLB(ci, mr operand.Op) {
	c.addinstruction(x86.SHLB(ci, mr))
}

// SHLB: Logical Shift Left.
//
// Forms:
//
//	SHLB 1 m8
//	SHLB 1 r8
//	SHLB cl m8
//	SHLB cl r8
//	SHLB imm8 m8
//	SHLB imm8 r8
//
// Construct and append a SHLB instruction to the active function.
// Operates on the global context.
func SHLB(ci, mr operand.Op) { ctx.SHLB(ci, mr) }

// SHLL: Logical Shift Left.
//
// Forms:
//
//	SHLL 1 m32
//	SHLL 1 r32
//	SHLL cl m32
//	SHLL cl r32
//	SHLL cl r32 m32
//	SHLL cl r32 r32
//	SHLL imm8 m32
//	SHLL imm8 r32
//	SHLL imm8 r32 m32
//	SHLL imm8 r32 r32
//
// Construct and append a SHLL instruction to the active function.
func (c *Context) SHLL(ops ...operand.Op) {
	c.addinstruction(x86.SHLL(ops...))
}

// SHLL: Logical Shift Left.
//
// Forms:
//
//	SHLL 1 m32
//	SHLL 1 r32
//	SHLL cl m32
//	SHLL cl r32
//	SHLL cl r32 m32
//	SHLL cl r32 r32
//	SHLL imm8 m32
//	SHLL imm8 r32
//	SHLL imm8 r32 m32
//	SHLL imm8 r32 r32
//
// Construct and append a SHLL instruction to the active function.
// Operates on the global context.
func SHLL(ops ...operand.Op) { ctx.SHLL(ops...) }

// SHLQ: Logical Shift Left.
//
// Forms:
//
//	SHLQ 1 m64
//	SHLQ 1 r64
//	SHLQ cl m64
//	SHLQ cl r64
//	SHLQ cl r64 m64
//	SHLQ cl r64 r64
//	SHLQ imm8 m64
//	SHLQ imm8 r64
//	SHLQ imm8 r64 m64
//	SHLQ imm8 r64 r64
//
// Construct and append a SHLQ instruction to the active function.
func (c *Context) SHLQ(ops ...operand.Op) {
	c.addinstruction(x86.SHLQ(ops...))
}

// SHLQ: Logical Shift Left.
//
// Forms:
//
//	SHLQ 1 m64
//	SHLQ 1 r64
//	SHLQ cl m64
//	SHLQ cl r64
//	SHLQ cl r64 m64
//	SHLQ cl r64 r64
//	SHLQ imm8 m64
//	SHLQ imm8 r64
//	SHLQ imm8 r64 m64
//	SHLQ imm8 r64 r64
//
// Construct and append a SHLQ instruction to the active function.
// Operates on the global context.
func SHLQ(ops ...operand.Op) { ctx.SHLQ(ops...) }

// SHLW: Logical Shift Left.
//
// Forms:
//
//	SHLW 1 m16
//	SHLW 1 r16
//	SHLW cl m16
//	SHLW cl r16
//	SHLW cl r16 m16
//	SHLW cl r16 r16
//	SHLW imm8 m16
//	SHLW imm8 r16
//	SHLW imm8 r16 m16
//	SHLW imm8 r16 r16
//
// Construct and append a SHLW instruction to the active function.
func (c *Context) SHLW(ops ...operand.Op) {
	c.addinstruction(x86.SHLW(ops...))
}

// SHLW: Logical Shift Left.
//
// Forms:
//
//	SHLW 1 m16
//	SHLW 1 r16
//	SHLW cl m16
//	SHLW cl r16
//	SHLW cl r16 m16
//	SHLW cl r16 r16
//	SHLW imm8 m16
//	SHLW imm8 r16
//	SHLW imm8 r16 m16
//	SHLW imm8 r16 r16
//
// Construct and append a SHLW instruction to the active function.
// Operates on the global context.
func SHLW(ops ...operand.Op) { ctx.SHLW(ops...) }
|
|
|
|
// SHLXL: Logical Shift Left Without Affecting Flags.
//
// Forms:
//
//	SHLXL r32 m32 r32
//	SHLXL r32 r32 r32
//
// Construct and append a SHLXL instruction to the active function.
func (c *Context) SHLXL(r, mr, r1 operand.Op) {
	c.addinstruction(x86.SHLXL(r, mr, r1))
}

// SHLXL: Logical Shift Left Without Affecting Flags.
//
// Forms:
//
//	SHLXL r32 m32 r32
//	SHLXL r32 r32 r32
//
// Construct and append a SHLXL instruction to the active function.
// Operates on the global context.
func SHLXL(r, mr, r1 operand.Op) { ctx.SHLXL(r, mr, r1) }

// SHLXQ: Logical Shift Left Without Affecting Flags.
//
// Forms:
//
//	SHLXQ r64 m64 r64
//	SHLXQ r64 r64 r64
//
// Construct and append a SHLXQ instruction to the active function.
func (c *Context) SHLXQ(r, mr, r1 operand.Op) {
	c.addinstruction(x86.SHLXQ(r, mr, r1))
}

// SHLXQ: Logical Shift Left Without Affecting Flags.
//
// Forms:
//
//	SHLXQ r64 m64 r64
//	SHLXQ r64 r64 r64
//
// Construct and append a SHLXQ instruction to the active function.
// Operates on the global context.
func SHLXQ(r, mr, r1 operand.Op) { ctx.SHLXQ(r, mr, r1) }
|
|
|
|
// SHRB: Logical Shift Right.
//
// Forms:
//
//	SHRB 1 m8
//	SHRB 1 r8
//	SHRB cl m8
//	SHRB cl r8
//	SHRB imm8 m8
//	SHRB imm8 r8
//
// Construct and append a SHRB instruction to the active function.
func (c *Context) SHRB(ci, mr operand.Op) {
	c.addinstruction(x86.SHRB(ci, mr))
}

// SHRB: Logical Shift Right.
//
// Forms:
//
//	SHRB 1 m8
//	SHRB 1 r8
//	SHRB cl m8
//	SHRB cl r8
//	SHRB imm8 m8
//	SHRB imm8 r8
//
// Construct and append a SHRB instruction to the active function.
// Operates on the global context.
func SHRB(ci, mr operand.Op) { ctx.SHRB(ci, mr) }

// SHRL: Logical Shift Right.
//
// Forms:
//
//	SHRL 1 m32
//	SHRL 1 r32
//	SHRL cl m32
//	SHRL cl r32
//	SHRL cl r32 m32
//	SHRL cl r32 r32
//	SHRL imm8 m32
//	SHRL imm8 r32
//	SHRL imm8 r32 m32
//	SHRL imm8 r32 r32
//
// Construct and append a SHRL instruction to the active function.
func (c *Context) SHRL(ops ...operand.Op) {
	c.addinstruction(x86.SHRL(ops...))
}

// SHRL: Logical Shift Right.
//
// Forms:
//
//	SHRL 1 m32
//	SHRL 1 r32
//	SHRL cl m32
//	SHRL cl r32
//	SHRL cl r32 m32
//	SHRL cl r32 r32
//	SHRL imm8 m32
//	SHRL imm8 r32
//	SHRL imm8 r32 m32
//	SHRL imm8 r32 r32
//
// Construct and append a SHRL instruction to the active function.
// Operates on the global context.
func SHRL(ops ...operand.Op) { ctx.SHRL(ops...) }

// SHRQ: Logical Shift Right.
//
// Forms:
//
//	SHRQ 1 m64
//	SHRQ 1 r64
//	SHRQ cl m64
//	SHRQ cl r64
//	SHRQ cl r64 m64
//	SHRQ cl r64 r64
//	SHRQ imm8 m64
//	SHRQ imm8 r64
//	SHRQ imm8 r64 m64
//	SHRQ imm8 r64 r64
//
// Construct and append a SHRQ instruction to the active function.
func (c *Context) SHRQ(ops ...operand.Op) {
	c.addinstruction(x86.SHRQ(ops...))
}

// SHRQ: Logical Shift Right.
//
// Forms:
//
//	SHRQ 1 m64
//	SHRQ 1 r64
//	SHRQ cl m64
//	SHRQ cl r64
//	SHRQ cl r64 m64
//	SHRQ cl r64 r64
//	SHRQ imm8 m64
//	SHRQ imm8 r64
//	SHRQ imm8 r64 m64
//	SHRQ imm8 r64 r64
//
// Construct and append a SHRQ instruction to the active function.
// Operates on the global context.
func SHRQ(ops ...operand.Op) { ctx.SHRQ(ops...) }

// SHRW: Logical Shift Right.
//
// Forms:
//
//	SHRW 1 m16
//	SHRW 1 r16
//	SHRW cl m16
//	SHRW cl r16
//	SHRW cl r16 m16
//	SHRW cl r16 r16
//	SHRW imm8 m16
//	SHRW imm8 r16
//	SHRW imm8 r16 m16
//	SHRW imm8 r16 r16
//
// Construct and append a SHRW instruction to the active function.
func (c *Context) SHRW(ops ...operand.Op) {
	c.addinstruction(x86.SHRW(ops...))
}

// SHRW: Logical Shift Right.
//
// Forms:
//
//	SHRW 1 m16
//	SHRW 1 r16
//	SHRW cl m16
//	SHRW cl r16
//	SHRW cl r16 m16
//	SHRW cl r16 r16
//	SHRW imm8 m16
//	SHRW imm8 r16
//	SHRW imm8 r16 m16
//	SHRW imm8 r16 r16
//
// Construct and append a SHRW instruction to the active function.
// Operates on the global context.
func SHRW(ops ...operand.Op) { ctx.SHRW(ops...) }
|
|
|
|
// SHRXL: Logical Shift Right Without Affecting Flags.
//
// Forms:
//
//	SHRXL r32 m32 r32
//	SHRXL r32 r32 r32
//
// Construct and append a SHRXL instruction to the active function.
func (c *Context) SHRXL(r, mr, r1 operand.Op) {
	c.addinstruction(x86.SHRXL(r, mr, r1))
}

// SHRXL: Logical Shift Right Without Affecting Flags.
//
// Forms:
//
//	SHRXL r32 m32 r32
//	SHRXL r32 r32 r32
//
// Construct and append a SHRXL instruction to the active function.
// Operates on the global context.
func SHRXL(r, mr, r1 operand.Op) { ctx.SHRXL(r, mr, r1) }

// SHRXQ: Logical Shift Right Without Affecting Flags.
//
// Forms:
//
//	SHRXQ r64 m64 r64
//	SHRXQ r64 r64 r64
//
// Construct and append a SHRXQ instruction to the active function.
func (c *Context) SHRXQ(r, mr, r1 operand.Op) {
	c.addinstruction(x86.SHRXQ(r, mr, r1))
}

// SHRXQ: Logical Shift Right Without Affecting Flags.
//
// Forms:
//
//	SHRXQ r64 m64 r64
//	SHRXQ r64 r64 r64
//
// Construct and append a SHRXQ instruction to the active function.
// Operates on the global context.
func SHRXQ(r, mr, r1 operand.Op) { ctx.SHRXQ(r, mr, r1) }
|
|
|
|
// SHUFPD: Shuffle Packed Double-Precision Floating-Point Values.
//
// Forms:
//
//	SHUFPD imm8 m128 xmm
//	SHUFPD imm8 xmm xmm
//
// Construct and append a SHUFPD instruction to the active function.
func (c *Context) SHUFPD(i, mx, x operand.Op) {
	c.addinstruction(x86.SHUFPD(i, mx, x))
}

// SHUFPD: Shuffle Packed Double-Precision Floating-Point Values.
//
// Forms:
//
//	SHUFPD imm8 m128 xmm
//	SHUFPD imm8 xmm xmm
//
// Construct and append a SHUFPD instruction to the active function.
// Operates on the global context.
func SHUFPD(i, mx, x operand.Op) { ctx.SHUFPD(i, mx, x) }

// SHUFPS: Shuffle Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	SHUFPS imm8 m128 xmm
//	SHUFPS imm8 xmm xmm
//
// Construct and append a SHUFPS instruction to the active function.
func (c *Context) SHUFPS(i, mx, x operand.Op) {
	c.addinstruction(x86.SHUFPS(i, mx, x))
}

// SHUFPS: Shuffle Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	SHUFPS imm8 m128 xmm
//	SHUFPS imm8 xmm xmm
//
// Construct and append a SHUFPS instruction to the active function.
// Operates on the global context.
func SHUFPS(i, mx, x operand.Op) { ctx.SHUFPS(i, mx, x) }
|
|
|
|
// SQRTPD: Compute Square Roots of Packed Double-Precision Floating-Point Values.
//
// Forms:
//
//	SQRTPD m128 xmm
//	SQRTPD xmm xmm
//
// Construct and append a SQRTPD instruction to the active function.
func (c *Context) SQRTPD(mx, x operand.Op) {
	c.addinstruction(x86.SQRTPD(mx, x))
}

// SQRTPD: Compute Square Roots of Packed Double-Precision Floating-Point Values.
//
// Forms:
//
//	SQRTPD m128 xmm
//	SQRTPD xmm xmm
//
// Construct and append a SQRTPD instruction to the active function.
// Operates on the global context.
func SQRTPD(mx, x operand.Op) { ctx.SQRTPD(mx, x) }

// SQRTPS: Compute Square Roots of Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	SQRTPS m128 xmm
//	SQRTPS xmm xmm
//
// Construct and append a SQRTPS instruction to the active function.
func (c *Context) SQRTPS(mx, x operand.Op) {
	c.addinstruction(x86.SQRTPS(mx, x))
}

// SQRTPS: Compute Square Roots of Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	SQRTPS m128 xmm
//	SQRTPS xmm xmm
//
// Construct and append a SQRTPS instruction to the active function.
// Operates on the global context.
func SQRTPS(mx, x operand.Op) { ctx.SQRTPS(mx, x) }

// SQRTSD: Compute Square Root of Scalar Double-Precision Floating-Point Value.
//
// Forms:
//
//	SQRTSD m64 xmm
//	SQRTSD xmm xmm
//
// Construct and append a SQRTSD instruction to the active function.
func (c *Context) SQRTSD(mx, x operand.Op) {
	c.addinstruction(x86.SQRTSD(mx, x))
}

// SQRTSD: Compute Square Root of Scalar Double-Precision Floating-Point Value.
//
// Forms:
//
//	SQRTSD m64 xmm
//	SQRTSD xmm xmm
//
// Construct and append a SQRTSD instruction to the active function.
// Operates on the global context.
func SQRTSD(mx, x operand.Op) { ctx.SQRTSD(mx, x) }

// SQRTSS: Compute Square Root of Scalar Single-Precision Floating-Point Value.
//
// Forms:
//
//	SQRTSS m32 xmm
//	SQRTSS xmm xmm
//
// Construct and append a SQRTSS instruction to the active function.
func (c *Context) SQRTSS(mx, x operand.Op) {
	c.addinstruction(x86.SQRTSS(mx, x))
}

// SQRTSS: Compute Square Root of Scalar Single-Precision Floating-Point Value.
//
// Forms:
//
//	SQRTSS m32 xmm
//	SQRTSS xmm xmm
//
// Construct and append a SQRTSS instruction to the active function.
// Operates on the global context.
func SQRTSS(mx, x operand.Op) { ctx.SQRTSS(mx, x) }
|
|
|
|
// STC: Set Carry Flag.
//
// Forms:
//
//	STC
//
// Construct and append a STC instruction to the active function.
func (c *Context) STC() {
	c.addinstruction(x86.STC())
}

// STC: Set Carry Flag.
//
// Forms:
//
//	STC
//
// Construct and append a STC instruction to the active function.
// Operates on the global context.
func STC() { ctx.STC() }

// STD: Set Direction Flag.
//
// Forms:
//
//	STD
//
// Construct and append a STD instruction to the active function.
func (c *Context) STD() {
	c.addinstruction(x86.STD())
}

// STD: Set Direction Flag.
//
// Forms:
//
//	STD
//
// Construct and append a STD instruction to the active function.
// Operates on the global context.
func STD() { ctx.STD() }

// STMXCSR: Store MXCSR Register State.
//
// Forms:
//
//	STMXCSR m32
//
// Construct and append a STMXCSR instruction to the active function.
func (c *Context) STMXCSR(m operand.Op) {
	c.addinstruction(x86.STMXCSR(m))
}

// STMXCSR: Store MXCSR Register State.
//
// Forms:
//
//	STMXCSR m32
//
// Construct and append a STMXCSR instruction to the active function.
// Operates on the global context.
func STMXCSR(m operand.Op) { ctx.STMXCSR(m) }
|
|
|
|
// SUBB: Subtract.
//
// Forms:
//
//	SUBB imm8 al
//	SUBB imm8 m8
//	SUBB imm8 r8
//	SUBB m8 r8
//	SUBB r8 m8
//	SUBB r8 r8
//
// Construct and append a SUBB instruction to the active function.
func (c *Context) SUBB(imr, amr operand.Op) {
	c.addinstruction(x86.SUBB(imr, amr))
}

// SUBB: Subtract.
//
// Forms:
//
//	SUBB imm8 al
//	SUBB imm8 m8
//	SUBB imm8 r8
//	SUBB m8 r8
//	SUBB r8 m8
//	SUBB r8 r8
//
// Construct and append a SUBB instruction to the active function.
// Operates on the global context.
func SUBB(imr, amr operand.Op) { ctx.SUBB(imr, amr) }

// SUBL: Subtract.
//
// Forms:
//
//	SUBL imm32 eax
//	SUBL imm32 m32
//	SUBL imm32 r32
//	SUBL imm8 m32
//	SUBL imm8 r32
//	SUBL m32 r32
//	SUBL r32 m32
//	SUBL r32 r32
//
// Construct and append a SUBL instruction to the active function.
func (c *Context) SUBL(imr, emr operand.Op) {
	c.addinstruction(x86.SUBL(imr, emr))
}

// SUBL: Subtract.
//
// Forms:
//
//	SUBL imm32 eax
//	SUBL imm32 m32
//	SUBL imm32 r32
//	SUBL imm8 m32
//	SUBL imm8 r32
//	SUBL m32 r32
//	SUBL r32 m32
//	SUBL r32 r32
//
// Construct and append a SUBL instruction to the active function.
// Operates on the global context.
func SUBL(imr, emr operand.Op) { ctx.SUBL(imr, emr) }

// SUBPD: Subtract Packed Double-Precision Floating-Point Values.
//
// Forms:
//
//	SUBPD m128 xmm
//	SUBPD xmm xmm
//
// Construct and append a SUBPD instruction to the active function.
func (c *Context) SUBPD(mx, x operand.Op) {
	c.addinstruction(x86.SUBPD(mx, x))
}

// SUBPD: Subtract Packed Double-Precision Floating-Point Values.
//
// Forms:
//
//	SUBPD m128 xmm
//	SUBPD xmm xmm
//
// Construct and append a SUBPD instruction to the active function.
// Operates on the global context.
func SUBPD(mx, x operand.Op) { ctx.SUBPD(mx, x) }

// SUBPS: Subtract Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	SUBPS m128 xmm
//	SUBPS xmm xmm
//
// Construct and append a SUBPS instruction to the active function.
func (c *Context) SUBPS(mx, x operand.Op) {
	c.addinstruction(x86.SUBPS(mx, x))
}

// SUBPS: Subtract Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	SUBPS m128 xmm
//	SUBPS xmm xmm
//
// Construct and append a SUBPS instruction to the active function.
// Operates on the global context.
func SUBPS(mx, x operand.Op) { ctx.SUBPS(mx, x) }

// SUBQ: Subtract.
//
// Forms:
//
//	SUBQ imm32 m64
//	SUBQ imm32 r64
//	SUBQ imm32 rax
//	SUBQ imm8 m64
//	SUBQ imm8 r64
//	SUBQ m64 r64
//	SUBQ r64 m64
//	SUBQ r64 r64
//
// Construct and append a SUBQ instruction to the active function.
func (c *Context) SUBQ(imr, mr operand.Op) {
	c.addinstruction(x86.SUBQ(imr, mr))
}

// SUBQ: Subtract.
//
// Forms:
//
//	SUBQ imm32 m64
//	SUBQ imm32 r64
//	SUBQ imm32 rax
//	SUBQ imm8 m64
//	SUBQ imm8 r64
//	SUBQ m64 r64
//	SUBQ r64 m64
//	SUBQ r64 r64
//
// Construct and append a SUBQ instruction to the active function.
// Operates on the global context.
func SUBQ(imr, mr operand.Op) { ctx.SUBQ(imr, mr) }

// SUBSD: Subtract Scalar Double-Precision Floating-Point Values.
//
// Forms:
//
//	SUBSD m64 xmm
//	SUBSD xmm xmm
//
// Construct and append a SUBSD instruction to the active function.
func (c *Context) SUBSD(mx, x operand.Op) {
	c.addinstruction(x86.SUBSD(mx, x))
}

// SUBSD: Subtract Scalar Double-Precision Floating-Point Values.
//
// Forms:
//
//	SUBSD m64 xmm
//	SUBSD xmm xmm
//
// Construct and append a SUBSD instruction to the active function.
// Operates on the global context.
func SUBSD(mx, x operand.Op) { ctx.SUBSD(mx, x) }

// SUBSS: Subtract Scalar Single-Precision Floating-Point Values.
//
// Forms:
//
//	SUBSS m32 xmm
//	SUBSS xmm xmm
//
// Construct and append a SUBSS instruction to the active function.
func (c *Context) SUBSS(mx, x operand.Op) {
	c.addinstruction(x86.SUBSS(mx, x))
}

// SUBSS: Subtract Scalar Single-Precision Floating-Point Values.
//
// Forms:
//
//	SUBSS m32 xmm
//	SUBSS xmm xmm
//
// Construct and append a SUBSS instruction to the active function.
// Operates on the global context.
func SUBSS(mx, x operand.Op) { ctx.SUBSS(mx, x) }

// SUBW: Subtract.
//
// Forms:
//
//	SUBW imm16 ax
//	SUBW imm16 m16
//	SUBW imm16 r16
//	SUBW imm8 m16
//	SUBW imm8 r16
//	SUBW m16 r16
//	SUBW r16 m16
//	SUBW r16 r16
//
// Construct and append a SUBW instruction to the active function.
func (c *Context) SUBW(imr, amr operand.Op) {
	c.addinstruction(x86.SUBW(imr, amr))
}

// SUBW: Subtract.
//
// Forms:
//
//	SUBW imm16 ax
//	SUBW imm16 m16
//	SUBW imm16 r16
//	SUBW imm8 m16
//	SUBW imm8 r16
//	SUBW m16 r16
//	SUBW r16 m16
//	SUBW r16 r16
//
// Construct and append a SUBW instruction to the active function.
// Operates on the global context.
func SUBW(imr, amr operand.Op) { ctx.SUBW(imr, amr) }
|
|
|
|
// SYSCALL: Fast System Call.
//
// Forms:
//
//	SYSCALL
//
// Construct and append a SYSCALL instruction to the active function.
func (c *Context) SYSCALL() {
	c.addinstruction(x86.SYSCALL())
}

// SYSCALL: Fast System Call.
//
// Forms:
//
//	SYSCALL
//
// Construct and append a SYSCALL instruction to the active function.
// Operates on the global context.
func SYSCALL() { ctx.SYSCALL() }
|
|
|
|
// TESTB: Logical Compare.
//
// Forms:
//
//	TESTB imm8 al
//	TESTB imm8 m8
//	TESTB imm8 r8
//	TESTB r8 m8
//	TESTB r8 r8
//
// Construct and append a TESTB instruction to the active function.
func (c *Context) TESTB(ir, amr operand.Op) {
	c.addinstruction(x86.TESTB(ir, amr))
}

// TESTB: Logical Compare.
//
// Forms:
//
//	TESTB imm8 al
//	TESTB imm8 m8
//	TESTB imm8 r8
//	TESTB r8 m8
//	TESTB r8 r8
//
// Construct and append a TESTB instruction to the active function.
// Operates on the global context.
func TESTB(ir, amr operand.Op) { ctx.TESTB(ir, amr) }

// TESTL: Logical Compare.
//
// Forms:
//
//	TESTL imm32 eax
//	TESTL imm32 m32
//	TESTL imm32 r32
//	TESTL r32 m32
//	TESTL r32 r32
//
// Construct and append a TESTL instruction to the active function.
func (c *Context) TESTL(ir, emr operand.Op) {
	c.addinstruction(x86.TESTL(ir, emr))
}

// TESTL: Logical Compare.
//
// Forms:
//
//	TESTL imm32 eax
//	TESTL imm32 m32
//	TESTL imm32 r32
//	TESTL r32 m32
//	TESTL r32 r32
//
// Construct and append a TESTL instruction to the active function.
// Operates on the global context.
func TESTL(ir, emr operand.Op) { ctx.TESTL(ir, emr) }

// TESTQ: Logical Compare.
//
// Forms:
//
//	TESTQ imm32 m64
//	TESTQ imm32 r64
//	TESTQ imm32 rax
//	TESTQ r64 m64
//	TESTQ r64 r64
//
// Construct and append a TESTQ instruction to the active function.
func (c *Context) TESTQ(ir, mr operand.Op) {
	c.addinstruction(x86.TESTQ(ir, mr))
}

// TESTQ: Logical Compare.
//
// Forms:
//
//	TESTQ imm32 m64
//	TESTQ imm32 r64
//	TESTQ imm32 rax
//	TESTQ r64 m64
//	TESTQ r64 r64
//
// Construct and append a TESTQ instruction to the active function.
// Operates on the global context.
func TESTQ(ir, mr operand.Op) { ctx.TESTQ(ir, mr) }

// TESTW: Logical Compare.
//
// Forms:
//
//	TESTW imm16 ax
//	TESTW imm16 m16
//	TESTW imm16 r16
//	TESTW r16 m16
//	TESTW r16 r16
//
// Construct and append a TESTW instruction to the active function.
func (c *Context) TESTW(ir, amr operand.Op) {
	c.addinstruction(x86.TESTW(ir, amr))
}

// TESTW: Logical Compare.
//
// Forms:
//
//	TESTW imm16 ax
//	TESTW imm16 m16
//	TESTW imm16 r16
//	TESTW r16 m16
//	TESTW r16 r16
//
// Construct and append a TESTW instruction to the active function.
// Operates on the global context.
func TESTW(ir, amr operand.Op) { ctx.TESTW(ir, amr) }
|
|
|
|
// TZCNTL: Count the Number of Trailing Zero Bits.
//
// Forms:
//
//	TZCNTL m32 r32
//	TZCNTL r32 r32
//
// Construct and append a TZCNTL instruction to the active function.
func (c *Context) TZCNTL(mr, r operand.Op) {
	c.addinstruction(x86.TZCNTL(mr, r))
}

// TZCNTL: Count the Number of Trailing Zero Bits.
//
// Forms:
//
//	TZCNTL m32 r32
//	TZCNTL r32 r32
//
// Construct and append a TZCNTL instruction to the active function.
// Operates on the global context.
func TZCNTL(mr, r operand.Op) { ctx.TZCNTL(mr, r) }

// TZCNTQ: Count the Number of Trailing Zero Bits.
//
// Forms:
//
//	TZCNTQ m64 r64
//	TZCNTQ r64 r64
//
// Construct and append a TZCNTQ instruction to the active function.
func (c *Context) TZCNTQ(mr, r operand.Op) {
	c.addinstruction(x86.TZCNTQ(mr, r))
}

// TZCNTQ: Count the Number of Trailing Zero Bits.
//
// Forms:
//
//	TZCNTQ m64 r64
//	TZCNTQ r64 r64
//
// Construct and append a TZCNTQ instruction to the active function.
// Operates on the global context.
func TZCNTQ(mr, r operand.Op) { ctx.TZCNTQ(mr, r) }

// TZCNTW: Count the Number of Trailing Zero Bits.
//
// Forms:
//
//	TZCNTW m16 r16
//	TZCNTW r16 r16
//
// Construct and append a TZCNTW instruction to the active function.
func (c *Context) TZCNTW(mr, r operand.Op) {
	c.addinstruction(x86.TZCNTW(mr, r))
}

// TZCNTW: Count the Number of Trailing Zero Bits.
//
// Forms:
//
//	TZCNTW m16 r16
//	TZCNTW r16 r16
//
// Construct and append a TZCNTW instruction to the active function.
// Operates on the global context.
func TZCNTW(mr, r operand.Op) { ctx.TZCNTW(mr, r) }
|
|
|
|
// UCOMISD: Unordered Compare Scalar Double-Precision Floating-Point Values and Set EFLAGS.
//
// Forms:
//
//	UCOMISD m64 xmm
//	UCOMISD xmm xmm
//
// Construct and append a UCOMISD instruction to the active function.
func (c *Context) UCOMISD(mx, x operand.Op) {
	c.addinstruction(x86.UCOMISD(mx, x))
}

// UCOMISD: Unordered Compare Scalar Double-Precision Floating-Point Values and Set EFLAGS.
//
// Forms:
//
//	UCOMISD m64 xmm
//	UCOMISD xmm xmm
//
// Construct and append a UCOMISD instruction to the active function.
// Operates on the global context.
func UCOMISD(mx, x operand.Op) { ctx.UCOMISD(mx, x) }

// UCOMISS: Unordered Compare Scalar Single-Precision Floating-Point Values and Set EFLAGS.
//
// Forms:
//
//	UCOMISS m32 xmm
//	UCOMISS xmm xmm
//
// Construct and append a UCOMISS instruction to the active function.
func (c *Context) UCOMISS(mx, x operand.Op) {
	c.addinstruction(x86.UCOMISS(mx, x))
}

// UCOMISS: Unordered Compare Scalar Single-Precision Floating-Point Values and Set EFLAGS.
//
// Forms:
//
//	UCOMISS m32 xmm
//	UCOMISS xmm xmm
//
// Construct and append a UCOMISS instruction to the active function.
// Operates on the global context.
func UCOMISS(mx, x operand.Op) { ctx.UCOMISS(mx, x) }
|
|
|
|
// UD2: Undefined Instruction.
//
// Forms:
//
//	UD2
//
// Construct and append a UD2 instruction to the active function.
func (c *Context) UD2() {
	c.addinstruction(x86.UD2())
}

// UD2: Undefined Instruction.
//
// Forms:
//
//	UD2
//
// Construct and append a UD2 instruction to the active function.
// Operates on the global context.
func UD2() { ctx.UD2() }
|
|
|
|
// UNPCKHPD: Unpack and Interleave High Packed Double-Precision Floating-Point Values.
//
// Forms:
//
//	UNPCKHPD m128 xmm
//	UNPCKHPD xmm xmm
//
// Construct and append a UNPCKHPD instruction to the active function.
func (c *Context) UNPCKHPD(mx, x operand.Op) {
	c.addinstruction(x86.UNPCKHPD(mx, x))
}

// UNPCKHPD: Unpack and Interleave High Packed Double-Precision Floating-Point Values.
//
// Forms:
//
//	UNPCKHPD m128 xmm
//	UNPCKHPD xmm xmm
//
// Construct and append a UNPCKHPD instruction to the active function.
// Operates on the global context.
func UNPCKHPD(mx, x operand.Op) { ctx.UNPCKHPD(mx, x) }

// UNPCKHPS: Unpack and Interleave High Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	UNPCKHPS m128 xmm
//	UNPCKHPS xmm xmm
//
// Construct and append a UNPCKHPS instruction to the active function.
func (c *Context) UNPCKHPS(mx, x operand.Op) {
	c.addinstruction(x86.UNPCKHPS(mx, x))
}

// UNPCKHPS: Unpack and Interleave High Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	UNPCKHPS m128 xmm
//	UNPCKHPS xmm xmm
//
// Construct and append a UNPCKHPS instruction to the active function.
// Operates on the global context.
func UNPCKHPS(mx, x operand.Op) { ctx.UNPCKHPS(mx, x) }

// UNPCKLPD: Unpack and Interleave Low Packed Double-Precision Floating-Point Values.
//
// Forms:
//
//	UNPCKLPD m128 xmm
//	UNPCKLPD xmm xmm
//
// Construct and append a UNPCKLPD instruction to the active function.
func (c *Context) UNPCKLPD(mx, x operand.Op) {
	c.addinstruction(x86.UNPCKLPD(mx, x))
}

// UNPCKLPD: Unpack and Interleave Low Packed Double-Precision Floating-Point Values.
//
// Forms:
//
//	UNPCKLPD m128 xmm
//	UNPCKLPD xmm xmm
//
// Construct and append a UNPCKLPD instruction to the active function.
// Operates on the global context.
func UNPCKLPD(mx, x operand.Op) { ctx.UNPCKLPD(mx, x) }

// UNPCKLPS: Unpack and Interleave Low Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	UNPCKLPS m128 xmm
//	UNPCKLPS xmm xmm
//
// Construct and append a UNPCKLPS instruction to the active function.
func (c *Context) UNPCKLPS(mx, x operand.Op) {
	c.addinstruction(x86.UNPCKLPS(mx, x))
}

// UNPCKLPS: Unpack and Interleave Low Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	UNPCKLPS m128 xmm
//	UNPCKLPS xmm xmm
//
// Construct and append a UNPCKLPS instruction to the active function.
// Operates on the global context.
func UNPCKLPS(mx, x operand.Op) { ctx.UNPCKLPS(mx, x) }
|
|
|
|
// VADDPD: Add Packed Double-Precision Floating-Point Values.
//
// Forms:
//
//	VADDPD m128 xmm xmm
//	VADDPD m256 ymm ymm
//	VADDPD xmm xmm xmm
//	VADDPD ymm ymm ymm
//	VADDPD m128 xmm k xmm
//	VADDPD m256 ymm k ymm
//	VADDPD xmm xmm k xmm
//	VADDPD ymm ymm k ymm
//	VADDPD m512 zmm k zmm
//	VADDPD m512 zmm zmm
//	VADDPD zmm zmm k zmm
//	VADDPD zmm zmm zmm
//
// Construct and append a VADDPD instruction to the active function.
func (c *Context) VADDPD(ops ...operand.Op) {
	c.addinstruction(x86.VADDPD(ops...))
}

// VADDPD: Add Packed Double-Precision Floating-Point Values.
//
// Forms:
//
//	VADDPD m128 xmm xmm
//	VADDPD m256 ymm ymm
//	VADDPD xmm xmm xmm
//	VADDPD ymm ymm ymm
//	VADDPD m128 xmm k xmm
//	VADDPD m256 ymm k ymm
//	VADDPD xmm xmm k xmm
//	VADDPD ymm ymm k ymm
//	VADDPD m512 zmm k zmm
//	VADDPD m512 zmm zmm
//	VADDPD zmm zmm k zmm
//	VADDPD zmm zmm zmm
//
// Construct and append a VADDPD instruction to the active function.
// Operates on the global context.
func VADDPD(ops ...operand.Op) { ctx.VADDPD(ops...) }
|
|
|
|
// VADDPD_BCST: Add Packed Double-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VADDPD.BCST m64 xmm k xmm
//	VADDPD.BCST m64 xmm xmm
//	VADDPD.BCST m64 ymm k ymm
//	VADDPD.BCST m64 ymm ymm
//	VADDPD.BCST m64 zmm k zmm
//	VADDPD.BCST m64 zmm zmm
//
// Construct and append a VADDPD.BCST instruction to the active function.
func (c *Context) VADDPD_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VADDPD_BCST(ops...))
}

// VADDPD_BCST: Add Packed Double-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VADDPD.BCST m64 xmm k xmm
//	VADDPD.BCST m64 xmm xmm
//	VADDPD.BCST m64 ymm k ymm
//	VADDPD.BCST m64 ymm ymm
//	VADDPD.BCST m64 zmm k zmm
//	VADDPD.BCST m64 zmm zmm
//
// Construct and append a VADDPD.BCST instruction to the active function.
// Operates on the global context.
func VADDPD_BCST(ops ...operand.Op) { ctx.VADDPD_BCST(ops...) }

// VADDPD_BCST_Z: Add Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VADDPD.BCST.Z m64 xmm k xmm
//	VADDPD.BCST.Z m64 ymm k ymm
//	VADDPD.BCST.Z m64 zmm k zmm
//
// Construct and append a VADDPD.BCST.Z instruction to the active function.
func (c *Context) VADDPD_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VADDPD_BCST_Z(m, xyz, k, xyz1))
}

// VADDPD_BCST_Z: Add Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VADDPD.BCST.Z m64 xmm k xmm
//	VADDPD.BCST.Z m64 ymm k ymm
//	VADDPD.BCST.Z m64 zmm k zmm
//
// Construct and append a VADDPD.BCST.Z instruction to the active function.
// Operates on the global context.
func VADDPD_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VADDPD_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VADDPD_RD_SAE: Add Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VADDPD.RD_SAE zmm zmm k zmm
//	VADDPD.RD_SAE zmm zmm zmm
//
// Construct and append a VADDPD.RD_SAE instruction to the active function.
func (c *Context) VADDPD_RD_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VADDPD_RD_SAE(ops...))
}

// VADDPD_RD_SAE: Add Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VADDPD.RD_SAE zmm zmm k zmm
//	VADDPD.RD_SAE zmm zmm zmm
//
// Construct and append a VADDPD.RD_SAE instruction to the active function.
// Operates on the global context.
func VADDPD_RD_SAE(ops ...operand.Op) { ctx.VADDPD_RD_SAE(ops...) }

// VADDPD_RD_SAE_Z: Add Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VADDPD.RD_SAE.Z zmm zmm k zmm
//
// Construct and append a VADDPD.RD_SAE.Z instruction to the active function.
func (c *Context) VADDPD_RD_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VADDPD_RD_SAE_Z(z, z1, k, z2))
}

// VADDPD_RD_SAE_Z: Add Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VADDPD.RD_SAE.Z zmm zmm k zmm
//
// Construct and append a VADDPD.RD_SAE.Z instruction to the active function.
// Operates on the global context.
func VADDPD_RD_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VADDPD_RD_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VADDPD_RN_SAE: Add Packed Double-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VADDPD.RN_SAE zmm zmm k zmm
//	VADDPD.RN_SAE zmm zmm zmm
//
// Construct and append a VADDPD.RN_SAE instruction to the active function.
func (c *Context) VADDPD_RN_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VADDPD_RN_SAE(ops...))
}

// VADDPD_RN_SAE: Add Packed Double-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VADDPD.RN_SAE zmm zmm k zmm
//	VADDPD.RN_SAE zmm zmm zmm
//
// Construct and append a VADDPD.RN_SAE instruction to the active function.
// Operates on the global context.
func VADDPD_RN_SAE(ops ...operand.Op) { ctx.VADDPD_RN_SAE(ops...) }

// VADDPD_RN_SAE_Z: Add Packed Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VADDPD.RN_SAE.Z zmm zmm k zmm
//
// Construct and append a VADDPD.RN_SAE.Z instruction to the active function.
func (c *Context) VADDPD_RN_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VADDPD_RN_SAE_Z(z, z1, k, z2))
}

// VADDPD_RN_SAE_Z: Add Packed Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VADDPD.RN_SAE.Z zmm zmm k zmm
//
// Construct and append a VADDPD.RN_SAE.Z instruction to the active function.
// Operates on the global context.
func VADDPD_RN_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VADDPD_RN_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VADDPD_RU_SAE: Add Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VADDPD.RU_SAE zmm zmm k zmm
//	VADDPD.RU_SAE zmm zmm zmm
//
// Construct and append a VADDPD.RU_SAE instruction to the active function.
func (c *Context) VADDPD_RU_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VADDPD_RU_SAE(ops...))
}

// VADDPD_RU_SAE: Add Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VADDPD.RU_SAE zmm zmm k zmm
//	VADDPD.RU_SAE zmm zmm zmm
//
// Construct and append a VADDPD.RU_SAE instruction to the active function.
// Operates on the global context.
func VADDPD_RU_SAE(ops ...operand.Op) { ctx.VADDPD_RU_SAE(ops...) }

// VADDPD_RU_SAE_Z: Add Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VADDPD.RU_SAE.Z zmm zmm k zmm
//
// Construct and append a VADDPD.RU_SAE.Z instruction to the active function.
func (c *Context) VADDPD_RU_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VADDPD_RU_SAE_Z(z, z1, k, z2))
}

// VADDPD_RU_SAE_Z: Add Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VADDPD.RU_SAE.Z zmm zmm k zmm
//
// Construct and append a VADDPD.RU_SAE.Z instruction to the active function.
// Operates on the global context.
func VADDPD_RU_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VADDPD_RU_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VADDPD_RZ_SAE: Add Packed Double-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VADDPD.RZ_SAE zmm zmm k zmm
//	VADDPD.RZ_SAE zmm zmm zmm
//
// Construct and append a VADDPD.RZ_SAE instruction to the active function.
func (c *Context) VADDPD_RZ_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VADDPD_RZ_SAE(ops...))
}

// VADDPD_RZ_SAE: Add Packed Double-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VADDPD.RZ_SAE zmm zmm k zmm
//	VADDPD.RZ_SAE zmm zmm zmm
//
// Construct and append a VADDPD.RZ_SAE instruction to the active function.
// Operates on the global context.
func VADDPD_RZ_SAE(ops ...operand.Op) { ctx.VADDPD_RZ_SAE(ops...) }
|
|
|
|
// VADDPD_RZ_SAE_Z: Add Packed Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VADDPD.RZ_SAE.Z zmm zmm k zmm
//
// Construct and append a VADDPD.RZ_SAE.Z instruction to the active function.
func (c *Context) VADDPD_RZ_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VADDPD_RZ_SAE_Z(z, z1, k, z2))
}

// VADDPD_RZ_SAE_Z: Add Packed Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VADDPD.RZ_SAE.Z zmm zmm k zmm
//
// Construct and append a VADDPD.RZ_SAE.Z instruction to the active function.
// Operates on the global context.
func VADDPD_RZ_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VADDPD_RZ_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VADDPD_Z: Add Packed Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VADDPD.Z m128 xmm k xmm
//	VADDPD.Z m256 ymm k ymm
//	VADDPD.Z xmm xmm k xmm
//	VADDPD.Z ymm ymm k ymm
//	VADDPD.Z m512 zmm k zmm
//	VADDPD.Z zmm zmm k zmm
//
// Construct and append a VADDPD.Z instruction to the active function.
func (c *Context) VADDPD_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VADDPD_Z(mxyz, xyz, k, xyz1))
}

// VADDPD_Z: Add Packed Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VADDPD.Z m128 xmm k xmm
//	VADDPD.Z m256 ymm k ymm
//	VADDPD.Z xmm xmm k xmm
//	VADDPD.Z ymm ymm k ymm
//	VADDPD.Z m512 zmm k zmm
//	VADDPD.Z zmm zmm k zmm
//
// Construct and append a VADDPD.Z instruction to the active function.
// Operates on the global context.
func VADDPD_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VADDPD_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VADDPS: Add Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	VADDPS m128 xmm xmm
//	VADDPS m256 ymm ymm
//	VADDPS xmm xmm xmm
//	VADDPS ymm ymm ymm
//	VADDPS m128 xmm k xmm
//	VADDPS m256 ymm k ymm
//	VADDPS xmm xmm k xmm
//	VADDPS ymm ymm k ymm
//	VADDPS m512 zmm k zmm
//	VADDPS m512 zmm zmm
//	VADDPS zmm zmm k zmm
//	VADDPS zmm zmm zmm
//
// Construct and append a VADDPS instruction to the active function.
func (c *Context) VADDPS(ops ...operand.Op) {
	c.addinstruction(x86.VADDPS(ops...))
}

// VADDPS: Add Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	VADDPS m128 xmm xmm
//	VADDPS m256 ymm ymm
//	VADDPS xmm xmm xmm
//	VADDPS ymm ymm ymm
//	VADDPS m128 xmm k xmm
//	VADDPS m256 ymm k ymm
//	VADDPS xmm xmm k xmm
//	VADDPS ymm ymm k ymm
//	VADDPS m512 zmm k zmm
//	VADDPS m512 zmm zmm
//	VADDPS zmm zmm k zmm
//	VADDPS zmm zmm zmm
//
// Construct and append a VADDPS instruction to the active function.
// Operates on the global context.
func VADDPS(ops ...operand.Op) { ctx.VADDPS(ops...) }
|
|
|
|
// VADDPS_BCST: Add Packed Single-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VADDPS.BCST m32 xmm k xmm
//	VADDPS.BCST m32 xmm xmm
//	VADDPS.BCST m32 ymm k ymm
//	VADDPS.BCST m32 ymm ymm
//	VADDPS.BCST m32 zmm k zmm
//	VADDPS.BCST m32 zmm zmm
//
// Construct and append a VADDPS.BCST instruction to the active function.
func (c *Context) VADDPS_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VADDPS_BCST(ops...))
}

// VADDPS_BCST: Add Packed Single-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VADDPS.BCST m32 xmm k xmm
//	VADDPS.BCST m32 xmm xmm
//	VADDPS.BCST m32 ymm k ymm
//	VADDPS.BCST m32 ymm ymm
//	VADDPS.BCST m32 zmm k zmm
//	VADDPS.BCST m32 zmm zmm
//
// Construct and append a VADDPS.BCST instruction to the active function.
// Operates on the global context.
func VADDPS_BCST(ops ...operand.Op) { ctx.VADDPS_BCST(ops...) }
|
|
|
|
// VADDPS_BCST_Z: Add Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VADDPS.BCST.Z m32 xmm k xmm
//	VADDPS.BCST.Z m32 ymm k ymm
//	VADDPS.BCST.Z m32 zmm k zmm
//
// Construct and append a VADDPS.BCST.Z instruction to the active function.
func (c *Context) VADDPS_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VADDPS_BCST_Z(m, xyz, k, xyz1))
}

// VADDPS_BCST_Z: Add Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VADDPS.BCST.Z m32 xmm k xmm
//	VADDPS.BCST.Z m32 ymm k ymm
//	VADDPS.BCST.Z m32 zmm k zmm
//
// Construct and append a VADDPS.BCST.Z instruction to the active function.
// Operates on the global context.
func VADDPS_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VADDPS_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VADDPS_RD_SAE: Add Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VADDPS.RD_SAE zmm zmm k zmm
//	VADDPS.RD_SAE zmm zmm zmm
//
// Construct and append a VADDPS.RD_SAE instruction to the active function.
func (c *Context) VADDPS_RD_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VADDPS_RD_SAE(ops...))
}

// VADDPS_RD_SAE: Add Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VADDPS.RD_SAE zmm zmm k zmm
//	VADDPS.RD_SAE zmm zmm zmm
//
// Construct and append a VADDPS.RD_SAE instruction to the active function.
// Operates on the global context.
func VADDPS_RD_SAE(ops ...operand.Op) { ctx.VADDPS_RD_SAE(ops...) }
|
|
|
|
// VADDPS_RD_SAE_Z: Add Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VADDPS.RD_SAE.Z zmm zmm k zmm
//
// Construct and append a VADDPS.RD_SAE.Z instruction to the active function.
func (c *Context) VADDPS_RD_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VADDPS_RD_SAE_Z(z, z1, k, z2))
}

// VADDPS_RD_SAE_Z: Add Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VADDPS.RD_SAE.Z zmm zmm k zmm
//
// Construct and append a VADDPS.RD_SAE.Z instruction to the active function.
// Operates on the global context.
func VADDPS_RD_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VADDPS_RD_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VADDPS_RN_SAE: Add Packed Single-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VADDPS.RN_SAE zmm zmm k zmm
//	VADDPS.RN_SAE zmm zmm zmm
//
// Construct and append a VADDPS.RN_SAE instruction to the active function.
func (c *Context) VADDPS_RN_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VADDPS_RN_SAE(ops...))
}

// VADDPS_RN_SAE: Add Packed Single-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VADDPS.RN_SAE zmm zmm k zmm
//	VADDPS.RN_SAE zmm zmm zmm
//
// Construct and append a VADDPS.RN_SAE instruction to the active function.
// Operates on the global context.
func VADDPS_RN_SAE(ops ...operand.Op) { ctx.VADDPS_RN_SAE(ops...) }
|
|
|
|
// VADDPS_RN_SAE_Z: Add Packed Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VADDPS.RN_SAE.Z zmm zmm k zmm
//
// Construct and append a VADDPS.RN_SAE.Z instruction to the active function.
func (c *Context) VADDPS_RN_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VADDPS_RN_SAE_Z(z, z1, k, z2))
}

// VADDPS_RN_SAE_Z: Add Packed Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VADDPS.RN_SAE.Z zmm zmm k zmm
//
// Construct and append a VADDPS.RN_SAE.Z instruction to the active function.
// Operates on the global context.
func VADDPS_RN_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VADDPS_RN_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VADDPS_RU_SAE: Add Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VADDPS.RU_SAE zmm zmm k zmm
//	VADDPS.RU_SAE zmm zmm zmm
//
// Construct and append a VADDPS.RU_SAE instruction to the active function.
func (c *Context) VADDPS_RU_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VADDPS_RU_SAE(ops...))
}

// VADDPS_RU_SAE: Add Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VADDPS.RU_SAE zmm zmm k zmm
//	VADDPS.RU_SAE zmm zmm zmm
//
// Construct and append a VADDPS.RU_SAE instruction to the active function.
// Operates on the global context.
func VADDPS_RU_SAE(ops ...operand.Op) { ctx.VADDPS_RU_SAE(ops...) }
|
|
|
|
// VADDPS_RU_SAE_Z: Add Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VADDPS.RU_SAE.Z zmm zmm k zmm
//
// Construct and append a VADDPS.RU_SAE.Z instruction to the active function.
func (c *Context) VADDPS_RU_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VADDPS_RU_SAE_Z(z, z1, k, z2))
}

// VADDPS_RU_SAE_Z: Add Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VADDPS.RU_SAE.Z zmm zmm k zmm
//
// Construct and append a VADDPS.RU_SAE.Z instruction to the active function.
// Operates on the global context.
func VADDPS_RU_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VADDPS_RU_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VADDPS_RZ_SAE: Add Packed Single-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VADDPS.RZ_SAE zmm zmm k zmm
//	VADDPS.RZ_SAE zmm zmm zmm
//
// Construct and append a VADDPS.RZ_SAE instruction to the active function.
func (c *Context) VADDPS_RZ_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VADDPS_RZ_SAE(ops...))
}

// VADDPS_RZ_SAE: Add Packed Single-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VADDPS.RZ_SAE zmm zmm k zmm
//	VADDPS.RZ_SAE zmm zmm zmm
//
// Construct and append a VADDPS.RZ_SAE instruction to the active function.
// Operates on the global context.
func VADDPS_RZ_SAE(ops ...operand.Op) { ctx.VADDPS_RZ_SAE(ops...) }
|
|
|
|
// VADDPS_RZ_SAE_Z: Add Packed Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VADDPS.RZ_SAE.Z zmm zmm k zmm
//
// Construct and append a VADDPS.RZ_SAE.Z instruction to the active function.
func (c *Context) VADDPS_RZ_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VADDPS_RZ_SAE_Z(z, z1, k, z2))
}

// VADDPS_RZ_SAE_Z: Add Packed Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VADDPS.RZ_SAE.Z zmm zmm k zmm
//
// Construct and append a VADDPS.RZ_SAE.Z instruction to the active function.
// Operates on the global context.
func VADDPS_RZ_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VADDPS_RZ_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VADDPS_Z: Add Packed Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VADDPS.Z m128 xmm k xmm
//	VADDPS.Z m256 ymm k ymm
//	VADDPS.Z xmm xmm k xmm
//	VADDPS.Z ymm ymm k ymm
//	VADDPS.Z m512 zmm k zmm
//	VADDPS.Z zmm zmm k zmm
//
// Construct and append a VADDPS.Z instruction to the active function.
func (c *Context) VADDPS_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VADDPS_Z(mxyz, xyz, k, xyz1))
}

// VADDPS_Z: Add Packed Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VADDPS.Z m128 xmm k xmm
//	VADDPS.Z m256 ymm k ymm
//	VADDPS.Z xmm xmm k xmm
//	VADDPS.Z ymm ymm k ymm
//	VADDPS.Z m512 zmm k zmm
//	VADDPS.Z zmm zmm k zmm
//
// Construct and append a VADDPS.Z instruction to the active function.
// Operates on the global context.
func VADDPS_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VADDPS_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VADDSD: Add Scalar Double-Precision Floating-Point Values.
//
// Forms:
//
//	VADDSD m64 xmm xmm
//	VADDSD xmm xmm xmm
//	VADDSD m64 xmm k xmm
//	VADDSD xmm xmm k xmm
//
// Construct and append a VADDSD instruction to the active function.
func (c *Context) VADDSD(ops ...operand.Op) {
	c.addinstruction(x86.VADDSD(ops...))
}

// VADDSD: Add Scalar Double-Precision Floating-Point Values.
//
// Forms:
//
//	VADDSD m64 xmm xmm
//	VADDSD xmm xmm xmm
//	VADDSD m64 xmm k xmm
//	VADDSD xmm xmm k xmm
//
// Construct and append a VADDSD instruction to the active function.
// Operates on the global context.
func VADDSD(ops ...operand.Op) { ctx.VADDSD(ops...) }
|
|
|
|
// VADDSD_RD_SAE: Add Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VADDSD.RD_SAE xmm xmm k xmm
//	VADDSD.RD_SAE xmm xmm xmm
//
// Construct and append a VADDSD.RD_SAE instruction to the active function.
func (c *Context) VADDSD_RD_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VADDSD_RD_SAE(ops...))
}

// VADDSD_RD_SAE: Add Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VADDSD.RD_SAE xmm xmm k xmm
//	VADDSD.RD_SAE xmm xmm xmm
//
// Construct and append a VADDSD.RD_SAE instruction to the active function.
// Operates on the global context.
func VADDSD_RD_SAE(ops ...operand.Op) { ctx.VADDSD_RD_SAE(ops...) }
|
|
|
|
// VADDSD_RD_SAE_Z: Add Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VADDSD.RD_SAE.Z xmm xmm k xmm
//
// Construct and append a VADDSD.RD_SAE.Z instruction to the active function.
func (c *Context) VADDSD_RD_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VADDSD_RD_SAE_Z(x, x1, k, x2))
}

// VADDSD_RD_SAE_Z: Add Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VADDSD.RD_SAE.Z xmm xmm k xmm
//
// Construct and append a VADDSD.RD_SAE.Z instruction to the active function.
// Operates on the global context.
func VADDSD_RD_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VADDSD_RD_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VADDSD_RN_SAE: Add Scalar Double-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VADDSD.RN_SAE xmm xmm k xmm
//	VADDSD.RN_SAE xmm xmm xmm
//
// Construct and append a VADDSD.RN_SAE instruction to the active function.
func (c *Context) VADDSD_RN_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VADDSD_RN_SAE(ops...))
}

// VADDSD_RN_SAE: Add Scalar Double-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VADDSD.RN_SAE xmm xmm k xmm
//	VADDSD.RN_SAE xmm xmm xmm
//
// Construct and append a VADDSD.RN_SAE instruction to the active function.
// Operates on the global context.
func VADDSD_RN_SAE(ops ...operand.Op) { ctx.VADDSD_RN_SAE(ops...) }
|
|
|
|
// VADDSD_RN_SAE_Z: Add Scalar Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VADDSD.RN_SAE.Z xmm xmm k xmm
//
// Construct and append a VADDSD.RN_SAE.Z instruction to the active function.
func (c *Context) VADDSD_RN_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VADDSD_RN_SAE_Z(x, x1, k, x2))
}

// VADDSD_RN_SAE_Z: Add Scalar Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VADDSD.RN_SAE.Z xmm xmm k xmm
//
// Construct and append a VADDSD.RN_SAE.Z instruction to the active function.
// Operates on the global context.
func VADDSD_RN_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VADDSD_RN_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VADDSD_RU_SAE: Add Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VADDSD.RU_SAE xmm xmm k xmm
//	VADDSD.RU_SAE xmm xmm xmm
//
// Construct and append a VADDSD.RU_SAE instruction to the active function.
func (c *Context) VADDSD_RU_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VADDSD_RU_SAE(ops...))
}

// VADDSD_RU_SAE: Add Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VADDSD.RU_SAE xmm xmm k xmm
//	VADDSD.RU_SAE xmm xmm xmm
//
// Construct and append a VADDSD.RU_SAE instruction to the active function.
// Operates on the global context.
func VADDSD_RU_SAE(ops ...operand.Op) { ctx.VADDSD_RU_SAE(ops...) }
|
|
|
|
// VADDSD_RU_SAE_Z: Add Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VADDSD.RU_SAE.Z xmm xmm k xmm
//
// Construct and append a VADDSD.RU_SAE.Z instruction to the active function.
func (c *Context) VADDSD_RU_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VADDSD_RU_SAE_Z(x, x1, k, x2))
}

// VADDSD_RU_SAE_Z: Add Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VADDSD.RU_SAE.Z xmm xmm k xmm
//
// Construct and append a VADDSD.RU_SAE.Z instruction to the active function.
// Operates on the global context.
func VADDSD_RU_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VADDSD_RU_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VADDSD_RZ_SAE: Add Scalar Double-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VADDSD.RZ_SAE xmm xmm k xmm
//	VADDSD.RZ_SAE xmm xmm xmm
//
// Construct and append a VADDSD.RZ_SAE instruction to the active function.
func (c *Context) VADDSD_RZ_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VADDSD_RZ_SAE(ops...))
}

// VADDSD_RZ_SAE: Add Scalar Double-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VADDSD.RZ_SAE xmm xmm k xmm
//	VADDSD.RZ_SAE xmm xmm xmm
//
// Construct and append a VADDSD.RZ_SAE instruction to the active function.
// Operates on the global context.
func VADDSD_RZ_SAE(ops ...operand.Op) { ctx.VADDSD_RZ_SAE(ops...) }
|
|
|
|
// VADDSD_RZ_SAE_Z: Add Scalar Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VADDSD.RZ_SAE.Z xmm xmm k xmm
//
// Construct and append a VADDSD.RZ_SAE.Z instruction to the active function.
func (c *Context) VADDSD_RZ_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VADDSD_RZ_SAE_Z(x, x1, k, x2))
}

// VADDSD_RZ_SAE_Z: Add Scalar Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VADDSD.RZ_SAE.Z xmm xmm k xmm
//
// Construct and append a VADDSD.RZ_SAE.Z instruction to the active function.
// Operates on the global context.
func VADDSD_RZ_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VADDSD_RZ_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VADDSD_Z: Add Scalar Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VADDSD.Z m64 xmm k xmm
//	VADDSD.Z xmm xmm k xmm
//
// Construct and append a VADDSD.Z instruction to the active function.
func (c *Context) VADDSD_Z(mx, x, k, x1 operand.Op) {
	c.addinstruction(x86.VADDSD_Z(mx, x, k, x1))
}

// VADDSD_Z: Add Scalar Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VADDSD.Z m64 xmm k xmm
//	VADDSD.Z xmm xmm k xmm
//
// Construct and append a VADDSD.Z instruction to the active function.
// Operates on the global context.
func VADDSD_Z(mx, x, k, x1 operand.Op) { ctx.VADDSD_Z(mx, x, k, x1) }
|
|
|
|
// VADDSS: Add Scalar Single-Precision Floating-Point Values.
//
// Forms:
//
//	VADDSS m32 xmm xmm
//	VADDSS xmm xmm xmm
//	VADDSS m32 xmm k xmm
//	VADDSS xmm xmm k xmm
//
// Construct and append a VADDSS instruction to the active function.
func (c *Context) VADDSS(ops ...operand.Op) {
	c.addinstruction(x86.VADDSS(ops...))
}

// VADDSS: Add Scalar Single-Precision Floating-Point Values.
//
// Forms:
//
//	VADDSS m32 xmm xmm
//	VADDSS xmm xmm xmm
//	VADDSS m32 xmm k xmm
//	VADDSS xmm xmm k xmm
//
// Construct and append a VADDSS instruction to the active function.
// Operates on the global context.
func VADDSS(ops ...operand.Op) { ctx.VADDSS(ops...) }
|
|
|
|
// VADDSS_RD_SAE: Add Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VADDSS.RD_SAE xmm xmm k xmm
//	VADDSS.RD_SAE xmm xmm xmm
//
// Construct and append a VADDSS.RD_SAE instruction to the active function.
func (c *Context) VADDSS_RD_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VADDSS_RD_SAE(ops...))
}

// VADDSS_RD_SAE: Add Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VADDSS.RD_SAE xmm xmm k xmm
//	VADDSS.RD_SAE xmm xmm xmm
//
// Construct and append a VADDSS.RD_SAE instruction to the active function.
// Operates on the global context.
func VADDSS_RD_SAE(ops ...operand.Op) { ctx.VADDSS_RD_SAE(ops...) }
|
|
|
|
// VADDSS_RD_SAE_Z: Add Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VADDSS.RD_SAE.Z xmm xmm k xmm
//
// Construct and append a VADDSS.RD_SAE.Z instruction to the active function.
func (c *Context) VADDSS_RD_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VADDSS_RD_SAE_Z(x, x1, k, x2))
}

// VADDSS_RD_SAE_Z: Add Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VADDSS.RD_SAE.Z xmm xmm k xmm
//
// Construct and append a VADDSS.RD_SAE.Z instruction to the active function.
// Operates on the global context.
func VADDSS_RD_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VADDSS_RD_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VADDSS_RN_SAE: Add Scalar Single-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VADDSS.RN_SAE xmm xmm k xmm
//	VADDSS.RN_SAE xmm xmm xmm
//
// Construct and append a VADDSS.RN_SAE instruction to the active function.
func (c *Context) VADDSS_RN_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VADDSS_RN_SAE(ops...))
}

// VADDSS_RN_SAE: Add Scalar Single-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VADDSS.RN_SAE xmm xmm k xmm
//	VADDSS.RN_SAE xmm xmm xmm
//
// Construct and append a VADDSS.RN_SAE instruction to the active function.
// Operates on the global context.
func VADDSS_RN_SAE(ops ...operand.Op) { ctx.VADDSS_RN_SAE(ops...) }
|
|
|
|
// VADDSS_RN_SAE_Z: Add Scalar Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VADDSS.RN_SAE.Z xmm xmm k xmm
//
// Construct and append a VADDSS.RN_SAE.Z instruction to the active function.
func (c *Context) VADDSS_RN_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VADDSS_RN_SAE_Z(x, x1, k, x2))
}

// VADDSS_RN_SAE_Z: Add Scalar Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VADDSS.RN_SAE.Z xmm xmm k xmm
//
// Construct and append a VADDSS.RN_SAE.Z instruction to the active function.
// Operates on the global context.
func VADDSS_RN_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VADDSS_RN_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VADDSS_RU_SAE: Add Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VADDSS.RU_SAE xmm xmm k xmm
//	VADDSS.RU_SAE xmm xmm xmm
//
// Construct and append a VADDSS.RU_SAE instruction to the active function.
func (c *Context) VADDSS_RU_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VADDSS_RU_SAE(ops...))
}

// VADDSS_RU_SAE: Add Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VADDSS.RU_SAE xmm xmm k xmm
//	VADDSS.RU_SAE xmm xmm xmm
//
// Construct and append a VADDSS.RU_SAE instruction to the active function.
// Operates on the global context.
func VADDSS_RU_SAE(ops ...operand.Op) { ctx.VADDSS_RU_SAE(ops...) }
|
|
|
|
// VADDSS_RU_SAE_Z: Add Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VADDSS.RU_SAE.Z xmm xmm k xmm
//
// Construct and append a VADDSS.RU_SAE.Z instruction to the active function.
func (c *Context) VADDSS_RU_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VADDSS_RU_SAE_Z(x, x1, k, x2))
}

// VADDSS_RU_SAE_Z: Add Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VADDSS.RU_SAE.Z xmm xmm k xmm
//
// Construct and append a VADDSS.RU_SAE.Z instruction to the active function.
// Operates on the global context.
func VADDSS_RU_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VADDSS_RU_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VADDSS_RZ_SAE: Add Scalar Single-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VADDSS.RZ_SAE xmm xmm k xmm
//	VADDSS.RZ_SAE xmm xmm xmm
//
// Construct and append a VADDSS.RZ_SAE instruction to the active function.
func (c *Context) VADDSS_RZ_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VADDSS_RZ_SAE(ops...))
}

// VADDSS_RZ_SAE: Add Scalar Single-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VADDSS.RZ_SAE xmm xmm k xmm
//	VADDSS.RZ_SAE xmm xmm xmm
//
// Construct and append a VADDSS.RZ_SAE instruction to the active function.
// Operates on the global context.
func VADDSS_RZ_SAE(ops ...operand.Op) { ctx.VADDSS_RZ_SAE(ops...) }
|
|
|
|
// VADDSS_RZ_SAE_Z: Add Scalar Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VADDSS.RZ_SAE.Z xmm xmm k xmm
//
// Construct and append a VADDSS.RZ_SAE.Z instruction to the active function.
func (c *Context) VADDSS_RZ_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VADDSS_RZ_SAE_Z(x, x1, k, x2))
}

// VADDSS_RZ_SAE_Z: Add Scalar Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VADDSS.RZ_SAE.Z xmm xmm k xmm
//
// Construct and append a VADDSS.RZ_SAE.Z instruction to the active function.
// Operates on the global context.
func VADDSS_RZ_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VADDSS_RZ_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VADDSS_Z: Add Scalar Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VADDSS.Z m32 xmm k xmm
//	VADDSS.Z xmm xmm k xmm
//
// Construct and append a VADDSS.Z instruction to the active function.
func (c *Context) VADDSS_Z(mx, x, k, x1 operand.Op) {
	c.addinstruction(x86.VADDSS_Z(mx, x, k, x1))
}

// VADDSS_Z: Add Scalar Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VADDSS.Z m32 xmm k xmm
//	VADDSS.Z xmm xmm k xmm
//
// Construct and append a VADDSS.Z instruction to the active function.
// Operates on the global context.
func VADDSS_Z(mx, x, k, x1 operand.Op) { ctx.VADDSS_Z(mx, x, k, x1) }
|
|
|
|
// VADDSUBPD: Packed Double-FP Add/Subtract.
//
// Forms:
//
//	VADDSUBPD m128 xmm xmm
//	VADDSUBPD m256 ymm ymm
//	VADDSUBPD xmm xmm xmm
//	VADDSUBPD ymm ymm ymm
//
// Construct and append a VADDSUBPD instruction to the active function.
func (c *Context) VADDSUBPD(mxy, xy, xy1 operand.Op) {
	c.addinstruction(x86.VADDSUBPD(mxy, xy, xy1))
}

// VADDSUBPD: Packed Double-FP Add/Subtract.
//
// Forms:
//
//	VADDSUBPD m128 xmm xmm
//	VADDSUBPD m256 ymm ymm
//	VADDSUBPD xmm xmm xmm
//	VADDSUBPD ymm ymm ymm
//
// Construct and append a VADDSUBPD instruction to the active function.
// Operates on the global context.
func VADDSUBPD(mxy, xy, xy1 operand.Op) { ctx.VADDSUBPD(mxy, xy, xy1) }
|
|
|
|
// VADDSUBPS: Packed Single-FP Add/Subtract.
//
// Forms:
//
//	VADDSUBPS m128 xmm xmm
//	VADDSUBPS m256 ymm ymm
//	VADDSUBPS xmm xmm xmm
//	VADDSUBPS ymm ymm ymm
//
// Construct and append a VADDSUBPS instruction to the active function.
func (c *Context) VADDSUBPS(mxy, xy, xy1 operand.Op) {
	c.addinstruction(x86.VADDSUBPS(mxy, xy, xy1))
}

// VADDSUBPS: Packed Single-FP Add/Subtract.
//
// Forms:
//
//	VADDSUBPS m128 xmm xmm
//	VADDSUBPS m256 ymm ymm
//	VADDSUBPS xmm xmm xmm
//	VADDSUBPS ymm ymm ymm
//
// Construct and append a VADDSUBPS instruction to the active function.
// Operates on the global context.
func VADDSUBPS(mxy, xy, xy1 operand.Op) { ctx.VADDSUBPS(mxy, xy, xy1) }
|
|
|
|
// VAESDEC: Perform One Round of an AES Decryption Flow.
//
// Forms:
//
//	VAESDEC m128 xmm xmm
//	VAESDEC xmm xmm xmm
//
// Construct and append a VAESDEC instruction to the active function.
func (c *Context) VAESDEC(mx, x, x1 operand.Op) {
	c.addinstruction(x86.VAESDEC(mx, x, x1))
}

// VAESDEC: Perform One Round of an AES Decryption Flow.
//
// Forms:
//
//	VAESDEC m128 xmm xmm
//	VAESDEC xmm xmm xmm
//
// Construct and append a VAESDEC instruction to the active function.
// Operates on the global context.
func VAESDEC(mx, x, x1 operand.Op) { ctx.VAESDEC(mx, x, x1) }
|
|
|
|
// VAESDECLAST: Perform Last Round of an AES Decryption Flow.
//
// Forms:
//
//	VAESDECLAST m128 xmm xmm
//	VAESDECLAST xmm xmm xmm
//
// Construct and append a VAESDECLAST instruction to the active function.
func (c *Context) VAESDECLAST(mx, x, x1 operand.Op) {
	c.addinstruction(x86.VAESDECLAST(mx, x, x1))
}

// VAESDECLAST: Perform Last Round of an AES Decryption Flow.
//
// Forms:
//
//	VAESDECLAST m128 xmm xmm
//	VAESDECLAST xmm xmm xmm
//
// Construct and append a VAESDECLAST instruction to the active function.
// Operates on the global context.
func VAESDECLAST(mx, x, x1 operand.Op) { ctx.VAESDECLAST(mx, x, x1) }
|
|
|
|
// VAESENC: Perform One Round of an AES Encryption Flow.
//
// Forms:
//
//	VAESENC m128 xmm xmm
//	VAESENC xmm xmm xmm
//
// Construct and append a VAESENC instruction to the active function.
func (c *Context) VAESENC(mx, x, x1 operand.Op) {
	c.addinstruction(x86.VAESENC(mx, x, x1))
}

// VAESENC: Perform One Round of an AES Encryption Flow.
//
// Forms:
//
//	VAESENC m128 xmm xmm
//	VAESENC xmm xmm xmm
//
// Construct and append a VAESENC instruction to the active function.
// Operates on the global context.
func VAESENC(mx, x, x1 operand.Op) { ctx.VAESENC(mx, x, x1) }
|
|
|
|
// VAESENCLAST: Perform Last Round of an AES Encryption Flow.
//
// Forms:
//
// VAESENCLAST m128 xmm xmm
// VAESENCLAST xmm xmm xmm
//
// Construct and append a VAESENCLAST instruction to the active function.
func (c *Context) VAESENCLAST(mx, x, x1 operand.Op) {
	c.addinstruction(x86.VAESENCLAST(mx, x, x1))
}

// VAESENCLAST: Perform Last Round of an AES Encryption Flow.
//
// Forms:
//
// VAESENCLAST m128 xmm xmm
// VAESENCLAST xmm xmm xmm
//
// Construct and append a VAESENCLAST instruction to the active function.
// Operates on the global context.
func VAESENCLAST(mx, x, x1 operand.Op) { ctx.VAESENCLAST(mx, x, x1) }
|
|
|
|
// VAESIMC: Perform the AES InvMixColumn Transformation.
//
// Forms:
//
// VAESIMC m128 xmm
// VAESIMC xmm xmm
//
// Construct and append a VAESIMC instruction to the active function.
func (c *Context) VAESIMC(mx, x operand.Op) {
	c.addinstruction(x86.VAESIMC(mx, x))
}

// VAESIMC: Perform the AES InvMixColumn Transformation.
//
// Forms:
//
// VAESIMC m128 xmm
// VAESIMC xmm xmm
//
// Construct and append a VAESIMC instruction to the active function.
// Operates on the global context.
func VAESIMC(mx, x operand.Op) { ctx.VAESIMC(mx, x) }
|
|
|
|
// VAESKEYGENASSIST: AES Round Key Generation Assist.
//
// Forms:
//
// VAESKEYGENASSIST imm8 m128 xmm
// VAESKEYGENASSIST imm8 xmm xmm
//
// Construct and append a VAESKEYGENASSIST instruction to the active function.
func (c *Context) VAESKEYGENASSIST(i, mx, x operand.Op) {
	c.addinstruction(x86.VAESKEYGENASSIST(i, mx, x))
}

// VAESKEYGENASSIST: AES Round Key Generation Assist.
//
// Forms:
//
// VAESKEYGENASSIST imm8 m128 xmm
// VAESKEYGENASSIST imm8 xmm xmm
//
// Construct and append a VAESKEYGENASSIST instruction to the active function.
// Operates on the global context.
func VAESKEYGENASSIST(i, mx, x operand.Op) { ctx.VAESKEYGENASSIST(i, mx, x) }
|
|
|
|
// VALIGND: Align Doubleword Vectors.
//
// Forms:
//
// VALIGND imm8 m128 xmm k xmm
// VALIGND imm8 m128 xmm xmm
// VALIGND imm8 m256 ymm k ymm
// VALIGND imm8 m256 ymm ymm
// VALIGND imm8 xmm xmm k xmm
// VALIGND imm8 xmm xmm xmm
// VALIGND imm8 ymm ymm k ymm
// VALIGND imm8 ymm ymm ymm
// VALIGND imm8 m512 zmm k zmm
// VALIGND imm8 m512 zmm zmm
// VALIGND imm8 zmm zmm k zmm
// VALIGND imm8 zmm zmm zmm
//
// Construct and append a VALIGND instruction to the active function.
func (c *Context) VALIGND(ops ...operand.Op) {
	c.addinstruction(x86.VALIGND(ops...))
}

// VALIGND: Align Doubleword Vectors.
//
// Forms:
//
// VALIGND imm8 m128 xmm k xmm
// VALIGND imm8 m128 xmm xmm
// VALIGND imm8 m256 ymm k ymm
// VALIGND imm8 m256 ymm ymm
// VALIGND imm8 xmm xmm k xmm
// VALIGND imm8 xmm xmm xmm
// VALIGND imm8 ymm ymm k ymm
// VALIGND imm8 ymm ymm ymm
// VALIGND imm8 m512 zmm k zmm
// VALIGND imm8 m512 zmm zmm
// VALIGND imm8 zmm zmm k zmm
// VALIGND imm8 zmm zmm zmm
//
// Construct and append a VALIGND instruction to the active function.
// Operates on the global context.
func VALIGND(ops ...operand.Op) { ctx.VALIGND(ops...) }
|
|
|
|
// VALIGND_BCST: Align Doubleword Vectors (Broadcast).
//
// Forms:
//
// VALIGND.BCST imm8 m32 xmm k xmm
// VALIGND.BCST imm8 m32 xmm xmm
// VALIGND.BCST imm8 m32 ymm k ymm
// VALIGND.BCST imm8 m32 ymm ymm
// VALIGND.BCST imm8 m32 zmm k zmm
// VALIGND.BCST imm8 m32 zmm zmm
//
// Construct and append a VALIGND.BCST instruction to the active function.
func (c *Context) VALIGND_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VALIGND_BCST(ops...))
}

// VALIGND_BCST: Align Doubleword Vectors (Broadcast).
//
// Forms:
//
// VALIGND.BCST imm8 m32 xmm k xmm
// VALIGND.BCST imm8 m32 xmm xmm
// VALIGND.BCST imm8 m32 ymm k ymm
// VALIGND.BCST imm8 m32 ymm ymm
// VALIGND.BCST imm8 m32 zmm k zmm
// VALIGND.BCST imm8 m32 zmm zmm
//
// Construct and append a VALIGND.BCST instruction to the active function.
// Operates on the global context.
func VALIGND_BCST(ops ...operand.Op) { ctx.VALIGND_BCST(ops...) }
|
|
|
|
// VALIGND_BCST_Z: Align Doubleword Vectors (Broadcast, Zeroing Masking).
//
// Forms:
//
// VALIGND.BCST.Z imm8 m32 xmm k xmm
// VALIGND.BCST.Z imm8 m32 ymm k ymm
// VALIGND.BCST.Z imm8 m32 zmm k zmm
//
// Construct and append a VALIGND.BCST.Z instruction to the active function.
func (c *Context) VALIGND_BCST_Z(i, m, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VALIGND_BCST_Z(i, m, xyz, k, xyz1))
}

// VALIGND_BCST_Z: Align Doubleword Vectors (Broadcast, Zeroing Masking).
//
// Forms:
//
// VALIGND.BCST.Z imm8 m32 xmm k xmm
// VALIGND.BCST.Z imm8 m32 ymm k ymm
// VALIGND.BCST.Z imm8 m32 zmm k zmm
//
// Construct and append a VALIGND.BCST.Z instruction to the active function.
// Operates on the global context.
func VALIGND_BCST_Z(i, m, xyz, k, xyz1 operand.Op) { ctx.VALIGND_BCST_Z(i, m, xyz, k, xyz1) }
|
|
|
|
// VALIGND_Z: Align Doubleword Vectors (Zeroing Masking).
//
// Forms:
//
// VALIGND.Z imm8 m128 xmm k xmm
// VALIGND.Z imm8 m256 ymm k ymm
// VALIGND.Z imm8 xmm xmm k xmm
// VALIGND.Z imm8 ymm ymm k ymm
// VALIGND.Z imm8 m512 zmm k zmm
// VALIGND.Z imm8 zmm zmm k zmm
//
// Construct and append a VALIGND.Z instruction to the active function.
func (c *Context) VALIGND_Z(i, mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VALIGND_Z(i, mxyz, xyz, k, xyz1))
}

// VALIGND_Z: Align Doubleword Vectors (Zeroing Masking).
//
// Forms:
//
// VALIGND.Z imm8 m128 xmm k xmm
// VALIGND.Z imm8 m256 ymm k ymm
// VALIGND.Z imm8 xmm xmm k xmm
// VALIGND.Z imm8 ymm ymm k ymm
// VALIGND.Z imm8 m512 zmm k zmm
// VALIGND.Z imm8 zmm zmm k zmm
//
// Construct and append a VALIGND.Z instruction to the active function.
// Operates on the global context.
func VALIGND_Z(i, mxyz, xyz, k, xyz1 operand.Op) { ctx.VALIGND_Z(i, mxyz, xyz, k, xyz1) }
|
|
|
|
// VALIGNQ: Align Quadword Vectors.
//
// Forms:
//
// VALIGNQ imm8 m128 xmm k xmm
// VALIGNQ imm8 m128 xmm xmm
// VALIGNQ imm8 m256 ymm k ymm
// VALIGNQ imm8 m256 ymm ymm
// VALIGNQ imm8 xmm xmm k xmm
// VALIGNQ imm8 xmm xmm xmm
// VALIGNQ imm8 ymm ymm k ymm
// VALIGNQ imm8 ymm ymm ymm
// VALIGNQ imm8 m512 zmm k zmm
// VALIGNQ imm8 m512 zmm zmm
// VALIGNQ imm8 zmm zmm k zmm
// VALIGNQ imm8 zmm zmm zmm
//
// Construct and append a VALIGNQ instruction to the active function.
func (c *Context) VALIGNQ(ops ...operand.Op) {
	c.addinstruction(x86.VALIGNQ(ops...))
}

// VALIGNQ: Align Quadword Vectors.
//
// Forms:
//
// VALIGNQ imm8 m128 xmm k xmm
// VALIGNQ imm8 m128 xmm xmm
// VALIGNQ imm8 m256 ymm k ymm
// VALIGNQ imm8 m256 ymm ymm
// VALIGNQ imm8 xmm xmm k xmm
// VALIGNQ imm8 xmm xmm xmm
// VALIGNQ imm8 ymm ymm k ymm
// VALIGNQ imm8 ymm ymm ymm
// VALIGNQ imm8 m512 zmm k zmm
// VALIGNQ imm8 m512 zmm zmm
// VALIGNQ imm8 zmm zmm k zmm
// VALIGNQ imm8 zmm zmm zmm
//
// Construct and append a VALIGNQ instruction to the active function.
// Operates on the global context.
func VALIGNQ(ops ...operand.Op) { ctx.VALIGNQ(ops...) }
|
|
|
|
// VALIGNQ_BCST: Align Quadword Vectors (Broadcast).
//
// Forms:
//
// VALIGNQ.BCST imm8 m64 xmm k xmm
// VALIGNQ.BCST imm8 m64 xmm xmm
// VALIGNQ.BCST imm8 m64 ymm k ymm
// VALIGNQ.BCST imm8 m64 ymm ymm
// VALIGNQ.BCST imm8 m64 zmm k zmm
// VALIGNQ.BCST imm8 m64 zmm zmm
//
// Construct and append a VALIGNQ.BCST instruction to the active function.
func (c *Context) VALIGNQ_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VALIGNQ_BCST(ops...))
}

// VALIGNQ_BCST: Align Quadword Vectors (Broadcast).
//
// Forms:
//
// VALIGNQ.BCST imm8 m64 xmm k xmm
// VALIGNQ.BCST imm8 m64 xmm xmm
// VALIGNQ.BCST imm8 m64 ymm k ymm
// VALIGNQ.BCST imm8 m64 ymm ymm
// VALIGNQ.BCST imm8 m64 zmm k zmm
// VALIGNQ.BCST imm8 m64 zmm zmm
//
// Construct and append a VALIGNQ.BCST instruction to the active function.
// Operates on the global context.
func VALIGNQ_BCST(ops ...operand.Op) { ctx.VALIGNQ_BCST(ops...) }
|
|
|
|
// VALIGNQ_BCST_Z: Align Quadword Vectors (Broadcast, Zeroing Masking).
//
// Forms:
//
// VALIGNQ.BCST.Z imm8 m64 xmm k xmm
// VALIGNQ.BCST.Z imm8 m64 ymm k ymm
// VALIGNQ.BCST.Z imm8 m64 zmm k zmm
//
// Construct and append a VALIGNQ.BCST.Z instruction to the active function.
func (c *Context) VALIGNQ_BCST_Z(i, m, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VALIGNQ_BCST_Z(i, m, xyz, k, xyz1))
}

// VALIGNQ_BCST_Z: Align Quadword Vectors (Broadcast, Zeroing Masking).
//
// Forms:
//
// VALIGNQ.BCST.Z imm8 m64 xmm k xmm
// VALIGNQ.BCST.Z imm8 m64 ymm k ymm
// VALIGNQ.BCST.Z imm8 m64 zmm k zmm
//
// Construct and append a VALIGNQ.BCST.Z instruction to the active function.
// Operates on the global context.
func VALIGNQ_BCST_Z(i, m, xyz, k, xyz1 operand.Op) { ctx.VALIGNQ_BCST_Z(i, m, xyz, k, xyz1) }
|
|
|
|
// VALIGNQ_Z: Align Quadword Vectors (Zeroing Masking).
//
// Forms:
//
// VALIGNQ.Z imm8 m128 xmm k xmm
// VALIGNQ.Z imm8 m256 ymm k ymm
// VALIGNQ.Z imm8 xmm xmm k xmm
// VALIGNQ.Z imm8 ymm ymm k ymm
// VALIGNQ.Z imm8 m512 zmm k zmm
// VALIGNQ.Z imm8 zmm zmm k zmm
//
// Construct and append a VALIGNQ.Z instruction to the active function.
func (c *Context) VALIGNQ_Z(i, mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VALIGNQ_Z(i, mxyz, xyz, k, xyz1))
}

// VALIGNQ_Z: Align Quadword Vectors (Zeroing Masking).
//
// Forms:
//
// VALIGNQ.Z imm8 m128 xmm k xmm
// VALIGNQ.Z imm8 m256 ymm k ymm
// VALIGNQ.Z imm8 xmm xmm k xmm
// VALIGNQ.Z imm8 ymm ymm k ymm
// VALIGNQ.Z imm8 m512 zmm k zmm
// VALIGNQ.Z imm8 zmm zmm k zmm
//
// Construct and append a VALIGNQ.Z instruction to the active function.
// Operates on the global context.
func VALIGNQ_Z(i, mxyz, xyz, k, xyz1 operand.Op) { ctx.VALIGNQ_Z(i, mxyz, xyz, k, xyz1) }
|
|
|
|
// VANDNPD: Bitwise Logical AND NOT of Packed Double-Precision Floating-Point Values.
//
// Forms:
//
// VANDNPD m128 xmm xmm
// VANDNPD m256 ymm ymm
// VANDNPD xmm xmm xmm
// VANDNPD ymm ymm ymm
// VANDNPD m128 xmm k xmm
// VANDNPD m256 ymm k ymm
// VANDNPD xmm xmm k xmm
// VANDNPD ymm ymm k ymm
// VANDNPD m512 zmm k zmm
// VANDNPD m512 zmm zmm
// VANDNPD zmm zmm k zmm
// VANDNPD zmm zmm zmm
//
// Construct and append a VANDNPD instruction to the active function.
func (c *Context) VANDNPD(ops ...operand.Op) {
	c.addinstruction(x86.VANDNPD(ops...))
}

// VANDNPD: Bitwise Logical AND NOT of Packed Double-Precision Floating-Point Values.
//
// Forms:
//
// VANDNPD m128 xmm xmm
// VANDNPD m256 ymm ymm
// VANDNPD xmm xmm xmm
// VANDNPD ymm ymm ymm
// VANDNPD m128 xmm k xmm
// VANDNPD m256 ymm k ymm
// VANDNPD xmm xmm k xmm
// VANDNPD ymm ymm k ymm
// VANDNPD m512 zmm k zmm
// VANDNPD m512 zmm zmm
// VANDNPD zmm zmm k zmm
// VANDNPD zmm zmm zmm
//
// Construct and append a VANDNPD instruction to the active function.
// Operates on the global context.
func VANDNPD(ops ...operand.Op) { ctx.VANDNPD(ops...) }
|
|
|
|
// VANDNPD_BCST: Bitwise Logical AND NOT of Packed Double-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
// VANDNPD.BCST m64 xmm k xmm
// VANDNPD.BCST m64 xmm xmm
// VANDNPD.BCST m64 ymm k ymm
// VANDNPD.BCST m64 ymm ymm
// VANDNPD.BCST m64 zmm k zmm
// VANDNPD.BCST m64 zmm zmm
//
// Construct and append a VANDNPD.BCST instruction to the active function.
func (c *Context) VANDNPD_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VANDNPD_BCST(ops...))
}

// VANDNPD_BCST: Bitwise Logical AND NOT of Packed Double-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
// VANDNPD.BCST m64 xmm k xmm
// VANDNPD.BCST m64 xmm xmm
// VANDNPD.BCST m64 ymm k ymm
// VANDNPD.BCST m64 ymm ymm
// VANDNPD.BCST m64 zmm k zmm
// VANDNPD.BCST m64 zmm zmm
//
// Construct and append a VANDNPD.BCST instruction to the active function.
// Operates on the global context.
func VANDNPD_BCST(ops ...operand.Op) { ctx.VANDNPD_BCST(ops...) }
|
|
|
|
// VANDNPD_BCST_Z: Bitwise Logical AND NOT of Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
// VANDNPD.BCST.Z m64 xmm k xmm
// VANDNPD.BCST.Z m64 ymm k ymm
// VANDNPD.BCST.Z m64 zmm k zmm
//
// Construct and append a VANDNPD.BCST.Z instruction to the active function.
func (c *Context) VANDNPD_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VANDNPD_BCST_Z(m, xyz, k, xyz1))
}

// VANDNPD_BCST_Z: Bitwise Logical AND NOT of Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
// VANDNPD.BCST.Z m64 xmm k xmm
// VANDNPD.BCST.Z m64 ymm k ymm
// VANDNPD.BCST.Z m64 zmm k zmm
//
// Construct and append a VANDNPD.BCST.Z instruction to the active function.
// Operates on the global context.
func VANDNPD_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VANDNPD_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VANDNPD_Z: Bitwise Logical AND NOT of Packed Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
// VANDNPD.Z m128 xmm k xmm
// VANDNPD.Z m256 ymm k ymm
// VANDNPD.Z xmm xmm k xmm
// VANDNPD.Z ymm ymm k ymm
// VANDNPD.Z m512 zmm k zmm
// VANDNPD.Z zmm zmm k zmm
//
// Construct and append a VANDNPD.Z instruction to the active function.
func (c *Context) VANDNPD_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VANDNPD_Z(mxyz, xyz, k, xyz1))
}

// VANDNPD_Z: Bitwise Logical AND NOT of Packed Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
// VANDNPD.Z m128 xmm k xmm
// VANDNPD.Z m256 ymm k ymm
// VANDNPD.Z xmm xmm k xmm
// VANDNPD.Z ymm ymm k ymm
// VANDNPD.Z m512 zmm k zmm
// VANDNPD.Z zmm zmm k zmm
//
// Construct and append a VANDNPD.Z instruction to the active function.
// Operates on the global context.
func VANDNPD_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VANDNPD_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VANDNPS: Bitwise Logical AND NOT of Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// VANDNPS m128 xmm xmm
// VANDNPS m256 ymm ymm
// VANDNPS xmm xmm xmm
// VANDNPS ymm ymm ymm
// VANDNPS m128 xmm k xmm
// VANDNPS m256 ymm k ymm
// VANDNPS xmm xmm k xmm
// VANDNPS ymm ymm k ymm
// VANDNPS m512 zmm k zmm
// VANDNPS m512 zmm zmm
// VANDNPS zmm zmm k zmm
// VANDNPS zmm zmm zmm
//
// Construct and append a VANDNPS instruction to the active function.
func (c *Context) VANDNPS(ops ...operand.Op) {
	c.addinstruction(x86.VANDNPS(ops...))
}

// VANDNPS: Bitwise Logical AND NOT of Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// VANDNPS m128 xmm xmm
// VANDNPS m256 ymm ymm
// VANDNPS xmm xmm xmm
// VANDNPS ymm ymm ymm
// VANDNPS m128 xmm k xmm
// VANDNPS m256 ymm k ymm
// VANDNPS xmm xmm k xmm
// VANDNPS ymm ymm k ymm
// VANDNPS m512 zmm k zmm
// VANDNPS m512 zmm zmm
// VANDNPS zmm zmm k zmm
// VANDNPS zmm zmm zmm
//
// Construct and append a VANDNPS instruction to the active function.
// Operates on the global context.
func VANDNPS(ops ...operand.Op) { ctx.VANDNPS(ops...) }
|
|
|
|
// VANDNPS_BCST: Bitwise Logical AND NOT of Packed Single-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
// VANDNPS.BCST m32 xmm k xmm
// VANDNPS.BCST m32 xmm xmm
// VANDNPS.BCST m32 ymm k ymm
// VANDNPS.BCST m32 ymm ymm
// VANDNPS.BCST m32 zmm k zmm
// VANDNPS.BCST m32 zmm zmm
//
// Construct and append a VANDNPS.BCST instruction to the active function.
func (c *Context) VANDNPS_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VANDNPS_BCST(ops...))
}

// VANDNPS_BCST: Bitwise Logical AND NOT of Packed Single-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
// VANDNPS.BCST m32 xmm k xmm
// VANDNPS.BCST m32 xmm xmm
// VANDNPS.BCST m32 ymm k ymm
// VANDNPS.BCST m32 ymm ymm
// VANDNPS.BCST m32 zmm k zmm
// VANDNPS.BCST m32 zmm zmm
//
// Construct and append a VANDNPS.BCST instruction to the active function.
// Operates on the global context.
func VANDNPS_BCST(ops ...operand.Op) { ctx.VANDNPS_BCST(ops...) }
|
|
|
|
// VANDNPS_BCST_Z: Bitwise Logical AND NOT of Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
// VANDNPS.BCST.Z m32 xmm k xmm
// VANDNPS.BCST.Z m32 ymm k ymm
// VANDNPS.BCST.Z m32 zmm k zmm
//
// Construct and append a VANDNPS.BCST.Z instruction to the active function.
func (c *Context) VANDNPS_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VANDNPS_BCST_Z(m, xyz, k, xyz1))
}

// VANDNPS_BCST_Z: Bitwise Logical AND NOT of Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
// VANDNPS.BCST.Z m32 xmm k xmm
// VANDNPS.BCST.Z m32 ymm k ymm
// VANDNPS.BCST.Z m32 zmm k zmm
//
// Construct and append a VANDNPS.BCST.Z instruction to the active function.
// Operates on the global context.
func VANDNPS_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VANDNPS_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VANDNPS_Z: Bitwise Logical AND NOT of Packed Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
// VANDNPS.Z m128 xmm k xmm
// VANDNPS.Z m256 ymm k ymm
// VANDNPS.Z xmm xmm k xmm
// VANDNPS.Z ymm ymm k ymm
// VANDNPS.Z m512 zmm k zmm
// VANDNPS.Z zmm zmm k zmm
//
// Construct and append a VANDNPS.Z instruction to the active function.
func (c *Context) VANDNPS_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VANDNPS_Z(mxyz, xyz, k, xyz1))
}

// VANDNPS_Z: Bitwise Logical AND NOT of Packed Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
// VANDNPS.Z m128 xmm k xmm
// VANDNPS.Z m256 ymm k ymm
// VANDNPS.Z xmm xmm k xmm
// VANDNPS.Z ymm ymm k ymm
// VANDNPS.Z m512 zmm k zmm
// VANDNPS.Z zmm zmm k zmm
//
// Construct and append a VANDNPS.Z instruction to the active function.
// Operates on the global context.
func VANDNPS_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VANDNPS_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VANDPD: Bitwise Logical AND of Packed Double-Precision Floating-Point Values.
//
// Forms:
//
// VANDPD m128 xmm xmm
// VANDPD m256 ymm ymm
// VANDPD xmm xmm xmm
// VANDPD ymm ymm ymm
// VANDPD m128 xmm k xmm
// VANDPD m256 ymm k ymm
// VANDPD xmm xmm k xmm
// VANDPD ymm ymm k ymm
// VANDPD m512 zmm k zmm
// VANDPD m512 zmm zmm
// VANDPD zmm zmm k zmm
// VANDPD zmm zmm zmm
//
// Construct and append a VANDPD instruction to the active function.
func (c *Context) VANDPD(ops ...operand.Op) {
	c.addinstruction(x86.VANDPD(ops...))
}

// VANDPD: Bitwise Logical AND of Packed Double-Precision Floating-Point Values.
//
// Forms:
//
// VANDPD m128 xmm xmm
// VANDPD m256 ymm ymm
// VANDPD xmm xmm xmm
// VANDPD ymm ymm ymm
// VANDPD m128 xmm k xmm
// VANDPD m256 ymm k ymm
// VANDPD xmm xmm k xmm
// VANDPD ymm ymm k ymm
// VANDPD m512 zmm k zmm
// VANDPD m512 zmm zmm
// VANDPD zmm zmm k zmm
// VANDPD zmm zmm zmm
//
// Construct and append a VANDPD instruction to the active function.
// Operates on the global context.
func VANDPD(ops ...operand.Op) { ctx.VANDPD(ops...) }
|
|
|
|
// VANDPD_BCST: Bitwise Logical AND of Packed Double-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
// VANDPD.BCST m64 xmm k xmm
// VANDPD.BCST m64 xmm xmm
// VANDPD.BCST m64 ymm k ymm
// VANDPD.BCST m64 ymm ymm
// VANDPD.BCST m64 zmm k zmm
// VANDPD.BCST m64 zmm zmm
//
// Construct and append a VANDPD.BCST instruction to the active function.
func (c *Context) VANDPD_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VANDPD_BCST(ops...))
}

// VANDPD_BCST: Bitwise Logical AND of Packed Double-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
// VANDPD.BCST m64 xmm k xmm
// VANDPD.BCST m64 xmm xmm
// VANDPD.BCST m64 ymm k ymm
// VANDPD.BCST m64 ymm ymm
// VANDPD.BCST m64 zmm k zmm
// VANDPD.BCST m64 zmm zmm
//
// Construct and append a VANDPD.BCST instruction to the active function.
// Operates on the global context.
func VANDPD_BCST(ops ...operand.Op) { ctx.VANDPD_BCST(ops...) }
|
|
|
|
// VANDPD_BCST_Z: Bitwise Logical AND of Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
// VANDPD.BCST.Z m64 xmm k xmm
// VANDPD.BCST.Z m64 ymm k ymm
// VANDPD.BCST.Z m64 zmm k zmm
//
// Construct and append a VANDPD.BCST.Z instruction to the active function.
func (c *Context) VANDPD_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VANDPD_BCST_Z(m, xyz, k, xyz1))
}

// VANDPD_BCST_Z: Bitwise Logical AND of Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
// VANDPD.BCST.Z m64 xmm k xmm
// VANDPD.BCST.Z m64 ymm k ymm
// VANDPD.BCST.Z m64 zmm k zmm
//
// Construct and append a VANDPD.BCST.Z instruction to the active function.
// Operates on the global context.
func VANDPD_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VANDPD_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VANDPD_Z: Bitwise Logical AND of Packed Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
// VANDPD.Z m128 xmm k xmm
// VANDPD.Z m256 ymm k ymm
// VANDPD.Z xmm xmm k xmm
// VANDPD.Z ymm ymm k ymm
// VANDPD.Z m512 zmm k zmm
// VANDPD.Z zmm zmm k zmm
//
// Construct and append a VANDPD.Z instruction to the active function.
func (c *Context) VANDPD_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VANDPD_Z(mxyz, xyz, k, xyz1))
}

// VANDPD_Z: Bitwise Logical AND of Packed Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
// VANDPD.Z m128 xmm k xmm
// VANDPD.Z m256 ymm k ymm
// VANDPD.Z xmm xmm k xmm
// VANDPD.Z ymm ymm k ymm
// VANDPD.Z m512 zmm k zmm
// VANDPD.Z zmm zmm k zmm
//
// Construct and append a VANDPD.Z instruction to the active function.
// Operates on the global context.
func VANDPD_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VANDPD_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VANDPS: Bitwise Logical AND of Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// VANDPS m128 xmm xmm
// VANDPS m256 ymm ymm
// VANDPS xmm xmm xmm
// VANDPS ymm ymm ymm
// VANDPS m128 xmm k xmm
// VANDPS m256 ymm k ymm
// VANDPS xmm xmm k xmm
// VANDPS ymm ymm k ymm
// VANDPS m512 zmm k zmm
// VANDPS m512 zmm zmm
// VANDPS zmm zmm k zmm
// VANDPS zmm zmm zmm
//
// Construct and append a VANDPS instruction to the active function.
func (c *Context) VANDPS(ops ...operand.Op) {
	c.addinstruction(x86.VANDPS(ops...))
}

// VANDPS: Bitwise Logical AND of Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// VANDPS m128 xmm xmm
// VANDPS m256 ymm ymm
// VANDPS xmm xmm xmm
// VANDPS ymm ymm ymm
// VANDPS m128 xmm k xmm
// VANDPS m256 ymm k ymm
// VANDPS xmm xmm k xmm
// VANDPS ymm ymm k ymm
// VANDPS m512 zmm k zmm
// VANDPS m512 zmm zmm
// VANDPS zmm zmm k zmm
// VANDPS zmm zmm zmm
//
// Construct and append a VANDPS instruction to the active function.
// Operates on the global context.
func VANDPS(ops ...operand.Op) { ctx.VANDPS(ops...) }
|
|
|
|
// VANDPS_BCST: Bitwise Logical AND of Packed Single-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
// VANDPS.BCST m32 xmm k xmm
// VANDPS.BCST m32 xmm xmm
// VANDPS.BCST m32 ymm k ymm
// VANDPS.BCST m32 ymm ymm
// VANDPS.BCST m32 zmm k zmm
// VANDPS.BCST m32 zmm zmm
//
// Construct and append a VANDPS.BCST instruction to the active function.
func (c *Context) VANDPS_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VANDPS_BCST(ops...))
}

// VANDPS_BCST: Bitwise Logical AND of Packed Single-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
// VANDPS.BCST m32 xmm k xmm
// VANDPS.BCST m32 xmm xmm
// VANDPS.BCST m32 ymm k ymm
// VANDPS.BCST m32 ymm ymm
// VANDPS.BCST m32 zmm k zmm
// VANDPS.BCST m32 zmm zmm
//
// Construct and append a VANDPS.BCST instruction to the active function.
// Operates on the global context.
func VANDPS_BCST(ops ...operand.Op) { ctx.VANDPS_BCST(ops...) }
|
|
|
|
// VANDPS_BCST_Z: Bitwise Logical AND of Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
// VANDPS.BCST.Z m32 xmm k xmm
// VANDPS.BCST.Z m32 ymm k ymm
// VANDPS.BCST.Z m32 zmm k zmm
//
// Construct and append a VANDPS.BCST.Z instruction to the active function.
func (c *Context) VANDPS_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VANDPS_BCST_Z(m, xyz, k, xyz1))
}

// VANDPS_BCST_Z: Bitwise Logical AND of Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
// VANDPS.BCST.Z m32 xmm k xmm
// VANDPS.BCST.Z m32 ymm k ymm
// VANDPS.BCST.Z m32 zmm k zmm
//
// Construct and append a VANDPS.BCST.Z instruction to the active function.
// Operates on the global context.
func VANDPS_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VANDPS_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VANDPS_Z: Bitwise Logical AND of Packed Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
// VANDPS.Z m128 xmm k xmm
// VANDPS.Z m256 ymm k ymm
// VANDPS.Z xmm xmm k xmm
// VANDPS.Z ymm ymm k ymm
// VANDPS.Z m512 zmm k zmm
// VANDPS.Z zmm zmm k zmm
//
// Construct and append a VANDPS.Z instruction to the active function.
func (c *Context) VANDPS_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VANDPS_Z(mxyz, xyz, k, xyz1))
}

// VANDPS_Z: Bitwise Logical AND of Packed Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
// VANDPS.Z m128 xmm k xmm
// VANDPS.Z m256 ymm k ymm
// VANDPS.Z xmm xmm k xmm
// VANDPS.Z ymm ymm k ymm
// VANDPS.Z m512 zmm k zmm
// VANDPS.Z zmm zmm k zmm
//
// Construct and append a VANDPS.Z instruction to the active function.
// Operates on the global context.
func VANDPS_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VANDPS_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VBLENDMPD: Blend Packed Double-Precision Floating-Point Vectors Using an OpMask Control.
//
// Forms:
//
// VBLENDMPD m128 xmm k xmm
// VBLENDMPD m128 xmm xmm
// VBLENDMPD m256 ymm k ymm
// VBLENDMPD m256 ymm ymm
// VBLENDMPD xmm xmm k xmm
// VBLENDMPD xmm xmm xmm
// VBLENDMPD ymm ymm k ymm
// VBLENDMPD ymm ymm ymm
// VBLENDMPD m512 zmm k zmm
// VBLENDMPD m512 zmm zmm
// VBLENDMPD zmm zmm k zmm
// VBLENDMPD zmm zmm zmm
//
// Construct and append a VBLENDMPD instruction to the active function.
func (c *Context) VBLENDMPD(ops ...operand.Op) {
	c.addinstruction(x86.VBLENDMPD(ops...))
}

// VBLENDMPD: Blend Packed Double-Precision Floating-Point Vectors Using an OpMask Control.
//
// Forms:
//
// VBLENDMPD m128 xmm k xmm
// VBLENDMPD m128 xmm xmm
// VBLENDMPD m256 ymm k ymm
// VBLENDMPD m256 ymm ymm
// VBLENDMPD xmm xmm k xmm
// VBLENDMPD xmm xmm xmm
// VBLENDMPD ymm ymm k ymm
// VBLENDMPD ymm ymm ymm
// VBLENDMPD m512 zmm k zmm
// VBLENDMPD m512 zmm zmm
// VBLENDMPD zmm zmm k zmm
// VBLENDMPD zmm zmm zmm
//
// Construct and append a VBLENDMPD instruction to the active function.
// Operates on the global context.
func VBLENDMPD(ops ...operand.Op) { ctx.VBLENDMPD(ops...) }
|
|
|
|
// VBLENDMPD_BCST: Blend Packed Double-Precision Floating-Point Vectors Using an OpMask Control (Broadcast).
//
// Forms:
//
// VBLENDMPD.BCST m64 xmm k xmm
// VBLENDMPD.BCST m64 xmm xmm
// VBLENDMPD.BCST m64 ymm k ymm
// VBLENDMPD.BCST m64 ymm ymm
// VBLENDMPD.BCST m64 zmm k zmm
// VBLENDMPD.BCST m64 zmm zmm
//
// Construct and append a VBLENDMPD.BCST instruction to the active function.
func (c *Context) VBLENDMPD_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VBLENDMPD_BCST(ops...))
}

// VBLENDMPD_BCST: Blend Packed Double-Precision Floating-Point Vectors Using an OpMask Control (Broadcast).
//
// Forms:
//
// VBLENDMPD.BCST m64 xmm k xmm
// VBLENDMPD.BCST m64 xmm xmm
// VBLENDMPD.BCST m64 ymm k ymm
// VBLENDMPD.BCST m64 ymm ymm
// VBLENDMPD.BCST m64 zmm k zmm
// VBLENDMPD.BCST m64 zmm zmm
//
// Construct and append a VBLENDMPD.BCST instruction to the active function.
// Operates on the global context.
func VBLENDMPD_BCST(ops ...operand.Op) { ctx.VBLENDMPD_BCST(ops...) }
|
|
|
|
// VBLENDMPD_BCST_Z: Blend Packed Double-Precision Floating-Point Vectors Using an OpMask Control (Broadcast, Zeroing Masking).
//
// Forms:
//
// VBLENDMPD.BCST.Z m64 xmm k xmm
// VBLENDMPD.BCST.Z m64 ymm k ymm
// VBLENDMPD.BCST.Z m64 zmm k zmm
//
// Construct and append a VBLENDMPD.BCST.Z instruction to the active function.
func (c *Context) VBLENDMPD_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VBLENDMPD_BCST_Z(m, xyz, k, xyz1))
}

// VBLENDMPD_BCST_Z: Blend Packed Double-Precision Floating-Point Vectors Using an OpMask Control (Broadcast, Zeroing Masking).
//
// Forms:
//
// VBLENDMPD.BCST.Z m64 xmm k xmm
// VBLENDMPD.BCST.Z m64 ymm k ymm
// VBLENDMPD.BCST.Z m64 zmm k zmm
//
// Construct and append a VBLENDMPD.BCST.Z instruction to the active function.
// Operates on the global context.
func VBLENDMPD_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VBLENDMPD_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VBLENDMPD_Z: Blend Packed Double-Precision Floating-Point Vectors Using an OpMask Control (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VBLENDMPD.Z m128 xmm k xmm
|
|
// VBLENDMPD.Z m256 ymm k ymm
|
|
// VBLENDMPD.Z xmm xmm k xmm
|
|
// VBLENDMPD.Z ymm ymm k ymm
|
|
// VBLENDMPD.Z m512 zmm k zmm
|
|
// VBLENDMPD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VBLENDMPD.Z instruction to the active function.
|
|
func (c *Context) VBLENDMPD_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VBLENDMPD_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VBLENDMPD_Z: Blend Packed Double-Precision Floating-Point Vectors Using an OpMask Control (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VBLENDMPD.Z m128 xmm k xmm
|
|
// VBLENDMPD.Z m256 ymm k ymm
|
|
// VBLENDMPD.Z xmm xmm k xmm
|
|
// VBLENDMPD.Z ymm ymm k ymm
|
|
// VBLENDMPD.Z m512 zmm k zmm
|
|
// VBLENDMPD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VBLENDMPD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VBLENDMPD_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VBLENDMPD_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VBLENDMPS: Blend Packed Single-Precision Floating-Point Vectors Using an OpMask Control.
//
// Forms:
//
//	VBLENDMPS m128 xmm k xmm
//	VBLENDMPS m128 xmm xmm
//	VBLENDMPS m256 ymm k ymm
//	VBLENDMPS m256 ymm ymm
//	VBLENDMPS xmm xmm k xmm
//	VBLENDMPS xmm xmm xmm
//	VBLENDMPS ymm ymm k ymm
//	VBLENDMPS ymm ymm ymm
//	VBLENDMPS m512 zmm k zmm
//	VBLENDMPS m512 zmm zmm
//	VBLENDMPS zmm zmm k zmm
//	VBLENDMPS zmm zmm zmm
//
// Construct and append a VBLENDMPS instruction to the active function.
func (c *Context) VBLENDMPS(ops ...operand.Op) {
	c.addinstruction(x86.VBLENDMPS(ops...))
}

// VBLENDMPS: Blend Packed Single-Precision Floating-Point Vectors Using an OpMask Control.
//
// Forms:
//
//	VBLENDMPS m128 xmm k xmm
//	VBLENDMPS m128 xmm xmm
//	VBLENDMPS m256 ymm k ymm
//	VBLENDMPS m256 ymm ymm
//	VBLENDMPS xmm xmm k xmm
//	VBLENDMPS xmm xmm xmm
//	VBLENDMPS ymm ymm k ymm
//	VBLENDMPS ymm ymm ymm
//	VBLENDMPS m512 zmm k zmm
//	VBLENDMPS m512 zmm zmm
//	VBLENDMPS zmm zmm k zmm
//	VBLENDMPS zmm zmm zmm
//
// Construct and append a VBLENDMPS instruction to the active function.
// Operates on the global context.
func VBLENDMPS(ops ...operand.Op) { ctx.VBLENDMPS(ops...) }

// VBLENDMPS_BCST: Blend Packed Single-Precision Floating-Point Vectors Using an OpMask Control (Broadcast).
//
// Forms:
//
//	VBLENDMPS.BCST m32 xmm k xmm
//	VBLENDMPS.BCST m32 xmm xmm
//	VBLENDMPS.BCST m32 ymm k ymm
//	VBLENDMPS.BCST m32 ymm ymm
//	VBLENDMPS.BCST m32 zmm k zmm
//	VBLENDMPS.BCST m32 zmm zmm
//
// Construct and append a VBLENDMPS.BCST instruction to the active function.
func (c *Context) VBLENDMPS_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VBLENDMPS_BCST(ops...))
}

// VBLENDMPS_BCST: Blend Packed Single-Precision Floating-Point Vectors Using an OpMask Control (Broadcast).
//
// Forms:
//
//	VBLENDMPS.BCST m32 xmm k xmm
//	VBLENDMPS.BCST m32 xmm xmm
//	VBLENDMPS.BCST m32 ymm k ymm
//	VBLENDMPS.BCST m32 ymm ymm
//	VBLENDMPS.BCST m32 zmm k zmm
//	VBLENDMPS.BCST m32 zmm zmm
//
// Construct and append a VBLENDMPS.BCST instruction to the active function.
// Operates on the global context.
func VBLENDMPS_BCST(ops ...operand.Op) { ctx.VBLENDMPS_BCST(ops...) }

// VBLENDMPS_BCST_Z: Blend Packed Single-Precision Floating-Point Vectors Using an OpMask Control (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VBLENDMPS.BCST.Z m32 xmm k xmm
//	VBLENDMPS.BCST.Z m32 ymm k ymm
//	VBLENDMPS.BCST.Z m32 zmm k zmm
//
// Construct and append a VBLENDMPS.BCST.Z instruction to the active function.
func (c *Context) VBLENDMPS_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VBLENDMPS_BCST_Z(m, xyz, k, xyz1))
}

// VBLENDMPS_BCST_Z: Blend Packed Single-Precision Floating-Point Vectors Using an OpMask Control (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VBLENDMPS.BCST.Z m32 xmm k xmm
//	VBLENDMPS.BCST.Z m32 ymm k ymm
//	VBLENDMPS.BCST.Z m32 zmm k zmm
//
// Construct and append a VBLENDMPS.BCST.Z instruction to the active function.
// Operates on the global context.
func VBLENDMPS_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VBLENDMPS_BCST_Z(m, xyz, k, xyz1) }

// VBLENDMPS_Z: Blend Packed Single-Precision Floating-Point Vectors Using an OpMask Control (Zeroing Masking).
//
// Forms:
//
//	VBLENDMPS.Z m128 xmm k xmm
//	VBLENDMPS.Z m256 ymm k ymm
//	VBLENDMPS.Z xmm xmm k xmm
//	VBLENDMPS.Z ymm ymm k ymm
//	VBLENDMPS.Z m512 zmm k zmm
//	VBLENDMPS.Z zmm zmm k zmm
//
// Construct and append a VBLENDMPS.Z instruction to the active function.
func (c *Context) VBLENDMPS_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VBLENDMPS_Z(mxyz, xyz, k, xyz1))
}

// VBLENDMPS_Z: Blend Packed Single-Precision Floating-Point Vectors Using an OpMask Control (Zeroing Masking).
//
// Forms:
//
//	VBLENDMPS.Z m128 xmm k xmm
//	VBLENDMPS.Z m256 ymm k ymm
//	VBLENDMPS.Z xmm xmm k xmm
//	VBLENDMPS.Z ymm ymm k ymm
//	VBLENDMPS.Z m512 zmm k zmm
//	VBLENDMPS.Z zmm zmm k zmm
//
// Construct and append a VBLENDMPS.Z instruction to the active function.
// Operates on the global context.
func VBLENDMPS_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VBLENDMPS_Z(mxyz, xyz, k, xyz1) }

// VBLENDPD: Blend Packed Double Precision Floating-Point Values.
//
// Forms:
//
//	VBLENDPD imm8 m128 xmm xmm
//	VBLENDPD imm8 m256 ymm ymm
//	VBLENDPD imm8 xmm xmm xmm
//	VBLENDPD imm8 ymm ymm ymm
//
// Construct and append a VBLENDPD instruction to the active function.
func (c *Context) VBLENDPD(i, mxy, xy, xy1 operand.Op) {
	c.addinstruction(x86.VBLENDPD(i, mxy, xy, xy1))
}

// VBLENDPD: Blend Packed Double Precision Floating-Point Values.
//
// Forms:
//
//	VBLENDPD imm8 m128 xmm xmm
//	VBLENDPD imm8 m256 ymm ymm
//	VBLENDPD imm8 xmm xmm xmm
//	VBLENDPD imm8 ymm ymm ymm
//
// Construct and append a VBLENDPD instruction to the active function.
// Operates on the global context.
func VBLENDPD(i, mxy, xy, xy1 operand.Op) { ctx.VBLENDPD(i, mxy, xy, xy1) }

// VBLENDPS: Blend Packed Single Precision Floating-Point Values.
//
// Forms:
//
//	VBLENDPS imm8 m128 xmm xmm
//	VBLENDPS imm8 m256 ymm ymm
//	VBLENDPS imm8 xmm xmm xmm
//	VBLENDPS imm8 ymm ymm ymm
//
// Construct and append a VBLENDPS instruction to the active function.
func (c *Context) VBLENDPS(i, mxy, xy, xy1 operand.Op) {
	c.addinstruction(x86.VBLENDPS(i, mxy, xy, xy1))
}

// VBLENDPS: Blend Packed Single Precision Floating-Point Values.
//
// Forms:
//
//	VBLENDPS imm8 m128 xmm xmm
//	VBLENDPS imm8 m256 ymm ymm
//	VBLENDPS imm8 xmm xmm xmm
//	VBLENDPS imm8 ymm ymm ymm
//
// Construct and append a VBLENDPS instruction to the active function.
// Operates on the global context.
func VBLENDPS(i, mxy, xy, xy1 operand.Op) { ctx.VBLENDPS(i, mxy, xy, xy1) }

// VBLENDVPD: Variable Blend Packed Double Precision Floating-Point Values.
//
// Forms:
//
//	VBLENDVPD xmm m128 xmm xmm
//	VBLENDVPD xmm xmm xmm xmm
//	VBLENDVPD ymm m256 ymm ymm
//	VBLENDVPD ymm ymm ymm ymm
//
// Construct and append a VBLENDVPD instruction to the active function.
func (c *Context) VBLENDVPD(xy, mxy, xy1, xy2 operand.Op) {
	c.addinstruction(x86.VBLENDVPD(xy, mxy, xy1, xy2))
}

// VBLENDVPD: Variable Blend Packed Double Precision Floating-Point Values.
//
// Forms:
//
//	VBLENDVPD xmm m128 xmm xmm
//	VBLENDVPD xmm xmm xmm xmm
//	VBLENDVPD ymm m256 ymm ymm
//	VBLENDVPD ymm ymm ymm ymm
//
// Construct and append a VBLENDVPD instruction to the active function.
// Operates on the global context.
func VBLENDVPD(xy, mxy, xy1, xy2 operand.Op) { ctx.VBLENDVPD(xy, mxy, xy1, xy2) }

// VBLENDVPS: Variable Blend Packed Single Precision Floating-Point Values.
//
// Forms:
//
//	VBLENDVPS xmm m128 xmm xmm
//	VBLENDVPS xmm xmm xmm xmm
//	VBLENDVPS ymm m256 ymm ymm
//	VBLENDVPS ymm ymm ymm ymm
//
// Construct and append a VBLENDVPS instruction to the active function.
func (c *Context) VBLENDVPS(xy, mxy, xy1, xy2 operand.Op) {
	c.addinstruction(x86.VBLENDVPS(xy, mxy, xy1, xy2))
}

// VBLENDVPS: Variable Blend Packed Single Precision Floating-Point Values.
//
// Forms:
//
//	VBLENDVPS xmm m128 xmm xmm
//	VBLENDVPS xmm xmm xmm xmm
//	VBLENDVPS ymm m256 ymm ymm
//	VBLENDVPS ymm ymm ymm ymm
//
// Construct and append a VBLENDVPS instruction to the active function.
// Operates on the global context.
func VBLENDVPS(xy, mxy, xy1, xy2 operand.Op) { ctx.VBLENDVPS(xy, mxy, xy1, xy2) }

// VBROADCASTF128: Broadcast 128 Bit of Floating-Point Data.
//
// Forms:
//
//	VBROADCASTF128 m128 ymm
//
// Construct and append a VBROADCASTF128 instruction to the active function.
func (c *Context) VBROADCASTF128(m, y operand.Op) {
	c.addinstruction(x86.VBROADCASTF128(m, y))
}

// VBROADCASTF128: Broadcast 128 Bit of Floating-Point Data.
//
// Forms:
//
//	VBROADCASTF128 m128 ymm
//
// Construct and append a VBROADCASTF128 instruction to the active function.
// Operates on the global context.
func VBROADCASTF128(m, y operand.Op) { ctx.VBROADCASTF128(m, y) }

// VBROADCASTF32X2: Broadcast Two Single-Precision Floating-Point Elements.
//
// Forms:
//
//	VBROADCASTF32X2 m64 k ymm
//	VBROADCASTF32X2 m64 ymm
//	VBROADCASTF32X2 xmm k ymm
//	VBROADCASTF32X2 xmm ymm
//	VBROADCASTF32X2 m64 k zmm
//	VBROADCASTF32X2 m64 zmm
//	VBROADCASTF32X2 xmm k zmm
//	VBROADCASTF32X2 xmm zmm
//
// Construct and append a VBROADCASTF32X2 instruction to the active function.
func (c *Context) VBROADCASTF32X2(ops ...operand.Op) {
	c.addinstruction(x86.VBROADCASTF32X2(ops...))
}

// VBROADCASTF32X2: Broadcast Two Single-Precision Floating-Point Elements.
//
// Forms:
//
//	VBROADCASTF32X2 m64 k ymm
//	VBROADCASTF32X2 m64 ymm
//	VBROADCASTF32X2 xmm k ymm
//	VBROADCASTF32X2 xmm ymm
//	VBROADCASTF32X2 m64 k zmm
//	VBROADCASTF32X2 m64 zmm
//	VBROADCASTF32X2 xmm k zmm
//	VBROADCASTF32X2 xmm zmm
//
// Construct and append a VBROADCASTF32X2 instruction to the active function.
// Operates on the global context.
func VBROADCASTF32X2(ops ...operand.Op) { ctx.VBROADCASTF32X2(ops...) }

// VBROADCASTF32X2_Z: Broadcast Two Single-Precision Floating-Point Elements (Zeroing Masking).
//
// Forms:
//
//	VBROADCASTF32X2.Z m64 k ymm
//	VBROADCASTF32X2.Z xmm k ymm
//	VBROADCASTF32X2.Z m64 k zmm
//	VBROADCASTF32X2.Z xmm k zmm
//
// Construct and append a VBROADCASTF32X2.Z instruction to the active function.
func (c *Context) VBROADCASTF32X2_Z(mx, k, yz operand.Op) {
	c.addinstruction(x86.VBROADCASTF32X2_Z(mx, k, yz))
}

// VBROADCASTF32X2_Z: Broadcast Two Single-Precision Floating-Point Elements (Zeroing Masking).
//
// Forms:
//
//	VBROADCASTF32X2.Z m64 k ymm
//	VBROADCASTF32X2.Z xmm k ymm
//	VBROADCASTF32X2.Z m64 k zmm
//	VBROADCASTF32X2.Z xmm k zmm
//
// Construct and append a VBROADCASTF32X2.Z instruction to the active function.
// Operates on the global context.
func VBROADCASTF32X2_Z(mx, k, yz operand.Op) { ctx.VBROADCASTF32X2_Z(mx, k, yz) }

// VBROADCASTF32X4: Broadcast Four Single-Precision Floating-Point Elements.
//
// Forms:
//
//	VBROADCASTF32X4 m128 k ymm
//	VBROADCASTF32X4 m128 ymm
//	VBROADCASTF32X4 m128 k zmm
//	VBROADCASTF32X4 m128 zmm
//
// Construct and append a VBROADCASTF32X4 instruction to the active function.
func (c *Context) VBROADCASTF32X4(ops ...operand.Op) {
	c.addinstruction(x86.VBROADCASTF32X4(ops...))
}

// VBROADCASTF32X4: Broadcast Four Single-Precision Floating-Point Elements.
//
// Forms:
//
//	VBROADCASTF32X4 m128 k ymm
//	VBROADCASTF32X4 m128 ymm
//	VBROADCASTF32X4 m128 k zmm
//	VBROADCASTF32X4 m128 zmm
//
// Construct and append a VBROADCASTF32X4 instruction to the active function.
// Operates on the global context.
func VBROADCASTF32X4(ops ...operand.Op) { ctx.VBROADCASTF32X4(ops...) }

// VBROADCASTF32X4_Z: Broadcast Four Single-Precision Floating-Point Elements (Zeroing Masking).
//
// Forms:
//
//	VBROADCASTF32X4.Z m128 k ymm
//	VBROADCASTF32X4.Z m128 k zmm
//
// Construct and append a VBROADCASTF32X4.Z instruction to the active function.
func (c *Context) VBROADCASTF32X4_Z(m, k, yz operand.Op) {
	c.addinstruction(x86.VBROADCASTF32X4_Z(m, k, yz))
}

// VBROADCASTF32X4_Z: Broadcast Four Single-Precision Floating-Point Elements (Zeroing Masking).
//
// Forms:
//
//	VBROADCASTF32X4.Z m128 k ymm
//	VBROADCASTF32X4.Z m128 k zmm
//
// Construct and append a VBROADCASTF32X4.Z instruction to the active function.
// Operates on the global context.
func VBROADCASTF32X4_Z(m, k, yz operand.Op) { ctx.VBROADCASTF32X4_Z(m, k, yz) }

// VBROADCASTF32X8: Broadcast Eight Single-Precision Floating-Point Elements.
//
// Forms:
//
//	VBROADCASTF32X8 m256 k zmm
//	VBROADCASTF32X8 m256 zmm
//
// Construct and append a VBROADCASTF32X8 instruction to the active function.
func (c *Context) VBROADCASTF32X8(ops ...operand.Op) {
	c.addinstruction(x86.VBROADCASTF32X8(ops...))
}

// VBROADCASTF32X8: Broadcast Eight Single-Precision Floating-Point Elements.
//
// Forms:
//
//	VBROADCASTF32X8 m256 k zmm
//	VBROADCASTF32X8 m256 zmm
//
// Construct and append a VBROADCASTF32X8 instruction to the active function.
// Operates on the global context.
func VBROADCASTF32X8(ops ...operand.Op) { ctx.VBROADCASTF32X8(ops...) }

// VBROADCASTF32X8_Z: Broadcast Eight Single-Precision Floating-Point Elements (Zeroing Masking).
//
// Forms:
//
//	VBROADCASTF32X8.Z m256 k zmm
//
// Construct and append a VBROADCASTF32X8.Z instruction to the active function.
func (c *Context) VBROADCASTF32X8_Z(m, k, z operand.Op) {
	c.addinstruction(x86.VBROADCASTF32X8_Z(m, k, z))
}

// VBROADCASTF32X8_Z: Broadcast Eight Single-Precision Floating-Point Elements (Zeroing Masking).
//
// Forms:
//
//	VBROADCASTF32X8.Z m256 k zmm
//
// Construct and append a VBROADCASTF32X8.Z instruction to the active function.
// Operates on the global context.
func VBROADCASTF32X8_Z(m, k, z operand.Op) { ctx.VBROADCASTF32X8_Z(m, k, z) }

// VBROADCASTF64X2: Broadcast Two Double-Precision Floating-Point Elements.
//
// Forms:
//
//	VBROADCASTF64X2 m128 k ymm
//	VBROADCASTF64X2 m128 ymm
//	VBROADCASTF64X2 m128 k zmm
//	VBROADCASTF64X2 m128 zmm
//
// Construct and append a VBROADCASTF64X2 instruction to the active function.
func (c *Context) VBROADCASTF64X2(ops ...operand.Op) {
	c.addinstruction(x86.VBROADCASTF64X2(ops...))
}

// VBROADCASTF64X2: Broadcast Two Double-Precision Floating-Point Elements.
//
// Forms:
//
//	VBROADCASTF64X2 m128 k ymm
//	VBROADCASTF64X2 m128 ymm
//	VBROADCASTF64X2 m128 k zmm
//	VBROADCASTF64X2 m128 zmm
//
// Construct and append a VBROADCASTF64X2 instruction to the active function.
// Operates on the global context.
func VBROADCASTF64X2(ops ...operand.Op) { ctx.VBROADCASTF64X2(ops...) }

// VBROADCASTF64X2_Z: Broadcast Two Double-Precision Floating-Point Elements (Zeroing Masking).
//
// Forms:
//
//	VBROADCASTF64X2.Z m128 k ymm
//	VBROADCASTF64X2.Z m128 k zmm
//
// Construct and append a VBROADCASTF64X2.Z instruction to the active function.
func (c *Context) VBROADCASTF64X2_Z(m, k, yz operand.Op) {
	c.addinstruction(x86.VBROADCASTF64X2_Z(m, k, yz))
}

// VBROADCASTF64X2_Z: Broadcast Two Double-Precision Floating-Point Elements (Zeroing Masking).
//
// Forms:
//
//	VBROADCASTF64X2.Z m128 k ymm
//	VBROADCASTF64X2.Z m128 k zmm
//
// Construct and append a VBROADCASTF64X2.Z instruction to the active function.
// Operates on the global context.
func VBROADCASTF64X2_Z(m, k, yz operand.Op) { ctx.VBROADCASTF64X2_Z(m, k, yz) }

// VBROADCASTF64X4: Broadcast Four Double-Precision Floating-Point Elements.
//
// Forms:
//
//	VBROADCASTF64X4 m256 k zmm
//	VBROADCASTF64X4 m256 zmm
//
// Construct and append a VBROADCASTF64X4 instruction to the active function.
func (c *Context) VBROADCASTF64X4(ops ...operand.Op) {
	c.addinstruction(x86.VBROADCASTF64X4(ops...))
}

// VBROADCASTF64X4: Broadcast Four Double-Precision Floating-Point Elements.
//
// Forms:
//
//	VBROADCASTF64X4 m256 k zmm
//	VBROADCASTF64X4 m256 zmm
//
// Construct and append a VBROADCASTF64X4 instruction to the active function.
// Operates on the global context.
func VBROADCASTF64X4(ops ...operand.Op) { ctx.VBROADCASTF64X4(ops...) }

// VBROADCASTF64X4_Z: Broadcast Four Double-Precision Floating-Point Elements (Zeroing Masking).
//
// Forms:
//
//	VBROADCASTF64X4.Z m256 k zmm
//
// Construct and append a VBROADCASTF64X4.Z instruction to the active function.
func (c *Context) VBROADCASTF64X4_Z(m, k, z operand.Op) {
	c.addinstruction(x86.VBROADCASTF64X4_Z(m, k, z))
}

// VBROADCASTF64X4_Z: Broadcast Four Double-Precision Floating-Point Elements (Zeroing Masking).
//
// Forms:
//
//	VBROADCASTF64X4.Z m256 k zmm
//
// Construct and append a VBROADCASTF64X4.Z instruction to the active function.
// Operates on the global context.
func VBROADCASTF64X4_Z(m, k, z operand.Op) { ctx.VBROADCASTF64X4_Z(m, k, z) }

// VBROADCASTI128: Broadcast 128 Bits of Integer Data.
//
// Forms:
//
//	VBROADCASTI128 m128 ymm
//
// Construct and append a VBROADCASTI128 instruction to the active function.
func (c *Context) VBROADCASTI128(m, y operand.Op) {
	c.addinstruction(x86.VBROADCASTI128(m, y))
}

// VBROADCASTI128: Broadcast 128 Bits of Integer Data.
//
// Forms:
//
//	VBROADCASTI128 m128 ymm
//
// Construct and append a VBROADCASTI128 instruction to the active function.
// Operates on the global context.
func VBROADCASTI128(m, y operand.Op) { ctx.VBROADCASTI128(m, y) }

// VBROADCASTI32X2: Broadcast Two Doubleword Elements.
//
// Forms:
//
//	VBROADCASTI32X2 m64 k xmm
//	VBROADCASTI32X2 m64 k ymm
//	VBROADCASTI32X2 m64 xmm
//	VBROADCASTI32X2 m64 ymm
//	VBROADCASTI32X2 xmm k xmm
//	VBROADCASTI32X2 xmm k ymm
//	VBROADCASTI32X2 xmm xmm
//	VBROADCASTI32X2 xmm ymm
//	VBROADCASTI32X2 m64 k zmm
//	VBROADCASTI32X2 m64 zmm
//	VBROADCASTI32X2 xmm k zmm
//	VBROADCASTI32X2 xmm zmm
//
// Construct and append a VBROADCASTI32X2 instruction to the active function.
func (c *Context) VBROADCASTI32X2(ops ...operand.Op) {
	c.addinstruction(x86.VBROADCASTI32X2(ops...))
}

// VBROADCASTI32X2: Broadcast Two Doubleword Elements.
//
// Forms:
//
//	VBROADCASTI32X2 m64 k xmm
//	VBROADCASTI32X2 m64 k ymm
//	VBROADCASTI32X2 m64 xmm
//	VBROADCASTI32X2 m64 ymm
//	VBROADCASTI32X2 xmm k xmm
//	VBROADCASTI32X2 xmm k ymm
//	VBROADCASTI32X2 xmm xmm
//	VBROADCASTI32X2 xmm ymm
//	VBROADCASTI32X2 m64 k zmm
//	VBROADCASTI32X2 m64 zmm
//	VBROADCASTI32X2 xmm k zmm
//	VBROADCASTI32X2 xmm zmm
//
// Construct and append a VBROADCASTI32X2 instruction to the active function.
// Operates on the global context.
func VBROADCASTI32X2(ops ...operand.Op) { ctx.VBROADCASTI32X2(ops...) }

// VBROADCASTI32X2_Z: Broadcast Two Doubleword Elements (Zeroing Masking).
//
// Forms:
//
//	VBROADCASTI32X2.Z m64 k xmm
//	VBROADCASTI32X2.Z m64 k ymm
//	VBROADCASTI32X2.Z xmm k xmm
//	VBROADCASTI32X2.Z xmm k ymm
//	VBROADCASTI32X2.Z m64 k zmm
//	VBROADCASTI32X2.Z xmm k zmm
//
// Construct and append a VBROADCASTI32X2.Z instruction to the active function.
func (c *Context) VBROADCASTI32X2_Z(mx, k, xyz operand.Op) {
	c.addinstruction(x86.VBROADCASTI32X2_Z(mx, k, xyz))
}

// VBROADCASTI32X2_Z: Broadcast Two Doubleword Elements (Zeroing Masking).
//
// Forms:
//
//	VBROADCASTI32X2.Z m64 k xmm
//	VBROADCASTI32X2.Z m64 k ymm
//	VBROADCASTI32X2.Z xmm k xmm
//	VBROADCASTI32X2.Z xmm k ymm
//	VBROADCASTI32X2.Z m64 k zmm
//	VBROADCASTI32X2.Z xmm k zmm
//
// Construct and append a VBROADCASTI32X2.Z instruction to the active function.
// Operates on the global context.
func VBROADCASTI32X2_Z(mx, k, xyz operand.Op) { ctx.VBROADCASTI32X2_Z(mx, k, xyz) }

// VBROADCASTI32X4: Broadcast Four Doubleword Elements.
//
// Forms:
//
//	VBROADCASTI32X4 m128 k ymm
//	VBROADCASTI32X4 m128 ymm
//	VBROADCASTI32X4 m128 k zmm
//	VBROADCASTI32X4 m128 zmm
//
// Construct and append a VBROADCASTI32X4 instruction to the active function.
func (c *Context) VBROADCASTI32X4(ops ...operand.Op) {
	c.addinstruction(x86.VBROADCASTI32X4(ops...))
}

// VBROADCASTI32X4: Broadcast Four Doubleword Elements.
//
// Forms:
//
//	VBROADCASTI32X4 m128 k ymm
//	VBROADCASTI32X4 m128 ymm
//	VBROADCASTI32X4 m128 k zmm
//	VBROADCASTI32X4 m128 zmm
//
// Construct and append a VBROADCASTI32X4 instruction to the active function.
// Operates on the global context.
func VBROADCASTI32X4(ops ...operand.Op) { ctx.VBROADCASTI32X4(ops...) }

// VBROADCASTI32X4_Z: Broadcast Four Doubleword Elements (Zeroing Masking).
//
// Forms:
//
//	VBROADCASTI32X4.Z m128 k ymm
//	VBROADCASTI32X4.Z m128 k zmm
//
// Construct and append a VBROADCASTI32X4.Z instruction to the active function.
func (c *Context) VBROADCASTI32X4_Z(m, k, yz operand.Op) {
	c.addinstruction(x86.VBROADCASTI32X4_Z(m, k, yz))
}

// VBROADCASTI32X4_Z: Broadcast Four Doubleword Elements (Zeroing Masking).
//
// Forms:
//
//	VBROADCASTI32X4.Z m128 k ymm
//	VBROADCASTI32X4.Z m128 k zmm
//
// Construct and append a VBROADCASTI32X4.Z instruction to the active function.
// Operates on the global context.
func VBROADCASTI32X4_Z(m, k, yz operand.Op) { ctx.VBROADCASTI32X4_Z(m, k, yz) }

// VBROADCASTI32X8: Broadcast Eight Doubleword Elements.
//
// Forms:
//
//	VBROADCASTI32X8 m256 k zmm
//	VBROADCASTI32X8 m256 zmm
//
// Construct and append a VBROADCASTI32X8 instruction to the active function.
func (c *Context) VBROADCASTI32X8(ops ...operand.Op) {
	c.addinstruction(x86.VBROADCASTI32X8(ops...))
}

// VBROADCASTI32X8: Broadcast Eight Doubleword Elements.
//
// Forms:
//
//	VBROADCASTI32X8 m256 k zmm
//	VBROADCASTI32X8 m256 zmm
//
// Construct and append a VBROADCASTI32X8 instruction to the active function.
// Operates on the global context.
func VBROADCASTI32X8(ops ...operand.Op) { ctx.VBROADCASTI32X8(ops...) }

// VBROADCASTI32X8_Z: Broadcast Eight Doubleword Elements (Zeroing Masking).
//
// Forms:
//
//	VBROADCASTI32X8.Z m256 k zmm
//
// Construct and append a VBROADCASTI32X8.Z instruction to the active function.
func (c *Context) VBROADCASTI32X8_Z(m, k, z operand.Op) {
	c.addinstruction(x86.VBROADCASTI32X8_Z(m, k, z))
}

// VBROADCASTI32X8_Z: Broadcast Eight Doubleword Elements (Zeroing Masking).
//
// Forms:
//
//	VBROADCASTI32X8.Z m256 k zmm
//
// Construct and append a VBROADCASTI32X8.Z instruction to the active function.
// Operates on the global context.
func VBROADCASTI32X8_Z(m, k, z operand.Op) { ctx.VBROADCASTI32X8_Z(m, k, z) }

// VBROADCASTI64X2: Broadcast Two Quadword Elements.
//
// Forms:
//
//	VBROADCASTI64X2 m128 k ymm
//	VBROADCASTI64X2 m128 ymm
//	VBROADCASTI64X2 m128 k zmm
//	VBROADCASTI64X2 m128 zmm
//
// Construct and append a VBROADCASTI64X2 instruction to the active function.
func (c *Context) VBROADCASTI64X2(ops ...operand.Op) {
	c.addinstruction(x86.VBROADCASTI64X2(ops...))
}

// VBROADCASTI64X2: Broadcast Two Quadword Elements.
//
// Forms:
//
//	VBROADCASTI64X2 m128 k ymm
//	VBROADCASTI64X2 m128 ymm
//	VBROADCASTI64X2 m128 k zmm
//	VBROADCASTI64X2 m128 zmm
//
// Construct and append a VBROADCASTI64X2 instruction to the active function.
// Operates on the global context.
func VBROADCASTI64X2(ops ...operand.Op) { ctx.VBROADCASTI64X2(ops...) }

// VBROADCASTI64X2_Z: Broadcast Two Quadword Elements (Zeroing Masking).
//
// Forms:
//
//	VBROADCASTI64X2.Z m128 k ymm
//	VBROADCASTI64X2.Z m128 k zmm
//
// Construct and append a VBROADCASTI64X2.Z instruction to the active function.
func (c *Context) VBROADCASTI64X2_Z(m, k, yz operand.Op) {
	c.addinstruction(x86.VBROADCASTI64X2_Z(m, k, yz))
}

// VBROADCASTI64X2_Z: Broadcast Two Quadword Elements (Zeroing Masking).
//
// Forms:
//
//	VBROADCASTI64X2.Z m128 k ymm
//	VBROADCASTI64X2.Z m128 k zmm
//
// Construct and append a VBROADCASTI64X2.Z instruction to the active function.
// Operates on the global context.
func VBROADCASTI64X2_Z(m, k, yz operand.Op) { ctx.VBROADCASTI64X2_Z(m, k, yz) }

// VBROADCASTI64X4: Broadcast Four Quadword Elements.
//
// Forms:
//
//	VBROADCASTI64X4 m256 k zmm
//	VBROADCASTI64X4 m256 zmm
//
// Construct and append a VBROADCASTI64X4 instruction to the active function.
func (c *Context) VBROADCASTI64X4(ops ...operand.Op) {
	c.addinstruction(x86.VBROADCASTI64X4(ops...))
}

// VBROADCASTI64X4: Broadcast Four Quadword Elements.
//
// Forms:
//
//	VBROADCASTI64X4 m256 k zmm
//	VBROADCASTI64X4 m256 zmm
//
// Construct and append a VBROADCASTI64X4 instruction to the active function.
// Operates on the global context.
func VBROADCASTI64X4(ops ...operand.Op) { ctx.VBROADCASTI64X4(ops...) }

// VBROADCASTI64X4_Z: Broadcast Four Quadword Elements (Zeroing Masking).
//
// Forms:
//
//	VBROADCASTI64X4.Z m256 k zmm
//
// Construct and append a VBROADCASTI64X4.Z instruction to the active function.
func (c *Context) VBROADCASTI64X4_Z(m, k, z operand.Op) {
	c.addinstruction(x86.VBROADCASTI64X4_Z(m, k, z))
}

// VBROADCASTI64X4_Z: Broadcast Four Quadword Elements (Zeroing Masking).
//
// Forms:
//
//	VBROADCASTI64X4.Z m256 k zmm
//
// Construct and append a VBROADCASTI64X4.Z instruction to the active function.
// Operates on the global context.
func VBROADCASTI64X4_Z(m, k, z operand.Op) { ctx.VBROADCASTI64X4_Z(m, k, z) }

// VBROADCASTSD: Broadcast Double-Precision Floating-Point Element.
//
// Forms:
//
//	VBROADCASTSD xmm ymm
//	VBROADCASTSD m64 ymm
//	VBROADCASTSD m64 k ymm
//	VBROADCASTSD xmm k ymm
//	VBROADCASTSD m64 k zmm
//	VBROADCASTSD m64 zmm
//	VBROADCASTSD xmm k zmm
//	VBROADCASTSD xmm zmm
//
// Construct and append a VBROADCASTSD instruction to the active function.
func (c *Context) VBROADCASTSD(ops ...operand.Op) {
	c.addinstruction(x86.VBROADCASTSD(ops...))
}

// VBROADCASTSD: Broadcast Double-Precision Floating-Point Element.
//
// Forms:
//
//	VBROADCASTSD xmm ymm
//	VBROADCASTSD m64 ymm
//	VBROADCASTSD m64 k ymm
//	VBROADCASTSD xmm k ymm
//	VBROADCASTSD m64 k zmm
//	VBROADCASTSD m64 zmm
//	VBROADCASTSD xmm k zmm
//	VBROADCASTSD xmm zmm
//
// Construct and append a VBROADCASTSD instruction to the active function.
// Operates on the global context.
func VBROADCASTSD(ops ...operand.Op) { ctx.VBROADCASTSD(ops...) }

// VBROADCASTSD_Z: Broadcast Double-Precision Floating-Point Element (Zeroing Masking).
//
// Forms:
//
//	VBROADCASTSD.Z m64 k ymm
//	VBROADCASTSD.Z xmm k ymm
//	VBROADCASTSD.Z m64 k zmm
//	VBROADCASTSD.Z xmm k zmm
//
// Construct and append a VBROADCASTSD.Z instruction to the active function.
func (c *Context) VBROADCASTSD_Z(mx, k, yz operand.Op) {
	c.addinstruction(x86.VBROADCASTSD_Z(mx, k, yz))
}

// VBROADCASTSD_Z: Broadcast Double-Precision Floating-Point Element (Zeroing Masking).
//
// Forms:
//
//	VBROADCASTSD.Z m64 k ymm
//	VBROADCASTSD.Z xmm k ymm
//	VBROADCASTSD.Z m64 k zmm
//	VBROADCASTSD.Z xmm k zmm
//
// Construct and append a VBROADCASTSD.Z instruction to the active function.
// Operates on the global context.
func VBROADCASTSD_Z(mx, k, yz operand.Op) { ctx.VBROADCASTSD_Z(mx, k, yz) }

// VBROADCASTSS: Broadcast Single-Precision Floating-Point Element.
//
// Forms:
//
//	VBROADCASTSS xmm xmm
//	VBROADCASTSS xmm ymm
//	VBROADCASTSS m32 xmm
//	VBROADCASTSS m32 ymm
//	VBROADCASTSS m32 k ymm
//	VBROADCASTSS xmm k ymm
//	VBROADCASTSS m32 k zmm
//	VBROADCASTSS m32 zmm
//	VBROADCASTSS xmm k zmm
//	VBROADCASTSS xmm zmm
//
// Construct and append a VBROADCASTSS instruction to the active function.
func (c *Context) VBROADCASTSS(ops ...operand.Op) {
	c.addinstruction(x86.VBROADCASTSS(ops...))
}

// VBROADCASTSS: Broadcast Single-Precision Floating-Point Element.
//
// Forms:
//
//	VBROADCASTSS xmm xmm
//	VBROADCASTSS xmm ymm
//	VBROADCASTSS m32 xmm
//	VBROADCASTSS m32 ymm
//	VBROADCASTSS m32 k ymm
//	VBROADCASTSS xmm k ymm
//	VBROADCASTSS m32 k zmm
//	VBROADCASTSS m32 zmm
//	VBROADCASTSS xmm k zmm
//	VBROADCASTSS xmm zmm
//
// Construct and append a VBROADCASTSS instruction to the active function.
// Operates on the global context.
func VBROADCASTSS(ops ...operand.Op) { ctx.VBROADCASTSS(ops...) }

// VBROADCASTSS_Z: Broadcast Single-Precision Floating-Point Element (Zeroing Masking).
//
// Forms:
//
//	VBROADCASTSS.Z m32 k ymm
//	VBROADCASTSS.Z xmm k ymm
//	VBROADCASTSS.Z m32 k zmm
//	VBROADCASTSS.Z xmm k zmm
//
// Construct and append a VBROADCASTSS.Z instruction to the active function.
func (c *Context) VBROADCASTSS_Z(mx, k, yz operand.Op) {
	c.addinstruction(x86.VBROADCASTSS_Z(mx, k, yz))
}

// VBROADCASTSS_Z: Broadcast Single-Precision Floating-Point Element (Zeroing Masking).
//
// Forms:
//
//	VBROADCASTSS.Z m32 k ymm
//	VBROADCASTSS.Z xmm k ymm
//	VBROADCASTSS.Z m32 k zmm
//	VBROADCASTSS.Z xmm k zmm
//
// Construct and append a VBROADCASTSS.Z instruction to the active function.
// Operates on the global context.
func VBROADCASTSS_Z(mx, k, yz operand.Op) { ctx.VBROADCASTSS_Z(mx, k, yz) }

// VCMPPD: Compare Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCMPPD imm8 m128 xmm xmm
|
|
// VCMPPD imm8 m256 ymm ymm
|
|
// VCMPPD imm8 xmm xmm xmm
|
|
// VCMPPD imm8 ymm ymm ymm
|
|
// VCMPPD imm8 m128 xmm k k
|
|
// VCMPPD imm8 m128 xmm k
|
|
// VCMPPD imm8 m256 ymm k k
|
|
// VCMPPD imm8 m256 ymm k
|
|
// VCMPPD imm8 xmm xmm k k
|
|
// VCMPPD imm8 xmm xmm k
|
|
// VCMPPD imm8 ymm ymm k k
|
|
// VCMPPD imm8 ymm ymm k
|
|
// VCMPPD imm8 m512 zmm k k
|
|
// VCMPPD imm8 m512 zmm k
|
|
// VCMPPD imm8 zmm zmm k k
|
|
// VCMPPD imm8 zmm zmm k
|
|
//
|
|
// Construct and append a VCMPPD instruction to the active function.
|
|
func (c *Context) VCMPPD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCMPPD(ops...))
|
|
}
|
|
|
|
// VCMPPD: Compare Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCMPPD imm8 m128 xmm xmm
|
|
// VCMPPD imm8 m256 ymm ymm
|
|
// VCMPPD imm8 xmm xmm xmm
|
|
// VCMPPD imm8 ymm ymm ymm
|
|
// VCMPPD imm8 m128 xmm k k
|
|
// VCMPPD imm8 m128 xmm k
|
|
// VCMPPD imm8 m256 ymm k k
|
|
// VCMPPD imm8 m256 ymm k
|
|
// VCMPPD imm8 xmm xmm k k
|
|
// VCMPPD imm8 xmm xmm k
|
|
// VCMPPD imm8 ymm ymm k k
|
|
// VCMPPD imm8 ymm ymm k
|
|
// VCMPPD imm8 m512 zmm k k
|
|
// VCMPPD imm8 m512 zmm k
|
|
// VCMPPD imm8 zmm zmm k k
|
|
// VCMPPD imm8 zmm zmm k
|
|
//
|
|
// Construct and append a VCMPPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCMPPD(ops ...operand.Op) { ctx.VCMPPD(ops...) }
|
|
|
|
// VCMPPD_BCST: Compare Packed Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCMPPD.BCST imm8 m64 xmm k k
|
|
// VCMPPD.BCST imm8 m64 xmm k
|
|
// VCMPPD.BCST imm8 m64 ymm k k
|
|
// VCMPPD.BCST imm8 m64 ymm k
|
|
// VCMPPD.BCST imm8 m64 zmm k k
|
|
// VCMPPD.BCST imm8 m64 zmm k
|
|
//
|
|
// Construct and append a VCMPPD.BCST instruction to the active function.
|
|
func (c *Context) VCMPPD_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCMPPD_BCST(ops...))
|
|
}
|
|
|
|
// VCMPPD_BCST: Compare Packed Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCMPPD.BCST imm8 m64 xmm k k
|
|
// VCMPPD.BCST imm8 m64 xmm k
|
|
// VCMPPD.BCST imm8 m64 ymm k k
|
|
// VCMPPD.BCST imm8 m64 ymm k
|
|
// VCMPPD.BCST imm8 m64 zmm k k
|
|
// VCMPPD.BCST imm8 m64 zmm k
|
|
//
|
|
// Construct and append a VCMPPD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCMPPD_BCST(ops ...operand.Op) { ctx.VCMPPD_BCST(ops...) }
|
|
|
|
// VCMPPD_SAE: Compare Packed Double-Precision Floating-Point Values (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCMPPD.SAE imm8 zmm zmm k k
|
|
// VCMPPD.SAE imm8 zmm zmm k
|
|
//
|
|
// Construct and append a VCMPPD.SAE instruction to the active function.
|
|
func (c *Context) VCMPPD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCMPPD_SAE(ops...))
|
|
}
|
|
|
|
// VCMPPD_SAE: Compare Packed Double-Precision Floating-Point Values (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCMPPD.SAE imm8 zmm zmm k k
|
|
// VCMPPD.SAE imm8 zmm zmm k
|
|
//
|
|
// Construct and append a VCMPPD.SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCMPPD_SAE(ops ...operand.Op) { ctx.VCMPPD_SAE(ops...) }
|
|
|
|
// VCMPPS: Compare Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCMPPS imm8 m128 xmm xmm
|
|
// VCMPPS imm8 m256 ymm ymm
|
|
// VCMPPS imm8 xmm xmm xmm
|
|
// VCMPPS imm8 ymm ymm ymm
|
|
// VCMPPS imm8 m128 xmm k k
|
|
// VCMPPS imm8 m128 xmm k
|
|
// VCMPPS imm8 m256 ymm k k
|
|
// VCMPPS imm8 m256 ymm k
|
|
// VCMPPS imm8 xmm xmm k k
|
|
// VCMPPS imm8 xmm xmm k
|
|
// VCMPPS imm8 ymm ymm k k
|
|
// VCMPPS imm8 ymm ymm k
|
|
// VCMPPS imm8 m512 zmm k k
|
|
// VCMPPS imm8 m512 zmm k
|
|
// VCMPPS imm8 zmm zmm k k
|
|
// VCMPPS imm8 zmm zmm k
|
|
//
|
|
// Construct and append a VCMPPS instruction to the active function.
|
|
func (c *Context) VCMPPS(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCMPPS(ops...))
|
|
}
|
|
|
|
// VCMPPS: Compare Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCMPPS imm8 m128 xmm xmm
|
|
// VCMPPS imm8 m256 ymm ymm
|
|
// VCMPPS imm8 xmm xmm xmm
|
|
// VCMPPS imm8 ymm ymm ymm
|
|
// VCMPPS imm8 m128 xmm k k
|
|
// VCMPPS imm8 m128 xmm k
|
|
// VCMPPS imm8 m256 ymm k k
|
|
// VCMPPS imm8 m256 ymm k
|
|
// VCMPPS imm8 xmm xmm k k
|
|
// VCMPPS imm8 xmm xmm k
|
|
// VCMPPS imm8 ymm ymm k k
|
|
// VCMPPS imm8 ymm ymm k
|
|
// VCMPPS imm8 m512 zmm k k
|
|
// VCMPPS imm8 m512 zmm k
|
|
// VCMPPS imm8 zmm zmm k k
|
|
// VCMPPS imm8 zmm zmm k
|
|
//
|
|
// Construct and append a VCMPPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCMPPS(ops ...operand.Op) { ctx.VCMPPS(ops...) }
|
|
|
|
// VCMPPS_BCST: Compare Packed Single-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCMPPS.BCST imm8 m32 xmm k k
|
|
// VCMPPS.BCST imm8 m32 xmm k
|
|
// VCMPPS.BCST imm8 m32 ymm k k
|
|
// VCMPPS.BCST imm8 m32 ymm k
|
|
// VCMPPS.BCST imm8 m32 zmm k k
|
|
// VCMPPS.BCST imm8 m32 zmm k
|
|
//
|
|
// Construct and append a VCMPPS.BCST instruction to the active function.
|
|
func (c *Context) VCMPPS_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCMPPS_BCST(ops...))
|
|
}
|
|
|
|
// VCMPPS_BCST: Compare Packed Single-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCMPPS.BCST imm8 m32 xmm k k
|
|
// VCMPPS.BCST imm8 m32 xmm k
|
|
// VCMPPS.BCST imm8 m32 ymm k k
|
|
// VCMPPS.BCST imm8 m32 ymm k
|
|
// VCMPPS.BCST imm8 m32 zmm k k
|
|
// VCMPPS.BCST imm8 m32 zmm k
|
|
//
|
|
// Construct and append a VCMPPS.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCMPPS_BCST(ops ...operand.Op) { ctx.VCMPPS_BCST(ops...) }
|
|
|
|
// VCMPPS_SAE: Compare Packed Single-Precision Floating-Point Values (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCMPPS.SAE imm8 zmm zmm k k
|
|
// VCMPPS.SAE imm8 zmm zmm k
|
|
//
|
|
// Construct and append a VCMPPS.SAE instruction to the active function.
|
|
func (c *Context) VCMPPS_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCMPPS_SAE(ops...))
|
|
}
|
|
|
|
// VCMPPS_SAE: Compare Packed Single-Precision Floating-Point Values (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCMPPS.SAE imm8 zmm zmm k k
|
|
// VCMPPS.SAE imm8 zmm zmm k
|
|
//
|
|
// Construct and append a VCMPPS.SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCMPPS_SAE(ops ...operand.Op) { ctx.VCMPPS_SAE(ops...) }
|
|
|
|
// VCMPSD: Compare Scalar Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCMPSD imm8 m64 xmm xmm
|
|
// VCMPSD imm8 xmm xmm xmm
|
|
// VCMPSD imm8 m64 xmm k k
|
|
// VCMPSD imm8 m64 xmm k
|
|
// VCMPSD imm8 xmm xmm k k
|
|
// VCMPSD imm8 xmm xmm k
|
|
//
|
|
// Construct and append a VCMPSD instruction to the active function.
|
|
func (c *Context) VCMPSD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCMPSD(ops...))
|
|
}
|
|
|
|
// VCMPSD: Compare Scalar Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCMPSD imm8 m64 xmm xmm
|
|
// VCMPSD imm8 xmm xmm xmm
|
|
// VCMPSD imm8 m64 xmm k k
|
|
// VCMPSD imm8 m64 xmm k
|
|
// VCMPSD imm8 xmm xmm k k
|
|
// VCMPSD imm8 xmm xmm k
|
|
//
|
|
// Construct and append a VCMPSD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCMPSD(ops ...operand.Op) { ctx.VCMPSD(ops...) }
|
|
|
|
// VCMPSD_SAE: Compare Scalar Double-Precision Floating-Point Values (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCMPSD.SAE imm8 xmm xmm k k
|
|
// VCMPSD.SAE imm8 xmm xmm k
|
|
//
|
|
// Construct and append a VCMPSD.SAE instruction to the active function.
|
|
func (c *Context) VCMPSD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCMPSD_SAE(ops...))
|
|
}
|
|
|
|
// VCMPSD_SAE: Compare Scalar Double-Precision Floating-Point Values (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCMPSD.SAE imm8 xmm xmm k k
|
|
// VCMPSD.SAE imm8 xmm xmm k
|
|
//
|
|
// Construct and append a VCMPSD.SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCMPSD_SAE(ops ...operand.Op) { ctx.VCMPSD_SAE(ops...) }
|
|
|
|
// VCMPSS: Compare Scalar Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCMPSS imm8 m32 xmm xmm
|
|
// VCMPSS imm8 xmm xmm xmm
|
|
// VCMPSS imm8 m32 xmm k k
|
|
// VCMPSS imm8 m32 xmm k
|
|
// VCMPSS imm8 xmm xmm k k
|
|
// VCMPSS imm8 xmm xmm k
|
|
//
|
|
// Construct and append a VCMPSS instruction to the active function.
|
|
func (c *Context) VCMPSS(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCMPSS(ops...))
|
|
}
|
|
|
|
// VCMPSS: Compare Scalar Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCMPSS imm8 m32 xmm xmm
|
|
// VCMPSS imm8 xmm xmm xmm
|
|
// VCMPSS imm8 m32 xmm k k
|
|
// VCMPSS imm8 m32 xmm k
|
|
// VCMPSS imm8 xmm xmm k k
|
|
// VCMPSS imm8 xmm xmm k
|
|
//
|
|
// Construct and append a VCMPSS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCMPSS(ops ...operand.Op) { ctx.VCMPSS(ops...) }
|
|
|
|
// VCMPSS_SAE: Compare Scalar Single-Precision Floating-Point Values (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCMPSS.SAE imm8 xmm xmm k k
|
|
// VCMPSS.SAE imm8 xmm xmm k
|
|
//
|
|
// Construct and append a VCMPSS.SAE instruction to the active function.
|
|
func (c *Context) VCMPSS_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCMPSS_SAE(ops...))
|
|
}
|
|
|
|
// VCMPSS_SAE: Compare Scalar Single-Precision Floating-Point Values (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCMPSS.SAE imm8 xmm xmm k k
|
|
// VCMPSS.SAE imm8 xmm xmm k
|
|
//
|
|
// Construct and append a VCMPSS.SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCMPSS_SAE(ops ...operand.Op) { ctx.VCMPSS_SAE(ops...) }
|
|
|
|
// VCOMISD: Compare Scalar Ordered Double-Precision Floating-Point Values and Set EFLAGS.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCOMISD m64 xmm
|
|
// VCOMISD xmm xmm
|
|
//
|
|
// Construct and append a VCOMISD instruction to the active function.
|
|
func (c *Context) VCOMISD(mx, x operand.Op) {
|
|
c.addinstruction(x86.VCOMISD(mx, x))
|
|
}
|
|
|
|
// VCOMISD: Compare Scalar Ordered Double-Precision Floating-Point Values and Set EFLAGS.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCOMISD m64 xmm
|
|
// VCOMISD xmm xmm
|
|
//
|
|
// Construct and append a VCOMISD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCOMISD(mx, x operand.Op) { ctx.VCOMISD(mx, x) }
|
|
|
|
// VCOMISD_SAE: Compare Scalar Ordered Double-Precision Floating-Point Values and Set EFLAGS (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCOMISD.SAE xmm xmm
|
|
//
|
|
// Construct and append a VCOMISD.SAE instruction to the active function.
|
|
func (c *Context) VCOMISD_SAE(x, x1 operand.Op) {
|
|
c.addinstruction(x86.VCOMISD_SAE(x, x1))
|
|
}
|
|
|
|
// VCOMISD_SAE: Compare Scalar Ordered Double-Precision Floating-Point Values and Set EFLAGS (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCOMISD.SAE xmm xmm
|
|
//
|
|
// Construct and append a VCOMISD.SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCOMISD_SAE(x, x1 operand.Op) { ctx.VCOMISD_SAE(x, x1) }
|
|
|
|
// VCOMISS: Compare Scalar Ordered Single-Precision Floating-Point Values and Set EFLAGS.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCOMISS m32 xmm
|
|
// VCOMISS xmm xmm
|
|
//
|
|
// Construct and append a VCOMISS instruction to the active function.
|
|
func (c *Context) VCOMISS(mx, x operand.Op) {
|
|
c.addinstruction(x86.VCOMISS(mx, x))
|
|
}
|
|
|
|
// VCOMISS: Compare Scalar Ordered Single-Precision Floating-Point Values and Set EFLAGS.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCOMISS m32 xmm
|
|
// VCOMISS xmm xmm
|
|
//
|
|
// Construct and append a VCOMISS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCOMISS(mx, x operand.Op) { ctx.VCOMISS(mx, x) }
|
|
|
|
// VCOMISS_SAE: Compare Scalar Ordered Single-Precision Floating-Point Values and Set EFLAGS (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCOMISS.SAE xmm xmm
|
|
//
|
|
// Construct and append a VCOMISS.SAE instruction to the active function.
|
|
func (c *Context) VCOMISS_SAE(x, x1 operand.Op) {
|
|
c.addinstruction(x86.VCOMISS_SAE(x, x1))
|
|
}
|
|
|
|
// VCOMISS_SAE: Compare Scalar Ordered Single-Precision Floating-Point Values and Set EFLAGS (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCOMISS.SAE xmm xmm
|
|
//
|
|
// Construct and append a VCOMISS.SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCOMISS_SAE(x, x1 operand.Op) { ctx.VCOMISS_SAE(x, x1) }
|
|
|
|
// VCOMPRESSPD: Store Sparse Packed Double-Precision Floating-Point Values into Dense Memory/Register.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCOMPRESSPD xmm k m128
|
|
// VCOMPRESSPD xmm k xmm
|
|
// VCOMPRESSPD xmm m128
|
|
// VCOMPRESSPD xmm xmm
|
|
// VCOMPRESSPD ymm k m256
|
|
// VCOMPRESSPD ymm k ymm
|
|
// VCOMPRESSPD ymm m256
|
|
// VCOMPRESSPD ymm ymm
|
|
// VCOMPRESSPD zmm k m512
|
|
// VCOMPRESSPD zmm k zmm
|
|
// VCOMPRESSPD zmm m512
|
|
// VCOMPRESSPD zmm zmm
|
|
//
|
|
// Construct and append a VCOMPRESSPD instruction to the active function.
|
|
func (c *Context) VCOMPRESSPD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCOMPRESSPD(ops...))
|
|
}
|
|
|
|
// VCOMPRESSPD: Store Sparse Packed Double-Precision Floating-Point Values into Dense Memory/Register.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCOMPRESSPD xmm k m128
|
|
// VCOMPRESSPD xmm k xmm
|
|
// VCOMPRESSPD xmm m128
|
|
// VCOMPRESSPD xmm xmm
|
|
// VCOMPRESSPD ymm k m256
|
|
// VCOMPRESSPD ymm k ymm
|
|
// VCOMPRESSPD ymm m256
|
|
// VCOMPRESSPD ymm ymm
|
|
// VCOMPRESSPD zmm k m512
|
|
// VCOMPRESSPD zmm k zmm
|
|
// VCOMPRESSPD zmm m512
|
|
// VCOMPRESSPD zmm zmm
|
|
//
|
|
// Construct and append a VCOMPRESSPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCOMPRESSPD(ops ...operand.Op) { ctx.VCOMPRESSPD(ops...) }
|
|
|
|
// VCOMPRESSPD_Z: Store Sparse Packed Double-Precision Floating-Point Values into Dense Memory/Register (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCOMPRESSPD.Z xmm k m128
|
|
// VCOMPRESSPD.Z xmm k xmm
|
|
// VCOMPRESSPD.Z ymm k m256
|
|
// VCOMPRESSPD.Z ymm k ymm
|
|
// VCOMPRESSPD.Z zmm k m512
|
|
// VCOMPRESSPD.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCOMPRESSPD.Z instruction to the active function.
|
|
func (c *Context) VCOMPRESSPD_Z(xyz, k, mxyz operand.Op) {
|
|
c.addinstruction(x86.VCOMPRESSPD_Z(xyz, k, mxyz))
|
|
}
|
|
|
|
// VCOMPRESSPD_Z: Store Sparse Packed Double-Precision Floating-Point Values into Dense Memory/Register (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCOMPRESSPD.Z xmm k m128
|
|
// VCOMPRESSPD.Z xmm k xmm
|
|
// VCOMPRESSPD.Z ymm k m256
|
|
// VCOMPRESSPD.Z ymm k ymm
|
|
// VCOMPRESSPD.Z zmm k m512
|
|
// VCOMPRESSPD.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCOMPRESSPD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCOMPRESSPD_Z(xyz, k, mxyz operand.Op) { ctx.VCOMPRESSPD_Z(xyz, k, mxyz) }
|
|
|
|
// VCOMPRESSPS: Store Sparse Packed Single-Precision Floating-Point Values into Dense Memory/Register.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCOMPRESSPS xmm k m128
|
|
// VCOMPRESSPS xmm k xmm
|
|
// VCOMPRESSPS xmm m128
|
|
// VCOMPRESSPS xmm xmm
|
|
// VCOMPRESSPS ymm k m256
|
|
// VCOMPRESSPS ymm k ymm
|
|
// VCOMPRESSPS ymm m256
|
|
// VCOMPRESSPS ymm ymm
|
|
// VCOMPRESSPS zmm k m512
|
|
// VCOMPRESSPS zmm k zmm
|
|
// VCOMPRESSPS zmm m512
|
|
// VCOMPRESSPS zmm zmm
|
|
//
|
|
// Construct and append a VCOMPRESSPS instruction to the active function.
|
|
func (c *Context) VCOMPRESSPS(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCOMPRESSPS(ops...))
|
|
}
|
|
|
|
// VCOMPRESSPS: Store Sparse Packed Single-Precision Floating-Point Values into Dense Memory/Register.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCOMPRESSPS xmm k m128
|
|
// VCOMPRESSPS xmm k xmm
|
|
// VCOMPRESSPS xmm m128
|
|
// VCOMPRESSPS xmm xmm
|
|
// VCOMPRESSPS ymm k m256
|
|
// VCOMPRESSPS ymm k ymm
|
|
// VCOMPRESSPS ymm m256
|
|
// VCOMPRESSPS ymm ymm
|
|
// VCOMPRESSPS zmm k m512
|
|
// VCOMPRESSPS zmm k zmm
|
|
// VCOMPRESSPS zmm m512
|
|
// VCOMPRESSPS zmm zmm
|
|
//
|
|
// Construct and append a VCOMPRESSPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCOMPRESSPS(ops ...operand.Op) { ctx.VCOMPRESSPS(ops...) }
|
|
|
|
// VCOMPRESSPS_Z: Store Sparse Packed Single-Precision Floating-Point Values into Dense Memory/Register (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCOMPRESSPS.Z xmm k m128
|
|
// VCOMPRESSPS.Z xmm k xmm
|
|
// VCOMPRESSPS.Z ymm k m256
|
|
// VCOMPRESSPS.Z ymm k ymm
|
|
// VCOMPRESSPS.Z zmm k m512
|
|
// VCOMPRESSPS.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCOMPRESSPS.Z instruction to the active function.
|
|
func (c *Context) VCOMPRESSPS_Z(xyz, k, mxyz operand.Op) {
|
|
c.addinstruction(x86.VCOMPRESSPS_Z(xyz, k, mxyz))
|
|
}
|
|
|
|
// VCOMPRESSPS_Z: Store Sparse Packed Single-Precision Floating-Point Values into Dense Memory/Register (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCOMPRESSPS.Z xmm k m128
|
|
// VCOMPRESSPS.Z xmm k xmm
|
|
// VCOMPRESSPS.Z ymm k m256
|
|
// VCOMPRESSPS.Z ymm k ymm
|
|
// VCOMPRESSPS.Z zmm k m512
|
|
// VCOMPRESSPS.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCOMPRESSPS.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCOMPRESSPS_Z(xyz, k, mxyz operand.Op) { ctx.VCOMPRESSPS_Z(xyz, k, mxyz) }
|
|
|
|
// VCVTDQ2PD: Convert Packed Dword Integers to Packed Double-Precision FP Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTDQ2PD m128 ymm
|
|
// VCVTDQ2PD m64 xmm
|
|
// VCVTDQ2PD xmm xmm
|
|
// VCVTDQ2PD xmm ymm
|
|
// VCVTDQ2PD m128 k ymm
|
|
// VCVTDQ2PD m64 k xmm
|
|
// VCVTDQ2PD xmm k xmm
|
|
// VCVTDQ2PD xmm k ymm
|
|
// VCVTDQ2PD m256 k zmm
|
|
// VCVTDQ2PD m256 zmm
|
|
// VCVTDQ2PD ymm k zmm
|
|
// VCVTDQ2PD ymm zmm
|
|
//
|
|
// Construct and append a VCVTDQ2PD instruction to the active function.
|
|
func (c *Context) VCVTDQ2PD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTDQ2PD(ops...))
|
|
}
|
|
|
|
// VCVTDQ2PD: Convert Packed Dword Integers to Packed Double-Precision FP Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTDQ2PD m128 ymm
|
|
// VCVTDQ2PD m64 xmm
|
|
// VCVTDQ2PD xmm xmm
|
|
// VCVTDQ2PD xmm ymm
|
|
// VCVTDQ2PD m128 k ymm
|
|
// VCVTDQ2PD m64 k xmm
|
|
// VCVTDQ2PD xmm k xmm
|
|
// VCVTDQ2PD xmm k ymm
|
|
// VCVTDQ2PD m256 k zmm
|
|
// VCVTDQ2PD m256 zmm
|
|
// VCVTDQ2PD ymm k zmm
|
|
// VCVTDQ2PD ymm zmm
|
|
//
|
|
// Construct and append a VCVTDQ2PD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTDQ2PD(ops ...operand.Op) { ctx.VCVTDQ2PD(ops...) }
|
|
|
|
// VCVTDQ2PD_BCST: Convert Packed Dword Integers to Packed Double-Precision FP Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTDQ2PD.BCST m32 k xmm
|
|
// VCVTDQ2PD.BCST m32 k ymm
|
|
// VCVTDQ2PD.BCST m32 xmm
|
|
// VCVTDQ2PD.BCST m32 ymm
|
|
// VCVTDQ2PD.BCST m32 k zmm
|
|
// VCVTDQ2PD.BCST m32 zmm
|
|
//
|
|
// Construct and append a VCVTDQ2PD.BCST instruction to the active function.
|
|
func (c *Context) VCVTDQ2PD_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTDQ2PD_BCST(ops...))
|
|
}
|
|
|
|
// VCVTDQ2PD_BCST: Convert Packed Dword Integers to Packed Double-Precision FP Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTDQ2PD.BCST m32 k xmm
|
|
// VCVTDQ2PD.BCST m32 k ymm
|
|
// VCVTDQ2PD.BCST m32 xmm
|
|
// VCVTDQ2PD.BCST m32 ymm
|
|
// VCVTDQ2PD.BCST m32 k zmm
|
|
// VCVTDQ2PD.BCST m32 zmm
|
|
//
|
|
// Construct and append a VCVTDQ2PD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTDQ2PD_BCST(ops ...operand.Op) { ctx.VCVTDQ2PD_BCST(ops...) }
|
|
|
|
// VCVTDQ2PD_BCST_Z: Convert Packed Dword Integers to Packed Double-Precision FP Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTDQ2PD.BCST.Z m32 k xmm
|
|
// VCVTDQ2PD.BCST.Z m32 k ymm
|
|
// VCVTDQ2PD.BCST.Z m32 k zmm
|
|
//
|
|
// Construct and append a VCVTDQ2PD.BCST.Z instruction to the active function.
|
|
func (c *Context) VCVTDQ2PD_BCST_Z(m, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VCVTDQ2PD_BCST_Z(m, k, xyz))
|
|
}
|
|
|
|
// VCVTDQ2PD_BCST_Z: Convert Packed Dword Integers to Packed Double-Precision FP Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTDQ2PD.BCST.Z m32 k xmm
|
|
// VCVTDQ2PD.BCST.Z m32 k ymm
|
|
// VCVTDQ2PD.BCST.Z m32 k zmm
|
|
//
|
|
// Construct and append a VCVTDQ2PD.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTDQ2PD_BCST_Z(m, k, xyz operand.Op) { ctx.VCVTDQ2PD_BCST_Z(m, k, xyz) }
|
|
|
|
// VCVTDQ2PD_Z: Convert Packed Dword Integers to Packed Double-Precision FP Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTDQ2PD.Z m128 k ymm
|
|
// VCVTDQ2PD.Z m64 k xmm
|
|
// VCVTDQ2PD.Z xmm k xmm
|
|
// VCVTDQ2PD.Z xmm k ymm
|
|
// VCVTDQ2PD.Z m256 k zmm
|
|
// VCVTDQ2PD.Z ymm k zmm
|
|
//
|
|
// Construct and append a VCVTDQ2PD.Z instruction to the active function.
|
|
func (c *Context) VCVTDQ2PD_Z(mxy, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VCVTDQ2PD_Z(mxy, k, xyz))
|
|
}
|
|
|
|
// VCVTDQ2PD_Z: Convert Packed Dword Integers to Packed Double-Precision FP Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTDQ2PD.Z m128 k ymm
|
|
// VCVTDQ2PD.Z m64 k xmm
|
|
// VCVTDQ2PD.Z xmm k xmm
|
|
// VCVTDQ2PD.Z xmm k ymm
|
|
// VCVTDQ2PD.Z m256 k zmm
|
|
// VCVTDQ2PD.Z ymm k zmm
|
|
//
|
|
// Construct and append a VCVTDQ2PD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTDQ2PD_Z(mxy, k, xyz operand.Op) { ctx.VCVTDQ2PD_Z(mxy, k, xyz) }
|
|
|
|
// VCVTDQ2PS: Convert Packed Dword Integers to Packed Single-Precision FP Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTDQ2PS m128 xmm
|
|
// VCVTDQ2PS m256 ymm
|
|
// VCVTDQ2PS xmm xmm
|
|
// VCVTDQ2PS ymm ymm
|
|
// VCVTDQ2PS m128 k xmm
|
|
// VCVTDQ2PS m256 k ymm
|
|
// VCVTDQ2PS xmm k xmm
|
|
// VCVTDQ2PS ymm k ymm
|
|
// VCVTDQ2PS m512 k zmm
|
|
// VCVTDQ2PS m512 zmm
|
|
// VCVTDQ2PS zmm k zmm
|
|
// VCVTDQ2PS zmm zmm
|
|
//
|
|
// Construct and append a VCVTDQ2PS instruction to the active function.
|
|
func (c *Context) VCVTDQ2PS(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTDQ2PS(ops...))
|
|
}
|
|
|
|
// VCVTDQ2PS: Convert Packed Dword Integers to Packed Single-Precision FP Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTDQ2PS m128 xmm
|
|
// VCVTDQ2PS m256 ymm
|
|
// VCVTDQ2PS xmm xmm
|
|
// VCVTDQ2PS ymm ymm
|
|
// VCVTDQ2PS m128 k xmm
|
|
// VCVTDQ2PS m256 k ymm
|
|
// VCVTDQ2PS xmm k xmm
|
|
// VCVTDQ2PS ymm k ymm
|
|
// VCVTDQ2PS m512 k zmm
|
|
// VCVTDQ2PS m512 zmm
|
|
// VCVTDQ2PS zmm k zmm
|
|
// VCVTDQ2PS zmm zmm
|
|
//
|
|
// Construct and append a VCVTDQ2PS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTDQ2PS(ops ...operand.Op) { ctx.VCVTDQ2PS(ops...) }
|
|
|
|
// VCVTDQ2PS_BCST: Convert Packed Dword Integers to Packed Single-Precision FP Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTDQ2PS.BCST m32 k xmm
|
|
// VCVTDQ2PS.BCST m32 k ymm
|
|
// VCVTDQ2PS.BCST m32 xmm
|
|
// VCVTDQ2PS.BCST m32 ymm
|
|
// VCVTDQ2PS.BCST m32 k zmm
|
|
// VCVTDQ2PS.BCST m32 zmm
|
|
//
|
|
// Construct and append a VCVTDQ2PS.BCST instruction to the active function.
|
|
func (c *Context) VCVTDQ2PS_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTDQ2PS_BCST(ops...))
|
|
}
|
|
|
|
// VCVTDQ2PS_BCST: Convert Packed Dword Integers to Packed Single-Precision FP Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTDQ2PS.BCST m32 k xmm
|
|
// VCVTDQ2PS.BCST m32 k ymm
|
|
// VCVTDQ2PS.BCST m32 xmm
|
|
// VCVTDQ2PS.BCST m32 ymm
|
|
// VCVTDQ2PS.BCST m32 k zmm
|
|
// VCVTDQ2PS.BCST m32 zmm
|
|
//
|
|
// Construct and append a VCVTDQ2PS.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTDQ2PS_BCST(ops ...operand.Op) { ctx.VCVTDQ2PS_BCST(ops...) }
|
|
|
|
// VCVTDQ2PS_BCST_Z: Convert Packed Dword Integers to Packed Single-Precision FP Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTDQ2PS.BCST.Z m32 k xmm
|
|
// VCVTDQ2PS.BCST.Z m32 k ymm
|
|
// VCVTDQ2PS.BCST.Z m32 k zmm
|
|
//
|
|
// Construct and append a VCVTDQ2PS.BCST.Z instruction to the active function.
|
|
func (c *Context) VCVTDQ2PS_BCST_Z(m, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VCVTDQ2PS_BCST_Z(m, k, xyz))
|
|
}
|
|
|
|
// VCVTDQ2PS_BCST_Z: Convert Packed Dword Integers to Packed Single-Precision FP Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTDQ2PS.BCST.Z m32 k xmm
|
|
// VCVTDQ2PS.BCST.Z m32 k ymm
|
|
// VCVTDQ2PS.BCST.Z m32 k zmm
|
|
//
|
|
// Construct and append a VCVTDQ2PS.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTDQ2PS_BCST_Z(m, k, xyz operand.Op) { ctx.VCVTDQ2PS_BCST_Z(m, k, xyz) }
|
|
|
|
// VCVTDQ2PS_RD_SAE: Convert Packed Dword Integers to Packed Single-Precision FP Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTDQ2PS.RD_SAE zmm k zmm
|
|
// VCVTDQ2PS.RD_SAE zmm zmm
|
|
//
|
|
// Construct and append a VCVTDQ2PS.RD_SAE instruction to the active function.
|
|
func (c *Context) VCVTDQ2PS_RD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTDQ2PS_RD_SAE(ops...))
|
|
}
|
|
|
|
// VCVTDQ2PS_RD_SAE: Convert Packed Dword Integers to Packed Single-Precision FP Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTDQ2PS.RD_SAE zmm k zmm
|
|
// VCVTDQ2PS.RD_SAE zmm zmm
|
|
//
|
|
// Construct and append a VCVTDQ2PS.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTDQ2PS_RD_SAE(ops ...operand.Op) { ctx.VCVTDQ2PS_RD_SAE(ops...) }
|
|
|
|
// VCVTDQ2PS_RD_SAE_Z: Convert Packed Dword Integers to Packed Single-Precision FP Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTDQ2PS.RD_SAE.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTDQ2PS.RD_SAE.Z instruction to the active function.
|
|
func (c *Context) VCVTDQ2PS_RD_SAE_Z(z, k, z1 operand.Op) {
|
|
c.addinstruction(x86.VCVTDQ2PS_RD_SAE_Z(z, k, z1))
|
|
}
|
|
|
|
// VCVTDQ2PS_RD_SAE_Z: Convert Packed Dword Integers to Packed Single-Precision FP Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTDQ2PS.RD_SAE.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTDQ2PS.RD_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTDQ2PS_RD_SAE_Z(z, k, z1 operand.Op) { ctx.VCVTDQ2PS_RD_SAE_Z(z, k, z1) }
|
|
|
|
// VCVTDQ2PS_RN_SAE: Convert Packed Dword Integers to Packed Single-Precision FP Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTDQ2PS.RN_SAE zmm k zmm
|
|
// VCVTDQ2PS.RN_SAE zmm zmm
|
|
//
|
|
// Construct and append a VCVTDQ2PS.RN_SAE instruction to the active function.
|
|
func (c *Context) VCVTDQ2PS_RN_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTDQ2PS_RN_SAE(ops...))
|
|
}
|
|
|
|
// VCVTDQ2PS_RN_SAE: Convert Packed Dword Integers to Packed Single-Precision FP Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTDQ2PS.RN_SAE zmm k zmm
|
|
// VCVTDQ2PS.RN_SAE zmm zmm
|
|
//
|
|
// Construct and append a VCVTDQ2PS.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTDQ2PS_RN_SAE(ops ...operand.Op) { ctx.VCVTDQ2PS_RN_SAE(ops...) }
|
|
|
|
// VCVTDQ2PS_RN_SAE_Z: Convert Packed Dword Integers to Packed Single-Precision FP Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTDQ2PS.RN_SAE.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTDQ2PS.RN_SAE.Z instruction to the active function.
|
|
func (c *Context) VCVTDQ2PS_RN_SAE_Z(z, k, z1 operand.Op) {
|
|
c.addinstruction(x86.VCVTDQ2PS_RN_SAE_Z(z, k, z1))
|
|
}
|
|
|
|
// VCVTDQ2PS_RN_SAE_Z: Convert Packed Dword Integers to Packed Single-Precision FP Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTDQ2PS.RN_SAE.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTDQ2PS.RN_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTDQ2PS_RN_SAE_Z(z, k, z1 operand.Op) { ctx.VCVTDQ2PS_RN_SAE_Z(z, k, z1) }
|
|
|
|
// VCVTDQ2PS_RU_SAE: Convert Packed Dword Integers to Packed Single-Precision FP Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTDQ2PS.RU_SAE zmm k zmm
|
|
// VCVTDQ2PS.RU_SAE zmm zmm
|
|
//
|
|
// Construct and append a VCVTDQ2PS.RU_SAE instruction to the active function.
|
|
func (c *Context) VCVTDQ2PS_RU_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTDQ2PS_RU_SAE(ops...))
|
|
}
|
|
|
|
// VCVTDQ2PS_RU_SAE: Convert Packed Dword Integers to Packed Single-Precision FP Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTDQ2PS.RU_SAE zmm k zmm
|
|
// VCVTDQ2PS.RU_SAE zmm zmm
|
|
//
|
|
// Construct and append a VCVTDQ2PS.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTDQ2PS_RU_SAE(ops ...operand.Op) { ctx.VCVTDQ2PS_RU_SAE(ops...) }
|
|
|
|
// VCVTDQ2PS_RU_SAE_Z: Convert Packed Dword Integers to Packed Single-Precision FP Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTDQ2PS.RU_SAE.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTDQ2PS.RU_SAE.Z instruction to the active function.
|
|
func (c *Context) VCVTDQ2PS_RU_SAE_Z(z, k, z1 operand.Op) {
|
|
c.addinstruction(x86.VCVTDQ2PS_RU_SAE_Z(z, k, z1))
|
|
}
|
|
|
|
// VCVTDQ2PS_RU_SAE_Z: Convert Packed Dword Integers to Packed Single-Precision FP Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTDQ2PS.RU_SAE.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTDQ2PS.RU_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTDQ2PS_RU_SAE_Z(z, k, z1 operand.Op) { ctx.VCVTDQ2PS_RU_SAE_Z(z, k, z1) }
|
|
|
|
// VCVTDQ2PS_RZ_SAE: Convert Packed Dword Integers to Packed Single-Precision FP Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTDQ2PS.RZ_SAE zmm k zmm
|
|
// VCVTDQ2PS.RZ_SAE zmm zmm
|
|
//
|
|
// Construct and append a VCVTDQ2PS.RZ_SAE instruction to the active function.
|
|
func (c *Context) VCVTDQ2PS_RZ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTDQ2PS_RZ_SAE(ops...))
|
|
}
|
|
|
|
// VCVTDQ2PS_RZ_SAE: Convert Packed Dword Integers to Packed Single-Precision FP Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTDQ2PS.RZ_SAE zmm k zmm
|
|
// VCVTDQ2PS.RZ_SAE zmm zmm
|
|
//
|
|
// Construct and append a VCVTDQ2PS.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTDQ2PS_RZ_SAE(ops ...operand.Op) { ctx.VCVTDQ2PS_RZ_SAE(ops...) }
|
|
|
|
// VCVTDQ2PS_RZ_SAE_Z: Convert Packed Dword Integers to Packed Single-Precision FP Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTDQ2PS.RZ_SAE.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTDQ2PS.RZ_SAE.Z instruction to the active function.
|
|
func (c *Context) VCVTDQ2PS_RZ_SAE_Z(z, k, z1 operand.Op) {
|
|
c.addinstruction(x86.VCVTDQ2PS_RZ_SAE_Z(z, k, z1))
|
|
}
|
|
|
|
// VCVTDQ2PS_RZ_SAE_Z: Convert Packed Dword Integers to Packed Single-Precision FP Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTDQ2PS.RZ_SAE.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTDQ2PS.RZ_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTDQ2PS_RZ_SAE_Z(z, k, z1 operand.Op) { ctx.VCVTDQ2PS_RZ_SAE_Z(z, k, z1) }
|
|
|
|
// VCVTDQ2PS_Z: Convert Packed Dword Integers to Packed Single-Precision FP Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTDQ2PS.Z m128 k xmm
|
|
// VCVTDQ2PS.Z m256 k ymm
|
|
// VCVTDQ2PS.Z xmm k xmm
|
|
// VCVTDQ2PS.Z ymm k ymm
|
|
// VCVTDQ2PS.Z m512 k zmm
|
|
// VCVTDQ2PS.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTDQ2PS.Z instruction to the active function.
|
|
func (c *Context) VCVTDQ2PS_Z(mxyz, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VCVTDQ2PS_Z(mxyz, k, xyz))
|
|
}
|
|
|
|
// VCVTDQ2PS_Z: Convert Packed Dword Integers to Packed Single-Precision FP Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTDQ2PS.Z m128 k xmm
|
|
// VCVTDQ2PS.Z m256 k ymm
|
|
// VCVTDQ2PS.Z xmm k xmm
|
|
// VCVTDQ2PS.Z ymm k ymm
|
|
// VCVTDQ2PS.Z m512 k zmm
|
|
// VCVTDQ2PS.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTDQ2PS.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTDQ2PS_Z(mxyz, k, xyz operand.Op) { ctx.VCVTDQ2PS_Z(mxyz, k, xyz) }
|
|
|
|
// VCVTPD2DQ: Convert Packed Double-Precision FP Values to Packed Dword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2DQ m512 k ymm
|
|
// VCVTPD2DQ m512 ymm
|
|
// VCVTPD2DQ zmm k ymm
|
|
// VCVTPD2DQ zmm ymm
|
|
//
|
|
// Construct and append a VCVTPD2DQ instruction to the active function.
|
|
func (c *Context) VCVTPD2DQ(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTPD2DQ(ops...))
|
|
}
|
|
|
|
// VCVTPD2DQ: Convert Packed Double-Precision FP Values to Packed Dword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2DQ m512 k ymm
|
|
// VCVTPD2DQ m512 ymm
|
|
// VCVTPD2DQ zmm k ymm
|
|
// VCVTPD2DQ zmm ymm
|
|
//
|
|
// Construct and append a VCVTPD2DQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPD2DQ(ops ...operand.Op) { ctx.VCVTPD2DQ(ops...) }
|
|
|
|
// VCVTPD2DQX: Convert Packed Double-Precision FP Values to Packed Dword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2DQX m128 xmm
|
|
// VCVTPD2DQX xmm xmm
|
|
// VCVTPD2DQX m128 k xmm
|
|
// VCVTPD2DQX xmm k xmm
|
|
//
|
|
// Construct and append a VCVTPD2DQX instruction to the active function.
|
|
func (c *Context) VCVTPD2DQX(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTPD2DQX(ops...))
|
|
}
|
|
|
|
// VCVTPD2DQX: Convert Packed Double-Precision FP Values to Packed Dword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2DQX m128 xmm
|
|
// VCVTPD2DQX xmm xmm
|
|
// VCVTPD2DQX m128 k xmm
|
|
// VCVTPD2DQX xmm k xmm
|
|
//
|
|
// Construct and append a VCVTPD2DQX instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPD2DQX(ops ...operand.Op) { ctx.VCVTPD2DQX(ops...) }
|
|
|
|
// VCVTPD2DQX_BCST: Convert Packed Double-Precision FP Values to Packed Dword Integers (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2DQX.BCST m64 k xmm
|
|
// VCVTPD2DQX.BCST m64 xmm
|
|
//
|
|
// Construct and append a VCVTPD2DQX.BCST instruction to the active function.
|
|
func (c *Context) VCVTPD2DQX_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTPD2DQX_BCST(ops...))
|
|
}
|
|
|
|
// VCVTPD2DQX_BCST: Convert Packed Double-Precision FP Values to Packed Dword Integers (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2DQX.BCST m64 k xmm
|
|
// VCVTPD2DQX.BCST m64 xmm
|
|
//
|
|
// Construct and append a VCVTPD2DQX.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPD2DQX_BCST(ops ...operand.Op) { ctx.VCVTPD2DQX_BCST(ops...) }
|
|
|
|
// VCVTPD2DQX_BCST_Z: Convert Packed Double-Precision FP Values to Packed Dword Integers (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2DQX.BCST.Z m64 k xmm
|
|
//
|
|
// Construct and append a VCVTPD2DQX.BCST.Z instruction to the active function.
|
|
func (c *Context) VCVTPD2DQX_BCST_Z(m, k, x operand.Op) {
|
|
c.addinstruction(x86.VCVTPD2DQX_BCST_Z(m, k, x))
|
|
}
|
|
|
|
// VCVTPD2DQX_BCST_Z: Convert Packed Double-Precision FP Values to Packed Dword Integers (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2DQX.BCST.Z m64 k xmm
|
|
//
|
|
// Construct and append a VCVTPD2DQX.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPD2DQX_BCST_Z(m, k, x operand.Op) { ctx.VCVTPD2DQX_BCST_Z(m, k, x) }
|
|
|
|
// VCVTPD2DQX_Z: Convert Packed Double-Precision FP Values to Packed Dword Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2DQX.Z m128 k xmm
|
|
// VCVTPD2DQX.Z xmm k xmm
|
|
//
|
|
// Construct and append a VCVTPD2DQX.Z instruction to the active function.
|
|
func (c *Context) VCVTPD2DQX_Z(mx, k, x operand.Op) {
|
|
c.addinstruction(x86.VCVTPD2DQX_Z(mx, k, x))
|
|
}
|
|
|
|
// VCVTPD2DQX_Z: Convert Packed Double-Precision FP Values to Packed Dword Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2DQX.Z m128 k xmm
|
|
// VCVTPD2DQX.Z xmm k xmm
|
|
//
|
|
// Construct and append a VCVTPD2DQX.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPD2DQX_Z(mx, k, x operand.Op) { ctx.VCVTPD2DQX_Z(mx, k, x) }
|
|
|
|
// VCVTPD2DQY: Convert Packed Double-Precision FP Values to Packed Dword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2DQY m256 xmm
|
|
// VCVTPD2DQY ymm xmm
|
|
// VCVTPD2DQY m256 k xmm
|
|
// VCVTPD2DQY ymm k xmm
|
|
//
|
|
// Construct and append a VCVTPD2DQY instruction to the active function.
|
|
func (c *Context) VCVTPD2DQY(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTPD2DQY(ops...))
|
|
}
|
|
|
|
// VCVTPD2DQY: Convert Packed Double-Precision FP Values to Packed Dword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2DQY m256 xmm
|
|
// VCVTPD2DQY ymm xmm
|
|
// VCVTPD2DQY m256 k xmm
|
|
// VCVTPD2DQY ymm k xmm
|
|
//
|
|
// Construct and append a VCVTPD2DQY instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPD2DQY(ops ...operand.Op) { ctx.VCVTPD2DQY(ops...) }
|
|
|
|
// VCVTPD2DQY_BCST: Convert Packed Double-Precision FP Values to Packed Dword Integers (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2DQY.BCST m64 k xmm
|
|
// VCVTPD2DQY.BCST m64 xmm
|
|
//
|
|
// Construct and append a VCVTPD2DQY.BCST instruction to the active function.
|
|
func (c *Context) VCVTPD2DQY_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTPD2DQY_BCST(ops...))
|
|
}
|
|
|
|
// VCVTPD2DQY_BCST: Convert Packed Double-Precision FP Values to Packed Dword Integers (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2DQY.BCST m64 k xmm
|
|
// VCVTPD2DQY.BCST m64 xmm
|
|
//
|
|
// Construct and append a VCVTPD2DQY.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPD2DQY_BCST(ops ...operand.Op) { ctx.VCVTPD2DQY_BCST(ops...) }
|
|
|
|
// VCVTPD2DQY_BCST_Z: Convert Packed Double-Precision FP Values to Packed Dword Integers (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2DQY.BCST.Z m64 k xmm
|
|
//
|
|
// Construct and append a VCVTPD2DQY.BCST.Z instruction to the active function.
|
|
func (c *Context) VCVTPD2DQY_BCST_Z(m, k, x operand.Op) {
|
|
c.addinstruction(x86.VCVTPD2DQY_BCST_Z(m, k, x))
|
|
}
|
|
|
|
// VCVTPD2DQY_BCST_Z: Convert Packed Double-Precision FP Values to Packed Dword Integers (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2DQY.BCST.Z m64 k xmm
|
|
//
|
|
// Construct and append a VCVTPD2DQY.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPD2DQY_BCST_Z(m, k, x operand.Op) { ctx.VCVTPD2DQY_BCST_Z(m, k, x) }
|
|
|
|
// VCVTPD2DQY_Z: Convert Packed Double-Precision FP Values to Packed Dword Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2DQY.Z m256 k xmm
|
|
// VCVTPD2DQY.Z ymm k xmm
|
|
//
|
|
// Construct and append a VCVTPD2DQY.Z instruction to the active function.
|
|
func (c *Context) VCVTPD2DQY_Z(my, k, x operand.Op) {
|
|
c.addinstruction(x86.VCVTPD2DQY_Z(my, k, x))
|
|
}
|
|
|
|
// VCVTPD2DQY_Z: Convert Packed Double-Precision FP Values to Packed Dword Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2DQY.Z m256 k xmm
|
|
// VCVTPD2DQY.Z ymm k xmm
|
|
//
|
|
// Construct and append a VCVTPD2DQY.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPD2DQY_Z(my, k, x operand.Op) { ctx.VCVTPD2DQY_Z(my, k, x) }
|
|
|
|
// VCVTPD2DQ_BCST: Convert Packed Double-Precision FP Values to Packed Dword Integers (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2DQ.BCST m64 k ymm
|
|
// VCVTPD2DQ.BCST m64 ymm
|
|
//
|
|
// Construct and append a VCVTPD2DQ.BCST instruction to the active function.
|
|
func (c *Context) VCVTPD2DQ_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTPD2DQ_BCST(ops...))
|
|
}
|
|
|
|
// VCVTPD2DQ_BCST: Convert Packed Double-Precision FP Values to Packed Dword Integers (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2DQ.BCST m64 k ymm
|
|
// VCVTPD2DQ.BCST m64 ymm
|
|
//
|
|
// Construct and append a VCVTPD2DQ.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPD2DQ_BCST(ops ...operand.Op) { ctx.VCVTPD2DQ_BCST(ops...) }
|
|
|
|
// VCVTPD2DQ_BCST_Z: Convert Packed Double-Precision FP Values to Packed Dword Integers (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2DQ.BCST.Z m64 k ymm
|
|
//
|
|
// Construct and append a VCVTPD2DQ.BCST.Z instruction to the active function.
|
|
func (c *Context) VCVTPD2DQ_BCST_Z(m, k, y operand.Op) {
|
|
c.addinstruction(x86.VCVTPD2DQ_BCST_Z(m, k, y))
|
|
}
|
|
|
|
// VCVTPD2DQ_BCST_Z: Convert Packed Double-Precision FP Values to Packed Dword Integers (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2DQ.BCST.Z m64 k ymm
|
|
//
|
|
// Construct and append a VCVTPD2DQ.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPD2DQ_BCST_Z(m, k, y operand.Op) { ctx.VCVTPD2DQ_BCST_Z(m, k, y) }
|
|
|
|
// VCVTPD2DQ_RD_SAE: Convert Packed Double-Precision FP Values to Packed Dword Integers (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2DQ.RD_SAE zmm k ymm
|
|
// VCVTPD2DQ.RD_SAE zmm ymm
|
|
//
|
|
// Construct and append a VCVTPD2DQ.RD_SAE instruction to the active function.
|
|
func (c *Context) VCVTPD2DQ_RD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTPD2DQ_RD_SAE(ops...))
|
|
}
|
|
|
|
// VCVTPD2DQ_RD_SAE: Convert Packed Double-Precision FP Values to Packed Dword Integers (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2DQ.RD_SAE zmm k ymm
|
|
// VCVTPD2DQ.RD_SAE zmm ymm
|
|
//
|
|
// Construct and append a VCVTPD2DQ.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPD2DQ_RD_SAE(ops ...operand.Op) { ctx.VCVTPD2DQ_RD_SAE(ops...) }
|
|
|
|
// VCVTPD2DQ_RD_SAE_Z: Convert Packed Double-Precision FP Values to Packed Dword Integers (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2DQ.RD_SAE.Z zmm k ymm
|
|
//
|
|
// Construct and append a VCVTPD2DQ.RD_SAE.Z instruction to the active function.
|
|
func (c *Context) VCVTPD2DQ_RD_SAE_Z(z, k, y operand.Op) {
|
|
c.addinstruction(x86.VCVTPD2DQ_RD_SAE_Z(z, k, y))
|
|
}
|
|
|
|
// VCVTPD2DQ_RD_SAE_Z: Convert Packed Double-Precision FP Values to Packed Dword Integers (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2DQ.RD_SAE.Z zmm k ymm
|
|
//
|
|
// Construct and append a VCVTPD2DQ.RD_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPD2DQ_RD_SAE_Z(z, k, y operand.Op) { ctx.VCVTPD2DQ_RD_SAE_Z(z, k, y) }
|
|
|
|
// VCVTPD2DQ_RN_SAE: Convert Packed Double-Precision FP Values to Packed Dword Integers (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2DQ.RN_SAE zmm k ymm
|
|
// VCVTPD2DQ.RN_SAE zmm ymm
|
|
//
|
|
// Construct and append a VCVTPD2DQ.RN_SAE instruction to the active function.
|
|
func (c *Context) VCVTPD2DQ_RN_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTPD2DQ_RN_SAE(ops...))
|
|
}
|
|
|
|
// VCVTPD2DQ_RN_SAE: Convert Packed Double-Precision FP Values to Packed Dword Integers (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2DQ.RN_SAE zmm k ymm
|
|
// VCVTPD2DQ.RN_SAE zmm ymm
|
|
//
|
|
// Construct and append a VCVTPD2DQ.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPD2DQ_RN_SAE(ops ...operand.Op) { ctx.VCVTPD2DQ_RN_SAE(ops...) }
|
|
|
|
// VCVTPD2DQ_RN_SAE_Z: Convert Packed Double-Precision FP Values to Packed Dword Integers (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2DQ.RN_SAE.Z zmm k ymm
|
|
//
|
|
// Construct and append a VCVTPD2DQ.RN_SAE.Z instruction to the active function.
|
|
func (c *Context) VCVTPD2DQ_RN_SAE_Z(z, k, y operand.Op) {
|
|
c.addinstruction(x86.VCVTPD2DQ_RN_SAE_Z(z, k, y))
|
|
}
|
|
|
|
// VCVTPD2DQ_RN_SAE_Z: Convert Packed Double-Precision FP Values to Packed Dword Integers (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2DQ.RN_SAE.Z zmm k ymm
|
|
//
|
|
// Construct and append a VCVTPD2DQ.RN_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPD2DQ_RN_SAE_Z(z, k, y operand.Op) { ctx.VCVTPD2DQ_RN_SAE_Z(z, k, y) }
|
|
|
|
// VCVTPD2DQ_RU_SAE: Convert Packed Double-Precision FP Values to Packed Dword Integers (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2DQ.RU_SAE zmm k ymm
|
|
// VCVTPD2DQ.RU_SAE zmm ymm
|
|
//
|
|
// Construct and append a VCVTPD2DQ.RU_SAE instruction to the active function.
|
|
func (c *Context) VCVTPD2DQ_RU_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTPD2DQ_RU_SAE(ops...))
|
|
}
|
|
|
|
// VCVTPD2DQ_RU_SAE: Convert Packed Double-Precision FP Values to Packed Dword Integers (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2DQ.RU_SAE zmm k ymm
|
|
// VCVTPD2DQ.RU_SAE zmm ymm
|
|
//
|
|
// Construct and append a VCVTPD2DQ.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPD2DQ_RU_SAE(ops ...operand.Op) { ctx.VCVTPD2DQ_RU_SAE(ops...) }
|
|
|
|
// VCVTPD2DQ_RU_SAE_Z: Convert Packed Double-Precision FP Values to Packed Dword Integers (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2DQ.RU_SAE.Z zmm k ymm
|
|
//
|
|
// Construct and append a VCVTPD2DQ.RU_SAE.Z instruction to the active function.
|
|
func (c *Context) VCVTPD2DQ_RU_SAE_Z(z, k, y operand.Op) {
|
|
c.addinstruction(x86.VCVTPD2DQ_RU_SAE_Z(z, k, y))
|
|
}
|
|
|
|
// VCVTPD2DQ_RU_SAE_Z: Convert Packed Double-Precision FP Values to Packed Dword Integers (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2DQ.RU_SAE.Z zmm k ymm
|
|
//
|
|
// Construct and append a VCVTPD2DQ.RU_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPD2DQ_RU_SAE_Z(z, k, y operand.Op) { ctx.VCVTPD2DQ_RU_SAE_Z(z, k, y) }
|
|
|
|
// VCVTPD2DQ_RZ_SAE: Convert Packed Double-Precision FP Values to Packed Dword Integers (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2DQ.RZ_SAE zmm k ymm
|
|
// VCVTPD2DQ.RZ_SAE zmm ymm
|
|
//
|
|
// Construct and append a VCVTPD2DQ.RZ_SAE instruction to the active function.
|
|
func (c *Context) VCVTPD2DQ_RZ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTPD2DQ_RZ_SAE(ops...))
|
|
}
|
|
|
|
// VCVTPD2DQ_RZ_SAE: Convert Packed Double-Precision FP Values to Packed Dword Integers (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2DQ.RZ_SAE zmm k ymm
|
|
// VCVTPD2DQ.RZ_SAE zmm ymm
|
|
//
|
|
// Construct and append a VCVTPD2DQ.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPD2DQ_RZ_SAE(ops ...operand.Op) { ctx.VCVTPD2DQ_RZ_SAE(ops...) }
|
|
|
|
// VCVTPD2DQ_RZ_SAE_Z: Convert Packed Double-Precision FP Values to Packed Dword Integers (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2DQ.RZ_SAE.Z zmm k ymm
|
|
//
|
|
// Construct and append a VCVTPD2DQ.RZ_SAE.Z instruction to the active function.
|
|
func (c *Context) VCVTPD2DQ_RZ_SAE_Z(z, k, y operand.Op) {
|
|
c.addinstruction(x86.VCVTPD2DQ_RZ_SAE_Z(z, k, y))
|
|
}
|
|
|
|
// VCVTPD2DQ_RZ_SAE_Z: Convert Packed Double-Precision FP Values to Packed Dword Integers (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2DQ.RZ_SAE.Z zmm k ymm
|
|
//
|
|
// Construct and append a VCVTPD2DQ.RZ_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPD2DQ_RZ_SAE_Z(z, k, y operand.Op) { ctx.VCVTPD2DQ_RZ_SAE_Z(z, k, y) }
|
|
|
|
// VCVTPD2DQ_Z: Convert Packed Double-Precision FP Values to Packed Dword Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2DQ.Z m512 k ymm
|
|
// VCVTPD2DQ.Z zmm k ymm
|
|
//
|
|
// Construct and append a VCVTPD2DQ.Z instruction to the active function.
|
|
func (c *Context) VCVTPD2DQ_Z(mz, k, y operand.Op) {
|
|
c.addinstruction(x86.VCVTPD2DQ_Z(mz, k, y))
|
|
}
|
|
|
|
// VCVTPD2DQ_Z: Convert Packed Double-Precision FP Values to Packed Dword Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2DQ.Z m512 k ymm
|
|
// VCVTPD2DQ.Z zmm k ymm
|
|
//
|
|
// Construct and append a VCVTPD2DQ.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPD2DQ_Z(mz, k, y operand.Op) { ctx.VCVTPD2DQ_Z(mz, k, y) }
|
|
|
|
// VCVTPD2PS: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2PS m512 k ymm
|
|
// VCVTPD2PS m512 ymm
|
|
// VCVTPD2PS zmm k ymm
|
|
// VCVTPD2PS zmm ymm
|
|
//
|
|
// Construct and append a VCVTPD2PS instruction to the active function.
|
|
func (c *Context) VCVTPD2PS(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTPD2PS(ops...))
|
|
}
|
|
|
|
// VCVTPD2PS: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2PS m512 k ymm
|
|
// VCVTPD2PS m512 ymm
|
|
// VCVTPD2PS zmm k ymm
|
|
// VCVTPD2PS zmm ymm
|
|
//
|
|
// Construct and append a VCVTPD2PS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPD2PS(ops ...operand.Op) { ctx.VCVTPD2PS(ops...) }
|
|
|
|
// VCVTPD2PSX: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2PSX m128 xmm
|
|
// VCVTPD2PSX xmm xmm
|
|
// VCVTPD2PSX m128 k xmm
|
|
// VCVTPD2PSX xmm k xmm
|
|
//
|
|
// Construct and append a VCVTPD2PSX instruction to the active function.
|
|
func (c *Context) VCVTPD2PSX(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTPD2PSX(ops...))
|
|
}
|
|
|
|
// VCVTPD2PSX: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2PSX m128 xmm
|
|
// VCVTPD2PSX xmm xmm
|
|
// VCVTPD2PSX m128 k xmm
|
|
// VCVTPD2PSX xmm k xmm
|
|
//
|
|
// Construct and append a VCVTPD2PSX instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPD2PSX(ops ...operand.Op) { ctx.VCVTPD2PSX(ops...) }
|
|
|
|
// VCVTPD2PSX_BCST: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2PSX.BCST m64 k xmm
|
|
// VCVTPD2PSX.BCST m64 xmm
|
|
//
|
|
// Construct and append a VCVTPD2PSX.BCST instruction to the active function.
|
|
func (c *Context) VCVTPD2PSX_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTPD2PSX_BCST(ops...))
|
|
}
|
|
|
|
// VCVTPD2PSX_BCST: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2PSX.BCST m64 k xmm
|
|
// VCVTPD2PSX.BCST m64 xmm
|
|
//
|
|
// Construct and append a VCVTPD2PSX.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPD2PSX_BCST(ops ...operand.Op) { ctx.VCVTPD2PSX_BCST(ops...) }
|
|
|
|
// VCVTPD2PSX_BCST_Z: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2PSX.BCST.Z m64 k xmm
|
|
//
|
|
// Construct and append a VCVTPD2PSX.BCST.Z instruction to the active function.
|
|
func (c *Context) VCVTPD2PSX_BCST_Z(m, k, x operand.Op) {
|
|
c.addinstruction(x86.VCVTPD2PSX_BCST_Z(m, k, x))
|
|
}
|
|
|
|
// VCVTPD2PSX_BCST_Z: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2PSX.BCST.Z m64 k xmm
|
|
//
|
|
// Construct and append a VCVTPD2PSX.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPD2PSX_BCST_Z(m, k, x operand.Op) { ctx.VCVTPD2PSX_BCST_Z(m, k, x) }
|
|
|
|
// VCVTPD2PSX_Z: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2PSX.Z m128 k xmm
|
|
// VCVTPD2PSX.Z xmm k xmm
|
|
//
|
|
// Construct and append a VCVTPD2PSX.Z instruction to the active function.
|
|
func (c *Context) VCVTPD2PSX_Z(mx, k, x operand.Op) {
|
|
c.addinstruction(x86.VCVTPD2PSX_Z(mx, k, x))
|
|
}
|
|
|
|
// VCVTPD2PSX_Z: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2PSX.Z m128 k xmm
|
|
// VCVTPD2PSX.Z xmm k xmm
|
|
//
|
|
// Construct and append a VCVTPD2PSX.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPD2PSX_Z(mx, k, x operand.Op) { ctx.VCVTPD2PSX_Z(mx, k, x) }
|
|
|
|
// VCVTPD2PSY: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2PSY m256 xmm
|
|
// VCVTPD2PSY ymm xmm
|
|
// VCVTPD2PSY m256 k xmm
|
|
// VCVTPD2PSY ymm k xmm
|
|
//
|
|
// Construct and append a VCVTPD2PSY instruction to the active function.
|
|
func (c *Context) VCVTPD2PSY(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTPD2PSY(ops...))
|
|
}
|
|
|
|
// VCVTPD2PSY: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2PSY m256 xmm
|
|
// VCVTPD2PSY ymm xmm
|
|
// VCVTPD2PSY m256 k xmm
|
|
// VCVTPD2PSY ymm k xmm
|
|
//
|
|
// Construct and append a VCVTPD2PSY instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPD2PSY(ops ...operand.Op) { ctx.VCVTPD2PSY(ops...) }
|
|
|
|
// VCVTPD2PSY_BCST: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2PSY.BCST m64 k xmm
|
|
// VCVTPD2PSY.BCST m64 xmm
|
|
//
|
|
// Construct and append a VCVTPD2PSY.BCST instruction to the active function.
|
|
func (c *Context) VCVTPD2PSY_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTPD2PSY_BCST(ops...))
|
|
}
|
|
|
|
// VCVTPD2PSY_BCST: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2PSY.BCST m64 k xmm
|
|
// VCVTPD2PSY.BCST m64 xmm
|
|
//
|
|
// Construct and append a VCVTPD2PSY.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPD2PSY_BCST(ops ...operand.Op) { ctx.VCVTPD2PSY_BCST(ops...) }
|
|
|
|
// VCVTPD2PSY_BCST_Z: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2PSY.BCST.Z m64 k xmm
|
|
//
|
|
// Construct and append a VCVTPD2PSY.BCST.Z instruction to the active function.
|
|
func (c *Context) VCVTPD2PSY_BCST_Z(m, k, x operand.Op) {
|
|
c.addinstruction(x86.VCVTPD2PSY_BCST_Z(m, k, x))
|
|
}
|
|
|
|
// VCVTPD2PSY_BCST_Z: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2PSY.BCST.Z m64 k xmm
|
|
//
|
|
// Construct and append a VCVTPD2PSY.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPD2PSY_BCST_Z(m, k, x operand.Op) { ctx.VCVTPD2PSY_BCST_Z(m, k, x) }
|
|
|
|
// VCVTPD2PSY_Z: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2PSY.Z m256 k xmm
|
|
// VCVTPD2PSY.Z ymm k xmm
|
|
//
|
|
// Construct and append a VCVTPD2PSY.Z instruction to the active function.
|
|
func (c *Context) VCVTPD2PSY_Z(my, k, x operand.Op) {
|
|
c.addinstruction(x86.VCVTPD2PSY_Z(my, k, x))
|
|
}
|
|
|
|
// VCVTPD2PSY_Z: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2PSY.Z m256 k xmm
|
|
// VCVTPD2PSY.Z ymm k xmm
|
|
//
|
|
// Construct and append a VCVTPD2PSY.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPD2PSY_Z(my, k, x operand.Op) { ctx.VCVTPD2PSY_Z(my, k, x) }
|
|
|
|
// VCVTPD2PS_BCST: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2PS.BCST m64 k ymm
|
|
// VCVTPD2PS.BCST m64 ymm
|
|
//
|
|
// Construct and append a VCVTPD2PS.BCST instruction to the active function.
|
|
func (c *Context) VCVTPD2PS_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTPD2PS_BCST(ops...))
|
|
}
|
|
|
|
// VCVTPD2PS_BCST: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2PS.BCST m64 k ymm
|
|
// VCVTPD2PS.BCST m64 ymm
|
|
//
|
|
// Construct and append a VCVTPD2PS.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPD2PS_BCST(ops ...operand.Op) { ctx.VCVTPD2PS_BCST(ops...) }
|
|
|
|
// VCVTPD2PS_BCST_Z: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2PS.BCST.Z m64 k ymm
|
|
//
|
|
// Construct and append a VCVTPD2PS.BCST.Z instruction to the active function.
|
|
func (c *Context) VCVTPD2PS_BCST_Z(m, k, y operand.Op) {
|
|
c.addinstruction(x86.VCVTPD2PS_BCST_Z(m, k, y))
|
|
}
|
|
|
|
// VCVTPD2PS_BCST_Z: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2PS.BCST.Z m64 k ymm
|
|
//
|
|
// Construct and append a VCVTPD2PS.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPD2PS_BCST_Z(m, k, y operand.Op) { ctx.VCVTPD2PS_BCST_Z(m, k, y) }
|
|
|
|
// VCVTPD2PS_RD_SAE: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2PS.RD_SAE zmm k ymm
|
|
// VCVTPD2PS.RD_SAE zmm ymm
|
|
//
|
|
// Construct and append a VCVTPD2PS.RD_SAE instruction to the active function.
|
|
func (c *Context) VCVTPD2PS_RD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTPD2PS_RD_SAE(ops...))
|
|
}
|
|
|
|
// VCVTPD2PS_RD_SAE: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2PS.RD_SAE zmm k ymm
|
|
// VCVTPD2PS.RD_SAE zmm ymm
|
|
//
|
|
// Construct and append a VCVTPD2PS.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPD2PS_RD_SAE(ops ...operand.Op) { ctx.VCVTPD2PS_RD_SAE(ops...) }
|
|
|
|
// VCVTPD2PS_RD_SAE_Z: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2PS.RD_SAE.Z zmm k ymm
|
|
//
|
|
// Construct and append a VCVTPD2PS.RD_SAE.Z instruction to the active function.
|
|
func (c *Context) VCVTPD2PS_RD_SAE_Z(z, k, y operand.Op) {
|
|
c.addinstruction(x86.VCVTPD2PS_RD_SAE_Z(z, k, y))
|
|
}
|
|
|
|
// VCVTPD2PS_RD_SAE_Z: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2PS.RD_SAE.Z zmm k ymm
|
|
//
|
|
// Construct and append a VCVTPD2PS.RD_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPD2PS_RD_SAE_Z(z, k, y operand.Op) { ctx.VCVTPD2PS_RD_SAE_Z(z, k, y) }
|
|
|
|
// VCVTPD2PS_RN_SAE: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2PS.RN_SAE zmm k ymm
|
|
// VCVTPD2PS.RN_SAE zmm ymm
|
|
//
|
|
// Construct and append a VCVTPD2PS.RN_SAE instruction to the active function.
|
|
func (c *Context) VCVTPD2PS_RN_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTPD2PS_RN_SAE(ops...))
|
|
}
|
|
|
|
// VCVTPD2PS_RN_SAE: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2PS.RN_SAE zmm k ymm
|
|
// VCVTPD2PS.RN_SAE zmm ymm
|
|
//
|
|
// Construct and append a VCVTPD2PS.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPD2PS_RN_SAE(ops ...operand.Op) { ctx.VCVTPD2PS_RN_SAE(ops...) }
|
|
|
|
// VCVTPD2PS_RN_SAE_Z: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2PS.RN_SAE.Z zmm k ymm
|
|
//
|
|
// Construct and append a VCVTPD2PS.RN_SAE.Z instruction to the active function.
|
|
func (c *Context) VCVTPD2PS_RN_SAE_Z(z, k, y operand.Op) {
|
|
c.addinstruction(x86.VCVTPD2PS_RN_SAE_Z(z, k, y))
|
|
}
|
|
|
|
// VCVTPD2PS_RN_SAE_Z: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2PS.RN_SAE.Z zmm k ymm
|
|
//
|
|
// Construct and append a VCVTPD2PS.RN_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPD2PS_RN_SAE_Z(z, k, y operand.Op) { ctx.VCVTPD2PS_RN_SAE_Z(z, k, y) }
|
|
|
|
// VCVTPD2PS_RU_SAE: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2PS.RU_SAE zmm k ymm
|
|
// VCVTPD2PS.RU_SAE zmm ymm
|
|
//
|
|
// Construct and append a VCVTPD2PS.RU_SAE instruction to the active function.
|
|
func (c *Context) VCVTPD2PS_RU_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTPD2PS_RU_SAE(ops...))
|
|
}
|
|
|
|
// VCVTPD2PS_RU_SAE: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2PS.RU_SAE zmm k ymm
|
|
// VCVTPD2PS.RU_SAE zmm ymm
|
|
//
|
|
// Construct and append a VCVTPD2PS.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPD2PS_RU_SAE(ops ...operand.Op) { ctx.VCVTPD2PS_RU_SAE(ops...) }
|
|
|
|
// VCVTPD2PS_RU_SAE_Z: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
// VCVTPD2PS.RU_SAE.Z zmm k ymm
//
// Construct and append a VCVTPD2PS.RU_SAE.Z instruction to the active function.
func (c *Context) VCVTPD2PS_RU_SAE_Z(z, k, y operand.Op) {
	c.addinstruction(x86.VCVTPD2PS_RU_SAE_Z(z, k, y))
}

// VCVTPD2PS_RU_SAE_Z: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
// VCVTPD2PS.RU_SAE.Z zmm k ymm
//
// Construct and append a VCVTPD2PS.RU_SAE.Z instruction to the active function.
// Operates on the global context.
func VCVTPD2PS_RU_SAE_Z(z, k, y operand.Op) { ctx.VCVTPD2PS_RU_SAE_Z(z, k, y) }
|
|
|
|
// VCVTPD2PS_RZ_SAE: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values (Round Towards Zero).
//
// Forms:
//
// VCVTPD2PS.RZ_SAE zmm k ymm
// VCVTPD2PS.RZ_SAE zmm ymm
//
// Construct and append a VCVTPD2PS.RZ_SAE instruction to the active function.
func (c *Context) VCVTPD2PS_RZ_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VCVTPD2PS_RZ_SAE(ops...))
}

// VCVTPD2PS_RZ_SAE: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values (Round Towards Zero).
//
// Forms:
//
// VCVTPD2PS.RZ_SAE zmm k ymm
// VCVTPD2PS.RZ_SAE zmm ymm
//
// Construct and append a VCVTPD2PS.RZ_SAE instruction to the active function.
// Operates on the global context.
func VCVTPD2PS_RZ_SAE(ops ...operand.Op) { ctx.VCVTPD2PS_RZ_SAE(ops...) }
|
|
|
|
// VCVTPD2PS_RZ_SAE_Z: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
// VCVTPD2PS.RZ_SAE.Z zmm k ymm
//
// Construct and append a VCVTPD2PS.RZ_SAE.Z instruction to the active function.
func (c *Context) VCVTPD2PS_RZ_SAE_Z(z, k, y operand.Op) {
	c.addinstruction(x86.VCVTPD2PS_RZ_SAE_Z(z, k, y))
}

// VCVTPD2PS_RZ_SAE_Z: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
// VCVTPD2PS.RZ_SAE.Z zmm k ymm
//
// Construct and append a VCVTPD2PS.RZ_SAE.Z instruction to the active function.
// Operates on the global context.
func VCVTPD2PS_RZ_SAE_Z(z, k, y operand.Op) { ctx.VCVTPD2PS_RZ_SAE_Z(z, k, y) }
|
|
|
|
// VCVTPD2PS_Z: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values (Zeroing Masking).
//
// Forms:
//
// VCVTPD2PS.Z m512 k ymm
// VCVTPD2PS.Z zmm k ymm
//
// Construct and append a VCVTPD2PS.Z instruction to the active function.
func (c *Context) VCVTPD2PS_Z(mz, k, y operand.Op) {
	c.addinstruction(x86.VCVTPD2PS_Z(mz, k, y))
}

// VCVTPD2PS_Z: Convert Packed Double-Precision FP Values to Packed Single-Precision FP Values (Zeroing Masking).
//
// Forms:
//
// VCVTPD2PS.Z m512 k ymm
// VCVTPD2PS.Z zmm k ymm
//
// Construct and append a VCVTPD2PS.Z instruction to the active function.
// Operates on the global context.
func VCVTPD2PS_Z(mz, k, y operand.Op) { ctx.VCVTPD2PS_Z(mz, k, y) }
|
|
|
|
// VCVTPD2QQ: Convert Packed Double-Precision Floating-Point Values to Packed Quadword Integers.
//
// Forms:
//
// VCVTPD2QQ m128 k xmm
// VCVTPD2QQ m128 xmm
// VCVTPD2QQ m256 k ymm
// VCVTPD2QQ m256 ymm
// VCVTPD2QQ xmm k xmm
// VCVTPD2QQ xmm xmm
// VCVTPD2QQ ymm k ymm
// VCVTPD2QQ ymm ymm
// VCVTPD2QQ m512 k zmm
// VCVTPD2QQ m512 zmm
// VCVTPD2QQ zmm k zmm
// VCVTPD2QQ zmm zmm
//
// Construct and append a VCVTPD2QQ instruction to the active function.
func (c *Context) VCVTPD2QQ(ops ...operand.Op) {
	c.addinstruction(x86.VCVTPD2QQ(ops...))
}

// VCVTPD2QQ: Convert Packed Double-Precision Floating-Point Values to Packed Quadword Integers.
//
// Forms:
//
// VCVTPD2QQ m128 k xmm
// VCVTPD2QQ m128 xmm
// VCVTPD2QQ m256 k ymm
// VCVTPD2QQ m256 ymm
// VCVTPD2QQ xmm k xmm
// VCVTPD2QQ xmm xmm
// VCVTPD2QQ ymm k ymm
// VCVTPD2QQ ymm ymm
// VCVTPD2QQ m512 k zmm
// VCVTPD2QQ m512 zmm
// VCVTPD2QQ zmm k zmm
// VCVTPD2QQ zmm zmm
//
// Construct and append a VCVTPD2QQ instruction to the active function.
// Operates on the global context.
func VCVTPD2QQ(ops ...operand.Op) { ctx.VCVTPD2QQ(ops...) }
|
|
|
|
// VCVTPD2QQ_BCST: Convert Packed Double-Precision Floating-Point Values to Packed Quadword Integers (Broadcast).
//
// Forms:
//
// VCVTPD2QQ.BCST m64 k xmm
// VCVTPD2QQ.BCST m64 k ymm
// VCVTPD2QQ.BCST m64 xmm
// VCVTPD2QQ.BCST m64 ymm
// VCVTPD2QQ.BCST m64 k zmm
// VCVTPD2QQ.BCST m64 zmm
//
// Construct and append a VCVTPD2QQ.BCST instruction to the active function.
func (c *Context) VCVTPD2QQ_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VCVTPD2QQ_BCST(ops...))
}

// VCVTPD2QQ_BCST: Convert Packed Double-Precision Floating-Point Values to Packed Quadword Integers (Broadcast).
//
// Forms:
//
// VCVTPD2QQ.BCST m64 k xmm
// VCVTPD2QQ.BCST m64 k ymm
// VCVTPD2QQ.BCST m64 xmm
// VCVTPD2QQ.BCST m64 ymm
// VCVTPD2QQ.BCST m64 k zmm
// VCVTPD2QQ.BCST m64 zmm
//
// Construct and append a VCVTPD2QQ.BCST instruction to the active function.
// Operates on the global context.
func VCVTPD2QQ_BCST(ops ...operand.Op) { ctx.VCVTPD2QQ_BCST(ops...) }
|
|
|
|
// VCVTPD2QQ_BCST_Z: Convert Packed Double-Precision Floating-Point Values to Packed Quadword Integers (Broadcast, Zeroing Masking).
//
// Forms:
//
// VCVTPD2QQ.BCST.Z m64 k xmm
// VCVTPD2QQ.BCST.Z m64 k ymm
// VCVTPD2QQ.BCST.Z m64 k zmm
//
// Construct and append a VCVTPD2QQ.BCST.Z instruction to the active function.
func (c *Context) VCVTPD2QQ_BCST_Z(m, k, xyz operand.Op) {
	c.addinstruction(x86.VCVTPD2QQ_BCST_Z(m, k, xyz))
}

// VCVTPD2QQ_BCST_Z: Convert Packed Double-Precision Floating-Point Values to Packed Quadword Integers (Broadcast, Zeroing Masking).
//
// Forms:
//
// VCVTPD2QQ.BCST.Z m64 k xmm
// VCVTPD2QQ.BCST.Z m64 k ymm
// VCVTPD2QQ.BCST.Z m64 k zmm
//
// Construct and append a VCVTPD2QQ.BCST.Z instruction to the active function.
// Operates on the global context.
func VCVTPD2QQ_BCST_Z(m, k, xyz operand.Op) { ctx.VCVTPD2QQ_BCST_Z(m, k, xyz) }
|
|
|
|
// VCVTPD2QQ_RD_SAE: Convert Packed Double-Precision Floating-Point Values to Packed Quadword Integers (Round Towards Negative Infinity).
//
// Forms:
//
// VCVTPD2QQ.RD_SAE zmm k zmm
// VCVTPD2QQ.RD_SAE zmm zmm
//
// Construct and append a VCVTPD2QQ.RD_SAE instruction to the active function.
func (c *Context) VCVTPD2QQ_RD_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VCVTPD2QQ_RD_SAE(ops...))
}

// VCVTPD2QQ_RD_SAE: Convert Packed Double-Precision Floating-Point Values to Packed Quadword Integers (Round Towards Negative Infinity).
//
// Forms:
//
// VCVTPD2QQ.RD_SAE zmm k zmm
// VCVTPD2QQ.RD_SAE zmm zmm
//
// Construct and append a VCVTPD2QQ.RD_SAE instruction to the active function.
// Operates on the global context.
func VCVTPD2QQ_RD_SAE(ops ...operand.Op) { ctx.VCVTPD2QQ_RD_SAE(ops...) }
|
|
|
|
// VCVTPD2QQ_RD_SAE_Z: Convert Packed Double-Precision Floating-Point Values to Packed Quadword Integers (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
// VCVTPD2QQ.RD_SAE.Z zmm k zmm
//
// Construct and append a VCVTPD2QQ.RD_SAE.Z instruction to the active function.
func (c *Context) VCVTPD2QQ_RD_SAE_Z(z, k, z1 operand.Op) {
	c.addinstruction(x86.VCVTPD2QQ_RD_SAE_Z(z, k, z1))
}

// VCVTPD2QQ_RD_SAE_Z: Convert Packed Double-Precision Floating-Point Values to Packed Quadword Integers (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
// VCVTPD2QQ.RD_SAE.Z zmm k zmm
//
// Construct and append a VCVTPD2QQ.RD_SAE.Z instruction to the active function.
// Operates on the global context.
func VCVTPD2QQ_RD_SAE_Z(z, k, z1 operand.Op) { ctx.VCVTPD2QQ_RD_SAE_Z(z, k, z1) }
|
|
|
|
// VCVTPD2QQ_RN_SAE: Convert Packed Double-Precision Floating-Point Values to Packed Quadword Integers (Round Towards Nearest).
//
// Forms:
//
// VCVTPD2QQ.RN_SAE zmm k zmm
// VCVTPD2QQ.RN_SAE zmm zmm
//
// Construct and append a VCVTPD2QQ.RN_SAE instruction to the active function.
func (c *Context) VCVTPD2QQ_RN_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VCVTPD2QQ_RN_SAE(ops...))
}

// VCVTPD2QQ_RN_SAE: Convert Packed Double-Precision Floating-Point Values to Packed Quadword Integers (Round Towards Nearest).
//
// Forms:
//
// VCVTPD2QQ.RN_SAE zmm k zmm
// VCVTPD2QQ.RN_SAE zmm zmm
//
// Construct and append a VCVTPD2QQ.RN_SAE instruction to the active function.
// Operates on the global context.
func VCVTPD2QQ_RN_SAE(ops ...operand.Op) { ctx.VCVTPD2QQ_RN_SAE(ops...) }
|
|
|
|
// VCVTPD2QQ_RN_SAE_Z: Convert Packed Double-Precision Floating-Point Values to Packed Quadword Integers (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
// VCVTPD2QQ.RN_SAE.Z zmm k zmm
//
// Construct and append a VCVTPD2QQ.RN_SAE.Z instruction to the active function.
func (c *Context) VCVTPD2QQ_RN_SAE_Z(z, k, z1 operand.Op) {
	c.addinstruction(x86.VCVTPD2QQ_RN_SAE_Z(z, k, z1))
}

// VCVTPD2QQ_RN_SAE_Z: Convert Packed Double-Precision Floating-Point Values to Packed Quadword Integers (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
// VCVTPD2QQ.RN_SAE.Z zmm k zmm
//
// Construct and append a VCVTPD2QQ.RN_SAE.Z instruction to the active function.
// Operates on the global context.
func VCVTPD2QQ_RN_SAE_Z(z, k, z1 operand.Op) { ctx.VCVTPD2QQ_RN_SAE_Z(z, k, z1) }
|
|
|
|
// VCVTPD2QQ_RU_SAE: Convert Packed Double-Precision Floating-Point Values to Packed Quadword Integers (Round Towards Positive Infinity).
//
// Forms:
//
// VCVTPD2QQ.RU_SAE zmm k zmm
// VCVTPD2QQ.RU_SAE zmm zmm
//
// Construct and append a VCVTPD2QQ.RU_SAE instruction to the active function.
func (c *Context) VCVTPD2QQ_RU_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VCVTPD2QQ_RU_SAE(ops...))
}

// VCVTPD2QQ_RU_SAE: Convert Packed Double-Precision Floating-Point Values to Packed Quadword Integers (Round Towards Positive Infinity).
//
// Forms:
//
// VCVTPD2QQ.RU_SAE zmm k zmm
// VCVTPD2QQ.RU_SAE zmm zmm
//
// Construct and append a VCVTPD2QQ.RU_SAE instruction to the active function.
// Operates on the global context.
func VCVTPD2QQ_RU_SAE(ops ...operand.Op) { ctx.VCVTPD2QQ_RU_SAE(ops...) }
|
|
|
|
// VCVTPD2QQ_RU_SAE_Z: Convert Packed Double-Precision Floating-Point Values to Packed Quadword Integers (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
// VCVTPD2QQ.RU_SAE.Z zmm k zmm
//
// Construct and append a VCVTPD2QQ.RU_SAE.Z instruction to the active function.
func (c *Context) VCVTPD2QQ_RU_SAE_Z(z, k, z1 operand.Op) {
	c.addinstruction(x86.VCVTPD2QQ_RU_SAE_Z(z, k, z1))
}

// VCVTPD2QQ_RU_SAE_Z: Convert Packed Double-Precision Floating-Point Values to Packed Quadword Integers (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
// VCVTPD2QQ.RU_SAE.Z zmm k zmm
//
// Construct and append a VCVTPD2QQ.RU_SAE.Z instruction to the active function.
// Operates on the global context.
func VCVTPD2QQ_RU_SAE_Z(z, k, z1 operand.Op) { ctx.VCVTPD2QQ_RU_SAE_Z(z, k, z1) }
|
|
|
|
// VCVTPD2QQ_RZ_SAE: Convert Packed Double-Precision Floating-Point Values to Packed Quadword Integers (Round Towards Zero).
//
// Forms:
//
// VCVTPD2QQ.RZ_SAE zmm k zmm
// VCVTPD2QQ.RZ_SAE zmm zmm
//
// Construct and append a VCVTPD2QQ.RZ_SAE instruction to the active function.
func (c *Context) VCVTPD2QQ_RZ_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VCVTPD2QQ_RZ_SAE(ops...))
}

// VCVTPD2QQ_RZ_SAE: Convert Packed Double-Precision Floating-Point Values to Packed Quadword Integers (Round Towards Zero).
//
// Forms:
//
// VCVTPD2QQ.RZ_SAE zmm k zmm
// VCVTPD2QQ.RZ_SAE zmm zmm
//
// Construct and append a VCVTPD2QQ.RZ_SAE instruction to the active function.
// Operates on the global context.
func VCVTPD2QQ_RZ_SAE(ops ...operand.Op) { ctx.VCVTPD2QQ_RZ_SAE(ops...) }
|
|
|
|
// VCVTPD2QQ_RZ_SAE_Z: Convert Packed Double-Precision Floating-Point Values to Packed Quadword Integers (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
// VCVTPD2QQ.RZ_SAE.Z zmm k zmm
//
// Construct and append a VCVTPD2QQ.RZ_SAE.Z instruction to the active function.
func (c *Context) VCVTPD2QQ_RZ_SAE_Z(z, k, z1 operand.Op) {
	c.addinstruction(x86.VCVTPD2QQ_RZ_SAE_Z(z, k, z1))
}

// VCVTPD2QQ_RZ_SAE_Z: Convert Packed Double-Precision Floating-Point Values to Packed Quadword Integers (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
// VCVTPD2QQ.RZ_SAE.Z zmm k zmm
//
// Construct and append a VCVTPD2QQ.RZ_SAE.Z instruction to the active function.
// Operates on the global context.
func VCVTPD2QQ_RZ_SAE_Z(z, k, z1 operand.Op) { ctx.VCVTPD2QQ_RZ_SAE_Z(z, k, z1) }
|
|
|
|
// VCVTPD2QQ_Z: Convert Packed Double-Precision Floating-Point Values to Packed Quadword Integers (Zeroing Masking).
//
// Forms:
//
// VCVTPD2QQ.Z m128 k xmm
// VCVTPD2QQ.Z m256 k ymm
// VCVTPD2QQ.Z xmm k xmm
// VCVTPD2QQ.Z ymm k ymm
// VCVTPD2QQ.Z m512 k zmm
// VCVTPD2QQ.Z zmm k zmm
//
// Construct and append a VCVTPD2QQ.Z instruction to the active function.
func (c *Context) VCVTPD2QQ_Z(mxyz, k, xyz operand.Op) {
	c.addinstruction(x86.VCVTPD2QQ_Z(mxyz, k, xyz))
}

// VCVTPD2QQ_Z: Convert Packed Double-Precision Floating-Point Values to Packed Quadword Integers (Zeroing Masking).
//
// Forms:
//
// VCVTPD2QQ.Z m128 k xmm
// VCVTPD2QQ.Z m256 k ymm
// VCVTPD2QQ.Z xmm k xmm
// VCVTPD2QQ.Z ymm k ymm
// VCVTPD2QQ.Z m512 k zmm
// VCVTPD2QQ.Z zmm k zmm
//
// Construct and append a VCVTPD2QQ.Z instruction to the active function.
// Operates on the global context.
func VCVTPD2QQ_Z(mxyz, k, xyz operand.Op) { ctx.VCVTPD2QQ_Z(mxyz, k, xyz) }
|
|
|
|
// VCVTPD2UDQ: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers.
//
// Forms:
//
// VCVTPD2UDQ m512 k ymm
// VCVTPD2UDQ m512 ymm
// VCVTPD2UDQ zmm k ymm
// VCVTPD2UDQ zmm ymm
//
// Construct and append a VCVTPD2UDQ instruction to the active function.
func (c *Context) VCVTPD2UDQ(ops ...operand.Op) {
	c.addinstruction(x86.VCVTPD2UDQ(ops...))
}

// VCVTPD2UDQ: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers.
//
// Forms:
//
// VCVTPD2UDQ m512 k ymm
// VCVTPD2UDQ m512 ymm
// VCVTPD2UDQ zmm k ymm
// VCVTPD2UDQ zmm ymm
//
// Construct and append a VCVTPD2UDQ instruction to the active function.
// Operates on the global context.
func VCVTPD2UDQ(ops ...operand.Op) { ctx.VCVTPD2UDQ(ops...) }
|
|
|
|
// VCVTPD2UDQX: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers.
//
// Forms:
//
// VCVTPD2UDQX m128 k xmm
// VCVTPD2UDQX m128 xmm
// VCVTPD2UDQX xmm k xmm
// VCVTPD2UDQX xmm xmm
//
// Construct and append a VCVTPD2UDQX instruction to the active function.
func (c *Context) VCVTPD2UDQX(ops ...operand.Op) {
	c.addinstruction(x86.VCVTPD2UDQX(ops...))
}

// VCVTPD2UDQX: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers.
//
// Forms:
//
// VCVTPD2UDQX m128 k xmm
// VCVTPD2UDQX m128 xmm
// VCVTPD2UDQX xmm k xmm
// VCVTPD2UDQX xmm xmm
//
// Construct and append a VCVTPD2UDQX instruction to the active function.
// Operates on the global context.
func VCVTPD2UDQX(ops ...operand.Op) { ctx.VCVTPD2UDQX(ops...) }
|
|
|
|
// VCVTPD2UDQX_BCST: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Broadcast).
//
// Forms:
//
// VCVTPD2UDQX.BCST m64 k xmm
// VCVTPD2UDQX.BCST m64 xmm
//
// Construct and append a VCVTPD2UDQX.BCST instruction to the active function.
func (c *Context) VCVTPD2UDQX_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VCVTPD2UDQX_BCST(ops...))
}

// VCVTPD2UDQX_BCST: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Broadcast).
//
// Forms:
//
// VCVTPD2UDQX.BCST m64 k xmm
// VCVTPD2UDQX.BCST m64 xmm
//
// Construct and append a VCVTPD2UDQX.BCST instruction to the active function.
// Operates on the global context.
func VCVTPD2UDQX_BCST(ops ...operand.Op) { ctx.VCVTPD2UDQX_BCST(ops...) }
|
|
|
|
// VCVTPD2UDQX_BCST_Z: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Broadcast, Zeroing Masking).
//
// Forms:
//
// VCVTPD2UDQX.BCST.Z m64 k xmm
//
// Construct and append a VCVTPD2UDQX.BCST.Z instruction to the active function.
func (c *Context) VCVTPD2UDQX_BCST_Z(m, k, x operand.Op) {
	c.addinstruction(x86.VCVTPD2UDQX_BCST_Z(m, k, x))
}

// VCVTPD2UDQX_BCST_Z: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Broadcast, Zeroing Masking).
//
// Forms:
//
// VCVTPD2UDQX.BCST.Z m64 k xmm
//
// Construct and append a VCVTPD2UDQX.BCST.Z instruction to the active function.
// Operates on the global context.
func VCVTPD2UDQX_BCST_Z(m, k, x operand.Op) { ctx.VCVTPD2UDQX_BCST_Z(m, k, x) }
|
|
|
|
// VCVTPD2UDQX_Z: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Zeroing Masking).
//
// Forms:
//
// VCVTPD2UDQX.Z m128 k xmm
// VCVTPD2UDQX.Z xmm k xmm
//
// Construct and append a VCVTPD2UDQX.Z instruction to the active function.
func (c *Context) VCVTPD2UDQX_Z(mx, k, x operand.Op) {
	c.addinstruction(x86.VCVTPD2UDQX_Z(mx, k, x))
}

// VCVTPD2UDQX_Z: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Zeroing Masking).
//
// Forms:
//
// VCVTPD2UDQX.Z m128 k xmm
// VCVTPD2UDQX.Z xmm k xmm
//
// Construct and append a VCVTPD2UDQX.Z instruction to the active function.
// Operates on the global context.
func VCVTPD2UDQX_Z(mx, k, x operand.Op) { ctx.VCVTPD2UDQX_Z(mx, k, x) }
|
|
|
|
// VCVTPD2UDQY: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers.
//
// Forms:
//
// VCVTPD2UDQY m256 k xmm
// VCVTPD2UDQY m256 xmm
// VCVTPD2UDQY ymm k xmm
// VCVTPD2UDQY ymm xmm
//
// Construct and append a VCVTPD2UDQY instruction to the active function.
func (c *Context) VCVTPD2UDQY(ops ...operand.Op) {
	c.addinstruction(x86.VCVTPD2UDQY(ops...))
}

// VCVTPD2UDQY: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers.
//
// Forms:
//
// VCVTPD2UDQY m256 k xmm
// VCVTPD2UDQY m256 xmm
// VCVTPD2UDQY ymm k xmm
// VCVTPD2UDQY ymm xmm
//
// Construct and append a VCVTPD2UDQY instruction to the active function.
// Operates on the global context.
func VCVTPD2UDQY(ops ...operand.Op) { ctx.VCVTPD2UDQY(ops...) }
|
|
|
|
// VCVTPD2UDQY_BCST: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Broadcast).
//
// Forms:
//
// VCVTPD2UDQY.BCST m64 k xmm
// VCVTPD2UDQY.BCST m64 xmm
//
// Construct and append a VCVTPD2UDQY.BCST instruction to the active function.
func (c *Context) VCVTPD2UDQY_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VCVTPD2UDQY_BCST(ops...))
}

// VCVTPD2UDQY_BCST: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Broadcast).
//
// Forms:
//
// VCVTPD2UDQY.BCST m64 k xmm
// VCVTPD2UDQY.BCST m64 xmm
//
// Construct and append a VCVTPD2UDQY.BCST instruction to the active function.
// Operates on the global context.
func VCVTPD2UDQY_BCST(ops ...operand.Op) { ctx.VCVTPD2UDQY_BCST(ops...) }
|
|
|
|
// VCVTPD2UDQY_BCST_Z: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Broadcast, Zeroing Masking).
//
// Forms:
//
// VCVTPD2UDQY.BCST.Z m64 k xmm
//
// Construct and append a VCVTPD2UDQY.BCST.Z instruction to the active function.
func (c *Context) VCVTPD2UDQY_BCST_Z(m, k, x operand.Op) {
	c.addinstruction(x86.VCVTPD2UDQY_BCST_Z(m, k, x))
}

// VCVTPD2UDQY_BCST_Z: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Broadcast, Zeroing Masking).
//
// Forms:
//
// VCVTPD2UDQY.BCST.Z m64 k xmm
//
// Construct and append a VCVTPD2UDQY.BCST.Z instruction to the active function.
// Operates on the global context.
func VCVTPD2UDQY_BCST_Z(m, k, x operand.Op) { ctx.VCVTPD2UDQY_BCST_Z(m, k, x) }
|
|
|
|
// VCVTPD2UDQY_Z: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Zeroing Masking).
//
// Forms:
//
// VCVTPD2UDQY.Z m256 k xmm
// VCVTPD2UDQY.Z ymm k xmm
//
// Construct and append a VCVTPD2UDQY.Z instruction to the active function.
func (c *Context) VCVTPD2UDQY_Z(my, k, x operand.Op) {
	c.addinstruction(x86.VCVTPD2UDQY_Z(my, k, x))
}

// VCVTPD2UDQY_Z: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Zeroing Masking).
//
// Forms:
//
// VCVTPD2UDQY.Z m256 k xmm
// VCVTPD2UDQY.Z ymm k xmm
//
// Construct and append a VCVTPD2UDQY.Z instruction to the active function.
// Operates on the global context.
func VCVTPD2UDQY_Z(my, k, x operand.Op) { ctx.VCVTPD2UDQY_Z(my, k, x) }
|
|
|
|
// VCVTPD2UDQ_BCST: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Broadcast).
//
// Forms:
//
// VCVTPD2UDQ.BCST m64 k ymm
// VCVTPD2UDQ.BCST m64 ymm
//
// Construct and append a VCVTPD2UDQ.BCST instruction to the active function.
func (c *Context) VCVTPD2UDQ_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VCVTPD2UDQ_BCST(ops...))
}

// VCVTPD2UDQ_BCST: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Broadcast).
//
// Forms:
//
// VCVTPD2UDQ.BCST m64 k ymm
// VCVTPD2UDQ.BCST m64 ymm
//
// Construct and append a VCVTPD2UDQ.BCST instruction to the active function.
// Operates on the global context.
func VCVTPD2UDQ_BCST(ops ...operand.Op) { ctx.VCVTPD2UDQ_BCST(ops...) }
|
|
|
|
// VCVTPD2UDQ_BCST_Z: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Broadcast, Zeroing Masking).
//
// Forms:
//
// VCVTPD2UDQ.BCST.Z m64 k ymm
//
// Construct and append a VCVTPD2UDQ.BCST.Z instruction to the active function.
func (c *Context) VCVTPD2UDQ_BCST_Z(m, k, y operand.Op) {
	c.addinstruction(x86.VCVTPD2UDQ_BCST_Z(m, k, y))
}

// VCVTPD2UDQ_BCST_Z: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Broadcast, Zeroing Masking).
//
// Forms:
//
// VCVTPD2UDQ.BCST.Z m64 k ymm
//
// Construct and append a VCVTPD2UDQ.BCST.Z instruction to the active function.
// Operates on the global context.
func VCVTPD2UDQ_BCST_Z(m, k, y operand.Op) { ctx.VCVTPD2UDQ_BCST_Z(m, k, y) }
|
|
|
|
// VCVTPD2UDQ_RD_SAE: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Round Towards Negative Infinity).
//
// Forms:
//
// VCVTPD2UDQ.RD_SAE zmm k ymm
// VCVTPD2UDQ.RD_SAE zmm ymm
//
// Construct and append a VCVTPD2UDQ.RD_SAE instruction to the active function.
func (c *Context) VCVTPD2UDQ_RD_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VCVTPD2UDQ_RD_SAE(ops...))
}

// VCVTPD2UDQ_RD_SAE: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Round Towards Negative Infinity).
//
// Forms:
//
// VCVTPD2UDQ.RD_SAE zmm k ymm
// VCVTPD2UDQ.RD_SAE zmm ymm
//
// Construct and append a VCVTPD2UDQ.RD_SAE instruction to the active function.
// Operates on the global context.
func VCVTPD2UDQ_RD_SAE(ops ...operand.Op) { ctx.VCVTPD2UDQ_RD_SAE(ops...) }
|
|
|
|
// VCVTPD2UDQ_RD_SAE_Z: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
// VCVTPD2UDQ.RD_SAE.Z zmm k ymm
//
// Construct and append a VCVTPD2UDQ.RD_SAE.Z instruction to the active function.
func (c *Context) VCVTPD2UDQ_RD_SAE_Z(z, k, y operand.Op) {
	c.addinstruction(x86.VCVTPD2UDQ_RD_SAE_Z(z, k, y))
}

// VCVTPD2UDQ_RD_SAE_Z: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
// VCVTPD2UDQ.RD_SAE.Z zmm k ymm
//
// Construct and append a VCVTPD2UDQ.RD_SAE.Z instruction to the active function.
// Operates on the global context.
func VCVTPD2UDQ_RD_SAE_Z(z, k, y operand.Op) { ctx.VCVTPD2UDQ_RD_SAE_Z(z, k, y) }
|
|
|
|
// VCVTPD2UDQ_RN_SAE: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Round Towards Nearest).
//
// Forms:
//
// VCVTPD2UDQ.RN_SAE zmm k ymm
// VCVTPD2UDQ.RN_SAE zmm ymm
//
// Construct and append a VCVTPD2UDQ.RN_SAE instruction to the active function.
func (c *Context) VCVTPD2UDQ_RN_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VCVTPD2UDQ_RN_SAE(ops...))
}

// VCVTPD2UDQ_RN_SAE: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Round Towards Nearest).
//
// Forms:
//
// VCVTPD2UDQ.RN_SAE zmm k ymm
// VCVTPD2UDQ.RN_SAE zmm ymm
//
// Construct and append a VCVTPD2UDQ.RN_SAE instruction to the active function.
// Operates on the global context.
func VCVTPD2UDQ_RN_SAE(ops ...operand.Op) { ctx.VCVTPD2UDQ_RN_SAE(ops...) }
|
|
|
|
// VCVTPD2UDQ_RN_SAE_Z: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
// VCVTPD2UDQ.RN_SAE.Z zmm k ymm
//
// Construct and append a VCVTPD2UDQ.RN_SAE.Z instruction to the active function.
func (c *Context) VCVTPD2UDQ_RN_SAE_Z(z, k, y operand.Op) {
	c.addinstruction(x86.VCVTPD2UDQ_RN_SAE_Z(z, k, y))
}

// VCVTPD2UDQ_RN_SAE_Z: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
// VCVTPD2UDQ.RN_SAE.Z zmm k ymm
//
// Construct and append a VCVTPD2UDQ.RN_SAE.Z instruction to the active function.
// Operates on the global context.
func VCVTPD2UDQ_RN_SAE_Z(z, k, y operand.Op) { ctx.VCVTPD2UDQ_RN_SAE_Z(z, k, y) }
|
|
|
|
// VCVTPD2UDQ_RU_SAE: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Round Towards Positive Infinity).
//
// Forms:
//
// VCVTPD2UDQ.RU_SAE zmm k ymm
// VCVTPD2UDQ.RU_SAE zmm ymm
//
// Construct and append a VCVTPD2UDQ.RU_SAE instruction to the active function.
func (c *Context) VCVTPD2UDQ_RU_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VCVTPD2UDQ_RU_SAE(ops...))
}

// VCVTPD2UDQ_RU_SAE: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Round Towards Positive Infinity).
//
// Forms:
//
// VCVTPD2UDQ.RU_SAE zmm k ymm
// VCVTPD2UDQ.RU_SAE zmm ymm
//
// Construct and append a VCVTPD2UDQ.RU_SAE instruction to the active function.
// Operates on the global context.
func VCVTPD2UDQ_RU_SAE(ops ...operand.Op) { ctx.VCVTPD2UDQ_RU_SAE(ops...) }
|
|
|
|
// VCVTPD2UDQ_RU_SAE_Z: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
// VCVTPD2UDQ.RU_SAE.Z zmm k ymm
//
// Construct and append a VCVTPD2UDQ.RU_SAE.Z instruction to the active function.
func (c *Context) VCVTPD2UDQ_RU_SAE_Z(z, k, y operand.Op) {
	c.addinstruction(x86.VCVTPD2UDQ_RU_SAE_Z(z, k, y))
}

// VCVTPD2UDQ_RU_SAE_Z: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
// VCVTPD2UDQ.RU_SAE.Z zmm k ymm
//
// Construct and append a VCVTPD2UDQ.RU_SAE.Z instruction to the active function.
// Operates on the global context.
func VCVTPD2UDQ_RU_SAE_Z(z, k, y operand.Op) { ctx.VCVTPD2UDQ_RU_SAE_Z(z, k, y) }
|
|
|
|
// VCVTPD2UDQ_RZ_SAE: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Round Towards Zero).
//
// Forms:
//
// VCVTPD2UDQ.RZ_SAE zmm k ymm
// VCVTPD2UDQ.RZ_SAE zmm ymm
//
// Construct and append a VCVTPD2UDQ.RZ_SAE instruction to the active function.
func (c *Context) VCVTPD2UDQ_RZ_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VCVTPD2UDQ_RZ_SAE(ops...))
}

// VCVTPD2UDQ_RZ_SAE: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Round Towards Zero).
//
// Forms:
//
// VCVTPD2UDQ.RZ_SAE zmm k ymm
// VCVTPD2UDQ.RZ_SAE zmm ymm
//
// Construct and append a VCVTPD2UDQ.RZ_SAE instruction to the active function.
// Operates on the global context.
func VCVTPD2UDQ_RZ_SAE(ops ...operand.Op) { ctx.VCVTPD2UDQ_RZ_SAE(ops...) }
|
|
|
|
// VCVTPD2UDQ_RZ_SAE_Z: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
// VCVTPD2UDQ.RZ_SAE.Z zmm k ymm
//
// Construct and append a VCVTPD2UDQ.RZ_SAE.Z instruction to the active function.
func (c *Context) VCVTPD2UDQ_RZ_SAE_Z(z, k, y operand.Op) {
	c.addinstruction(x86.VCVTPD2UDQ_RZ_SAE_Z(z, k, y))
}

// VCVTPD2UDQ_RZ_SAE_Z: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
// VCVTPD2UDQ.RZ_SAE.Z zmm k ymm
//
// Construct and append a VCVTPD2UDQ.RZ_SAE.Z instruction to the active function.
// Operates on the global context.
func VCVTPD2UDQ_RZ_SAE_Z(z, k, y operand.Op) { ctx.VCVTPD2UDQ_RZ_SAE_Z(z, k, y) }
|
|
|
|
// VCVTPD2UDQ_Z: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Zeroing Masking).
//
// Forms:
//
// VCVTPD2UDQ.Z m512 k ymm
// VCVTPD2UDQ.Z zmm k ymm
//
// Construct and append a VCVTPD2UDQ.Z instruction to the active function.
func (c *Context) VCVTPD2UDQ_Z(mz, k, y operand.Op) {
	c.addinstruction(x86.VCVTPD2UDQ_Z(mz, k, y))
}

// VCVTPD2UDQ_Z: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Zeroing Masking).
//
// Forms:
//
// VCVTPD2UDQ.Z m512 k ymm
// VCVTPD2UDQ.Z zmm k ymm
//
// Construct and append a VCVTPD2UDQ.Z instruction to the active function.
// Operates on the global context.
func VCVTPD2UDQ_Z(mz, k, y operand.Op) { ctx.VCVTPD2UDQ_Z(mz, k, y) }
|
|
|
|
// VCVTPD2UQQ: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Quadword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2UQQ m128 k xmm
|
|
// VCVTPD2UQQ m128 xmm
|
|
// VCVTPD2UQQ m256 k ymm
|
|
// VCVTPD2UQQ m256 ymm
|
|
// VCVTPD2UQQ xmm k xmm
|
|
// VCVTPD2UQQ xmm xmm
|
|
// VCVTPD2UQQ ymm k ymm
|
|
// VCVTPD2UQQ ymm ymm
|
|
// VCVTPD2UQQ m512 k zmm
|
|
// VCVTPD2UQQ m512 zmm
|
|
// VCVTPD2UQQ zmm k zmm
|
|
// VCVTPD2UQQ zmm zmm
|
|
//
|
|
// Construct and append a VCVTPD2UQQ instruction to the active function.
|
|
func (c *Context) VCVTPD2UQQ(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTPD2UQQ(ops...))
|
|
}
|
|
|
|
// VCVTPD2UQQ: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Quadword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2UQQ m128 k xmm
|
|
// VCVTPD2UQQ m128 xmm
|
|
// VCVTPD2UQQ m256 k ymm
|
|
// VCVTPD2UQQ m256 ymm
|
|
// VCVTPD2UQQ xmm k xmm
|
|
// VCVTPD2UQQ xmm xmm
|
|
// VCVTPD2UQQ ymm k ymm
|
|
// VCVTPD2UQQ ymm ymm
|
|
// VCVTPD2UQQ m512 k zmm
|
|
// VCVTPD2UQQ m512 zmm
|
|
// VCVTPD2UQQ zmm k zmm
|
|
// VCVTPD2UQQ zmm zmm
|
|
//
|
|
// Construct and append a VCVTPD2UQQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPD2UQQ(ops ...operand.Op) { ctx.VCVTPD2UQQ(ops...) }
|
|
|
|
// VCVTPD2UQQ_BCST: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Quadword Integers (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2UQQ.BCST m64 k xmm
|
|
// VCVTPD2UQQ.BCST m64 k ymm
|
|
// VCVTPD2UQQ.BCST m64 xmm
|
|
// VCVTPD2UQQ.BCST m64 ymm
|
|
// VCVTPD2UQQ.BCST m64 k zmm
|
|
// VCVTPD2UQQ.BCST m64 zmm
|
|
//
|
|
// Construct and append a VCVTPD2UQQ.BCST instruction to the active function.
|
|
func (c *Context) VCVTPD2UQQ_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTPD2UQQ_BCST(ops...))
|
|
}
|
|
|
|
// VCVTPD2UQQ_BCST: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Quadword Integers (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2UQQ.BCST m64 k xmm
|
|
// VCVTPD2UQQ.BCST m64 k ymm
|
|
// VCVTPD2UQQ.BCST m64 xmm
|
|
// VCVTPD2UQQ.BCST m64 ymm
|
|
// VCVTPD2UQQ.BCST m64 k zmm
|
|
// VCVTPD2UQQ.BCST m64 zmm
|
|
//
|
|
// Construct and append a VCVTPD2UQQ.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPD2UQQ_BCST(ops ...operand.Op) { ctx.VCVTPD2UQQ_BCST(ops...) }
|
|
|
|
// VCVTPD2UQQ_BCST_Z: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Quadword Integers (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2UQQ.BCST.Z m64 k xmm
|
|
// VCVTPD2UQQ.BCST.Z m64 k ymm
|
|
// VCVTPD2UQQ.BCST.Z m64 k zmm
|
|
//
|
|
// Construct and append a VCVTPD2UQQ.BCST.Z instruction to the active function.
|
|
func (c *Context) VCVTPD2UQQ_BCST_Z(m, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VCVTPD2UQQ_BCST_Z(m, k, xyz))
|
|
}
|
|
|
|
// VCVTPD2UQQ_BCST_Z: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Quadword Integers (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2UQQ.BCST.Z m64 k xmm
|
|
// VCVTPD2UQQ.BCST.Z m64 k ymm
|
|
// VCVTPD2UQQ.BCST.Z m64 k zmm
|
|
//
|
|
// Construct and append a VCVTPD2UQQ.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPD2UQQ_BCST_Z(m, k, xyz operand.Op) { ctx.VCVTPD2UQQ_BCST_Z(m, k, xyz) }
|
|
|
|
// VCVTPD2UQQ_RD_SAE: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Quadword Integers (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2UQQ.RD_SAE zmm k zmm
|
|
// VCVTPD2UQQ.RD_SAE zmm zmm
|
|
//
|
|
// Construct and append a VCVTPD2UQQ.RD_SAE instruction to the active function.
|
|
func (c *Context) VCVTPD2UQQ_RD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTPD2UQQ_RD_SAE(ops...))
|
|
}
|
|
|
|
// VCVTPD2UQQ_RD_SAE: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Quadword Integers (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2UQQ.RD_SAE zmm k zmm
|
|
// VCVTPD2UQQ.RD_SAE zmm zmm
|
|
//
|
|
// Construct and append a VCVTPD2UQQ.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPD2UQQ_RD_SAE(ops ...operand.Op) { ctx.VCVTPD2UQQ_RD_SAE(ops...) }
|
|
|
|
// VCVTPD2UQQ_RD_SAE_Z: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Quadword Integers (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2UQQ.RD_SAE.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTPD2UQQ.RD_SAE.Z instruction to the active function.
|
|
func (c *Context) VCVTPD2UQQ_RD_SAE_Z(z, k, z1 operand.Op) {
|
|
c.addinstruction(x86.VCVTPD2UQQ_RD_SAE_Z(z, k, z1))
|
|
}
|
|
|
|
// VCVTPD2UQQ_RD_SAE_Z: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Quadword Integers (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2UQQ.RD_SAE.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTPD2UQQ.RD_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPD2UQQ_RD_SAE_Z(z, k, z1 operand.Op) { ctx.VCVTPD2UQQ_RD_SAE_Z(z, k, z1) }
|
|
|
|
// VCVTPD2UQQ_RN_SAE: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Quadword Integers (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2UQQ.RN_SAE zmm k zmm
|
|
// VCVTPD2UQQ.RN_SAE zmm zmm
|
|
//
|
|
// Construct and append a VCVTPD2UQQ.RN_SAE instruction to the active function.
|
|
func (c *Context) VCVTPD2UQQ_RN_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTPD2UQQ_RN_SAE(ops...))
|
|
}
|
|
|
|
// VCVTPD2UQQ_RN_SAE: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Quadword Integers (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2UQQ.RN_SAE zmm k zmm
|
|
// VCVTPD2UQQ.RN_SAE zmm zmm
|
|
//
|
|
// Construct and append a VCVTPD2UQQ.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPD2UQQ_RN_SAE(ops ...operand.Op) { ctx.VCVTPD2UQQ_RN_SAE(ops...) }
|
|
|
|
// VCVTPD2UQQ_RN_SAE_Z: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Quadword Integers (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2UQQ.RN_SAE.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTPD2UQQ.RN_SAE.Z instruction to the active function.
|
|
func (c *Context) VCVTPD2UQQ_RN_SAE_Z(z, k, z1 operand.Op) {
|
|
c.addinstruction(x86.VCVTPD2UQQ_RN_SAE_Z(z, k, z1))
|
|
}
|
|
|
|
// VCVTPD2UQQ_RN_SAE_Z: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Quadword Integers (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2UQQ.RN_SAE.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTPD2UQQ.RN_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPD2UQQ_RN_SAE_Z(z, k, z1 operand.Op) { ctx.VCVTPD2UQQ_RN_SAE_Z(z, k, z1) }
|
|
|
|
// VCVTPD2UQQ_RU_SAE: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Quadword Integers (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2UQQ.RU_SAE zmm k zmm
|
|
// VCVTPD2UQQ.RU_SAE zmm zmm
|
|
//
|
|
// Construct and append a VCVTPD2UQQ.RU_SAE instruction to the active function.
|
|
func (c *Context) VCVTPD2UQQ_RU_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTPD2UQQ_RU_SAE(ops...))
|
|
}
|
|
|
|
// VCVTPD2UQQ_RU_SAE: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Quadword Integers (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2UQQ.RU_SAE zmm k zmm
|
|
// VCVTPD2UQQ.RU_SAE zmm zmm
|
|
//
|
|
// Construct and append a VCVTPD2UQQ.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPD2UQQ_RU_SAE(ops ...operand.Op) { ctx.VCVTPD2UQQ_RU_SAE(ops...) }
|
|
|
|
// VCVTPD2UQQ_RU_SAE_Z: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Quadword Integers (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2UQQ.RU_SAE.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTPD2UQQ.RU_SAE.Z instruction to the active function.
|
|
func (c *Context) VCVTPD2UQQ_RU_SAE_Z(z, k, z1 operand.Op) {
|
|
c.addinstruction(x86.VCVTPD2UQQ_RU_SAE_Z(z, k, z1))
|
|
}
|
|
|
|
// VCVTPD2UQQ_RU_SAE_Z: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Quadword Integers (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2UQQ.RU_SAE.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTPD2UQQ.RU_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPD2UQQ_RU_SAE_Z(z, k, z1 operand.Op) { ctx.VCVTPD2UQQ_RU_SAE_Z(z, k, z1) }
|
|
|
|
// VCVTPD2UQQ_RZ_SAE: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Quadword Integers (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2UQQ.RZ_SAE zmm k zmm
|
|
// VCVTPD2UQQ.RZ_SAE zmm zmm
|
|
//
|
|
// Construct and append a VCVTPD2UQQ.RZ_SAE instruction to the active function.
|
|
func (c *Context) VCVTPD2UQQ_RZ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTPD2UQQ_RZ_SAE(ops...))
|
|
}
|
|
|
|
// VCVTPD2UQQ_RZ_SAE: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Quadword Integers (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2UQQ.RZ_SAE zmm k zmm
|
|
// VCVTPD2UQQ.RZ_SAE zmm zmm
|
|
//
|
|
// Construct and append a VCVTPD2UQQ.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPD2UQQ_RZ_SAE(ops ...operand.Op) { ctx.VCVTPD2UQQ_RZ_SAE(ops...) }
|
|
|
|
// VCVTPD2UQQ_RZ_SAE_Z: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Quadword Integers (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2UQQ.RZ_SAE.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTPD2UQQ.RZ_SAE.Z instruction to the active function.
|
|
func (c *Context) VCVTPD2UQQ_RZ_SAE_Z(z, k, z1 operand.Op) {
|
|
c.addinstruction(x86.VCVTPD2UQQ_RZ_SAE_Z(z, k, z1))
|
|
}
|
|
|
|
// VCVTPD2UQQ_RZ_SAE_Z: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Quadword Integers (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2UQQ.RZ_SAE.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTPD2UQQ.RZ_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPD2UQQ_RZ_SAE_Z(z, k, z1 operand.Op) { ctx.VCVTPD2UQQ_RZ_SAE_Z(z, k, z1) }
|
|
|
|
// VCVTPD2UQQ_Z: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Quadword Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2UQQ.Z m128 k xmm
|
|
// VCVTPD2UQQ.Z m256 k ymm
|
|
// VCVTPD2UQQ.Z xmm k xmm
|
|
// VCVTPD2UQQ.Z ymm k ymm
|
|
// VCVTPD2UQQ.Z m512 k zmm
|
|
// VCVTPD2UQQ.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTPD2UQQ.Z instruction to the active function.
|
|
func (c *Context) VCVTPD2UQQ_Z(mxyz, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VCVTPD2UQQ_Z(mxyz, k, xyz))
|
|
}
|
|
|
|
// VCVTPD2UQQ_Z: Convert Packed Double-Precision Floating-Point Values to Packed Unsigned Quadword Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPD2UQQ.Z m128 k xmm
|
|
// VCVTPD2UQQ.Z m256 k ymm
|
|
// VCVTPD2UQQ.Z xmm k xmm
|
|
// VCVTPD2UQQ.Z ymm k ymm
|
|
// VCVTPD2UQQ.Z m512 k zmm
|
|
// VCVTPD2UQQ.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTPD2UQQ.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPD2UQQ_Z(mxyz, k, xyz operand.Op) { ctx.VCVTPD2UQQ_Z(mxyz, k, xyz) }
|
|
|
|
// VCVTPH2PS: Convert Half-Precision FP Values to Single-Precision FP Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPH2PS m128 ymm
|
|
// VCVTPH2PS m64 xmm
|
|
// VCVTPH2PS xmm xmm
|
|
// VCVTPH2PS xmm ymm
|
|
// VCVTPH2PS m128 k ymm
|
|
// VCVTPH2PS m64 k xmm
|
|
// VCVTPH2PS xmm k xmm
|
|
// VCVTPH2PS xmm k ymm
|
|
// VCVTPH2PS m256 k zmm
|
|
// VCVTPH2PS m256 zmm
|
|
// VCVTPH2PS ymm k zmm
|
|
// VCVTPH2PS ymm zmm
|
|
//
|
|
// Construct and append a VCVTPH2PS instruction to the active function.
|
|
func (c *Context) VCVTPH2PS(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTPH2PS(ops...))
|
|
}
|
|
|
|
// VCVTPH2PS: Convert Half-Precision FP Values to Single-Precision FP Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPH2PS m128 ymm
|
|
// VCVTPH2PS m64 xmm
|
|
// VCVTPH2PS xmm xmm
|
|
// VCVTPH2PS xmm ymm
|
|
// VCVTPH2PS m128 k ymm
|
|
// VCVTPH2PS m64 k xmm
|
|
// VCVTPH2PS xmm k xmm
|
|
// VCVTPH2PS xmm k ymm
|
|
// VCVTPH2PS m256 k zmm
|
|
// VCVTPH2PS m256 zmm
|
|
// VCVTPH2PS ymm k zmm
|
|
// VCVTPH2PS ymm zmm
|
|
//
|
|
// Construct and append a VCVTPH2PS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPH2PS(ops ...operand.Op) { ctx.VCVTPH2PS(ops...) }
|
|
|
|
// VCVTPH2PS_SAE: Convert Half-Precision FP Values to Single-Precision FP Values (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPH2PS.SAE ymm k zmm
|
|
// VCVTPH2PS.SAE ymm zmm
|
|
//
|
|
// Construct and append a VCVTPH2PS.SAE instruction to the active function.
|
|
func (c *Context) VCVTPH2PS_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTPH2PS_SAE(ops...))
|
|
}
|
|
|
|
// VCVTPH2PS_SAE: Convert Half-Precision FP Values to Single-Precision FP Values (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPH2PS.SAE ymm k zmm
|
|
// VCVTPH2PS.SAE ymm zmm
|
|
//
|
|
// Construct and append a VCVTPH2PS.SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPH2PS_SAE(ops ...operand.Op) { ctx.VCVTPH2PS_SAE(ops...) }
|
|
|
|
// VCVTPH2PS_SAE_Z: Convert Half-Precision FP Values to Single-Precision FP Values (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPH2PS.SAE.Z ymm k zmm
|
|
//
|
|
// Construct and append a VCVTPH2PS.SAE.Z instruction to the active function.
|
|
func (c *Context) VCVTPH2PS_SAE_Z(y, k, z operand.Op) {
|
|
c.addinstruction(x86.VCVTPH2PS_SAE_Z(y, k, z))
|
|
}
|
|
|
|
// VCVTPH2PS_SAE_Z: Convert Half-Precision FP Values to Single-Precision FP Values (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPH2PS.SAE.Z ymm k zmm
|
|
//
|
|
// Construct and append a VCVTPH2PS.SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPH2PS_SAE_Z(y, k, z operand.Op) { ctx.VCVTPH2PS_SAE_Z(y, k, z) }
|
|
|
|
// VCVTPH2PS_Z: Convert Half-Precision FP Values to Single-Precision FP Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPH2PS.Z m128 k ymm
|
|
// VCVTPH2PS.Z m64 k xmm
|
|
// VCVTPH2PS.Z xmm k xmm
|
|
// VCVTPH2PS.Z xmm k ymm
|
|
// VCVTPH2PS.Z m256 k zmm
|
|
// VCVTPH2PS.Z ymm k zmm
|
|
//
|
|
// Construct and append a VCVTPH2PS.Z instruction to the active function.
|
|
func (c *Context) VCVTPH2PS_Z(mxy, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VCVTPH2PS_Z(mxy, k, xyz))
|
|
}
|
|
|
|
// VCVTPH2PS_Z: Convert Half-Precision FP Values to Single-Precision FP Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPH2PS.Z m128 k ymm
|
|
// VCVTPH2PS.Z m64 k xmm
|
|
// VCVTPH2PS.Z xmm k xmm
|
|
// VCVTPH2PS.Z xmm k ymm
|
|
// VCVTPH2PS.Z m256 k zmm
|
|
// VCVTPH2PS.Z ymm k zmm
|
|
//
|
|
// Construct and append a VCVTPH2PS.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPH2PS_Z(mxy, k, xyz operand.Op) { ctx.VCVTPH2PS_Z(mxy, k, xyz) }
|
|
|
|
// VCVTPS2DQ: Convert Packed Single-Precision FP Values to Packed Dword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2DQ m128 xmm
|
|
// VCVTPS2DQ m256 ymm
|
|
// VCVTPS2DQ xmm xmm
|
|
// VCVTPS2DQ ymm ymm
|
|
// VCVTPS2DQ m128 k xmm
|
|
// VCVTPS2DQ m256 k ymm
|
|
// VCVTPS2DQ xmm k xmm
|
|
// VCVTPS2DQ ymm k ymm
|
|
// VCVTPS2DQ m512 k zmm
|
|
// VCVTPS2DQ m512 zmm
|
|
// VCVTPS2DQ zmm k zmm
|
|
// VCVTPS2DQ zmm zmm
|
|
//
|
|
// Construct and append a VCVTPS2DQ instruction to the active function.
|
|
func (c *Context) VCVTPS2DQ(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTPS2DQ(ops...))
|
|
}
|
|
|
|
// VCVTPS2DQ: Convert Packed Single-Precision FP Values to Packed Dword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2DQ m128 xmm
|
|
// VCVTPS2DQ m256 ymm
|
|
// VCVTPS2DQ xmm xmm
|
|
// VCVTPS2DQ ymm ymm
|
|
// VCVTPS2DQ m128 k xmm
|
|
// VCVTPS2DQ m256 k ymm
|
|
// VCVTPS2DQ xmm k xmm
|
|
// VCVTPS2DQ ymm k ymm
|
|
// VCVTPS2DQ m512 k zmm
|
|
// VCVTPS2DQ m512 zmm
|
|
// VCVTPS2DQ zmm k zmm
|
|
// VCVTPS2DQ zmm zmm
|
|
//
|
|
// Construct and append a VCVTPS2DQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPS2DQ(ops ...operand.Op) { ctx.VCVTPS2DQ(ops...) }
|
|
|
|
// VCVTPS2DQ_BCST: Convert Packed Single-Precision FP Values to Packed Dword Integers (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2DQ.BCST m32 k xmm
|
|
// VCVTPS2DQ.BCST m32 k ymm
|
|
// VCVTPS2DQ.BCST m32 xmm
|
|
// VCVTPS2DQ.BCST m32 ymm
|
|
// VCVTPS2DQ.BCST m32 k zmm
|
|
// VCVTPS2DQ.BCST m32 zmm
|
|
//
|
|
// Construct and append a VCVTPS2DQ.BCST instruction to the active function.
|
|
func (c *Context) VCVTPS2DQ_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTPS2DQ_BCST(ops...))
|
|
}
|
|
|
|
// VCVTPS2DQ_BCST: Convert Packed Single-Precision FP Values to Packed Dword Integers (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2DQ.BCST m32 k xmm
|
|
// VCVTPS2DQ.BCST m32 k ymm
|
|
// VCVTPS2DQ.BCST m32 xmm
|
|
// VCVTPS2DQ.BCST m32 ymm
|
|
// VCVTPS2DQ.BCST m32 k zmm
|
|
// VCVTPS2DQ.BCST m32 zmm
|
|
//
|
|
// Construct and append a VCVTPS2DQ.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPS2DQ_BCST(ops ...operand.Op) { ctx.VCVTPS2DQ_BCST(ops...) }
|
|
|
|
// VCVTPS2DQ_BCST_Z: Convert Packed Single-Precision FP Values to Packed Dword Integers (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2DQ.BCST.Z m32 k xmm
|
|
// VCVTPS2DQ.BCST.Z m32 k ymm
|
|
// VCVTPS2DQ.BCST.Z m32 k zmm
|
|
//
|
|
// Construct and append a VCVTPS2DQ.BCST.Z instruction to the active function.
|
|
func (c *Context) VCVTPS2DQ_BCST_Z(m, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VCVTPS2DQ_BCST_Z(m, k, xyz))
|
|
}
|
|
|
|
// VCVTPS2DQ_BCST_Z: Convert Packed Single-Precision FP Values to Packed Dword Integers (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2DQ.BCST.Z m32 k xmm
|
|
// VCVTPS2DQ.BCST.Z m32 k ymm
|
|
// VCVTPS2DQ.BCST.Z m32 k zmm
|
|
//
|
|
// Construct and append a VCVTPS2DQ.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPS2DQ_BCST_Z(m, k, xyz operand.Op) { ctx.VCVTPS2DQ_BCST_Z(m, k, xyz) }
|
|
|
|
// VCVTPS2DQ_RD_SAE: Convert Packed Single-Precision FP Values to Packed Dword Integers (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2DQ.RD_SAE zmm k zmm
|
|
// VCVTPS2DQ.RD_SAE zmm zmm
|
|
//
|
|
// Construct and append a VCVTPS2DQ.RD_SAE instruction to the active function.
|
|
func (c *Context) VCVTPS2DQ_RD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTPS2DQ_RD_SAE(ops...))
|
|
}
|
|
|
|
// VCVTPS2DQ_RD_SAE: Convert Packed Single-Precision FP Values to Packed Dword Integers (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2DQ.RD_SAE zmm k zmm
|
|
// VCVTPS2DQ.RD_SAE zmm zmm
|
|
//
|
|
// Construct and append a VCVTPS2DQ.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPS2DQ_RD_SAE(ops ...operand.Op) { ctx.VCVTPS2DQ_RD_SAE(ops...) }
|
|
|
|
// VCVTPS2DQ_RD_SAE_Z: Convert Packed Single-Precision FP Values to Packed Dword Integers (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2DQ.RD_SAE.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTPS2DQ.RD_SAE.Z instruction to the active function.
|
|
func (c *Context) VCVTPS2DQ_RD_SAE_Z(z, k, z1 operand.Op) {
|
|
c.addinstruction(x86.VCVTPS2DQ_RD_SAE_Z(z, k, z1))
|
|
}
|
|
|
|
// VCVTPS2DQ_RD_SAE_Z: Convert Packed Single-Precision FP Values to Packed Dword Integers (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2DQ.RD_SAE.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTPS2DQ.RD_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPS2DQ_RD_SAE_Z(z, k, z1 operand.Op) { ctx.VCVTPS2DQ_RD_SAE_Z(z, k, z1) }
|
|
|
|
// VCVTPS2DQ_RN_SAE: Convert Packed Single-Precision FP Values to Packed Dword Integers (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2DQ.RN_SAE zmm k zmm
|
|
// VCVTPS2DQ.RN_SAE zmm zmm
|
|
//
|
|
// Construct and append a VCVTPS2DQ.RN_SAE instruction to the active function.
|
|
func (c *Context) VCVTPS2DQ_RN_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTPS2DQ_RN_SAE(ops...))
|
|
}
|
|
|
|
// VCVTPS2DQ_RN_SAE: Convert Packed Single-Precision FP Values to Packed Dword Integers (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2DQ.RN_SAE zmm k zmm
|
|
// VCVTPS2DQ.RN_SAE zmm zmm
|
|
//
|
|
// Construct and append a VCVTPS2DQ.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPS2DQ_RN_SAE(ops ...operand.Op) { ctx.VCVTPS2DQ_RN_SAE(ops...) }
|
|
|
|
// VCVTPS2DQ_RN_SAE_Z: Convert Packed Single-Precision FP Values to Packed Dword Integers (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2DQ.RN_SAE.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTPS2DQ.RN_SAE.Z instruction to the active function.
|
|
func (c *Context) VCVTPS2DQ_RN_SAE_Z(z, k, z1 operand.Op) {
|
|
c.addinstruction(x86.VCVTPS2DQ_RN_SAE_Z(z, k, z1))
|
|
}
|
|
|
|
// VCVTPS2DQ_RN_SAE_Z: Convert Packed Single-Precision FP Values to Packed Dword Integers (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2DQ.RN_SAE.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTPS2DQ.RN_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPS2DQ_RN_SAE_Z(z, k, z1 operand.Op) { ctx.VCVTPS2DQ_RN_SAE_Z(z, k, z1) }
|
|
|
|
// VCVTPS2DQ_RU_SAE: Convert Packed Single-Precision FP Values to Packed Dword Integers (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2DQ.RU_SAE zmm k zmm
|
|
// VCVTPS2DQ.RU_SAE zmm zmm
|
|
//
|
|
// Construct and append a VCVTPS2DQ.RU_SAE instruction to the active function.
|
|
func (c *Context) VCVTPS2DQ_RU_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTPS2DQ_RU_SAE(ops...))
|
|
}
|
|
|
|
// VCVTPS2DQ_RU_SAE: Convert Packed Single-Precision FP Values to Packed Dword Integers (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2DQ.RU_SAE zmm k zmm
|
|
// VCVTPS2DQ.RU_SAE zmm zmm
|
|
//
|
|
// Construct and append a VCVTPS2DQ.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPS2DQ_RU_SAE(ops ...operand.Op) { ctx.VCVTPS2DQ_RU_SAE(ops...) }
|
|
|
|
// VCVTPS2DQ_RU_SAE_Z: Convert Packed Single-Precision FP Values to Packed Dword Integers (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2DQ.RU_SAE.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTPS2DQ.RU_SAE.Z instruction to the active function.
|
|
func (c *Context) VCVTPS2DQ_RU_SAE_Z(z, k, z1 operand.Op) {
|
|
c.addinstruction(x86.VCVTPS2DQ_RU_SAE_Z(z, k, z1))
|
|
}
|
|
|
|
// VCVTPS2DQ_RU_SAE_Z: Convert Packed Single-Precision FP Values to Packed Dword Integers (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2DQ.RU_SAE.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTPS2DQ.RU_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPS2DQ_RU_SAE_Z(z, k, z1 operand.Op) { ctx.VCVTPS2DQ_RU_SAE_Z(z, k, z1) }
|
|
|
|
// VCVTPS2DQ_RZ_SAE: Convert Packed Single-Precision FP Values to Packed Dword Integers (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2DQ.RZ_SAE zmm k zmm
|
|
// VCVTPS2DQ.RZ_SAE zmm zmm
|
|
//
|
|
// Construct and append a VCVTPS2DQ.RZ_SAE instruction to the active function.
|
|
func (c *Context) VCVTPS2DQ_RZ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTPS2DQ_RZ_SAE(ops...))
|
|
}
|
|
|
|
// VCVTPS2DQ_RZ_SAE: Convert Packed Single-Precision FP Values to Packed Dword Integers (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2DQ.RZ_SAE zmm k zmm
|
|
// VCVTPS2DQ.RZ_SAE zmm zmm
|
|
//
|
|
// Construct and append a VCVTPS2DQ.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPS2DQ_RZ_SAE(ops ...operand.Op) { ctx.VCVTPS2DQ_RZ_SAE(ops...) }
|
|
|
|
// VCVTPS2DQ_RZ_SAE_Z: Convert Packed Single-Precision FP Values to Packed Dword Integers (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2DQ.RZ_SAE.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTPS2DQ.RZ_SAE.Z instruction to the active function.
|
|
func (c *Context) VCVTPS2DQ_RZ_SAE_Z(z, k, z1 operand.Op) {
|
|
c.addinstruction(x86.VCVTPS2DQ_RZ_SAE_Z(z, k, z1))
|
|
}
|
|
|
|
// VCVTPS2DQ_RZ_SAE_Z: Convert Packed Single-Precision FP Values to Packed Dword Integers (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2DQ.RZ_SAE.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTPS2DQ.RZ_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPS2DQ_RZ_SAE_Z(z, k, z1 operand.Op) { ctx.VCVTPS2DQ_RZ_SAE_Z(z, k, z1) }
|
|
|
|
// VCVTPS2DQ_Z: Convert Packed Single-Precision FP Values to Packed Dword Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2DQ.Z m128 k xmm
|
|
// VCVTPS2DQ.Z m256 k ymm
|
|
// VCVTPS2DQ.Z xmm k xmm
|
|
// VCVTPS2DQ.Z ymm k ymm
|
|
// VCVTPS2DQ.Z m512 k zmm
|
|
// VCVTPS2DQ.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTPS2DQ.Z instruction to the active function.
|
|
func (c *Context) VCVTPS2DQ_Z(mxyz, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VCVTPS2DQ_Z(mxyz, k, xyz))
|
|
}
|
|
|
|
// VCVTPS2DQ_Z: Convert Packed Single-Precision FP Values to Packed Dword Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2DQ.Z m128 k xmm
|
|
// VCVTPS2DQ.Z m256 k ymm
|
|
// VCVTPS2DQ.Z xmm k xmm
|
|
// VCVTPS2DQ.Z ymm k ymm
|
|
// VCVTPS2DQ.Z m512 k zmm
|
|
// VCVTPS2DQ.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTPS2DQ.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPS2DQ_Z(mxyz, k, xyz operand.Op) { ctx.VCVTPS2DQ_Z(mxyz, k, xyz) }
|
|
|
|
// VCVTPS2PD: Convert Packed Single-Precision FP Values to Packed Double-Precision FP Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2PD m128 ymm
|
|
// VCVTPS2PD m64 xmm
|
|
// VCVTPS2PD xmm xmm
|
|
// VCVTPS2PD xmm ymm
|
|
// VCVTPS2PD m64 k xmm
|
|
// VCVTPS2PD xmm k xmm
|
|
// VCVTPS2PD m256 k zmm
|
|
// VCVTPS2PD m256 zmm
|
|
// VCVTPS2PD ymm k zmm
|
|
// VCVTPS2PD ymm zmm
|
|
// VCVTPS2PD m128 k ymm
|
|
// VCVTPS2PD xmm k ymm
|
|
//
|
|
// Construct and append a VCVTPS2PD instruction to the active function.
|
|
func (c *Context) VCVTPS2PD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTPS2PD(ops...))
|
|
}
|
|
|
|
// VCVTPS2PD: Convert Packed Single-Precision FP Values to Packed Double-Precision FP Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2PD m128 ymm
|
|
// VCVTPS2PD m64 xmm
|
|
// VCVTPS2PD xmm xmm
|
|
// VCVTPS2PD xmm ymm
|
|
// VCVTPS2PD m64 k xmm
|
|
// VCVTPS2PD xmm k xmm
|
|
// VCVTPS2PD m256 k zmm
|
|
// VCVTPS2PD m256 zmm
|
|
// VCVTPS2PD ymm k zmm
|
|
// VCVTPS2PD ymm zmm
|
|
// VCVTPS2PD m128 k ymm
|
|
// VCVTPS2PD xmm k ymm
|
|
//
|
|
// Construct and append a VCVTPS2PD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPS2PD(ops ...operand.Op) { ctx.VCVTPS2PD(ops...) }
|
|
|
|
// VCVTPS2PD_BCST: Convert Packed Single-Precision FP Values to Packed Double-Precision FP Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2PD.BCST m32 k xmm
|
|
// VCVTPS2PD.BCST m32 xmm
|
|
// VCVTPS2PD.BCST m32 k zmm
|
|
// VCVTPS2PD.BCST m32 zmm
|
|
// VCVTPS2PD.BCST m32 k ymm
|
|
// VCVTPS2PD.BCST m32 ymm
|
|
//
|
|
// Construct and append a VCVTPS2PD.BCST instruction to the active function.
|
|
func (c *Context) VCVTPS2PD_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTPS2PD_BCST(ops...))
|
|
}
|
|
|
|
// VCVTPS2PD_BCST: Convert Packed Single-Precision FP Values to Packed Double-Precision FP Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2PD.BCST m32 k xmm
|
|
// VCVTPS2PD.BCST m32 xmm
|
|
// VCVTPS2PD.BCST m32 k zmm
|
|
// VCVTPS2PD.BCST m32 zmm
|
|
// VCVTPS2PD.BCST m32 k ymm
|
|
// VCVTPS2PD.BCST m32 ymm
|
|
//
|
|
// Construct and append a VCVTPS2PD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPS2PD_BCST(ops ...operand.Op) { ctx.VCVTPS2PD_BCST(ops...) }
|
|
|
|
// VCVTPS2PD_BCST_Z: Convert Packed Single-Precision FP Values to Packed Double-Precision FP Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2PD.BCST.Z m32 k xmm
|
|
// VCVTPS2PD.BCST.Z m32 k zmm
|
|
// VCVTPS2PD.BCST.Z m32 k ymm
|
|
//
|
|
// Construct and append a VCVTPS2PD.BCST.Z instruction to the active function.
|
|
func (c *Context) VCVTPS2PD_BCST_Z(m, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VCVTPS2PD_BCST_Z(m, k, xyz))
|
|
}
|
|
|
|
// VCVTPS2PD_BCST_Z: Convert Packed Single-Precision FP Values to Packed Double-Precision FP Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2PD.BCST.Z m32 k xmm
|
|
// VCVTPS2PD.BCST.Z m32 k zmm
|
|
// VCVTPS2PD.BCST.Z m32 k ymm
|
|
//
|
|
// Construct and append a VCVTPS2PD.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPS2PD_BCST_Z(m, k, xyz operand.Op) { ctx.VCVTPS2PD_BCST_Z(m, k, xyz) }
|
|
|
|
// VCVTPS2PD_SAE: Convert Packed Single-Precision FP Values to Packed Double-Precision FP Values (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2PD.SAE ymm k zmm
|
|
// VCVTPS2PD.SAE ymm zmm
|
|
//
|
|
// Construct and append a VCVTPS2PD.SAE instruction to the active function.
|
|
func (c *Context) VCVTPS2PD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTPS2PD_SAE(ops...))
|
|
}
|
|
|
|
// VCVTPS2PD_SAE: Convert Packed Single-Precision FP Values to Packed Double-Precision FP Values (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2PD.SAE ymm k zmm
|
|
// VCVTPS2PD.SAE ymm zmm
|
|
//
|
|
// Construct and append a VCVTPS2PD.SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPS2PD_SAE(ops ...operand.Op) { ctx.VCVTPS2PD_SAE(ops...) }
|
|
|
|
// VCVTPS2PD_SAE_Z: Convert Packed Single-Precision FP Values to Packed Double-Precision FP Values (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2PD.SAE.Z ymm k zmm
|
|
//
|
|
// Construct and append a VCVTPS2PD.SAE.Z instruction to the active function.
|
|
func (c *Context) VCVTPS2PD_SAE_Z(y, k, z operand.Op) {
|
|
c.addinstruction(x86.VCVTPS2PD_SAE_Z(y, k, z))
|
|
}
|
|
|
|
// VCVTPS2PD_SAE_Z: Convert Packed Single-Precision FP Values to Packed Double-Precision FP Values (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2PD.SAE.Z ymm k zmm
|
|
//
|
|
// Construct and append a VCVTPS2PD.SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPS2PD_SAE_Z(y, k, z operand.Op) { ctx.VCVTPS2PD_SAE_Z(y, k, z) }
|
|
|
|
// VCVTPS2PD_Z: Convert Packed Single-Precision FP Values to Packed Double-Precision FP Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2PD.Z m64 k xmm
|
|
// VCVTPS2PD.Z xmm k xmm
|
|
// VCVTPS2PD.Z m256 k zmm
|
|
// VCVTPS2PD.Z ymm k zmm
|
|
// VCVTPS2PD.Z m128 k ymm
|
|
// VCVTPS2PD.Z xmm k ymm
|
|
//
|
|
// Construct and append a VCVTPS2PD.Z instruction to the active function.
|
|
func (c *Context) VCVTPS2PD_Z(mxy, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VCVTPS2PD_Z(mxy, k, xyz))
|
|
}
|
|
|
|
// VCVTPS2PD_Z: Convert Packed Single-Precision FP Values to Packed Double-Precision FP Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2PD.Z m64 k xmm
|
|
// VCVTPS2PD.Z xmm k xmm
|
|
// VCVTPS2PD.Z m256 k zmm
|
|
// VCVTPS2PD.Z ymm k zmm
|
|
// VCVTPS2PD.Z m128 k ymm
|
|
// VCVTPS2PD.Z xmm k ymm
|
|
//
|
|
// Construct and append a VCVTPS2PD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPS2PD_Z(mxy, k, xyz operand.Op) { ctx.VCVTPS2PD_Z(mxy, k, xyz) }
|
|
|
|
// VCVTPS2PH: Convert Single-Precision FP value to Half-Precision FP value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2PH imm8 xmm m64
|
|
// VCVTPS2PH imm8 xmm xmm
|
|
// VCVTPS2PH imm8 ymm m128
|
|
// VCVTPS2PH imm8 ymm xmm
|
|
// VCVTPS2PH imm8 xmm k m64
|
|
// VCVTPS2PH imm8 xmm k xmm
|
|
// VCVTPS2PH imm8 ymm k m128
|
|
// VCVTPS2PH imm8 ymm k xmm
|
|
// VCVTPS2PH imm8 zmm k m256
|
|
// VCVTPS2PH imm8 zmm k ymm
|
|
// VCVTPS2PH imm8 zmm m256
|
|
// VCVTPS2PH imm8 zmm ymm
|
|
//
|
|
// Construct and append a VCVTPS2PH instruction to the active function.
|
|
func (c *Context) VCVTPS2PH(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTPS2PH(ops...))
|
|
}
|
|
|
|
// VCVTPS2PH: Convert Single-Precision FP value to Half-Precision FP value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2PH imm8 xmm m64
|
|
// VCVTPS2PH imm8 xmm xmm
|
|
// VCVTPS2PH imm8 ymm m128
|
|
// VCVTPS2PH imm8 ymm xmm
|
|
// VCVTPS2PH imm8 xmm k m64
|
|
// VCVTPS2PH imm8 xmm k xmm
|
|
// VCVTPS2PH imm8 ymm k m128
|
|
// VCVTPS2PH imm8 ymm k xmm
|
|
// VCVTPS2PH imm8 zmm k m256
|
|
// VCVTPS2PH imm8 zmm k ymm
|
|
// VCVTPS2PH imm8 zmm m256
|
|
// VCVTPS2PH imm8 zmm ymm
|
|
//
|
|
// Construct and append a VCVTPS2PH instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPS2PH(ops ...operand.Op) { ctx.VCVTPS2PH(ops...) }
|
|
|
|
// VCVTPS2PH_SAE: Convert Single-Precision FP value to Half-Precision FP value (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2PH.SAE imm8 zmm k ymm
|
|
// VCVTPS2PH.SAE imm8 zmm ymm
|
|
//
|
|
// Construct and append a VCVTPS2PH.SAE instruction to the active function.
|
|
func (c *Context) VCVTPS2PH_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTPS2PH_SAE(ops...))
|
|
}
|
|
|
|
// VCVTPS2PH_SAE: Convert Single-Precision FP value to Half-Precision FP value (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2PH.SAE imm8 zmm k ymm
|
|
// VCVTPS2PH.SAE imm8 zmm ymm
|
|
//
|
|
// Construct and append a VCVTPS2PH.SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPS2PH_SAE(ops ...operand.Op) { ctx.VCVTPS2PH_SAE(ops...) }
|
|
|
|
// VCVTPS2PH_SAE_Z: Convert Single-Precision FP value to Half-Precision FP value (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2PH.SAE.Z imm8 zmm k ymm
|
|
//
|
|
// Construct and append a VCVTPS2PH.SAE.Z instruction to the active function.
|
|
func (c *Context) VCVTPS2PH_SAE_Z(i, z, k, y operand.Op) {
|
|
c.addinstruction(x86.VCVTPS2PH_SAE_Z(i, z, k, y))
|
|
}
|
|
|
|
// VCVTPS2PH_SAE_Z: Convert Single-Precision FP value to Half-Precision FP value (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2PH.SAE.Z imm8 zmm k ymm
|
|
//
|
|
// Construct and append a VCVTPS2PH.SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPS2PH_SAE_Z(i, z, k, y operand.Op) { ctx.VCVTPS2PH_SAE_Z(i, z, k, y) }
|
|
|
|
// VCVTPS2PH_Z: Convert Single-Precision FP value to Half-Precision FP value (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2PH.Z imm8 xmm k m64
|
|
// VCVTPS2PH.Z imm8 xmm k xmm
|
|
// VCVTPS2PH.Z imm8 ymm k m128
|
|
// VCVTPS2PH.Z imm8 ymm k xmm
|
|
// VCVTPS2PH.Z imm8 zmm k m256
|
|
// VCVTPS2PH.Z imm8 zmm k ymm
|
|
//
|
|
// Construct and append a VCVTPS2PH.Z instruction to the active function.
|
|
func (c *Context) VCVTPS2PH_Z(i, xyz, k, mxy operand.Op) {
|
|
c.addinstruction(x86.VCVTPS2PH_Z(i, xyz, k, mxy))
|
|
}
|
|
|
|
// VCVTPS2PH_Z: Convert Single-Precision FP value to Half-Precision FP value (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2PH.Z imm8 xmm k m64
|
|
// VCVTPS2PH.Z imm8 xmm k xmm
|
|
// VCVTPS2PH.Z imm8 ymm k m128
|
|
// VCVTPS2PH.Z imm8 ymm k xmm
|
|
// VCVTPS2PH.Z imm8 zmm k m256
|
|
// VCVTPS2PH.Z imm8 zmm k ymm
|
|
//
|
|
// Construct and append a VCVTPS2PH.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPS2PH_Z(i, xyz, k, mxy operand.Op) { ctx.VCVTPS2PH_Z(i, xyz, k, mxy) }
|
|
|
|
// VCVTPS2QQ: Convert Packed Single Precision Floating-Point Values to Packed Singed Quadword Integer Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2QQ m128 k ymm
|
|
// VCVTPS2QQ m128 ymm
|
|
// VCVTPS2QQ m64 k xmm
|
|
// VCVTPS2QQ m64 xmm
|
|
// VCVTPS2QQ xmm k xmm
|
|
// VCVTPS2QQ xmm k ymm
|
|
// VCVTPS2QQ xmm xmm
|
|
// VCVTPS2QQ xmm ymm
|
|
// VCVTPS2QQ m256 k zmm
|
|
// VCVTPS2QQ m256 zmm
|
|
// VCVTPS2QQ ymm k zmm
|
|
// VCVTPS2QQ ymm zmm
|
|
//
|
|
// Construct and append a VCVTPS2QQ instruction to the active function.
|
|
func (c *Context) VCVTPS2QQ(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTPS2QQ(ops...))
|
|
}
|
|
|
|
// VCVTPS2QQ: Convert Packed Single Precision Floating-Point Values to Packed Singed Quadword Integer Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2QQ m128 k ymm
|
|
// VCVTPS2QQ m128 ymm
|
|
// VCVTPS2QQ m64 k xmm
|
|
// VCVTPS2QQ m64 xmm
|
|
// VCVTPS2QQ xmm k xmm
|
|
// VCVTPS2QQ xmm k ymm
|
|
// VCVTPS2QQ xmm xmm
|
|
// VCVTPS2QQ xmm ymm
|
|
// VCVTPS2QQ m256 k zmm
|
|
// VCVTPS2QQ m256 zmm
|
|
// VCVTPS2QQ ymm k zmm
|
|
// VCVTPS2QQ ymm zmm
|
|
//
|
|
// Construct and append a VCVTPS2QQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPS2QQ(ops ...operand.Op) { ctx.VCVTPS2QQ(ops...) }
|
|
|
|
// VCVTPS2QQ_BCST: Convert Packed Single Precision Floating-Point Values to Packed Singed Quadword Integer Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2QQ.BCST m32 k xmm
|
|
// VCVTPS2QQ.BCST m32 k ymm
|
|
// VCVTPS2QQ.BCST m32 xmm
|
|
// VCVTPS2QQ.BCST m32 ymm
|
|
// VCVTPS2QQ.BCST m32 k zmm
|
|
// VCVTPS2QQ.BCST m32 zmm
|
|
//
|
|
// Construct and append a VCVTPS2QQ.BCST instruction to the active function.
|
|
func (c *Context) VCVTPS2QQ_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTPS2QQ_BCST(ops...))
|
|
}
|
|
|
|
// VCVTPS2QQ_BCST: Convert Packed Single Precision Floating-Point Values to Packed Singed Quadword Integer Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2QQ.BCST m32 k xmm
|
|
// VCVTPS2QQ.BCST m32 k ymm
|
|
// VCVTPS2QQ.BCST m32 xmm
|
|
// VCVTPS2QQ.BCST m32 ymm
|
|
// VCVTPS2QQ.BCST m32 k zmm
|
|
// VCVTPS2QQ.BCST m32 zmm
|
|
//
|
|
// Construct and append a VCVTPS2QQ.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPS2QQ_BCST(ops ...operand.Op) { ctx.VCVTPS2QQ_BCST(ops...) }
|
|
|
|
// VCVTPS2QQ_BCST_Z: Convert Packed Single Precision Floating-Point Values to Packed Singed Quadword Integer Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2QQ.BCST.Z m32 k xmm
|
|
// VCVTPS2QQ.BCST.Z m32 k ymm
|
|
// VCVTPS2QQ.BCST.Z m32 k zmm
|
|
//
|
|
// Construct and append a VCVTPS2QQ.BCST.Z instruction to the active function.
|
|
func (c *Context) VCVTPS2QQ_BCST_Z(m, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VCVTPS2QQ_BCST_Z(m, k, xyz))
|
|
}
|
|
|
|
// VCVTPS2QQ_BCST_Z: Convert Packed Single Precision Floating-Point Values to Packed Singed Quadword Integer Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2QQ.BCST.Z m32 k xmm
|
|
// VCVTPS2QQ.BCST.Z m32 k ymm
|
|
// VCVTPS2QQ.BCST.Z m32 k zmm
|
|
//
|
|
// Construct and append a VCVTPS2QQ.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPS2QQ_BCST_Z(m, k, xyz operand.Op) { ctx.VCVTPS2QQ_BCST_Z(m, k, xyz) }
|
|
|
|
// VCVTPS2QQ_RD_SAE: Convert Packed Single Precision Floating-Point Values to Packed Singed Quadword Integer Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2QQ.RD_SAE ymm k zmm
|
|
// VCVTPS2QQ.RD_SAE ymm zmm
|
|
//
|
|
// Construct and append a VCVTPS2QQ.RD_SAE instruction to the active function.
|
|
func (c *Context) VCVTPS2QQ_RD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTPS2QQ_RD_SAE(ops...))
|
|
}
|
|
|
|
// VCVTPS2QQ_RD_SAE: Convert Packed Single Precision Floating-Point Values to Packed Singed Quadword Integer Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2QQ.RD_SAE ymm k zmm
|
|
// VCVTPS2QQ.RD_SAE ymm zmm
|
|
//
|
|
// Construct and append a VCVTPS2QQ.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPS2QQ_RD_SAE(ops ...operand.Op) { ctx.VCVTPS2QQ_RD_SAE(ops...) }
|
|
|
|
// VCVTPS2QQ_RD_SAE_Z: Convert Packed Single Precision Floating-Point Values to Packed Singed Quadword Integer Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2QQ.RD_SAE.Z ymm k zmm
|
|
//
|
|
// Construct and append a VCVTPS2QQ.RD_SAE.Z instruction to the active function.
|
|
func (c *Context) VCVTPS2QQ_RD_SAE_Z(y, k, z operand.Op) {
|
|
c.addinstruction(x86.VCVTPS2QQ_RD_SAE_Z(y, k, z))
|
|
}
|
|
|
|
// VCVTPS2QQ_RD_SAE_Z: Convert Packed Single Precision Floating-Point Values to Packed Singed Quadword Integer Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2QQ.RD_SAE.Z ymm k zmm
|
|
//
|
|
// Construct and append a VCVTPS2QQ.RD_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPS2QQ_RD_SAE_Z(y, k, z operand.Op) { ctx.VCVTPS2QQ_RD_SAE_Z(y, k, z) }
|
|
|
|
// VCVTPS2QQ_RN_SAE: Convert Packed Single Precision Floating-Point Values to Packed Singed Quadword Integer Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2QQ.RN_SAE ymm k zmm
|
|
// VCVTPS2QQ.RN_SAE ymm zmm
|
|
//
|
|
// Construct and append a VCVTPS2QQ.RN_SAE instruction to the active function.
|
|
func (c *Context) VCVTPS2QQ_RN_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTPS2QQ_RN_SAE(ops...))
|
|
}
|
|
|
|
// VCVTPS2QQ_RN_SAE: Convert Packed Single Precision Floating-Point Values to Packed Singed Quadword Integer Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2QQ.RN_SAE ymm k zmm
|
|
// VCVTPS2QQ.RN_SAE ymm zmm
|
|
//
|
|
// Construct and append a VCVTPS2QQ.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPS2QQ_RN_SAE(ops ...operand.Op) { ctx.VCVTPS2QQ_RN_SAE(ops...) }
|
|
|
|
// VCVTPS2QQ_RN_SAE_Z: Convert Packed Single Precision Floating-Point Values to Packed Singed Quadword Integer Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2QQ.RN_SAE.Z ymm k zmm
|
|
//
|
|
// Construct and append a VCVTPS2QQ.RN_SAE.Z instruction to the active function.
|
|
func (c *Context) VCVTPS2QQ_RN_SAE_Z(y, k, z operand.Op) {
|
|
c.addinstruction(x86.VCVTPS2QQ_RN_SAE_Z(y, k, z))
|
|
}
|
|
|
|
// VCVTPS2QQ_RN_SAE_Z: Convert Packed Single Precision Floating-Point Values to Packed Singed Quadword Integer Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2QQ.RN_SAE.Z ymm k zmm
|
|
//
|
|
// Construct and append a VCVTPS2QQ.RN_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPS2QQ_RN_SAE_Z(y, k, z operand.Op) { ctx.VCVTPS2QQ_RN_SAE_Z(y, k, z) }
|
|
|
|
// VCVTPS2QQ_RU_SAE: Convert Packed Single Precision Floating-Point Values to Packed Singed Quadword Integer Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2QQ.RU_SAE ymm k zmm
|
|
// VCVTPS2QQ.RU_SAE ymm zmm
|
|
//
|
|
// Construct and append a VCVTPS2QQ.RU_SAE instruction to the active function.
|
|
func (c *Context) VCVTPS2QQ_RU_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTPS2QQ_RU_SAE(ops...))
|
|
}
|
|
|
|
// VCVTPS2QQ_RU_SAE: Convert Packed Single Precision Floating-Point Values to Packed Singed Quadword Integer Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2QQ.RU_SAE ymm k zmm
|
|
// VCVTPS2QQ.RU_SAE ymm zmm
|
|
//
|
|
// Construct and append a VCVTPS2QQ.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPS2QQ_RU_SAE(ops ...operand.Op) { ctx.VCVTPS2QQ_RU_SAE(ops...) }
|
|
|
|
// VCVTPS2QQ_RU_SAE_Z: Convert Packed Single Precision Floating-Point Values to Packed Singed Quadword Integer Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2QQ.RU_SAE.Z ymm k zmm
|
|
//
|
|
// Construct and append a VCVTPS2QQ.RU_SAE.Z instruction to the active function.
|
|
func (c *Context) VCVTPS2QQ_RU_SAE_Z(y, k, z operand.Op) {
|
|
c.addinstruction(x86.VCVTPS2QQ_RU_SAE_Z(y, k, z))
|
|
}
|
|
|
|
// VCVTPS2QQ_RU_SAE_Z: Convert Packed Single Precision Floating-Point Values to Packed Singed Quadword Integer Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2QQ.RU_SAE.Z ymm k zmm
|
|
//
|
|
// Construct and append a VCVTPS2QQ.RU_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPS2QQ_RU_SAE_Z(y, k, z operand.Op) { ctx.VCVTPS2QQ_RU_SAE_Z(y, k, z) }
|
|
|
|
// VCVTPS2QQ_RZ_SAE: Convert Packed Single Precision Floating-Point Values to Packed Singed Quadword Integer Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2QQ.RZ_SAE ymm k zmm
|
|
// VCVTPS2QQ.RZ_SAE ymm zmm
|
|
//
|
|
// Construct and append a VCVTPS2QQ.RZ_SAE instruction to the active function.
|
|
func (c *Context) VCVTPS2QQ_RZ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTPS2QQ_RZ_SAE(ops...))
|
|
}
|
|
|
|
// VCVTPS2QQ_RZ_SAE: Convert Packed Single Precision Floating-Point Values to Packed Singed Quadword Integer Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2QQ.RZ_SAE ymm k zmm
|
|
// VCVTPS2QQ.RZ_SAE ymm zmm
|
|
//
|
|
// Construct and append a VCVTPS2QQ.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPS2QQ_RZ_SAE(ops ...operand.Op) { ctx.VCVTPS2QQ_RZ_SAE(ops...) }
|
|
|
|
// VCVTPS2QQ_RZ_SAE_Z: Convert Packed Single Precision Floating-Point Values to Packed Singed Quadword Integer Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2QQ.RZ_SAE.Z ymm k zmm
|
|
//
|
|
// Construct and append a VCVTPS2QQ.RZ_SAE.Z instruction to the active function.
|
|
func (c *Context) VCVTPS2QQ_RZ_SAE_Z(y, k, z operand.Op) {
|
|
c.addinstruction(x86.VCVTPS2QQ_RZ_SAE_Z(y, k, z))
|
|
}
|
|
|
|
// VCVTPS2QQ_RZ_SAE_Z: Convert Packed Single Precision Floating-Point Values to Packed Singed Quadword Integer Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2QQ.RZ_SAE.Z ymm k zmm
|
|
//
|
|
// Construct and append a VCVTPS2QQ.RZ_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPS2QQ_RZ_SAE_Z(y, k, z operand.Op) { ctx.VCVTPS2QQ_RZ_SAE_Z(y, k, z) }
|
|
|
|
// VCVTPS2QQ_Z: Convert Packed Single Precision Floating-Point Values to Packed Singed Quadword Integer Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2QQ.Z m128 k ymm
|
|
// VCVTPS2QQ.Z m64 k xmm
|
|
// VCVTPS2QQ.Z xmm k xmm
|
|
// VCVTPS2QQ.Z xmm k ymm
|
|
// VCVTPS2QQ.Z m256 k zmm
|
|
// VCVTPS2QQ.Z ymm k zmm
|
|
//
|
|
// Construct and append a VCVTPS2QQ.Z instruction to the active function.
|
|
func (c *Context) VCVTPS2QQ_Z(mxy, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VCVTPS2QQ_Z(mxy, k, xyz))
|
|
}
|
|
|
|
// VCVTPS2QQ_Z: Convert Packed Single Precision Floating-Point Values to Packed Singed Quadword Integer Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2QQ.Z m128 k ymm
|
|
// VCVTPS2QQ.Z m64 k xmm
|
|
// VCVTPS2QQ.Z xmm k xmm
|
|
// VCVTPS2QQ.Z xmm k ymm
|
|
// VCVTPS2QQ.Z m256 k zmm
|
|
// VCVTPS2QQ.Z ymm k zmm
|
|
//
|
|
// Construct and append a VCVTPS2QQ.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPS2QQ_Z(mxy, k, xyz operand.Op) { ctx.VCVTPS2QQ_Z(mxy, k, xyz) }
|
|
|
|
// VCVTPS2UDQ: Convert Packed Single-Precision Floating-Point Values to Packed Unsigned Doubleword Integer Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2UDQ m128 k xmm
|
|
// VCVTPS2UDQ m128 xmm
|
|
// VCVTPS2UDQ m256 k ymm
|
|
// VCVTPS2UDQ m256 ymm
|
|
// VCVTPS2UDQ xmm k xmm
|
|
// VCVTPS2UDQ xmm xmm
|
|
// VCVTPS2UDQ ymm k ymm
|
|
// VCVTPS2UDQ ymm ymm
|
|
// VCVTPS2UDQ m512 k zmm
|
|
// VCVTPS2UDQ m512 zmm
|
|
// VCVTPS2UDQ zmm k zmm
|
|
// VCVTPS2UDQ zmm zmm
|
|
//
|
|
// Construct and append a VCVTPS2UDQ instruction to the active function.
|
|
func (c *Context) VCVTPS2UDQ(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTPS2UDQ(ops...))
|
|
}
|
|
|
|
// VCVTPS2UDQ: Convert Packed Single-Precision Floating-Point Values to Packed Unsigned Doubleword Integer Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2UDQ m128 k xmm
|
|
// VCVTPS2UDQ m128 xmm
|
|
// VCVTPS2UDQ m256 k ymm
|
|
// VCVTPS2UDQ m256 ymm
|
|
// VCVTPS2UDQ xmm k xmm
|
|
// VCVTPS2UDQ xmm xmm
|
|
// VCVTPS2UDQ ymm k ymm
|
|
// VCVTPS2UDQ ymm ymm
|
|
// VCVTPS2UDQ m512 k zmm
|
|
// VCVTPS2UDQ m512 zmm
|
|
// VCVTPS2UDQ zmm k zmm
|
|
// VCVTPS2UDQ zmm zmm
|
|
//
|
|
// Construct and append a VCVTPS2UDQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPS2UDQ(ops ...operand.Op) { ctx.VCVTPS2UDQ(ops...) }
|
|
|
|
// VCVTPS2UDQ_BCST: Convert Packed Single-Precision Floating-Point Values to Packed Unsigned Doubleword Integer Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2UDQ.BCST m32 k xmm
|
|
// VCVTPS2UDQ.BCST m32 k ymm
|
|
// VCVTPS2UDQ.BCST m32 xmm
|
|
// VCVTPS2UDQ.BCST m32 ymm
|
|
// VCVTPS2UDQ.BCST m32 k zmm
|
|
// VCVTPS2UDQ.BCST m32 zmm
|
|
//
|
|
// Construct and append a VCVTPS2UDQ.BCST instruction to the active function.
|
|
func (c *Context) VCVTPS2UDQ_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTPS2UDQ_BCST(ops...))
|
|
}
|
|
|
|
// VCVTPS2UDQ_BCST: Convert Packed Single-Precision Floating-Point Values to Packed Unsigned Doubleword Integer Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2UDQ.BCST m32 k xmm
|
|
// VCVTPS2UDQ.BCST m32 k ymm
|
|
// VCVTPS2UDQ.BCST m32 xmm
|
|
// VCVTPS2UDQ.BCST m32 ymm
|
|
// VCVTPS2UDQ.BCST m32 k zmm
|
|
// VCVTPS2UDQ.BCST m32 zmm
|
|
//
|
|
// Construct and append a VCVTPS2UDQ.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPS2UDQ_BCST(ops ...operand.Op) { ctx.VCVTPS2UDQ_BCST(ops...) }
|
|
|
|
// VCVTPS2UDQ_BCST_Z: Convert Packed Single-Precision Floating-Point Values to Packed Unsigned Doubleword Integer Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2UDQ.BCST.Z m32 k xmm
|
|
// VCVTPS2UDQ.BCST.Z m32 k ymm
|
|
// VCVTPS2UDQ.BCST.Z m32 k zmm
|
|
//
|
|
// Construct and append a VCVTPS2UDQ.BCST.Z instruction to the active function.
|
|
func (c *Context) VCVTPS2UDQ_BCST_Z(m, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VCVTPS2UDQ_BCST_Z(m, k, xyz))
|
|
}
|
|
|
|
// VCVTPS2UDQ_BCST_Z: Convert Packed Single-Precision Floating-Point Values to Packed Unsigned Doubleword Integer Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2UDQ.BCST.Z m32 k xmm
|
|
// VCVTPS2UDQ.BCST.Z m32 k ymm
|
|
// VCVTPS2UDQ.BCST.Z m32 k zmm
|
|
//
|
|
// Construct and append a VCVTPS2UDQ.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPS2UDQ_BCST_Z(m, k, xyz operand.Op) { ctx.VCVTPS2UDQ_BCST_Z(m, k, xyz) }
|
|
|
|
// VCVTPS2UDQ_RD_SAE: Convert Packed Single-Precision Floating-Point Values to Packed Unsigned Doubleword Integer Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2UDQ.RD_SAE zmm k zmm
|
|
// VCVTPS2UDQ.RD_SAE zmm zmm
|
|
//
|
|
// Construct and append a VCVTPS2UDQ.RD_SAE instruction to the active function.
|
|
func (c *Context) VCVTPS2UDQ_RD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTPS2UDQ_RD_SAE(ops...))
|
|
}
|
|
|
|
// VCVTPS2UDQ_RD_SAE: Convert Packed Single-Precision Floating-Point Values to Packed Unsigned Doubleword Integer Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2UDQ.RD_SAE zmm k zmm
|
|
// VCVTPS2UDQ.RD_SAE zmm zmm
|
|
//
|
|
// Construct and append a VCVTPS2UDQ.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPS2UDQ_RD_SAE(ops ...operand.Op) { ctx.VCVTPS2UDQ_RD_SAE(ops...) }
|
|
|
|
// VCVTPS2UDQ_RD_SAE_Z: Convert Packed Single-Precision Floating-Point Values to Packed Unsigned Doubleword Integer Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2UDQ.RD_SAE.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTPS2UDQ.RD_SAE.Z instruction to the active function.
|
|
func (c *Context) VCVTPS2UDQ_RD_SAE_Z(z, k, z1 operand.Op) {
|
|
c.addinstruction(x86.VCVTPS2UDQ_RD_SAE_Z(z, k, z1))
|
|
}
|
|
|
|
// VCVTPS2UDQ_RD_SAE_Z: Convert Packed Single-Precision Floating-Point Values to Packed Unsigned Doubleword Integer Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2UDQ.RD_SAE.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTPS2UDQ.RD_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPS2UDQ_RD_SAE_Z(z, k, z1 operand.Op) { ctx.VCVTPS2UDQ_RD_SAE_Z(z, k, z1) }
|
|
|
|
// VCVTPS2UDQ_RN_SAE: Convert Packed Single-Precision Floating-Point Values to Packed Unsigned Doubleword Integer Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2UDQ.RN_SAE zmm k zmm
|
|
// VCVTPS2UDQ.RN_SAE zmm zmm
|
|
//
|
|
// Construct and append a VCVTPS2UDQ.RN_SAE instruction to the active function.
|
|
func (c *Context) VCVTPS2UDQ_RN_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTPS2UDQ_RN_SAE(ops...))
|
|
}
|
|
|
|
// VCVTPS2UDQ_RN_SAE: Convert Packed Single-Precision Floating-Point Values to Packed Unsigned Doubleword Integer Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2UDQ.RN_SAE zmm k zmm
|
|
// VCVTPS2UDQ.RN_SAE zmm zmm
|
|
//
|
|
// Construct and append a VCVTPS2UDQ.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPS2UDQ_RN_SAE(ops ...operand.Op) { ctx.VCVTPS2UDQ_RN_SAE(ops...) }
|
|
|
|
// VCVTPS2UDQ_RN_SAE_Z: Convert Packed Single-Precision Floating-Point Values to Packed Unsigned Doubleword Integer Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2UDQ.RN_SAE.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTPS2UDQ.RN_SAE.Z instruction to the active function.
|
|
func (c *Context) VCVTPS2UDQ_RN_SAE_Z(z, k, z1 operand.Op) {
|
|
c.addinstruction(x86.VCVTPS2UDQ_RN_SAE_Z(z, k, z1))
|
|
}
|
|
|
|
// VCVTPS2UDQ_RN_SAE_Z: Convert Packed Single-Precision Floating-Point Values to Packed Unsigned Doubleword Integer Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2UDQ.RN_SAE.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTPS2UDQ.RN_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPS2UDQ_RN_SAE_Z(z, k, z1 operand.Op) { ctx.VCVTPS2UDQ_RN_SAE_Z(z, k, z1) }
|
|
|
|
// VCVTPS2UDQ_RU_SAE: Convert Packed Single-Precision Floating-Point Values to Packed Unsigned Doubleword Integer Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2UDQ.RU_SAE zmm k zmm
|
|
// VCVTPS2UDQ.RU_SAE zmm zmm
|
|
//
|
|
// Construct and append a VCVTPS2UDQ.RU_SAE instruction to the active function.
|
|
func (c *Context) VCVTPS2UDQ_RU_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTPS2UDQ_RU_SAE(ops...))
|
|
}
|
|
|
|
// VCVTPS2UDQ_RU_SAE: Convert Packed Single-Precision Floating-Point Values to Packed Unsigned Doubleword Integer Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2UDQ.RU_SAE zmm k zmm
|
|
// VCVTPS2UDQ.RU_SAE zmm zmm
|
|
//
|
|
// Construct and append a VCVTPS2UDQ.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPS2UDQ_RU_SAE(ops ...operand.Op) { ctx.VCVTPS2UDQ_RU_SAE(ops...) }
|
|
|
|
// VCVTPS2UDQ_RU_SAE_Z: Convert Packed Single-Precision Floating-Point Values to Packed Unsigned Doubleword Integer Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2UDQ.RU_SAE.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTPS2UDQ.RU_SAE.Z instruction to the active function.
|
|
func (c *Context) VCVTPS2UDQ_RU_SAE_Z(z, k, z1 operand.Op) {
|
|
c.addinstruction(x86.VCVTPS2UDQ_RU_SAE_Z(z, k, z1))
|
|
}
|
|
|
|
// VCVTPS2UDQ_RU_SAE_Z: Convert Packed Single-Precision Floating-Point Values to Packed Unsigned Doubleword Integer Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2UDQ.RU_SAE.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTPS2UDQ.RU_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPS2UDQ_RU_SAE_Z(z, k, z1 operand.Op) { ctx.VCVTPS2UDQ_RU_SAE_Z(z, k, z1) }
|
|
|
|
// VCVTPS2UDQ_RZ_SAE: Convert Packed Single-Precision Floating-Point Values to Packed Unsigned Doubleword Integer Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2UDQ.RZ_SAE zmm k zmm
|
|
// VCVTPS2UDQ.RZ_SAE zmm zmm
|
|
//
|
|
// Construct and append a VCVTPS2UDQ.RZ_SAE instruction to the active function.
|
|
func (c *Context) VCVTPS2UDQ_RZ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTPS2UDQ_RZ_SAE(ops...))
|
|
}
|
|
|
|
// VCVTPS2UDQ_RZ_SAE: Convert Packed Single-Precision Floating-Point Values to Packed Unsigned Doubleword Integer Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2UDQ.RZ_SAE zmm k zmm
|
|
// VCVTPS2UDQ.RZ_SAE zmm zmm
|
|
//
|
|
// Construct and append a VCVTPS2UDQ.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPS2UDQ_RZ_SAE(ops ...operand.Op) { ctx.VCVTPS2UDQ_RZ_SAE(ops...) }
|
|
|
|
// VCVTPS2UDQ_RZ_SAE_Z: Convert Packed Single-Precision Floating-Point Values to Packed Unsigned Doubleword Integer Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2UDQ.RZ_SAE.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTPS2UDQ.RZ_SAE.Z instruction to the active function.
|
|
func (c *Context) VCVTPS2UDQ_RZ_SAE_Z(z, k, z1 operand.Op) {
|
|
c.addinstruction(x86.VCVTPS2UDQ_RZ_SAE_Z(z, k, z1))
|
|
}
|
|
|
|
// VCVTPS2UDQ_RZ_SAE_Z: Convert Packed Single-Precision Floating-Point Values to Packed Unsigned Doubleword Integer Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2UDQ.RZ_SAE.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTPS2UDQ.RZ_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPS2UDQ_RZ_SAE_Z(z, k, z1 operand.Op) { ctx.VCVTPS2UDQ_RZ_SAE_Z(z, k, z1) }
|
|
|
|
// VCVTPS2UDQ_Z: Convert Packed Single-Precision Floating-Point Values to Packed Unsigned Doubleword Integer Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2UDQ.Z m128 k xmm
|
|
// VCVTPS2UDQ.Z m256 k ymm
|
|
// VCVTPS2UDQ.Z xmm k xmm
|
|
// VCVTPS2UDQ.Z ymm k ymm
|
|
// VCVTPS2UDQ.Z m512 k zmm
|
|
// VCVTPS2UDQ.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTPS2UDQ.Z instruction to the active function.
|
|
func (c *Context) VCVTPS2UDQ_Z(mxyz, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VCVTPS2UDQ_Z(mxyz, k, xyz))
|
|
}
|
|
|
|
// VCVTPS2UDQ_Z: Convert Packed Single-Precision Floating-Point Values to Packed Unsigned Doubleword Integer Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2UDQ.Z m128 k xmm
|
|
// VCVTPS2UDQ.Z m256 k ymm
|
|
// VCVTPS2UDQ.Z xmm k xmm
|
|
// VCVTPS2UDQ.Z ymm k ymm
|
|
// VCVTPS2UDQ.Z m512 k zmm
|
|
// VCVTPS2UDQ.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTPS2UDQ.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPS2UDQ_Z(mxyz, k, xyz operand.Op) { ctx.VCVTPS2UDQ_Z(mxyz, k, xyz) }
|
|
|
|
// VCVTPS2UQQ: Convert Packed Single Precision Floating-Point Values to Packed Unsigned Quadword Integer Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2UQQ m128 k ymm
|
|
// VCVTPS2UQQ m128 ymm
|
|
// VCVTPS2UQQ m64 k xmm
|
|
// VCVTPS2UQQ m64 xmm
|
|
// VCVTPS2UQQ xmm k xmm
|
|
// VCVTPS2UQQ xmm k ymm
|
|
// VCVTPS2UQQ xmm xmm
|
|
// VCVTPS2UQQ xmm ymm
|
|
// VCVTPS2UQQ m256 k zmm
|
|
// VCVTPS2UQQ m256 zmm
|
|
// VCVTPS2UQQ ymm k zmm
|
|
// VCVTPS2UQQ ymm zmm
|
|
//
|
|
// Construct and append a VCVTPS2UQQ instruction to the active function.
|
|
func (c *Context) VCVTPS2UQQ(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTPS2UQQ(ops...))
|
|
}
|
|
|
|
// VCVTPS2UQQ: Convert Packed Single Precision Floating-Point Values to Packed Unsigned Quadword Integer Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTPS2UQQ m128 k ymm
|
|
// VCVTPS2UQQ m128 ymm
|
|
// VCVTPS2UQQ m64 k xmm
|
|
// VCVTPS2UQQ m64 xmm
|
|
// VCVTPS2UQQ xmm k xmm
|
|
// VCVTPS2UQQ xmm k ymm
|
|
// VCVTPS2UQQ xmm xmm
|
|
// VCVTPS2UQQ xmm ymm
|
|
// VCVTPS2UQQ m256 k zmm
|
|
// VCVTPS2UQQ m256 zmm
|
|
// VCVTPS2UQQ ymm k zmm
|
|
// VCVTPS2UQQ ymm zmm
|
|
//
|
|
// Construct and append a VCVTPS2UQQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTPS2UQQ(ops ...operand.Op) { ctx.VCVTPS2UQQ(ops...) }
|
|
|
|
// VCVTPS2UQQ_BCST: Convert Packed Single Precision Floating-Point Values to Packed Unsigned Quadword Integer Values (Broadcast).
//
// Forms:
//
//	VCVTPS2UQQ.BCST m32 k xmm
//	VCVTPS2UQQ.BCST m32 k ymm
//	VCVTPS2UQQ.BCST m32 xmm
//	VCVTPS2UQQ.BCST m32 ymm
//	VCVTPS2UQQ.BCST m32 k zmm
//	VCVTPS2UQQ.BCST m32 zmm
//
// Construct and append a VCVTPS2UQQ.BCST instruction to the active function.
func (c *Context) VCVTPS2UQQ_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VCVTPS2UQQ_BCST(ops...))
}

// VCVTPS2UQQ_BCST: Convert Packed Single Precision Floating-Point Values to Packed Unsigned Quadword Integer Values (Broadcast).
//
// Forms:
//
//	VCVTPS2UQQ.BCST m32 k xmm
//	VCVTPS2UQQ.BCST m32 k ymm
//	VCVTPS2UQQ.BCST m32 xmm
//	VCVTPS2UQQ.BCST m32 ymm
//	VCVTPS2UQQ.BCST m32 k zmm
//	VCVTPS2UQQ.BCST m32 zmm
//
// Construct and append a VCVTPS2UQQ.BCST instruction to the active function.
// Operates on the global context.
func VCVTPS2UQQ_BCST(ops ...operand.Op) { ctx.VCVTPS2UQQ_BCST(ops...) }

// VCVTPS2UQQ_BCST_Z: Convert Packed Single Precision Floating-Point Values to Packed Unsigned Quadword Integer Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VCVTPS2UQQ.BCST.Z m32 k xmm
//	VCVTPS2UQQ.BCST.Z m32 k ymm
//	VCVTPS2UQQ.BCST.Z m32 k zmm
//
// Construct and append a VCVTPS2UQQ.BCST.Z instruction to the active function.
func (c *Context) VCVTPS2UQQ_BCST_Z(m, k, xyz operand.Op) {
	c.addinstruction(x86.VCVTPS2UQQ_BCST_Z(m, k, xyz))
}

// VCVTPS2UQQ_BCST_Z: Convert Packed Single Precision Floating-Point Values to Packed Unsigned Quadword Integer Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VCVTPS2UQQ.BCST.Z m32 k xmm
//	VCVTPS2UQQ.BCST.Z m32 k ymm
//	VCVTPS2UQQ.BCST.Z m32 k zmm
//
// Construct and append a VCVTPS2UQQ.BCST.Z instruction to the active function.
// Operates on the global context.
func VCVTPS2UQQ_BCST_Z(m, k, xyz operand.Op) { ctx.VCVTPS2UQQ_BCST_Z(m, k, xyz) }
|
|
|
|
// VCVTPS2UQQ_RD_SAE: Convert Packed Single Precision Floating-Point Values to Packed Unsigned Quadword Integer Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VCVTPS2UQQ.RD_SAE ymm k zmm
//	VCVTPS2UQQ.RD_SAE ymm zmm
//
// Construct and append a VCVTPS2UQQ.RD_SAE instruction to the active function.
func (c *Context) VCVTPS2UQQ_RD_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VCVTPS2UQQ_RD_SAE(ops...))
}

// VCVTPS2UQQ_RD_SAE: Convert Packed Single Precision Floating-Point Values to Packed Unsigned Quadword Integer Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VCVTPS2UQQ.RD_SAE ymm k zmm
//	VCVTPS2UQQ.RD_SAE ymm zmm
//
// Construct and append a VCVTPS2UQQ.RD_SAE instruction to the active function.
// Operates on the global context.
func VCVTPS2UQQ_RD_SAE(ops ...operand.Op) { ctx.VCVTPS2UQQ_RD_SAE(ops...) }

// VCVTPS2UQQ_RD_SAE_Z: Convert Packed Single Precision Floating-Point Values to Packed Unsigned Quadword Integer Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VCVTPS2UQQ.RD_SAE.Z ymm k zmm
//
// Construct and append a VCVTPS2UQQ.RD_SAE.Z instruction to the active function.
func (c *Context) VCVTPS2UQQ_RD_SAE_Z(y, k, z operand.Op) {
	c.addinstruction(x86.VCVTPS2UQQ_RD_SAE_Z(y, k, z))
}

// VCVTPS2UQQ_RD_SAE_Z: Convert Packed Single Precision Floating-Point Values to Packed Unsigned Quadword Integer Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VCVTPS2UQQ.RD_SAE.Z ymm k zmm
//
// Construct and append a VCVTPS2UQQ.RD_SAE.Z instruction to the active function.
// Operates on the global context.
func VCVTPS2UQQ_RD_SAE_Z(y, k, z operand.Op) { ctx.VCVTPS2UQQ_RD_SAE_Z(y, k, z) }

// VCVTPS2UQQ_RN_SAE: Convert Packed Single Precision Floating-Point Values to Packed Unsigned Quadword Integer Values (Round Towards Nearest).
//
// Forms:
//
//	VCVTPS2UQQ.RN_SAE ymm k zmm
//	VCVTPS2UQQ.RN_SAE ymm zmm
//
// Construct and append a VCVTPS2UQQ.RN_SAE instruction to the active function.
func (c *Context) VCVTPS2UQQ_RN_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VCVTPS2UQQ_RN_SAE(ops...))
}

// VCVTPS2UQQ_RN_SAE: Convert Packed Single Precision Floating-Point Values to Packed Unsigned Quadword Integer Values (Round Towards Nearest).
//
// Forms:
//
//	VCVTPS2UQQ.RN_SAE ymm k zmm
//	VCVTPS2UQQ.RN_SAE ymm zmm
//
// Construct and append a VCVTPS2UQQ.RN_SAE instruction to the active function.
// Operates on the global context.
func VCVTPS2UQQ_RN_SAE(ops ...operand.Op) { ctx.VCVTPS2UQQ_RN_SAE(ops...) }

// VCVTPS2UQQ_RN_SAE_Z: Convert Packed Single Precision Floating-Point Values to Packed Unsigned Quadword Integer Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VCVTPS2UQQ.RN_SAE.Z ymm k zmm
//
// Construct and append a VCVTPS2UQQ.RN_SAE.Z instruction to the active function.
func (c *Context) VCVTPS2UQQ_RN_SAE_Z(y, k, z operand.Op) {
	c.addinstruction(x86.VCVTPS2UQQ_RN_SAE_Z(y, k, z))
}

// VCVTPS2UQQ_RN_SAE_Z: Convert Packed Single Precision Floating-Point Values to Packed Unsigned Quadword Integer Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VCVTPS2UQQ.RN_SAE.Z ymm k zmm
//
// Construct and append a VCVTPS2UQQ.RN_SAE.Z instruction to the active function.
// Operates on the global context.
func VCVTPS2UQQ_RN_SAE_Z(y, k, z operand.Op) { ctx.VCVTPS2UQQ_RN_SAE_Z(y, k, z) }

// VCVTPS2UQQ_RU_SAE: Convert Packed Single Precision Floating-Point Values to Packed Unsigned Quadword Integer Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VCVTPS2UQQ.RU_SAE ymm k zmm
//	VCVTPS2UQQ.RU_SAE ymm zmm
//
// Construct and append a VCVTPS2UQQ.RU_SAE instruction to the active function.
func (c *Context) VCVTPS2UQQ_RU_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VCVTPS2UQQ_RU_SAE(ops...))
}

// VCVTPS2UQQ_RU_SAE: Convert Packed Single Precision Floating-Point Values to Packed Unsigned Quadword Integer Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VCVTPS2UQQ.RU_SAE ymm k zmm
//	VCVTPS2UQQ.RU_SAE ymm zmm
//
// Construct and append a VCVTPS2UQQ.RU_SAE instruction to the active function.
// Operates on the global context.
func VCVTPS2UQQ_RU_SAE(ops ...operand.Op) { ctx.VCVTPS2UQQ_RU_SAE(ops...) }

// VCVTPS2UQQ_RU_SAE_Z: Convert Packed Single Precision Floating-Point Values to Packed Unsigned Quadword Integer Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VCVTPS2UQQ.RU_SAE.Z ymm k zmm
//
// Construct and append a VCVTPS2UQQ.RU_SAE.Z instruction to the active function.
func (c *Context) VCVTPS2UQQ_RU_SAE_Z(y, k, z operand.Op) {
	c.addinstruction(x86.VCVTPS2UQQ_RU_SAE_Z(y, k, z))
}

// VCVTPS2UQQ_RU_SAE_Z: Convert Packed Single Precision Floating-Point Values to Packed Unsigned Quadword Integer Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VCVTPS2UQQ.RU_SAE.Z ymm k zmm
//
// Construct and append a VCVTPS2UQQ.RU_SAE.Z instruction to the active function.
// Operates on the global context.
func VCVTPS2UQQ_RU_SAE_Z(y, k, z operand.Op) { ctx.VCVTPS2UQQ_RU_SAE_Z(y, k, z) }

// VCVTPS2UQQ_RZ_SAE: Convert Packed Single Precision Floating-Point Values to Packed Unsigned Quadword Integer Values (Round Towards Zero).
//
// Forms:
//
//	VCVTPS2UQQ.RZ_SAE ymm k zmm
//	VCVTPS2UQQ.RZ_SAE ymm zmm
//
// Construct and append a VCVTPS2UQQ.RZ_SAE instruction to the active function.
func (c *Context) VCVTPS2UQQ_RZ_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VCVTPS2UQQ_RZ_SAE(ops...))
}

// VCVTPS2UQQ_RZ_SAE: Convert Packed Single Precision Floating-Point Values to Packed Unsigned Quadword Integer Values (Round Towards Zero).
//
// Forms:
//
//	VCVTPS2UQQ.RZ_SAE ymm k zmm
//	VCVTPS2UQQ.RZ_SAE ymm zmm
//
// Construct and append a VCVTPS2UQQ.RZ_SAE instruction to the active function.
// Operates on the global context.
func VCVTPS2UQQ_RZ_SAE(ops ...operand.Op) { ctx.VCVTPS2UQQ_RZ_SAE(ops...) }

// VCVTPS2UQQ_RZ_SAE_Z: Convert Packed Single Precision Floating-Point Values to Packed Unsigned Quadword Integer Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VCVTPS2UQQ.RZ_SAE.Z ymm k zmm
//
// Construct and append a VCVTPS2UQQ.RZ_SAE.Z instruction to the active function.
func (c *Context) VCVTPS2UQQ_RZ_SAE_Z(y, k, z operand.Op) {
	c.addinstruction(x86.VCVTPS2UQQ_RZ_SAE_Z(y, k, z))
}

// VCVTPS2UQQ_RZ_SAE_Z: Convert Packed Single Precision Floating-Point Values to Packed Unsigned Quadword Integer Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VCVTPS2UQQ.RZ_SAE.Z ymm k zmm
//
// Construct and append a VCVTPS2UQQ.RZ_SAE.Z instruction to the active function.
// Operates on the global context.
func VCVTPS2UQQ_RZ_SAE_Z(y, k, z operand.Op) { ctx.VCVTPS2UQQ_RZ_SAE_Z(y, k, z) }
|
|
|
|
// VCVTPS2UQQ_Z: Convert Packed Single Precision Floating-Point Values to Packed Unsigned Quadword Integer Values (Zeroing Masking).
//
// Forms:
//
//	VCVTPS2UQQ.Z m128 k ymm
//	VCVTPS2UQQ.Z m64 k xmm
//	VCVTPS2UQQ.Z xmm k xmm
//	VCVTPS2UQQ.Z xmm k ymm
//	VCVTPS2UQQ.Z m256 k zmm
//	VCVTPS2UQQ.Z ymm k zmm
//
// Construct and append a VCVTPS2UQQ.Z instruction to the active function.
func (c *Context) VCVTPS2UQQ_Z(mxy, k, xyz operand.Op) {
	c.addinstruction(x86.VCVTPS2UQQ_Z(mxy, k, xyz))
}

// VCVTPS2UQQ_Z: Convert Packed Single Precision Floating-Point Values to Packed Unsigned Quadword Integer Values (Zeroing Masking).
//
// Forms:
//
//	VCVTPS2UQQ.Z m128 k ymm
//	VCVTPS2UQQ.Z m64 k xmm
//	VCVTPS2UQQ.Z xmm k xmm
//	VCVTPS2UQQ.Z xmm k ymm
//	VCVTPS2UQQ.Z m256 k zmm
//	VCVTPS2UQQ.Z ymm k zmm
//
// Construct and append a VCVTPS2UQQ.Z instruction to the active function.
// Operates on the global context.
func VCVTPS2UQQ_Z(mxy, k, xyz operand.Op) { ctx.VCVTPS2UQQ_Z(mxy, k, xyz) }
|
|
|
|
// VCVTQQ2PD: Convert Packed Quadword Integers to Packed Double-Precision Floating-Point Values.
//
// Forms:
//
//	VCVTQQ2PD m128 k xmm
//	VCVTQQ2PD m128 xmm
//	VCVTQQ2PD m256 k ymm
//	VCVTQQ2PD m256 ymm
//	VCVTQQ2PD xmm k xmm
//	VCVTQQ2PD xmm xmm
//	VCVTQQ2PD ymm k ymm
//	VCVTQQ2PD ymm ymm
//	VCVTQQ2PD m512 k zmm
//	VCVTQQ2PD m512 zmm
//	VCVTQQ2PD zmm k zmm
//	VCVTQQ2PD zmm zmm
//
// Construct and append a VCVTQQ2PD instruction to the active function.
func (c *Context) VCVTQQ2PD(ops ...operand.Op) {
	c.addinstruction(x86.VCVTQQ2PD(ops...))
}

// VCVTQQ2PD: Convert Packed Quadword Integers to Packed Double-Precision Floating-Point Values.
//
// Forms:
//
//	VCVTQQ2PD m128 k xmm
//	VCVTQQ2PD m128 xmm
//	VCVTQQ2PD m256 k ymm
//	VCVTQQ2PD m256 ymm
//	VCVTQQ2PD xmm k xmm
//	VCVTQQ2PD xmm xmm
//	VCVTQQ2PD ymm k ymm
//	VCVTQQ2PD ymm ymm
//	VCVTQQ2PD m512 k zmm
//	VCVTQQ2PD m512 zmm
//	VCVTQQ2PD zmm k zmm
//	VCVTQQ2PD zmm zmm
//
// Construct and append a VCVTQQ2PD instruction to the active function.
// Operates on the global context.
func VCVTQQ2PD(ops ...operand.Op) { ctx.VCVTQQ2PD(ops...) }

// VCVTQQ2PD_BCST: Convert Packed Quadword Integers to Packed Double-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VCVTQQ2PD.BCST m64 k xmm
//	VCVTQQ2PD.BCST m64 k ymm
//	VCVTQQ2PD.BCST m64 xmm
//	VCVTQQ2PD.BCST m64 ymm
//	VCVTQQ2PD.BCST m64 k zmm
//	VCVTQQ2PD.BCST m64 zmm
//
// Construct and append a VCVTQQ2PD.BCST instruction to the active function.
func (c *Context) VCVTQQ2PD_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VCVTQQ2PD_BCST(ops...))
}

// VCVTQQ2PD_BCST: Convert Packed Quadword Integers to Packed Double-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VCVTQQ2PD.BCST m64 k xmm
//	VCVTQQ2PD.BCST m64 k ymm
//	VCVTQQ2PD.BCST m64 xmm
//	VCVTQQ2PD.BCST m64 ymm
//	VCVTQQ2PD.BCST m64 k zmm
//	VCVTQQ2PD.BCST m64 zmm
//
// Construct and append a VCVTQQ2PD.BCST instruction to the active function.
// Operates on the global context.
func VCVTQQ2PD_BCST(ops ...operand.Op) { ctx.VCVTQQ2PD_BCST(ops...) }

// VCVTQQ2PD_BCST_Z: Convert Packed Quadword Integers to Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VCVTQQ2PD.BCST.Z m64 k xmm
//	VCVTQQ2PD.BCST.Z m64 k ymm
//	VCVTQQ2PD.BCST.Z m64 k zmm
//
// Construct and append a VCVTQQ2PD.BCST.Z instruction to the active function.
func (c *Context) VCVTQQ2PD_BCST_Z(m, k, xyz operand.Op) {
	c.addinstruction(x86.VCVTQQ2PD_BCST_Z(m, k, xyz))
}

// VCVTQQ2PD_BCST_Z: Convert Packed Quadword Integers to Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VCVTQQ2PD.BCST.Z m64 k xmm
//	VCVTQQ2PD.BCST.Z m64 k ymm
//	VCVTQQ2PD.BCST.Z m64 k zmm
//
// Construct and append a VCVTQQ2PD.BCST.Z instruction to the active function.
// Operates on the global context.
func VCVTQQ2PD_BCST_Z(m, k, xyz operand.Op) { ctx.VCVTQQ2PD_BCST_Z(m, k, xyz) }
|
|
|
|
// VCVTQQ2PD_RD_SAE: Convert Packed Quadword Integers to Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VCVTQQ2PD.RD_SAE zmm k zmm
//	VCVTQQ2PD.RD_SAE zmm zmm
//
// Construct and append a VCVTQQ2PD.RD_SAE instruction to the active function.
func (c *Context) VCVTQQ2PD_RD_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VCVTQQ2PD_RD_SAE(ops...))
}

// VCVTQQ2PD_RD_SAE: Convert Packed Quadword Integers to Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VCVTQQ2PD.RD_SAE zmm k zmm
//	VCVTQQ2PD.RD_SAE zmm zmm
//
// Construct and append a VCVTQQ2PD.RD_SAE instruction to the active function.
// Operates on the global context.
func VCVTQQ2PD_RD_SAE(ops ...operand.Op) { ctx.VCVTQQ2PD_RD_SAE(ops...) }

// VCVTQQ2PD_RD_SAE_Z: Convert Packed Quadword Integers to Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VCVTQQ2PD.RD_SAE.Z zmm k zmm
//
// Construct and append a VCVTQQ2PD.RD_SAE.Z instruction to the active function.
func (c *Context) VCVTQQ2PD_RD_SAE_Z(z, k, z1 operand.Op) {
	c.addinstruction(x86.VCVTQQ2PD_RD_SAE_Z(z, k, z1))
}

// VCVTQQ2PD_RD_SAE_Z: Convert Packed Quadword Integers to Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VCVTQQ2PD.RD_SAE.Z zmm k zmm
//
// Construct and append a VCVTQQ2PD.RD_SAE.Z instruction to the active function.
// Operates on the global context.
func VCVTQQ2PD_RD_SAE_Z(z, k, z1 operand.Op) { ctx.VCVTQQ2PD_RD_SAE_Z(z, k, z1) }

// VCVTQQ2PD_RN_SAE: Convert Packed Quadword Integers to Packed Double-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VCVTQQ2PD.RN_SAE zmm k zmm
//	VCVTQQ2PD.RN_SAE zmm zmm
//
// Construct and append a VCVTQQ2PD.RN_SAE instruction to the active function.
func (c *Context) VCVTQQ2PD_RN_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VCVTQQ2PD_RN_SAE(ops...))
}

// VCVTQQ2PD_RN_SAE: Convert Packed Quadword Integers to Packed Double-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VCVTQQ2PD.RN_SAE zmm k zmm
//	VCVTQQ2PD.RN_SAE zmm zmm
//
// Construct and append a VCVTQQ2PD.RN_SAE instruction to the active function.
// Operates on the global context.
func VCVTQQ2PD_RN_SAE(ops ...operand.Op) { ctx.VCVTQQ2PD_RN_SAE(ops...) }

// VCVTQQ2PD_RN_SAE_Z: Convert Packed Quadword Integers to Packed Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VCVTQQ2PD.RN_SAE.Z zmm k zmm
//
// Construct and append a VCVTQQ2PD.RN_SAE.Z instruction to the active function.
func (c *Context) VCVTQQ2PD_RN_SAE_Z(z, k, z1 operand.Op) {
	c.addinstruction(x86.VCVTQQ2PD_RN_SAE_Z(z, k, z1))
}

// VCVTQQ2PD_RN_SAE_Z: Convert Packed Quadword Integers to Packed Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VCVTQQ2PD.RN_SAE.Z zmm k zmm
//
// Construct and append a VCVTQQ2PD.RN_SAE.Z instruction to the active function.
// Operates on the global context.
func VCVTQQ2PD_RN_SAE_Z(z, k, z1 operand.Op) { ctx.VCVTQQ2PD_RN_SAE_Z(z, k, z1) }

// VCVTQQ2PD_RU_SAE: Convert Packed Quadword Integers to Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VCVTQQ2PD.RU_SAE zmm k zmm
//	VCVTQQ2PD.RU_SAE zmm zmm
//
// Construct and append a VCVTQQ2PD.RU_SAE instruction to the active function.
func (c *Context) VCVTQQ2PD_RU_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VCVTQQ2PD_RU_SAE(ops...))
}

// VCVTQQ2PD_RU_SAE: Convert Packed Quadword Integers to Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VCVTQQ2PD.RU_SAE zmm k zmm
//	VCVTQQ2PD.RU_SAE zmm zmm
//
// Construct and append a VCVTQQ2PD.RU_SAE instruction to the active function.
// Operates on the global context.
func VCVTQQ2PD_RU_SAE(ops ...operand.Op) { ctx.VCVTQQ2PD_RU_SAE(ops...) }

// VCVTQQ2PD_RU_SAE_Z: Convert Packed Quadword Integers to Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VCVTQQ2PD.RU_SAE.Z zmm k zmm
//
// Construct and append a VCVTQQ2PD.RU_SAE.Z instruction to the active function.
func (c *Context) VCVTQQ2PD_RU_SAE_Z(z, k, z1 operand.Op) {
	c.addinstruction(x86.VCVTQQ2PD_RU_SAE_Z(z, k, z1))
}

// VCVTQQ2PD_RU_SAE_Z: Convert Packed Quadword Integers to Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VCVTQQ2PD.RU_SAE.Z zmm k zmm
//
// Construct and append a VCVTQQ2PD.RU_SAE.Z instruction to the active function.
// Operates on the global context.
func VCVTQQ2PD_RU_SAE_Z(z, k, z1 operand.Op) { ctx.VCVTQQ2PD_RU_SAE_Z(z, k, z1) }

// VCVTQQ2PD_RZ_SAE: Convert Packed Quadword Integers to Packed Double-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VCVTQQ2PD.RZ_SAE zmm k zmm
//	VCVTQQ2PD.RZ_SAE zmm zmm
//
// Construct and append a VCVTQQ2PD.RZ_SAE instruction to the active function.
func (c *Context) VCVTQQ2PD_RZ_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VCVTQQ2PD_RZ_SAE(ops...))
}

// VCVTQQ2PD_RZ_SAE: Convert Packed Quadword Integers to Packed Double-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VCVTQQ2PD.RZ_SAE zmm k zmm
//	VCVTQQ2PD.RZ_SAE zmm zmm
//
// Construct and append a VCVTQQ2PD.RZ_SAE instruction to the active function.
// Operates on the global context.
func VCVTQQ2PD_RZ_SAE(ops ...operand.Op) { ctx.VCVTQQ2PD_RZ_SAE(ops...) }

// VCVTQQ2PD_RZ_SAE_Z: Convert Packed Quadword Integers to Packed Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VCVTQQ2PD.RZ_SAE.Z zmm k zmm
//
// Construct and append a VCVTQQ2PD.RZ_SAE.Z instruction to the active function.
func (c *Context) VCVTQQ2PD_RZ_SAE_Z(z, k, z1 operand.Op) {
	c.addinstruction(x86.VCVTQQ2PD_RZ_SAE_Z(z, k, z1))
}

// VCVTQQ2PD_RZ_SAE_Z: Convert Packed Quadword Integers to Packed Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VCVTQQ2PD.RZ_SAE.Z zmm k zmm
//
// Construct and append a VCVTQQ2PD.RZ_SAE.Z instruction to the active function.
// Operates on the global context.
func VCVTQQ2PD_RZ_SAE_Z(z, k, z1 operand.Op) { ctx.VCVTQQ2PD_RZ_SAE_Z(z, k, z1) }
|
|
|
|
// VCVTQQ2PD_Z: Convert Packed Quadword Integers to Packed Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VCVTQQ2PD.Z m128 k xmm
//	VCVTQQ2PD.Z m256 k ymm
//	VCVTQQ2PD.Z xmm k xmm
//	VCVTQQ2PD.Z ymm k ymm
//	VCVTQQ2PD.Z m512 k zmm
//	VCVTQQ2PD.Z zmm k zmm
//
// Construct and append a VCVTQQ2PD.Z instruction to the active function.
func (c *Context) VCVTQQ2PD_Z(mxyz, k, xyz operand.Op) {
	c.addinstruction(x86.VCVTQQ2PD_Z(mxyz, k, xyz))
}

// VCVTQQ2PD_Z: Convert Packed Quadword Integers to Packed Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VCVTQQ2PD.Z m128 k xmm
//	VCVTQQ2PD.Z m256 k ymm
//	VCVTQQ2PD.Z xmm k xmm
//	VCVTQQ2PD.Z ymm k ymm
//	VCVTQQ2PD.Z m512 k zmm
//	VCVTQQ2PD.Z zmm k zmm
//
// Construct and append a VCVTQQ2PD.Z instruction to the active function.
// Operates on the global context.
func VCVTQQ2PD_Z(mxyz, k, xyz operand.Op) { ctx.VCVTQQ2PD_Z(mxyz, k, xyz) }
|
|
|
|
// VCVTQQ2PS: Convert Packed Quadword Integers to Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	VCVTQQ2PS m512 k ymm
//	VCVTQQ2PS m512 ymm
//	VCVTQQ2PS zmm k ymm
//	VCVTQQ2PS zmm ymm
//
// Construct and append a VCVTQQ2PS instruction to the active function.
func (c *Context) VCVTQQ2PS(ops ...operand.Op) {
	c.addinstruction(x86.VCVTQQ2PS(ops...))
}

// VCVTQQ2PS: Convert Packed Quadword Integers to Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	VCVTQQ2PS m512 k ymm
//	VCVTQQ2PS m512 ymm
//	VCVTQQ2PS zmm k ymm
//	VCVTQQ2PS zmm ymm
//
// Construct and append a VCVTQQ2PS instruction to the active function.
// Operates on the global context.
func VCVTQQ2PS(ops ...operand.Op) { ctx.VCVTQQ2PS(ops...) }

// VCVTQQ2PSX: Convert Packed Quadword Integers to Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	VCVTQQ2PSX m128 k xmm
//	VCVTQQ2PSX m128 xmm
//	VCVTQQ2PSX xmm k xmm
//	VCVTQQ2PSX xmm xmm
//
// Construct and append a VCVTQQ2PSX instruction to the active function.
func (c *Context) VCVTQQ2PSX(ops ...operand.Op) {
	c.addinstruction(x86.VCVTQQ2PSX(ops...))
}

// VCVTQQ2PSX: Convert Packed Quadword Integers to Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	VCVTQQ2PSX m128 k xmm
//	VCVTQQ2PSX m128 xmm
//	VCVTQQ2PSX xmm k xmm
//	VCVTQQ2PSX xmm xmm
//
// Construct and append a VCVTQQ2PSX instruction to the active function.
// Operates on the global context.
func VCVTQQ2PSX(ops ...operand.Op) { ctx.VCVTQQ2PSX(ops...) }

// VCVTQQ2PSX_BCST: Convert Packed Quadword Integers to Packed Single-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VCVTQQ2PSX.BCST m64 k xmm
//	VCVTQQ2PSX.BCST m64 xmm
//
// Construct and append a VCVTQQ2PSX.BCST instruction to the active function.
func (c *Context) VCVTQQ2PSX_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VCVTQQ2PSX_BCST(ops...))
}

// VCVTQQ2PSX_BCST: Convert Packed Quadword Integers to Packed Single-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VCVTQQ2PSX.BCST m64 k xmm
//	VCVTQQ2PSX.BCST m64 xmm
//
// Construct and append a VCVTQQ2PSX.BCST instruction to the active function.
// Operates on the global context.
func VCVTQQ2PSX_BCST(ops ...operand.Op) { ctx.VCVTQQ2PSX_BCST(ops...) }

// VCVTQQ2PSX_BCST_Z: Convert Packed Quadword Integers to Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VCVTQQ2PSX.BCST.Z m64 k xmm
//
// Construct and append a VCVTQQ2PSX.BCST.Z instruction to the active function.
func (c *Context) VCVTQQ2PSX_BCST_Z(m, k, x operand.Op) {
	c.addinstruction(x86.VCVTQQ2PSX_BCST_Z(m, k, x))
}

// VCVTQQ2PSX_BCST_Z: Convert Packed Quadword Integers to Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VCVTQQ2PSX.BCST.Z m64 k xmm
//
// Construct and append a VCVTQQ2PSX.BCST.Z instruction to the active function.
// Operates on the global context.
func VCVTQQ2PSX_BCST_Z(m, k, x operand.Op) { ctx.VCVTQQ2PSX_BCST_Z(m, k, x) }

// VCVTQQ2PSX_Z: Convert Packed Quadword Integers to Packed Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VCVTQQ2PSX.Z m128 k xmm
//	VCVTQQ2PSX.Z xmm k xmm
//
// Construct and append a VCVTQQ2PSX.Z instruction to the active function.
func (c *Context) VCVTQQ2PSX_Z(mx, k, x operand.Op) {
	c.addinstruction(x86.VCVTQQ2PSX_Z(mx, k, x))
}

// VCVTQQ2PSX_Z: Convert Packed Quadword Integers to Packed Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VCVTQQ2PSX.Z m128 k xmm
//	VCVTQQ2PSX.Z xmm k xmm
//
// Construct and append a VCVTQQ2PSX.Z instruction to the active function.
// Operates on the global context.
func VCVTQQ2PSX_Z(mx, k, x operand.Op) { ctx.VCVTQQ2PSX_Z(mx, k, x) }
|
|
|
|
// VCVTQQ2PSY: Convert Packed Quadword Integers to Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	VCVTQQ2PSY m256 k xmm
//	VCVTQQ2PSY m256 xmm
//	VCVTQQ2PSY ymm k xmm
//	VCVTQQ2PSY ymm xmm
//
// Construct and append a VCVTQQ2PSY instruction to the active function.
func (c *Context) VCVTQQ2PSY(ops ...operand.Op) {
	c.addinstruction(x86.VCVTQQ2PSY(ops...))
}

// VCVTQQ2PSY: Convert Packed Quadword Integers to Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	VCVTQQ2PSY m256 k xmm
//	VCVTQQ2PSY m256 xmm
//	VCVTQQ2PSY ymm k xmm
//	VCVTQQ2PSY ymm xmm
//
// Construct and append a VCVTQQ2PSY instruction to the active function.
// Operates on the global context.
func VCVTQQ2PSY(ops ...operand.Op) { ctx.VCVTQQ2PSY(ops...) }

// VCVTQQ2PSY_BCST: Convert Packed Quadword Integers to Packed Single-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VCVTQQ2PSY.BCST m64 k xmm
//	VCVTQQ2PSY.BCST m64 xmm
//
// Construct and append a VCVTQQ2PSY.BCST instruction to the active function.
func (c *Context) VCVTQQ2PSY_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VCVTQQ2PSY_BCST(ops...))
}

// VCVTQQ2PSY_BCST: Convert Packed Quadword Integers to Packed Single-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VCVTQQ2PSY.BCST m64 k xmm
//	VCVTQQ2PSY.BCST m64 xmm
//
// Construct and append a VCVTQQ2PSY.BCST instruction to the active function.
// Operates on the global context.
func VCVTQQ2PSY_BCST(ops ...operand.Op) { ctx.VCVTQQ2PSY_BCST(ops...) }

// VCVTQQ2PSY_BCST_Z: Convert Packed Quadword Integers to Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VCVTQQ2PSY.BCST.Z m64 k xmm
//
// Construct and append a VCVTQQ2PSY.BCST.Z instruction to the active function.
func (c *Context) VCVTQQ2PSY_BCST_Z(m, k, x operand.Op) {
	c.addinstruction(x86.VCVTQQ2PSY_BCST_Z(m, k, x))
}

// VCVTQQ2PSY_BCST_Z: Convert Packed Quadword Integers to Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VCVTQQ2PSY.BCST.Z m64 k xmm
//
// Construct and append a VCVTQQ2PSY.BCST.Z instruction to the active function.
// Operates on the global context.
func VCVTQQ2PSY_BCST_Z(m, k, x operand.Op) { ctx.VCVTQQ2PSY_BCST_Z(m, k, x) }

// VCVTQQ2PSY_Z: Convert Packed Quadword Integers to Packed Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VCVTQQ2PSY.Z m256 k xmm
//	VCVTQQ2PSY.Z ymm k xmm
//
// Construct and append a VCVTQQ2PSY.Z instruction to the active function.
func (c *Context) VCVTQQ2PSY_Z(my, k, x operand.Op) {
	c.addinstruction(x86.VCVTQQ2PSY_Z(my, k, x))
}

// VCVTQQ2PSY_Z: Convert Packed Quadword Integers to Packed Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VCVTQQ2PSY.Z m256 k xmm
//	VCVTQQ2PSY.Z ymm k xmm
//
// Construct and append a VCVTQQ2PSY.Z instruction to the active function.
// Operates on the global context.
func VCVTQQ2PSY_Z(my, k, x operand.Op) { ctx.VCVTQQ2PSY_Z(my, k, x) }
|
|
|
|
// VCVTQQ2PS_BCST: Convert Packed Quadword Integers to Packed Single-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VCVTQQ2PS.BCST m64 k ymm
//	VCVTQQ2PS.BCST m64 ymm
//
// Construct and append a VCVTQQ2PS.BCST instruction to the active function.
func (c *Context) VCVTQQ2PS_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VCVTQQ2PS_BCST(ops...))
}

// VCVTQQ2PS_BCST: Convert Packed Quadword Integers to Packed Single-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VCVTQQ2PS.BCST m64 k ymm
//	VCVTQQ2PS.BCST m64 ymm
//
// Construct and append a VCVTQQ2PS.BCST instruction to the active function.
// Operates on the global context.
func VCVTQQ2PS_BCST(ops ...operand.Op) { ctx.VCVTQQ2PS_BCST(ops...) }

// VCVTQQ2PS_BCST_Z: Convert Packed Quadword Integers to Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VCVTQQ2PS.BCST.Z m64 k ymm
//
// Construct and append a VCVTQQ2PS.BCST.Z instruction to the active function.
func (c *Context) VCVTQQ2PS_BCST_Z(m, k, y operand.Op) {
	c.addinstruction(x86.VCVTQQ2PS_BCST_Z(m, k, y))
}

// VCVTQQ2PS_BCST_Z: Convert Packed Quadword Integers to Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VCVTQQ2PS.BCST.Z m64 k ymm
//
// Construct and append a VCVTQQ2PS.BCST.Z instruction to the active function.
// Operates on the global context.
func VCVTQQ2PS_BCST_Z(m, k, y operand.Op) { ctx.VCVTQQ2PS_BCST_Z(m, k, y) }
|
|
|
|
// VCVTQQ2PS_RD_SAE: Convert Packed Quadword Integers to Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VCVTQQ2PS.RD_SAE zmm k ymm
//	VCVTQQ2PS.RD_SAE zmm ymm
//
// Construct and append a VCVTQQ2PS.RD_SAE instruction to the active function.
func (c *Context) VCVTQQ2PS_RD_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VCVTQQ2PS_RD_SAE(ops...))
}

// VCVTQQ2PS_RD_SAE: Convert Packed Quadword Integers to Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VCVTQQ2PS.RD_SAE zmm k ymm
//	VCVTQQ2PS.RD_SAE zmm ymm
//
// Construct and append a VCVTQQ2PS.RD_SAE instruction to the active function.
// Operates on the global context.
func VCVTQQ2PS_RD_SAE(ops ...operand.Op) { ctx.VCVTQQ2PS_RD_SAE(ops...) }

// VCVTQQ2PS_RD_SAE_Z: Convert Packed Quadword Integers to Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VCVTQQ2PS.RD_SAE.Z zmm k ymm
//
// Construct and append a VCVTQQ2PS.RD_SAE.Z instruction to the active function.
func (c *Context) VCVTQQ2PS_RD_SAE_Z(z, k, y operand.Op) {
	c.addinstruction(x86.VCVTQQ2PS_RD_SAE_Z(z, k, y))
}

// VCVTQQ2PS_RD_SAE_Z: Convert Packed Quadword Integers to Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VCVTQQ2PS.RD_SAE.Z zmm k ymm
//
// Construct and append a VCVTQQ2PS.RD_SAE.Z instruction to the active function.
// Operates on the global context.
func VCVTQQ2PS_RD_SAE_Z(z, k, y operand.Op) { ctx.VCVTQQ2PS_RD_SAE_Z(z, k, y) }

// VCVTQQ2PS_RN_SAE: Convert Packed Quadword Integers to Packed Single-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VCVTQQ2PS.RN_SAE zmm k ymm
//	VCVTQQ2PS.RN_SAE zmm ymm
//
// Construct and append a VCVTQQ2PS.RN_SAE instruction to the active function.
func (c *Context) VCVTQQ2PS_RN_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VCVTQQ2PS_RN_SAE(ops...))
}

// VCVTQQ2PS_RN_SAE: Convert Packed Quadword Integers to Packed Single-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VCVTQQ2PS.RN_SAE zmm k ymm
//	VCVTQQ2PS.RN_SAE zmm ymm
//
// Construct and append a VCVTQQ2PS.RN_SAE instruction to the active function.
// Operates on the global context.
func VCVTQQ2PS_RN_SAE(ops ...operand.Op) { ctx.VCVTQQ2PS_RN_SAE(ops...) }

// VCVTQQ2PS_RN_SAE_Z: Convert Packed Quadword Integers to Packed Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VCVTQQ2PS.RN_SAE.Z zmm k ymm
//
// Construct and append a VCVTQQ2PS.RN_SAE.Z instruction to the active function.
func (c *Context) VCVTQQ2PS_RN_SAE_Z(z, k, y operand.Op) {
	c.addinstruction(x86.VCVTQQ2PS_RN_SAE_Z(z, k, y))
}

// VCVTQQ2PS_RN_SAE_Z: Convert Packed Quadword Integers to Packed Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VCVTQQ2PS.RN_SAE.Z zmm k ymm
//
// Construct and append a VCVTQQ2PS.RN_SAE.Z instruction to the active function.
// Operates on the global context.
func VCVTQQ2PS_RN_SAE_Z(z, k, y operand.Op) { ctx.VCVTQQ2PS_RN_SAE_Z(z, k, y) }

// VCVTQQ2PS_RU_SAE: Convert Packed Quadword Integers to Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VCVTQQ2PS.RU_SAE zmm k ymm
//	VCVTQQ2PS.RU_SAE zmm ymm
//
// Construct and append a VCVTQQ2PS.RU_SAE instruction to the active function.
func (c *Context) VCVTQQ2PS_RU_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VCVTQQ2PS_RU_SAE(ops...))
}

// VCVTQQ2PS_RU_SAE: Convert Packed Quadword Integers to Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VCVTQQ2PS.RU_SAE zmm k ymm
//	VCVTQQ2PS.RU_SAE zmm ymm
//
// Construct and append a VCVTQQ2PS.RU_SAE instruction to the active function.
// Operates on the global context.
func VCVTQQ2PS_RU_SAE(ops ...operand.Op) { ctx.VCVTQQ2PS_RU_SAE(ops...) }
|
|
|
|
// VCVTQQ2PS_RU_SAE_Z: Convert Packed Quadword Integers to Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTQQ2PS.RU_SAE.Z zmm k ymm
|
|
//
|
|
// Construct and append a VCVTQQ2PS.RU_SAE.Z instruction to the active function.
|
|
func (c *Context) VCVTQQ2PS_RU_SAE_Z(z, k, y operand.Op) {
|
|
c.addinstruction(x86.VCVTQQ2PS_RU_SAE_Z(z, k, y))
|
|
}
|
|
|
|
// VCVTQQ2PS_RU_SAE_Z: Convert Packed Quadword Integers to Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTQQ2PS.RU_SAE.Z zmm k ymm
|
|
//
|
|
// Construct and append a VCVTQQ2PS.RU_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTQQ2PS_RU_SAE_Z(z, k, y operand.Op) { ctx.VCVTQQ2PS_RU_SAE_Z(z, k, y) }
|
|
|
|
// VCVTQQ2PS_RZ_SAE: Convert Packed Quadword Integers to Packed Single-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTQQ2PS.RZ_SAE zmm k ymm
|
|
// VCVTQQ2PS.RZ_SAE zmm ymm
|
|
//
|
|
// Construct and append a VCVTQQ2PS.RZ_SAE instruction to the active function.
|
|
func (c *Context) VCVTQQ2PS_RZ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTQQ2PS_RZ_SAE(ops...))
|
|
}
|
|
|
|
// VCVTQQ2PS_RZ_SAE: Convert Packed Quadword Integers to Packed Single-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTQQ2PS.RZ_SAE zmm k ymm
|
|
// VCVTQQ2PS.RZ_SAE zmm ymm
|
|
//
|
|
// Construct and append a VCVTQQ2PS.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTQQ2PS_RZ_SAE(ops ...operand.Op) { ctx.VCVTQQ2PS_RZ_SAE(ops...) }
|
|
|
|
// VCVTQQ2PS_RZ_SAE_Z: Convert Packed Quadword Integers to Packed Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTQQ2PS.RZ_SAE.Z zmm k ymm
|
|
//
|
|
// Construct and append a VCVTQQ2PS.RZ_SAE.Z instruction to the active function.
|
|
func (c *Context) VCVTQQ2PS_RZ_SAE_Z(z, k, y operand.Op) {
|
|
c.addinstruction(x86.VCVTQQ2PS_RZ_SAE_Z(z, k, y))
|
|
}
|
|
|
|
// VCVTQQ2PS_RZ_SAE_Z: Convert Packed Quadword Integers to Packed Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTQQ2PS.RZ_SAE.Z zmm k ymm
|
|
//
|
|
// Construct and append a VCVTQQ2PS.RZ_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTQQ2PS_RZ_SAE_Z(z, k, y operand.Op) { ctx.VCVTQQ2PS_RZ_SAE_Z(z, k, y) }
|
|
|
|
// VCVTQQ2PS_Z: Convert Packed Quadword Integers to Packed Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTQQ2PS.Z m512 k ymm
|
|
// VCVTQQ2PS.Z zmm k ymm
|
|
//
|
|
// Construct and append a VCVTQQ2PS.Z instruction to the active function.
|
|
func (c *Context) VCVTQQ2PS_Z(mz, k, y operand.Op) {
|
|
c.addinstruction(x86.VCVTQQ2PS_Z(mz, k, y))
|
|
}
|
|
|
|
// VCVTQQ2PS_Z: Convert Packed Quadword Integers to Packed Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTQQ2PS.Z m512 k ymm
|
|
// VCVTQQ2PS.Z zmm k ymm
|
|
//
|
|
// Construct and append a VCVTQQ2PS.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTQQ2PS_Z(mz, k, y operand.Op) { ctx.VCVTQQ2PS_Z(mz, k, y) }
|
|
|
|
// VCVTSD2SI: Convert Scalar Double-Precision FP Value to Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2SI m64 r32
|
|
// VCVTSD2SI xmm r32
|
|
//
|
|
// Construct and append a VCVTSD2SI instruction to the active function.
|
|
func (c *Context) VCVTSD2SI(mx, r operand.Op) {
|
|
c.addinstruction(x86.VCVTSD2SI(mx, r))
|
|
}
|
|
|
|
// VCVTSD2SI: Convert Scalar Double-Precision FP Value to Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2SI m64 r32
|
|
// VCVTSD2SI xmm r32
|
|
//
|
|
// Construct and append a VCVTSD2SI instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSD2SI(mx, r operand.Op) { ctx.VCVTSD2SI(mx, r) }
|
|
|
|
// VCVTSD2SIQ: Convert Scalar Double-Precision FP Value to Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2SIQ m64 r64
|
|
// VCVTSD2SIQ xmm r64
|
|
//
|
|
// Construct and append a VCVTSD2SIQ instruction to the active function.
|
|
func (c *Context) VCVTSD2SIQ(mx, r operand.Op) {
|
|
c.addinstruction(x86.VCVTSD2SIQ(mx, r))
|
|
}
|
|
|
|
// VCVTSD2SIQ: Convert Scalar Double-Precision FP Value to Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2SIQ m64 r64
|
|
// VCVTSD2SIQ xmm r64
|
|
//
|
|
// Construct and append a VCVTSD2SIQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSD2SIQ(mx, r operand.Op) { ctx.VCVTSD2SIQ(mx, r) }
|
|
|
|
// VCVTSD2SIQ_RD_SAE: Convert Scalar Double-Precision FP Value to Integer (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2SIQ.RD_SAE xmm r64
|
|
//
|
|
// Construct and append a VCVTSD2SIQ.RD_SAE instruction to the active function.
|
|
func (c *Context) VCVTSD2SIQ_RD_SAE(x, r operand.Op) {
|
|
c.addinstruction(x86.VCVTSD2SIQ_RD_SAE(x, r))
|
|
}
|
|
|
|
// VCVTSD2SIQ_RD_SAE: Convert Scalar Double-Precision FP Value to Integer (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2SIQ.RD_SAE xmm r64
|
|
//
|
|
// Construct and append a VCVTSD2SIQ.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSD2SIQ_RD_SAE(x, r operand.Op) { ctx.VCVTSD2SIQ_RD_SAE(x, r) }
|
|
|
|
// VCVTSD2SIQ_RN_SAE: Convert Scalar Double-Precision FP Value to Integer (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2SIQ.RN_SAE xmm r64
|
|
//
|
|
// Construct and append a VCVTSD2SIQ.RN_SAE instruction to the active function.
|
|
func (c *Context) VCVTSD2SIQ_RN_SAE(x, r operand.Op) {
|
|
c.addinstruction(x86.VCVTSD2SIQ_RN_SAE(x, r))
|
|
}
|
|
|
|
// VCVTSD2SIQ_RN_SAE: Convert Scalar Double-Precision FP Value to Integer (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2SIQ.RN_SAE xmm r64
|
|
//
|
|
// Construct and append a VCVTSD2SIQ.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSD2SIQ_RN_SAE(x, r operand.Op) { ctx.VCVTSD2SIQ_RN_SAE(x, r) }
|
|
|
|
// VCVTSD2SIQ_RU_SAE: Convert Scalar Double-Precision FP Value to Integer (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2SIQ.RU_SAE xmm r64
|
|
//
|
|
// Construct and append a VCVTSD2SIQ.RU_SAE instruction to the active function.
|
|
func (c *Context) VCVTSD2SIQ_RU_SAE(x, r operand.Op) {
|
|
c.addinstruction(x86.VCVTSD2SIQ_RU_SAE(x, r))
|
|
}
|
|
|
|
// VCVTSD2SIQ_RU_SAE: Convert Scalar Double-Precision FP Value to Integer (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2SIQ.RU_SAE xmm r64
|
|
//
|
|
// Construct and append a VCVTSD2SIQ.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSD2SIQ_RU_SAE(x, r operand.Op) { ctx.VCVTSD2SIQ_RU_SAE(x, r) }
|
|
|
|
// VCVTSD2SIQ_RZ_SAE: Convert Scalar Double-Precision FP Value to Integer (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2SIQ.RZ_SAE xmm r64
|
|
//
|
|
// Construct and append a VCVTSD2SIQ.RZ_SAE instruction to the active function.
|
|
func (c *Context) VCVTSD2SIQ_RZ_SAE(x, r operand.Op) {
|
|
c.addinstruction(x86.VCVTSD2SIQ_RZ_SAE(x, r))
|
|
}
|
|
|
|
// VCVTSD2SIQ_RZ_SAE: Convert Scalar Double-Precision FP Value to Integer (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2SIQ.RZ_SAE xmm r64
|
|
//
|
|
// Construct and append a VCVTSD2SIQ.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSD2SIQ_RZ_SAE(x, r operand.Op) { ctx.VCVTSD2SIQ_RZ_SAE(x, r) }
|
|
|
|
// VCVTSD2SI_RD_SAE: Convert Scalar Double-Precision FP Value to Integer (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2SI.RD_SAE xmm r32
|
|
//
|
|
// Construct and append a VCVTSD2SI.RD_SAE instruction to the active function.
|
|
func (c *Context) VCVTSD2SI_RD_SAE(x, r operand.Op) {
|
|
c.addinstruction(x86.VCVTSD2SI_RD_SAE(x, r))
|
|
}
|
|
|
|
// VCVTSD2SI_RD_SAE: Convert Scalar Double-Precision FP Value to Integer (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2SI.RD_SAE xmm r32
|
|
//
|
|
// Construct and append a VCVTSD2SI.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSD2SI_RD_SAE(x, r operand.Op) { ctx.VCVTSD2SI_RD_SAE(x, r) }
|
|
|
|
// VCVTSD2SI_RN_SAE: Convert Scalar Double-Precision FP Value to Integer (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2SI.RN_SAE xmm r32
|
|
//
|
|
// Construct and append a VCVTSD2SI.RN_SAE instruction to the active function.
|
|
func (c *Context) VCVTSD2SI_RN_SAE(x, r operand.Op) {
|
|
c.addinstruction(x86.VCVTSD2SI_RN_SAE(x, r))
|
|
}
|
|
|
|
// VCVTSD2SI_RN_SAE: Convert Scalar Double-Precision FP Value to Integer (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2SI.RN_SAE xmm r32
|
|
//
|
|
// Construct and append a VCVTSD2SI.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSD2SI_RN_SAE(x, r operand.Op) { ctx.VCVTSD2SI_RN_SAE(x, r) }
|
|
|
|
// VCVTSD2SI_RU_SAE: Convert Scalar Double-Precision FP Value to Integer (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2SI.RU_SAE xmm r32
|
|
//
|
|
// Construct and append a VCVTSD2SI.RU_SAE instruction to the active function.
|
|
func (c *Context) VCVTSD2SI_RU_SAE(x, r operand.Op) {
|
|
c.addinstruction(x86.VCVTSD2SI_RU_SAE(x, r))
|
|
}
|
|
|
|
// VCVTSD2SI_RU_SAE: Convert Scalar Double-Precision FP Value to Integer (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2SI.RU_SAE xmm r32
|
|
//
|
|
// Construct and append a VCVTSD2SI.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSD2SI_RU_SAE(x, r operand.Op) { ctx.VCVTSD2SI_RU_SAE(x, r) }
|
|
|
|
// VCVTSD2SI_RZ_SAE: Convert Scalar Double-Precision FP Value to Integer (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2SI.RZ_SAE xmm r32
|
|
//
|
|
// Construct and append a VCVTSD2SI.RZ_SAE instruction to the active function.
|
|
func (c *Context) VCVTSD2SI_RZ_SAE(x, r operand.Op) {
|
|
c.addinstruction(x86.VCVTSD2SI_RZ_SAE(x, r))
|
|
}
|
|
|
|
// VCVTSD2SI_RZ_SAE: Convert Scalar Double-Precision FP Value to Integer (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2SI.RZ_SAE xmm r32
|
|
//
|
|
// Construct and append a VCVTSD2SI.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSD2SI_RZ_SAE(x, r operand.Op) { ctx.VCVTSD2SI_RZ_SAE(x, r) }
|
|
|
|
// VCVTSD2SS: Convert Scalar Double-Precision FP Value to Scalar Single-Precision FP Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2SS m64 xmm xmm
|
|
// VCVTSD2SS xmm xmm xmm
|
|
// VCVTSD2SS m64 xmm k xmm
|
|
// VCVTSD2SS xmm xmm k xmm
|
|
//
|
|
// Construct and append a VCVTSD2SS instruction to the active function.
|
|
func (c *Context) VCVTSD2SS(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTSD2SS(ops...))
|
|
}
|
|
|
|
// VCVTSD2SS: Convert Scalar Double-Precision FP Value to Scalar Single-Precision FP Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2SS m64 xmm xmm
|
|
// VCVTSD2SS xmm xmm xmm
|
|
// VCVTSD2SS m64 xmm k xmm
|
|
// VCVTSD2SS xmm xmm k xmm
|
|
//
|
|
// Construct and append a VCVTSD2SS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSD2SS(ops ...operand.Op) { ctx.VCVTSD2SS(ops...) }
|
|
|
|
// VCVTSD2SS_RD_SAE: Convert Scalar Double-Precision FP Value to Scalar Single-Precision FP Value (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2SS.RD_SAE xmm xmm k xmm
|
|
// VCVTSD2SS.RD_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VCVTSD2SS.RD_SAE instruction to the active function.
|
|
func (c *Context) VCVTSD2SS_RD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTSD2SS_RD_SAE(ops...))
|
|
}
|
|
|
|
// VCVTSD2SS_RD_SAE: Convert Scalar Double-Precision FP Value to Scalar Single-Precision FP Value (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2SS.RD_SAE xmm xmm k xmm
|
|
// VCVTSD2SS.RD_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VCVTSD2SS.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSD2SS_RD_SAE(ops ...operand.Op) { ctx.VCVTSD2SS_RD_SAE(ops...) }
|
|
|
|
// VCVTSD2SS_RD_SAE_Z: Convert Scalar Double-Precision FP Value to Scalar Single-Precision FP Value (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2SS.RD_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VCVTSD2SS.RD_SAE.Z instruction to the active function.
|
|
func (c *Context) VCVTSD2SS_RD_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VCVTSD2SS_RD_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VCVTSD2SS_RD_SAE_Z: Convert Scalar Double-Precision FP Value to Scalar Single-Precision FP Value (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2SS.RD_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VCVTSD2SS.RD_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSD2SS_RD_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VCVTSD2SS_RD_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VCVTSD2SS_RN_SAE: Convert Scalar Double-Precision FP Value to Scalar Single-Precision FP Value (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2SS.RN_SAE xmm xmm k xmm
|
|
// VCVTSD2SS.RN_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VCVTSD2SS.RN_SAE instruction to the active function.
|
|
func (c *Context) VCVTSD2SS_RN_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTSD2SS_RN_SAE(ops...))
|
|
}
|
|
|
|
// VCVTSD2SS_RN_SAE: Convert Scalar Double-Precision FP Value to Scalar Single-Precision FP Value (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2SS.RN_SAE xmm xmm k xmm
|
|
// VCVTSD2SS.RN_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VCVTSD2SS.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSD2SS_RN_SAE(ops ...operand.Op) { ctx.VCVTSD2SS_RN_SAE(ops...) }
|
|
|
|
// VCVTSD2SS_RN_SAE_Z: Convert Scalar Double-Precision FP Value to Scalar Single-Precision FP Value (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2SS.RN_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VCVTSD2SS.RN_SAE.Z instruction to the active function.
|
|
func (c *Context) VCVTSD2SS_RN_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VCVTSD2SS_RN_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VCVTSD2SS_RN_SAE_Z: Convert Scalar Double-Precision FP Value to Scalar Single-Precision FP Value (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2SS.RN_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VCVTSD2SS.RN_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSD2SS_RN_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VCVTSD2SS_RN_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VCVTSD2SS_RU_SAE: Convert Scalar Double-Precision FP Value to Scalar Single-Precision FP Value (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2SS.RU_SAE xmm xmm k xmm
|
|
// VCVTSD2SS.RU_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VCVTSD2SS.RU_SAE instruction to the active function.
|
|
func (c *Context) VCVTSD2SS_RU_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTSD2SS_RU_SAE(ops...))
|
|
}
|
|
|
|
// VCVTSD2SS_RU_SAE: Convert Scalar Double-Precision FP Value to Scalar Single-Precision FP Value (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2SS.RU_SAE xmm xmm k xmm
|
|
// VCVTSD2SS.RU_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VCVTSD2SS.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSD2SS_RU_SAE(ops ...operand.Op) { ctx.VCVTSD2SS_RU_SAE(ops...) }
|
|
|
|
// VCVTSD2SS_RU_SAE_Z: Convert Scalar Double-Precision FP Value to Scalar Single-Precision FP Value (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2SS.RU_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VCVTSD2SS.RU_SAE.Z instruction to the active function.
|
|
func (c *Context) VCVTSD2SS_RU_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VCVTSD2SS_RU_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VCVTSD2SS_RU_SAE_Z: Convert Scalar Double-Precision FP Value to Scalar Single-Precision FP Value (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2SS.RU_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VCVTSD2SS.RU_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSD2SS_RU_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VCVTSD2SS_RU_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VCVTSD2SS_RZ_SAE: Convert Scalar Double-Precision FP Value to Scalar Single-Precision FP Value (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2SS.RZ_SAE xmm xmm k xmm
|
|
// VCVTSD2SS.RZ_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VCVTSD2SS.RZ_SAE instruction to the active function.
|
|
func (c *Context) VCVTSD2SS_RZ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTSD2SS_RZ_SAE(ops...))
|
|
}
|
|
|
|
// VCVTSD2SS_RZ_SAE: Convert Scalar Double-Precision FP Value to Scalar Single-Precision FP Value (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2SS.RZ_SAE xmm xmm k xmm
|
|
// VCVTSD2SS.RZ_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VCVTSD2SS.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSD2SS_RZ_SAE(ops ...operand.Op) { ctx.VCVTSD2SS_RZ_SAE(ops...) }
|
|
|
|
// VCVTSD2SS_RZ_SAE_Z: Convert Scalar Double-Precision FP Value to Scalar Single-Precision FP Value (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2SS.RZ_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VCVTSD2SS.RZ_SAE.Z instruction to the active function.
|
|
func (c *Context) VCVTSD2SS_RZ_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VCVTSD2SS_RZ_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VCVTSD2SS_RZ_SAE_Z: Convert Scalar Double-Precision FP Value to Scalar Single-Precision FP Value (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2SS.RZ_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VCVTSD2SS.RZ_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSD2SS_RZ_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VCVTSD2SS_RZ_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VCVTSD2SS_Z: Convert Scalar Double-Precision FP Value to Scalar Single-Precision FP Value (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2SS.Z m64 xmm k xmm
|
|
// VCVTSD2SS.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VCVTSD2SS.Z instruction to the active function.
|
|
func (c *Context) VCVTSD2SS_Z(mx, x, k, x1 operand.Op) {
|
|
c.addinstruction(x86.VCVTSD2SS_Z(mx, x, k, x1))
|
|
}
|
|
|
|
// VCVTSD2SS_Z: Convert Scalar Double-Precision FP Value to Scalar Single-Precision FP Value (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2SS.Z m64 xmm k xmm
|
|
// VCVTSD2SS.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VCVTSD2SS.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSD2SS_Z(mx, x, k, x1 operand.Op) { ctx.VCVTSD2SS_Z(mx, x, k, x1) }
|
|
|
|
// VCVTSD2USIL: Convert Scalar Double-Precision Floating-Point Value to Unsigned Doubleword Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2USIL m64 r32
|
|
// VCVTSD2USIL xmm r32
|
|
//
|
|
// Construct and append a VCVTSD2USIL instruction to the active function.
|
|
func (c *Context) VCVTSD2USIL(mx, r operand.Op) {
|
|
c.addinstruction(x86.VCVTSD2USIL(mx, r))
|
|
}
|
|
|
|
// VCVTSD2USIL: Convert Scalar Double-Precision Floating-Point Value to Unsigned Doubleword Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2USIL m64 r32
|
|
// VCVTSD2USIL xmm r32
|
|
//
|
|
// Construct and append a VCVTSD2USIL instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSD2USIL(mx, r operand.Op) { ctx.VCVTSD2USIL(mx, r) }
|
|
|
|
// VCVTSD2USIL_RD_SAE: Convert Scalar Double-Precision Floating-Point Value to Unsigned Doubleword Integer (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2USIL.RD_SAE xmm r32
|
|
//
|
|
// Construct and append a VCVTSD2USIL.RD_SAE instruction to the active function.
|
|
func (c *Context) VCVTSD2USIL_RD_SAE(x, r operand.Op) {
|
|
c.addinstruction(x86.VCVTSD2USIL_RD_SAE(x, r))
|
|
}
|
|
|
|
// VCVTSD2USIL_RD_SAE: Convert Scalar Double-Precision Floating-Point Value to Unsigned Doubleword Integer (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2USIL.RD_SAE xmm r32
|
|
//
|
|
// Construct and append a VCVTSD2USIL.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSD2USIL_RD_SAE(x, r operand.Op) { ctx.VCVTSD2USIL_RD_SAE(x, r) }
|
|
|
|
// VCVTSD2USIL_RN_SAE: Convert Scalar Double-Precision Floating-Point Value to Unsigned Doubleword Integer (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2USIL.RN_SAE xmm r32
|
|
//
|
|
// Construct and append a VCVTSD2USIL.RN_SAE instruction to the active function.
|
|
func (c *Context) VCVTSD2USIL_RN_SAE(x, r operand.Op) {
|
|
c.addinstruction(x86.VCVTSD2USIL_RN_SAE(x, r))
|
|
}
|
|
|
|
// VCVTSD2USIL_RN_SAE: Convert Scalar Double-Precision Floating-Point Value to Unsigned Doubleword Integer (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2USIL.RN_SAE xmm r32
|
|
//
|
|
// Construct and append a VCVTSD2USIL.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSD2USIL_RN_SAE(x, r operand.Op) { ctx.VCVTSD2USIL_RN_SAE(x, r) }
|
|
|
|
// VCVTSD2USIL_RU_SAE: Convert Scalar Double-Precision Floating-Point Value to Unsigned Doubleword Integer (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2USIL.RU_SAE xmm r32
|
|
//
|
|
// Construct and append a VCVTSD2USIL.RU_SAE instruction to the active function.
|
|
func (c *Context) VCVTSD2USIL_RU_SAE(x, r operand.Op) {
|
|
c.addinstruction(x86.VCVTSD2USIL_RU_SAE(x, r))
|
|
}
|
|
|
|
// VCVTSD2USIL_RU_SAE: Convert Scalar Double-Precision Floating-Point Value to Unsigned Doubleword Integer (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2USIL.RU_SAE xmm r32
|
|
//
|
|
// Construct and append a VCVTSD2USIL.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSD2USIL_RU_SAE(x, r operand.Op) { ctx.VCVTSD2USIL_RU_SAE(x, r) }
|
|
|
|
// VCVTSD2USIL_RZ_SAE: Convert Scalar Double-Precision Floating-Point Value to Unsigned Doubleword Integer (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2USIL.RZ_SAE xmm r32
|
|
//
|
|
// Construct and append a VCVTSD2USIL.RZ_SAE instruction to the active function.
|
|
func (c *Context) VCVTSD2USIL_RZ_SAE(x, r operand.Op) {
|
|
c.addinstruction(x86.VCVTSD2USIL_RZ_SAE(x, r))
|
|
}
|
|
|
|
// VCVTSD2USIL_RZ_SAE: Convert Scalar Double-Precision Floating-Point Value to Unsigned Doubleword Integer (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2USIL.RZ_SAE xmm r32
|
|
//
|
|
// Construct and append a VCVTSD2USIL.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSD2USIL_RZ_SAE(x, r operand.Op) { ctx.VCVTSD2USIL_RZ_SAE(x, r) }
|
|
|
|
// VCVTSD2USIQ: Convert Scalar Double-Precision Floating-Point Value to Unsigned Doubleword Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2USIQ m64 r64
|
|
// VCVTSD2USIQ xmm r64
|
|
//
|
|
// Construct and append a VCVTSD2USIQ instruction to the active function.
|
|
func (c *Context) VCVTSD2USIQ(mx, r operand.Op) {
|
|
c.addinstruction(x86.VCVTSD2USIQ(mx, r))
|
|
}
|
|
|
|
// VCVTSD2USIQ: Convert Scalar Double-Precision Floating-Point Value to Unsigned Doubleword Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2USIQ m64 r64
|
|
// VCVTSD2USIQ xmm r64
|
|
//
|
|
// Construct and append a VCVTSD2USIQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSD2USIQ(mx, r operand.Op) { ctx.VCVTSD2USIQ(mx, r) }
|
|
|
|
// VCVTSD2USIQ_RD_SAE: Convert Scalar Double-Precision Floating-Point Value to Unsigned Doubleword Integer (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2USIQ.RD_SAE xmm r64
|
|
//
|
|
// Construct and append a VCVTSD2USIQ.RD_SAE instruction to the active function.
|
|
func (c *Context) VCVTSD2USIQ_RD_SAE(x, r operand.Op) {
|
|
c.addinstruction(x86.VCVTSD2USIQ_RD_SAE(x, r))
|
|
}
|
|
|
|
// VCVTSD2USIQ_RD_SAE: Convert Scalar Double-Precision Floating-Point Value to Unsigned Doubleword Integer (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2USIQ.RD_SAE xmm r64
|
|
//
|
|
// Construct and append a VCVTSD2USIQ.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSD2USIQ_RD_SAE(x, r operand.Op) { ctx.VCVTSD2USIQ_RD_SAE(x, r) }
|
|
|
|
// VCVTSD2USIQ_RN_SAE: Convert Scalar Double-Precision Floating-Point Value to Unsigned Doubleword Integer (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2USIQ.RN_SAE xmm r64
|
|
//
|
|
// Construct and append a VCVTSD2USIQ.RN_SAE instruction to the active function.
|
|
func (c *Context) VCVTSD2USIQ_RN_SAE(x, r operand.Op) {
|
|
c.addinstruction(x86.VCVTSD2USIQ_RN_SAE(x, r))
|
|
}
|
|
|
|
// VCVTSD2USIQ_RN_SAE: Convert Scalar Double-Precision Floating-Point Value to Unsigned Doubleword Integer (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2USIQ.RN_SAE xmm r64
|
|
//
|
|
// Construct and append a VCVTSD2USIQ.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSD2USIQ_RN_SAE(x, r operand.Op) { ctx.VCVTSD2USIQ_RN_SAE(x, r) }
|
|
|
|
// VCVTSD2USIQ_RU_SAE: Convert Scalar Double-Precision Floating-Point Value to Unsigned Doubleword Integer (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2USIQ.RU_SAE xmm r64
|
|
//
|
|
// Construct and append a VCVTSD2USIQ.RU_SAE instruction to the active function.
|
|
func (c *Context) VCVTSD2USIQ_RU_SAE(x, r operand.Op) {
|
|
c.addinstruction(x86.VCVTSD2USIQ_RU_SAE(x, r))
|
|
}
|
|
|
|
// VCVTSD2USIQ_RU_SAE: Convert Scalar Double-Precision Floating-Point Value to Unsigned Doubleword Integer (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2USIQ.RU_SAE xmm r64
|
|
//
|
|
// Construct and append a VCVTSD2USIQ.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSD2USIQ_RU_SAE(x, r operand.Op) { ctx.VCVTSD2USIQ_RU_SAE(x, r) }
|
|
|
|
// VCVTSD2USIQ_RZ_SAE: Convert Scalar Double-Precision Floating-Point Value to Unsigned Doubleword Integer (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2USIQ.RZ_SAE xmm r64
|
|
//
|
|
// Construct and append a VCVTSD2USIQ.RZ_SAE instruction to the active function.
|
|
func (c *Context) VCVTSD2USIQ_RZ_SAE(x, r operand.Op) {
|
|
c.addinstruction(x86.VCVTSD2USIQ_RZ_SAE(x, r))
|
|
}
|
|
|
|
// VCVTSD2USIQ_RZ_SAE: Convert Scalar Double-Precision Floating-Point Value to Unsigned Doubleword Integer (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSD2USIQ.RZ_SAE xmm r64
|
|
//
|
|
// Construct and append a VCVTSD2USIQ.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSD2USIQ_RZ_SAE(x, r operand.Op) { ctx.VCVTSD2USIQ_RZ_SAE(x, r) }
|
|
|
|
// VCVTSI2SDL: Convert Dword Integer to Scalar Double-Precision FP Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSI2SDL m32 xmm xmm
|
|
// VCVTSI2SDL r32 xmm xmm
|
|
//
|
|
// Construct and append a VCVTSI2SDL instruction to the active function.
|
|
func (c *Context) VCVTSI2SDL(mr, x, x1 operand.Op) {
|
|
c.addinstruction(x86.VCVTSI2SDL(mr, x, x1))
|
|
}
|
|
|
|
// VCVTSI2SDL: Convert Dword Integer to Scalar Double-Precision FP Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSI2SDL m32 xmm xmm
|
|
// VCVTSI2SDL r32 xmm xmm
|
|
//
|
|
// Construct and append a VCVTSI2SDL instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSI2SDL(mr, x, x1 operand.Op) { ctx.VCVTSI2SDL(mr, x, x1) }
|
|
|
|
// VCVTSI2SDQ: Convert Dword Integer to Scalar Double-Precision FP Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSI2SDQ m64 xmm xmm
|
|
// VCVTSI2SDQ r64 xmm xmm
|
|
//
|
|
// Construct and append a VCVTSI2SDQ instruction to the active function.
|
|
func (c *Context) VCVTSI2SDQ(mr, x, x1 operand.Op) {
|
|
c.addinstruction(x86.VCVTSI2SDQ(mr, x, x1))
|
|
}
|
|
|
|
// VCVTSI2SDQ: Convert Dword Integer to Scalar Double-Precision FP Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSI2SDQ m64 xmm xmm
|
|
// VCVTSI2SDQ r64 xmm xmm
|
|
//
|
|
// Construct and append a VCVTSI2SDQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSI2SDQ(mr, x, x1 operand.Op) { ctx.VCVTSI2SDQ(mr, x, x1) }
|
|
|
|
// VCVTSI2SDQ_RD_SAE: Convert Dword Integer to Scalar Double-Precision FP Value (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSI2SDQ.RD_SAE r64 xmm xmm
|
|
//
|
|
// Construct and append a VCVTSI2SDQ.RD_SAE instruction to the active function.
|
|
func (c *Context) VCVTSI2SDQ_RD_SAE(r, x, x1 operand.Op) {
|
|
c.addinstruction(x86.VCVTSI2SDQ_RD_SAE(r, x, x1))
|
|
}
|
|
|
|
// VCVTSI2SDQ_RD_SAE: Convert Dword Integer to Scalar Double-Precision FP Value (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSI2SDQ.RD_SAE r64 xmm xmm
|
|
//
|
|
// Construct and append a VCVTSI2SDQ.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSI2SDQ_RD_SAE(r, x, x1 operand.Op) { ctx.VCVTSI2SDQ_RD_SAE(r, x, x1) }
|
|
|
|
// VCVTSI2SDQ_RN_SAE: Convert Dword Integer to Scalar Double-Precision FP Value (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSI2SDQ.RN_SAE r64 xmm xmm
|
|
//
|
|
// Construct and append a VCVTSI2SDQ.RN_SAE instruction to the active function.
|
|
func (c *Context) VCVTSI2SDQ_RN_SAE(r, x, x1 operand.Op) {
|
|
c.addinstruction(x86.VCVTSI2SDQ_RN_SAE(r, x, x1))
|
|
}
|
|
|
|
// VCVTSI2SDQ_RN_SAE: Convert Dword Integer to Scalar Double-Precision FP Value (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSI2SDQ.RN_SAE r64 xmm xmm
|
|
//
|
|
// Construct and append a VCVTSI2SDQ.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSI2SDQ_RN_SAE(r, x, x1 operand.Op) { ctx.VCVTSI2SDQ_RN_SAE(r, x, x1) }
|
|
|
|
// VCVTSI2SDQ_RU_SAE: Convert Dword Integer to Scalar Double-Precision FP Value (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSI2SDQ.RU_SAE r64 xmm xmm
|
|
//
|
|
// Construct and append a VCVTSI2SDQ.RU_SAE instruction to the active function.
|
|
func (c *Context) VCVTSI2SDQ_RU_SAE(r, x, x1 operand.Op) {
|
|
c.addinstruction(x86.VCVTSI2SDQ_RU_SAE(r, x, x1))
|
|
}
|
|
|
|
// VCVTSI2SDQ_RU_SAE: Convert Dword Integer to Scalar Double-Precision FP Value (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSI2SDQ.RU_SAE r64 xmm xmm
|
|
//
|
|
// Construct and append a VCVTSI2SDQ.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSI2SDQ_RU_SAE(r, x, x1 operand.Op) { ctx.VCVTSI2SDQ_RU_SAE(r, x, x1) }
|
|
|
|
// VCVTSI2SDQ_RZ_SAE: Convert Dword Integer to Scalar Double-Precision FP Value (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSI2SDQ.RZ_SAE r64 xmm xmm
|
|
//
|
|
// Construct and append a VCVTSI2SDQ.RZ_SAE instruction to the active function.
|
|
func (c *Context) VCVTSI2SDQ_RZ_SAE(r, x, x1 operand.Op) {
|
|
c.addinstruction(x86.VCVTSI2SDQ_RZ_SAE(r, x, x1))
|
|
}
|
|
|
|
// VCVTSI2SDQ_RZ_SAE: Convert Dword Integer to Scalar Double-Precision FP Value (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSI2SDQ.RZ_SAE r64 xmm xmm
|
|
//
|
|
// Construct and append a VCVTSI2SDQ.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSI2SDQ_RZ_SAE(r, x, x1 operand.Op) { ctx.VCVTSI2SDQ_RZ_SAE(r, x, x1) }
|
|
|
|
// VCVTSI2SSL: Convert Dword Integer to Scalar Single-Precision FP Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSI2SSL m32 xmm xmm
|
|
// VCVTSI2SSL r32 xmm xmm
|
|
//
|
|
// Construct and append a VCVTSI2SSL instruction to the active function.
|
|
func (c *Context) VCVTSI2SSL(mr, x, x1 operand.Op) {
|
|
c.addinstruction(x86.VCVTSI2SSL(mr, x, x1))
|
|
}
|
|
|
|
// VCVTSI2SSL: Convert Dword Integer to Scalar Single-Precision FP Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSI2SSL m32 xmm xmm
|
|
// VCVTSI2SSL r32 xmm xmm
|
|
//
|
|
// Construct and append a VCVTSI2SSL instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSI2SSL(mr, x, x1 operand.Op) { ctx.VCVTSI2SSL(mr, x, x1) }
|
|
|
|
// VCVTSI2SSL_RD_SAE: Convert Dword Integer to Scalar Single-Precision FP Value (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSI2SSL.RD_SAE r32 xmm xmm
|
|
//
|
|
// Construct and append a VCVTSI2SSL.RD_SAE instruction to the active function.
|
|
func (c *Context) VCVTSI2SSL_RD_SAE(r, x, x1 operand.Op) {
|
|
c.addinstruction(x86.VCVTSI2SSL_RD_SAE(r, x, x1))
|
|
}
|
|
|
|
// VCVTSI2SSL_RD_SAE: Convert Dword Integer to Scalar Single-Precision FP Value (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSI2SSL.RD_SAE r32 xmm xmm
|
|
//
|
|
// Construct and append a VCVTSI2SSL.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSI2SSL_RD_SAE(r, x, x1 operand.Op) { ctx.VCVTSI2SSL_RD_SAE(r, x, x1) }
|
|
|
|
// VCVTSI2SSL_RN_SAE: Convert Dword Integer to Scalar Single-Precision FP Value (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSI2SSL.RN_SAE r32 xmm xmm
|
|
//
|
|
// Construct and append a VCVTSI2SSL.RN_SAE instruction to the active function.
|
|
func (c *Context) VCVTSI2SSL_RN_SAE(r, x, x1 operand.Op) {
|
|
c.addinstruction(x86.VCVTSI2SSL_RN_SAE(r, x, x1))
|
|
}
|
|
|
|
// VCVTSI2SSL_RN_SAE: Convert Dword Integer to Scalar Single-Precision FP Value (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSI2SSL.RN_SAE r32 xmm xmm
|
|
//
|
|
// Construct and append a VCVTSI2SSL.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSI2SSL_RN_SAE(r, x, x1 operand.Op) { ctx.VCVTSI2SSL_RN_SAE(r, x, x1) }
|
|
|
|
// VCVTSI2SSL_RU_SAE: Convert Dword Integer to Scalar Single-Precision FP Value (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSI2SSL.RU_SAE r32 xmm xmm
|
|
//
|
|
// Construct and append a VCVTSI2SSL.RU_SAE instruction to the active function.
|
|
func (c *Context) VCVTSI2SSL_RU_SAE(r, x, x1 operand.Op) {
|
|
c.addinstruction(x86.VCVTSI2SSL_RU_SAE(r, x, x1))
|
|
}
|
|
|
|
// VCVTSI2SSL_RU_SAE: Convert Dword Integer to Scalar Single-Precision FP Value (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSI2SSL.RU_SAE r32 xmm xmm
|
|
//
|
|
// Construct and append a VCVTSI2SSL.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSI2SSL_RU_SAE(r, x, x1 operand.Op) { ctx.VCVTSI2SSL_RU_SAE(r, x, x1) }
|
|
|
|
// VCVTSI2SSL_RZ_SAE: Convert Dword Integer to Scalar Single-Precision FP Value (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSI2SSL.RZ_SAE r32 xmm xmm
|
|
//
|
|
// Construct and append a VCVTSI2SSL.RZ_SAE instruction to the active function.
|
|
func (c *Context) VCVTSI2SSL_RZ_SAE(r, x, x1 operand.Op) {
|
|
c.addinstruction(x86.VCVTSI2SSL_RZ_SAE(r, x, x1))
|
|
}
|
|
|
|
// VCVTSI2SSL_RZ_SAE: Convert Dword Integer to Scalar Single-Precision FP Value (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSI2SSL.RZ_SAE r32 xmm xmm
|
|
//
|
|
// Construct and append a VCVTSI2SSL.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSI2SSL_RZ_SAE(r, x, x1 operand.Op) { ctx.VCVTSI2SSL_RZ_SAE(r, x, x1) }
|
|
|
|
// VCVTSI2SSQ: Convert Dword Integer to Scalar Single-Precision FP Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSI2SSQ m64 xmm xmm
|
|
// VCVTSI2SSQ r64 xmm xmm
|
|
//
|
|
// Construct and append a VCVTSI2SSQ instruction to the active function.
|
|
func (c *Context) VCVTSI2SSQ(mr, x, x1 operand.Op) {
|
|
c.addinstruction(x86.VCVTSI2SSQ(mr, x, x1))
|
|
}
|
|
|
|
// VCVTSI2SSQ: Convert Dword Integer to Scalar Single-Precision FP Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSI2SSQ m64 xmm xmm
|
|
// VCVTSI2SSQ r64 xmm xmm
|
|
//
|
|
// Construct and append a VCVTSI2SSQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSI2SSQ(mr, x, x1 operand.Op) { ctx.VCVTSI2SSQ(mr, x, x1) }
|
|
|
|
// VCVTSI2SSQ_RD_SAE: Convert Dword Integer to Scalar Single-Precision FP Value (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSI2SSQ.RD_SAE r64 xmm xmm
|
|
//
|
|
// Construct and append a VCVTSI2SSQ.RD_SAE instruction to the active function.
|
|
func (c *Context) VCVTSI2SSQ_RD_SAE(r, x, x1 operand.Op) {
|
|
c.addinstruction(x86.VCVTSI2SSQ_RD_SAE(r, x, x1))
|
|
}
|
|
|
|
// VCVTSI2SSQ_RD_SAE: Convert Dword Integer to Scalar Single-Precision FP Value (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSI2SSQ.RD_SAE r64 xmm xmm
|
|
//
|
|
// Construct and append a VCVTSI2SSQ.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSI2SSQ_RD_SAE(r, x, x1 operand.Op) { ctx.VCVTSI2SSQ_RD_SAE(r, x, x1) }
|
|
|
|
// VCVTSI2SSQ_RN_SAE: Convert Dword Integer to Scalar Single-Precision FP Value (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSI2SSQ.RN_SAE r64 xmm xmm
|
|
//
|
|
// Construct and append a VCVTSI2SSQ.RN_SAE instruction to the active function.
|
|
func (c *Context) VCVTSI2SSQ_RN_SAE(r, x, x1 operand.Op) {
|
|
c.addinstruction(x86.VCVTSI2SSQ_RN_SAE(r, x, x1))
|
|
}
|
|
|
|
// VCVTSI2SSQ_RN_SAE: Convert Dword Integer to Scalar Single-Precision FP Value (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSI2SSQ.RN_SAE r64 xmm xmm
|
|
//
|
|
// Construct and append a VCVTSI2SSQ.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSI2SSQ_RN_SAE(r, x, x1 operand.Op) { ctx.VCVTSI2SSQ_RN_SAE(r, x, x1) }
|
|
|
|
// VCVTSI2SSQ_RU_SAE: Convert Dword Integer to Scalar Single-Precision FP Value (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSI2SSQ.RU_SAE r64 xmm xmm
|
|
//
|
|
// Construct and append a VCVTSI2SSQ.RU_SAE instruction to the active function.
|
|
func (c *Context) VCVTSI2SSQ_RU_SAE(r, x, x1 operand.Op) {
|
|
c.addinstruction(x86.VCVTSI2SSQ_RU_SAE(r, x, x1))
|
|
}
|
|
|
|
// VCVTSI2SSQ_RU_SAE: Convert Dword Integer to Scalar Single-Precision FP Value (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSI2SSQ.RU_SAE r64 xmm xmm
|
|
//
|
|
// Construct and append a VCVTSI2SSQ.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSI2SSQ_RU_SAE(r, x, x1 operand.Op) { ctx.VCVTSI2SSQ_RU_SAE(r, x, x1) }
|
|
|
|
// VCVTSI2SSQ_RZ_SAE: Convert Dword Integer to Scalar Single-Precision FP Value (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSI2SSQ.RZ_SAE r64 xmm xmm
|
|
//
|
|
// Construct and append a VCVTSI2SSQ.RZ_SAE instruction to the active function.
|
|
func (c *Context) VCVTSI2SSQ_RZ_SAE(r, x, x1 operand.Op) {
|
|
c.addinstruction(x86.VCVTSI2SSQ_RZ_SAE(r, x, x1))
|
|
}
|
|
|
|
// VCVTSI2SSQ_RZ_SAE: Convert Dword Integer to Scalar Single-Precision FP Value (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSI2SSQ.RZ_SAE r64 xmm xmm
|
|
//
|
|
// Construct and append a VCVTSI2SSQ.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSI2SSQ_RZ_SAE(r, x, x1 operand.Op) { ctx.VCVTSI2SSQ_RZ_SAE(r, x, x1) }
|
|
|
|
// VCVTSS2SD: Convert Scalar Single-Precision FP Value to Scalar Double-Precision FP Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSS2SD m32 xmm xmm
|
|
// VCVTSS2SD xmm xmm xmm
|
|
// VCVTSS2SD m32 xmm k xmm
|
|
// VCVTSS2SD xmm xmm k xmm
|
|
//
|
|
// Construct and append a VCVTSS2SD instruction to the active function.
|
|
func (c *Context) VCVTSS2SD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTSS2SD(ops...))
|
|
}
|
|
|
|
// VCVTSS2SD: Convert Scalar Single-Precision FP Value to Scalar Double-Precision FP Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSS2SD m32 xmm xmm
|
|
// VCVTSS2SD xmm xmm xmm
|
|
// VCVTSS2SD m32 xmm k xmm
|
|
// VCVTSS2SD xmm xmm k xmm
|
|
//
|
|
// Construct and append a VCVTSS2SD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSS2SD(ops ...operand.Op) { ctx.VCVTSS2SD(ops...) }
|
|
|
|
// VCVTSS2SD_SAE: Convert Scalar Single-Precision FP Value to Scalar Double-Precision FP Value (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSS2SD.SAE xmm xmm k xmm
|
|
// VCVTSS2SD.SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VCVTSS2SD.SAE instruction to the active function.
|
|
func (c *Context) VCVTSS2SD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTSS2SD_SAE(ops...))
|
|
}
|
|
|
|
// VCVTSS2SD_SAE: Convert Scalar Single-Precision FP Value to Scalar Double-Precision FP Value (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSS2SD.SAE xmm xmm k xmm
|
|
// VCVTSS2SD.SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VCVTSS2SD.SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSS2SD_SAE(ops ...operand.Op) { ctx.VCVTSS2SD_SAE(ops...) }
|
|
|
|
// VCVTSS2SD_SAE_Z: Convert Scalar Single-Precision FP Value to Scalar Double-Precision FP Value (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSS2SD.SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VCVTSS2SD.SAE.Z instruction to the active function.
|
|
func (c *Context) VCVTSS2SD_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VCVTSS2SD_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VCVTSS2SD_SAE_Z: Convert Scalar Single-Precision FP Value to Scalar Double-Precision FP Value (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSS2SD.SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VCVTSS2SD.SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSS2SD_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VCVTSS2SD_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VCVTSS2SD_Z: Convert Scalar Single-Precision FP Value to Scalar Double-Precision FP Value (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSS2SD.Z m32 xmm k xmm
|
|
// VCVTSS2SD.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VCVTSS2SD.Z instruction to the active function.
|
|
func (c *Context) VCVTSS2SD_Z(mx, x, k, x1 operand.Op) {
|
|
c.addinstruction(x86.VCVTSS2SD_Z(mx, x, k, x1))
|
|
}
|
|
|
|
// VCVTSS2SD_Z: Convert Scalar Single-Precision FP Value to Scalar Double-Precision FP Value (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSS2SD.Z m32 xmm k xmm
|
|
// VCVTSS2SD.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VCVTSS2SD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSS2SD_Z(mx, x, k, x1 operand.Op) { ctx.VCVTSS2SD_Z(mx, x, k, x1) }
|
|
|
|
// VCVTSS2SI: Convert Scalar Single-Precision FP Value to Dword Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSS2SI m32 r32
|
|
// VCVTSS2SI xmm r32
|
|
//
|
|
// Construct and append a VCVTSS2SI instruction to the active function.
|
|
func (c *Context) VCVTSS2SI(mx, r operand.Op) {
|
|
c.addinstruction(x86.VCVTSS2SI(mx, r))
|
|
}
|
|
|
|
// VCVTSS2SI: Convert Scalar Single-Precision FP Value to Dword Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSS2SI m32 r32
|
|
// VCVTSS2SI xmm r32
|
|
//
|
|
// Construct and append a VCVTSS2SI instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSS2SI(mx, r operand.Op) { ctx.VCVTSS2SI(mx, r) }
|
|
|
|
// VCVTSS2SIQ: Convert Scalar Single-Precision FP Value to Dword Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSS2SIQ m32 r64
|
|
// VCVTSS2SIQ xmm r64
|
|
//
|
|
// Construct and append a VCVTSS2SIQ instruction to the active function.
|
|
func (c *Context) VCVTSS2SIQ(mx, r operand.Op) {
|
|
c.addinstruction(x86.VCVTSS2SIQ(mx, r))
|
|
}
|
|
|
|
// VCVTSS2SIQ: Convert Scalar Single-Precision FP Value to Dword Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSS2SIQ m32 r64
|
|
// VCVTSS2SIQ xmm r64
|
|
//
|
|
// Construct and append a VCVTSS2SIQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSS2SIQ(mx, r operand.Op) { ctx.VCVTSS2SIQ(mx, r) }
|
|
|
|
// VCVTSS2SIQ_RD_SAE: Convert Scalar Single-Precision FP Value to Dword Integer (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSS2SIQ.RD_SAE xmm r64
|
|
//
|
|
// Construct and append a VCVTSS2SIQ.RD_SAE instruction to the active function.
|
|
func (c *Context) VCVTSS2SIQ_RD_SAE(x, r operand.Op) {
|
|
c.addinstruction(x86.VCVTSS2SIQ_RD_SAE(x, r))
|
|
}
|
|
|
|
// VCVTSS2SIQ_RD_SAE: Convert Scalar Single-Precision FP Value to Dword Integer (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSS2SIQ.RD_SAE xmm r64
|
|
//
|
|
// Construct and append a VCVTSS2SIQ.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSS2SIQ_RD_SAE(x, r operand.Op) { ctx.VCVTSS2SIQ_RD_SAE(x, r) }
|
|
|
|
// VCVTSS2SIQ_RN_SAE: Convert Scalar Single-Precision FP Value to Dword Integer (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSS2SIQ.RN_SAE xmm r64
|
|
//
|
|
// Construct and append a VCVTSS2SIQ.RN_SAE instruction to the active function.
|
|
func (c *Context) VCVTSS2SIQ_RN_SAE(x, r operand.Op) {
|
|
c.addinstruction(x86.VCVTSS2SIQ_RN_SAE(x, r))
|
|
}
|
|
|
|
// VCVTSS2SIQ_RN_SAE: Convert Scalar Single-Precision FP Value to Dword Integer (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSS2SIQ.RN_SAE xmm r64
|
|
//
|
|
// Construct and append a VCVTSS2SIQ.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSS2SIQ_RN_SAE(x, r operand.Op) { ctx.VCVTSS2SIQ_RN_SAE(x, r) }
|
|
|
|
// VCVTSS2SIQ_RU_SAE: Convert Scalar Single-Precision FP Value to Dword Integer (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSS2SIQ.RU_SAE xmm r64
|
|
//
|
|
// Construct and append a VCVTSS2SIQ.RU_SAE instruction to the active function.
|
|
func (c *Context) VCVTSS2SIQ_RU_SAE(x, r operand.Op) {
|
|
c.addinstruction(x86.VCVTSS2SIQ_RU_SAE(x, r))
|
|
}
|
|
|
|
// VCVTSS2SIQ_RU_SAE: Convert Scalar Single-Precision FP Value to Dword Integer (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSS2SIQ.RU_SAE xmm r64
|
|
//
|
|
// Construct and append a VCVTSS2SIQ.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSS2SIQ_RU_SAE(x, r operand.Op) { ctx.VCVTSS2SIQ_RU_SAE(x, r) }
|
|
|
|
// VCVTSS2SIQ_RZ_SAE: Convert Scalar Single-Precision FP Value to Dword Integer (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSS2SIQ.RZ_SAE xmm r64
|
|
//
|
|
// Construct and append a VCVTSS2SIQ.RZ_SAE instruction to the active function.
|
|
func (c *Context) VCVTSS2SIQ_RZ_SAE(x, r operand.Op) {
|
|
c.addinstruction(x86.VCVTSS2SIQ_RZ_SAE(x, r))
|
|
}
|
|
|
|
// VCVTSS2SIQ_RZ_SAE: Convert Scalar Single-Precision FP Value to Dword Integer (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSS2SIQ.RZ_SAE xmm r64
|
|
//
|
|
// Construct and append a VCVTSS2SIQ.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSS2SIQ_RZ_SAE(x, r operand.Op) { ctx.VCVTSS2SIQ_RZ_SAE(x, r) }
|
|
|
|
// VCVTSS2SI_RD_SAE: Convert Scalar Single-Precision FP Value to Dword Integer (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSS2SI.RD_SAE xmm r32
|
|
//
|
|
// Construct and append a VCVTSS2SI.RD_SAE instruction to the active function.
|
|
func (c *Context) VCVTSS2SI_RD_SAE(x, r operand.Op) {
|
|
c.addinstruction(x86.VCVTSS2SI_RD_SAE(x, r))
|
|
}
|
|
|
|
// VCVTSS2SI_RD_SAE: Convert Scalar Single-Precision FP Value to Dword Integer (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSS2SI.RD_SAE xmm r32
|
|
//
|
|
// Construct and append a VCVTSS2SI.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSS2SI_RD_SAE(x, r operand.Op) { ctx.VCVTSS2SI_RD_SAE(x, r) }
|
|
|
|
// VCVTSS2SI_RN_SAE: Convert Scalar Single-Precision FP Value to Dword Integer (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSS2SI.RN_SAE xmm r32
|
|
//
|
|
// Construct and append a VCVTSS2SI.RN_SAE instruction to the active function.
|
|
func (c *Context) VCVTSS2SI_RN_SAE(x, r operand.Op) {
|
|
c.addinstruction(x86.VCVTSS2SI_RN_SAE(x, r))
|
|
}
|
|
|
|
// VCVTSS2SI_RN_SAE: Convert Scalar Single-Precision FP Value to Dword Integer (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSS2SI.RN_SAE xmm r32
|
|
//
|
|
// Construct and append a VCVTSS2SI.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSS2SI_RN_SAE(x, r operand.Op) { ctx.VCVTSS2SI_RN_SAE(x, r) }
|
|
|
|
// VCVTSS2SI_RU_SAE: Convert Scalar Single-Precision FP Value to Dword Integer (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSS2SI.RU_SAE xmm r32
|
|
//
|
|
// Construct and append a VCVTSS2SI.RU_SAE instruction to the active function.
|
|
func (c *Context) VCVTSS2SI_RU_SAE(x, r operand.Op) {
|
|
c.addinstruction(x86.VCVTSS2SI_RU_SAE(x, r))
|
|
}
|
|
|
|
// VCVTSS2SI_RU_SAE: Convert Scalar Single-Precision FP Value to Dword Integer (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSS2SI.RU_SAE xmm r32
|
|
//
|
|
// Construct and append a VCVTSS2SI.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSS2SI_RU_SAE(x, r operand.Op) { ctx.VCVTSS2SI_RU_SAE(x, r) }
|
|
|
|
// VCVTSS2SI_RZ_SAE: Convert Scalar Single-Precision FP Value to Dword Integer (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSS2SI.RZ_SAE xmm r32
|
|
//
|
|
// Construct and append a VCVTSS2SI.RZ_SAE instruction to the active function.
|
|
func (c *Context) VCVTSS2SI_RZ_SAE(x, r operand.Op) {
|
|
c.addinstruction(x86.VCVTSS2SI_RZ_SAE(x, r))
|
|
}
|
|
|
|
// VCVTSS2SI_RZ_SAE: Convert Scalar Single-Precision FP Value to Dword Integer (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSS2SI.RZ_SAE xmm r32
|
|
//
|
|
// Construct and append a VCVTSS2SI.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSS2SI_RZ_SAE(x, r operand.Op) { ctx.VCVTSS2SI_RZ_SAE(x, r) }
|
|
|
|
// VCVTSS2USIL: Convert Scalar Single-Precision Floating-Point Value to Unsigned Doubleword Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSS2USIL m32 r32
|
|
// VCVTSS2USIL xmm r32
|
|
//
|
|
// Construct and append a VCVTSS2USIL instruction to the active function.
|
|
func (c *Context) VCVTSS2USIL(mx, r operand.Op) {
|
|
c.addinstruction(x86.VCVTSS2USIL(mx, r))
|
|
}
|
|
|
|
// VCVTSS2USIL: Convert Scalar Single-Precision Floating-Point Value to Unsigned Doubleword Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSS2USIL m32 r32
|
|
// VCVTSS2USIL xmm r32
|
|
//
|
|
// Construct and append a VCVTSS2USIL instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSS2USIL(mx, r operand.Op) { ctx.VCVTSS2USIL(mx, r) }
|
|
|
|
// VCVTSS2USIL_RD_SAE: Convert Scalar Single-Precision Floating-Point Value to Unsigned Doubleword Integer (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSS2USIL.RD_SAE xmm r32
|
|
//
|
|
// Construct and append a VCVTSS2USIL.RD_SAE instruction to the active function.
|
|
func (c *Context) VCVTSS2USIL_RD_SAE(x, r operand.Op) {
|
|
c.addinstruction(x86.VCVTSS2USIL_RD_SAE(x, r))
|
|
}
|
|
|
|
// VCVTSS2USIL_RD_SAE: Convert Scalar Single-Precision Floating-Point Value to Unsigned Doubleword Integer (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSS2USIL.RD_SAE xmm r32
|
|
//
|
|
// Construct and append a VCVTSS2USIL.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSS2USIL_RD_SAE(x, r operand.Op) { ctx.VCVTSS2USIL_RD_SAE(x, r) }
|
|
|
|
// VCVTSS2USIL_RN_SAE: Convert Scalar Single-Precision Floating-Point Value to Unsigned Doubleword Integer (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSS2USIL.RN_SAE xmm r32
|
|
//
|
|
// Construct and append a VCVTSS2USIL.RN_SAE instruction to the active function.
|
|
func (c *Context) VCVTSS2USIL_RN_SAE(x, r operand.Op) {
|
|
c.addinstruction(x86.VCVTSS2USIL_RN_SAE(x, r))
|
|
}
|
|
|
|
// VCVTSS2USIL_RN_SAE: Convert Scalar Single-Precision Floating-Point Value to Unsigned Doubleword Integer (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSS2USIL.RN_SAE xmm r32
|
|
//
|
|
// Construct and append a VCVTSS2USIL.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSS2USIL_RN_SAE(x, r operand.Op) { ctx.VCVTSS2USIL_RN_SAE(x, r) }
|
|
|
|
// VCVTSS2USIL_RU_SAE: Convert Scalar Single-Precision Floating-Point Value to Unsigned Doubleword Integer (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSS2USIL.RU_SAE xmm r32
|
|
//
|
|
// Construct and append a VCVTSS2USIL.RU_SAE instruction to the active function.
|
|
func (c *Context) VCVTSS2USIL_RU_SAE(x, r operand.Op) {
|
|
c.addinstruction(x86.VCVTSS2USIL_RU_SAE(x, r))
|
|
}
|
|
|
|
// VCVTSS2USIL_RU_SAE: Convert Scalar Single-Precision Floating-Point Value to Unsigned Doubleword Integer (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSS2USIL.RU_SAE xmm r32
|
|
//
|
|
// Construct and append a VCVTSS2USIL.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSS2USIL_RU_SAE(x, r operand.Op) { ctx.VCVTSS2USIL_RU_SAE(x, r) }
|
|
|
|
// VCVTSS2USIL_RZ_SAE: Convert Scalar Single-Precision Floating-Point Value to Unsigned Doubleword Integer (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSS2USIL.RZ_SAE xmm r32
|
|
//
|
|
// Construct and append a VCVTSS2USIL.RZ_SAE instruction to the active function.
|
|
func (c *Context) VCVTSS2USIL_RZ_SAE(x, r operand.Op) {
|
|
c.addinstruction(x86.VCVTSS2USIL_RZ_SAE(x, r))
|
|
}
|
|
|
|
// VCVTSS2USIL_RZ_SAE: Convert Scalar Single-Precision Floating-Point Value to Unsigned Doubleword Integer (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSS2USIL.RZ_SAE xmm r32
|
|
//
|
|
// Construct and append a VCVTSS2USIL.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSS2USIL_RZ_SAE(x, r operand.Op) { ctx.VCVTSS2USIL_RZ_SAE(x, r) }
|
|
|
|
// VCVTSS2USIQ: Convert Scalar Single-Precision Floating-Point Value to Unsigned Doubleword Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSS2USIQ m32 r64
|
|
// VCVTSS2USIQ xmm r64
|
|
//
|
|
// Construct and append a VCVTSS2USIQ instruction to the active function.
|
|
func (c *Context) VCVTSS2USIQ(mx, r operand.Op) {
|
|
c.addinstruction(x86.VCVTSS2USIQ(mx, r))
|
|
}
|
|
|
|
// VCVTSS2USIQ: Convert Scalar Single-Precision Floating-Point Value to Unsigned Doubleword Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSS2USIQ m32 r64
|
|
// VCVTSS2USIQ xmm r64
|
|
//
|
|
// Construct and append a VCVTSS2USIQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSS2USIQ(mx, r operand.Op) { ctx.VCVTSS2USIQ(mx, r) }
|
|
|
|
// VCVTSS2USIQ_RD_SAE: Convert Scalar Single-Precision Floating-Point Value to Unsigned Doubleword Integer (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSS2USIQ.RD_SAE xmm r64
|
|
//
|
|
// Construct and append a VCVTSS2USIQ.RD_SAE instruction to the active function.
|
|
func (c *Context) VCVTSS2USIQ_RD_SAE(x, r operand.Op) {
|
|
c.addinstruction(x86.VCVTSS2USIQ_RD_SAE(x, r))
|
|
}
|
|
|
|
// VCVTSS2USIQ_RD_SAE: Convert Scalar Single-Precision Floating-Point Value to Unsigned Doubleword Integer (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSS2USIQ.RD_SAE xmm r64
|
|
//
|
|
// Construct and append a VCVTSS2USIQ.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSS2USIQ_RD_SAE(x, r operand.Op) { ctx.VCVTSS2USIQ_RD_SAE(x, r) }
|
|
|
|
// VCVTSS2USIQ_RN_SAE: Convert Scalar Single-Precision Floating-Point Value to Unsigned Doubleword Integer (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSS2USIQ.RN_SAE xmm r64
|
|
//
|
|
// Construct and append a VCVTSS2USIQ.RN_SAE instruction to the active function.
|
|
func (c *Context) VCVTSS2USIQ_RN_SAE(x, r operand.Op) {
|
|
c.addinstruction(x86.VCVTSS2USIQ_RN_SAE(x, r))
|
|
}
|
|
|
|
// VCVTSS2USIQ_RN_SAE: Convert Scalar Single-Precision Floating-Point Value to Unsigned Doubleword Integer (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSS2USIQ.RN_SAE xmm r64
|
|
//
|
|
// Construct and append a VCVTSS2USIQ.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSS2USIQ_RN_SAE(x, r operand.Op) { ctx.VCVTSS2USIQ_RN_SAE(x, r) }
|
|
|
|
// VCVTSS2USIQ_RU_SAE: Convert Scalar Single-Precision Floating-Point Value to Unsigned Doubleword Integer (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSS2USIQ.RU_SAE xmm r64
|
|
//
|
|
// Construct and append a VCVTSS2USIQ.RU_SAE instruction to the active function.
|
|
func (c *Context) VCVTSS2USIQ_RU_SAE(x, r operand.Op) {
|
|
c.addinstruction(x86.VCVTSS2USIQ_RU_SAE(x, r))
|
|
}
|
|
|
|
// VCVTSS2USIQ_RU_SAE: Convert Scalar Single-Precision Floating-Point Value to Unsigned Doubleword Integer (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSS2USIQ.RU_SAE xmm r64
|
|
//
|
|
// Construct and append a VCVTSS2USIQ.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSS2USIQ_RU_SAE(x, r operand.Op) { ctx.VCVTSS2USIQ_RU_SAE(x, r) }
|
|
|
|
// VCVTSS2USIQ_RZ_SAE: Convert Scalar Single-Precision Floating-Point Value to Unsigned Doubleword Integer (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSS2USIQ.RZ_SAE xmm r64
|
|
//
|
|
// Construct and append a VCVTSS2USIQ.RZ_SAE instruction to the active function.
|
|
func (c *Context) VCVTSS2USIQ_RZ_SAE(x, r operand.Op) {
|
|
c.addinstruction(x86.VCVTSS2USIQ_RZ_SAE(x, r))
|
|
}
|
|
|
|
// VCVTSS2USIQ_RZ_SAE: Convert Scalar Single-Precision Floating-Point Value to Unsigned Doubleword Integer (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTSS2USIQ.RZ_SAE xmm r64
|
|
//
|
|
// Construct and append a VCVTSS2USIQ.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTSS2USIQ_RZ_SAE(x, r operand.Op) { ctx.VCVTSS2USIQ_RZ_SAE(x, r) }
|
|
|
|
// VCVTTPD2DQ: Convert with Truncation Packed Double-Precision FP Values to Packed Dword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTPD2DQ m512 k ymm
|
|
// VCVTTPD2DQ m512 ymm
|
|
// VCVTTPD2DQ zmm k ymm
|
|
// VCVTTPD2DQ zmm ymm
|
|
//
|
|
// Construct and append a VCVTTPD2DQ instruction to the active function.
|
|
func (c *Context) VCVTTPD2DQ(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTTPD2DQ(ops...))
|
|
}
|
|
|
|
// VCVTTPD2DQ: Convert with Truncation Packed Double-Precision FP Values to Packed Dword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTPD2DQ m512 k ymm
|
|
// VCVTTPD2DQ m512 ymm
|
|
// VCVTTPD2DQ zmm k ymm
|
|
// VCVTTPD2DQ zmm ymm
|
|
//
|
|
// Construct and append a VCVTTPD2DQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTTPD2DQ(ops ...operand.Op) { ctx.VCVTTPD2DQ(ops...) }
|
|
|
|
// VCVTTPD2DQX: Convert with Truncation Packed Double-Precision FP Values to Packed Dword Integers.
//
// Forms:
//
//	VCVTTPD2DQX m128 xmm
//	VCVTTPD2DQX xmm xmm
//	VCVTTPD2DQX m128 k xmm
//	VCVTTPD2DQX xmm k xmm
//
// Construct and append a VCVTTPD2DQX instruction to the active function.
func (c *Context) VCVTTPD2DQX(ops ...operand.Op) {
	c.addinstruction(x86.VCVTTPD2DQX(ops...))
}

// VCVTTPD2DQX: Convert with Truncation Packed Double-Precision FP Values to Packed Dword Integers.
//
// Forms:
//
//	VCVTTPD2DQX m128 xmm
//	VCVTTPD2DQX xmm xmm
//	VCVTTPD2DQX m128 k xmm
//	VCVTTPD2DQX xmm k xmm
//
// Construct and append a VCVTTPD2DQX instruction to the active function.
// Operates on the global context.
func VCVTTPD2DQX(ops ...operand.Op) { ctx.VCVTTPD2DQX(ops...) }

// VCVTTPD2DQX_BCST: Convert with Truncation Packed Double-Precision FP Values to Packed Dword Integers (Broadcast).
//
// Forms:
//
//	VCVTTPD2DQX.BCST m64 k xmm
//	VCVTTPD2DQX.BCST m64 xmm
//
// Construct and append a VCVTTPD2DQX.BCST instruction to the active function.
func (c *Context) VCVTTPD2DQX_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VCVTTPD2DQX_BCST(ops...))
}

// VCVTTPD2DQX_BCST: Convert with Truncation Packed Double-Precision FP Values to Packed Dword Integers (Broadcast).
//
// Forms:
//
//	VCVTTPD2DQX.BCST m64 k xmm
//	VCVTTPD2DQX.BCST m64 xmm
//
// Construct and append a VCVTTPD2DQX.BCST instruction to the active function.
// Operates on the global context.
func VCVTTPD2DQX_BCST(ops ...operand.Op) { ctx.VCVTTPD2DQX_BCST(ops...) }

// VCVTTPD2DQX_BCST_Z: Convert with Truncation Packed Double-Precision FP Values to Packed Dword Integers (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VCVTTPD2DQX.BCST.Z m64 k xmm
//
// Construct and append a VCVTTPD2DQX.BCST.Z instruction to the active function.
func (c *Context) VCVTTPD2DQX_BCST_Z(m, k, x operand.Op) {
	c.addinstruction(x86.VCVTTPD2DQX_BCST_Z(m, k, x))
}

// VCVTTPD2DQX_BCST_Z: Convert with Truncation Packed Double-Precision FP Values to Packed Dword Integers (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VCVTTPD2DQX.BCST.Z m64 k xmm
//
// Construct and append a VCVTTPD2DQX.BCST.Z instruction to the active function.
// Operates on the global context.
func VCVTTPD2DQX_BCST_Z(m, k, x operand.Op) { ctx.VCVTTPD2DQX_BCST_Z(m, k, x) }

// VCVTTPD2DQX_Z: Convert with Truncation Packed Double-Precision FP Values to Packed Dword Integers (Zeroing Masking).
//
// Forms:
//
//	VCVTTPD2DQX.Z m128 k xmm
//	VCVTTPD2DQX.Z xmm k xmm
//
// Construct and append a VCVTTPD2DQX.Z instruction to the active function.
func (c *Context) VCVTTPD2DQX_Z(mx, k, x operand.Op) {
	c.addinstruction(x86.VCVTTPD2DQX_Z(mx, k, x))
}

// VCVTTPD2DQX_Z: Convert with Truncation Packed Double-Precision FP Values to Packed Dword Integers (Zeroing Masking).
//
// Forms:
//
//	VCVTTPD2DQX.Z m128 k xmm
//	VCVTTPD2DQX.Z xmm k xmm
//
// Construct and append a VCVTTPD2DQX.Z instruction to the active function.
// Operates on the global context.
func VCVTTPD2DQX_Z(mx, k, x operand.Op) { ctx.VCVTTPD2DQX_Z(mx, k, x) }
|
|
|
|
// VCVTTPD2DQY: Convert with Truncation Packed Double-Precision FP Values to Packed Dword Integers.
//
// Forms:
//
//	VCVTTPD2DQY m256 xmm
//	VCVTTPD2DQY ymm xmm
//	VCVTTPD2DQY m256 k xmm
//	VCVTTPD2DQY ymm k xmm
//
// Construct and append a VCVTTPD2DQY instruction to the active function.
func (c *Context) VCVTTPD2DQY(ops ...operand.Op) {
	c.addinstruction(x86.VCVTTPD2DQY(ops...))
}

// VCVTTPD2DQY: Convert with Truncation Packed Double-Precision FP Values to Packed Dword Integers.
//
// Forms:
//
//	VCVTTPD2DQY m256 xmm
//	VCVTTPD2DQY ymm xmm
//	VCVTTPD2DQY m256 k xmm
//	VCVTTPD2DQY ymm k xmm
//
// Construct and append a VCVTTPD2DQY instruction to the active function.
// Operates on the global context.
func VCVTTPD2DQY(ops ...operand.Op) { ctx.VCVTTPD2DQY(ops...) }

// VCVTTPD2DQY_BCST: Convert with Truncation Packed Double-Precision FP Values to Packed Dword Integers (Broadcast).
//
// Forms:
//
//	VCVTTPD2DQY.BCST m64 k xmm
//	VCVTTPD2DQY.BCST m64 xmm
//
// Construct and append a VCVTTPD2DQY.BCST instruction to the active function.
func (c *Context) VCVTTPD2DQY_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VCVTTPD2DQY_BCST(ops...))
}

// VCVTTPD2DQY_BCST: Convert with Truncation Packed Double-Precision FP Values to Packed Dword Integers (Broadcast).
//
// Forms:
//
//	VCVTTPD2DQY.BCST m64 k xmm
//	VCVTTPD2DQY.BCST m64 xmm
//
// Construct and append a VCVTTPD2DQY.BCST instruction to the active function.
// Operates on the global context.
func VCVTTPD2DQY_BCST(ops ...operand.Op) { ctx.VCVTTPD2DQY_BCST(ops...) }

// VCVTTPD2DQY_BCST_Z: Convert with Truncation Packed Double-Precision FP Values to Packed Dword Integers (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VCVTTPD2DQY.BCST.Z m64 k xmm
//
// Construct and append a VCVTTPD2DQY.BCST.Z instruction to the active function.
func (c *Context) VCVTTPD2DQY_BCST_Z(m, k, x operand.Op) {
	c.addinstruction(x86.VCVTTPD2DQY_BCST_Z(m, k, x))
}

// VCVTTPD2DQY_BCST_Z: Convert with Truncation Packed Double-Precision FP Values to Packed Dword Integers (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VCVTTPD2DQY.BCST.Z m64 k xmm
//
// Construct and append a VCVTTPD2DQY.BCST.Z instruction to the active function.
// Operates on the global context.
func VCVTTPD2DQY_BCST_Z(m, k, x operand.Op) { ctx.VCVTTPD2DQY_BCST_Z(m, k, x) }

// VCVTTPD2DQY_Z: Convert with Truncation Packed Double-Precision FP Values to Packed Dword Integers (Zeroing Masking).
//
// Forms:
//
//	VCVTTPD2DQY.Z m256 k xmm
//	VCVTTPD2DQY.Z ymm k xmm
//
// Construct and append a VCVTTPD2DQY.Z instruction to the active function.
func (c *Context) VCVTTPD2DQY_Z(my, k, x operand.Op) {
	c.addinstruction(x86.VCVTTPD2DQY_Z(my, k, x))
}

// VCVTTPD2DQY_Z: Convert with Truncation Packed Double-Precision FP Values to Packed Dword Integers (Zeroing Masking).
//
// Forms:
//
//	VCVTTPD2DQY.Z m256 k xmm
//	VCVTTPD2DQY.Z ymm k xmm
//
// Construct and append a VCVTTPD2DQY.Z instruction to the active function.
// Operates on the global context.
func VCVTTPD2DQY_Z(my, k, x operand.Op) { ctx.VCVTTPD2DQY_Z(my, k, x) }
|
|
|
|
// VCVTTPD2DQ_BCST: Convert with Truncation Packed Double-Precision FP Values to Packed Dword Integers (Broadcast).
//
// Forms:
//
//	VCVTTPD2DQ.BCST m64 k ymm
//	VCVTTPD2DQ.BCST m64 ymm
//
// Construct and append a VCVTTPD2DQ.BCST instruction to the active function.
func (c *Context) VCVTTPD2DQ_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VCVTTPD2DQ_BCST(ops...))
}

// VCVTTPD2DQ_BCST: Convert with Truncation Packed Double-Precision FP Values to Packed Dword Integers (Broadcast).
//
// Forms:
//
//	VCVTTPD2DQ.BCST m64 k ymm
//	VCVTTPD2DQ.BCST m64 ymm
//
// Construct and append a VCVTTPD2DQ.BCST instruction to the active function.
// Operates on the global context.
func VCVTTPD2DQ_BCST(ops ...operand.Op) { ctx.VCVTTPD2DQ_BCST(ops...) }

// VCVTTPD2DQ_BCST_Z: Convert with Truncation Packed Double-Precision FP Values to Packed Dword Integers (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VCVTTPD2DQ.BCST.Z m64 k ymm
//
// Construct and append a VCVTTPD2DQ.BCST.Z instruction to the active function.
func (c *Context) VCVTTPD2DQ_BCST_Z(m, k, y operand.Op) {
	c.addinstruction(x86.VCVTTPD2DQ_BCST_Z(m, k, y))
}

// VCVTTPD2DQ_BCST_Z: Convert with Truncation Packed Double-Precision FP Values to Packed Dword Integers (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VCVTTPD2DQ.BCST.Z m64 k ymm
//
// Construct and append a VCVTTPD2DQ.BCST.Z instruction to the active function.
// Operates on the global context.
func VCVTTPD2DQ_BCST_Z(m, k, y operand.Op) { ctx.VCVTTPD2DQ_BCST_Z(m, k, y) }

// VCVTTPD2DQ_SAE: Convert with Truncation Packed Double-Precision FP Values to Packed Dword Integers (Suppress All Exceptions).
//
// Forms:
//
//	VCVTTPD2DQ.SAE zmm k ymm
//	VCVTTPD2DQ.SAE zmm ymm
//
// Construct and append a VCVTTPD2DQ.SAE instruction to the active function.
func (c *Context) VCVTTPD2DQ_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VCVTTPD2DQ_SAE(ops...))
}

// VCVTTPD2DQ_SAE: Convert with Truncation Packed Double-Precision FP Values to Packed Dword Integers (Suppress All Exceptions).
//
// Forms:
//
//	VCVTTPD2DQ.SAE zmm k ymm
//	VCVTTPD2DQ.SAE zmm ymm
//
// Construct and append a VCVTTPD2DQ.SAE instruction to the active function.
// Operates on the global context.
func VCVTTPD2DQ_SAE(ops ...operand.Op) { ctx.VCVTTPD2DQ_SAE(ops...) }

// VCVTTPD2DQ_SAE_Z: Convert with Truncation Packed Double-Precision FP Values to Packed Dword Integers (Suppress All Exceptions, Zeroing Masking).
//
// Forms:
//
//	VCVTTPD2DQ.SAE.Z zmm k ymm
//
// Construct and append a VCVTTPD2DQ.SAE.Z instruction to the active function.
func (c *Context) VCVTTPD2DQ_SAE_Z(z, k, y operand.Op) {
	c.addinstruction(x86.VCVTTPD2DQ_SAE_Z(z, k, y))
}

// VCVTTPD2DQ_SAE_Z: Convert with Truncation Packed Double-Precision FP Values to Packed Dword Integers (Suppress All Exceptions, Zeroing Masking).
//
// Forms:
//
//	VCVTTPD2DQ.SAE.Z zmm k ymm
//
// Construct and append a VCVTTPD2DQ.SAE.Z instruction to the active function.
// Operates on the global context.
func VCVTTPD2DQ_SAE_Z(z, k, y operand.Op) { ctx.VCVTTPD2DQ_SAE_Z(z, k, y) }

// VCVTTPD2DQ_Z: Convert with Truncation Packed Double-Precision FP Values to Packed Dword Integers (Zeroing Masking).
//
// Forms:
//
//	VCVTTPD2DQ.Z m512 k ymm
//	VCVTTPD2DQ.Z zmm k ymm
//
// Construct and append a VCVTTPD2DQ.Z instruction to the active function.
func (c *Context) VCVTTPD2DQ_Z(mz, k, y operand.Op) {
	c.addinstruction(x86.VCVTTPD2DQ_Z(mz, k, y))
}

// VCVTTPD2DQ_Z: Convert with Truncation Packed Double-Precision FP Values to Packed Dword Integers (Zeroing Masking).
//
// Forms:
//
//	VCVTTPD2DQ.Z m512 k ymm
//	VCVTTPD2DQ.Z zmm k ymm
//
// Construct and append a VCVTTPD2DQ.Z instruction to the active function.
// Operates on the global context.
func VCVTTPD2DQ_Z(mz, k, y operand.Op) { ctx.VCVTTPD2DQ_Z(mz, k, y) }
|
|
|
|
// VCVTTPD2QQ: Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Quadword Integers.
//
// Forms:
//
//	VCVTTPD2QQ m128 k xmm
//	VCVTTPD2QQ m128 xmm
//	VCVTTPD2QQ m256 k ymm
//	VCVTTPD2QQ m256 ymm
//	VCVTTPD2QQ xmm k xmm
//	VCVTTPD2QQ xmm xmm
//	VCVTTPD2QQ ymm k ymm
//	VCVTTPD2QQ ymm ymm
//	VCVTTPD2QQ m512 k zmm
//	VCVTTPD2QQ m512 zmm
//	VCVTTPD2QQ zmm k zmm
//	VCVTTPD2QQ zmm zmm
//
// Construct and append a VCVTTPD2QQ instruction to the active function.
func (c *Context) VCVTTPD2QQ(ops ...operand.Op) {
	c.addinstruction(x86.VCVTTPD2QQ(ops...))
}

// VCVTTPD2QQ: Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Quadword Integers.
//
// Forms:
//
//	VCVTTPD2QQ m128 k xmm
//	VCVTTPD2QQ m128 xmm
//	VCVTTPD2QQ m256 k ymm
//	VCVTTPD2QQ m256 ymm
//	VCVTTPD2QQ xmm k xmm
//	VCVTTPD2QQ xmm xmm
//	VCVTTPD2QQ ymm k ymm
//	VCVTTPD2QQ ymm ymm
//	VCVTTPD2QQ m512 k zmm
//	VCVTTPD2QQ m512 zmm
//	VCVTTPD2QQ zmm k zmm
//	VCVTTPD2QQ zmm zmm
//
// Construct and append a VCVTTPD2QQ instruction to the active function.
// Operates on the global context.
func VCVTTPD2QQ(ops ...operand.Op) { ctx.VCVTTPD2QQ(ops...) }

// VCVTTPD2QQ_BCST: Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Quadword Integers (Broadcast).
//
// Forms:
//
//	VCVTTPD2QQ.BCST m64 k xmm
//	VCVTTPD2QQ.BCST m64 k ymm
//	VCVTTPD2QQ.BCST m64 xmm
//	VCVTTPD2QQ.BCST m64 ymm
//	VCVTTPD2QQ.BCST m64 k zmm
//	VCVTTPD2QQ.BCST m64 zmm
//
// Construct and append a VCVTTPD2QQ.BCST instruction to the active function.
func (c *Context) VCVTTPD2QQ_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VCVTTPD2QQ_BCST(ops...))
}

// VCVTTPD2QQ_BCST: Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Quadword Integers (Broadcast).
//
// Forms:
//
//	VCVTTPD2QQ.BCST m64 k xmm
//	VCVTTPD2QQ.BCST m64 k ymm
//	VCVTTPD2QQ.BCST m64 xmm
//	VCVTTPD2QQ.BCST m64 ymm
//	VCVTTPD2QQ.BCST m64 k zmm
//	VCVTTPD2QQ.BCST m64 zmm
//
// Construct and append a VCVTTPD2QQ.BCST instruction to the active function.
// Operates on the global context.
func VCVTTPD2QQ_BCST(ops ...operand.Op) { ctx.VCVTTPD2QQ_BCST(ops...) }

// VCVTTPD2QQ_BCST_Z: Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Quadword Integers (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VCVTTPD2QQ.BCST.Z m64 k xmm
//	VCVTTPD2QQ.BCST.Z m64 k ymm
//	VCVTTPD2QQ.BCST.Z m64 k zmm
//
// Construct and append a VCVTTPD2QQ.BCST.Z instruction to the active function.
func (c *Context) VCVTTPD2QQ_BCST_Z(m, k, xyz operand.Op) {
	c.addinstruction(x86.VCVTTPD2QQ_BCST_Z(m, k, xyz))
}

// VCVTTPD2QQ_BCST_Z: Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Quadword Integers (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VCVTTPD2QQ.BCST.Z m64 k xmm
//	VCVTTPD2QQ.BCST.Z m64 k ymm
//	VCVTTPD2QQ.BCST.Z m64 k zmm
//
// Construct and append a VCVTTPD2QQ.BCST.Z instruction to the active function.
// Operates on the global context.
func VCVTTPD2QQ_BCST_Z(m, k, xyz operand.Op) { ctx.VCVTTPD2QQ_BCST_Z(m, k, xyz) }

// VCVTTPD2QQ_SAE: Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Quadword Integers (Suppress All Exceptions).
//
// Forms:
//
//	VCVTTPD2QQ.SAE zmm k zmm
//	VCVTTPD2QQ.SAE zmm zmm
//
// Construct and append a VCVTTPD2QQ.SAE instruction to the active function.
func (c *Context) VCVTTPD2QQ_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VCVTTPD2QQ_SAE(ops...))
}

// VCVTTPD2QQ_SAE: Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Quadword Integers (Suppress All Exceptions).
//
// Forms:
//
//	VCVTTPD2QQ.SAE zmm k zmm
//	VCVTTPD2QQ.SAE zmm zmm
//
// Construct and append a VCVTTPD2QQ.SAE instruction to the active function.
// Operates on the global context.
func VCVTTPD2QQ_SAE(ops ...operand.Op) { ctx.VCVTTPD2QQ_SAE(ops...) }

// VCVTTPD2QQ_SAE_Z: Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Quadword Integers (Suppress All Exceptions, Zeroing Masking).
//
// Forms:
//
//	VCVTTPD2QQ.SAE.Z zmm k zmm
//
// Construct and append a VCVTTPD2QQ.SAE.Z instruction to the active function.
func (c *Context) VCVTTPD2QQ_SAE_Z(z, k, z1 operand.Op) {
	c.addinstruction(x86.VCVTTPD2QQ_SAE_Z(z, k, z1))
}

// VCVTTPD2QQ_SAE_Z: Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Quadword Integers (Suppress All Exceptions, Zeroing Masking).
//
// Forms:
//
//	VCVTTPD2QQ.SAE.Z zmm k zmm
//
// Construct and append a VCVTTPD2QQ.SAE.Z instruction to the active function.
// Operates on the global context.
func VCVTTPD2QQ_SAE_Z(z, k, z1 operand.Op) { ctx.VCVTTPD2QQ_SAE_Z(z, k, z1) }

// VCVTTPD2QQ_Z: Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Quadword Integers (Zeroing Masking).
//
// Forms:
//
//	VCVTTPD2QQ.Z m128 k xmm
//	VCVTTPD2QQ.Z m256 k ymm
//	VCVTTPD2QQ.Z xmm k xmm
//	VCVTTPD2QQ.Z ymm k ymm
//	VCVTTPD2QQ.Z m512 k zmm
//	VCVTTPD2QQ.Z zmm k zmm
//
// Construct and append a VCVTTPD2QQ.Z instruction to the active function.
func (c *Context) VCVTTPD2QQ_Z(mxyz, k, xyz operand.Op) {
	c.addinstruction(x86.VCVTTPD2QQ_Z(mxyz, k, xyz))
}

// VCVTTPD2QQ_Z: Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Quadword Integers (Zeroing Masking).
//
// Forms:
//
//	VCVTTPD2QQ.Z m128 k xmm
//	VCVTTPD2QQ.Z m256 k ymm
//	VCVTTPD2QQ.Z xmm k xmm
//	VCVTTPD2QQ.Z ymm k ymm
//	VCVTTPD2QQ.Z m512 k zmm
//	VCVTTPD2QQ.Z zmm k zmm
//
// Construct and append a VCVTTPD2QQ.Z instruction to the active function.
// Operates on the global context.
func VCVTTPD2QQ_Z(mxyz, k, xyz operand.Op) { ctx.VCVTTPD2QQ_Z(mxyz, k, xyz) }
|
|
|
|
// VCVTTPD2UDQ: Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers.
//
// Forms:
//
//	VCVTTPD2UDQ m512 k ymm
//	VCVTTPD2UDQ m512 ymm
//	VCVTTPD2UDQ zmm k ymm
//	VCVTTPD2UDQ zmm ymm
//
// Construct and append a VCVTTPD2UDQ instruction to the active function.
func (c *Context) VCVTTPD2UDQ(ops ...operand.Op) {
	c.addinstruction(x86.VCVTTPD2UDQ(ops...))
}

// VCVTTPD2UDQ: Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers.
//
// Forms:
//
//	VCVTTPD2UDQ m512 k ymm
//	VCVTTPD2UDQ m512 ymm
//	VCVTTPD2UDQ zmm k ymm
//	VCVTTPD2UDQ zmm ymm
//
// Construct and append a VCVTTPD2UDQ instruction to the active function.
// Operates on the global context.
func VCVTTPD2UDQ(ops ...operand.Op) { ctx.VCVTTPD2UDQ(ops...) }
|
|
|
|
// VCVTTPD2UDQX: Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers.
//
// Forms:
//
//	VCVTTPD2UDQX m128 k xmm
//	VCVTTPD2UDQX m128 xmm
//	VCVTTPD2UDQX xmm k xmm
//	VCVTTPD2UDQX xmm xmm
//
// Construct and append a VCVTTPD2UDQX instruction to the active function.
func (c *Context) VCVTTPD2UDQX(ops ...operand.Op) {
	c.addinstruction(x86.VCVTTPD2UDQX(ops...))
}

// VCVTTPD2UDQX: Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers.
//
// Forms:
//
//	VCVTTPD2UDQX m128 k xmm
//	VCVTTPD2UDQX m128 xmm
//	VCVTTPD2UDQX xmm k xmm
//	VCVTTPD2UDQX xmm xmm
//
// Construct and append a VCVTTPD2UDQX instruction to the active function.
// Operates on the global context.
func VCVTTPD2UDQX(ops ...operand.Op) { ctx.VCVTTPD2UDQX(ops...) }

// VCVTTPD2UDQX_BCST: Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Broadcast).
//
// Forms:
//
//	VCVTTPD2UDQX.BCST m64 k xmm
//	VCVTTPD2UDQX.BCST m64 xmm
//
// Construct and append a VCVTTPD2UDQX.BCST instruction to the active function.
func (c *Context) VCVTTPD2UDQX_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VCVTTPD2UDQX_BCST(ops...))
}

// VCVTTPD2UDQX_BCST: Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Broadcast).
//
// Forms:
//
//	VCVTTPD2UDQX.BCST m64 k xmm
//	VCVTTPD2UDQX.BCST m64 xmm
//
// Construct and append a VCVTTPD2UDQX.BCST instruction to the active function.
// Operates on the global context.
func VCVTTPD2UDQX_BCST(ops ...operand.Op) { ctx.VCVTTPD2UDQX_BCST(ops...) }

// VCVTTPD2UDQX_BCST_Z: Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VCVTTPD2UDQX.BCST.Z m64 k xmm
//
// Construct and append a VCVTTPD2UDQX.BCST.Z instruction to the active function.
func (c *Context) VCVTTPD2UDQX_BCST_Z(m, k, x operand.Op) {
	c.addinstruction(x86.VCVTTPD2UDQX_BCST_Z(m, k, x))
}

// VCVTTPD2UDQX_BCST_Z: Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VCVTTPD2UDQX.BCST.Z m64 k xmm
//
// Construct and append a VCVTTPD2UDQX.BCST.Z instruction to the active function.
// Operates on the global context.
func VCVTTPD2UDQX_BCST_Z(m, k, x operand.Op) { ctx.VCVTTPD2UDQX_BCST_Z(m, k, x) }

// VCVTTPD2UDQX_Z: Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Zeroing Masking).
//
// Forms:
//
//	VCVTTPD2UDQX.Z m128 k xmm
//	VCVTTPD2UDQX.Z xmm k xmm
//
// Construct and append a VCVTTPD2UDQX.Z instruction to the active function.
func (c *Context) VCVTTPD2UDQX_Z(mx, k, x operand.Op) {
	c.addinstruction(x86.VCVTTPD2UDQX_Z(mx, k, x))
}

// VCVTTPD2UDQX_Z: Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Zeroing Masking).
//
// Forms:
//
//	VCVTTPD2UDQX.Z m128 k xmm
//	VCVTTPD2UDQX.Z xmm k xmm
//
// Construct and append a VCVTTPD2UDQX.Z instruction to the active function.
// Operates on the global context.
func VCVTTPD2UDQX_Z(mx, k, x operand.Op) { ctx.VCVTTPD2UDQX_Z(mx, k, x) }
|
|
|
|
// VCVTTPD2UDQY: Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers.
//
// Forms:
//
//	VCVTTPD2UDQY m256 k xmm
//	VCVTTPD2UDQY m256 xmm
//	VCVTTPD2UDQY ymm k xmm
//	VCVTTPD2UDQY ymm xmm
//
// Construct and append a VCVTTPD2UDQY instruction to the active function.
func (c *Context) VCVTTPD2UDQY(ops ...operand.Op) {
	c.addinstruction(x86.VCVTTPD2UDQY(ops...))
}

// VCVTTPD2UDQY: Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers.
//
// Forms:
//
//	VCVTTPD2UDQY m256 k xmm
//	VCVTTPD2UDQY m256 xmm
//	VCVTTPD2UDQY ymm k xmm
//	VCVTTPD2UDQY ymm xmm
//
// Construct and append a VCVTTPD2UDQY instruction to the active function.
// Operates on the global context.
func VCVTTPD2UDQY(ops ...operand.Op) { ctx.VCVTTPD2UDQY(ops...) }

// VCVTTPD2UDQY_BCST: Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Broadcast).
//
// Forms:
//
//	VCVTTPD2UDQY.BCST m64 k xmm
//	VCVTTPD2UDQY.BCST m64 xmm
//
// Construct and append a VCVTTPD2UDQY.BCST instruction to the active function.
func (c *Context) VCVTTPD2UDQY_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VCVTTPD2UDQY_BCST(ops...))
}

// VCVTTPD2UDQY_BCST: Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Broadcast).
//
// Forms:
//
//	VCVTTPD2UDQY.BCST m64 k xmm
//	VCVTTPD2UDQY.BCST m64 xmm
//
// Construct and append a VCVTTPD2UDQY.BCST instruction to the active function.
// Operates on the global context.
func VCVTTPD2UDQY_BCST(ops ...operand.Op) { ctx.VCVTTPD2UDQY_BCST(ops...) }

// VCVTTPD2UDQY_BCST_Z: Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VCVTTPD2UDQY.BCST.Z m64 k xmm
//
// Construct and append a VCVTTPD2UDQY.BCST.Z instruction to the active function.
func (c *Context) VCVTTPD2UDQY_BCST_Z(m, k, x operand.Op) {
	c.addinstruction(x86.VCVTTPD2UDQY_BCST_Z(m, k, x))
}

// VCVTTPD2UDQY_BCST_Z: Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VCVTTPD2UDQY.BCST.Z m64 k xmm
//
// Construct and append a VCVTTPD2UDQY.BCST.Z instruction to the active function.
// Operates on the global context.
func VCVTTPD2UDQY_BCST_Z(m, k, x operand.Op) { ctx.VCVTTPD2UDQY_BCST_Z(m, k, x) }

// VCVTTPD2UDQY_Z: Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Zeroing Masking).
//
// Forms:
//
//	VCVTTPD2UDQY.Z m256 k xmm
//	VCVTTPD2UDQY.Z ymm k xmm
//
// Construct and append a VCVTTPD2UDQY.Z instruction to the active function.
func (c *Context) VCVTTPD2UDQY_Z(my, k, x operand.Op) {
	c.addinstruction(x86.VCVTTPD2UDQY_Z(my, k, x))
}

// VCVTTPD2UDQY_Z: Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Zeroing Masking).
//
// Forms:
//
//	VCVTTPD2UDQY.Z m256 k xmm
//	VCVTTPD2UDQY.Z ymm k xmm
//
// Construct and append a VCVTTPD2UDQY.Z instruction to the active function.
// Operates on the global context.
func VCVTTPD2UDQY_Z(my, k, x operand.Op) { ctx.VCVTTPD2UDQY_Z(my, k, x) }
|
|
|
|
// VCVTTPD2UDQ_BCST: Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Broadcast).
//
// Forms:
//
//	VCVTTPD2UDQ.BCST m64 k ymm
//	VCVTTPD2UDQ.BCST m64 ymm
//
// Construct and append a VCVTTPD2UDQ.BCST instruction to the active function.
func (c *Context) VCVTTPD2UDQ_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VCVTTPD2UDQ_BCST(ops...))
}

// VCVTTPD2UDQ_BCST: Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Broadcast).
//
// Forms:
//
//	VCVTTPD2UDQ.BCST m64 k ymm
//	VCVTTPD2UDQ.BCST m64 ymm
//
// Construct and append a VCVTTPD2UDQ.BCST instruction to the active function.
// Operates on the global context.
func VCVTTPD2UDQ_BCST(ops ...operand.Op) { ctx.VCVTTPD2UDQ_BCST(ops...) }

// VCVTTPD2UDQ_BCST_Z: Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VCVTTPD2UDQ.BCST.Z m64 k ymm
//
// Construct and append a VCVTTPD2UDQ.BCST.Z instruction to the active function.
func (c *Context) VCVTTPD2UDQ_BCST_Z(m, k, y operand.Op) {
	c.addinstruction(x86.VCVTTPD2UDQ_BCST_Z(m, k, y))
}

// VCVTTPD2UDQ_BCST_Z: Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VCVTTPD2UDQ.BCST.Z m64 k ymm
//
// Construct and append a VCVTTPD2UDQ.BCST.Z instruction to the active function.
// Operates on the global context.
func VCVTTPD2UDQ_BCST_Z(m, k, y operand.Op) { ctx.VCVTTPD2UDQ_BCST_Z(m, k, y) }

// VCVTTPD2UDQ_SAE: Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Suppress All Exceptions).
//
// Forms:
//
//	VCVTTPD2UDQ.SAE zmm k ymm
//	VCVTTPD2UDQ.SAE zmm ymm
//
// Construct and append a VCVTTPD2UDQ.SAE instruction to the active function.
func (c *Context) VCVTTPD2UDQ_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VCVTTPD2UDQ_SAE(ops...))
}

// VCVTTPD2UDQ_SAE: Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Suppress All Exceptions).
//
// Forms:
//
//	VCVTTPD2UDQ.SAE zmm k ymm
//	VCVTTPD2UDQ.SAE zmm ymm
//
// Construct and append a VCVTTPD2UDQ.SAE instruction to the active function.
// Operates on the global context.
func VCVTTPD2UDQ_SAE(ops ...operand.Op) { ctx.VCVTTPD2UDQ_SAE(ops...) }

// VCVTTPD2UDQ_SAE_Z: Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Suppress All Exceptions, Zeroing Masking).
//
// Forms:
//
//	VCVTTPD2UDQ.SAE.Z zmm k ymm
//
// Construct and append a VCVTTPD2UDQ.SAE.Z instruction to the active function.
func (c *Context) VCVTTPD2UDQ_SAE_Z(z, k, y operand.Op) {
	c.addinstruction(x86.VCVTTPD2UDQ_SAE_Z(z, k, y))
}

// VCVTTPD2UDQ_SAE_Z: Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Suppress All Exceptions, Zeroing Masking).
//
// Forms:
//
//	VCVTTPD2UDQ.SAE.Z zmm k ymm
//
// Construct and append a VCVTTPD2UDQ.SAE.Z instruction to the active function.
// Operates on the global context.
func VCVTTPD2UDQ_SAE_Z(z, k, y operand.Op) { ctx.VCVTTPD2UDQ_SAE_Z(z, k, y) }

// VCVTTPD2UDQ_Z: Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Zeroing Masking).
//
// Forms:
//
//	VCVTTPD2UDQ.Z m512 k ymm
//	VCVTTPD2UDQ.Z zmm k ymm
//
// Construct and append a VCVTTPD2UDQ.Z instruction to the active function.
func (c *Context) VCVTTPD2UDQ_Z(mz, k, y operand.Op) {
	c.addinstruction(x86.VCVTTPD2UDQ_Z(mz, k, y))
}

// VCVTTPD2UDQ_Z: Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Unsigned Doubleword Integers (Zeroing Masking).
//
// Forms:
//
//	VCVTTPD2UDQ.Z m512 k ymm
//	VCVTTPD2UDQ.Z zmm k ymm
//
// Construct and append a VCVTTPD2UDQ.Z instruction to the active function.
// Operates on the global context.
func VCVTTPD2UDQ_Z(mz, k, y operand.Op) { ctx.VCVTTPD2UDQ_Z(mz, k, y) }
|
|
|
|
// VCVTTPD2UQQ: Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Unsigned Quadword Integers.
//
// Forms:
//
//	VCVTTPD2UQQ m128 k xmm
//	VCVTTPD2UQQ m128 xmm
//	VCVTTPD2UQQ m256 k ymm
//	VCVTTPD2UQQ m256 ymm
//	VCVTTPD2UQQ xmm k xmm
//	VCVTTPD2UQQ xmm xmm
//	VCVTTPD2UQQ ymm k ymm
//	VCVTTPD2UQQ ymm ymm
//	VCVTTPD2UQQ m512 k zmm
//	VCVTTPD2UQQ m512 zmm
//	VCVTTPD2UQQ zmm k zmm
//	VCVTTPD2UQQ zmm zmm
//
// Construct and append a VCVTTPD2UQQ instruction to the active function.
func (c *Context) VCVTTPD2UQQ(ops ...operand.Op) {
	c.addinstruction(x86.VCVTTPD2UQQ(ops...))
}

// VCVTTPD2UQQ: Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Unsigned Quadword Integers.
//
// Forms:
//
//	VCVTTPD2UQQ m128 k xmm
//	VCVTTPD2UQQ m128 xmm
//	VCVTTPD2UQQ m256 k ymm
//	VCVTTPD2UQQ m256 ymm
//	VCVTTPD2UQQ xmm k xmm
//	VCVTTPD2UQQ xmm xmm
//	VCVTTPD2UQQ ymm k ymm
//	VCVTTPD2UQQ ymm ymm
//	VCVTTPD2UQQ m512 k zmm
//	VCVTTPD2UQQ m512 zmm
//	VCVTTPD2UQQ zmm k zmm
//	VCVTTPD2UQQ zmm zmm
//
// Construct and append a VCVTTPD2UQQ instruction to the active function.
// Operates on the global context.
func VCVTTPD2UQQ(ops ...operand.Op) { ctx.VCVTTPD2UQQ(ops...) }

// VCVTTPD2UQQ_BCST: Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Unsigned Quadword Integers (Broadcast).
//
// Forms:
//
//	VCVTTPD2UQQ.BCST m64 k xmm
//	VCVTTPD2UQQ.BCST m64 k ymm
//	VCVTTPD2UQQ.BCST m64 xmm
//	VCVTTPD2UQQ.BCST m64 ymm
//	VCVTTPD2UQQ.BCST m64 k zmm
//	VCVTTPD2UQQ.BCST m64 zmm
//
// Construct and append a VCVTTPD2UQQ.BCST instruction to the active function.
func (c *Context) VCVTTPD2UQQ_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VCVTTPD2UQQ_BCST(ops...))
}

// VCVTTPD2UQQ_BCST: Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Unsigned Quadword Integers (Broadcast).
//
// Forms:
//
//	VCVTTPD2UQQ.BCST m64 k xmm
//	VCVTTPD2UQQ.BCST m64 k ymm
//	VCVTTPD2UQQ.BCST m64 xmm
//	VCVTTPD2UQQ.BCST m64 ymm
//	VCVTTPD2UQQ.BCST m64 k zmm
//	VCVTTPD2UQQ.BCST m64 zmm
//
// Construct and append a VCVTTPD2UQQ.BCST instruction to the active function.
// Operates on the global context.
func VCVTTPD2UQQ_BCST(ops ...operand.Op) { ctx.VCVTTPD2UQQ_BCST(ops...) }

// VCVTTPD2UQQ_BCST_Z: Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Unsigned Quadword Integers (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VCVTTPD2UQQ.BCST.Z m64 k xmm
//	VCVTTPD2UQQ.BCST.Z m64 k ymm
//	VCVTTPD2UQQ.BCST.Z m64 k zmm
//
// Construct and append a VCVTTPD2UQQ.BCST.Z instruction to the active function.
func (c *Context) VCVTTPD2UQQ_BCST_Z(m, k, xyz operand.Op) {
	c.addinstruction(x86.VCVTTPD2UQQ_BCST_Z(m, k, xyz))
}

// VCVTTPD2UQQ_BCST_Z: Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Unsigned Quadword Integers (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VCVTTPD2UQQ.BCST.Z m64 k xmm
//	VCVTTPD2UQQ.BCST.Z m64 k ymm
//	VCVTTPD2UQQ.BCST.Z m64 k zmm
//
// Construct and append a VCVTTPD2UQQ.BCST.Z instruction to the active function.
// Operates on the global context.
func VCVTTPD2UQQ_BCST_Z(m, k, xyz operand.Op) { ctx.VCVTTPD2UQQ_BCST_Z(m, k, xyz) }

// VCVTTPD2UQQ_SAE: Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Unsigned Quadword Integers (Suppress All Exceptions).
//
// Forms:
//
//	VCVTTPD2UQQ.SAE zmm k zmm
//	VCVTTPD2UQQ.SAE zmm zmm
//
// Construct and append a VCVTTPD2UQQ.SAE instruction to the active function.
func (c *Context) VCVTTPD2UQQ_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VCVTTPD2UQQ_SAE(ops...))
}

// VCVTTPD2UQQ_SAE: Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Unsigned Quadword Integers (Suppress All Exceptions).
//
// Forms:
//
//	VCVTTPD2UQQ.SAE zmm k zmm
//	VCVTTPD2UQQ.SAE zmm zmm
//
// Construct and append a VCVTTPD2UQQ.SAE instruction to the active function.
// Operates on the global context.
func VCVTTPD2UQQ_SAE(ops ...operand.Op) { ctx.VCVTTPD2UQQ_SAE(ops...) }

// VCVTTPD2UQQ_SAE_Z: Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Unsigned Quadword Integers (Suppress All Exceptions, Zeroing Masking).
//
// Forms:
//
//	VCVTTPD2UQQ.SAE.Z zmm k zmm
//
// Construct and append a VCVTTPD2UQQ.SAE.Z instruction to the active function.
func (c *Context) VCVTTPD2UQQ_SAE_Z(z, k, z1 operand.Op) {
	c.addinstruction(x86.VCVTTPD2UQQ_SAE_Z(z, k, z1))
}

// VCVTTPD2UQQ_SAE_Z: Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Unsigned Quadword Integers (Suppress All Exceptions, Zeroing Masking).
//
// Forms:
//
//	VCVTTPD2UQQ.SAE.Z zmm k zmm
//
// Construct and append a VCVTTPD2UQQ.SAE.Z instruction to the active function.
// Operates on the global context.
func VCVTTPD2UQQ_SAE_Z(z, k, z1 operand.Op) { ctx.VCVTTPD2UQQ_SAE_Z(z, k, z1) }
|
|
|
|
// VCVTTPD2UQQ_Z: Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Unsigned Quadword Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTPD2UQQ.Z m128 k xmm
|
|
// VCVTTPD2UQQ.Z m256 k ymm
|
|
// VCVTTPD2UQQ.Z xmm k xmm
|
|
// VCVTTPD2UQQ.Z ymm k ymm
|
|
// VCVTTPD2UQQ.Z m512 k zmm
|
|
// VCVTTPD2UQQ.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTTPD2UQQ.Z instruction to the active function.
|
|
func (c *Context) VCVTTPD2UQQ_Z(mxyz, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VCVTTPD2UQQ_Z(mxyz, k, xyz))
|
|
}
|
|
|
|
// VCVTTPD2UQQ_Z: Convert with Truncation Packed Double-Precision Floating-Point Values to Packed Unsigned Quadword Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTPD2UQQ.Z m128 k xmm
|
|
// VCVTTPD2UQQ.Z m256 k ymm
|
|
// VCVTTPD2UQQ.Z xmm k xmm
|
|
// VCVTTPD2UQQ.Z ymm k ymm
|
|
// VCVTTPD2UQQ.Z m512 k zmm
|
|
// VCVTTPD2UQQ.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTTPD2UQQ.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTTPD2UQQ_Z(mxyz, k, xyz operand.Op) { ctx.VCVTTPD2UQQ_Z(mxyz, k, xyz) }
|
|
|
|
// VCVTTPS2DQ: Convert with Truncation Packed Single-Precision FP Values to Packed Dword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTPS2DQ m128 xmm
|
|
// VCVTTPS2DQ m256 ymm
|
|
// VCVTTPS2DQ xmm xmm
|
|
// VCVTTPS2DQ ymm ymm
|
|
// VCVTTPS2DQ m128 k xmm
|
|
// VCVTTPS2DQ m256 k ymm
|
|
// VCVTTPS2DQ xmm k xmm
|
|
// VCVTTPS2DQ ymm k ymm
|
|
// VCVTTPS2DQ m512 k zmm
|
|
// VCVTTPS2DQ m512 zmm
|
|
// VCVTTPS2DQ zmm k zmm
|
|
// VCVTTPS2DQ zmm zmm
|
|
//
|
|
// Construct and append a VCVTTPS2DQ instruction to the active function.
|
|
func (c *Context) VCVTTPS2DQ(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTTPS2DQ(ops...))
|
|
}
|
|
|
|
// VCVTTPS2DQ: Convert with Truncation Packed Single-Precision FP Values to Packed Dword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTPS2DQ m128 xmm
|
|
// VCVTTPS2DQ m256 ymm
|
|
// VCVTTPS2DQ xmm xmm
|
|
// VCVTTPS2DQ ymm ymm
|
|
// VCVTTPS2DQ m128 k xmm
|
|
// VCVTTPS2DQ m256 k ymm
|
|
// VCVTTPS2DQ xmm k xmm
|
|
// VCVTTPS2DQ ymm k ymm
|
|
// VCVTTPS2DQ m512 k zmm
|
|
// VCVTTPS2DQ m512 zmm
|
|
// VCVTTPS2DQ zmm k zmm
|
|
// VCVTTPS2DQ zmm zmm
|
|
//
|
|
// Construct and append a VCVTTPS2DQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTTPS2DQ(ops ...operand.Op) { ctx.VCVTTPS2DQ(ops...) }
|
|
|
|
// VCVTTPS2DQ_BCST: Convert with Truncation Packed Single-Precision FP Values to Packed Dword Integers (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTPS2DQ.BCST m32 k xmm
|
|
// VCVTTPS2DQ.BCST m32 k ymm
|
|
// VCVTTPS2DQ.BCST m32 xmm
|
|
// VCVTTPS2DQ.BCST m32 ymm
|
|
// VCVTTPS2DQ.BCST m32 k zmm
|
|
// VCVTTPS2DQ.BCST m32 zmm
|
|
//
|
|
// Construct and append a VCVTTPS2DQ.BCST instruction to the active function.
|
|
func (c *Context) VCVTTPS2DQ_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTTPS2DQ_BCST(ops...))
|
|
}
|
|
|
|
// VCVTTPS2DQ_BCST: Convert with Truncation Packed Single-Precision FP Values to Packed Dword Integers (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTPS2DQ.BCST m32 k xmm
|
|
// VCVTTPS2DQ.BCST m32 k ymm
|
|
// VCVTTPS2DQ.BCST m32 xmm
|
|
// VCVTTPS2DQ.BCST m32 ymm
|
|
// VCVTTPS2DQ.BCST m32 k zmm
|
|
// VCVTTPS2DQ.BCST m32 zmm
|
|
//
|
|
// Construct and append a VCVTTPS2DQ.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTTPS2DQ_BCST(ops ...operand.Op) { ctx.VCVTTPS2DQ_BCST(ops...) }
|
|
|
|
// VCVTTPS2DQ_BCST_Z: Convert with Truncation Packed Single-Precision FP Values to Packed Dword Integers (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTPS2DQ.BCST.Z m32 k xmm
|
|
// VCVTTPS2DQ.BCST.Z m32 k ymm
|
|
// VCVTTPS2DQ.BCST.Z m32 k zmm
|
|
//
|
|
// Construct and append a VCVTTPS2DQ.BCST.Z instruction to the active function.
|
|
func (c *Context) VCVTTPS2DQ_BCST_Z(m, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VCVTTPS2DQ_BCST_Z(m, k, xyz))
|
|
}
|
|
|
|
// VCVTTPS2DQ_BCST_Z: Convert with Truncation Packed Single-Precision FP Values to Packed Dword Integers (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTPS2DQ.BCST.Z m32 k xmm
|
|
// VCVTTPS2DQ.BCST.Z m32 k ymm
|
|
// VCVTTPS2DQ.BCST.Z m32 k zmm
|
|
//
|
|
// Construct and append a VCVTTPS2DQ.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTTPS2DQ_BCST_Z(m, k, xyz operand.Op) { ctx.VCVTTPS2DQ_BCST_Z(m, k, xyz) }
|
|
|
|
// VCVTTPS2DQ_SAE: Convert with Truncation Packed Single-Precision FP Values to Packed Dword Integers (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTPS2DQ.SAE zmm k zmm
|
|
// VCVTTPS2DQ.SAE zmm zmm
|
|
//
|
|
// Construct and append a VCVTTPS2DQ.SAE instruction to the active function.
|
|
func (c *Context) VCVTTPS2DQ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTTPS2DQ_SAE(ops...))
|
|
}
|
|
|
|
// VCVTTPS2DQ_SAE: Convert with Truncation Packed Single-Precision FP Values to Packed Dword Integers (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTPS2DQ.SAE zmm k zmm
|
|
// VCVTTPS2DQ.SAE zmm zmm
|
|
//
|
|
// Construct and append a VCVTTPS2DQ.SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTTPS2DQ_SAE(ops ...operand.Op) { ctx.VCVTTPS2DQ_SAE(ops...) }
|
|
|
|
// VCVTTPS2DQ_SAE_Z: Convert with Truncation Packed Single-Precision FP Values to Packed Dword Integers (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTPS2DQ.SAE.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTTPS2DQ.SAE.Z instruction to the active function.
|
|
func (c *Context) VCVTTPS2DQ_SAE_Z(z, k, z1 operand.Op) {
|
|
c.addinstruction(x86.VCVTTPS2DQ_SAE_Z(z, k, z1))
|
|
}
|
|
|
|
// VCVTTPS2DQ_SAE_Z: Convert with Truncation Packed Single-Precision FP Values to Packed Dword Integers (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTPS2DQ.SAE.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTTPS2DQ.SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTTPS2DQ_SAE_Z(z, k, z1 operand.Op) { ctx.VCVTTPS2DQ_SAE_Z(z, k, z1) }
|
|
|
|
// VCVTTPS2DQ_Z: Convert with Truncation Packed Single-Precision FP Values to Packed Dword Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTPS2DQ.Z m128 k xmm
|
|
// VCVTTPS2DQ.Z m256 k ymm
|
|
// VCVTTPS2DQ.Z xmm k xmm
|
|
// VCVTTPS2DQ.Z ymm k ymm
|
|
// VCVTTPS2DQ.Z m512 k zmm
|
|
// VCVTTPS2DQ.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTTPS2DQ.Z instruction to the active function.
|
|
func (c *Context) VCVTTPS2DQ_Z(mxyz, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VCVTTPS2DQ_Z(mxyz, k, xyz))
|
|
}
|
|
|
|
// VCVTTPS2DQ_Z: Convert with Truncation Packed Single-Precision FP Values to Packed Dword Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTPS2DQ.Z m128 k xmm
|
|
// VCVTTPS2DQ.Z m256 k ymm
|
|
// VCVTTPS2DQ.Z xmm k xmm
|
|
// VCVTTPS2DQ.Z ymm k ymm
|
|
// VCVTTPS2DQ.Z m512 k zmm
|
|
// VCVTTPS2DQ.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTTPS2DQ.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTTPS2DQ_Z(mxyz, k, xyz operand.Op) { ctx.VCVTTPS2DQ_Z(mxyz, k, xyz) }
|
|
|
|
// VCVTTPS2QQ: Convert with Truncation Packed Single Precision Floating-Point Values to Packed Singed Quadword Integer Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTPS2QQ m128 k ymm
|
|
// VCVTTPS2QQ m128 ymm
|
|
// VCVTTPS2QQ m64 k xmm
|
|
// VCVTTPS2QQ m64 xmm
|
|
// VCVTTPS2QQ xmm k xmm
|
|
// VCVTTPS2QQ xmm k ymm
|
|
// VCVTTPS2QQ xmm xmm
|
|
// VCVTTPS2QQ xmm ymm
|
|
// VCVTTPS2QQ m256 k zmm
|
|
// VCVTTPS2QQ m256 zmm
|
|
// VCVTTPS2QQ ymm k zmm
|
|
// VCVTTPS2QQ ymm zmm
|
|
//
|
|
// Construct and append a VCVTTPS2QQ instruction to the active function.
|
|
func (c *Context) VCVTTPS2QQ(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTTPS2QQ(ops...))
|
|
}
|
|
|
|
// VCVTTPS2QQ: Convert with Truncation Packed Single Precision Floating-Point Values to Packed Singed Quadword Integer Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTPS2QQ m128 k ymm
|
|
// VCVTTPS2QQ m128 ymm
|
|
// VCVTTPS2QQ m64 k xmm
|
|
// VCVTTPS2QQ m64 xmm
|
|
// VCVTTPS2QQ xmm k xmm
|
|
// VCVTTPS2QQ xmm k ymm
|
|
// VCVTTPS2QQ xmm xmm
|
|
// VCVTTPS2QQ xmm ymm
|
|
// VCVTTPS2QQ m256 k zmm
|
|
// VCVTTPS2QQ m256 zmm
|
|
// VCVTTPS2QQ ymm k zmm
|
|
// VCVTTPS2QQ ymm zmm
|
|
//
|
|
// Construct and append a VCVTTPS2QQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTTPS2QQ(ops ...operand.Op) { ctx.VCVTTPS2QQ(ops...) }
|
|
|
|
// VCVTTPS2QQ_BCST: Convert with Truncation Packed Single Precision Floating-Point Values to Packed Singed Quadword Integer Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTPS2QQ.BCST m32 k xmm
|
|
// VCVTTPS2QQ.BCST m32 k ymm
|
|
// VCVTTPS2QQ.BCST m32 xmm
|
|
// VCVTTPS2QQ.BCST m32 ymm
|
|
// VCVTTPS2QQ.BCST m32 k zmm
|
|
// VCVTTPS2QQ.BCST m32 zmm
|
|
//
|
|
// Construct and append a VCVTTPS2QQ.BCST instruction to the active function.
|
|
func (c *Context) VCVTTPS2QQ_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTTPS2QQ_BCST(ops...))
|
|
}
|
|
|
|
// VCVTTPS2QQ_BCST: Convert with Truncation Packed Single Precision Floating-Point Values to Packed Singed Quadword Integer Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTPS2QQ.BCST m32 k xmm
|
|
// VCVTTPS2QQ.BCST m32 k ymm
|
|
// VCVTTPS2QQ.BCST m32 xmm
|
|
// VCVTTPS2QQ.BCST m32 ymm
|
|
// VCVTTPS2QQ.BCST m32 k zmm
|
|
// VCVTTPS2QQ.BCST m32 zmm
|
|
//
|
|
// Construct and append a VCVTTPS2QQ.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTTPS2QQ_BCST(ops ...operand.Op) { ctx.VCVTTPS2QQ_BCST(ops...) }
|
|
|
|
// VCVTTPS2QQ_BCST_Z: Convert with Truncation Packed Single Precision Floating-Point Values to Packed Singed Quadword Integer Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTPS2QQ.BCST.Z m32 k xmm
|
|
// VCVTTPS2QQ.BCST.Z m32 k ymm
|
|
// VCVTTPS2QQ.BCST.Z m32 k zmm
|
|
//
|
|
// Construct and append a VCVTTPS2QQ.BCST.Z instruction to the active function.
|
|
func (c *Context) VCVTTPS2QQ_BCST_Z(m, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VCVTTPS2QQ_BCST_Z(m, k, xyz))
|
|
}
|
|
|
|
// VCVTTPS2QQ_BCST_Z: Convert with Truncation Packed Single Precision Floating-Point Values to Packed Singed Quadword Integer Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTPS2QQ.BCST.Z m32 k xmm
|
|
// VCVTTPS2QQ.BCST.Z m32 k ymm
|
|
// VCVTTPS2QQ.BCST.Z m32 k zmm
|
|
//
|
|
// Construct and append a VCVTTPS2QQ.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTTPS2QQ_BCST_Z(m, k, xyz operand.Op) { ctx.VCVTTPS2QQ_BCST_Z(m, k, xyz) }
|
|
|
|
// VCVTTPS2QQ_SAE: Convert with Truncation Packed Single Precision Floating-Point Values to Packed Singed Quadword Integer Values (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTPS2QQ.SAE ymm k zmm
|
|
// VCVTTPS2QQ.SAE ymm zmm
|
|
//
|
|
// Construct and append a VCVTTPS2QQ.SAE instruction to the active function.
|
|
func (c *Context) VCVTTPS2QQ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTTPS2QQ_SAE(ops...))
|
|
}
|
|
|
|
// VCVTTPS2QQ_SAE: Convert with Truncation Packed Single Precision Floating-Point Values to Packed Singed Quadword Integer Values (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTPS2QQ.SAE ymm k zmm
|
|
// VCVTTPS2QQ.SAE ymm zmm
|
|
//
|
|
// Construct and append a VCVTTPS2QQ.SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTTPS2QQ_SAE(ops ...operand.Op) { ctx.VCVTTPS2QQ_SAE(ops...) }
|
|
|
|
// VCVTTPS2QQ_SAE_Z: Convert with Truncation Packed Single Precision Floating-Point Values to Packed Singed Quadword Integer Values (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTPS2QQ.SAE.Z ymm k zmm
|
|
//
|
|
// Construct and append a VCVTTPS2QQ.SAE.Z instruction to the active function.
|
|
func (c *Context) VCVTTPS2QQ_SAE_Z(y, k, z operand.Op) {
|
|
c.addinstruction(x86.VCVTTPS2QQ_SAE_Z(y, k, z))
|
|
}
|
|
|
|
// VCVTTPS2QQ_SAE_Z: Convert with Truncation Packed Single Precision Floating-Point Values to Packed Singed Quadword Integer Values (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTPS2QQ.SAE.Z ymm k zmm
|
|
//
|
|
// Construct and append a VCVTTPS2QQ.SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTTPS2QQ_SAE_Z(y, k, z operand.Op) { ctx.VCVTTPS2QQ_SAE_Z(y, k, z) }
|
|
|
|
// VCVTTPS2QQ_Z: Convert with Truncation Packed Single Precision Floating-Point Values to Packed Singed Quadword Integer Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTPS2QQ.Z m128 k ymm
|
|
// VCVTTPS2QQ.Z m64 k xmm
|
|
// VCVTTPS2QQ.Z xmm k xmm
|
|
// VCVTTPS2QQ.Z xmm k ymm
|
|
// VCVTTPS2QQ.Z m256 k zmm
|
|
// VCVTTPS2QQ.Z ymm k zmm
|
|
//
|
|
// Construct and append a VCVTTPS2QQ.Z instruction to the active function.
|
|
func (c *Context) VCVTTPS2QQ_Z(mxy, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VCVTTPS2QQ_Z(mxy, k, xyz))
|
|
}
|
|
|
|
// VCVTTPS2QQ_Z: Convert with Truncation Packed Single Precision Floating-Point Values to Packed Singed Quadword Integer Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTPS2QQ.Z m128 k ymm
|
|
// VCVTTPS2QQ.Z m64 k xmm
|
|
// VCVTTPS2QQ.Z xmm k xmm
|
|
// VCVTTPS2QQ.Z xmm k ymm
|
|
// VCVTTPS2QQ.Z m256 k zmm
|
|
// VCVTTPS2QQ.Z ymm k zmm
|
|
//
|
|
// Construct and append a VCVTTPS2QQ.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTTPS2QQ_Z(mxy, k, xyz operand.Op) { ctx.VCVTTPS2QQ_Z(mxy, k, xyz) }
|
|
|
|
// VCVTTPS2UDQ: Convert with Truncation Packed Single-Precision Floating-Point Values to Packed Unsigned Doubleword Integer Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTPS2UDQ m128 k xmm
|
|
// VCVTTPS2UDQ m128 xmm
|
|
// VCVTTPS2UDQ m256 k ymm
|
|
// VCVTTPS2UDQ m256 ymm
|
|
// VCVTTPS2UDQ xmm k xmm
|
|
// VCVTTPS2UDQ xmm xmm
|
|
// VCVTTPS2UDQ ymm k ymm
|
|
// VCVTTPS2UDQ ymm ymm
|
|
// VCVTTPS2UDQ m512 k zmm
|
|
// VCVTTPS2UDQ m512 zmm
|
|
// VCVTTPS2UDQ zmm k zmm
|
|
// VCVTTPS2UDQ zmm zmm
|
|
//
|
|
// Construct and append a VCVTTPS2UDQ instruction to the active function.
|
|
func (c *Context) VCVTTPS2UDQ(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTTPS2UDQ(ops...))
|
|
}
|
|
|
|
// VCVTTPS2UDQ: Convert with Truncation Packed Single-Precision Floating-Point Values to Packed Unsigned Doubleword Integer Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTPS2UDQ m128 k xmm
|
|
// VCVTTPS2UDQ m128 xmm
|
|
// VCVTTPS2UDQ m256 k ymm
|
|
// VCVTTPS2UDQ m256 ymm
|
|
// VCVTTPS2UDQ xmm k xmm
|
|
// VCVTTPS2UDQ xmm xmm
|
|
// VCVTTPS2UDQ ymm k ymm
|
|
// VCVTTPS2UDQ ymm ymm
|
|
// VCVTTPS2UDQ m512 k zmm
|
|
// VCVTTPS2UDQ m512 zmm
|
|
// VCVTTPS2UDQ zmm k zmm
|
|
// VCVTTPS2UDQ zmm zmm
|
|
//
|
|
// Construct and append a VCVTTPS2UDQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTTPS2UDQ(ops ...operand.Op) { ctx.VCVTTPS2UDQ(ops...) }
|
|
|
|
// VCVTTPS2UDQ_BCST: Convert with Truncation Packed Single-Precision Floating-Point Values to Packed Unsigned Doubleword Integer Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTPS2UDQ.BCST m32 k xmm
|
|
// VCVTTPS2UDQ.BCST m32 k ymm
|
|
// VCVTTPS2UDQ.BCST m32 xmm
|
|
// VCVTTPS2UDQ.BCST m32 ymm
|
|
// VCVTTPS2UDQ.BCST m32 k zmm
|
|
// VCVTTPS2UDQ.BCST m32 zmm
|
|
//
|
|
// Construct and append a VCVTTPS2UDQ.BCST instruction to the active function.
|
|
func (c *Context) VCVTTPS2UDQ_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTTPS2UDQ_BCST(ops...))
|
|
}
|
|
|
|
// VCVTTPS2UDQ_BCST: Convert with Truncation Packed Single-Precision Floating-Point Values to Packed Unsigned Doubleword Integer Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTPS2UDQ.BCST m32 k xmm
|
|
// VCVTTPS2UDQ.BCST m32 k ymm
|
|
// VCVTTPS2UDQ.BCST m32 xmm
|
|
// VCVTTPS2UDQ.BCST m32 ymm
|
|
// VCVTTPS2UDQ.BCST m32 k zmm
|
|
// VCVTTPS2UDQ.BCST m32 zmm
|
|
//
|
|
// Construct and append a VCVTTPS2UDQ.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTTPS2UDQ_BCST(ops ...operand.Op) { ctx.VCVTTPS2UDQ_BCST(ops...) }
|
|
|
|
// VCVTTPS2UDQ_BCST_Z: Convert with Truncation Packed Single-Precision Floating-Point Values to Packed Unsigned Doubleword Integer Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTPS2UDQ.BCST.Z m32 k xmm
|
|
// VCVTTPS2UDQ.BCST.Z m32 k ymm
|
|
// VCVTTPS2UDQ.BCST.Z m32 k zmm
|
|
//
|
|
// Construct and append a VCVTTPS2UDQ.BCST.Z instruction to the active function.
|
|
func (c *Context) VCVTTPS2UDQ_BCST_Z(m, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VCVTTPS2UDQ_BCST_Z(m, k, xyz))
|
|
}
|
|
|
|
// VCVTTPS2UDQ_BCST_Z: Convert with Truncation Packed Single-Precision Floating-Point Values to Packed Unsigned Doubleword Integer Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTPS2UDQ.BCST.Z m32 k xmm
|
|
// VCVTTPS2UDQ.BCST.Z m32 k ymm
|
|
// VCVTTPS2UDQ.BCST.Z m32 k zmm
|
|
//
|
|
// Construct and append a VCVTTPS2UDQ.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTTPS2UDQ_BCST_Z(m, k, xyz operand.Op) { ctx.VCVTTPS2UDQ_BCST_Z(m, k, xyz) }
|
|
|
|
// VCVTTPS2UDQ_SAE: Convert with Truncation Packed Single-Precision Floating-Point Values to Packed Unsigned Doubleword Integer Values (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTPS2UDQ.SAE zmm k zmm
|
|
// VCVTTPS2UDQ.SAE zmm zmm
|
|
//
|
|
// Construct and append a VCVTTPS2UDQ.SAE instruction to the active function.
|
|
func (c *Context) VCVTTPS2UDQ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTTPS2UDQ_SAE(ops...))
|
|
}
|
|
|
|
// VCVTTPS2UDQ_SAE: Convert with Truncation Packed Single-Precision Floating-Point Values to Packed Unsigned Doubleword Integer Values (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTPS2UDQ.SAE zmm k zmm
|
|
// VCVTTPS2UDQ.SAE zmm zmm
|
|
//
|
|
// Construct and append a VCVTTPS2UDQ.SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTTPS2UDQ_SAE(ops ...operand.Op) { ctx.VCVTTPS2UDQ_SAE(ops...) }
|
|
|
|
// VCVTTPS2UDQ_SAE_Z: Convert with Truncation Packed Single-Precision Floating-Point Values to Packed Unsigned Doubleword Integer Values (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTPS2UDQ.SAE.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTTPS2UDQ.SAE.Z instruction to the active function.
|
|
func (c *Context) VCVTTPS2UDQ_SAE_Z(z, k, z1 operand.Op) {
|
|
c.addinstruction(x86.VCVTTPS2UDQ_SAE_Z(z, k, z1))
|
|
}
|
|
|
|
// VCVTTPS2UDQ_SAE_Z: Convert with Truncation Packed Single-Precision Floating-Point Values to Packed Unsigned Doubleword Integer Values (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTPS2UDQ.SAE.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTTPS2UDQ.SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTTPS2UDQ_SAE_Z(z, k, z1 operand.Op) { ctx.VCVTTPS2UDQ_SAE_Z(z, k, z1) }
|
|
|
|
// VCVTTPS2UDQ_Z: Convert with Truncation Packed Single-Precision Floating-Point Values to Packed Unsigned Doubleword Integer Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTPS2UDQ.Z m128 k xmm
|
|
// VCVTTPS2UDQ.Z m256 k ymm
|
|
// VCVTTPS2UDQ.Z xmm k xmm
|
|
// VCVTTPS2UDQ.Z ymm k ymm
|
|
// VCVTTPS2UDQ.Z m512 k zmm
|
|
// VCVTTPS2UDQ.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTTPS2UDQ.Z instruction to the active function.
|
|
func (c *Context) VCVTTPS2UDQ_Z(mxyz, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VCVTTPS2UDQ_Z(mxyz, k, xyz))
|
|
}
|
|
|
|
// VCVTTPS2UDQ_Z: Convert with Truncation Packed Single-Precision Floating-Point Values to Packed Unsigned Doubleword Integer Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTPS2UDQ.Z m128 k xmm
|
|
// VCVTTPS2UDQ.Z m256 k ymm
|
|
// VCVTTPS2UDQ.Z xmm k xmm
|
|
// VCVTTPS2UDQ.Z ymm k ymm
|
|
// VCVTTPS2UDQ.Z m512 k zmm
|
|
// VCVTTPS2UDQ.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTTPS2UDQ.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTTPS2UDQ_Z(mxyz, k, xyz operand.Op) { ctx.VCVTTPS2UDQ_Z(mxyz, k, xyz) }
|
|
|
|
// VCVTTPS2UQQ: Convert with Truncation Packed Single Precision Floating-Point Values to Packed Unsigned Quadword Integer Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTPS2UQQ m128 k ymm
|
|
// VCVTTPS2UQQ m128 ymm
|
|
// VCVTTPS2UQQ m64 k xmm
|
|
// VCVTTPS2UQQ m64 xmm
|
|
// VCVTTPS2UQQ xmm k xmm
|
|
// VCVTTPS2UQQ xmm k ymm
|
|
// VCVTTPS2UQQ xmm xmm
|
|
// VCVTTPS2UQQ xmm ymm
|
|
// VCVTTPS2UQQ m256 k zmm
|
|
// VCVTTPS2UQQ m256 zmm
|
|
// VCVTTPS2UQQ ymm k zmm
|
|
// VCVTTPS2UQQ ymm zmm
|
|
//
|
|
// Construct and append a VCVTTPS2UQQ instruction to the active function.
|
|
func (c *Context) VCVTTPS2UQQ(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTTPS2UQQ(ops...))
|
|
}
|
|
|
|
// VCVTTPS2UQQ: Convert with Truncation Packed Single Precision Floating-Point Values to Packed Unsigned Quadword Integer Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTPS2UQQ m128 k ymm
|
|
// VCVTTPS2UQQ m128 ymm
|
|
// VCVTTPS2UQQ m64 k xmm
|
|
// VCVTTPS2UQQ m64 xmm
|
|
// VCVTTPS2UQQ xmm k xmm
|
|
// VCVTTPS2UQQ xmm k ymm
|
|
// VCVTTPS2UQQ xmm xmm
|
|
// VCVTTPS2UQQ xmm ymm
|
|
// VCVTTPS2UQQ m256 k zmm
|
|
// VCVTTPS2UQQ m256 zmm
|
|
// VCVTTPS2UQQ ymm k zmm
|
|
// VCVTTPS2UQQ ymm zmm
|
|
//
|
|
// Construct and append a VCVTTPS2UQQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTTPS2UQQ(ops ...operand.Op) { ctx.VCVTTPS2UQQ(ops...) }
|
|
|
|
// VCVTTPS2UQQ_BCST: Convert with Truncation Packed Single Precision Floating-Point Values to Packed Unsigned Quadword Integer Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTPS2UQQ.BCST m32 k xmm
|
|
// VCVTTPS2UQQ.BCST m32 k ymm
|
|
// VCVTTPS2UQQ.BCST m32 xmm
|
|
// VCVTTPS2UQQ.BCST m32 ymm
|
|
// VCVTTPS2UQQ.BCST m32 k zmm
|
|
// VCVTTPS2UQQ.BCST m32 zmm
|
|
//
|
|
// Construct and append a VCVTTPS2UQQ.BCST instruction to the active function.
|
|
func (c *Context) VCVTTPS2UQQ_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTTPS2UQQ_BCST(ops...))
|
|
}
|
|
|
|
// VCVTTPS2UQQ_BCST: Convert with Truncation Packed Single Precision Floating-Point Values to Packed Unsigned Quadword Integer Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTPS2UQQ.BCST m32 k xmm
|
|
// VCVTTPS2UQQ.BCST m32 k ymm
|
|
// VCVTTPS2UQQ.BCST m32 xmm
|
|
// VCVTTPS2UQQ.BCST m32 ymm
|
|
// VCVTTPS2UQQ.BCST m32 k zmm
|
|
// VCVTTPS2UQQ.BCST m32 zmm
|
|
//
|
|
// Construct and append a VCVTTPS2UQQ.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTTPS2UQQ_BCST(ops ...operand.Op) { ctx.VCVTTPS2UQQ_BCST(ops...) }
|
|
|
|
// VCVTTPS2UQQ_BCST_Z: Convert with Truncation Packed Single Precision Floating-Point Values to Packed Unsigned Quadword Integer Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTPS2UQQ.BCST.Z m32 k xmm
|
|
// VCVTTPS2UQQ.BCST.Z m32 k ymm
|
|
// VCVTTPS2UQQ.BCST.Z m32 k zmm
|
|
//
|
|
// Construct and append a VCVTTPS2UQQ.BCST.Z instruction to the active function.
|
|
func (c *Context) VCVTTPS2UQQ_BCST_Z(m, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VCVTTPS2UQQ_BCST_Z(m, k, xyz))
|
|
}
|
|
|
|
// VCVTTPS2UQQ_BCST_Z: Convert with Truncation Packed Single Precision Floating-Point Values to Packed Unsigned Quadword Integer Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTPS2UQQ.BCST.Z m32 k xmm
|
|
// VCVTTPS2UQQ.BCST.Z m32 k ymm
|
|
// VCVTTPS2UQQ.BCST.Z m32 k zmm
|
|
//
|
|
// Construct and append a VCVTTPS2UQQ.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTTPS2UQQ_BCST_Z(m, k, xyz operand.Op) { ctx.VCVTTPS2UQQ_BCST_Z(m, k, xyz) }
|
|
|
|
// VCVTTPS2UQQ_SAE: Convert with Truncation Packed Single Precision Floating-Point Values to Packed Unsigned Quadword Integer Values (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTPS2UQQ.SAE ymm k zmm
|
|
// VCVTTPS2UQQ.SAE ymm zmm
|
|
//
|
|
// Construct and append a VCVTTPS2UQQ.SAE instruction to the active function.
|
|
func (c *Context) VCVTTPS2UQQ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTTPS2UQQ_SAE(ops...))
|
|
}
|
|
|
|
// VCVTTPS2UQQ_SAE: Convert with Truncation Packed Single Precision Floating-Point Values to Packed Unsigned Quadword Integer Values (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTPS2UQQ.SAE ymm k zmm
|
|
// VCVTTPS2UQQ.SAE ymm zmm
|
|
//
|
|
// Construct and append a VCVTTPS2UQQ.SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTTPS2UQQ_SAE(ops ...operand.Op) { ctx.VCVTTPS2UQQ_SAE(ops...) }
|
|
|
|
// VCVTTPS2UQQ_SAE_Z: Convert with Truncation Packed Single Precision Floating-Point Values to Packed Unsigned Quadword Integer Values (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTPS2UQQ.SAE.Z ymm k zmm
|
|
//
|
|
// Construct and append a VCVTTPS2UQQ.SAE.Z instruction to the active function.
|
|
func (c *Context) VCVTTPS2UQQ_SAE_Z(y, k, z operand.Op) {
|
|
c.addinstruction(x86.VCVTTPS2UQQ_SAE_Z(y, k, z))
|
|
}
|
|
|
|
// VCVTTPS2UQQ_SAE_Z: Convert with Truncation Packed Single Precision Floating-Point Values to Packed Unsigned Quadword Integer Values (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTPS2UQQ.SAE.Z ymm k zmm
|
|
//
|
|
// Construct and append a VCVTTPS2UQQ.SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTTPS2UQQ_SAE_Z(y, k, z operand.Op) { ctx.VCVTTPS2UQQ_SAE_Z(y, k, z) }
|
|
|
|
// VCVTTPS2UQQ_Z: Convert with Truncation Packed Single Precision Floating-Point Values to Packed Unsigned Quadword Integer Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTPS2UQQ.Z m128 k ymm
|
|
// VCVTTPS2UQQ.Z m64 k xmm
|
|
// VCVTTPS2UQQ.Z xmm k xmm
|
|
// VCVTTPS2UQQ.Z xmm k ymm
|
|
// VCVTTPS2UQQ.Z m256 k zmm
|
|
// VCVTTPS2UQQ.Z ymm k zmm
|
|
//
|
|
// Construct and append a VCVTTPS2UQQ.Z instruction to the active function.
|
|
func (c *Context) VCVTTPS2UQQ_Z(mxy, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VCVTTPS2UQQ_Z(mxy, k, xyz))
|
|
}
|
|
|
|
// VCVTTPS2UQQ_Z: Convert with Truncation Packed Single Precision Floating-Point Values to Packed Unsigned Quadword Integer Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTPS2UQQ.Z m128 k ymm
|
|
// VCVTTPS2UQQ.Z m64 k xmm
|
|
// VCVTTPS2UQQ.Z xmm k xmm
|
|
// VCVTTPS2UQQ.Z xmm k ymm
|
|
// VCVTTPS2UQQ.Z m256 k zmm
|
|
// VCVTTPS2UQQ.Z ymm k zmm
|
|
//
|
|
// Construct and append a VCVTTPS2UQQ.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTTPS2UQQ_Z(mxy, k, xyz operand.Op) { ctx.VCVTTPS2UQQ_Z(mxy, k, xyz) }
|
|
|
|
// VCVTTSD2SI: Convert with Truncation Scalar Double-Precision FP Value to Signed Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTSD2SI m64 r32
|
|
// VCVTTSD2SI xmm r32
|
|
//
|
|
// Construct and append a VCVTTSD2SI instruction to the active function.
|
|
func (c *Context) VCVTTSD2SI(mx, r operand.Op) {
|
|
c.addinstruction(x86.VCVTTSD2SI(mx, r))
|
|
}
|
|
|
|
// VCVTTSD2SI: Convert with Truncation Scalar Double-Precision FP Value to Signed Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTSD2SI m64 r32
|
|
// VCVTTSD2SI xmm r32
|
|
//
|
|
// Construct and append a VCVTTSD2SI instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTTSD2SI(mx, r operand.Op) { ctx.VCVTTSD2SI(mx, r) }
|
|
|
|
// VCVTTSD2SIQ: Convert with Truncation Scalar Double-Precision FP Value to Signed Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTSD2SIQ m64 r64
|
|
// VCVTTSD2SIQ xmm r64
|
|
//
|
|
// Construct and append a VCVTTSD2SIQ instruction to the active function.
|
|
func (c *Context) VCVTTSD2SIQ(mx, r operand.Op) {
|
|
c.addinstruction(x86.VCVTTSD2SIQ(mx, r))
|
|
}
|
|
|
|
// VCVTTSD2SIQ: Convert with Truncation Scalar Double-Precision FP Value to Signed Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTSD2SIQ m64 r64
|
|
// VCVTTSD2SIQ xmm r64
|
|
//
|
|
// Construct and append a VCVTTSD2SIQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTTSD2SIQ(mx, r operand.Op) { ctx.VCVTTSD2SIQ(mx, r) }
|
|
|
|
// VCVTTSD2SIQ_SAE: Convert with Truncation Scalar Double-Precision FP Value to Signed Integer (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTSD2SIQ.SAE xmm r64
|
|
//
|
|
// Construct and append a VCVTTSD2SIQ.SAE instruction to the active function.
|
|
func (c *Context) VCVTTSD2SIQ_SAE(x, r operand.Op) {
|
|
c.addinstruction(x86.VCVTTSD2SIQ_SAE(x, r))
|
|
}
|
|
|
|
// VCVTTSD2SIQ_SAE: Convert with Truncation Scalar Double-Precision FP Value to Signed Integer (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTSD2SIQ.SAE xmm r64
|
|
//
|
|
// Construct and append a VCVTTSD2SIQ.SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTTSD2SIQ_SAE(x, r operand.Op) { ctx.VCVTTSD2SIQ_SAE(x, r) }
|
|
|
|
// VCVTTSD2SI_SAE: Convert with Truncation Scalar Double-Precision FP Value to Signed Integer (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTSD2SI.SAE xmm r32
|
|
//
|
|
// Construct and append a VCVTTSD2SI.SAE instruction to the active function.
|
|
func (c *Context) VCVTTSD2SI_SAE(x, r operand.Op) {
|
|
c.addinstruction(x86.VCVTTSD2SI_SAE(x, r))
|
|
}
|
|
|
|
// VCVTTSD2SI_SAE: Convert with Truncation Scalar Double-Precision FP Value to Signed Integer (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTSD2SI.SAE xmm r32
|
|
//
|
|
// Construct and append a VCVTTSD2SI.SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTTSD2SI_SAE(x, r operand.Op) { ctx.VCVTTSD2SI_SAE(x, r) }
|
|
|
|
// VCVTTSD2USIL: Convert with Truncation Scalar Double-Precision Floating-Point Value to Unsigned Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTSD2USIL m64 r32
|
|
// VCVTTSD2USIL xmm r32
|
|
//
|
|
// Construct and append a VCVTTSD2USIL instruction to the active function.
|
|
func (c *Context) VCVTTSD2USIL(mx, r operand.Op) {
|
|
c.addinstruction(x86.VCVTTSD2USIL(mx, r))
|
|
}
|
|
|
|
// VCVTTSD2USIL: Convert with Truncation Scalar Double-Precision Floating-Point Value to Unsigned Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTSD2USIL m64 r32
|
|
// VCVTTSD2USIL xmm r32
|
|
//
|
|
// Construct and append a VCVTTSD2USIL instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTTSD2USIL(mx, r operand.Op) { ctx.VCVTTSD2USIL(mx, r) }
|
|
|
|
// VCVTTSD2USIL_SAE: Convert with Truncation Scalar Double-Precision Floating-Point Value to Unsigned Integer (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTSD2USIL.SAE xmm r32
|
|
//
|
|
// Construct and append a VCVTTSD2USIL.SAE instruction to the active function.
|
|
func (c *Context) VCVTTSD2USIL_SAE(x, r operand.Op) {
|
|
c.addinstruction(x86.VCVTTSD2USIL_SAE(x, r))
|
|
}
|
|
|
|
// VCVTTSD2USIL_SAE: Convert with Truncation Scalar Double-Precision Floating-Point Value to Unsigned Integer (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTSD2USIL.SAE xmm r32
|
|
//
|
|
// Construct and append a VCVTTSD2USIL.SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTTSD2USIL_SAE(x, r operand.Op) { ctx.VCVTTSD2USIL_SAE(x, r) }
|
|
|
|
// VCVTTSD2USIQ: Convert with Truncation Scalar Double-Precision Floating-Point Value to Unsigned Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTSD2USIQ m64 r64
|
|
// VCVTTSD2USIQ xmm r64
|
|
//
|
|
// Construct and append a VCVTTSD2USIQ instruction to the active function.
|
|
func (c *Context) VCVTTSD2USIQ(mx, r operand.Op) {
|
|
c.addinstruction(x86.VCVTTSD2USIQ(mx, r))
|
|
}
|
|
|
|
// VCVTTSD2USIQ: Convert with Truncation Scalar Double-Precision Floating-Point Value to Unsigned Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTSD2USIQ m64 r64
|
|
// VCVTTSD2USIQ xmm r64
|
|
//
|
|
// Construct and append a VCVTTSD2USIQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTTSD2USIQ(mx, r operand.Op) { ctx.VCVTTSD2USIQ(mx, r) }
|
|
|
|
// VCVTTSD2USIQ_SAE: Convert with Truncation Scalar Double-Precision Floating-Point Value to Unsigned Integer (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTSD2USIQ.SAE xmm r64
|
|
//
|
|
// Construct and append a VCVTTSD2USIQ.SAE instruction to the active function.
|
|
func (c *Context) VCVTTSD2USIQ_SAE(x, r operand.Op) {
|
|
c.addinstruction(x86.VCVTTSD2USIQ_SAE(x, r))
|
|
}
|
|
|
|
// VCVTTSD2USIQ_SAE: Convert with Truncation Scalar Double-Precision Floating-Point Value to Unsigned Integer (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTSD2USIQ.SAE xmm r64
|
|
//
|
|
// Construct and append a VCVTTSD2USIQ.SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTTSD2USIQ_SAE(x, r operand.Op) { ctx.VCVTTSD2USIQ_SAE(x, r) }
|
|
|
|
// VCVTTSS2SI: Convert with Truncation Scalar Single-Precision FP Value to Dword Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTSS2SI m32 r32
|
|
// VCVTTSS2SI xmm r32
|
|
//
|
|
// Construct and append a VCVTTSS2SI instruction to the active function.
|
|
func (c *Context) VCVTTSS2SI(mx, r operand.Op) {
|
|
c.addinstruction(x86.VCVTTSS2SI(mx, r))
|
|
}
|
|
|
|
// VCVTTSS2SI: Convert with Truncation Scalar Single-Precision FP Value to Dword Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTSS2SI m32 r32
|
|
// VCVTTSS2SI xmm r32
|
|
//
|
|
// Construct and append a VCVTTSS2SI instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTTSS2SI(mx, r operand.Op) { ctx.VCVTTSS2SI(mx, r) }
|
|
|
|
// VCVTTSS2SIQ: Convert with Truncation Scalar Single-Precision FP Value to Dword Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTSS2SIQ m32 r64
|
|
// VCVTTSS2SIQ xmm r64
|
|
//
|
|
// Construct and append a VCVTTSS2SIQ instruction to the active function.
|
|
func (c *Context) VCVTTSS2SIQ(mx, r operand.Op) {
|
|
c.addinstruction(x86.VCVTTSS2SIQ(mx, r))
|
|
}
|
|
|
|
// VCVTTSS2SIQ: Convert with Truncation Scalar Single-Precision FP Value to Dword Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTSS2SIQ m32 r64
|
|
// VCVTTSS2SIQ xmm r64
|
|
//
|
|
// Construct and append a VCVTTSS2SIQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTTSS2SIQ(mx, r operand.Op) { ctx.VCVTTSS2SIQ(mx, r) }
|
|
|
|
// VCVTTSS2SIQ_SAE: Convert with Truncation Scalar Single-Precision FP Value to Dword Integer (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTSS2SIQ.SAE xmm r64
|
|
//
|
|
// Construct and append a VCVTTSS2SIQ.SAE instruction to the active function.
|
|
func (c *Context) VCVTTSS2SIQ_SAE(x, r operand.Op) {
|
|
c.addinstruction(x86.VCVTTSS2SIQ_SAE(x, r))
|
|
}
|
|
|
|
// VCVTTSS2SIQ_SAE: Convert with Truncation Scalar Single-Precision FP Value to Dword Integer (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTSS2SIQ.SAE xmm r64
|
|
//
|
|
// Construct and append a VCVTTSS2SIQ.SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTTSS2SIQ_SAE(x, r operand.Op) { ctx.VCVTTSS2SIQ_SAE(x, r) }
|
|
|
|
// VCVTTSS2SI_SAE: Convert with Truncation Scalar Single-Precision FP Value to Dword Integer (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTSS2SI.SAE xmm r32
|
|
//
|
|
// Construct and append a VCVTTSS2SI.SAE instruction to the active function.
|
|
func (c *Context) VCVTTSS2SI_SAE(x, r operand.Op) {
|
|
c.addinstruction(x86.VCVTTSS2SI_SAE(x, r))
|
|
}
|
|
|
|
// VCVTTSS2SI_SAE: Convert with Truncation Scalar Single-Precision FP Value to Dword Integer (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTSS2SI.SAE xmm r32
|
|
//
|
|
// Construct and append a VCVTTSS2SI.SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTTSS2SI_SAE(x, r operand.Op) { ctx.VCVTTSS2SI_SAE(x, r) }
|
|
|
|
// VCVTTSS2USIL: Convert with Truncation Scalar Single-Precision Floating-Point Value to Unsigned Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTSS2USIL m32 r32
|
|
// VCVTTSS2USIL xmm r32
|
|
//
|
|
// Construct and append a VCVTTSS2USIL instruction to the active function.
|
|
func (c *Context) VCVTTSS2USIL(mx, r operand.Op) {
|
|
c.addinstruction(x86.VCVTTSS2USIL(mx, r))
|
|
}
|
|
|
|
// VCVTTSS2USIL: Convert with Truncation Scalar Single-Precision Floating-Point Value to Unsigned Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTSS2USIL m32 r32
|
|
// VCVTTSS2USIL xmm r32
|
|
//
|
|
// Construct and append a VCVTTSS2USIL instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTTSS2USIL(mx, r operand.Op) { ctx.VCVTTSS2USIL(mx, r) }
|
|
|
|
// VCVTTSS2USIL_SAE: Convert with Truncation Scalar Single-Precision Floating-Point Value to Unsigned Integer (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTSS2USIL.SAE xmm r32
|
|
//
|
|
// Construct and append a VCVTTSS2USIL.SAE instruction to the active function.
|
|
func (c *Context) VCVTTSS2USIL_SAE(x, r operand.Op) {
|
|
c.addinstruction(x86.VCVTTSS2USIL_SAE(x, r))
|
|
}
|
|
|
|
// VCVTTSS2USIL_SAE: Convert with Truncation Scalar Single-Precision Floating-Point Value to Unsigned Integer (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTSS2USIL.SAE xmm r32
|
|
//
|
|
// Construct and append a VCVTTSS2USIL.SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTTSS2USIL_SAE(x, r operand.Op) { ctx.VCVTTSS2USIL_SAE(x, r) }
|
|
|
|
// VCVTTSS2USIQ: Convert with Truncation Scalar Single-Precision Floating-Point Value to Unsigned Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTSS2USIQ m32 r64
|
|
// VCVTTSS2USIQ xmm r64
|
|
//
|
|
// Construct and append a VCVTTSS2USIQ instruction to the active function.
|
|
func (c *Context) VCVTTSS2USIQ(mx, r operand.Op) {
|
|
c.addinstruction(x86.VCVTTSS2USIQ(mx, r))
|
|
}
|
|
|
|
// VCVTTSS2USIQ: Convert with Truncation Scalar Single-Precision Floating-Point Value to Unsigned Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTSS2USIQ m32 r64
|
|
// VCVTTSS2USIQ xmm r64
|
|
//
|
|
// Construct and append a VCVTTSS2USIQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTTSS2USIQ(mx, r operand.Op) { ctx.VCVTTSS2USIQ(mx, r) }
|
|
|
|
// VCVTTSS2USIQ_SAE: Convert with Truncation Scalar Single-Precision Floating-Point Value to Unsigned Integer (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTSS2USIQ.SAE xmm r64
|
|
//
|
|
// Construct and append a VCVTTSS2USIQ.SAE instruction to the active function.
|
|
func (c *Context) VCVTTSS2USIQ_SAE(x, r operand.Op) {
|
|
c.addinstruction(x86.VCVTTSS2USIQ_SAE(x, r))
|
|
}
|
|
|
|
// VCVTTSS2USIQ_SAE: Convert with Truncation Scalar Single-Precision Floating-Point Value to Unsigned Integer (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTTSS2USIQ.SAE xmm r64
|
|
//
|
|
// Construct and append a VCVTTSS2USIQ.SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTTSS2USIQ_SAE(x, r operand.Op) { ctx.VCVTTSS2USIQ_SAE(x, r) }
|
|
|
|
// VCVTUDQ2PD: Convert Packed Unsigned Doubleword Integers to Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTUDQ2PD m128 k ymm
|
|
// VCVTUDQ2PD m128 ymm
|
|
// VCVTUDQ2PD m64 k xmm
|
|
// VCVTUDQ2PD m64 xmm
|
|
// VCVTUDQ2PD xmm k xmm
|
|
// VCVTUDQ2PD xmm k ymm
|
|
// VCVTUDQ2PD xmm xmm
|
|
// VCVTUDQ2PD xmm ymm
|
|
// VCVTUDQ2PD m256 k zmm
|
|
// VCVTUDQ2PD m256 zmm
|
|
// VCVTUDQ2PD ymm k zmm
|
|
// VCVTUDQ2PD ymm zmm
|
|
//
|
|
// Construct and append a VCVTUDQ2PD instruction to the active function.
|
|
func (c *Context) VCVTUDQ2PD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTUDQ2PD(ops...))
|
|
}
|
|
|
|
// VCVTUDQ2PD: Convert Packed Unsigned Doubleword Integers to Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTUDQ2PD m128 k ymm
|
|
// VCVTUDQ2PD m128 ymm
|
|
// VCVTUDQ2PD m64 k xmm
|
|
// VCVTUDQ2PD m64 xmm
|
|
// VCVTUDQ2PD xmm k xmm
|
|
// VCVTUDQ2PD xmm k ymm
|
|
// VCVTUDQ2PD xmm xmm
|
|
// VCVTUDQ2PD xmm ymm
|
|
// VCVTUDQ2PD m256 k zmm
|
|
// VCVTUDQ2PD m256 zmm
|
|
// VCVTUDQ2PD ymm k zmm
|
|
// VCVTUDQ2PD ymm zmm
|
|
//
|
|
// Construct and append a VCVTUDQ2PD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTUDQ2PD(ops ...operand.Op) { ctx.VCVTUDQ2PD(ops...) }
|
|
|
|
// VCVTUDQ2PD_BCST: Convert Packed Unsigned Doubleword Integers to Packed Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTUDQ2PD.BCST m32 k xmm
|
|
// VCVTUDQ2PD.BCST m32 k ymm
|
|
// VCVTUDQ2PD.BCST m32 xmm
|
|
// VCVTUDQ2PD.BCST m32 ymm
|
|
// VCVTUDQ2PD.BCST m32 k zmm
|
|
// VCVTUDQ2PD.BCST m32 zmm
|
|
//
|
|
// Construct and append a VCVTUDQ2PD.BCST instruction to the active function.
|
|
func (c *Context) VCVTUDQ2PD_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTUDQ2PD_BCST(ops...))
|
|
}
|
|
|
|
// VCVTUDQ2PD_BCST: Convert Packed Unsigned Doubleword Integers to Packed Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTUDQ2PD.BCST m32 k xmm
|
|
// VCVTUDQ2PD.BCST m32 k ymm
|
|
// VCVTUDQ2PD.BCST m32 xmm
|
|
// VCVTUDQ2PD.BCST m32 ymm
|
|
// VCVTUDQ2PD.BCST m32 k zmm
|
|
// VCVTUDQ2PD.BCST m32 zmm
|
|
//
|
|
// Construct and append a VCVTUDQ2PD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTUDQ2PD_BCST(ops ...operand.Op) { ctx.VCVTUDQ2PD_BCST(ops...) }
|
|
|
|
// VCVTUDQ2PD_BCST_Z: Convert Packed Unsigned Doubleword Integers to Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTUDQ2PD.BCST.Z m32 k xmm
|
|
// VCVTUDQ2PD.BCST.Z m32 k ymm
|
|
// VCVTUDQ2PD.BCST.Z m32 k zmm
|
|
//
|
|
// Construct and append a VCVTUDQ2PD.BCST.Z instruction to the active function.
|
|
func (c *Context) VCVTUDQ2PD_BCST_Z(m, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VCVTUDQ2PD_BCST_Z(m, k, xyz))
|
|
}
|
|
|
|
// VCVTUDQ2PD_BCST_Z: Convert Packed Unsigned Doubleword Integers to Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTUDQ2PD.BCST.Z m32 k xmm
|
|
// VCVTUDQ2PD.BCST.Z m32 k ymm
|
|
// VCVTUDQ2PD.BCST.Z m32 k zmm
|
|
//
|
|
// Construct and append a VCVTUDQ2PD.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTUDQ2PD_BCST_Z(m, k, xyz operand.Op) { ctx.VCVTUDQ2PD_BCST_Z(m, k, xyz) }
|
|
|
|
// VCVTUDQ2PD_Z: Convert Packed Unsigned Doubleword Integers to Packed Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTUDQ2PD.Z m128 k ymm
|
|
// VCVTUDQ2PD.Z m64 k xmm
|
|
// VCVTUDQ2PD.Z xmm k xmm
|
|
// VCVTUDQ2PD.Z xmm k ymm
|
|
// VCVTUDQ2PD.Z m256 k zmm
|
|
// VCVTUDQ2PD.Z ymm k zmm
|
|
//
|
|
// Construct and append a VCVTUDQ2PD.Z instruction to the active function.
|
|
func (c *Context) VCVTUDQ2PD_Z(mxy, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VCVTUDQ2PD_Z(mxy, k, xyz))
|
|
}
|
|
|
|
// VCVTUDQ2PD_Z: Convert Packed Unsigned Doubleword Integers to Packed Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTUDQ2PD.Z m128 k ymm
|
|
// VCVTUDQ2PD.Z m64 k xmm
|
|
// VCVTUDQ2PD.Z xmm k xmm
|
|
// VCVTUDQ2PD.Z xmm k ymm
|
|
// VCVTUDQ2PD.Z m256 k zmm
|
|
// VCVTUDQ2PD.Z ymm k zmm
|
|
//
|
|
// Construct and append a VCVTUDQ2PD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTUDQ2PD_Z(mxy, k, xyz operand.Op) { ctx.VCVTUDQ2PD_Z(mxy, k, xyz) }
|
|
|
|
// VCVTUDQ2PS: Convert Packed Unsigned Doubleword Integers to Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTUDQ2PS m128 k xmm
|
|
// VCVTUDQ2PS m128 xmm
|
|
// VCVTUDQ2PS m256 k ymm
|
|
// VCVTUDQ2PS m256 ymm
|
|
// VCVTUDQ2PS xmm k xmm
|
|
// VCVTUDQ2PS xmm xmm
|
|
// VCVTUDQ2PS ymm k ymm
|
|
// VCVTUDQ2PS ymm ymm
|
|
// VCVTUDQ2PS m512 k zmm
|
|
// VCVTUDQ2PS m512 zmm
|
|
// VCVTUDQ2PS zmm k zmm
|
|
// VCVTUDQ2PS zmm zmm
|
|
//
|
|
// Construct and append a VCVTUDQ2PS instruction to the active function.
|
|
func (c *Context) VCVTUDQ2PS(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTUDQ2PS(ops...))
|
|
}
|
|
|
|
// VCVTUDQ2PS: Convert Packed Unsigned Doubleword Integers to Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTUDQ2PS m128 k xmm
|
|
// VCVTUDQ2PS m128 xmm
|
|
// VCVTUDQ2PS m256 k ymm
|
|
// VCVTUDQ2PS m256 ymm
|
|
// VCVTUDQ2PS xmm k xmm
|
|
// VCVTUDQ2PS xmm xmm
|
|
// VCVTUDQ2PS ymm k ymm
|
|
// VCVTUDQ2PS ymm ymm
|
|
// VCVTUDQ2PS m512 k zmm
|
|
// VCVTUDQ2PS m512 zmm
|
|
// VCVTUDQ2PS zmm k zmm
|
|
// VCVTUDQ2PS zmm zmm
|
|
//
|
|
// Construct and append a VCVTUDQ2PS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTUDQ2PS(ops ...operand.Op) { ctx.VCVTUDQ2PS(ops...) }
|
|
|
|
// VCVTUDQ2PS_BCST: Convert Packed Unsigned Doubleword Integers to Packed Single-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTUDQ2PS.BCST m32 k xmm
|
|
// VCVTUDQ2PS.BCST m32 k ymm
|
|
// VCVTUDQ2PS.BCST m32 xmm
|
|
// VCVTUDQ2PS.BCST m32 ymm
|
|
// VCVTUDQ2PS.BCST m32 k zmm
|
|
// VCVTUDQ2PS.BCST m32 zmm
|
|
//
|
|
// Construct and append a VCVTUDQ2PS.BCST instruction to the active function.
|
|
func (c *Context) VCVTUDQ2PS_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTUDQ2PS_BCST(ops...))
|
|
}
|
|
|
|
// VCVTUDQ2PS_BCST: Convert Packed Unsigned Doubleword Integers to Packed Single-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTUDQ2PS.BCST m32 k xmm
|
|
// VCVTUDQ2PS.BCST m32 k ymm
|
|
// VCVTUDQ2PS.BCST m32 xmm
|
|
// VCVTUDQ2PS.BCST m32 ymm
|
|
// VCVTUDQ2PS.BCST m32 k zmm
|
|
// VCVTUDQ2PS.BCST m32 zmm
|
|
//
|
|
// Construct and append a VCVTUDQ2PS.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTUDQ2PS_BCST(ops ...operand.Op) { ctx.VCVTUDQ2PS_BCST(ops...) }
|
|
|
|
// VCVTUDQ2PS_BCST_Z: Convert Packed Unsigned Doubleword Integers to Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTUDQ2PS.BCST.Z m32 k xmm
|
|
// VCVTUDQ2PS.BCST.Z m32 k ymm
|
|
// VCVTUDQ2PS.BCST.Z m32 k zmm
|
|
//
|
|
// Construct and append a VCVTUDQ2PS.BCST.Z instruction to the active function.
|
|
func (c *Context) VCVTUDQ2PS_BCST_Z(m, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VCVTUDQ2PS_BCST_Z(m, k, xyz))
|
|
}
|
|
|
|
// VCVTUDQ2PS_BCST_Z: Convert Packed Unsigned Doubleword Integers to Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTUDQ2PS.BCST.Z m32 k xmm
|
|
// VCVTUDQ2PS.BCST.Z m32 k ymm
|
|
// VCVTUDQ2PS.BCST.Z m32 k zmm
|
|
//
|
|
// Construct and append a VCVTUDQ2PS.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTUDQ2PS_BCST_Z(m, k, xyz operand.Op) { ctx.VCVTUDQ2PS_BCST_Z(m, k, xyz) }
|
|
|
|
// VCVTUDQ2PS_RD_SAE: Convert Packed Unsigned Doubleword Integers to Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTUDQ2PS.RD_SAE zmm k zmm
|
|
// VCVTUDQ2PS.RD_SAE zmm zmm
|
|
//
|
|
// Construct and append a VCVTUDQ2PS.RD_SAE instruction to the active function.
|
|
func (c *Context) VCVTUDQ2PS_RD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTUDQ2PS_RD_SAE(ops...))
|
|
}
|
|
|
|
// VCVTUDQ2PS_RD_SAE: Convert Packed Unsigned Doubleword Integers to Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTUDQ2PS.RD_SAE zmm k zmm
|
|
// VCVTUDQ2PS.RD_SAE zmm zmm
|
|
//
|
|
// Construct and append a VCVTUDQ2PS.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTUDQ2PS_RD_SAE(ops ...operand.Op) { ctx.VCVTUDQ2PS_RD_SAE(ops...) }
|
|
|
|
// VCVTUDQ2PS_RD_SAE_Z: Convert Packed Unsigned Doubleword Integers to Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTUDQ2PS.RD_SAE.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTUDQ2PS.RD_SAE.Z instruction to the active function.
|
|
func (c *Context) VCVTUDQ2PS_RD_SAE_Z(z, k, z1 operand.Op) {
|
|
c.addinstruction(x86.VCVTUDQ2PS_RD_SAE_Z(z, k, z1))
|
|
}
|
|
|
|
// VCVTUDQ2PS_RD_SAE_Z: Convert Packed Unsigned Doubleword Integers to Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTUDQ2PS.RD_SAE.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTUDQ2PS.RD_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTUDQ2PS_RD_SAE_Z(z, k, z1 operand.Op) { ctx.VCVTUDQ2PS_RD_SAE_Z(z, k, z1) }
|
|
|
|
// VCVTUDQ2PS_RN_SAE: Convert Packed Unsigned Doubleword Integers to Packed Single-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTUDQ2PS.RN_SAE zmm k zmm
|
|
// VCVTUDQ2PS.RN_SAE zmm zmm
|
|
//
|
|
// Construct and append a VCVTUDQ2PS.RN_SAE instruction to the active function.
|
|
func (c *Context) VCVTUDQ2PS_RN_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTUDQ2PS_RN_SAE(ops...))
|
|
}
|
|
|
|
// VCVTUDQ2PS_RN_SAE: Convert Packed Unsigned Doubleword Integers to Packed Single-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTUDQ2PS.RN_SAE zmm k zmm
|
|
// VCVTUDQ2PS.RN_SAE zmm zmm
|
|
//
|
|
// Construct and append a VCVTUDQ2PS.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTUDQ2PS_RN_SAE(ops ...operand.Op) { ctx.VCVTUDQ2PS_RN_SAE(ops...) }
|
|
|
|
// VCVTUDQ2PS_RN_SAE_Z: Convert Packed Unsigned Doubleword Integers to Packed Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTUDQ2PS.RN_SAE.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTUDQ2PS.RN_SAE.Z instruction to the active function.
|
|
func (c *Context) VCVTUDQ2PS_RN_SAE_Z(z, k, z1 operand.Op) {
|
|
c.addinstruction(x86.VCVTUDQ2PS_RN_SAE_Z(z, k, z1))
|
|
}
|
|
|
|
// VCVTUDQ2PS_RN_SAE_Z: Convert Packed Unsigned Doubleword Integers to Packed Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTUDQ2PS.RN_SAE.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTUDQ2PS.RN_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTUDQ2PS_RN_SAE_Z(z, k, z1 operand.Op) { ctx.VCVTUDQ2PS_RN_SAE_Z(z, k, z1) }
|
|
|
|
// VCVTUDQ2PS_RU_SAE: Convert Packed Unsigned Doubleword Integers to Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTUDQ2PS.RU_SAE zmm k zmm
|
|
// VCVTUDQ2PS.RU_SAE zmm zmm
|
|
//
|
|
// Construct and append a VCVTUDQ2PS.RU_SAE instruction to the active function.
|
|
func (c *Context) VCVTUDQ2PS_RU_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTUDQ2PS_RU_SAE(ops...))
|
|
}
|
|
|
|
// VCVTUDQ2PS_RU_SAE: Convert Packed Unsigned Doubleword Integers to Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTUDQ2PS.RU_SAE zmm k zmm
|
|
// VCVTUDQ2PS.RU_SAE zmm zmm
|
|
//
|
|
// Construct and append a VCVTUDQ2PS.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTUDQ2PS_RU_SAE(ops ...operand.Op) { ctx.VCVTUDQ2PS_RU_SAE(ops...) }
|
|
|
|
// VCVTUDQ2PS_RU_SAE_Z: Convert Packed Unsigned Doubleword Integers to Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTUDQ2PS.RU_SAE.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTUDQ2PS.RU_SAE.Z instruction to the active function.
|
|
func (c *Context) VCVTUDQ2PS_RU_SAE_Z(z, k, z1 operand.Op) {
|
|
c.addinstruction(x86.VCVTUDQ2PS_RU_SAE_Z(z, k, z1))
|
|
}
|
|
|
|
// VCVTUDQ2PS_RU_SAE_Z: Convert Packed Unsigned Doubleword Integers to Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTUDQ2PS.RU_SAE.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTUDQ2PS.RU_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTUDQ2PS_RU_SAE_Z(z, k, z1 operand.Op) { ctx.VCVTUDQ2PS_RU_SAE_Z(z, k, z1) }
|
|
|
|
// VCVTUDQ2PS_RZ_SAE: Convert Packed Unsigned Doubleword Integers to Packed Single-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTUDQ2PS.RZ_SAE zmm k zmm
|
|
// VCVTUDQ2PS.RZ_SAE zmm zmm
|
|
//
|
|
// Construct and append a VCVTUDQ2PS.RZ_SAE instruction to the active function.
|
|
func (c *Context) VCVTUDQ2PS_RZ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTUDQ2PS_RZ_SAE(ops...))
|
|
}
|
|
|
|
// VCVTUDQ2PS_RZ_SAE: Convert Packed Unsigned Doubleword Integers to Packed Single-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTUDQ2PS.RZ_SAE zmm k zmm
|
|
// VCVTUDQ2PS.RZ_SAE zmm zmm
|
|
//
|
|
// Construct and append a VCVTUDQ2PS.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTUDQ2PS_RZ_SAE(ops ...operand.Op) { ctx.VCVTUDQ2PS_RZ_SAE(ops...) }
|
|
|
|
// VCVTUDQ2PS_RZ_SAE_Z: Convert Packed Unsigned Doubleword Integers to Packed Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTUDQ2PS.RZ_SAE.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTUDQ2PS.RZ_SAE.Z instruction to the active function.
|
|
func (c *Context) VCVTUDQ2PS_RZ_SAE_Z(z, k, z1 operand.Op) {
|
|
c.addinstruction(x86.VCVTUDQ2PS_RZ_SAE_Z(z, k, z1))
|
|
}
|
|
|
|
// VCVTUDQ2PS_RZ_SAE_Z: Convert Packed Unsigned Doubleword Integers to Packed Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTUDQ2PS.RZ_SAE.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTUDQ2PS.RZ_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTUDQ2PS_RZ_SAE_Z(z, k, z1 operand.Op) { ctx.VCVTUDQ2PS_RZ_SAE_Z(z, k, z1) }
|
|
|
|
// VCVTUDQ2PS_Z: Convert Packed Unsigned Doubleword Integers to Packed Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTUDQ2PS.Z m128 k xmm
|
|
// VCVTUDQ2PS.Z m256 k ymm
|
|
// VCVTUDQ2PS.Z xmm k xmm
|
|
// VCVTUDQ2PS.Z ymm k ymm
|
|
// VCVTUDQ2PS.Z m512 k zmm
|
|
// VCVTUDQ2PS.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTUDQ2PS.Z instruction to the active function.
|
|
func (c *Context) VCVTUDQ2PS_Z(mxyz, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VCVTUDQ2PS_Z(mxyz, k, xyz))
|
|
}
|
|
|
|
// VCVTUDQ2PS_Z: Convert Packed Unsigned Doubleword Integers to Packed Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTUDQ2PS.Z m128 k xmm
|
|
// VCVTUDQ2PS.Z m256 k ymm
|
|
// VCVTUDQ2PS.Z xmm k xmm
|
|
// VCVTUDQ2PS.Z ymm k ymm
|
|
// VCVTUDQ2PS.Z m512 k zmm
|
|
// VCVTUDQ2PS.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTUDQ2PS.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTUDQ2PS_Z(mxyz, k, xyz operand.Op) { ctx.VCVTUDQ2PS_Z(mxyz, k, xyz) }
|
|
|
|
// VCVTUQQ2PD: Convert Packed Unsigned Quadword Integers to Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTUQQ2PD m128 k xmm
|
|
// VCVTUQQ2PD m128 xmm
|
|
// VCVTUQQ2PD m256 k ymm
|
|
// VCVTUQQ2PD m256 ymm
|
|
// VCVTUQQ2PD xmm k xmm
|
|
// VCVTUQQ2PD xmm xmm
|
|
// VCVTUQQ2PD ymm k ymm
|
|
// VCVTUQQ2PD ymm ymm
|
|
// VCVTUQQ2PD m512 k zmm
|
|
// VCVTUQQ2PD m512 zmm
|
|
// VCVTUQQ2PD zmm k zmm
|
|
// VCVTUQQ2PD zmm zmm
|
|
//
|
|
// Construct and append a VCVTUQQ2PD instruction to the active function.
|
|
func (c *Context) VCVTUQQ2PD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTUQQ2PD(ops...))
|
|
}
|
|
|
|
// VCVTUQQ2PD: Convert Packed Unsigned Quadword Integers to Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTUQQ2PD m128 k xmm
|
|
// VCVTUQQ2PD m128 xmm
|
|
// VCVTUQQ2PD m256 k ymm
|
|
// VCVTUQQ2PD m256 ymm
|
|
// VCVTUQQ2PD xmm k xmm
|
|
// VCVTUQQ2PD xmm xmm
|
|
// VCVTUQQ2PD ymm k ymm
|
|
// VCVTUQQ2PD ymm ymm
|
|
// VCVTUQQ2PD m512 k zmm
|
|
// VCVTUQQ2PD m512 zmm
|
|
// VCVTUQQ2PD zmm k zmm
|
|
// VCVTUQQ2PD zmm zmm
|
|
//
|
|
// Construct and append a VCVTUQQ2PD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTUQQ2PD(ops ...operand.Op) { ctx.VCVTUQQ2PD(ops...) }
|
|
|
|
// VCVTUQQ2PD_BCST: Convert Packed Unsigned Quadword Integers to Packed Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTUQQ2PD.BCST m64 k xmm
|
|
// VCVTUQQ2PD.BCST m64 k ymm
|
|
// VCVTUQQ2PD.BCST m64 xmm
|
|
// VCVTUQQ2PD.BCST m64 ymm
|
|
// VCVTUQQ2PD.BCST m64 k zmm
|
|
// VCVTUQQ2PD.BCST m64 zmm
|
|
//
|
|
// Construct and append a VCVTUQQ2PD.BCST instruction to the active function.
|
|
func (c *Context) VCVTUQQ2PD_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTUQQ2PD_BCST(ops...))
|
|
}
|
|
|
|
// VCVTUQQ2PD_BCST: Convert Packed Unsigned Quadword Integers to Packed Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTUQQ2PD.BCST m64 k xmm
|
|
// VCVTUQQ2PD.BCST m64 k ymm
|
|
// VCVTUQQ2PD.BCST m64 xmm
|
|
// VCVTUQQ2PD.BCST m64 ymm
|
|
// VCVTUQQ2PD.BCST m64 k zmm
|
|
// VCVTUQQ2PD.BCST m64 zmm
|
|
//
|
|
// Construct and append a VCVTUQQ2PD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTUQQ2PD_BCST(ops ...operand.Op) { ctx.VCVTUQQ2PD_BCST(ops...) }
|
|
|
|
// VCVTUQQ2PD_BCST_Z: Convert Packed Unsigned Quadword Integers to Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTUQQ2PD.BCST.Z m64 k xmm
|
|
// VCVTUQQ2PD.BCST.Z m64 k ymm
|
|
// VCVTUQQ2PD.BCST.Z m64 k zmm
|
|
//
|
|
// Construct and append a VCVTUQQ2PD.BCST.Z instruction to the active function.
|
|
func (c *Context) VCVTUQQ2PD_BCST_Z(m, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VCVTUQQ2PD_BCST_Z(m, k, xyz))
|
|
}
|
|
|
|
// VCVTUQQ2PD_BCST_Z: Convert Packed Unsigned Quadword Integers to Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTUQQ2PD.BCST.Z m64 k xmm
|
|
// VCVTUQQ2PD.BCST.Z m64 k ymm
|
|
// VCVTUQQ2PD.BCST.Z m64 k zmm
|
|
//
|
|
// Construct and append a VCVTUQQ2PD.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTUQQ2PD_BCST_Z(m, k, xyz operand.Op) { ctx.VCVTUQQ2PD_BCST_Z(m, k, xyz) }
|
|
|
|
// VCVTUQQ2PD_RD_SAE: Convert Packed Unsigned Quadword Integers to Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTUQQ2PD.RD_SAE zmm k zmm
|
|
// VCVTUQQ2PD.RD_SAE zmm zmm
|
|
//
|
|
// Construct and append a VCVTUQQ2PD.RD_SAE instruction to the active function.
|
|
func (c *Context) VCVTUQQ2PD_RD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VCVTUQQ2PD_RD_SAE(ops...))
|
|
}
|
|
|
|
// VCVTUQQ2PD_RD_SAE: Convert Packed Unsigned Quadword Integers to Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTUQQ2PD.RD_SAE zmm k zmm
|
|
// VCVTUQQ2PD.RD_SAE zmm zmm
|
|
//
|
|
// Construct and append a VCVTUQQ2PD.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTUQQ2PD_RD_SAE(ops ...operand.Op) { ctx.VCVTUQQ2PD_RD_SAE(ops...) }
|
|
|
|
// VCVTUQQ2PD_RD_SAE_Z: Convert Packed Unsigned Quadword Integers to Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTUQQ2PD.RD_SAE.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTUQQ2PD.RD_SAE.Z instruction to the active function.
|
|
func (c *Context) VCVTUQQ2PD_RD_SAE_Z(z, k, z1 operand.Op) {
|
|
c.addinstruction(x86.VCVTUQQ2PD_RD_SAE_Z(z, k, z1))
|
|
}
|
|
|
|
// VCVTUQQ2PD_RD_SAE_Z: Convert Packed Unsigned Quadword Integers to Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTUQQ2PD.RD_SAE.Z zmm k zmm
|
|
//
|
|
// Construct and append a VCVTUQQ2PD.RD_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTUQQ2PD_RD_SAE_Z(z, k, z1 operand.Op) { ctx.VCVTUQQ2PD_RD_SAE_Z(z, k, z1) }
|
|
|
|
// VCVTUQQ2PD_RN_SAE: Convert Packed Unsigned Quadword Integers to Packed Double-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VCVTUQQ2PD.RN_SAE zmm k zmm
//	VCVTUQQ2PD.RN_SAE zmm zmm
//
// Construct and append a VCVTUQQ2PD.RN_SAE instruction to the active function.
func (c *Context) VCVTUQQ2PD_RN_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VCVTUQQ2PD_RN_SAE(ops...))
}

// VCVTUQQ2PD_RN_SAE: Convert Packed Unsigned Quadword Integers to Packed Double-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VCVTUQQ2PD.RN_SAE zmm k zmm
//	VCVTUQQ2PD.RN_SAE zmm zmm
//
// Construct and append a VCVTUQQ2PD.RN_SAE instruction to the active function.
// Operates on the global context.
func VCVTUQQ2PD_RN_SAE(ops ...operand.Op) { ctx.VCVTUQQ2PD_RN_SAE(ops...) }
|
|
|
|
// VCVTUQQ2PD_RN_SAE_Z: Convert Packed Unsigned Quadword Integers to Packed Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VCVTUQQ2PD.RN_SAE.Z zmm k zmm
//
// Construct and append a VCVTUQQ2PD.RN_SAE.Z instruction to the active function.
func (c *Context) VCVTUQQ2PD_RN_SAE_Z(z, k, z1 operand.Op) {
	c.addinstruction(x86.VCVTUQQ2PD_RN_SAE_Z(z, k, z1))
}

// VCVTUQQ2PD_RN_SAE_Z: Convert Packed Unsigned Quadword Integers to Packed Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VCVTUQQ2PD.RN_SAE.Z zmm k zmm
//
// Construct and append a VCVTUQQ2PD.RN_SAE.Z instruction to the active function.
// Operates on the global context.
func VCVTUQQ2PD_RN_SAE_Z(z, k, z1 operand.Op) { ctx.VCVTUQQ2PD_RN_SAE_Z(z, k, z1) }
|
|
|
|
// VCVTUQQ2PD_RU_SAE: Convert Packed Unsigned Quadword Integers to Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VCVTUQQ2PD.RU_SAE zmm k zmm
//	VCVTUQQ2PD.RU_SAE zmm zmm
//
// Construct and append a VCVTUQQ2PD.RU_SAE instruction to the active function.
func (c *Context) VCVTUQQ2PD_RU_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VCVTUQQ2PD_RU_SAE(ops...))
}

// VCVTUQQ2PD_RU_SAE: Convert Packed Unsigned Quadword Integers to Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VCVTUQQ2PD.RU_SAE zmm k zmm
//	VCVTUQQ2PD.RU_SAE zmm zmm
//
// Construct and append a VCVTUQQ2PD.RU_SAE instruction to the active function.
// Operates on the global context.
func VCVTUQQ2PD_RU_SAE(ops ...operand.Op) { ctx.VCVTUQQ2PD_RU_SAE(ops...) }
|
|
|
|
// VCVTUQQ2PD_RU_SAE_Z: Convert Packed Unsigned Quadword Integers to Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VCVTUQQ2PD.RU_SAE.Z zmm k zmm
//
// Construct and append a VCVTUQQ2PD.RU_SAE.Z instruction to the active function.
func (c *Context) VCVTUQQ2PD_RU_SAE_Z(z, k, z1 operand.Op) {
	c.addinstruction(x86.VCVTUQQ2PD_RU_SAE_Z(z, k, z1))
}

// VCVTUQQ2PD_RU_SAE_Z: Convert Packed Unsigned Quadword Integers to Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VCVTUQQ2PD.RU_SAE.Z zmm k zmm
//
// Construct and append a VCVTUQQ2PD.RU_SAE.Z instruction to the active function.
// Operates on the global context.
func VCVTUQQ2PD_RU_SAE_Z(z, k, z1 operand.Op) { ctx.VCVTUQQ2PD_RU_SAE_Z(z, k, z1) }
|
|
|
|
// VCVTUQQ2PD_RZ_SAE: Convert Packed Unsigned Quadword Integers to Packed Double-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VCVTUQQ2PD.RZ_SAE zmm k zmm
//	VCVTUQQ2PD.RZ_SAE zmm zmm
//
// Construct and append a VCVTUQQ2PD.RZ_SAE instruction to the active function.
func (c *Context) VCVTUQQ2PD_RZ_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VCVTUQQ2PD_RZ_SAE(ops...))
}

// VCVTUQQ2PD_RZ_SAE: Convert Packed Unsigned Quadword Integers to Packed Double-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VCVTUQQ2PD.RZ_SAE zmm k zmm
//	VCVTUQQ2PD.RZ_SAE zmm zmm
//
// Construct and append a VCVTUQQ2PD.RZ_SAE instruction to the active function.
// Operates on the global context.
func VCVTUQQ2PD_RZ_SAE(ops ...operand.Op) { ctx.VCVTUQQ2PD_RZ_SAE(ops...) }
|
|
|
|
// VCVTUQQ2PD_RZ_SAE_Z: Convert Packed Unsigned Quadword Integers to Packed Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VCVTUQQ2PD.RZ_SAE.Z zmm k zmm
//
// Construct and append a VCVTUQQ2PD.RZ_SAE.Z instruction to the active function.
func (c *Context) VCVTUQQ2PD_RZ_SAE_Z(z, k, z1 operand.Op) {
	c.addinstruction(x86.VCVTUQQ2PD_RZ_SAE_Z(z, k, z1))
}

// VCVTUQQ2PD_RZ_SAE_Z: Convert Packed Unsigned Quadword Integers to Packed Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VCVTUQQ2PD.RZ_SAE.Z zmm k zmm
//
// Construct and append a VCVTUQQ2PD.RZ_SAE.Z instruction to the active function.
// Operates on the global context.
func VCVTUQQ2PD_RZ_SAE_Z(z, k, z1 operand.Op) { ctx.VCVTUQQ2PD_RZ_SAE_Z(z, k, z1) }
|
|
|
|
// VCVTUQQ2PD_Z: Convert Packed Unsigned Quadword Integers to Packed Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VCVTUQQ2PD.Z m128 k xmm
//	VCVTUQQ2PD.Z m256 k ymm
//	VCVTUQQ2PD.Z xmm k xmm
//	VCVTUQQ2PD.Z ymm k ymm
//	VCVTUQQ2PD.Z m512 k zmm
//	VCVTUQQ2PD.Z zmm k zmm
//
// Construct and append a VCVTUQQ2PD.Z instruction to the active function.
func (c *Context) VCVTUQQ2PD_Z(mxyz, k, xyz operand.Op) {
	c.addinstruction(x86.VCVTUQQ2PD_Z(mxyz, k, xyz))
}

// VCVTUQQ2PD_Z: Convert Packed Unsigned Quadword Integers to Packed Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VCVTUQQ2PD.Z m128 k xmm
//	VCVTUQQ2PD.Z m256 k ymm
//	VCVTUQQ2PD.Z xmm k xmm
//	VCVTUQQ2PD.Z ymm k ymm
//	VCVTUQQ2PD.Z m512 k zmm
//	VCVTUQQ2PD.Z zmm k zmm
//
// Construct and append a VCVTUQQ2PD.Z instruction to the active function.
// Operates on the global context.
func VCVTUQQ2PD_Z(mxyz, k, xyz operand.Op) { ctx.VCVTUQQ2PD_Z(mxyz, k, xyz) }
|
|
|
|
// VCVTUQQ2PS: Convert Packed Unsigned Quadword Integers to Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	VCVTUQQ2PS m512 k ymm
//	VCVTUQQ2PS m512 ymm
//	VCVTUQQ2PS zmm k ymm
//	VCVTUQQ2PS zmm ymm
//
// Construct and append a VCVTUQQ2PS instruction to the active function.
func (c *Context) VCVTUQQ2PS(ops ...operand.Op) {
	c.addinstruction(x86.VCVTUQQ2PS(ops...))
}

// VCVTUQQ2PS: Convert Packed Unsigned Quadword Integers to Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	VCVTUQQ2PS m512 k ymm
//	VCVTUQQ2PS m512 ymm
//	VCVTUQQ2PS zmm k ymm
//	VCVTUQQ2PS zmm ymm
//
// Construct and append a VCVTUQQ2PS instruction to the active function.
// Operates on the global context.
func VCVTUQQ2PS(ops ...operand.Op) { ctx.VCVTUQQ2PS(ops...) }
|
|
|
|
// VCVTUQQ2PSX: Convert Packed Unsigned Quadword Integers to Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	VCVTUQQ2PSX m128 k xmm
//	VCVTUQQ2PSX m128 xmm
//	VCVTUQQ2PSX xmm k xmm
//	VCVTUQQ2PSX xmm xmm
//
// Construct and append a VCVTUQQ2PSX instruction to the active function.
func (c *Context) VCVTUQQ2PSX(ops ...operand.Op) {
	c.addinstruction(x86.VCVTUQQ2PSX(ops...))
}

// VCVTUQQ2PSX: Convert Packed Unsigned Quadword Integers to Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	VCVTUQQ2PSX m128 k xmm
//	VCVTUQQ2PSX m128 xmm
//	VCVTUQQ2PSX xmm k xmm
//	VCVTUQQ2PSX xmm xmm
//
// Construct and append a VCVTUQQ2PSX instruction to the active function.
// Operates on the global context.
func VCVTUQQ2PSX(ops ...operand.Op) { ctx.VCVTUQQ2PSX(ops...) }
|
|
|
|
// VCVTUQQ2PSX_BCST: Convert Packed Unsigned Quadword Integers to Packed Single-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VCVTUQQ2PSX.BCST m64 k xmm
//	VCVTUQQ2PSX.BCST m64 xmm
//
// Construct and append a VCVTUQQ2PSX.BCST instruction to the active function.
func (c *Context) VCVTUQQ2PSX_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VCVTUQQ2PSX_BCST(ops...))
}

// VCVTUQQ2PSX_BCST: Convert Packed Unsigned Quadword Integers to Packed Single-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VCVTUQQ2PSX.BCST m64 k xmm
//	VCVTUQQ2PSX.BCST m64 xmm
//
// Construct and append a VCVTUQQ2PSX.BCST instruction to the active function.
// Operates on the global context.
func VCVTUQQ2PSX_BCST(ops ...operand.Op) { ctx.VCVTUQQ2PSX_BCST(ops...) }
|
|
|
|
// VCVTUQQ2PSX_BCST_Z: Convert Packed Unsigned Quadword Integers to Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VCVTUQQ2PSX.BCST.Z m64 k xmm
//
// Construct and append a VCVTUQQ2PSX.BCST.Z instruction to the active function.
func (c *Context) VCVTUQQ2PSX_BCST_Z(m, k, x operand.Op) {
	c.addinstruction(x86.VCVTUQQ2PSX_BCST_Z(m, k, x))
}

// VCVTUQQ2PSX_BCST_Z: Convert Packed Unsigned Quadword Integers to Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VCVTUQQ2PSX.BCST.Z m64 k xmm
//
// Construct and append a VCVTUQQ2PSX.BCST.Z instruction to the active function.
// Operates on the global context.
func VCVTUQQ2PSX_BCST_Z(m, k, x operand.Op) { ctx.VCVTUQQ2PSX_BCST_Z(m, k, x) }
|
|
|
|
// VCVTUQQ2PSX_Z: Convert Packed Unsigned Quadword Integers to Packed Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VCVTUQQ2PSX.Z m128 k xmm
//	VCVTUQQ2PSX.Z xmm k xmm
//
// Construct and append a VCVTUQQ2PSX.Z instruction to the active function.
func (c *Context) VCVTUQQ2PSX_Z(mx, k, x operand.Op) {
	c.addinstruction(x86.VCVTUQQ2PSX_Z(mx, k, x))
}

// VCVTUQQ2PSX_Z: Convert Packed Unsigned Quadword Integers to Packed Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VCVTUQQ2PSX.Z m128 k xmm
//	VCVTUQQ2PSX.Z xmm k xmm
//
// Construct and append a VCVTUQQ2PSX.Z instruction to the active function.
// Operates on the global context.
func VCVTUQQ2PSX_Z(mx, k, x operand.Op) { ctx.VCVTUQQ2PSX_Z(mx, k, x) }
|
|
|
|
// VCVTUQQ2PSY: Convert Packed Unsigned Quadword Integers to Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	VCVTUQQ2PSY m256 k xmm
//	VCVTUQQ2PSY m256 xmm
//	VCVTUQQ2PSY ymm k xmm
//	VCVTUQQ2PSY ymm xmm
//
// Construct and append a VCVTUQQ2PSY instruction to the active function.
func (c *Context) VCVTUQQ2PSY(ops ...operand.Op) {
	c.addinstruction(x86.VCVTUQQ2PSY(ops...))
}

// VCVTUQQ2PSY: Convert Packed Unsigned Quadword Integers to Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	VCVTUQQ2PSY m256 k xmm
//	VCVTUQQ2PSY m256 xmm
//	VCVTUQQ2PSY ymm k xmm
//	VCVTUQQ2PSY ymm xmm
//
// Construct and append a VCVTUQQ2PSY instruction to the active function.
// Operates on the global context.
func VCVTUQQ2PSY(ops ...operand.Op) { ctx.VCVTUQQ2PSY(ops...) }
|
|
|
|
// VCVTUQQ2PSY_BCST: Convert Packed Unsigned Quadword Integers to Packed Single-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VCVTUQQ2PSY.BCST m64 k xmm
//	VCVTUQQ2PSY.BCST m64 xmm
//
// Construct and append a VCVTUQQ2PSY.BCST instruction to the active function.
func (c *Context) VCVTUQQ2PSY_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VCVTUQQ2PSY_BCST(ops...))
}

// VCVTUQQ2PSY_BCST: Convert Packed Unsigned Quadword Integers to Packed Single-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VCVTUQQ2PSY.BCST m64 k xmm
//	VCVTUQQ2PSY.BCST m64 xmm
//
// Construct and append a VCVTUQQ2PSY.BCST instruction to the active function.
// Operates on the global context.
func VCVTUQQ2PSY_BCST(ops ...operand.Op) { ctx.VCVTUQQ2PSY_BCST(ops...) }
|
|
|
|
// VCVTUQQ2PSY_BCST_Z: Convert Packed Unsigned Quadword Integers to Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VCVTUQQ2PSY.BCST.Z m64 k xmm
//
// Construct and append a VCVTUQQ2PSY.BCST.Z instruction to the active function.
func (c *Context) VCVTUQQ2PSY_BCST_Z(m, k, x operand.Op) {
	c.addinstruction(x86.VCVTUQQ2PSY_BCST_Z(m, k, x))
}

// VCVTUQQ2PSY_BCST_Z: Convert Packed Unsigned Quadword Integers to Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VCVTUQQ2PSY.BCST.Z m64 k xmm
//
// Construct and append a VCVTUQQ2PSY.BCST.Z instruction to the active function.
// Operates on the global context.
func VCVTUQQ2PSY_BCST_Z(m, k, x operand.Op) { ctx.VCVTUQQ2PSY_BCST_Z(m, k, x) }
|
|
|
|
// VCVTUQQ2PSY_Z: Convert Packed Unsigned Quadword Integers to Packed Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VCVTUQQ2PSY.Z m256 k xmm
//	VCVTUQQ2PSY.Z ymm k xmm
//
// Construct and append a VCVTUQQ2PSY.Z instruction to the active function.
func (c *Context) VCVTUQQ2PSY_Z(my, k, x operand.Op) {
	c.addinstruction(x86.VCVTUQQ2PSY_Z(my, k, x))
}

// VCVTUQQ2PSY_Z: Convert Packed Unsigned Quadword Integers to Packed Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VCVTUQQ2PSY.Z m256 k xmm
//	VCVTUQQ2PSY.Z ymm k xmm
//
// Construct and append a VCVTUQQ2PSY.Z instruction to the active function.
// Operates on the global context.
func VCVTUQQ2PSY_Z(my, k, x operand.Op) { ctx.VCVTUQQ2PSY_Z(my, k, x) }
|
|
|
|
// VCVTUQQ2PS_BCST: Convert Packed Unsigned Quadword Integers to Packed Single-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VCVTUQQ2PS.BCST m64 k ymm
//	VCVTUQQ2PS.BCST m64 ymm
//
// Construct and append a VCVTUQQ2PS.BCST instruction to the active function.
func (c *Context) VCVTUQQ2PS_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VCVTUQQ2PS_BCST(ops...))
}

// VCVTUQQ2PS_BCST: Convert Packed Unsigned Quadword Integers to Packed Single-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VCVTUQQ2PS.BCST m64 k ymm
//	VCVTUQQ2PS.BCST m64 ymm
//
// Construct and append a VCVTUQQ2PS.BCST instruction to the active function.
// Operates on the global context.
func VCVTUQQ2PS_BCST(ops ...operand.Op) { ctx.VCVTUQQ2PS_BCST(ops...) }
|
|
|
|
// VCVTUQQ2PS_BCST_Z: Convert Packed Unsigned Quadword Integers to Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VCVTUQQ2PS.BCST.Z m64 k ymm
//
// Construct and append a VCVTUQQ2PS.BCST.Z instruction to the active function.
func (c *Context) VCVTUQQ2PS_BCST_Z(m, k, y operand.Op) {
	c.addinstruction(x86.VCVTUQQ2PS_BCST_Z(m, k, y))
}

// VCVTUQQ2PS_BCST_Z: Convert Packed Unsigned Quadword Integers to Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VCVTUQQ2PS.BCST.Z m64 k ymm
//
// Construct and append a VCVTUQQ2PS.BCST.Z instruction to the active function.
// Operates on the global context.
func VCVTUQQ2PS_BCST_Z(m, k, y operand.Op) { ctx.VCVTUQQ2PS_BCST_Z(m, k, y) }
|
|
|
|
// VCVTUQQ2PS_RD_SAE: Convert Packed Unsigned Quadword Integers to Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VCVTUQQ2PS.RD_SAE zmm k ymm
//	VCVTUQQ2PS.RD_SAE zmm ymm
//
// Construct and append a VCVTUQQ2PS.RD_SAE instruction to the active function.
func (c *Context) VCVTUQQ2PS_RD_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VCVTUQQ2PS_RD_SAE(ops...))
}

// VCVTUQQ2PS_RD_SAE: Convert Packed Unsigned Quadword Integers to Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VCVTUQQ2PS.RD_SAE zmm k ymm
//	VCVTUQQ2PS.RD_SAE zmm ymm
//
// Construct and append a VCVTUQQ2PS.RD_SAE instruction to the active function.
// Operates on the global context.
func VCVTUQQ2PS_RD_SAE(ops ...operand.Op) { ctx.VCVTUQQ2PS_RD_SAE(ops...) }
|
|
|
|
// VCVTUQQ2PS_RD_SAE_Z: Convert Packed Unsigned Quadword Integers to Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VCVTUQQ2PS.RD_SAE.Z zmm k ymm
//
// Construct and append a VCVTUQQ2PS.RD_SAE.Z instruction to the active function.
func (c *Context) VCVTUQQ2PS_RD_SAE_Z(z, k, y operand.Op) {
	c.addinstruction(x86.VCVTUQQ2PS_RD_SAE_Z(z, k, y))
}

// VCVTUQQ2PS_RD_SAE_Z: Convert Packed Unsigned Quadword Integers to Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VCVTUQQ2PS.RD_SAE.Z zmm k ymm
//
// Construct and append a VCVTUQQ2PS.RD_SAE.Z instruction to the active function.
// Operates on the global context.
func VCVTUQQ2PS_RD_SAE_Z(z, k, y operand.Op) { ctx.VCVTUQQ2PS_RD_SAE_Z(z, k, y) }
|
|
|
|
// VCVTUQQ2PS_RN_SAE: Convert Packed Unsigned Quadword Integers to Packed Single-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VCVTUQQ2PS.RN_SAE zmm k ymm
//	VCVTUQQ2PS.RN_SAE zmm ymm
//
// Construct and append a VCVTUQQ2PS.RN_SAE instruction to the active function.
func (c *Context) VCVTUQQ2PS_RN_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VCVTUQQ2PS_RN_SAE(ops...))
}

// VCVTUQQ2PS_RN_SAE: Convert Packed Unsigned Quadword Integers to Packed Single-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VCVTUQQ2PS.RN_SAE zmm k ymm
//	VCVTUQQ2PS.RN_SAE zmm ymm
//
// Construct and append a VCVTUQQ2PS.RN_SAE instruction to the active function.
// Operates on the global context.
func VCVTUQQ2PS_RN_SAE(ops ...operand.Op) { ctx.VCVTUQQ2PS_RN_SAE(ops...) }
|
|
|
|
// VCVTUQQ2PS_RN_SAE_Z: Convert Packed Unsigned Quadword Integers to Packed Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VCVTUQQ2PS.RN_SAE.Z zmm k ymm
//
// Construct and append a VCVTUQQ2PS.RN_SAE.Z instruction to the active function.
func (c *Context) VCVTUQQ2PS_RN_SAE_Z(z, k, y operand.Op) {
	c.addinstruction(x86.VCVTUQQ2PS_RN_SAE_Z(z, k, y))
}

// VCVTUQQ2PS_RN_SAE_Z: Convert Packed Unsigned Quadword Integers to Packed Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VCVTUQQ2PS.RN_SAE.Z zmm k ymm
//
// Construct and append a VCVTUQQ2PS.RN_SAE.Z instruction to the active function.
// Operates on the global context.
func VCVTUQQ2PS_RN_SAE_Z(z, k, y operand.Op) { ctx.VCVTUQQ2PS_RN_SAE_Z(z, k, y) }
|
|
|
|
// VCVTUQQ2PS_RU_SAE: Convert Packed Unsigned Quadword Integers to Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VCVTUQQ2PS.RU_SAE zmm k ymm
//	VCVTUQQ2PS.RU_SAE zmm ymm
//
// Construct and append a VCVTUQQ2PS.RU_SAE instruction to the active function.
func (c *Context) VCVTUQQ2PS_RU_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VCVTUQQ2PS_RU_SAE(ops...))
}

// VCVTUQQ2PS_RU_SAE: Convert Packed Unsigned Quadword Integers to Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VCVTUQQ2PS.RU_SAE zmm k ymm
//	VCVTUQQ2PS.RU_SAE zmm ymm
//
// Construct and append a VCVTUQQ2PS.RU_SAE instruction to the active function.
// Operates on the global context.
func VCVTUQQ2PS_RU_SAE(ops ...operand.Op) { ctx.VCVTUQQ2PS_RU_SAE(ops...) }
|
|
|
|
// VCVTUQQ2PS_RU_SAE_Z: Convert Packed Unsigned Quadword Integers to Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VCVTUQQ2PS.RU_SAE.Z zmm k ymm
//
// Construct and append a VCVTUQQ2PS.RU_SAE.Z instruction to the active function.
func (c *Context) VCVTUQQ2PS_RU_SAE_Z(z, k, y operand.Op) {
	c.addinstruction(x86.VCVTUQQ2PS_RU_SAE_Z(z, k, y))
}

// VCVTUQQ2PS_RU_SAE_Z: Convert Packed Unsigned Quadword Integers to Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VCVTUQQ2PS.RU_SAE.Z zmm k ymm
//
// Construct and append a VCVTUQQ2PS.RU_SAE.Z instruction to the active function.
// Operates on the global context.
func VCVTUQQ2PS_RU_SAE_Z(z, k, y operand.Op) { ctx.VCVTUQQ2PS_RU_SAE_Z(z, k, y) }
|
|
|
|
// VCVTUQQ2PS_RZ_SAE: Convert Packed Unsigned Quadword Integers to Packed Single-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VCVTUQQ2PS.RZ_SAE zmm k ymm
//	VCVTUQQ2PS.RZ_SAE zmm ymm
//
// Construct and append a VCVTUQQ2PS.RZ_SAE instruction to the active function.
func (c *Context) VCVTUQQ2PS_RZ_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VCVTUQQ2PS_RZ_SAE(ops...))
}

// VCVTUQQ2PS_RZ_SAE: Convert Packed Unsigned Quadword Integers to Packed Single-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VCVTUQQ2PS.RZ_SAE zmm k ymm
//	VCVTUQQ2PS.RZ_SAE zmm ymm
//
// Construct and append a VCVTUQQ2PS.RZ_SAE instruction to the active function.
// Operates on the global context.
func VCVTUQQ2PS_RZ_SAE(ops ...operand.Op) { ctx.VCVTUQQ2PS_RZ_SAE(ops...) }
|
|
|
|
// VCVTUQQ2PS_RZ_SAE_Z: Convert Packed Unsigned Quadword Integers to Packed Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VCVTUQQ2PS.RZ_SAE.Z zmm k ymm
//
// Construct and append a VCVTUQQ2PS.RZ_SAE.Z instruction to the active function.
func (c *Context) VCVTUQQ2PS_RZ_SAE_Z(z, k, y operand.Op) {
	c.addinstruction(x86.VCVTUQQ2PS_RZ_SAE_Z(z, k, y))
}

// VCVTUQQ2PS_RZ_SAE_Z: Convert Packed Unsigned Quadword Integers to Packed Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VCVTUQQ2PS.RZ_SAE.Z zmm k ymm
//
// Construct and append a VCVTUQQ2PS.RZ_SAE.Z instruction to the active function.
// Operates on the global context.
func VCVTUQQ2PS_RZ_SAE_Z(z, k, y operand.Op) { ctx.VCVTUQQ2PS_RZ_SAE_Z(z, k, y) }
|
|
|
|
// VCVTUQQ2PS_Z: Convert Packed Unsigned Quadword Integers to Packed Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VCVTUQQ2PS.Z m512 k ymm
//	VCVTUQQ2PS.Z zmm k ymm
//
// Construct and append a VCVTUQQ2PS.Z instruction to the active function.
func (c *Context) VCVTUQQ2PS_Z(mz, k, y operand.Op) {
	c.addinstruction(x86.VCVTUQQ2PS_Z(mz, k, y))
}

// VCVTUQQ2PS_Z: Convert Packed Unsigned Quadword Integers to Packed Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VCVTUQQ2PS.Z m512 k ymm
//	VCVTUQQ2PS.Z zmm k ymm
//
// Construct and append a VCVTUQQ2PS.Z instruction to the active function.
// Operates on the global context.
func VCVTUQQ2PS_Z(mz, k, y operand.Op) { ctx.VCVTUQQ2PS_Z(mz, k, y) }
|
|
|
|
// VCVTUSI2SDL: Convert Unsigned Integer to Scalar Double-Precision Floating-Point Value.
//
// Forms:
//
//	VCVTUSI2SDL m32 xmm xmm
//	VCVTUSI2SDL r32 xmm xmm
//
// Construct and append a VCVTUSI2SDL instruction to the active function.
func (c *Context) VCVTUSI2SDL(mr, x, x1 operand.Op) {
	c.addinstruction(x86.VCVTUSI2SDL(mr, x, x1))
}

// VCVTUSI2SDL: Convert Unsigned Integer to Scalar Double-Precision Floating-Point Value.
//
// Forms:
//
//	VCVTUSI2SDL m32 xmm xmm
//	VCVTUSI2SDL r32 xmm xmm
//
// Construct and append a VCVTUSI2SDL instruction to the active function.
// Operates on the global context.
func VCVTUSI2SDL(mr, x, x1 operand.Op) { ctx.VCVTUSI2SDL(mr, x, x1) }
|
|
|
|
// VCVTUSI2SDQ: Convert Unsigned Integer to Scalar Double-Precision Floating-Point Value.
//
// Forms:
//
//	VCVTUSI2SDQ m64 xmm xmm
//	VCVTUSI2SDQ r64 xmm xmm
//
// Construct and append a VCVTUSI2SDQ instruction to the active function.
func (c *Context) VCVTUSI2SDQ(mr, x, x1 operand.Op) {
	c.addinstruction(x86.VCVTUSI2SDQ(mr, x, x1))
}

// VCVTUSI2SDQ: Convert Unsigned Integer to Scalar Double-Precision Floating-Point Value.
//
// Forms:
//
//	VCVTUSI2SDQ m64 xmm xmm
//	VCVTUSI2SDQ r64 xmm xmm
//
// Construct and append a VCVTUSI2SDQ instruction to the active function.
// Operates on the global context.
func VCVTUSI2SDQ(mr, x, x1 operand.Op) { ctx.VCVTUSI2SDQ(mr, x, x1) }
|
|
|
|
// VCVTUSI2SDQ_RD_SAE: Convert Unsigned Integer to Scalar Double-Precision Floating-Point Value (Round Towards Negative Infinity).
//
// Forms:
//
//	VCVTUSI2SDQ.RD_SAE r64 xmm xmm
//
// Construct and append a VCVTUSI2SDQ.RD_SAE instruction to the active function.
func (c *Context) VCVTUSI2SDQ_RD_SAE(r, x, x1 operand.Op) {
	c.addinstruction(x86.VCVTUSI2SDQ_RD_SAE(r, x, x1))
}

// VCVTUSI2SDQ_RD_SAE: Convert Unsigned Integer to Scalar Double-Precision Floating-Point Value (Round Towards Negative Infinity).
//
// Forms:
//
//	VCVTUSI2SDQ.RD_SAE r64 xmm xmm
//
// Construct and append a VCVTUSI2SDQ.RD_SAE instruction to the active function.
// Operates on the global context.
func VCVTUSI2SDQ_RD_SAE(r, x, x1 operand.Op) { ctx.VCVTUSI2SDQ_RD_SAE(r, x, x1) }
|
|
|
|
// VCVTUSI2SDQ_RN_SAE: Convert Unsigned Integer to Scalar Double-Precision Floating-Point Value (Round Towards Nearest).
//
// Forms:
//
//	VCVTUSI2SDQ.RN_SAE r64 xmm xmm
//
// Construct and append a VCVTUSI2SDQ.RN_SAE instruction to the active function.
func (c *Context) VCVTUSI2SDQ_RN_SAE(r, x, x1 operand.Op) {
	c.addinstruction(x86.VCVTUSI2SDQ_RN_SAE(r, x, x1))
}

// VCVTUSI2SDQ_RN_SAE: Convert Unsigned Integer to Scalar Double-Precision Floating-Point Value (Round Towards Nearest).
//
// Forms:
//
//	VCVTUSI2SDQ.RN_SAE r64 xmm xmm
//
// Construct and append a VCVTUSI2SDQ.RN_SAE instruction to the active function.
// Operates on the global context.
func VCVTUSI2SDQ_RN_SAE(r, x, x1 operand.Op) { ctx.VCVTUSI2SDQ_RN_SAE(r, x, x1) }
|
|
|
|
// VCVTUSI2SDQ_RU_SAE: Convert Unsigned Integer to Scalar Double-Precision Floating-Point Value (Round Towards Positive Infinity).
//
// Forms:
//
//	VCVTUSI2SDQ.RU_SAE r64 xmm xmm
//
// Construct and append a VCVTUSI2SDQ.RU_SAE instruction to the active function.
func (c *Context) VCVTUSI2SDQ_RU_SAE(r, x, x1 operand.Op) {
	c.addinstruction(x86.VCVTUSI2SDQ_RU_SAE(r, x, x1))
}

// VCVTUSI2SDQ_RU_SAE: Convert Unsigned Integer to Scalar Double-Precision Floating-Point Value (Round Towards Positive Infinity).
//
// Forms:
//
//	VCVTUSI2SDQ.RU_SAE r64 xmm xmm
//
// Construct and append a VCVTUSI2SDQ.RU_SAE instruction to the active function.
// Operates on the global context.
func VCVTUSI2SDQ_RU_SAE(r, x, x1 operand.Op) { ctx.VCVTUSI2SDQ_RU_SAE(r, x, x1) }
|
|
|
|
// VCVTUSI2SDQ_RZ_SAE: Convert Unsigned Integer to Scalar Double-Precision Floating-Point Value (Round Towards Zero).
//
// Forms:
//
//	VCVTUSI2SDQ.RZ_SAE r64 xmm xmm
//
// Construct and append a VCVTUSI2SDQ.RZ_SAE instruction to the active function.
func (c *Context) VCVTUSI2SDQ_RZ_SAE(r, x, x1 operand.Op) {
	c.addinstruction(x86.VCVTUSI2SDQ_RZ_SAE(r, x, x1))
}

// VCVTUSI2SDQ_RZ_SAE: Convert Unsigned Integer to Scalar Double-Precision Floating-Point Value (Round Towards Zero).
//
// Forms:
//
//	VCVTUSI2SDQ.RZ_SAE r64 xmm xmm
//
// Construct and append a VCVTUSI2SDQ.RZ_SAE instruction to the active function.
// Operates on the global context.
func VCVTUSI2SDQ_RZ_SAE(r, x, x1 operand.Op) { ctx.VCVTUSI2SDQ_RZ_SAE(r, x, x1) }
|
|
|
|
// VCVTUSI2SSL: Convert Unsigned Integer to Scalar Single-Precision Floating-Point Value.
//
// Forms:
//
//	VCVTUSI2SSL m32 xmm xmm
//	VCVTUSI2SSL r32 xmm xmm
//
// Construct and append a VCVTUSI2SSL instruction to the active function.
func (c *Context) VCVTUSI2SSL(mr, x, x1 operand.Op) {
	c.addinstruction(x86.VCVTUSI2SSL(mr, x, x1))
}

// VCVTUSI2SSL: Convert Unsigned Integer to Scalar Single-Precision Floating-Point Value.
//
// Forms:
//
//	VCVTUSI2SSL m32 xmm xmm
//	VCVTUSI2SSL r32 xmm xmm
//
// Construct and append a VCVTUSI2SSL instruction to the active function.
// Operates on the global context.
func VCVTUSI2SSL(mr, x, x1 operand.Op) { ctx.VCVTUSI2SSL(mr, x, x1) }
|
|
|
|
// VCVTUSI2SSL_RD_SAE: Convert Unsigned Integer to Scalar Single-Precision Floating-Point Value (Round Towards Negative Infinity).
//
// Forms:
//
//	VCVTUSI2SSL.RD_SAE r32 xmm xmm
//
// Construct and append a VCVTUSI2SSL.RD_SAE instruction to the active function.
func (c *Context) VCVTUSI2SSL_RD_SAE(r, x, x1 operand.Op) {
	c.addinstruction(x86.VCVTUSI2SSL_RD_SAE(r, x, x1))
}

// VCVTUSI2SSL_RD_SAE: Convert Unsigned Integer to Scalar Single-Precision Floating-Point Value (Round Towards Negative Infinity).
//
// Forms:
//
//	VCVTUSI2SSL.RD_SAE r32 xmm xmm
//
// Construct and append a VCVTUSI2SSL.RD_SAE instruction to the active function.
// Operates on the global context.
func VCVTUSI2SSL_RD_SAE(r, x, x1 operand.Op) { ctx.VCVTUSI2SSL_RD_SAE(r, x, x1) }
|
|
|
|
// VCVTUSI2SSL_RN_SAE: Convert Unsigned Integer to Scalar Single-Precision Floating-Point Value (Round Towards Nearest).
//
// Forms:
//
//	VCVTUSI2SSL.RN_SAE r32 xmm xmm
//
// Construct and append a VCVTUSI2SSL.RN_SAE instruction to the active function.
func (c *Context) VCVTUSI2SSL_RN_SAE(r, x, x1 operand.Op) {
	c.addinstruction(x86.VCVTUSI2SSL_RN_SAE(r, x, x1))
}

// VCVTUSI2SSL_RN_SAE: Convert Unsigned Integer to Scalar Single-Precision Floating-Point Value (Round Towards Nearest).
//
// Forms:
//
//	VCVTUSI2SSL.RN_SAE r32 xmm xmm
//
// Construct and append a VCVTUSI2SSL.RN_SAE instruction to the active function.
// Operates on the global context.
func VCVTUSI2SSL_RN_SAE(r, x, x1 operand.Op) { ctx.VCVTUSI2SSL_RN_SAE(r, x, x1) }
|
|
|
|
// VCVTUSI2SSL_RU_SAE: Convert Unsigned Integer to Scalar Single-Precision Floating-Point Value (Round Towards Positive Infinity).
//
// Forms:
//
//	VCVTUSI2SSL.RU_SAE r32 xmm xmm
//
// Construct and append a VCVTUSI2SSL.RU_SAE instruction to the active function.
func (c *Context) VCVTUSI2SSL_RU_SAE(r, x, x1 operand.Op) {
	c.addinstruction(x86.VCVTUSI2SSL_RU_SAE(r, x, x1))
}

// VCVTUSI2SSL_RU_SAE: Convert Unsigned Integer to Scalar Single-Precision Floating-Point Value (Round Towards Positive Infinity).
//
// Forms:
//
//	VCVTUSI2SSL.RU_SAE r32 xmm xmm
//
// Construct and append a VCVTUSI2SSL.RU_SAE instruction to the active function.
// Operates on the global context.
func VCVTUSI2SSL_RU_SAE(r, x, x1 operand.Op) { ctx.VCVTUSI2SSL_RU_SAE(r, x, x1) }
|
|
|
|
// VCVTUSI2SSL_RZ_SAE: Convert Unsigned Integer to Scalar Single-Precision Floating-Point Value (Round Towards Zero).
//
// Forms:
//
//	VCVTUSI2SSL.RZ_SAE r32 xmm xmm
//
// Construct and append a VCVTUSI2SSL.RZ_SAE instruction to the active function.
func (c *Context) VCVTUSI2SSL_RZ_SAE(r, x, x1 operand.Op) {
	c.addinstruction(x86.VCVTUSI2SSL_RZ_SAE(r, x, x1))
}

// VCVTUSI2SSL_RZ_SAE: Convert Unsigned Integer to Scalar Single-Precision Floating-Point Value (Round Towards Zero).
//
// Forms:
//
//	VCVTUSI2SSL.RZ_SAE r32 xmm xmm
//
// Construct and append a VCVTUSI2SSL.RZ_SAE instruction to the active function.
// Operates on the global context.
func VCVTUSI2SSL_RZ_SAE(r, x, x1 operand.Op) { ctx.VCVTUSI2SSL_RZ_SAE(r, x, x1) }
|
|
|
|
// VCVTUSI2SSQ: Convert Unsigned Integer to Scalar Single-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTUSI2SSQ m64 xmm xmm
|
|
// VCVTUSI2SSQ r64 xmm xmm
|
|
//
|
|
// Construct and append a VCVTUSI2SSQ instruction to the active function.
|
|
func (c *Context) VCVTUSI2SSQ(mr, x, x1 operand.Op) {
|
|
c.addinstruction(x86.VCVTUSI2SSQ(mr, x, x1))
|
|
}
|
|
|
|
// VCVTUSI2SSQ: Convert Unsigned Integer to Scalar Single-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTUSI2SSQ m64 xmm xmm
|
|
// VCVTUSI2SSQ r64 xmm xmm
|
|
//
|
|
// Construct and append a VCVTUSI2SSQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTUSI2SSQ(mr, x, x1 operand.Op) { ctx.VCVTUSI2SSQ(mr, x, x1) }
|
|
|
|
// VCVTUSI2SSQ_RD_SAE: Convert Unsigned Integer to Scalar Single-Precision Floating-Point Value (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTUSI2SSQ.RD_SAE r64 xmm xmm
|
|
//
|
|
// Construct and append a VCVTUSI2SSQ.RD_SAE instruction to the active function.
|
|
func (c *Context) VCVTUSI2SSQ_RD_SAE(r, x, x1 operand.Op) {
|
|
c.addinstruction(x86.VCVTUSI2SSQ_RD_SAE(r, x, x1))
|
|
}
|
|
|
|
// VCVTUSI2SSQ_RD_SAE: Convert Unsigned Integer to Scalar Single-Precision Floating-Point Value (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTUSI2SSQ.RD_SAE r64 xmm xmm
|
|
//
|
|
// Construct and append a VCVTUSI2SSQ.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTUSI2SSQ_RD_SAE(r, x, x1 operand.Op) { ctx.VCVTUSI2SSQ_RD_SAE(r, x, x1) }
|
|
|
|
// VCVTUSI2SSQ_RN_SAE: Convert Unsigned Integer to Scalar Single-Precision Floating-Point Value (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTUSI2SSQ.RN_SAE r64 xmm xmm
|
|
//
|
|
// Construct and append a VCVTUSI2SSQ.RN_SAE instruction to the active function.
|
|
func (c *Context) VCVTUSI2SSQ_RN_SAE(r, x, x1 operand.Op) {
|
|
c.addinstruction(x86.VCVTUSI2SSQ_RN_SAE(r, x, x1))
|
|
}
|
|
|
|
// VCVTUSI2SSQ_RN_SAE: Convert Unsigned Integer to Scalar Single-Precision Floating-Point Value (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTUSI2SSQ.RN_SAE r64 xmm xmm
|
|
//
|
|
// Construct and append a VCVTUSI2SSQ.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTUSI2SSQ_RN_SAE(r, x, x1 operand.Op) { ctx.VCVTUSI2SSQ_RN_SAE(r, x, x1) }
|
|
|
|
// VCVTUSI2SSQ_RU_SAE: Convert Unsigned Integer to Scalar Single-Precision Floating-Point Value (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTUSI2SSQ.RU_SAE r64 xmm xmm
|
|
//
|
|
// Construct and append a VCVTUSI2SSQ.RU_SAE instruction to the active function.
|
|
func (c *Context) VCVTUSI2SSQ_RU_SAE(r, x, x1 operand.Op) {
|
|
c.addinstruction(x86.VCVTUSI2SSQ_RU_SAE(r, x, x1))
|
|
}
|
|
|
|
// VCVTUSI2SSQ_RU_SAE: Convert Unsigned Integer to Scalar Single-Precision Floating-Point Value (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTUSI2SSQ.RU_SAE r64 xmm xmm
|
|
//
|
|
// Construct and append a VCVTUSI2SSQ.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTUSI2SSQ_RU_SAE(r, x, x1 operand.Op) { ctx.VCVTUSI2SSQ_RU_SAE(r, x, x1) }
|
|
|
|
// VCVTUSI2SSQ_RZ_SAE: Convert Unsigned Integer to Scalar Single-Precision Floating-Point Value (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTUSI2SSQ.RZ_SAE r64 xmm xmm
|
|
//
|
|
// Construct and append a VCVTUSI2SSQ.RZ_SAE instruction to the active function.
|
|
func (c *Context) VCVTUSI2SSQ_RZ_SAE(r, x, x1 operand.Op) {
|
|
c.addinstruction(x86.VCVTUSI2SSQ_RZ_SAE(r, x, x1))
|
|
}
|
|
|
|
// VCVTUSI2SSQ_RZ_SAE: Convert Unsigned Integer to Scalar Single-Precision Floating-Point Value (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VCVTUSI2SSQ.RZ_SAE r64 xmm xmm
|
|
//
|
|
// Construct and append a VCVTUSI2SSQ.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VCVTUSI2SSQ_RZ_SAE(r, x, x1 operand.Op) { ctx.VCVTUSI2SSQ_RZ_SAE(r, x, x1) }
|
|
|
|
// VDBPSADBW: Double Block Packed Sum-Absolute-Differences on Unsigned Bytes.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDBPSADBW imm8 m128 xmm k xmm
|
|
// VDBPSADBW imm8 m128 xmm xmm
|
|
// VDBPSADBW imm8 m256 ymm k ymm
|
|
// VDBPSADBW imm8 m256 ymm ymm
|
|
// VDBPSADBW imm8 xmm xmm k xmm
|
|
// VDBPSADBW imm8 xmm xmm xmm
|
|
// VDBPSADBW imm8 ymm ymm k ymm
|
|
// VDBPSADBW imm8 ymm ymm ymm
|
|
// VDBPSADBW imm8 m512 zmm k zmm
|
|
// VDBPSADBW imm8 m512 zmm zmm
|
|
// VDBPSADBW imm8 zmm zmm k zmm
|
|
// VDBPSADBW imm8 zmm zmm zmm
|
|
//
|
|
// Construct and append a VDBPSADBW instruction to the active function.
|
|
func (c *Context) VDBPSADBW(ops ...operand.Op) {
|
|
c.addinstruction(x86.VDBPSADBW(ops...))
|
|
}
|
|
|
|
// VDBPSADBW: Double Block Packed Sum-Absolute-Differences on Unsigned Bytes.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDBPSADBW imm8 m128 xmm k xmm
|
|
// VDBPSADBW imm8 m128 xmm xmm
|
|
// VDBPSADBW imm8 m256 ymm k ymm
|
|
// VDBPSADBW imm8 m256 ymm ymm
|
|
// VDBPSADBW imm8 xmm xmm k xmm
|
|
// VDBPSADBW imm8 xmm xmm xmm
|
|
// VDBPSADBW imm8 ymm ymm k ymm
|
|
// VDBPSADBW imm8 ymm ymm ymm
|
|
// VDBPSADBW imm8 m512 zmm k zmm
|
|
// VDBPSADBW imm8 m512 zmm zmm
|
|
// VDBPSADBW imm8 zmm zmm k zmm
|
|
// VDBPSADBW imm8 zmm zmm zmm
|
|
//
|
|
// Construct and append a VDBPSADBW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VDBPSADBW(ops ...operand.Op) { ctx.VDBPSADBW(ops...) }
|
|
|
|
// VDBPSADBW_Z: Double Block Packed Sum-Absolute-Differences on Unsigned Bytes (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDBPSADBW.Z imm8 m128 xmm k xmm
|
|
// VDBPSADBW.Z imm8 m256 ymm k ymm
|
|
// VDBPSADBW.Z imm8 xmm xmm k xmm
|
|
// VDBPSADBW.Z imm8 ymm ymm k ymm
|
|
// VDBPSADBW.Z imm8 m512 zmm k zmm
|
|
// VDBPSADBW.Z imm8 zmm zmm k zmm
|
|
//
|
|
// Construct and append a VDBPSADBW.Z instruction to the active function.
|
|
func (c *Context) VDBPSADBW_Z(i, mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VDBPSADBW_Z(i, mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VDBPSADBW_Z: Double Block Packed Sum-Absolute-Differences on Unsigned Bytes (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDBPSADBW.Z imm8 m128 xmm k xmm
|
|
// VDBPSADBW.Z imm8 m256 ymm k ymm
|
|
// VDBPSADBW.Z imm8 xmm xmm k xmm
|
|
// VDBPSADBW.Z imm8 ymm ymm k ymm
|
|
// VDBPSADBW.Z imm8 m512 zmm k zmm
|
|
// VDBPSADBW.Z imm8 zmm zmm k zmm
|
|
//
|
|
// Construct and append a VDBPSADBW.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VDBPSADBW_Z(i, mxyz, xyz, k, xyz1 operand.Op) { ctx.VDBPSADBW_Z(i, mxyz, xyz, k, xyz1) }
|
|
|
|
// VDIVPD: Divide Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVPD m128 xmm xmm
|
|
// VDIVPD m256 ymm ymm
|
|
// VDIVPD xmm xmm xmm
|
|
// VDIVPD ymm ymm ymm
|
|
// VDIVPD m128 xmm k xmm
|
|
// VDIVPD m256 ymm k ymm
|
|
// VDIVPD xmm xmm k xmm
|
|
// VDIVPD ymm ymm k ymm
|
|
// VDIVPD m512 zmm k zmm
|
|
// VDIVPD m512 zmm zmm
|
|
// VDIVPD zmm zmm k zmm
|
|
// VDIVPD zmm zmm zmm
|
|
//
|
|
// Construct and append a VDIVPD instruction to the active function.
|
|
func (c *Context) VDIVPD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VDIVPD(ops...))
|
|
}
|
|
|
|
// VDIVPD: Divide Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVPD m128 xmm xmm
|
|
// VDIVPD m256 ymm ymm
|
|
// VDIVPD xmm xmm xmm
|
|
// VDIVPD ymm ymm ymm
|
|
// VDIVPD m128 xmm k xmm
|
|
// VDIVPD m256 ymm k ymm
|
|
// VDIVPD xmm xmm k xmm
|
|
// VDIVPD ymm ymm k ymm
|
|
// VDIVPD m512 zmm k zmm
|
|
// VDIVPD m512 zmm zmm
|
|
// VDIVPD zmm zmm k zmm
|
|
// VDIVPD zmm zmm zmm
|
|
//
|
|
// Construct and append a VDIVPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VDIVPD(ops ...operand.Op) { ctx.VDIVPD(ops...) }
|
|
|
|
// VDIVPD_BCST: Divide Packed Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVPD.BCST m64 xmm k xmm
|
|
// VDIVPD.BCST m64 xmm xmm
|
|
// VDIVPD.BCST m64 ymm k ymm
|
|
// VDIVPD.BCST m64 ymm ymm
|
|
// VDIVPD.BCST m64 zmm k zmm
|
|
// VDIVPD.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VDIVPD.BCST instruction to the active function.
|
|
func (c *Context) VDIVPD_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VDIVPD_BCST(ops...))
|
|
}
|
|
|
|
// VDIVPD_BCST: Divide Packed Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVPD.BCST m64 xmm k xmm
|
|
// VDIVPD.BCST m64 xmm xmm
|
|
// VDIVPD.BCST m64 ymm k ymm
|
|
// VDIVPD.BCST m64 ymm ymm
|
|
// VDIVPD.BCST m64 zmm k zmm
|
|
// VDIVPD.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VDIVPD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VDIVPD_BCST(ops ...operand.Op) { ctx.VDIVPD_BCST(ops...) }
|
|
|
|
// VDIVPD_BCST_Z: Divide Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVPD.BCST.Z m64 xmm k xmm
|
|
// VDIVPD.BCST.Z m64 ymm k ymm
|
|
// VDIVPD.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VDIVPD.BCST.Z instruction to the active function.
|
|
func (c *Context) VDIVPD_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VDIVPD_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VDIVPD_BCST_Z: Divide Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVPD.BCST.Z m64 xmm k xmm
|
|
// VDIVPD.BCST.Z m64 ymm k ymm
|
|
// VDIVPD.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VDIVPD.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VDIVPD_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VDIVPD_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VDIVPD_RD_SAE: Divide Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVPD.RD_SAE zmm zmm k zmm
|
|
// VDIVPD.RD_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VDIVPD.RD_SAE instruction to the active function.
|
|
func (c *Context) VDIVPD_RD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VDIVPD_RD_SAE(ops...))
|
|
}
|
|
|
|
// VDIVPD_RD_SAE: Divide Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVPD.RD_SAE zmm zmm k zmm
|
|
// VDIVPD.RD_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VDIVPD.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VDIVPD_RD_SAE(ops ...operand.Op) { ctx.VDIVPD_RD_SAE(ops...) }
|
|
|
|
// VDIVPD_RD_SAE_Z: Divide Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVPD.RD_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VDIVPD.RD_SAE.Z instruction to the active function.
|
|
func (c *Context) VDIVPD_RD_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VDIVPD_RD_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VDIVPD_RD_SAE_Z: Divide Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVPD.RD_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VDIVPD.RD_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VDIVPD_RD_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VDIVPD_RD_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VDIVPD_RN_SAE: Divide Packed Double-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVPD.RN_SAE zmm zmm k zmm
|
|
// VDIVPD.RN_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VDIVPD.RN_SAE instruction to the active function.
|
|
func (c *Context) VDIVPD_RN_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VDIVPD_RN_SAE(ops...))
|
|
}
|
|
|
|
// VDIVPD_RN_SAE: Divide Packed Double-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVPD.RN_SAE zmm zmm k zmm
|
|
// VDIVPD.RN_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VDIVPD.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VDIVPD_RN_SAE(ops ...operand.Op) { ctx.VDIVPD_RN_SAE(ops...) }
|
|
|
|
// VDIVPD_RN_SAE_Z: Divide Packed Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVPD.RN_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VDIVPD.RN_SAE.Z instruction to the active function.
|
|
func (c *Context) VDIVPD_RN_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VDIVPD_RN_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VDIVPD_RN_SAE_Z: Divide Packed Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVPD.RN_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VDIVPD.RN_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VDIVPD_RN_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VDIVPD_RN_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VDIVPD_RU_SAE: Divide Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVPD.RU_SAE zmm zmm k zmm
|
|
// VDIVPD.RU_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VDIVPD.RU_SAE instruction to the active function.
|
|
func (c *Context) VDIVPD_RU_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VDIVPD_RU_SAE(ops...))
|
|
}
|
|
|
|
// VDIVPD_RU_SAE: Divide Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVPD.RU_SAE zmm zmm k zmm
|
|
// VDIVPD.RU_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VDIVPD.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VDIVPD_RU_SAE(ops ...operand.Op) { ctx.VDIVPD_RU_SAE(ops...) }
|
|
|
|
// VDIVPD_RU_SAE_Z: Divide Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVPD.RU_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VDIVPD.RU_SAE.Z instruction to the active function.
|
|
func (c *Context) VDIVPD_RU_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VDIVPD_RU_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VDIVPD_RU_SAE_Z: Divide Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVPD.RU_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VDIVPD.RU_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VDIVPD_RU_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VDIVPD_RU_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VDIVPD_RZ_SAE: Divide Packed Double-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVPD.RZ_SAE zmm zmm k zmm
|
|
// VDIVPD.RZ_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VDIVPD.RZ_SAE instruction to the active function.
|
|
func (c *Context) VDIVPD_RZ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VDIVPD_RZ_SAE(ops...))
|
|
}
|
|
|
|
// VDIVPD_RZ_SAE: Divide Packed Double-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVPD.RZ_SAE zmm zmm k zmm
|
|
// VDIVPD.RZ_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VDIVPD.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VDIVPD_RZ_SAE(ops ...operand.Op) { ctx.VDIVPD_RZ_SAE(ops...) }
|
|
|
|
// VDIVPD_RZ_SAE_Z: Divide Packed Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVPD.RZ_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VDIVPD.RZ_SAE.Z instruction to the active function.
|
|
func (c *Context) VDIVPD_RZ_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VDIVPD_RZ_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VDIVPD_RZ_SAE_Z: Divide Packed Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVPD.RZ_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VDIVPD.RZ_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VDIVPD_RZ_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VDIVPD_RZ_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VDIVPD_Z: Divide Packed Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVPD.Z m128 xmm k xmm
|
|
// VDIVPD.Z m256 ymm k ymm
|
|
// VDIVPD.Z xmm xmm k xmm
|
|
// VDIVPD.Z ymm ymm k ymm
|
|
// VDIVPD.Z m512 zmm k zmm
|
|
// VDIVPD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VDIVPD.Z instruction to the active function.
|
|
func (c *Context) VDIVPD_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VDIVPD_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VDIVPD_Z: Divide Packed Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVPD.Z m128 xmm k xmm
|
|
// VDIVPD.Z m256 ymm k ymm
|
|
// VDIVPD.Z xmm xmm k xmm
|
|
// VDIVPD.Z ymm ymm k ymm
|
|
// VDIVPD.Z m512 zmm k zmm
|
|
// VDIVPD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VDIVPD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VDIVPD_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VDIVPD_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VDIVPS: Divide Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVPS m128 xmm xmm
|
|
// VDIVPS m256 ymm ymm
|
|
// VDIVPS xmm xmm xmm
|
|
// VDIVPS ymm ymm ymm
|
|
// VDIVPS m128 xmm k xmm
|
|
// VDIVPS m256 ymm k ymm
|
|
// VDIVPS xmm xmm k xmm
|
|
// VDIVPS ymm ymm k ymm
|
|
// VDIVPS m512 zmm k zmm
|
|
// VDIVPS m512 zmm zmm
|
|
// VDIVPS zmm zmm k zmm
|
|
// VDIVPS zmm zmm zmm
|
|
//
|
|
// Construct and append a VDIVPS instruction to the active function.
|
|
func (c *Context) VDIVPS(ops ...operand.Op) {
|
|
c.addinstruction(x86.VDIVPS(ops...))
|
|
}
|
|
|
|
// VDIVPS: Divide Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVPS m128 xmm xmm
|
|
// VDIVPS m256 ymm ymm
|
|
// VDIVPS xmm xmm xmm
|
|
// VDIVPS ymm ymm ymm
|
|
// VDIVPS m128 xmm k xmm
|
|
// VDIVPS m256 ymm k ymm
|
|
// VDIVPS xmm xmm k xmm
|
|
// VDIVPS ymm ymm k ymm
|
|
// VDIVPS m512 zmm k zmm
|
|
// VDIVPS m512 zmm zmm
|
|
// VDIVPS zmm zmm k zmm
|
|
// VDIVPS zmm zmm zmm
|
|
//
|
|
// Construct and append a VDIVPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VDIVPS(ops ...operand.Op) { ctx.VDIVPS(ops...) }
|
|
|
|
// VDIVPS_BCST: Divide Packed Single-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVPS.BCST m32 xmm k xmm
|
|
// VDIVPS.BCST m32 xmm xmm
|
|
// VDIVPS.BCST m32 ymm k ymm
|
|
// VDIVPS.BCST m32 ymm ymm
|
|
// VDIVPS.BCST m32 zmm k zmm
|
|
// VDIVPS.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VDIVPS.BCST instruction to the active function.
|
|
func (c *Context) VDIVPS_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VDIVPS_BCST(ops...))
|
|
}
|
|
|
|
// VDIVPS_BCST: Divide Packed Single-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVPS.BCST m32 xmm k xmm
|
|
// VDIVPS.BCST m32 xmm xmm
|
|
// VDIVPS.BCST m32 ymm k ymm
|
|
// VDIVPS.BCST m32 ymm ymm
|
|
// VDIVPS.BCST m32 zmm k zmm
|
|
// VDIVPS.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VDIVPS.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VDIVPS_BCST(ops ...operand.Op) { ctx.VDIVPS_BCST(ops...) }
|
|
|
|
// VDIVPS_BCST_Z: Divide Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVPS.BCST.Z m32 xmm k xmm
|
|
// VDIVPS.BCST.Z m32 ymm k ymm
|
|
// VDIVPS.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VDIVPS.BCST.Z instruction to the active function.
|
|
func (c *Context) VDIVPS_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VDIVPS_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VDIVPS_BCST_Z: Divide Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVPS.BCST.Z m32 xmm k xmm
|
|
// VDIVPS.BCST.Z m32 ymm k ymm
|
|
// VDIVPS.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VDIVPS.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VDIVPS_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VDIVPS_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VDIVPS_RD_SAE: Divide Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVPS.RD_SAE zmm zmm k zmm
|
|
// VDIVPS.RD_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VDIVPS.RD_SAE instruction to the active function.
|
|
func (c *Context) VDIVPS_RD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VDIVPS_RD_SAE(ops...))
|
|
}
|
|
|
|
// VDIVPS_RD_SAE: Divide Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVPS.RD_SAE zmm zmm k zmm
|
|
// VDIVPS.RD_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VDIVPS.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VDIVPS_RD_SAE(ops ...operand.Op) { ctx.VDIVPS_RD_SAE(ops...) }
|
|
|
|
// VDIVPS_RD_SAE_Z: Divide Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVPS.RD_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VDIVPS.RD_SAE.Z instruction to the active function.
|
|
func (c *Context) VDIVPS_RD_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VDIVPS_RD_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VDIVPS_RD_SAE_Z: Divide Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVPS.RD_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VDIVPS.RD_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VDIVPS_RD_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VDIVPS_RD_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VDIVPS_RN_SAE: Divide Packed Single-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVPS.RN_SAE zmm zmm k zmm
|
|
// VDIVPS.RN_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VDIVPS.RN_SAE instruction to the active function.
|
|
func (c *Context) VDIVPS_RN_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VDIVPS_RN_SAE(ops...))
|
|
}
|
|
|
|
// VDIVPS_RN_SAE: Divide Packed Single-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVPS.RN_SAE zmm zmm k zmm
|
|
// VDIVPS.RN_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VDIVPS.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VDIVPS_RN_SAE(ops ...operand.Op) { ctx.VDIVPS_RN_SAE(ops...) }
|
|
|
|
// VDIVPS_RN_SAE_Z: Divide Packed Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVPS.RN_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VDIVPS.RN_SAE.Z instruction to the active function.
|
|
func (c *Context) VDIVPS_RN_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VDIVPS_RN_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VDIVPS_RN_SAE_Z: Divide Packed Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVPS.RN_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VDIVPS.RN_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VDIVPS_RN_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VDIVPS_RN_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VDIVPS_RU_SAE: Divide Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVPS.RU_SAE zmm zmm k zmm
|
|
// VDIVPS.RU_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VDIVPS.RU_SAE instruction to the active function.
|
|
func (c *Context) VDIVPS_RU_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VDIVPS_RU_SAE(ops...))
|
|
}
|
|
|
|
// VDIVPS_RU_SAE: Divide Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVPS.RU_SAE zmm zmm k zmm
|
|
// VDIVPS.RU_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VDIVPS.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VDIVPS_RU_SAE(ops ...operand.Op) { ctx.VDIVPS_RU_SAE(ops...) }
|
|
|
|
// VDIVPS_RU_SAE_Z: Divide Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVPS.RU_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VDIVPS.RU_SAE.Z instruction to the active function.
|
|
func (c *Context) VDIVPS_RU_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VDIVPS_RU_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VDIVPS_RU_SAE_Z: Divide Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVPS.RU_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VDIVPS.RU_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VDIVPS_RU_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VDIVPS_RU_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VDIVPS_RZ_SAE: Divide Packed Single-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVPS.RZ_SAE zmm zmm k zmm
|
|
// VDIVPS.RZ_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VDIVPS.RZ_SAE instruction to the active function.
|
|
func (c *Context) VDIVPS_RZ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VDIVPS_RZ_SAE(ops...))
|
|
}
|
|
|
|
// VDIVPS_RZ_SAE: Divide Packed Single-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVPS.RZ_SAE zmm zmm k zmm
|
|
// VDIVPS.RZ_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VDIVPS.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VDIVPS_RZ_SAE(ops ...operand.Op) { ctx.VDIVPS_RZ_SAE(ops...) }
|
|
|
|
// VDIVPS_RZ_SAE_Z: Divide Packed Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVPS.RZ_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VDIVPS.RZ_SAE.Z instruction to the active function.
|
|
func (c *Context) VDIVPS_RZ_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VDIVPS_RZ_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VDIVPS_RZ_SAE_Z: Divide Packed Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVPS.RZ_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VDIVPS.RZ_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VDIVPS_RZ_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VDIVPS_RZ_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VDIVPS_Z: Divide Packed Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVPS.Z m128 xmm k xmm
|
|
// VDIVPS.Z m256 ymm k ymm
|
|
// VDIVPS.Z xmm xmm k xmm
|
|
// VDIVPS.Z ymm ymm k ymm
|
|
// VDIVPS.Z m512 zmm k zmm
|
|
// VDIVPS.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VDIVPS.Z instruction to the active function.
|
|
func (c *Context) VDIVPS_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VDIVPS_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VDIVPS_Z: Divide Packed Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVPS.Z m128 xmm k xmm
|
|
// VDIVPS.Z m256 ymm k ymm
|
|
// VDIVPS.Z xmm xmm k xmm
|
|
// VDIVPS.Z ymm ymm k ymm
|
|
// VDIVPS.Z m512 zmm k zmm
|
|
// VDIVPS.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VDIVPS.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VDIVPS_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VDIVPS_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VDIVSD: Divide Scalar Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVSD m64 xmm xmm
|
|
// VDIVSD xmm xmm xmm
|
|
// VDIVSD m64 xmm k xmm
|
|
// VDIVSD xmm xmm k xmm
|
|
//
|
|
// Construct and append a VDIVSD instruction to the active function.
|
|
func (c *Context) VDIVSD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VDIVSD(ops...))
|
|
}
|
|
|
|
// VDIVSD: Divide Scalar Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVSD m64 xmm xmm
|
|
// VDIVSD xmm xmm xmm
|
|
// VDIVSD m64 xmm k xmm
|
|
// VDIVSD xmm xmm k xmm
|
|
//
|
|
// Construct and append a VDIVSD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VDIVSD(ops ...operand.Op) { ctx.VDIVSD(ops...) }
|
|
|
|
// VDIVSD_RD_SAE: Divide Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVSD.RD_SAE xmm xmm k xmm
|
|
// VDIVSD.RD_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VDIVSD.RD_SAE instruction to the active function.
|
|
func (c *Context) VDIVSD_RD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VDIVSD_RD_SAE(ops...))
|
|
}
|
|
|
|
// VDIVSD_RD_SAE: Divide Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVSD.RD_SAE xmm xmm k xmm
|
|
// VDIVSD.RD_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VDIVSD.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VDIVSD_RD_SAE(ops ...operand.Op) { ctx.VDIVSD_RD_SAE(ops...) }
|
|
|
|
// VDIVSD_RD_SAE_Z: Divide Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVSD.RD_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VDIVSD.RD_SAE.Z instruction to the active function.
|
|
func (c *Context) VDIVSD_RD_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VDIVSD_RD_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VDIVSD_RD_SAE_Z: Divide Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVSD.RD_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VDIVSD.RD_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VDIVSD_RD_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VDIVSD_RD_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VDIVSD_RN_SAE: Divide Scalar Double-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVSD.RN_SAE xmm xmm k xmm
|
|
// VDIVSD.RN_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VDIVSD.RN_SAE instruction to the active function.
|
|
func (c *Context) VDIVSD_RN_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VDIVSD_RN_SAE(ops...))
|
|
}
|
|
|
|
// VDIVSD_RN_SAE: Divide Scalar Double-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VDIVSD.RN_SAE xmm xmm k xmm
|
|
// VDIVSD.RN_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VDIVSD.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VDIVSD_RN_SAE(ops ...operand.Op) { ctx.VDIVSD_RN_SAE(ops...) }
|
|
|
|
// VDIVSD_RN_SAE_Z: Divide Scalar Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
// VDIVSD.RN_SAE.Z xmm xmm k xmm
//
// Construct and append a VDIVSD.RN_SAE.Z instruction to the active function.
func (c *Context) VDIVSD_RN_SAE_Z(x, x1, k, x2 operand.Op) {
	// Construction errors are accumulated on the context by addinstruction.
	c.addinstruction(x86.VDIVSD_RN_SAE_Z(x, x1, k, x2))
}

// VDIVSD_RN_SAE_Z: Divide Scalar Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
// VDIVSD.RN_SAE.Z xmm xmm k xmm
//
// Construct and append a VDIVSD.RN_SAE.Z instruction to the active function.
// Operates on the global context.
func VDIVSD_RN_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VDIVSD_RN_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VDIVSD_RU_SAE: Divide Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
// VDIVSD.RU_SAE xmm xmm k xmm
// VDIVSD.RU_SAE xmm xmm xmm
//
// Construct and append a VDIVSD.RU_SAE instruction to the active function.
func (c *Context) VDIVSD_RU_SAE(ops ...operand.Op) {
	// Variadic: this instruction accepts multiple operand arities (see Forms).
	c.addinstruction(x86.VDIVSD_RU_SAE(ops...))
}

// VDIVSD_RU_SAE: Divide Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
// VDIVSD.RU_SAE xmm xmm k xmm
// VDIVSD.RU_SAE xmm xmm xmm
//
// Construct and append a VDIVSD.RU_SAE instruction to the active function.
// Operates on the global context.
func VDIVSD_RU_SAE(ops ...operand.Op) { ctx.VDIVSD_RU_SAE(ops...) }
|
|
|
|
// VDIVSD_RU_SAE_Z: Divide Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
// VDIVSD.RU_SAE.Z xmm xmm k xmm
//
// Construct and append a VDIVSD.RU_SAE.Z instruction to the active function.
func (c *Context) VDIVSD_RU_SAE_Z(x, x1, k, x2 operand.Op) {
	// Construction errors are accumulated on the context by addinstruction.
	c.addinstruction(x86.VDIVSD_RU_SAE_Z(x, x1, k, x2))
}

// VDIVSD_RU_SAE_Z: Divide Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
// VDIVSD.RU_SAE.Z xmm xmm k xmm
//
// Construct and append a VDIVSD.RU_SAE.Z instruction to the active function.
// Operates on the global context.
func VDIVSD_RU_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VDIVSD_RU_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VDIVSD_RZ_SAE: Divide Scalar Double-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
// VDIVSD.RZ_SAE xmm xmm k xmm
// VDIVSD.RZ_SAE xmm xmm xmm
//
// Construct and append a VDIVSD.RZ_SAE instruction to the active function.
func (c *Context) VDIVSD_RZ_SAE(ops ...operand.Op) {
	// Variadic: this instruction accepts multiple operand arities (see Forms).
	c.addinstruction(x86.VDIVSD_RZ_SAE(ops...))
}

// VDIVSD_RZ_SAE: Divide Scalar Double-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
// VDIVSD.RZ_SAE xmm xmm k xmm
// VDIVSD.RZ_SAE xmm xmm xmm
//
// Construct and append a VDIVSD.RZ_SAE instruction to the active function.
// Operates on the global context.
func VDIVSD_RZ_SAE(ops ...operand.Op) { ctx.VDIVSD_RZ_SAE(ops...) }
|
|
|
|
// VDIVSD_RZ_SAE_Z: Divide Scalar Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
// VDIVSD.RZ_SAE.Z xmm xmm k xmm
//
// Construct and append a VDIVSD.RZ_SAE.Z instruction to the active function.
func (c *Context) VDIVSD_RZ_SAE_Z(x, x1, k, x2 operand.Op) {
	// Construction errors are accumulated on the context by addinstruction.
	c.addinstruction(x86.VDIVSD_RZ_SAE_Z(x, x1, k, x2))
}

// VDIVSD_RZ_SAE_Z: Divide Scalar Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
// VDIVSD.RZ_SAE.Z xmm xmm k xmm
//
// Construct and append a VDIVSD.RZ_SAE.Z instruction to the active function.
// Operates on the global context.
func VDIVSD_RZ_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VDIVSD_RZ_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VDIVSD_Z: Divide Scalar Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
// VDIVSD.Z m64 xmm k xmm
// VDIVSD.Z xmm xmm k xmm
//
// Construct and append a VDIVSD.Z instruction to the active function.
func (c *Context) VDIVSD_Z(mx, x, k, x1 operand.Op) {
	// mx may be a memory (m64) or register (xmm) operand, per Forms.
	c.addinstruction(x86.VDIVSD_Z(mx, x, k, x1))
}

// VDIVSD_Z: Divide Scalar Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
// VDIVSD.Z m64 xmm k xmm
// VDIVSD.Z xmm xmm k xmm
//
// Construct and append a VDIVSD.Z instruction to the active function.
// Operates on the global context.
func VDIVSD_Z(mx, x, k, x1 operand.Op) { ctx.VDIVSD_Z(mx, x, k, x1) }
|
|
|
|
// VDIVSS: Divide Scalar Single-Precision Floating-Point Values.
//
// Forms:
//
// VDIVSS m32 xmm xmm
// VDIVSS xmm xmm xmm
// VDIVSS m32 xmm k xmm
// VDIVSS xmm xmm k xmm
//
// Construct and append a VDIVSS instruction to the active function.
func (c *Context) VDIVSS(ops ...operand.Op) {
	// Variadic: this instruction accepts multiple operand arities (see Forms).
	c.addinstruction(x86.VDIVSS(ops...))
}

// VDIVSS: Divide Scalar Single-Precision Floating-Point Values.
//
// Forms:
//
// VDIVSS m32 xmm xmm
// VDIVSS xmm xmm xmm
// VDIVSS m32 xmm k xmm
// VDIVSS xmm xmm k xmm
//
// Construct and append a VDIVSS instruction to the active function.
// Operates on the global context.
func VDIVSS(ops ...operand.Op) { ctx.VDIVSS(ops...) }
|
|
|
|
// VDIVSS_RD_SAE: Divide Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
// VDIVSS.RD_SAE xmm xmm k xmm
// VDIVSS.RD_SAE xmm xmm xmm
//
// Construct and append a VDIVSS.RD_SAE instruction to the active function.
func (c *Context) VDIVSS_RD_SAE(ops ...operand.Op) {
	// Variadic: this instruction accepts multiple operand arities (see Forms).
	c.addinstruction(x86.VDIVSS_RD_SAE(ops...))
}

// VDIVSS_RD_SAE: Divide Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
// VDIVSS.RD_SAE xmm xmm k xmm
// VDIVSS.RD_SAE xmm xmm xmm
//
// Construct and append a VDIVSS.RD_SAE instruction to the active function.
// Operates on the global context.
func VDIVSS_RD_SAE(ops ...operand.Op) { ctx.VDIVSS_RD_SAE(ops...) }
|
|
|
|
// VDIVSS_RD_SAE_Z: Divide Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
// VDIVSS.RD_SAE.Z xmm xmm k xmm
//
// Construct and append a VDIVSS.RD_SAE.Z instruction to the active function.
func (c *Context) VDIVSS_RD_SAE_Z(x, x1, k, x2 operand.Op) {
	// Construction errors are accumulated on the context by addinstruction.
	c.addinstruction(x86.VDIVSS_RD_SAE_Z(x, x1, k, x2))
}

// VDIVSS_RD_SAE_Z: Divide Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
// VDIVSS.RD_SAE.Z xmm xmm k xmm
//
// Construct and append a VDIVSS.RD_SAE.Z instruction to the active function.
// Operates on the global context.
func VDIVSS_RD_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VDIVSS_RD_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VDIVSS_RN_SAE: Divide Scalar Single-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
// VDIVSS.RN_SAE xmm xmm k xmm
// VDIVSS.RN_SAE xmm xmm xmm
//
// Construct and append a VDIVSS.RN_SAE instruction to the active function.
func (c *Context) VDIVSS_RN_SAE(ops ...operand.Op) {
	// Variadic: this instruction accepts multiple operand arities (see Forms).
	c.addinstruction(x86.VDIVSS_RN_SAE(ops...))
}

// VDIVSS_RN_SAE: Divide Scalar Single-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
// VDIVSS.RN_SAE xmm xmm k xmm
// VDIVSS.RN_SAE xmm xmm xmm
//
// Construct and append a VDIVSS.RN_SAE instruction to the active function.
// Operates on the global context.
func VDIVSS_RN_SAE(ops ...operand.Op) { ctx.VDIVSS_RN_SAE(ops...) }
|
|
|
|
// VDIVSS_RN_SAE_Z: Divide Scalar Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
// VDIVSS.RN_SAE.Z xmm xmm k xmm
//
// Construct and append a VDIVSS.RN_SAE.Z instruction to the active function.
func (c *Context) VDIVSS_RN_SAE_Z(x, x1, k, x2 operand.Op) {
	// Construction errors are accumulated on the context by addinstruction.
	c.addinstruction(x86.VDIVSS_RN_SAE_Z(x, x1, k, x2))
}

// VDIVSS_RN_SAE_Z: Divide Scalar Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
// VDIVSS.RN_SAE.Z xmm xmm k xmm
//
// Construct and append a VDIVSS.RN_SAE.Z instruction to the active function.
// Operates on the global context.
func VDIVSS_RN_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VDIVSS_RN_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VDIVSS_RU_SAE: Divide Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
// VDIVSS.RU_SAE xmm xmm k xmm
// VDIVSS.RU_SAE xmm xmm xmm
//
// Construct and append a VDIVSS.RU_SAE instruction to the active function.
func (c *Context) VDIVSS_RU_SAE(ops ...operand.Op) {
	// Variadic: this instruction accepts multiple operand arities (see Forms).
	c.addinstruction(x86.VDIVSS_RU_SAE(ops...))
}

// VDIVSS_RU_SAE: Divide Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
// VDIVSS.RU_SAE xmm xmm k xmm
// VDIVSS.RU_SAE xmm xmm xmm
//
// Construct and append a VDIVSS.RU_SAE instruction to the active function.
// Operates on the global context.
func VDIVSS_RU_SAE(ops ...operand.Op) { ctx.VDIVSS_RU_SAE(ops...) }
|
|
|
|
// VDIVSS_RU_SAE_Z: Divide Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
// VDIVSS.RU_SAE.Z xmm xmm k xmm
//
// Construct and append a VDIVSS.RU_SAE.Z instruction to the active function.
func (c *Context) VDIVSS_RU_SAE_Z(x, x1, k, x2 operand.Op) {
	// Construction errors are accumulated on the context by addinstruction.
	c.addinstruction(x86.VDIVSS_RU_SAE_Z(x, x1, k, x2))
}

// VDIVSS_RU_SAE_Z: Divide Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
// VDIVSS.RU_SAE.Z xmm xmm k xmm
//
// Construct and append a VDIVSS.RU_SAE.Z instruction to the active function.
// Operates on the global context.
func VDIVSS_RU_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VDIVSS_RU_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VDIVSS_RZ_SAE: Divide Scalar Single-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
// VDIVSS.RZ_SAE xmm xmm k xmm
// VDIVSS.RZ_SAE xmm xmm xmm
//
// Construct and append a VDIVSS.RZ_SAE instruction to the active function.
func (c *Context) VDIVSS_RZ_SAE(ops ...operand.Op) {
	// Variadic: this instruction accepts multiple operand arities (see Forms).
	c.addinstruction(x86.VDIVSS_RZ_SAE(ops...))
}

// VDIVSS_RZ_SAE: Divide Scalar Single-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
// VDIVSS.RZ_SAE xmm xmm k xmm
// VDIVSS.RZ_SAE xmm xmm xmm
//
// Construct and append a VDIVSS.RZ_SAE instruction to the active function.
// Operates on the global context.
func VDIVSS_RZ_SAE(ops ...operand.Op) { ctx.VDIVSS_RZ_SAE(ops...) }
|
|
|
|
// VDIVSS_RZ_SAE_Z: Divide Scalar Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
// VDIVSS.RZ_SAE.Z xmm xmm k xmm
//
// Construct and append a VDIVSS.RZ_SAE.Z instruction to the active function.
func (c *Context) VDIVSS_RZ_SAE_Z(x, x1, k, x2 operand.Op) {
	// Construction errors are accumulated on the context by addinstruction.
	c.addinstruction(x86.VDIVSS_RZ_SAE_Z(x, x1, k, x2))
}

// VDIVSS_RZ_SAE_Z: Divide Scalar Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
// VDIVSS.RZ_SAE.Z xmm xmm k xmm
//
// Construct and append a VDIVSS.RZ_SAE.Z instruction to the active function.
// Operates on the global context.
func VDIVSS_RZ_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VDIVSS_RZ_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VDIVSS_Z: Divide Scalar Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
// VDIVSS.Z m32 xmm k xmm
// VDIVSS.Z xmm xmm k xmm
//
// Construct and append a VDIVSS.Z instruction to the active function.
func (c *Context) VDIVSS_Z(mx, x, k, x1 operand.Op) {
	// mx may be a memory (m32) or register (xmm) operand, per Forms.
	c.addinstruction(x86.VDIVSS_Z(mx, x, k, x1))
}

// VDIVSS_Z: Divide Scalar Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
// VDIVSS.Z m32 xmm k xmm
// VDIVSS.Z xmm xmm k xmm
//
// Construct and append a VDIVSS.Z instruction to the active function.
// Operates on the global context.
func VDIVSS_Z(mx, x, k, x1 operand.Op) { ctx.VDIVSS_Z(mx, x, k, x1) }
|
|
|
|
// VDPPD: Dot Product of Packed Double Precision Floating-Point Values.
//
// Forms:
//
// VDPPD imm8 m128 xmm xmm
// VDPPD imm8 xmm xmm xmm
//
// Construct and append a VDPPD instruction to the active function.
func (c *Context) VDPPD(i, mx, x, x1 operand.Op) {
	// i is the imm8 selector; mx may be memory (m128) or register (xmm).
	c.addinstruction(x86.VDPPD(i, mx, x, x1))
}

// VDPPD: Dot Product of Packed Double Precision Floating-Point Values.
//
// Forms:
//
// VDPPD imm8 m128 xmm xmm
// VDPPD imm8 xmm xmm xmm
//
// Construct and append a VDPPD instruction to the active function.
// Operates on the global context.
func VDPPD(i, mx, x, x1 operand.Op) { ctx.VDPPD(i, mx, x, x1) }
|
|
|
|
// VDPPS: Dot Product of Packed Single Precision Floating-Point Values.
//
// Forms:
//
// VDPPS imm8 m128 xmm xmm
// VDPPS imm8 m256 ymm ymm
// VDPPS imm8 xmm xmm xmm
// VDPPS imm8 ymm ymm ymm
//
// Construct and append a VDPPS instruction to the active function.
func (c *Context) VDPPS(i, mxy, xy, xy1 operand.Op) {
	// i is the imm8 selector; mxy may be memory or an xmm/ymm register.
	c.addinstruction(x86.VDPPS(i, mxy, xy, xy1))
}

// VDPPS: Dot Product of Packed Single Precision Floating-Point Values.
//
// Forms:
//
// VDPPS imm8 m128 xmm xmm
// VDPPS imm8 m256 ymm ymm
// VDPPS imm8 xmm xmm xmm
// VDPPS imm8 ymm ymm ymm
//
// Construct and append a VDPPS instruction to the active function.
// Operates on the global context.
func VDPPS(i, mxy, xy, xy1 operand.Op) { ctx.VDPPS(i, mxy, xy, xy1) }
|
|
|
|
// VEXP2PD: Approximation to the Exponential 2^x of Packed Double-Precision Floating-Point Values with Less Than 2^-23 Relative Error.
//
// Forms:
//
// VEXP2PD m512 k zmm
// VEXP2PD m512 zmm
// VEXP2PD zmm k zmm
// VEXP2PD zmm zmm
//
// Construct and append a VEXP2PD instruction to the active function.
func (c *Context) VEXP2PD(ops ...operand.Op) {
	// Variadic: this instruction accepts multiple operand arities (see Forms).
	c.addinstruction(x86.VEXP2PD(ops...))
}

// VEXP2PD: Approximation to the Exponential 2^x of Packed Double-Precision Floating-Point Values with Less Than 2^-23 Relative Error.
//
// Forms:
//
// VEXP2PD m512 k zmm
// VEXP2PD m512 zmm
// VEXP2PD zmm k zmm
// VEXP2PD zmm zmm
//
// Construct and append a VEXP2PD instruction to the active function.
// Operates on the global context.
func VEXP2PD(ops ...operand.Op) { ctx.VEXP2PD(ops...) }
|
|
|
|
// VEXP2PD_BCST: Approximation to the Exponential 2^x of Packed Double-Precision Floating-Point Values with Less Than 2^-23 Relative Error (Broadcast).
//
// Forms:
//
// VEXP2PD.BCST m64 k zmm
// VEXP2PD.BCST m64 zmm
//
// Construct and append a VEXP2PD.BCST instruction to the active function.
func (c *Context) VEXP2PD_BCST(ops ...operand.Op) {
	// Variadic: this instruction accepts multiple operand arities (see Forms).
	c.addinstruction(x86.VEXP2PD_BCST(ops...))
}

// VEXP2PD_BCST: Approximation to the Exponential 2^x of Packed Double-Precision Floating-Point Values with Less Than 2^-23 Relative Error (Broadcast).
//
// Forms:
//
// VEXP2PD.BCST m64 k zmm
// VEXP2PD.BCST m64 zmm
//
// Construct and append a VEXP2PD.BCST instruction to the active function.
// Operates on the global context.
func VEXP2PD_BCST(ops ...operand.Op) { ctx.VEXP2PD_BCST(ops...) }
|
|
|
|
// VEXP2PD_BCST_Z: Approximation to the Exponential 2^x of Packed Double-Precision Floating-Point Values with Less Than 2^-23 Relative Error (Broadcast, Zeroing Masking).
//
// Forms:
//
// VEXP2PD.BCST.Z m64 k zmm
//
// Construct and append a VEXP2PD.BCST.Z instruction to the active function.
func (c *Context) VEXP2PD_BCST_Z(m, k, z operand.Op) {
	// Construction errors are accumulated on the context by addinstruction.
	c.addinstruction(x86.VEXP2PD_BCST_Z(m, k, z))
}

// VEXP2PD_BCST_Z: Approximation to the Exponential 2^x of Packed Double-Precision Floating-Point Values with Less Than 2^-23 Relative Error (Broadcast, Zeroing Masking).
//
// Forms:
//
// VEXP2PD.BCST.Z m64 k zmm
//
// Construct and append a VEXP2PD.BCST.Z instruction to the active function.
// Operates on the global context.
func VEXP2PD_BCST_Z(m, k, z operand.Op) { ctx.VEXP2PD_BCST_Z(m, k, z) }
|
|
|
|
// VEXP2PD_SAE: Approximation to the Exponential 2^x of Packed Double-Precision Floating-Point Values with Less Than 2^-23 Relative Error (Suppress All Exceptions).
//
// Forms:
//
// VEXP2PD.SAE zmm k zmm
// VEXP2PD.SAE zmm zmm
//
// Construct and append a VEXP2PD.SAE instruction to the active function.
func (c *Context) VEXP2PD_SAE(ops ...operand.Op) {
	// Variadic: this instruction accepts multiple operand arities (see Forms).
	c.addinstruction(x86.VEXP2PD_SAE(ops...))
}

// VEXP2PD_SAE: Approximation to the Exponential 2^x of Packed Double-Precision Floating-Point Values with Less Than 2^-23 Relative Error (Suppress All Exceptions).
//
// Forms:
//
// VEXP2PD.SAE zmm k zmm
// VEXP2PD.SAE zmm zmm
//
// Construct and append a VEXP2PD.SAE instruction to the active function.
// Operates on the global context.
func VEXP2PD_SAE(ops ...operand.Op) { ctx.VEXP2PD_SAE(ops...) }
|
|
|
|
// VEXP2PD_SAE_Z: Approximation to the Exponential 2^x of Packed Double-Precision Floating-Point Values with Less Than 2^-23 Relative Error (Suppress All Exceptions, Zeroing Masking).
//
// Forms:
//
// VEXP2PD.SAE.Z zmm k zmm
//
// Construct and append a VEXP2PD.SAE.Z instruction to the active function.
func (c *Context) VEXP2PD_SAE_Z(z, k, z1 operand.Op) {
	// Construction errors are accumulated on the context by addinstruction.
	c.addinstruction(x86.VEXP2PD_SAE_Z(z, k, z1))
}

// VEXP2PD_SAE_Z: Approximation to the Exponential 2^x of Packed Double-Precision Floating-Point Values with Less Than 2^-23 Relative Error (Suppress All Exceptions, Zeroing Masking).
//
// Forms:
//
// VEXP2PD.SAE.Z zmm k zmm
//
// Construct and append a VEXP2PD.SAE.Z instruction to the active function.
// Operates on the global context.
func VEXP2PD_SAE_Z(z, k, z1 operand.Op) { ctx.VEXP2PD_SAE_Z(z, k, z1) }
|
|
|
|
// VEXP2PD_Z: Approximation to the Exponential 2^x of Packed Double-Precision Floating-Point Values with Less Than 2^-23 Relative Error (Zeroing Masking).
//
// Forms:
//
// VEXP2PD.Z m512 k zmm
// VEXP2PD.Z zmm k zmm
//
// Construct and append a VEXP2PD.Z instruction to the active function.
func (c *Context) VEXP2PD_Z(mz, k, z operand.Op) {
	// mz may be a memory (m512) or register (zmm) operand, per Forms.
	c.addinstruction(x86.VEXP2PD_Z(mz, k, z))
}

// VEXP2PD_Z: Approximation to the Exponential 2^x of Packed Double-Precision Floating-Point Values with Less Than 2^-23 Relative Error (Zeroing Masking).
//
// Forms:
//
// VEXP2PD.Z m512 k zmm
// VEXP2PD.Z zmm k zmm
//
// Construct and append a VEXP2PD.Z instruction to the active function.
// Operates on the global context.
func VEXP2PD_Z(mz, k, z operand.Op) { ctx.VEXP2PD_Z(mz, k, z) }
|
|
|
|
// VEXP2PS: Approximation to the Exponential 2^x of Packed Single-Precision Floating-Point Values with Less Than 2^-23 Relative Error.
//
// Forms:
//
// VEXP2PS m512 k zmm
// VEXP2PS m512 zmm
// VEXP2PS zmm k zmm
// VEXP2PS zmm zmm
//
// Construct and append a VEXP2PS instruction to the active function.
func (c *Context) VEXP2PS(ops ...operand.Op) {
	// Variadic: this instruction accepts multiple operand arities (see Forms).
	c.addinstruction(x86.VEXP2PS(ops...))
}

// VEXP2PS: Approximation to the Exponential 2^x of Packed Single-Precision Floating-Point Values with Less Than 2^-23 Relative Error.
//
// Forms:
//
// VEXP2PS m512 k zmm
// VEXP2PS m512 zmm
// VEXP2PS zmm k zmm
// VEXP2PS zmm zmm
//
// Construct and append a VEXP2PS instruction to the active function.
// Operates on the global context.
func VEXP2PS(ops ...operand.Op) { ctx.VEXP2PS(ops...) }
|
|
|
|
// VEXP2PS_BCST: Approximation to the Exponential 2^x of Packed Single-Precision Floating-Point Values with Less Than 2^-23 Relative Error (Broadcast).
//
// Forms:
//
// VEXP2PS.BCST m32 k zmm
// VEXP2PS.BCST m32 zmm
//
// Construct and append a VEXP2PS.BCST instruction to the active function.
func (c *Context) VEXP2PS_BCST(ops ...operand.Op) {
	// Variadic: this instruction accepts multiple operand arities (see Forms).
	c.addinstruction(x86.VEXP2PS_BCST(ops...))
}

// VEXP2PS_BCST: Approximation to the Exponential 2^x of Packed Single-Precision Floating-Point Values with Less Than 2^-23 Relative Error (Broadcast).
//
// Forms:
//
// VEXP2PS.BCST m32 k zmm
// VEXP2PS.BCST m32 zmm
//
// Construct and append a VEXP2PS.BCST instruction to the active function.
// Operates on the global context.
func VEXP2PS_BCST(ops ...operand.Op) { ctx.VEXP2PS_BCST(ops...) }
|
|
|
|
// VEXP2PS_BCST_Z: Approximation to the Exponential 2^x of Packed Single-Precision Floating-Point Values with Less Than 2^-23 Relative Error (Broadcast, Zeroing Masking).
//
// Forms:
//
// VEXP2PS.BCST.Z m32 k zmm
//
// Construct and append a VEXP2PS.BCST.Z instruction to the active function.
func (c *Context) VEXP2PS_BCST_Z(m, k, z operand.Op) {
	// Construction errors are accumulated on the context by addinstruction.
	c.addinstruction(x86.VEXP2PS_BCST_Z(m, k, z))
}

// VEXP2PS_BCST_Z: Approximation to the Exponential 2^x of Packed Single-Precision Floating-Point Values with Less Than 2^-23 Relative Error (Broadcast, Zeroing Masking).
//
// Forms:
//
// VEXP2PS.BCST.Z m32 k zmm
//
// Construct and append a VEXP2PS.BCST.Z instruction to the active function.
// Operates on the global context.
func VEXP2PS_BCST_Z(m, k, z operand.Op) { ctx.VEXP2PS_BCST_Z(m, k, z) }
|
|
|
|
// VEXP2PS_SAE: Approximation to the Exponential 2^x of Packed Single-Precision Floating-Point Values with Less Than 2^-23 Relative Error (Suppress All Exceptions).
//
// Forms:
//
// VEXP2PS.SAE zmm k zmm
// VEXP2PS.SAE zmm zmm
//
// Construct and append a VEXP2PS.SAE instruction to the active function.
func (c *Context) VEXP2PS_SAE(ops ...operand.Op) {
	// Variadic: this instruction accepts multiple operand arities (see Forms).
	c.addinstruction(x86.VEXP2PS_SAE(ops...))
}

// VEXP2PS_SAE: Approximation to the Exponential 2^x of Packed Single-Precision Floating-Point Values with Less Than 2^-23 Relative Error (Suppress All Exceptions).
//
// Forms:
//
// VEXP2PS.SAE zmm k zmm
// VEXP2PS.SAE zmm zmm
//
// Construct and append a VEXP2PS.SAE instruction to the active function.
// Operates on the global context.
func VEXP2PS_SAE(ops ...operand.Op) { ctx.VEXP2PS_SAE(ops...) }
|
|
|
|
// VEXP2PS_SAE_Z: Approximation to the Exponential 2^x of Packed Single-Precision Floating-Point Values with Less Than 2^-23 Relative Error (Suppress All Exceptions, Zeroing Masking).
//
// Forms:
//
// VEXP2PS.SAE.Z zmm k zmm
//
// Construct and append a VEXP2PS.SAE.Z instruction to the active function.
func (c *Context) VEXP2PS_SAE_Z(z, k, z1 operand.Op) {
	// Construction errors are accumulated on the context by addinstruction.
	c.addinstruction(x86.VEXP2PS_SAE_Z(z, k, z1))
}

// VEXP2PS_SAE_Z: Approximation to the Exponential 2^x of Packed Single-Precision Floating-Point Values with Less Than 2^-23 Relative Error (Suppress All Exceptions, Zeroing Masking).
//
// Forms:
//
// VEXP2PS.SAE.Z zmm k zmm
//
// Construct and append a VEXP2PS.SAE.Z instruction to the active function.
// Operates on the global context.
func VEXP2PS_SAE_Z(z, k, z1 operand.Op) { ctx.VEXP2PS_SAE_Z(z, k, z1) }
|
|
|
|
// VEXP2PS_Z: Approximation to the Exponential 2^x of Packed Single-Precision Floating-Point Values with Less Than 2^-23 Relative Error (Zeroing Masking).
//
// Forms:
//
// VEXP2PS.Z m512 k zmm
// VEXP2PS.Z zmm k zmm
//
// Construct and append a VEXP2PS.Z instruction to the active function.
func (c *Context) VEXP2PS_Z(mz, k, z operand.Op) {
	// mz may be a memory (m512) or register (zmm) operand, per Forms.
	c.addinstruction(x86.VEXP2PS_Z(mz, k, z))
}

// VEXP2PS_Z: Approximation to the Exponential 2^x of Packed Single-Precision Floating-Point Values with Less Than 2^-23 Relative Error (Zeroing Masking).
//
// Forms:
//
// VEXP2PS.Z m512 k zmm
// VEXP2PS.Z zmm k zmm
//
// Construct and append a VEXP2PS.Z instruction to the active function.
// Operates on the global context.
func VEXP2PS_Z(mz, k, z operand.Op) { ctx.VEXP2PS_Z(mz, k, z) }
|
|
|
|
// VEXPANDPD: Load Sparse Packed Double-Precision Floating-Point Values from Dense Memory.
//
// Forms:
//
// VEXPANDPD m256 k ymm
// VEXPANDPD m256 ymm
// VEXPANDPD ymm k ymm
// VEXPANDPD ymm ymm
// VEXPANDPD m512 k zmm
// VEXPANDPD m512 zmm
// VEXPANDPD zmm k zmm
// VEXPANDPD zmm zmm
// VEXPANDPD m128 k xmm
// VEXPANDPD m128 xmm
// VEXPANDPD xmm k xmm
// VEXPANDPD xmm xmm
//
// Construct and append a VEXPANDPD instruction to the active function.
func (c *Context) VEXPANDPD(ops ...operand.Op) {
	// Variadic: this instruction accepts multiple operand arities (see Forms).
	c.addinstruction(x86.VEXPANDPD(ops...))
}

// VEXPANDPD: Load Sparse Packed Double-Precision Floating-Point Values from Dense Memory.
//
// Forms:
//
// VEXPANDPD m256 k ymm
// VEXPANDPD m256 ymm
// VEXPANDPD ymm k ymm
// VEXPANDPD ymm ymm
// VEXPANDPD m512 k zmm
// VEXPANDPD m512 zmm
// VEXPANDPD zmm k zmm
// VEXPANDPD zmm zmm
// VEXPANDPD m128 k xmm
// VEXPANDPD m128 xmm
// VEXPANDPD xmm k xmm
// VEXPANDPD xmm xmm
//
// Construct and append a VEXPANDPD instruction to the active function.
// Operates on the global context.
func VEXPANDPD(ops ...operand.Op) { ctx.VEXPANDPD(ops...) }
|
|
|
|
// VEXPANDPD_Z: Load Sparse Packed Double-Precision Floating-Point Values from Dense Memory (Zeroing Masking).
//
// Forms:
//
// VEXPANDPD.Z m256 k ymm
// VEXPANDPD.Z ymm k ymm
// VEXPANDPD.Z m512 k zmm
// VEXPANDPD.Z zmm k zmm
// VEXPANDPD.Z m128 k xmm
// VEXPANDPD.Z xmm k xmm
//
// Construct and append a VEXPANDPD.Z instruction to the active function.
func (c *Context) VEXPANDPD_Z(mxyz, k, xyz operand.Op) {
	// mxyz may be memory or an xmm/ymm/zmm register, per Forms.
	c.addinstruction(x86.VEXPANDPD_Z(mxyz, k, xyz))
}

// VEXPANDPD_Z: Load Sparse Packed Double-Precision Floating-Point Values from Dense Memory (Zeroing Masking).
//
// Forms:
//
// VEXPANDPD.Z m256 k ymm
// VEXPANDPD.Z ymm k ymm
// VEXPANDPD.Z m512 k zmm
// VEXPANDPD.Z zmm k zmm
// VEXPANDPD.Z m128 k xmm
// VEXPANDPD.Z xmm k xmm
//
// Construct and append a VEXPANDPD.Z instruction to the active function.
// Operates on the global context.
func VEXPANDPD_Z(mxyz, k, xyz operand.Op) { ctx.VEXPANDPD_Z(mxyz, k, xyz) }
|
|
|
|
// VEXPANDPS: Load Sparse Packed Single-Precision Floating-Point Values from Dense Memory.
//
// Forms:
//
// VEXPANDPS m128 k xmm
// VEXPANDPS m128 xmm
// VEXPANDPS m256 k ymm
// VEXPANDPS m256 ymm
// VEXPANDPS xmm k xmm
// VEXPANDPS xmm xmm
// VEXPANDPS ymm k ymm
// VEXPANDPS ymm ymm
// VEXPANDPS m512 k zmm
// VEXPANDPS m512 zmm
// VEXPANDPS zmm k zmm
// VEXPANDPS zmm zmm
//
// Construct and append a VEXPANDPS instruction to the active function.
func (c *Context) VEXPANDPS(ops ...operand.Op) {
	// Variadic: this instruction accepts multiple operand arities (see Forms).
	c.addinstruction(x86.VEXPANDPS(ops...))
}

// VEXPANDPS: Load Sparse Packed Single-Precision Floating-Point Values from Dense Memory.
//
// Forms:
//
// VEXPANDPS m128 k xmm
// VEXPANDPS m128 xmm
// VEXPANDPS m256 k ymm
// VEXPANDPS m256 ymm
// VEXPANDPS xmm k xmm
// VEXPANDPS xmm xmm
// VEXPANDPS ymm k ymm
// VEXPANDPS ymm ymm
// VEXPANDPS m512 k zmm
// VEXPANDPS m512 zmm
// VEXPANDPS zmm k zmm
// VEXPANDPS zmm zmm
//
// Construct and append a VEXPANDPS instruction to the active function.
// Operates on the global context.
func VEXPANDPS(ops ...operand.Op) { ctx.VEXPANDPS(ops...) }
|
|
|
|
// VEXPANDPS_Z: Load Sparse Packed Single-Precision Floating-Point Values from Dense Memory (Zeroing Masking).
//
// Forms:
//
// VEXPANDPS.Z m128 k xmm
// VEXPANDPS.Z m256 k ymm
// VEXPANDPS.Z xmm k xmm
// VEXPANDPS.Z ymm k ymm
// VEXPANDPS.Z m512 k zmm
// VEXPANDPS.Z zmm k zmm
//
// Construct and append a VEXPANDPS.Z instruction to the active function.
func (c *Context) VEXPANDPS_Z(mxyz, k, xyz operand.Op) {
	// mxyz may be memory or an xmm/ymm/zmm register, per Forms.
	c.addinstruction(x86.VEXPANDPS_Z(mxyz, k, xyz))
}

// VEXPANDPS_Z: Load Sparse Packed Single-Precision Floating-Point Values from Dense Memory (Zeroing Masking).
//
// Forms:
//
// VEXPANDPS.Z m128 k xmm
// VEXPANDPS.Z m256 k ymm
// VEXPANDPS.Z xmm k xmm
// VEXPANDPS.Z ymm k ymm
// VEXPANDPS.Z m512 k zmm
// VEXPANDPS.Z zmm k zmm
//
// Construct and append a VEXPANDPS.Z instruction to the active function.
// Operates on the global context.
func VEXPANDPS_Z(mxyz, k, xyz operand.Op) { ctx.VEXPANDPS_Z(mxyz, k, xyz) }
|
|
|
|
// VEXTRACTF128: Extract Packed Floating-Point Values.
//
// Forms:
//
// VEXTRACTF128 imm8 ymm m128
// VEXTRACTF128 imm8 ymm xmm
//
// Construct and append a VEXTRACTF128 instruction to the active function.
func (c *Context) VEXTRACTF128(i, y, mx operand.Op) {
	// i is the imm8 lane selector; mx is the memory or xmm destination.
	c.addinstruction(x86.VEXTRACTF128(i, y, mx))
}

// VEXTRACTF128: Extract Packed Floating-Point Values.
//
// Forms:
//
// VEXTRACTF128 imm8 ymm m128
// VEXTRACTF128 imm8 ymm xmm
//
// Construct and append a VEXTRACTF128 instruction to the active function.
// Operates on the global context.
func VEXTRACTF128(i, y, mx operand.Op) { ctx.VEXTRACTF128(i, y, mx) }
|
|
|
|
// VEXTRACTF32X4: Extract 128 Bits of Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// VEXTRACTF32X4 imm8 ymm k m128
// VEXTRACTF32X4 imm8 ymm k xmm
// VEXTRACTF32X4 imm8 ymm m128
// VEXTRACTF32X4 imm8 ymm xmm
// VEXTRACTF32X4 imm8 zmm k m128
// VEXTRACTF32X4 imm8 zmm k xmm
// VEXTRACTF32X4 imm8 zmm m128
// VEXTRACTF32X4 imm8 zmm xmm
//
// Construct and append a VEXTRACTF32X4 instruction to the active function.
func (c *Context) VEXTRACTF32X4(ops ...operand.Op) {
	// Variadic: this instruction accepts multiple operand arities (see Forms).
	c.addinstruction(x86.VEXTRACTF32X4(ops...))
}

// VEXTRACTF32X4: Extract 128 Bits of Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// VEXTRACTF32X4 imm8 ymm k m128
// VEXTRACTF32X4 imm8 ymm k xmm
// VEXTRACTF32X4 imm8 ymm m128
// VEXTRACTF32X4 imm8 ymm xmm
// VEXTRACTF32X4 imm8 zmm k m128
// VEXTRACTF32X4 imm8 zmm k xmm
// VEXTRACTF32X4 imm8 zmm m128
// VEXTRACTF32X4 imm8 zmm xmm
//
// Construct and append a VEXTRACTF32X4 instruction to the active function.
// Operates on the global context.
func VEXTRACTF32X4(ops ...operand.Op) { ctx.VEXTRACTF32X4(ops...) }
|
|
|
|
// VEXTRACTF32X4_Z: Extract 128 Bits of Packed Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VEXTRACTF32X4.Z imm8 ymm k m128
|
|
// VEXTRACTF32X4.Z imm8 ymm k xmm
|
|
// VEXTRACTF32X4.Z imm8 zmm k m128
|
|
// VEXTRACTF32X4.Z imm8 zmm k xmm
|
|
//
|
|
// Construct and append a VEXTRACTF32X4.Z instruction to the active function.
|
|
func (c *Context) VEXTRACTF32X4_Z(i, yz, k, mx operand.Op) {
|
|
c.addinstruction(x86.VEXTRACTF32X4_Z(i, yz, k, mx))
|
|
}
|
|
|
|
// VEXTRACTF32X4_Z: Extract 128 Bits of Packed Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VEXTRACTF32X4.Z imm8 ymm k m128
|
|
// VEXTRACTF32X4.Z imm8 ymm k xmm
|
|
// VEXTRACTF32X4.Z imm8 zmm k m128
|
|
// VEXTRACTF32X4.Z imm8 zmm k xmm
|
|
//
|
|
// Construct and append a VEXTRACTF32X4.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VEXTRACTF32X4_Z(i, yz, k, mx operand.Op) {
	ctx.VEXTRACTF32X4_Z(i, yz, k, mx)
}
|
|
|
|
// VEXTRACTF32X8: Extract 256 Bits of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VEXTRACTF32X8 imm8 zmm k m256
|
|
// VEXTRACTF32X8 imm8 zmm k ymm
|
|
// VEXTRACTF32X8 imm8 zmm m256
|
|
// VEXTRACTF32X8 imm8 zmm ymm
|
|
//
|
|
// Construct and append a VEXTRACTF32X8 instruction to the active function.
|
|
func (c *Context) VEXTRACTF32X8(ops ...operand.Op) {
|
|
c.addinstruction(x86.VEXTRACTF32X8(ops...))
|
|
}
|
|
|
|
// VEXTRACTF32X8: Extract 256 Bits of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VEXTRACTF32X8 imm8 zmm k m256
|
|
// VEXTRACTF32X8 imm8 zmm k ymm
|
|
// VEXTRACTF32X8 imm8 zmm m256
|
|
// VEXTRACTF32X8 imm8 zmm ymm
|
|
//
|
|
// Construct and append a VEXTRACTF32X8 instruction to the active function.
|
|
// Operates on the global context.
|
|
func VEXTRACTF32X8(ops ...operand.Op) {
	ctx.VEXTRACTF32X8(ops...)
}
|
|
|
|
// VEXTRACTF32X8_Z: Extract 256 Bits of Packed Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VEXTRACTF32X8.Z imm8 zmm k m256
|
|
// VEXTRACTF32X8.Z imm8 zmm k ymm
|
|
//
|
|
// Construct and append a VEXTRACTF32X8.Z instruction to the active function.
|
|
func (c *Context) VEXTRACTF32X8_Z(i, z, k, my operand.Op) {
|
|
c.addinstruction(x86.VEXTRACTF32X8_Z(i, z, k, my))
|
|
}
|
|
|
|
// VEXTRACTF32X8_Z: Extract 256 Bits of Packed Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VEXTRACTF32X8.Z imm8 zmm k m256
|
|
// VEXTRACTF32X8.Z imm8 zmm k ymm
|
|
//
|
|
// Construct and append a VEXTRACTF32X8.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VEXTRACTF32X8_Z(i, z, k, my operand.Op) {
	ctx.VEXTRACTF32X8_Z(i, z, k, my)
}
|
|
|
|
// VEXTRACTF64X2: Extract 128 Bits of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VEXTRACTF64X2 imm8 ymm k m128
|
|
// VEXTRACTF64X2 imm8 ymm k xmm
|
|
// VEXTRACTF64X2 imm8 ymm m128
|
|
// VEXTRACTF64X2 imm8 ymm xmm
|
|
// VEXTRACTF64X2 imm8 zmm k m128
|
|
// VEXTRACTF64X2 imm8 zmm k xmm
|
|
// VEXTRACTF64X2 imm8 zmm m128
|
|
// VEXTRACTF64X2 imm8 zmm xmm
|
|
//
|
|
// Construct and append a VEXTRACTF64X2 instruction to the active function.
|
|
func (c *Context) VEXTRACTF64X2(ops ...operand.Op) {
|
|
c.addinstruction(x86.VEXTRACTF64X2(ops...))
|
|
}
|
|
|
|
// VEXTRACTF64X2: Extract 128 Bits of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VEXTRACTF64X2 imm8 ymm k m128
|
|
// VEXTRACTF64X2 imm8 ymm k xmm
|
|
// VEXTRACTF64X2 imm8 ymm m128
|
|
// VEXTRACTF64X2 imm8 ymm xmm
|
|
// VEXTRACTF64X2 imm8 zmm k m128
|
|
// VEXTRACTF64X2 imm8 zmm k xmm
|
|
// VEXTRACTF64X2 imm8 zmm m128
|
|
// VEXTRACTF64X2 imm8 zmm xmm
|
|
//
|
|
// Construct and append a VEXTRACTF64X2 instruction to the active function.
|
|
// Operates on the global context.
|
|
func VEXTRACTF64X2(ops ...operand.Op) {
	ctx.VEXTRACTF64X2(ops...)
}
|
|
|
|
// VEXTRACTF64X2_Z: Extract 128 Bits of Packed Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VEXTRACTF64X2.Z imm8 ymm k m128
|
|
// VEXTRACTF64X2.Z imm8 ymm k xmm
|
|
// VEXTRACTF64X2.Z imm8 zmm k m128
|
|
// VEXTRACTF64X2.Z imm8 zmm k xmm
|
|
//
|
|
// Construct and append a VEXTRACTF64X2.Z instruction to the active function.
|
|
func (c *Context) VEXTRACTF64X2_Z(i, yz, k, mx operand.Op) {
|
|
c.addinstruction(x86.VEXTRACTF64X2_Z(i, yz, k, mx))
|
|
}
|
|
|
|
// VEXTRACTF64X2_Z: Extract 128 Bits of Packed Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VEXTRACTF64X2.Z imm8 ymm k m128
|
|
// VEXTRACTF64X2.Z imm8 ymm k xmm
|
|
// VEXTRACTF64X2.Z imm8 zmm k m128
|
|
// VEXTRACTF64X2.Z imm8 zmm k xmm
|
|
//
|
|
// Construct and append a VEXTRACTF64X2.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VEXTRACTF64X2_Z(i, yz, k, mx operand.Op) {
	ctx.VEXTRACTF64X2_Z(i, yz, k, mx)
}
|
|
|
|
// VEXTRACTF64X4: Extract 256 Bits of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VEXTRACTF64X4 imm8 zmm k m256
|
|
// VEXTRACTF64X4 imm8 zmm k ymm
|
|
// VEXTRACTF64X4 imm8 zmm m256
|
|
// VEXTRACTF64X4 imm8 zmm ymm
|
|
//
|
|
// Construct and append a VEXTRACTF64X4 instruction to the active function.
|
|
func (c *Context) VEXTRACTF64X4(ops ...operand.Op) {
|
|
c.addinstruction(x86.VEXTRACTF64X4(ops...))
|
|
}
|
|
|
|
// VEXTRACTF64X4: Extract 256 Bits of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VEXTRACTF64X4 imm8 zmm k m256
|
|
// VEXTRACTF64X4 imm8 zmm k ymm
|
|
// VEXTRACTF64X4 imm8 zmm m256
|
|
// VEXTRACTF64X4 imm8 zmm ymm
|
|
//
|
|
// Construct and append a VEXTRACTF64X4 instruction to the active function.
|
|
// Operates on the global context.
|
|
func VEXTRACTF64X4(ops ...operand.Op) {
	ctx.VEXTRACTF64X4(ops...)
}
|
|
|
|
// VEXTRACTF64X4_Z: Extract 256 Bits of Packed Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VEXTRACTF64X4.Z imm8 zmm k m256
|
|
// VEXTRACTF64X4.Z imm8 zmm k ymm
|
|
//
|
|
// Construct and append a VEXTRACTF64X4.Z instruction to the active function.
|
|
func (c *Context) VEXTRACTF64X4_Z(i, z, k, my operand.Op) {
|
|
c.addinstruction(x86.VEXTRACTF64X4_Z(i, z, k, my))
|
|
}
|
|
|
|
// VEXTRACTF64X4_Z: Extract 256 Bits of Packed Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VEXTRACTF64X4.Z imm8 zmm k m256
|
|
// VEXTRACTF64X4.Z imm8 zmm k ymm
|
|
//
|
|
// Construct and append a VEXTRACTF64X4.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VEXTRACTF64X4_Z(i, z, k, my operand.Op) {
	ctx.VEXTRACTF64X4_Z(i, z, k, my)
}
|
|
|
|
// VEXTRACTI128: Extract Packed Integer Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VEXTRACTI128 imm8 ymm m128
|
|
// VEXTRACTI128 imm8 ymm xmm
|
|
//
|
|
// Construct and append a VEXTRACTI128 instruction to the active function.
|
|
func (c *Context) VEXTRACTI128(i, y, mx operand.Op) {
|
|
c.addinstruction(x86.VEXTRACTI128(i, y, mx))
|
|
}
|
|
|
|
// VEXTRACTI128: Extract Packed Integer Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VEXTRACTI128 imm8 ymm m128
|
|
// VEXTRACTI128 imm8 ymm xmm
|
|
//
|
|
// Construct and append a VEXTRACTI128 instruction to the active function.
|
|
// Operates on the global context.
|
|
func VEXTRACTI128(i, y, mx operand.Op) {
	ctx.VEXTRACTI128(i, y, mx)
}
|
|
|
|
// VEXTRACTI32X4: Extract 128 Bits of Packed Doubleword Integer Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VEXTRACTI32X4 imm8 ymm k m128
|
|
// VEXTRACTI32X4 imm8 ymm k xmm
|
|
// VEXTRACTI32X4 imm8 ymm m128
|
|
// VEXTRACTI32X4 imm8 ymm xmm
|
|
// VEXTRACTI32X4 imm8 zmm k m128
|
|
// VEXTRACTI32X4 imm8 zmm k xmm
|
|
// VEXTRACTI32X4 imm8 zmm m128
|
|
// VEXTRACTI32X4 imm8 zmm xmm
|
|
//
|
|
// Construct and append a VEXTRACTI32X4 instruction to the active function.
|
|
func (c *Context) VEXTRACTI32X4(ops ...operand.Op) {
|
|
c.addinstruction(x86.VEXTRACTI32X4(ops...))
|
|
}
|
|
|
|
// VEXTRACTI32X4: Extract 128 Bits of Packed Doubleword Integer Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VEXTRACTI32X4 imm8 ymm k m128
|
|
// VEXTRACTI32X4 imm8 ymm k xmm
|
|
// VEXTRACTI32X4 imm8 ymm m128
|
|
// VEXTRACTI32X4 imm8 ymm xmm
|
|
// VEXTRACTI32X4 imm8 zmm k m128
|
|
// VEXTRACTI32X4 imm8 zmm k xmm
|
|
// VEXTRACTI32X4 imm8 zmm m128
|
|
// VEXTRACTI32X4 imm8 zmm xmm
|
|
//
|
|
// Construct and append a VEXTRACTI32X4 instruction to the active function.
|
|
// Operates on the global context.
|
|
func VEXTRACTI32X4(ops ...operand.Op) {
	ctx.VEXTRACTI32X4(ops...)
}
|
|
|
|
// VEXTRACTI32X4_Z: Extract 128 Bits of Packed Doubleword Integer Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VEXTRACTI32X4.Z imm8 ymm k m128
|
|
// VEXTRACTI32X4.Z imm8 ymm k xmm
|
|
// VEXTRACTI32X4.Z imm8 zmm k m128
|
|
// VEXTRACTI32X4.Z imm8 zmm k xmm
|
|
//
|
|
// Construct and append a VEXTRACTI32X4.Z instruction to the active function.
|
|
func (c *Context) VEXTRACTI32X4_Z(i, yz, k, mx operand.Op) {
|
|
c.addinstruction(x86.VEXTRACTI32X4_Z(i, yz, k, mx))
|
|
}
|
|
|
|
// VEXTRACTI32X4_Z: Extract 128 Bits of Packed Doubleword Integer Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VEXTRACTI32X4.Z imm8 ymm k m128
|
|
// VEXTRACTI32X4.Z imm8 ymm k xmm
|
|
// VEXTRACTI32X4.Z imm8 zmm k m128
|
|
// VEXTRACTI32X4.Z imm8 zmm k xmm
|
|
//
|
|
// Construct and append a VEXTRACTI32X4.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VEXTRACTI32X4_Z(i, yz, k, mx operand.Op) {
	ctx.VEXTRACTI32X4_Z(i, yz, k, mx)
}
|
|
|
|
// VEXTRACTI32X8: Extract 256 Bits of Packed Doubleword Integer Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VEXTRACTI32X8 imm8 zmm k m256
|
|
// VEXTRACTI32X8 imm8 zmm k ymm
|
|
// VEXTRACTI32X8 imm8 zmm m256
|
|
// VEXTRACTI32X8 imm8 zmm ymm
|
|
//
|
|
// Construct and append a VEXTRACTI32X8 instruction to the active function.
|
|
func (c *Context) VEXTRACTI32X8(ops ...operand.Op) {
|
|
c.addinstruction(x86.VEXTRACTI32X8(ops...))
|
|
}
|
|
|
|
// VEXTRACTI32X8: Extract 256 Bits of Packed Doubleword Integer Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VEXTRACTI32X8 imm8 zmm k m256
|
|
// VEXTRACTI32X8 imm8 zmm k ymm
|
|
// VEXTRACTI32X8 imm8 zmm m256
|
|
// VEXTRACTI32X8 imm8 zmm ymm
|
|
//
|
|
// Construct and append a VEXTRACTI32X8 instruction to the active function.
|
|
// Operates on the global context.
|
|
func VEXTRACTI32X8(ops ...operand.Op) {
	ctx.VEXTRACTI32X8(ops...)
}
|
|
|
|
// VEXTRACTI32X8_Z: Extract 256 Bits of Packed Doubleword Integer Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VEXTRACTI32X8.Z imm8 zmm k m256
|
|
// VEXTRACTI32X8.Z imm8 zmm k ymm
|
|
//
|
|
// Construct and append a VEXTRACTI32X8.Z instruction to the active function.
|
|
func (c *Context) VEXTRACTI32X8_Z(i, z, k, my operand.Op) {
|
|
c.addinstruction(x86.VEXTRACTI32X8_Z(i, z, k, my))
|
|
}
|
|
|
|
// VEXTRACTI32X8_Z: Extract 256 Bits of Packed Doubleword Integer Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VEXTRACTI32X8.Z imm8 zmm k m256
|
|
// VEXTRACTI32X8.Z imm8 zmm k ymm
|
|
//
|
|
// Construct and append a VEXTRACTI32X8.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VEXTRACTI32X8_Z(i, z, k, my operand.Op) {
	ctx.VEXTRACTI32X8_Z(i, z, k, my)
}
|
|
|
|
// VEXTRACTI64X2: Extract 128 Bits of Packed Quadword Integer Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VEXTRACTI64X2 imm8 ymm k m128
|
|
// VEXTRACTI64X2 imm8 ymm k xmm
|
|
// VEXTRACTI64X2 imm8 ymm m128
|
|
// VEXTRACTI64X2 imm8 ymm xmm
|
|
// VEXTRACTI64X2 imm8 zmm k m128
|
|
// VEXTRACTI64X2 imm8 zmm k xmm
|
|
// VEXTRACTI64X2 imm8 zmm m128
|
|
// VEXTRACTI64X2 imm8 zmm xmm
|
|
//
|
|
// Construct and append a VEXTRACTI64X2 instruction to the active function.
|
|
func (c *Context) VEXTRACTI64X2(ops ...operand.Op) {
|
|
c.addinstruction(x86.VEXTRACTI64X2(ops...))
|
|
}
|
|
|
|
// VEXTRACTI64X2: Extract 128 Bits of Packed Quadword Integer Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VEXTRACTI64X2 imm8 ymm k m128
|
|
// VEXTRACTI64X2 imm8 ymm k xmm
|
|
// VEXTRACTI64X2 imm8 ymm m128
|
|
// VEXTRACTI64X2 imm8 ymm xmm
|
|
// VEXTRACTI64X2 imm8 zmm k m128
|
|
// VEXTRACTI64X2 imm8 zmm k xmm
|
|
// VEXTRACTI64X2 imm8 zmm m128
|
|
// VEXTRACTI64X2 imm8 zmm xmm
|
|
//
|
|
// Construct and append a VEXTRACTI64X2 instruction to the active function.
|
|
// Operates on the global context.
|
|
func VEXTRACTI64X2(ops ...operand.Op) {
	ctx.VEXTRACTI64X2(ops...)
}
|
|
|
|
// VEXTRACTI64X2_Z: Extract 128 Bits of Packed Quadword Integer Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VEXTRACTI64X2.Z imm8 ymm k m128
|
|
// VEXTRACTI64X2.Z imm8 ymm k xmm
|
|
// VEXTRACTI64X2.Z imm8 zmm k m128
|
|
// VEXTRACTI64X2.Z imm8 zmm k xmm
|
|
//
|
|
// Construct and append a VEXTRACTI64X2.Z instruction to the active function.
|
|
func (c *Context) VEXTRACTI64X2_Z(i, yz, k, mx operand.Op) {
|
|
c.addinstruction(x86.VEXTRACTI64X2_Z(i, yz, k, mx))
|
|
}
|
|
|
|
// VEXTRACTI64X2_Z: Extract 128 Bits of Packed Quadword Integer Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VEXTRACTI64X2.Z imm8 ymm k m128
|
|
// VEXTRACTI64X2.Z imm8 ymm k xmm
|
|
// VEXTRACTI64X2.Z imm8 zmm k m128
|
|
// VEXTRACTI64X2.Z imm8 zmm k xmm
|
|
//
|
|
// Construct and append a VEXTRACTI64X2.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VEXTRACTI64X2_Z(i, yz, k, mx operand.Op) {
	ctx.VEXTRACTI64X2_Z(i, yz, k, mx)
}
|
|
|
|
// VEXTRACTI64X4: Extract 256 Bits of Packed Quadword Integer Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VEXTRACTI64X4 imm8 zmm k m256
|
|
// VEXTRACTI64X4 imm8 zmm k ymm
|
|
// VEXTRACTI64X4 imm8 zmm m256
|
|
// VEXTRACTI64X4 imm8 zmm ymm
|
|
//
|
|
// Construct and append a VEXTRACTI64X4 instruction to the active function.
|
|
func (c *Context) VEXTRACTI64X4(ops ...operand.Op) {
|
|
c.addinstruction(x86.VEXTRACTI64X4(ops...))
|
|
}
|
|
|
|
// VEXTRACTI64X4: Extract 256 Bits of Packed Quadword Integer Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VEXTRACTI64X4 imm8 zmm k m256
|
|
// VEXTRACTI64X4 imm8 zmm k ymm
|
|
// VEXTRACTI64X4 imm8 zmm m256
|
|
// VEXTRACTI64X4 imm8 zmm ymm
|
|
//
|
|
// Construct and append a VEXTRACTI64X4 instruction to the active function.
|
|
// Operates on the global context.
|
|
func VEXTRACTI64X4(ops ...operand.Op) {
	ctx.VEXTRACTI64X4(ops...)
}
|
|
|
|
// VEXTRACTI64X4_Z: Extract 256 Bits of Packed Quadword Integer Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VEXTRACTI64X4.Z imm8 zmm k m256
|
|
// VEXTRACTI64X4.Z imm8 zmm k ymm
|
|
//
|
|
// Construct and append a VEXTRACTI64X4.Z instruction to the active function.
|
|
func (c *Context) VEXTRACTI64X4_Z(i, z, k, my operand.Op) {
|
|
c.addinstruction(x86.VEXTRACTI64X4_Z(i, z, k, my))
|
|
}
|
|
|
|
// VEXTRACTI64X4_Z: Extract 256 Bits of Packed Quadword Integer Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VEXTRACTI64X4.Z imm8 zmm k m256
|
|
// VEXTRACTI64X4.Z imm8 zmm k ymm
|
|
//
|
|
// Construct and append a VEXTRACTI64X4.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VEXTRACTI64X4_Z(i, z, k, my operand.Op) {
	ctx.VEXTRACTI64X4_Z(i, z, k, my)
}
|
|
|
|
// VEXTRACTPS: Extract Packed Single Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VEXTRACTPS imm8 xmm m32
|
|
// VEXTRACTPS imm8 xmm r32
|
|
//
|
|
// Construct and append a VEXTRACTPS instruction to the active function.
|
|
func (c *Context) VEXTRACTPS(i, x, mr operand.Op) {
|
|
c.addinstruction(x86.VEXTRACTPS(i, x, mr))
|
|
}
|
|
|
|
// VEXTRACTPS: Extract Packed Single Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VEXTRACTPS imm8 xmm m32
|
|
// VEXTRACTPS imm8 xmm r32
|
|
//
|
|
// Construct and append a VEXTRACTPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VEXTRACTPS(i, x, mr operand.Op) {
	ctx.VEXTRACTPS(i, x, mr)
}
|
|
|
|
// VFIXUPIMMPD: Fix Up Special Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFIXUPIMMPD imm8 m128 xmm k xmm
|
|
// VFIXUPIMMPD imm8 m128 xmm xmm
|
|
// VFIXUPIMMPD imm8 m256 ymm k ymm
|
|
// VFIXUPIMMPD imm8 m256 ymm ymm
|
|
// VFIXUPIMMPD imm8 xmm xmm k xmm
|
|
// VFIXUPIMMPD imm8 xmm xmm xmm
|
|
// VFIXUPIMMPD imm8 ymm ymm k ymm
|
|
// VFIXUPIMMPD imm8 ymm ymm ymm
|
|
// VFIXUPIMMPD imm8 m512 zmm k zmm
|
|
// VFIXUPIMMPD imm8 m512 zmm zmm
|
|
// VFIXUPIMMPD imm8 zmm zmm k zmm
|
|
// VFIXUPIMMPD imm8 zmm zmm zmm
|
|
//
|
|
// Construct and append a VFIXUPIMMPD instruction to the active function.
|
|
func (c *Context) VFIXUPIMMPD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFIXUPIMMPD(ops...))
|
|
}
|
|
|
|
// VFIXUPIMMPD: Fix Up Special Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFIXUPIMMPD imm8 m128 xmm k xmm
|
|
// VFIXUPIMMPD imm8 m128 xmm xmm
|
|
// VFIXUPIMMPD imm8 m256 ymm k ymm
|
|
// VFIXUPIMMPD imm8 m256 ymm ymm
|
|
// VFIXUPIMMPD imm8 xmm xmm k xmm
|
|
// VFIXUPIMMPD imm8 xmm xmm xmm
|
|
// VFIXUPIMMPD imm8 ymm ymm k ymm
|
|
// VFIXUPIMMPD imm8 ymm ymm ymm
|
|
// VFIXUPIMMPD imm8 m512 zmm k zmm
|
|
// VFIXUPIMMPD imm8 m512 zmm zmm
|
|
// VFIXUPIMMPD imm8 zmm zmm k zmm
|
|
// VFIXUPIMMPD imm8 zmm zmm zmm
|
|
//
|
|
// Construct and append a VFIXUPIMMPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFIXUPIMMPD(ops ...operand.Op) {
	ctx.VFIXUPIMMPD(ops...)
}
|
|
|
|
// VFIXUPIMMPD_BCST: Fix Up Special Packed Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFIXUPIMMPD.BCST imm8 m64 xmm k xmm
|
|
// VFIXUPIMMPD.BCST imm8 m64 xmm xmm
|
|
// VFIXUPIMMPD.BCST imm8 m64 ymm k ymm
|
|
// VFIXUPIMMPD.BCST imm8 m64 ymm ymm
|
|
// VFIXUPIMMPD.BCST imm8 m64 zmm k zmm
|
|
// VFIXUPIMMPD.BCST imm8 m64 zmm zmm
|
|
//
|
|
// Construct and append a VFIXUPIMMPD.BCST instruction to the active function.
|
|
func (c *Context) VFIXUPIMMPD_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFIXUPIMMPD_BCST(ops...))
|
|
}
|
|
|
|
// VFIXUPIMMPD_BCST: Fix Up Special Packed Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFIXUPIMMPD.BCST imm8 m64 xmm k xmm
|
|
// VFIXUPIMMPD.BCST imm8 m64 xmm xmm
|
|
// VFIXUPIMMPD.BCST imm8 m64 ymm k ymm
|
|
// VFIXUPIMMPD.BCST imm8 m64 ymm ymm
|
|
// VFIXUPIMMPD.BCST imm8 m64 zmm k zmm
|
|
// VFIXUPIMMPD.BCST imm8 m64 zmm zmm
|
|
//
|
|
// Construct and append a VFIXUPIMMPD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFIXUPIMMPD_BCST(ops ...operand.Op) {
	ctx.VFIXUPIMMPD_BCST(ops...)
}
|
|
|
|
// VFIXUPIMMPD_BCST_Z: Fix Up Special Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFIXUPIMMPD.BCST.Z imm8 m64 xmm k xmm
|
|
// VFIXUPIMMPD.BCST.Z imm8 m64 ymm k ymm
|
|
// VFIXUPIMMPD.BCST.Z imm8 m64 zmm k zmm
|
|
//
|
|
// Construct and append a VFIXUPIMMPD.BCST.Z instruction to the active function.
|
|
func (c *Context) VFIXUPIMMPD_BCST_Z(i, m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VFIXUPIMMPD_BCST_Z(i, m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VFIXUPIMMPD_BCST_Z: Fix Up Special Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFIXUPIMMPD.BCST.Z imm8 m64 xmm k xmm
|
|
// VFIXUPIMMPD.BCST.Z imm8 m64 ymm k ymm
|
|
// VFIXUPIMMPD.BCST.Z imm8 m64 zmm k zmm
|
|
//
|
|
// Construct and append a VFIXUPIMMPD.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFIXUPIMMPD_BCST_Z(i, m, xyz, k, xyz1 operand.Op) {
	ctx.VFIXUPIMMPD_BCST_Z(i, m, xyz, k, xyz1)
}
|
|
|
|
// VFIXUPIMMPD_SAE: Fix Up Special Packed Double-Precision Floating-Point Values (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFIXUPIMMPD.SAE imm8 zmm zmm k zmm
|
|
// VFIXUPIMMPD.SAE imm8 zmm zmm zmm
|
|
//
|
|
// Construct and append a VFIXUPIMMPD.SAE instruction to the active function.
|
|
func (c *Context) VFIXUPIMMPD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFIXUPIMMPD_SAE(ops...))
|
|
}
|
|
|
|
// VFIXUPIMMPD_SAE: Fix Up Special Packed Double-Precision Floating-Point Values (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFIXUPIMMPD.SAE imm8 zmm zmm k zmm
|
|
// VFIXUPIMMPD.SAE imm8 zmm zmm zmm
|
|
//
|
|
// Construct and append a VFIXUPIMMPD.SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFIXUPIMMPD_SAE(ops ...operand.Op) {
	ctx.VFIXUPIMMPD_SAE(ops...)
}
|
|
|
|
// VFIXUPIMMPD_SAE_Z: Fix Up Special Packed Double-Precision Floating-Point Values (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFIXUPIMMPD.SAE.Z imm8 zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFIXUPIMMPD.SAE.Z instruction to the active function.
|
|
func (c *Context) VFIXUPIMMPD_SAE_Z(i, z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFIXUPIMMPD_SAE_Z(i, z, z1, k, z2))
|
|
}
|
|
|
|
// VFIXUPIMMPD_SAE_Z: Fix Up Special Packed Double-Precision Floating-Point Values (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFIXUPIMMPD.SAE.Z imm8 zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFIXUPIMMPD.SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFIXUPIMMPD_SAE_Z(i, z, z1, k, z2 operand.Op) {
	ctx.VFIXUPIMMPD_SAE_Z(i, z, z1, k, z2)
}
|
|
|
|
// VFIXUPIMMPD_Z: Fix Up Special Packed Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFIXUPIMMPD.Z imm8 m128 xmm k xmm
|
|
// VFIXUPIMMPD.Z imm8 m256 ymm k ymm
|
|
// VFIXUPIMMPD.Z imm8 xmm xmm k xmm
|
|
// VFIXUPIMMPD.Z imm8 ymm ymm k ymm
|
|
// VFIXUPIMMPD.Z imm8 m512 zmm k zmm
|
|
// VFIXUPIMMPD.Z imm8 zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFIXUPIMMPD.Z instruction to the active function.
|
|
func (c *Context) VFIXUPIMMPD_Z(i, mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VFIXUPIMMPD_Z(i, mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VFIXUPIMMPD_Z: Fix Up Special Packed Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFIXUPIMMPD.Z imm8 m128 xmm k xmm
|
|
// VFIXUPIMMPD.Z imm8 m256 ymm k ymm
|
|
// VFIXUPIMMPD.Z imm8 xmm xmm k xmm
|
|
// VFIXUPIMMPD.Z imm8 ymm ymm k ymm
|
|
// VFIXUPIMMPD.Z imm8 m512 zmm k zmm
|
|
// VFIXUPIMMPD.Z imm8 zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFIXUPIMMPD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFIXUPIMMPD_Z(i, mxyz, xyz, k, xyz1 operand.Op) {
	ctx.VFIXUPIMMPD_Z(i, mxyz, xyz, k, xyz1)
}
|
|
|
|
// VFIXUPIMMPS: Fix Up Special Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFIXUPIMMPS imm8 m256 ymm k ymm
|
|
// VFIXUPIMMPS imm8 m256 ymm ymm
|
|
// VFIXUPIMMPS imm8 ymm ymm k ymm
|
|
// VFIXUPIMMPS imm8 ymm ymm ymm
|
|
// VFIXUPIMMPS imm8 m512 zmm k zmm
|
|
// VFIXUPIMMPS imm8 m512 zmm zmm
|
|
// VFIXUPIMMPS imm8 zmm zmm k zmm
|
|
// VFIXUPIMMPS imm8 zmm zmm zmm
|
|
// VFIXUPIMMPS imm8 m128 xmm k xmm
|
|
// VFIXUPIMMPS imm8 m128 xmm xmm
|
|
// VFIXUPIMMPS imm8 xmm xmm k xmm
|
|
// VFIXUPIMMPS imm8 xmm xmm xmm
|
|
//
|
|
// Construct and append a VFIXUPIMMPS instruction to the active function.
|
|
func (c *Context) VFIXUPIMMPS(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFIXUPIMMPS(ops...))
|
|
}
|
|
|
|
// VFIXUPIMMPS: Fix Up Special Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFIXUPIMMPS imm8 m256 ymm k ymm
|
|
// VFIXUPIMMPS imm8 m256 ymm ymm
|
|
// VFIXUPIMMPS imm8 ymm ymm k ymm
|
|
// VFIXUPIMMPS imm8 ymm ymm ymm
|
|
// VFIXUPIMMPS imm8 m512 zmm k zmm
|
|
// VFIXUPIMMPS imm8 m512 zmm zmm
|
|
// VFIXUPIMMPS imm8 zmm zmm k zmm
|
|
// VFIXUPIMMPS imm8 zmm zmm zmm
|
|
// VFIXUPIMMPS imm8 m128 xmm k xmm
|
|
// VFIXUPIMMPS imm8 m128 xmm xmm
|
|
// VFIXUPIMMPS imm8 xmm xmm k xmm
|
|
// VFIXUPIMMPS imm8 xmm xmm xmm
|
|
//
|
|
// Construct and append a VFIXUPIMMPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFIXUPIMMPS(ops ...operand.Op) {
	ctx.VFIXUPIMMPS(ops...)
}
|
|
|
|
// VFIXUPIMMPS_BCST: Fix Up Special Packed Single-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFIXUPIMMPS.BCST imm8 m32 ymm k ymm
|
|
// VFIXUPIMMPS.BCST imm8 m32 ymm ymm
|
|
// VFIXUPIMMPS.BCST imm8 m32 zmm k zmm
|
|
// VFIXUPIMMPS.BCST imm8 m32 zmm zmm
|
|
// VFIXUPIMMPS.BCST imm8 m32 xmm k xmm
|
|
// VFIXUPIMMPS.BCST imm8 m32 xmm xmm
|
|
//
|
|
// Construct and append a VFIXUPIMMPS.BCST instruction to the active function.
|
|
func (c *Context) VFIXUPIMMPS_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFIXUPIMMPS_BCST(ops...))
|
|
}
|
|
|
|
// VFIXUPIMMPS_BCST: Fix Up Special Packed Single-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFIXUPIMMPS.BCST imm8 m32 ymm k ymm
|
|
// VFIXUPIMMPS.BCST imm8 m32 ymm ymm
|
|
// VFIXUPIMMPS.BCST imm8 m32 zmm k zmm
|
|
// VFIXUPIMMPS.BCST imm8 m32 zmm zmm
|
|
// VFIXUPIMMPS.BCST imm8 m32 xmm k xmm
|
|
// VFIXUPIMMPS.BCST imm8 m32 xmm xmm
|
|
//
|
|
// Construct and append a VFIXUPIMMPS.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFIXUPIMMPS_BCST(ops ...operand.Op) {
	ctx.VFIXUPIMMPS_BCST(ops...)
}
|
|
|
|
// VFIXUPIMMPS_BCST_Z: Fix Up Special Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFIXUPIMMPS.BCST.Z imm8 m32 ymm k ymm
|
|
// VFIXUPIMMPS.BCST.Z imm8 m32 zmm k zmm
|
|
// VFIXUPIMMPS.BCST.Z imm8 m32 xmm k xmm
|
|
//
|
|
// Construct and append a VFIXUPIMMPS.BCST.Z instruction to the active function.
|
|
func (c *Context) VFIXUPIMMPS_BCST_Z(i, m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VFIXUPIMMPS_BCST_Z(i, m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VFIXUPIMMPS_BCST_Z: Fix Up Special Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFIXUPIMMPS.BCST.Z imm8 m32 ymm k ymm
|
|
// VFIXUPIMMPS.BCST.Z imm8 m32 zmm k zmm
|
|
// VFIXUPIMMPS.BCST.Z imm8 m32 xmm k xmm
|
|
//
|
|
// Construct and append a VFIXUPIMMPS.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFIXUPIMMPS_BCST_Z(i, m, xyz, k, xyz1 operand.Op) {
	ctx.VFIXUPIMMPS_BCST_Z(i, m, xyz, k, xyz1)
}
|
|
|
|
// VFIXUPIMMPS_SAE: Fix Up Special Packed Single-Precision Floating-Point Values (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFIXUPIMMPS.SAE imm8 zmm zmm k zmm
|
|
// VFIXUPIMMPS.SAE imm8 zmm zmm zmm
|
|
//
|
|
// Construct and append a VFIXUPIMMPS.SAE instruction to the active function.
|
|
func (c *Context) VFIXUPIMMPS_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFIXUPIMMPS_SAE(ops...))
|
|
}
|
|
|
|
// VFIXUPIMMPS_SAE: Fix Up Special Packed Single-Precision Floating-Point Values (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFIXUPIMMPS.SAE imm8 zmm zmm k zmm
|
|
// VFIXUPIMMPS.SAE imm8 zmm zmm zmm
|
|
//
|
|
// Construct and append a VFIXUPIMMPS.SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFIXUPIMMPS_SAE(ops ...operand.Op) {
	ctx.VFIXUPIMMPS_SAE(ops...)
}
|
|
|
|
// VFIXUPIMMPS_SAE_Z: Fix Up Special Packed Single-Precision Floating-Point Values (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFIXUPIMMPS.SAE.Z imm8 zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFIXUPIMMPS.SAE.Z instruction to the active function.
|
|
func (c *Context) VFIXUPIMMPS_SAE_Z(i, z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFIXUPIMMPS_SAE_Z(i, z, z1, k, z2))
|
|
}
|
|
|
|
// VFIXUPIMMPS_SAE_Z: Fix Up Special Packed Single-Precision Floating-Point Values (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFIXUPIMMPS.SAE.Z imm8 zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFIXUPIMMPS.SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFIXUPIMMPS_SAE_Z(i, z, z1, k, z2 operand.Op) {
	ctx.VFIXUPIMMPS_SAE_Z(i, z, z1, k, z2)
}
|
|
|
|
// VFIXUPIMMPS_Z: Fix Up Special Packed Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFIXUPIMMPS.Z imm8 m256 ymm k ymm
|
|
// VFIXUPIMMPS.Z imm8 ymm ymm k ymm
|
|
// VFIXUPIMMPS.Z imm8 m512 zmm k zmm
|
|
// VFIXUPIMMPS.Z imm8 zmm zmm k zmm
|
|
// VFIXUPIMMPS.Z imm8 m128 xmm k xmm
|
|
// VFIXUPIMMPS.Z imm8 xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFIXUPIMMPS.Z instruction to the active function.
|
|
func (c *Context) VFIXUPIMMPS_Z(i, mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VFIXUPIMMPS_Z(i, mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VFIXUPIMMPS_Z: Fix Up Special Packed Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFIXUPIMMPS.Z imm8 m256 ymm k ymm
|
|
// VFIXUPIMMPS.Z imm8 ymm ymm k ymm
|
|
// VFIXUPIMMPS.Z imm8 m512 zmm k zmm
|
|
// VFIXUPIMMPS.Z imm8 zmm zmm k zmm
|
|
// VFIXUPIMMPS.Z imm8 m128 xmm k xmm
|
|
// VFIXUPIMMPS.Z imm8 xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFIXUPIMMPS.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFIXUPIMMPS_Z(i, mxyz, xyz, k, xyz1 operand.Op) {
	ctx.VFIXUPIMMPS_Z(i, mxyz, xyz, k, xyz1)
}
|
|
|
|
// VFIXUPIMMSD: Fix Up Special Scalar Double-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFIXUPIMMSD imm8 m64 xmm k xmm
|
|
// VFIXUPIMMSD imm8 m64 xmm xmm
|
|
// VFIXUPIMMSD imm8 xmm xmm k xmm
|
|
// VFIXUPIMMSD imm8 xmm xmm xmm
|
|
//
|
|
// Construct and append a VFIXUPIMMSD instruction to the active function.
|
|
func (c *Context) VFIXUPIMMSD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFIXUPIMMSD(ops...))
|
|
}
|
|
|
|
// VFIXUPIMMSD: Fix Up Special Scalar Double-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFIXUPIMMSD imm8 m64 xmm k xmm
|
|
// VFIXUPIMMSD imm8 m64 xmm xmm
|
|
// VFIXUPIMMSD imm8 xmm xmm k xmm
|
|
// VFIXUPIMMSD imm8 xmm xmm xmm
|
|
//
|
|
// Construct and append a VFIXUPIMMSD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFIXUPIMMSD(ops ...operand.Op) {
	ctx.VFIXUPIMMSD(ops...)
}
|
|
|
|
// VFIXUPIMMSD_SAE: Fix Up Special Scalar Double-Precision Floating-Point Value (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFIXUPIMMSD.SAE imm8 xmm xmm k xmm
|
|
// VFIXUPIMMSD.SAE imm8 xmm xmm xmm
|
|
//
|
|
// Construct and append a VFIXUPIMMSD.SAE instruction to the active function.
|
|
func (c *Context) VFIXUPIMMSD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFIXUPIMMSD_SAE(ops...))
|
|
}
|
|
|
|
// VFIXUPIMMSD_SAE: Fix Up Special Scalar Double-Precision Floating-Point Value (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFIXUPIMMSD.SAE imm8 xmm xmm k xmm
|
|
// VFIXUPIMMSD.SAE imm8 xmm xmm xmm
|
|
//
|
|
// Construct and append a VFIXUPIMMSD.SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFIXUPIMMSD_SAE(ops ...operand.Op) {
	ctx.VFIXUPIMMSD_SAE(ops...)
}
|
|
|
|
// VFIXUPIMMSD_SAE_Z: Fix Up Special Scalar Double-Precision Floating-Point Value (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFIXUPIMMSD.SAE.Z imm8 xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFIXUPIMMSD.SAE.Z instruction to the active function.
|
|
func (c *Context) VFIXUPIMMSD_SAE_Z(i, x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VFIXUPIMMSD_SAE_Z(i, x, x1, k, x2))
|
|
}
|
|
|
|
// VFIXUPIMMSD_SAE_Z: Fix Up Special Scalar Double-Precision Floating-Point Value (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFIXUPIMMSD.SAE.Z imm8 xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFIXUPIMMSD.SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFIXUPIMMSD_SAE_Z(i, x, x1, k, x2 operand.Op) {
	ctx.VFIXUPIMMSD_SAE_Z(i, x, x1, k, x2)
}
|
|
|
|
// VFIXUPIMMSD_Z: Fix Up Special Scalar Double-Precision Floating-Point Value (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFIXUPIMMSD.Z imm8 m64 xmm k xmm
|
|
// VFIXUPIMMSD.Z imm8 xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFIXUPIMMSD.Z instruction to the active function.
|
|
func (c *Context) VFIXUPIMMSD_Z(i, mx, x, k, x1 operand.Op) {
|
|
c.addinstruction(x86.VFIXUPIMMSD_Z(i, mx, x, k, x1))
|
|
}
|
|
|
|
// VFIXUPIMMSD_Z: Fix Up Special Scalar Double-Precision Floating-Point Value (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFIXUPIMMSD.Z imm8 m64 xmm k xmm
|
|
// VFIXUPIMMSD.Z imm8 xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFIXUPIMMSD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFIXUPIMMSD_Z(i, mx, x, k, x1 operand.Op) { ctx.VFIXUPIMMSD_Z(i, mx, x, k, x1) }
|
|
|
|
// VFIXUPIMMSS: Fix Up Special Scalar Single-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFIXUPIMMSS imm8 m32 xmm k xmm
|
|
// VFIXUPIMMSS imm8 m32 xmm xmm
|
|
// VFIXUPIMMSS imm8 xmm xmm k xmm
|
|
// VFIXUPIMMSS imm8 xmm xmm xmm
|
|
//
|
|
// Construct and append a VFIXUPIMMSS instruction to the active function.
|
|
func (c *Context) VFIXUPIMMSS(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFIXUPIMMSS(ops...))
|
|
}
|
|
|
|
// VFIXUPIMMSS: Fix Up Special Scalar Single-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFIXUPIMMSS imm8 m32 xmm k xmm
|
|
// VFIXUPIMMSS imm8 m32 xmm xmm
|
|
// VFIXUPIMMSS imm8 xmm xmm k xmm
|
|
// VFIXUPIMMSS imm8 xmm xmm xmm
|
|
//
|
|
// Construct and append a VFIXUPIMMSS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFIXUPIMMSS(ops ...operand.Op) { ctx.VFIXUPIMMSS(ops...) }
|
|
|
|
// VFIXUPIMMSS_SAE: Fix Up Special Scalar Single-Precision Floating-Point Value (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFIXUPIMMSS.SAE imm8 xmm xmm k xmm
|
|
// VFIXUPIMMSS.SAE imm8 xmm xmm xmm
|
|
//
|
|
// Construct and append a VFIXUPIMMSS.SAE instruction to the active function.
|
|
func (c *Context) VFIXUPIMMSS_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFIXUPIMMSS_SAE(ops...))
|
|
}
|
|
|
|
// VFIXUPIMMSS_SAE: Fix Up Special Scalar Single-Precision Floating-Point Value (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFIXUPIMMSS.SAE imm8 xmm xmm k xmm
|
|
// VFIXUPIMMSS.SAE imm8 xmm xmm xmm
|
|
//
|
|
// Construct and append a VFIXUPIMMSS.SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFIXUPIMMSS_SAE(ops ...operand.Op) { ctx.VFIXUPIMMSS_SAE(ops...) }
|
|
|
|
// VFIXUPIMMSS_SAE_Z: Fix Up Special Scalar Single-Precision Floating-Point Value (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFIXUPIMMSS.SAE.Z imm8 xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFIXUPIMMSS.SAE.Z instruction to the active function.
|
|
func (c *Context) VFIXUPIMMSS_SAE_Z(i, x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VFIXUPIMMSS_SAE_Z(i, x, x1, k, x2))
|
|
}
|
|
|
|
// VFIXUPIMMSS_SAE_Z: Fix Up Special Scalar Single-Precision Floating-Point Value (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFIXUPIMMSS.SAE.Z imm8 xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFIXUPIMMSS.SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFIXUPIMMSS_SAE_Z(i, x, x1, k, x2 operand.Op) { ctx.VFIXUPIMMSS_SAE_Z(i, x, x1, k, x2) }
|
|
|
|
// VFIXUPIMMSS_Z: Fix Up Special Scalar Single-Precision Floating-Point Value (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFIXUPIMMSS.Z imm8 m32 xmm k xmm
|
|
// VFIXUPIMMSS.Z imm8 xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFIXUPIMMSS.Z instruction to the active function.
|
|
func (c *Context) VFIXUPIMMSS_Z(i, mx, x, k, x1 operand.Op) {
|
|
c.addinstruction(x86.VFIXUPIMMSS_Z(i, mx, x, k, x1))
|
|
}
|
|
|
|
// VFIXUPIMMSS_Z: Fix Up Special Scalar Single-Precision Floating-Point Value (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFIXUPIMMSS.Z imm8 m32 xmm k xmm
|
|
// VFIXUPIMMSS.Z imm8 xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFIXUPIMMSS.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFIXUPIMMSS_Z(i, mx, x, k, x1 operand.Op) { ctx.VFIXUPIMMSS_Z(i, mx, x, k, x1) }
|
|
|
|
// VFMADD132PD: Fused Multiply-Add of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132PD m128 xmm xmm
|
|
// VFMADD132PD m256 ymm ymm
|
|
// VFMADD132PD xmm xmm xmm
|
|
// VFMADD132PD ymm ymm ymm
|
|
// VFMADD132PD m128 xmm k xmm
|
|
// VFMADD132PD m256 ymm k ymm
|
|
// VFMADD132PD xmm xmm k xmm
|
|
// VFMADD132PD ymm ymm k ymm
|
|
// VFMADD132PD m512 zmm k zmm
|
|
// VFMADD132PD m512 zmm zmm
|
|
// VFMADD132PD zmm zmm k zmm
|
|
// VFMADD132PD zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADD132PD instruction to the active function.
|
|
func (c *Context) VFMADD132PD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMADD132PD(ops...))
|
|
}
|
|
|
|
// VFMADD132PD: Fused Multiply-Add of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132PD m128 xmm xmm
|
|
// VFMADD132PD m256 ymm ymm
|
|
// VFMADD132PD xmm xmm xmm
|
|
// VFMADD132PD ymm ymm ymm
|
|
// VFMADD132PD m128 xmm k xmm
|
|
// VFMADD132PD m256 ymm k ymm
|
|
// VFMADD132PD xmm xmm k xmm
|
|
// VFMADD132PD ymm ymm k ymm
|
|
// VFMADD132PD m512 zmm k zmm
|
|
// VFMADD132PD m512 zmm zmm
|
|
// VFMADD132PD zmm zmm k zmm
|
|
// VFMADD132PD zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADD132PD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD132PD(ops ...operand.Op) { ctx.VFMADD132PD(ops...) }
|
|
|
|
// VFMADD132PD_BCST: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132PD.BCST m64 xmm k xmm
|
|
// VFMADD132PD.BCST m64 xmm xmm
|
|
// VFMADD132PD.BCST m64 ymm k ymm
|
|
// VFMADD132PD.BCST m64 ymm ymm
|
|
// VFMADD132PD.BCST m64 zmm k zmm
|
|
// VFMADD132PD.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VFMADD132PD.BCST instruction to the active function.
|
|
func (c *Context) VFMADD132PD_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMADD132PD_BCST(ops...))
|
|
}
|
|
|
|
// VFMADD132PD_BCST: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132PD.BCST m64 xmm k xmm
|
|
// VFMADD132PD.BCST m64 xmm xmm
|
|
// VFMADD132PD.BCST m64 ymm k ymm
|
|
// VFMADD132PD.BCST m64 ymm ymm
|
|
// VFMADD132PD.BCST m64 zmm k zmm
|
|
// VFMADD132PD.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VFMADD132PD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD132PD_BCST(ops ...operand.Op) { ctx.VFMADD132PD_BCST(ops...) }
|
|
|
|
// VFMADD132PD_BCST_Z: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132PD.BCST.Z m64 xmm k xmm
|
|
// VFMADD132PD.BCST.Z m64 ymm k ymm
|
|
// VFMADD132PD.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VFMADD132PD.BCST.Z instruction to the active function.
|
|
func (c *Context) VFMADD132PD_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VFMADD132PD_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VFMADD132PD_BCST_Z: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132PD.BCST.Z m64 xmm k xmm
|
|
// VFMADD132PD.BCST.Z m64 ymm k ymm
|
|
// VFMADD132PD.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VFMADD132PD.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD132PD_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VFMADD132PD_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VFMADD132PD_RD_SAE: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132PD.RD_SAE zmm zmm k zmm
|
|
// VFMADD132PD.RD_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADD132PD.RD_SAE instruction to the active function.
|
|
func (c *Context) VFMADD132PD_RD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMADD132PD_RD_SAE(ops...))
|
|
}
|
|
|
|
// VFMADD132PD_RD_SAE: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132PD.RD_SAE zmm zmm k zmm
|
|
// VFMADD132PD.RD_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADD132PD.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD132PD_RD_SAE(ops ...operand.Op) { ctx.VFMADD132PD_RD_SAE(ops...) }
|
|
|
|
// VFMADD132PD_RD_SAE_Z: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132PD.RD_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADD132PD.RD_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMADD132PD_RD_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFMADD132PD_RD_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFMADD132PD_RD_SAE_Z: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132PD.RD_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADD132PD.RD_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD132PD_RD_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMADD132PD_RD_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMADD132PD_RN_SAE: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132PD.RN_SAE zmm zmm k zmm
|
|
// VFMADD132PD.RN_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADD132PD.RN_SAE instruction to the active function.
|
|
func (c *Context) VFMADD132PD_RN_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMADD132PD_RN_SAE(ops...))
|
|
}
|
|
|
|
// VFMADD132PD_RN_SAE: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132PD.RN_SAE zmm zmm k zmm
|
|
// VFMADD132PD.RN_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADD132PD.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD132PD_RN_SAE(ops ...operand.Op) { ctx.VFMADD132PD_RN_SAE(ops...) }
|
|
|
|
// VFMADD132PD_RN_SAE_Z: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132PD.RN_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADD132PD.RN_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMADD132PD_RN_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFMADD132PD_RN_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFMADD132PD_RN_SAE_Z: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132PD.RN_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADD132PD.RN_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD132PD_RN_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMADD132PD_RN_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMADD132PD_RU_SAE: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132PD.RU_SAE zmm zmm k zmm
|
|
// VFMADD132PD.RU_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADD132PD.RU_SAE instruction to the active function.
|
|
func (c *Context) VFMADD132PD_RU_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMADD132PD_RU_SAE(ops...))
|
|
}
|
|
|
|
// VFMADD132PD_RU_SAE: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132PD.RU_SAE zmm zmm k zmm
|
|
// VFMADD132PD.RU_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADD132PD.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD132PD_RU_SAE(ops ...operand.Op) { ctx.VFMADD132PD_RU_SAE(ops...) }
|
|
|
|
// VFMADD132PD_RU_SAE_Z: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132PD.RU_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADD132PD.RU_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMADD132PD_RU_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFMADD132PD_RU_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFMADD132PD_RU_SAE_Z: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132PD.RU_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADD132PD.RU_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD132PD_RU_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMADD132PD_RU_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMADD132PD_RZ_SAE: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132PD.RZ_SAE zmm zmm k zmm
|
|
// VFMADD132PD.RZ_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADD132PD.RZ_SAE instruction to the active function.
|
|
func (c *Context) VFMADD132PD_RZ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMADD132PD_RZ_SAE(ops...))
|
|
}
|
|
|
|
// VFMADD132PD_RZ_SAE: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132PD.RZ_SAE zmm zmm k zmm
|
|
// VFMADD132PD.RZ_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADD132PD.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD132PD_RZ_SAE(ops ...operand.Op) { ctx.VFMADD132PD_RZ_SAE(ops...) }
|
|
|
|
// VFMADD132PD_RZ_SAE_Z: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132PD.RZ_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADD132PD.RZ_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMADD132PD_RZ_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFMADD132PD_RZ_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFMADD132PD_RZ_SAE_Z: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132PD.RZ_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADD132PD.RZ_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD132PD_RZ_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMADD132PD_RZ_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMADD132PD_Z: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132PD.Z m128 xmm k xmm
|
|
// VFMADD132PD.Z m256 ymm k ymm
|
|
// VFMADD132PD.Z xmm xmm k xmm
|
|
// VFMADD132PD.Z ymm ymm k ymm
|
|
// VFMADD132PD.Z m512 zmm k zmm
|
|
// VFMADD132PD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADD132PD.Z instruction to the active function.
|
|
func (c *Context) VFMADD132PD_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VFMADD132PD_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VFMADD132PD_Z: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132PD.Z m128 xmm k xmm
|
|
// VFMADD132PD.Z m256 ymm k ymm
|
|
// VFMADD132PD.Z xmm xmm k xmm
|
|
// VFMADD132PD.Z ymm ymm k ymm
|
|
// VFMADD132PD.Z m512 zmm k zmm
|
|
// VFMADD132PD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADD132PD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD132PD_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VFMADD132PD_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VFMADD132PS: Fused Multiply-Add of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132PS m128 xmm xmm
|
|
// VFMADD132PS m256 ymm ymm
|
|
// VFMADD132PS xmm xmm xmm
|
|
// VFMADD132PS ymm ymm ymm
|
|
// VFMADD132PS m128 xmm k xmm
|
|
// VFMADD132PS m256 ymm k ymm
|
|
// VFMADD132PS xmm xmm k xmm
|
|
// VFMADD132PS ymm ymm k ymm
|
|
// VFMADD132PS m512 zmm k zmm
|
|
// VFMADD132PS m512 zmm zmm
|
|
// VFMADD132PS zmm zmm k zmm
|
|
// VFMADD132PS zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADD132PS instruction to the active function.
|
|
func (c *Context) VFMADD132PS(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMADD132PS(ops...))
|
|
}
|
|
|
|
// VFMADD132PS: Fused Multiply-Add of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132PS m128 xmm xmm
|
|
// VFMADD132PS m256 ymm ymm
|
|
// VFMADD132PS xmm xmm xmm
|
|
// VFMADD132PS ymm ymm ymm
|
|
// VFMADD132PS m128 xmm k xmm
|
|
// VFMADD132PS m256 ymm k ymm
|
|
// VFMADD132PS xmm xmm k xmm
|
|
// VFMADD132PS ymm ymm k ymm
|
|
// VFMADD132PS m512 zmm k zmm
|
|
// VFMADD132PS m512 zmm zmm
|
|
// VFMADD132PS zmm zmm k zmm
|
|
// VFMADD132PS zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADD132PS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD132PS(ops ...operand.Op) { ctx.VFMADD132PS(ops...) }
|
|
|
|
// VFMADD132PS_BCST: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132PS.BCST m32 xmm k xmm
|
|
// VFMADD132PS.BCST m32 xmm xmm
|
|
// VFMADD132PS.BCST m32 ymm k ymm
|
|
// VFMADD132PS.BCST m32 ymm ymm
|
|
// VFMADD132PS.BCST m32 zmm k zmm
|
|
// VFMADD132PS.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VFMADD132PS.BCST instruction to the active function.
|
|
func (c *Context) VFMADD132PS_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMADD132PS_BCST(ops...))
|
|
}
|
|
|
|
// VFMADD132PS_BCST: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132PS.BCST m32 xmm k xmm
|
|
// VFMADD132PS.BCST m32 xmm xmm
|
|
// VFMADD132PS.BCST m32 ymm k ymm
|
|
// VFMADD132PS.BCST m32 ymm ymm
|
|
// VFMADD132PS.BCST m32 zmm k zmm
|
|
// VFMADD132PS.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VFMADD132PS.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD132PS_BCST(ops ...operand.Op) { ctx.VFMADD132PS_BCST(ops...) }
|
|
|
|
// VFMADD132PS_BCST_Z: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132PS.BCST.Z m32 xmm k xmm
|
|
// VFMADD132PS.BCST.Z m32 ymm k ymm
|
|
// VFMADD132PS.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VFMADD132PS.BCST.Z instruction to the active function.
|
|
func (c *Context) VFMADD132PS_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VFMADD132PS_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VFMADD132PS_BCST_Z: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132PS.BCST.Z m32 xmm k xmm
|
|
// VFMADD132PS.BCST.Z m32 ymm k ymm
|
|
// VFMADD132PS.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VFMADD132PS.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD132PS_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VFMADD132PS_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VFMADD132PS_RD_SAE: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132PS.RD_SAE zmm zmm k zmm
|
|
// VFMADD132PS.RD_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADD132PS.RD_SAE instruction to the active function.
|
|
func (c *Context) VFMADD132PS_RD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMADD132PS_RD_SAE(ops...))
|
|
}
|
|
|
|
// VFMADD132PS_RD_SAE: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132PS.RD_SAE zmm zmm k zmm
|
|
// VFMADD132PS.RD_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADD132PS.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD132PS_RD_SAE(ops ...operand.Op) { ctx.VFMADD132PS_RD_SAE(ops...) }
|
|
|
|
// VFMADD132PS_RD_SAE_Z: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132PS.RD_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADD132PS.RD_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMADD132PS_RD_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFMADD132PS_RD_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFMADD132PS_RD_SAE_Z: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132PS.RD_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADD132PS.RD_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD132PS_RD_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMADD132PS_RD_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMADD132PS_RN_SAE: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132PS.RN_SAE zmm zmm k zmm
|
|
// VFMADD132PS.RN_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADD132PS.RN_SAE instruction to the active function.
|
|
func (c *Context) VFMADD132PS_RN_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMADD132PS_RN_SAE(ops...))
|
|
}
|
|
|
|
// VFMADD132PS_RN_SAE: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132PS.RN_SAE zmm zmm k zmm
|
|
// VFMADD132PS.RN_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADD132PS.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD132PS_RN_SAE(ops ...operand.Op) { ctx.VFMADD132PS_RN_SAE(ops...) }
|
|
|
|
// VFMADD132PS_RN_SAE_Z: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132PS.RN_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADD132PS.RN_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMADD132PS_RN_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFMADD132PS_RN_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFMADD132PS_RN_SAE_Z: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132PS.RN_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADD132PS.RN_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD132PS_RN_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMADD132PS_RN_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMADD132PS_RU_SAE: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132PS.RU_SAE zmm zmm k zmm
|
|
// VFMADD132PS.RU_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADD132PS.RU_SAE instruction to the active function.
|
|
func (c *Context) VFMADD132PS_RU_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMADD132PS_RU_SAE(ops...))
|
|
}
|
|
|
|
// VFMADD132PS_RU_SAE: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132PS.RU_SAE zmm zmm k zmm
|
|
// VFMADD132PS.RU_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADD132PS.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD132PS_RU_SAE(ops ...operand.Op) { ctx.VFMADD132PS_RU_SAE(ops...) }
|
|
|
|
// VFMADD132PS_RU_SAE_Z: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132PS.RU_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADD132PS.RU_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMADD132PS_RU_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFMADD132PS_RU_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFMADD132PS_RU_SAE_Z: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132PS.RU_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADD132PS.RU_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD132PS_RU_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMADD132PS_RU_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMADD132PS_RZ_SAE: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132PS.RZ_SAE zmm zmm k zmm
|
|
// VFMADD132PS.RZ_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADD132PS.RZ_SAE instruction to the active function.
|
|
func (c *Context) VFMADD132PS_RZ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMADD132PS_RZ_SAE(ops...))
|
|
}
|
|
|
|
// VFMADD132PS_RZ_SAE: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132PS.RZ_SAE zmm zmm k zmm
|
|
// VFMADD132PS.RZ_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADD132PS.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD132PS_RZ_SAE(ops ...operand.Op) { ctx.VFMADD132PS_RZ_SAE(ops...) }
|
|
|
|
// VFMADD132PS_RZ_SAE_Z: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132PS.RZ_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADD132PS.RZ_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMADD132PS_RZ_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFMADD132PS_RZ_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFMADD132PS_RZ_SAE_Z: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132PS.RZ_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADD132PS.RZ_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD132PS_RZ_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMADD132PS_RZ_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMADD132PS_Z: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132PS.Z m128 xmm k xmm
|
|
// VFMADD132PS.Z m256 ymm k ymm
|
|
// VFMADD132PS.Z xmm xmm k xmm
|
|
// VFMADD132PS.Z ymm ymm k ymm
|
|
// VFMADD132PS.Z m512 zmm k zmm
|
|
// VFMADD132PS.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADD132PS.Z instruction to the active function.
|
|
func (c *Context) VFMADD132PS_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VFMADD132PS_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VFMADD132PS_Z: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132PS.Z m128 xmm k xmm
|
|
// VFMADD132PS.Z m256 ymm k ymm
|
|
// VFMADD132PS.Z xmm xmm k xmm
|
|
// VFMADD132PS.Z ymm ymm k ymm
|
|
// VFMADD132PS.Z m512 zmm k zmm
|
|
// VFMADD132PS.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADD132PS.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD132PS_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VFMADD132PS_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VFMADD132SD: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132SD m64 xmm xmm
|
|
// VFMADD132SD xmm xmm xmm
|
|
// VFMADD132SD m64 xmm k xmm
|
|
// VFMADD132SD xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMADD132SD instruction to the active function.
|
|
func (c *Context) VFMADD132SD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMADD132SD(ops...))
|
|
}
|
|
|
|
// VFMADD132SD: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132SD m64 xmm xmm
|
|
// VFMADD132SD xmm xmm xmm
|
|
// VFMADD132SD m64 xmm k xmm
|
|
// VFMADD132SD xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMADD132SD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD132SD(ops ...operand.Op) { ctx.VFMADD132SD(ops...) }
|
|
|
|
// VFMADD132SD_RD_SAE: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132SD.RD_SAE xmm xmm k xmm
|
|
// VFMADD132SD.RD_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFMADD132SD.RD_SAE instruction to the active function.
|
|
func (c *Context) VFMADD132SD_RD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMADD132SD_RD_SAE(ops...))
|
|
}
|
|
|
|
// VFMADD132SD_RD_SAE: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132SD.RD_SAE xmm xmm k xmm
|
|
// VFMADD132SD.RD_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFMADD132SD.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD132SD_RD_SAE(ops ...operand.Op) { ctx.VFMADD132SD_RD_SAE(ops...) }
|
|
|
|
// VFMADD132SD_RD_SAE_Z: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132SD.RD_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMADD132SD.RD_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMADD132SD_RD_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VFMADD132SD_RD_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VFMADD132SD_RD_SAE_Z: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132SD.RD_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMADD132SD.RD_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD132SD_RD_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFMADD132SD_RD_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFMADD132SD_RN_SAE: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132SD.RN_SAE xmm xmm k xmm
|
|
// VFMADD132SD.RN_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFMADD132SD.RN_SAE instruction to the active function.
|
|
func (c *Context) VFMADD132SD_RN_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMADD132SD_RN_SAE(ops...))
|
|
}
|
|
|
|
// VFMADD132SD_RN_SAE: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD132SD.RN_SAE xmm xmm k xmm
|
|
// VFMADD132SD.RN_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFMADD132SD.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD132SD_RN_SAE(ops ...operand.Op) { ctx.VFMADD132SD_RN_SAE(ops...) }
|
|
|
|
// VFMADD132SD_RN_SAE_Z: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VFMADD132SD.RN_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMADD132SD.RN_SAE.Z instruction to the active function.
func (c *Context) VFMADD132SD_RN_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VFMADD132SD_RN_SAE_Z(x, x1, k, x2))
}

// VFMADD132SD_RN_SAE_Z: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VFMADD132SD.RN_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMADD132SD.RN_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMADD132SD_RN_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFMADD132SD_RN_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFMADD132SD_RU_SAE: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VFMADD132SD.RU_SAE xmm xmm k xmm
//	VFMADD132SD.RU_SAE xmm xmm xmm
//
// Construct and append a VFMADD132SD.RU_SAE instruction to the active function.
func (c *Context) VFMADD132SD_RU_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMADD132SD_RU_SAE(ops...))
}

// VFMADD132SD_RU_SAE: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VFMADD132SD.RU_SAE xmm xmm k xmm
//	VFMADD132SD.RU_SAE xmm xmm xmm
//
// Construct and append a VFMADD132SD.RU_SAE instruction to the active function.
// Operates on the global context.
func VFMADD132SD_RU_SAE(ops ...operand.Op) { ctx.VFMADD132SD_RU_SAE(ops...) }
|
|
|
|
// VFMADD132SD_RU_SAE_Z: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VFMADD132SD.RU_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMADD132SD.RU_SAE.Z instruction to the active function.
func (c *Context) VFMADD132SD_RU_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VFMADD132SD_RU_SAE_Z(x, x1, k, x2))
}

// VFMADD132SD_RU_SAE_Z: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VFMADD132SD.RU_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMADD132SD.RU_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMADD132SD_RU_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFMADD132SD_RU_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFMADD132SD_RZ_SAE: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VFMADD132SD.RZ_SAE xmm xmm k xmm
//	VFMADD132SD.RZ_SAE xmm xmm xmm
//
// Construct and append a VFMADD132SD.RZ_SAE instruction to the active function.
func (c *Context) VFMADD132SD_RZ_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMADD132SD_RZ_SAE(ops...))
}

// VFMADD132SD_RZ_SAE: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VFMADD132SD.RZ_SAE xmm xmm k xmm
//	VFMADD132SD.RZ_SAE xmm xmm xmm
//
// Construct and append a VFMADD132SD.RZ_SAE instruction to the active function.
// Operates on the global context.
func VFMADD132SD_RZ_SAE(ops ...operand.Op) { ctx.VFMADD132SD_RZ_SAE(ops...) }
|
|
|
|
// VFMADD132SD_RZ_SAE_Z: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VFMADD132SD.RZ_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMADD132SD.RZ_SAE.Z instruction to the active function.
func (c *Context) VFMADD132SD_RZ_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VFMADD132SD_RZ_SAE_Z(x, x1, k, x2))
}

// VFMADD132SD_RZ_SAE_Z: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VFMADD132SD.RZ_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMADD132SD.RZ_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMADD132SD_RZ_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFMADD132SD_RZ_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFMADD132SD_Z: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VFMADD132SD.Z m64 xmm k xmm
//	VFMADD132SD.Z xmm xmm k xmm
//
// Construct and append a VFMADD132SD.Z instruction to the active function.
func (c *Context) VFMADD132SD_Z(mx, x, k, x1 operand.Op) {
	c.addinstruction(x86.VFMADD132SD_Z(mx, x, k, x1))
}

// VFMADD132SD_Z: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VFMADD132SD.Z m64 xmm k xmm
//	VFMADD132SD.Z xmm xmm k xmm
//
// Construct and append a VFMADD132SD.Z instruction to the active function.
// Operates on the global context.
func VFMADD132SD_Z(mx, x, k, x1 operand.Op) { ctx.VFMADD132SD_Z(mx, x, k, x1) }
|
|
|
|
// VFMADD132SS: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values.
//
// Forms:
//
//	VFMADD132SS m32 xmm xmm
//	VFMADD132SS xmm xmm xmm
//	VFMADD132SS m32 xmm k xmm
//	VFMADD132SS xmm xmm k xmm
//
// Construct and append a VFMADD132SS instruction to the active function.
func (c *Context) VFMADD132SS(ops ...operand.Op) {
	c.addinstruction(x86.VFMADD132SS(ops...))
}

// VFMADD132SS: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values.
//
// Forms:
//
//	VFMADD132SS m32 xmm xmm
//	VFMADD132SS xmm xmm xmm
//	VFMADD132SS m32 xmm k xmm
//	VFMADD132SS xmm xmm k xmm
//
// Construct and append a VFMADD132SS instruction to the active function.
// Operates on the global context.
func VFMADD132SS(ops ...operand.Op) { ctx.VFMADD132SS(ops...) }
|
|
|
|
// VFMADD132SS_RD_SAE: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VFMADD132SS.RD_SAE xmm xmm k xmm
//	VFMADD132SS.RD_SAE xmm xmm xmm
//
// Construct and append a VFMADD132SS.RD_SAE instruction to the active function.
func (c *Context) VFMADD132SS_RD_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMADD132SS_RD_SAE(ops...))
}

// VFMADD132SS_RD_SAE: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VFMADD132SS.RD_SAE xmm xmm k xmm
//	VFMADD132SS.RD_SAE xmm xmm xmm
//
// Construct and append a VFMADD132SS.RD_SAE instruction to the active function.
// Operates on the global context.
func VFMADD132SS_RD_SAE(ops ...operand.Op) { ctx.VFMADD132SS_RD_SAE(ops...) }
|
|
|
|
// VFMADD132SS_RD_SAE_Z: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VFMADD132SS.RD_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMADD132SS.RD_SAE.Z instruction to the active function.
func (c *Context) VFMADD132SS_RD_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VFMADD132SS_RD_SAE_Z(x, x1, k, x2))
}

// VFMADD132SS_RD_SAE_Z: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VFMADD132SS.RD_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMADD132SS.RD_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMADD132SS_RD_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFMADD132SS_RD_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFMADD132SS_RN_SAE: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VFMADD132SS.RN_SAE xmm xmm k xmm
//	VFMADD132SS.RN_SAE xmm xmm xmm
//
// Construct and append a VFMADD132SS.RN_SAE instruction to the active function.
func (c *Context) VFMADD132SS_RN_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMADD132SS_RN_SAE(ops...))
}

// VFMADD132SS_RN_SAE: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VFMADD132SS.RN_SAE xmm xmm k xmm
//	VFMADD132SS.RN_SAE xmm xmm xmm
//
// Construct and append a VFMADD132SS.RN_SAE instruction to the active function.
// Operates on the global context.
func VFMADD132SS_RN_SAE(ops ...operand.Op) { ctx.VFMADD132SS_RN_SAE(ops...) }
|
|
|
|
// VFMADD132SS_RN_SAE_Z: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VFMADD132SS.RN_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMADD132SS.RN_SAE.Z instruction to the active function.
func (c *Context) VFMADD132SS_RN_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VFMADD132SS_RN_SAE_Z(x, x1, k, x2))
}

// VFMADD132SS_RN_SAE_Z: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VFMADD132SS.RN_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMADD132SS.RN_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMADD132SS_RN_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFMADD132SS_RN_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFMADD132SS_RU_SAE: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VFMADD132SS.RU_SAE xmm xmm k xmm
//	VFMADD132SS.RU_SAE xmm xmm xmm
//
// Construct and append a VFMADD132SS.RU_SAE instruction to the active function.
func (c *Context) VFMADD132SS_RU_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMADD132SS_RU_SAE(ops...))
}

// VFMADD132SS_RU_SAE: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VFMADD132SS.RU_SAE xmm xmm k xmm
//	VFMADD132SS.RU_SAE xmm xmm xmm
//
// Construct and append a VFMADD132SS.RU_SAE instruction to the active function.
// Operates on the global context.
func VFMADD132SS_RU_SAE(ops ...operand.Op) { ctx.VFMADD132SS_RU_SAE(ops...) }
|
|
|
|
// VFMADD132SS_RU_SAE_Z: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VFMADD132SS.RU_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMADD132SS.RU_SAE.Z instruction to the active function.
func (c *Context) VFMADD132SS_RU_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VFMADD132SS_RU_SAE_Z(x, x1, k, x2))
}

// VFMADD132SS_RU_SAE_Z: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VFMADD132SS.RU_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMADD132SS.RU_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMADD132SS_RU_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFMADD132SS_RU_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFMADD132SS_RZ_SAE: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VFMADD132SS.RZ_SAE xmm xmm k xmm
//	VFMADD132SS.RZ_SAE xmm xmm xmm
//
// Construct and append a VFMADD132SS.RZ_SAE instruction to the active function.
func (c *Context) VFMADD132SS_RZ_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMADD132SS_RZ_SAE(ops...))
}

// VFMADD132SS_RZ_SAE: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VFMADD132SS.RZ_SAE xmm xmm k xmm
//	VFMADD132SS.RZ_SAE xmm xmm xmm
//
// Construct and append a VFMADD132SS.RZ_SAE instruction to the active function.
// Operates on the global context.
func VFMADD132SS_RZ_SAE(ops ...operand.Op) { ctx.VFMADD132SS_RZ_SAE(ops...) }
|
|
|
|
// VFMADD132SS_RZ_SAE_Z: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VFMADD132SS.RZ_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMADD132SS.RZ_SAE.Z instruction to the active function.
func (c *Context) VFMADD132SS_RZ_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VFMADD132SS_RZ_SAE_Z(x, x1, k, x2))
}

// VFMADD132SS_RZ_SAE_Z: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VFMADD132SS.RZ_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMADD132SS.RZ_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMADD132SS_RZ_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFMADD132SS_RZ_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFMADD132SS_Z: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VFMADD132SS.Z m32 xmm k xmm
//	VFMADD132SS.Z xmm xmm k xmm
//
// Construct and append a VFMADD132SS.Z instruction to the active function.
func (c *Context) VFMADD132SS_Z(mx, x, k, x1 operand.Op) {
	c.addinstruction(x86.VFMADD132SS_Z(mx, x, k, x1))
}

// VFMADD132SS_Z: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VFMADD132SS.Z m32 xmm k xmm
//	VFMADD132SS.Z xmm xmm k xmm
//
// Construct and append a VFMADD132SS.Z instruction to the active function.
// Operates on the global context.
func VFMADD132SS_Z(mx, x, k, x1 operand.Op) { ctx.VFMADD132SS_Z(mx, x, k, x1) }
|
|
|
|
// VFMADD213PD: Fused Multiply-Add of Packed Double-Precision Floating-Point Values.
//
// Forms:
//
//	VFMADD213PD m128 xmm xmm
//	VFMADD213PD m256 ymm ymm
//	VFMADD213PD xmm xmm xmm
//	VFMADD213PD ymm ymm ymm
//	VFMADD213PD m128 xmm k xmm
//	VFMADD213PD m256 ymm k ymm
//	VFMADD213PD xmm xmm k xmm
//	VFMADD213PD ymm ymm k ymm
//	VFMADD213PD m512 zmm k zmm
//	VFMADD213PD m512 zmm zmm
//	VFMADD213PD zmm zmm k zmm
//	VFMADD213PD zmm zmm zmm
//
// Construct and append a VFMADD213PD instruction to the active function.
func (c *Context) VFMADD213PD(ops ...operand.Op) {
	c.addinstruction(x86.VFMADD213PD(ops...))
}

// VFMADD213PD: Fused Multiply-Add of Packed Double-Precision Floating-Point Values.
//
// Forms:
//
//	VFMADD213PD m128 xmm xmm
//	VFMADD213PD m256 ymm ymm
//	VFMADD213PD xmm xmm xmm
//	VFMADD213PD ymm ymm ymm
//	VFMADD213PD m128 xmm k xmm
//	VFMADD213PD m256 ymm k ymm
//	VFMADD213PD xmm xmm k xmm
//	VFMADD213PD ymm ymm k ymm
//	VFMADD213PD m512 zmm k zmm
//	VFMADD213PD m512 zmm zmm
//	VFMADD213PD zmm zmm k zmm
//	VFMADD213PD zmm zmm zmm
//
// Construct and append a VFMADD213PD instruction to the active function.
// Operates on the global context.
func VFMADD213PD(ops ...operand.Op) { ctx.VFMADD213PD(ops...) }
|
|
|
|
// VFMADD213PD_BCST: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VFMADD213PD.BCST m64 xmm k xmm
//	VFMADD213PD.BCST m64 xmm xmm
//	VFMADD213PD.BCST m64 ymm k ymm
//	VFMADD213PD.BCST m64 ymm ymm
//	VFMADD213PD.BCST m64 zmm k zmm
//	VFMADD213PD.BCST m64 zmm zmm
//
// Construct and append a VFMADD213PD.BCST instruction to the active function.
func (c *Context) VFMADD213PD_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VFMADD213PD_BCST(ops...))
}

// VFMADD213PD_BCST: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VFMADD213PD.BCST m64 xmm k xmm
//	VFMADD213PD.BCST m64 xmm xmm
//	VFMADD213PD.BCST m64 ymm k ymm
//	VFMADD213PD.BCST m64 ymm ymm
//	VFMADD213PD.BCST m64 zmm k zmm
//	VFMADD213PD.BCST m64 zmm zmm
//
// Construct and append a VFMADD213PD.BCST instruction to the active function.
// Operates on the global context.
func VFMADD213PD_BCST(ops ...operand.Op) { ctx.VFMADD213PD_BCST(ops...) }
|
|
|
|
// VFMADD213PD_BCST_Z: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VFMADD213PD.BCST.Z m64 xmm k xmm
//	VFMADD213PD.BCST.Z m64 ymm k ymm
//	VFMADD213PD.BCST.Z m64 zmm k zmm
//
// Construct and append a VFMADD213PD.BCST.Z instruction to the active function.
func (c *Context) VFMADD213PD_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VFMADD213PD_BCST_Z(m, xyz, k, xyz1))
}

// VFMADD213PD_BCST_Z: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VFMADD213PD.BCST.Z m64 xmm k xmm
//	VFMADD213PD.BCST.Z m64 ymm k ymm
//	VFMADD213PD.BCST.Z m64 zmm k zmm
//
// Construct and append a VFMADD213PD.BCST.Z instruction to the active function.
// Operates on the global context.
func VFMADD213PD_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VFMADD213PD_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VFMADD213PD_RD_SAE: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VFMADD213PD.RD_SAE zmm zmm k zmm
//	VFMADD213PD.RD_SAE zmm zmm zmm
//
// Construct and append a VFMADD213PD.RD_SAE instruction to the active function.
func (c *Context) VFMADD213PD_RD_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMADD213PD_RD_SAE(ops...))
}

// VFMADD213PD_RD_SAE: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VFMADD213PD.RD_SAE zmm zmm k zmm
//	VFMADD213PD.RD_SAE zmm zmm zmm
//
// Construct and append a VFMADD213PD.RD_SAE instruction to the active function.
// Operates on the global context.
func VFMADD213PD_RD_SAE(ops ...operand.Op) { ctx.VFMADD213PD_RD_SAE(ops...) }
|
|
|
|
// VFMADD213PD_RD_SAE_Z: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VFMADD213PD.RD_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMADD213PD.RD_SAE.Z instruction to the active function.
func (c *Context) VFMADD213PD_RD_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VFMADD213PD_RD_SAE_Z(z, z1, k, z2))
}

// VFMADD213PD_RD_SAE_Z: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VFMADD213PD.RD_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMADD213PD.RD_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMADD213PD_RD_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMADD213PD_RD_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMADD213PD_RN_SAE: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VFMADD213PD.RN_SAE zmm zmm k zmm
//	VFMADD213PD.RN_SAE zmm zmm zmm
//
// Construct and append a VFMADD213PD.RN_SAE instruction to the active function.
func (c *Context) VFMADD213PD_RN_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMADD213PD_RN_SAE(ops...))
}

// VFMADD213PD_RN_SAE: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VFMADD213PD.RN_SAE zmm zmm k zmm
//	VFMADD213PD.RN_SAE zmm zmm zmm
//
// Construct and append a VFMADD213PD.RN_SAE instruction to the active function.
// Operates on the global context.
func VFMADD213PD_RN_SAE(ops ...operand.Op) { ctx.VFMADD213PD_RN_SAE(ops...) }
|
|
|
|
// VFMADD213PD_RN_SAE_Z: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VFMADD213PD.RN_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMADD213PD.RN_SAE.Z instruction to the active function.
func (c *Context) VFMADD213PD_RN_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VFMADD213PD_RN_SAE_Z(z, z1, k, z2))
}

// VFMADD213PD_RN_SAE_Z: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VFMADD213PD.RN_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMADD213PD.RN_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMADD213PD_RN_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMADD213PD_RN_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMADD213PD_RU_SAE: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VFMADD213PD.RU_SAE zmm zmm k zmm
//	VFMADD213PD.RU_SAE zmm zmm zmm
//
// Construct and append a VFMADD213PD.RU_SAE instruction to the active function.
func (c *Context) VFMADD213PD_RU_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMADD213PD_RU_SAE(ops...))
}

// VFMADD213PD_RU_SAE: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VFMADD213PD.RU_SAE zmm zmm k zmm
//	VFMADD213PD.RU_SAE zmm zmm zmm
//
// Construct and append a VFMADD213PD.RU_SAE instruction to the active function.
// Operates on the global context.
func VFMADD213PD_RU_SAE(ops ...operand.Op) { ctx.VFMADD213PD_RU_SAE(ops...) }
|
|
|
|
// VFMADD213PD_RU_SAE_Z: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VFMADD213PD.RU_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMADD213PD.RU_SAE.Z instruction to the active function.
func (c *Context) VFMADD213PD_RU_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VFMADD213PD_RU_SAE_Z(z, z1, k, z2))
}

// VFMADD213PD_RU_SAE_Z: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VFMADD213PD.RU_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMADD213PD.RU_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMADD213PD_RU_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMADD213PD_RU_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMADD213PD_RZ_SAE: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VFMADD213PD.RZ_SAE zmm zmm k zmm
//	VFMADD213PD.RZ_SAE zmm zmm zmm
//
// Construct and append a VFMADD213PD.RZ_SAE instruction to the active function.
func (c *Context) VFMADD213PD_RZ_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMADD213PD_RZ_SAE(ops...))
}

// VFMADD213PD_RZ_SAE: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VFMADD213PD.RZ_SAE zmm zmm k zmm
//	VFMADD213PD.RZ_SAE zmm zmm zmm
//
// Construct and append a VFMADD213PD.RZ_SAE instruction to the active function.
// Operates on the global context.
func VFMADD213PD_RZ_SAE(ops ...operand.Op) { ctx.VFMADD213PD_RZ_SAE(ops...) }
|
|
|
|
// VFMADD213PD_RZ_SAE_Z: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VFMADD213PD.RZ_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMADD213PD.RZ_SAE.Z instruction to the active function.
func (c *Context) VFMADD213PD_RZ_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VFMADD213PD_RZ_SAE_Z(z, z1, k, z2))
}

// VFMADD213PD_RZ_SAE_Z: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VFMADD213PD.RZ_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMADD213PD.RZ_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMADD213PD_RZ_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMADD213PD_RZ_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMADD213PD_Z: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VFMADD213PD.Z m128 xmm k xmm
//	VFMADD213PD.Z m256 ymm k ymm
//	VFMADD213PD.Z xmm xmm k xmm
//	VFMADD213PD.Z ymm ymm k ymm
//	VFMADD213PD.Z m512 zmm k zmm
//	VFMADD213PD.Z zmm zmm k zmm
//
// Construct and append a VFMADD213PD.Z instruction to the active function.
func (c *Context) VFMADD213PD_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VFMADD213PD_Z(mxyz, xyz, k, xyz1))
}

// VFMADD213PD_Z: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VFMADD213PD.Z m128 xmm k xmm
//	VFMADD213PD.Z m256 ymm k ymm
//	VFMADD213PD.Z xmm xmm k xmm
//	VFMADD213PD.Z ymm ymm k ymm
//	VFMADD213PD.Z m512 zmm k zmm
//	VFMADD213PD.Z zmm zmm k zmm
//
// Construct and append a VFMADD213PD.Z instruction to the active function.
// Operates on the global context.
func VFMADD213PD_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VFMADD213PD_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VFMADD213PS: Fused Multiply-Add of Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	VFMADD213PS m128 xmm xmm
//	VFMADD213PS m256 ymm ymm
//	VFMADD213PS xmm xmm xmm
//	VFMADD213PS ymm ymm ymm
//	VFMADD213PS m128 xmm k xmm
//	VFMADD213PS m256 ymm k ymm
//	VFMADD213PS xmm xmm k xmm
//	VFMADD213PS ymm ymm k ymm
//	VFMADD213PS m512 zmm k zmm
//	VFMADD213PS m512 zmm zmm
//	VFMADD213PS zmm zmm k zmm
//	VFMADD213PS zmm zmm zmm
//
// Construct and append a VFMADD213PS instruction to the active function.
func (c *Context) VFMADD213PS(ops ...operand.Op) {
	c.addinstruction(x86.VFMADD213PS(ops...))
}

// VFMADD213PS: Fused Multiply-Add of Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	VFMADD213PS m128 xmm xmm
//	VFMADD213PS m256 ymm ymm
//	VFMADD213PS xmm xmm xmm
//	VFMADD213PS ymm ymm ymm
//	VFMADD213PS m128 xmm k xmm
//	VFMADD213PS m256 ymm k ymm
//	VFMADD213PS xmm xmm k xmm
//	VFMADD213PS ymm ymm k ymm
//	VFMADD213PS m512 zmm k zmm
//	VFMADD213PS m512 zmm zmm
//	VFMADD213PS zmm zmm k zmm
//	VFMADD213PS zmm zmm zmm
//
// Construct and append a VFMADD213PS instruction to the active function.
// Operates on the global context.
func VFMADD213PS(ops ...operand.Op) { ctx.VFMADD213PS(ops...) }
|
|
|
|
// VFMADD213PS_BCST: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VFMADD213PS.BCST m32 xmm k xmm
//	VFMADD213PS.BCST m32 xmm xmm
//	VFMADD213PS.BCST m32 ymm k ymm
//	VFMADD213PS.BCST m32 ymm ymm
//	VFMADD213PS.BCST m32 zmm k zmm
//	VFMADD213PS.BCST m32 zmm zmm
//
// Construct and append a VFMADD213PS.BCST instruction to the active function.
func (c *Context) VFMADD213PS_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VFMADD213PS_BCST(ops...))
}

// VFMADD213PS_BCST: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VFMADD213PS.BCST m32 xmm k xmm
//	VFMADD213PS.BCST m32 xmm xmm
//	VFMADD213PS.BCST m32 ymm k ymm
//	VFMADD213PS.BCST m32 ymm ymm
//	VFMADD213PS.BCST m32 zmm k zmm
//	VFMADD213PS.BCST m32 zmm zmm
//
// Construct and append a VFMADD213PS.BCST instruction to the active function.
// Operates on the global context.
func VFMADD213PS_BCST(ops ...operand.Op) { ctx.VFMADD213PS_BCST(ops...) }
|
|
|
|
// VFMADD213PS_BCST_Z: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VFMADD213PS.BCST.Z m32 xmm k xmm
//	VFMADD213PS.BCST.Z m32 ymm k ymm
//	VFMADD213PS.BCST.Z m32 zmm k zmm
//
// Construct and append a VFMADD213PS.BCST.Z instruction to the active function.
func (c *Context) VFMADD213PS_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VFMADD213PS_BCST_Z(m, xyz, k, xyz1))
}

// VFMADD213PS_BCST_Z: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VFMADD213PS.BCST.Z m32 xmm k xmm
//	VFMADD213PS.BCST.Z m32 ymm k ymm
//	VFMADD213PS.BCST.Z m32 zmm k zmm
//
// Construct and append a VFMADD213PS.BCST.Z instruction to the active function.
// Operates on the global context.
func VFMADD213PS_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VFMADD213PS_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VFMADD213PS_RD_SAE: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VFMADD213PS.RD_SAE zmm zmm k zmm
//	VFMADD213PS.RD_SAE zmm zmm zmm
//
// Construct and append a VFMADD213PS.RD_SAE instruction to the active function.
func (c *Context) VFMADD213PS_RD_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMADD213PS_RD_SAE(ops...))
}

// VFMADD213PS_RD_SAE: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VFMADD213PS.RD_SAE zmm zmm k zmm
//	VFMADD213PS.RD_SAE zmm zmm zmm
//
// Construct and append a VFMADD213PS.RD_SAE instruction to the active function.
// Operates on the global context.
func VFMADD213PS_RD_SAE(ops ...operand.Op) { ctx.VFMADD213PS_RD_SAE(ops...) }
|
|
|
|
// VFMADD213PS_RD_SAE_Z: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VFMADD213PS.RD_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMADD213PS.RD_SAE.Z instruction to the active function.
func (c *Context) VFMADD213PS_RD_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VFMADD213PS_RD_SAE_Z(z, z1, k, z2))
}

// VFMADD213PS_RD_SAE_Z: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VFMADD213PS.RD_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMADD213PS.RD_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMADD213PS_RD_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMADD213PS_RD_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMADD213PS_RN_SAE: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VFMADD213PS.RN_SAE zmm zmm k zmm
//	VFMADD213PS.RN_SAE zmm zmm zmm
//
// Construct and append a VFMADD213PS.RN_SAE instruction to the active function.
func (c *Context) VFMADD213PS_RN_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMADD213PS_RN_SAE(ops...))
}

// VFMADD213PS_RN_SAE: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VFMADD213PS.RN_SAE zmm zmm k zmm
//	VFMADD213PS.RN_SAE zmm zmm zmm
//
// Construct and append a VFMADD213PS.RN_SAE instruction to the active function.
// Operates on the global context.
func VFMADD213PS_RN_SAE(ops ...operand.Op) { ctx.VFMADD213PS_RN_SAE(ops...) }
|
|
|
|
// VFMADD213PS_RN_SAE_Z: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VFMADD213PS.RN_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMADD213PS.RN_SAE.Z instruction to the active function.
func (c *Context) VFMADD213PS_RN_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VFMADD213PS_RN_SAE_Z(z, z1, k, z2))
}

// VFMADD213PS_RN_SAE_Z: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VFMADD213PS.RN_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMADD213PS.RN_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMADD213PS_RN_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMADD213PS_RN_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMADD213PS_RU_SAE: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213PS.RU_SAE zmm zmm k zmm
|
|
// VFMADD213PS.RU_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADD213PS.RU_SAE instruction to the active function.
|
|
func (c *Context) VFMADD213PS_RU_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMADD213PS_RU_SAE(ops...))
|
|
}
|
|
|
|
// VFMADD213PS_RU_SAE: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213PS.RU_SAE zmm zmm k zmm
|
|
// VFMADD213PS.RU_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADD213PS.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD213PS_RU_SAE(ops ...operand.Op) { ctx.VFMADD213PS_RU_SAE(ops...) }
|
|
|
|
// VFMADD213PS_RU_SAE_Z: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213PS.RU_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADD213PS.RU_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMADD213PS_RU_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFMADD213PS_RU_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFMADD213PS_RU_SAE_Z: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213PS.RU_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADD213PS.RU_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD213PS_RU_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMADD213PS_RU_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMADD213PS_RZ_SAE: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213PS.RZ_SAE zmm zmm k zmm
|
|
// VFMADD213PS.RZ_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADD213PS.RZ_SAE instruction to the active function.
|
|
func (c *Context) VFMADD213PS_RZ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMADD213PS_RZ_SAE(ops...))
|
|
}
|
|
|
|
// VFMADD213PS_RZ_SAE: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213PS.RZ_SAE zmm zmm k zmm
|
|
// VFMADD213PS.RZ_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADD213PS.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD213PS_RZ_SAE(ops ...operand.Op) { ctx.VFMADD213PS_RZ_SAE(ops...) }
|
|
|
|
// VFMADD213PS_RZ_SAE_Z: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213PS.RZ_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADD213PS.RZ_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMADD213PS_RZ_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFMADD213PS_RZ_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFMADD213PS_RZ_SAE_Z: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213PS.RZ_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADD213PS.RZ_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD213PS_RZ_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMADD213PS_RZ_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMADD213PS_Z: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213PS.Z m128 xmm k xmm
|
|
// VFMADD213PS.Z m256 ymm k ymm
|
|
// VFMADD213PS.Z xmm xmm k xmm
|
|
// VFMADD213PS.Z ymm ymm k ymm
|
|
// VFMADD213PS.Z m512 zmm k zmm
|
|
// VFMADD213PS.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADD213PS.Z instruction to the active function.
|
|
func (c *Context) VFMADD213PS_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VFMADD213PS_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VFMADD213PS_Z: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213PS.Z m128 xmm k xmm
|
|
// VFMADD213PS.Z m256 ymm k ymm
|
|
// VFMADD213PS.Z xmm xmm k xmm
|
|
// VFMADD213PS.Z ymm ymm k ymm
|
|
// VFMADD213PS.Z m512 zmm k zmm
|
|
// VFMADD213PS.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADD213PS.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD213PS_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VFMADD213PS_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VFMADD213SD: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213SD m64 xmm xmm
|
|
// VFMADD213SD xmm xmm xmm
|
|
// VFMADD213SD m64 xmm k xmm
|
|
// VFMADD213SD xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMADD213SD instruction to the active function.
|
|
func (c *Context) VFMADD213SD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMADD213SD(ops...))
|
|
}
|
|
|
|
// VFMADD213SD: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213SD m64 xmm xmm
|
|
// VFMADD213SD xmm xmm xmm
|
|
// VFMADD213SD m64 xmm k xmm
|
|
// VFMADD213SD xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMADD213SD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD213SD(ops ...operand.Op) { ctx.VFMADD213SD(ops...) }
|
|
|
|
// VFMADD213SD_RD_SAE: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213SD.RD_SAE xmm xmm k xmm
|
|
// VFMADD213SD.RD_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFMADD213SD.RD_SAE instruction to the active function.
|
|
func (c *Context) VFMADD213SD_RD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMADD213SD_RD_SAE(ops...))
|
|
}
|
|
|
|
// VFMADD213SD_RD_SAE: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213SD.RD_SAE xmm xmm k xmm
|
|
// VFMADD213SD.RD_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFMADD213SD.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD213SD_RD_SAE(ops ...operand.Op) { ctx.VFMADD213SD_RD_SAE(ops...) }
|
|
|
|
// VFMADD213SD_RD_SAE_Z: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213SD.RD_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMADD213SD.RD_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMADD213SD_RD_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VFMADD213SD_RD_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VFMADD213SD_RD_SAE_Z: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213SD.RD_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMADD213SD.RD_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD213SD_RD_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFMADD213SD_RD_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFMADD213SD_RN_SAE: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213SD.RN_SAE xmm xmm k xmm
|
|
// VFMADD213SD.RN_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFMADD213SD.RN_SAE instruction to the active function.
|
|
func (c *Context) VFMADD213SD_RN_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMADD213SD_RN_SAE(ops...))
|
|
}
|
|
|
|
// VFMADD213SD_RN_SAE: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213SD.RN_SAE xmm xmm k xmm
|
|
// VFMADD213SD.RN_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFMADD213SD.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD213SD_RN_SAE(ops ...operand.Op) { ctx.VFMADD213SD_RN_SAE(ops...) }
|
|
|
|
// VFMADD213SD_RN_SAE_Z: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213SD.RN_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMADD213SD.RN_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMADD213SD_RN_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VFMADD213SD_RN_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VFMADD213SD_RN_SAE_Z: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213SD.RN_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMADD213SD.RN_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD213SD_RN_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFMADD213SD_RN_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFMADD213SD_RU_SAE: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213SD.RU_SAE xmm xmm k xmm
|
|
// VFMADD213SD.RU_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFMADD213SD.RU_SAE instruction to the active function.
|
|
func (c *Context) VFMADD213SD_RU_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMADD213SD_RU_SAE(ops...))
|
|
}
|
|
|
|
// VFMADD213SD_RU_SAE: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213SD.RU_SAE xmm xmm k xmm
|
|
// VFMADD213SD.RU_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFMADD213SD.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD213SD_RU_SAE(ops ...operand.Op) { ctx.VFMADD213SD_RU_SAE(ops...) }
|
|
|
|
// VFMADD213SD_RU_SAE_Z: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213SD.RU_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMADD213SD.RU_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMADD213SD_RU_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VFMADD213SD_RU_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VFMADD213SD_RU_SAE_Z: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213SD.RU_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMADD213SD.RU_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD213SD_RU_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFMADD213SD_RU_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFMADD213SD_RZ_SAE: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213SD.RZ_SAE xmm xmm k xmm
|
|
// VFMADD213SD.RZ_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFMADD213SD.RZ_SAE instruction to the active function.
|
|
func (c *Context) VFMADD213SD_RZ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMADD213SD_RZ_SAE(ops...))
|
|
}
|
|
|
|
// VFMADD213SD_RZ_SAE: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213SD.RZ_SAE xmm xmm k xmm
|
|
// VFMADD213SD.RZ_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFMADD213SD.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD213SD_RZ_SAE(ops ...operand.Op) { ctx.VFMADD213SD_RZ_SAE(ops...) }
|
|
|
|
// VFMADD213SD_RZ_SAE_Z: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213SD.RZ_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMADD213SD.RZ_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMADD213SD_RZ_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VFMADD213SD_RZ_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VFMADD213SD_RZ_SAE_Z: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213SD.RZ_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMADD213SD.RZ_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD213SD_RZ_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFMADD213SD_RZ_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFMADD213SD_Z: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213SD.Z m64 xmm k xmm
|
|
// VFMADD213SD.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMADD213SD.Z instruction to the active function.
|
|
func (c *Context) VFMADD213SD_Z(mx, x, k, x1 operand.Op) {
|
|
c.addinstruction(x86.VFMADD213SD_Z(mx, x, k, x1))
|
|
}
|
|
|
|
// VFMADD213SD_Z: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213SD.Z m64 xmm k xmm
|
|
// VFMADD213SD.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMADD213SD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD213SD_Z(mx, x, k, x1 operand.Op) { ctx.VFMADD213SD_Z(mx, x, k, x1) }
|
|
|
|
// VFMADD213SS: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213SS m32 xmm xmm
|
|
// VFMADD213SS xmm xmm xmm
|
|
// VFMADD213SS m32 xmm k xmm
|
|
// VFMADD213SS xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMADD213SS instruction to the active function.
|
|
func (c *Context) VFMADD213SS(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMADD213SS(ops...))
|
|
}
|
|
|
|
// VFMADD213SS: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213SS m32 xmm xmm
|
|
// VFMADD213SS xmm xmm xmm
|
|
// VFMADD213SS m32 xmm k xmm
|
|
// VFMADD213SS xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMADD213SS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD213SS(ops ...operand.Op) { ctx.VFMADD213SS(ops...) }
|
|
|
|
// VFMADD213SS_RD_SAE: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213SS.RD_SAE xmm xmm k xmm
|
|
// VFMADD213SS.RD_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFMADD213SS.RD_SAE instruction to the active function.
|
|
func (c *Context) VFMADD213SS_RD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMADD213SS_RD_SAE(ops...))
|
|
}
|
|
|
|
// VFMADD213SS_RD_SAE: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213SS.RD_SAE xmm xmm k xmm
|
|
// VFMADD213SS.RD_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFMADD213SS.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD213SS_RD_SAE(ops ...operand.Op) { ctx.VFMADD213SS_RD_SAE(ops...) }
|
|
|
|
// VFMADD213SS_RD_SAE_Z: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213SS.RD_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMADD213SS.RD_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMADD213SS_RD_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VFMADD213SS_RD_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VFMADD213SS_RD_SAE_Z: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213SS.RD_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMADD213SS.RD_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD213SS_RD_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFMADD213SS_RD_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFMADD213SS_RN_SAE: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213SS.RN_SAE xmm xmm k xmm
|
|
// VFMADD213SS.RN_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFMADD213SS.RN_SAE instruction to the active function.
|
|
func (c *Context) VFMADD213SS_RN_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMADD213SS_RN_SAE(ops...))
|
|
}
|
|
|
|
// VFMADD213SS_RN_SAE: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213SS.RN_SAE xmm xmm k xmm
|
|
// VFMADD213SS.RN_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFMADD213SS.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD213SS_RN_SAE(ops ...operand.Op) { ctx.VFMADD213SS_RN_SAE(ops...) }
|
|
|
|
// VFMADD213SS_RN_SAE_Z: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213SS.RN_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMADD213SS.RN_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMADD213SS_RN_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VFMADD213SS_RN_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VFMADD213SS_RN_SAE_Z: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213SS.RN_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMADD213SS.RN_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD213SS_RN_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFMADD213SS_RN_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFMADD213SS_RU_SAE: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213SS.RU_SAE xmm xmm k xmm
|
|
// VFMADD213SS.RU_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFMADD213SS.RU_SAE instruction to the active function.
|
|
func (c *Context) VFMADD213SS_RU_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMADD213SS_RU_SAE(ops...))
|
|
}
|
|
|
|
// VFMADD213SS_RU_SAE: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213SS.RU_SAE xmm xmm k xmm
|
|
// VFMADD213SS.RU_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFMADD213SS.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD213SS_RU_SAE(ops ...operand.Op) { ctx.VFMADD213SS_RU_SAE(ops...) }
|
|
|
|
// VFMADD213SS_RU_SAE_Z: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213SS.RU_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMADD213SS.RU_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMADD213SS_RU_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VFMADD213SS_RU_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VFMADD213SS_RU_SAE_Z: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213SS.RU_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMADD213SS.RU_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD213SS_RU_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFMADD213SS_RU_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFMADD213SS_RZ_SAE: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213SS.RZ_SAE xmm xmm k xmm
|
|
// VFMADD213SS.RZ_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFMADD213SS.RZ_SAE instruction to the active function.
|
|
func (c *Context) VFMADD213SS_RZ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMADD213SS_RZ_SAE(ops...))
|
|
}
|
|
|
|
// VFMADD213SS_RZ_SAE: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213SS.RZ_SAE xmm xmm k xmm
|
|
// VFMADD213SS.RZ_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFMADD213SS.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD213SS_RZ_SAE(ops ...operand.Op) { ctx.VFMADD213SS_RZ_SAE(ops...) }
|
|
|
|
// VFMADD213SS_RZ_SAE_Z: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213SS.RZ_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMADD213SS.RZ_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMADD213SS_RZ_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VFMADD213SS_RZ_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VFMADD213SS_RZ_SAE_Z: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213SS.RZ_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMADD213SS.RZ_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD213SS_RZ_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFMADD213SS_RZ_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFMADD213SS_Z: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213SS.Z m32 xmm k xmm
|
|
// VFMADD213SS.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMADD213SS.Z instruction to the active function.
|
|
func (c *Context) VFMADD213SS_Z(mx, x, k, x1 operand.Op) {
|
|
c.addinstruction(x86.VFMADD213SS_Z(mx, x, k, x1))
|
|
}
|
|
|
|
// VFMADD213SS_Z: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD213SS.Z m32 xmm k xmm
|
|
// VFMADD213SS.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMADD213SS.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD213SS_Z(mx, x, k, x1 operand.Op) { ctx.VFMADD213SS_Z(mx, x, k, x1) }
|
|
|
|
// VFMADD231PD: Fused Multiply-Add of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD231PD m128 xmm xmm
|
|
// VFMADD231PD m256 ymm ymm
|
|
// VFMADD231PD xmm xmm xmm
|
|
// VFMADD231PD ymm ymm ymm
|
|
// VFMADD231PD m128 xmm k xmm
|
|
// VFMADD231PD m256 ymm k ymm
|
|
// VFMADD231PD xmm xmm k xmm
|
|
// VFMADD231PD ymm ymm k ymm
|
|
// VFMADD231PD m512 zmm k zmm
|
|
// VFMADD231PD m512 zmm zmm
|
|
// VFMADD231PD zmm zmm k zmm
|
|
// VFMADD231PD zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADD231PD instruction to the active function.
|
|
func (c *Context) VFMADD231PD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMADD231PD(ops...))
|
|
}
|
|
|
|
// VFMADD231PD: Fused Multiply-Add of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD231PD m128 xmm xmm
|
|
// VFMADD231PD m256 ymm ymm
|
|
// VFMADD231PD xmm xmm xmm
|
|
// VFMADD231PD ymm ymm ymm
|
|
// VFMADD231PD m128 xmm k xmm
|
|
// VFMADD231PD m256 ymm k ymm
|
|
// VFMADD231PD xmm xmm k xmm
|
|
// VFMADD231PD ymm ymm k ymm
|
|
// VFMADD231PD m512 zmm k zmm
|
|
// VFMADD231PD m512 zmm zmm
|
|
// VFMADD231PD zmm zmm k zmm
|
|
// VFMADD231PD zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADD231PD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD231PD(ops ...operand.Op) { ctx.VFMADD231PD(ops...) }
|
|
|
|
// VFMADD231PD_BCST: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD231PD.BCST m64 xmm k xmm
|
|
// VFMADD231PD.BCST m64 xmm xmm
|
|
// VFMADD231PD.BCST m64 ymm k ymm
|
|
// VFMADD231PD.BCST m64 ymm ymm
|
|
// VFMADD231PD.BCST m64 zmm k zmm
|
|
// VFMADD231PD.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VFMADD231PD.BCST instruction to the active function.
|
|
func (c *Context) VFMADD231PD_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMADD231PD_BCST(ops...))
|
|
}
|
|
|
|
// VFMADD231PD_BCST: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD231PD.BCST m64 xmm k xmm
|
|
// VFMADD231PD.BCST m64 xmm xmm
|
|
// VFMADD231PD.BCST m64 ymm k ymm
|
|
// VFMADD231PD.BCST m64 ymm ymm
|
|
// VFMADD231PD.BCST m64 zmm k zmm
|
|
// VFMADD231PD.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VFMADD231PD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD231PD_BCST(ops ...operand.Op) { ctx.VFMADD231PD_BCST(ops...) }
|
|
|
|
// VFMADD231PD_BCST_Z: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD231PD.BCST.Z m64 xmm k xmm
|
|
// VFMADD231PD.BCST.Z m64 ymm k ymm
|
|
// VFMADD231PD.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VFMADD231PD.BCST.Z instruction to the active function.
|
|
func (c *Context) VFMADD231PD_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VFMADD231PD_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VFMADD231PD_BCST_Z: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD231PD.BCST.Z m64 xmm k xmm
|
|
// VFMADD231PD.BCST.Z m64 ymm k ymm
|
|
// VFMADD231PD.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VFMADD231PD.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD231PD_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VFMADD231PD_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VFMADD231PD_RD_SAE: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD231PD.RD_SAE zmm zmm k zmm
|
|
// VFMADD231PD.RD_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADD231PD.RD_SAE instruction to the active function.
|
|
func (c *Context) VFMADD231PD_RD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMADD231PD_RD_SAE(ops...))
|
|
}
|
|
|
|
// VFMADD231PD_RD_SAE: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD231PD.RD_SAE zmm zmm k zmm
|
|
// VFMADD231PD.RD_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADD231PD.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD231PD_RD_SAE(ops ...operand.Op) { ctx.VFMADD231PD_RD_SAE(ops...) }
|
|
|
|
// VFMADD231PD_RD_SAE_Z: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD231PD.RD_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADD231PD.RD_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMADD231PD_RD_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFMADD231PD_RD_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFMADD231PD_RD_SAE_Z: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD231PD.RD_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADD231PD.RD_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD231PD_RD_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMADD231PD_RD_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMADD231PD_RN_SAE: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD231PD.RN_SAE zmm zmm k zmm
|
|
// VFMADD231PD.RN_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADD231PD.RN_SAE instruction to the active function.
|
|
func (c *Context) VFMADD231PD_RN_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMADD231PD_RN_SAE(ops...))
|
|
}
|
|
|
|
// VFMADD231PD_RN_SAE: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD231PD.RN_SAE zmm zmm k zmm
|
|
// VFMADD231PD.RN_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADD231PD.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD231PD_RN_SAE(ops ...operand.Op) { ctx.VFMADD231PD_RN_SAE(ops...) }
|
|
|
|
// VFMADD231PD_RN_SAE_Z: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD231PD.RN_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADD231PD.RN_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMADD231PD_RN_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFMADD231PD_RN_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFMADD231PD_RN_SAE_Z: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADD231PD.RN_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADD231PD.RN_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADD231PD_RN_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMADD231PD_RN_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMADD231PD_RU_SAE: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
// VFMADD231PD.RU_SAE zmm zmm k zmm
// VFMADD231PD.RU_SAE zmm zmm zmm
//
// Construct and append a VFMADD231PD.RU_SAE instruction to the active function.
func (c *Context) VFMADD231PD_RU_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMADD231PD_RU_SAE(ops...))
}

// VFMADD231PD_RU_SAE: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
// VFMADD231PD.RU_SAE zmm zmm k zmm
// VFMADD231PD.RU_SAE zmm zmm zmm
//
// Construct and append a VFMADD231PD.RU_SAE instruction to the active function.
// Operates on the global context.
func VFMADD231PD_RU_SAE(ops ...operand.Op) { ctx.VFMADD231PD_RU_SAE(ops...) }

// VFMADD231PD_RU_SAE_Z: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
// VFMADD231PD.RU_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMADD231PD.RU_SAE.Z instruction to the active function.
func (c *Context) VFMADD231PD_RU_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VFMADD231PD_RU_SAE_Z(z, z1, k, z2))
}

// VFMADD231PD_RU_SAE_Z: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
// VFMADD231PD.RU_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMADD231PD.RU_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMADD231PD_RU_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMADD231PD_RU_SAE_Z(z, z1, k, z2) }

// VFMADD231PD_RZ_SAE: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
// VFMADD231PD.RZ_SAE zmm zmm k zmm
// VFMADD231PD.RZ_SAE zmm zmm zmm
//
// Construct and append a VFMADD231PD.RZ_SAE instruction to the active function.
func (c *Context) VFMADD231PD_RZ_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMADD231PD_RZ_SAE(ops...))
}

// VFMADD231PD_RZ_SAE: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
// VFMADD231PD.RZ_SAE zmm zmm k zmm
// VFMADD231PD.RZ_SAE zmm zmm zmm
//
// Construct and append a VFMADD231PD.RZ_SAE instruction to the active function.
// Operates on the global context.
func VFMADD231PD_RZ_SAE(ops ...operand.Op) { ctx.VFMADD231PD_RZ_SAE(ops...) }

// VFMADD231PD_RZ_SAE_Z: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
// VFMADD231PD.RZ_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMADD231PD.RZ_SAE.Z instruction to the active function.
func (c *Context) VFMADD231PD_RZ_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VFMADD231PD_RZ_SAE_Z(z, z1, k, z2))
}

// VFMADD231PD_RZ_SAE_Z: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
// VFMADD231PD.RZ_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMADD231PD.RZ_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMADD231PD_RZ_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMADD231PD_RZ_SAE_Z(z, z1, k, z2) }

// VFMADD231PD_Z: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
// VFMADD231PD.Z m128 xmm k xmm
// VFMADD231PD.Z m256 ymm k ymm
// VFMADD231PD.Z xmm xmm k xmm
// VFMADD231PD.Z ymm ymm k ymm
// VFMADD231PD.Z m512 zmm k zmm
// VFMADD231PD.Z zmm zmm k zmm
//
// Construct and append a VFMADD231PD.Z instruction to the active function.
func (c *Context) VFMADD231PD_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VFMADD231PD_Z(mxyz, xyz, k, xyz1))
}

// VFMADD231PD_Z: Fused Multiply-Add of Packed Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
// VFMADD231PD.Z m128 xmm k xmm
// VFMADD231PD.Z m256 ymm k ymm
// VFMADD231PD.Z xmm xmm k xmm
// VFMADD231PD.Z ymm ymm k ymm
// VFMADD231PD.Z m512 zmm k zmm
// VFMADD231PD.Z zmm zmm k zmm
//
// Construct and append a VFMADD231PD.Z instruction to the active function.
// Operates on the global context.
func VFMADD231PD_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VFMADD231PD_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VFMADD231PS: Fused Multiply-Add of Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// VFMADD231PS m128 xmm xmm
// VFMADD231PS m256 ymm ymm
// VFMADD231PS xmm xmm xmm
// VFMADD231PS ymm ymm ymm
// VFMADD231PS m128 xmm k xmm
// VFMADD231PS m256 ymm k ymm
// VFMADD231PS xmm xmm k xmm
// VFMADD231PS ymm ymm k ymm
// VFMADD231PS m512 zmm k zmm
// VFMADD231PS m512 zmm zmm
// VFMADD231PS zmm zmm k zmm
// VFMADD231PS zmm zmm zmm
//
// Construct and append a VFMADD231PS instruction to the active function.
func (c *Context) VFMADD231PS(ops ...operand.Op) {
	c.addinstruction(x86.VFMADD231PS(ops...))
}

// VFMADD231PS: Fused Multiply-Add of Packed Single-Precision Floating-Point Values.
//
// Forms:
//
// VFMADD231PS m128 xmm xmm
// VFMADD231PS m256 ymm ymm
// VFMADD231PS xmm xmm xmm
// VFMADD231PS ymm ymm ymm
// VFMADD231PS m128 xmm k xmm
// VFMADD231PS m256 ymm k ymm
// VFMADD231PS xmm xmm k xmm
// VFMADD231PS ymm ymm k ymm
// VFMADD231PS m512 zmm k zmm
// VFMADD231PS m512 zmm zmm
// VFMADD231PS zmm zmm k zmm
// VFMADD231PS zmm zmm zmm
//
// Construct and append a VFMADD231PS instruction to the active function.
// Operates on the global context.
func VFMADD231PS(ops ...operand.Op) { ctx.VFMADD231PS(ops...) }

// VFMADD231PS_BCST: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
// VFMADD231PS.BCST m32 xmm k xmm
// VFMADD231PS.BCST m32 xmm xmm
// VFMADD231PS.BCST m32 ymm k ymm
// VFMADD231PS.BCST m32 ymm ymm
// VFMADD231PS.BCST m32 zmm k zmm
// VFMADD231PS.BCST m32 zmm zmm
//
// Construct and append a VFMADD231PS.BCST instruction to the active function.
func (c *Context) VFMADD231PS_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VFMADD231PS_BCST(ops...))
}

// VFMADD231PS_BCST: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
// VFMADD231PS.BCST m32 xmm k xmm
// VFMADD231PS.BCST m32 xmm xmm
// VFMADD231PS.BCST m32 ymm k ymm
// VFMADD231PS.BCST m32 ymm ymm
// VFMADD231PS.BCST m32 zmm k zmm
// VFMADD231PS.BCST m32 zmm zmm
//
// Construct and append a VFMADD231PS.BCST instruction to the active function.
// Operates on the global context.
func VFMADD231PS_BCST(ops ...operand.Op) { ctx.VFMADD231PS_BCST(ops...) }

// VFMADD231PS_BCST_Z: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
// VFMADD231PS.BCST.Z m32 xmm k xmm
// VFMADD231PS.BCST.Z m32 ymm k ymm
// VFMADD231PS.BCST.Z m32 zmm k zmm
//
// Construct and append a VFMADD231PS.BCST.Z instruction to the active function.
func (c *Context) VFMADD231PS_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VFMADD231PS_BCST_Z(m, xyz, k, xyz1))
}

// VFMADD231PS_BCST_Z: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
// VFMADD231PS.BCST.Z m32 xmm k xmm
// VFMADD231PS.BCST.Z m32 ymm k ymm
// VFMADD231PS.BCST.Z m32 zmm k zmm
//
// Construct and append a VFMADD231PS.BCST.Z instruction to the active function.
// Operates on the global context.
func VFMADD231PS_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VFMADD231PS_BCST_Z(m, xyz, k, xyz1) }

// VFMADD231PS_RD_SAE: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
// VFMADD231PS.RD_SAE zmm zmm k zmm
// VFMADD231PS.RD_SAE zmm zmm zmm
//
// Construct and append a VFMADD231PS.RD_SAE instruction to the active function.
func (c *Context) VFMADD231PS_RD_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMADD231PS_RD_SAE(ops...))
}

// VFMADD231PS_RD_SAE: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
// VFMADD231PS.RD_SAE zmm zmm k zmm
// VFMADD231PS.RD_SAE zmm zmm zmm
//
// Construct and append a VFMADD231PS.RD_SAE instruction to the active function.
// Operates on the global context.
func VFMADD231PS_RD_SAE(ops ...operand.Op) { ctx.VFMADD231PS_RD_SAE(ops...) }

// VFMADD231PS_RD_SAE_Z: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
// VFMADD231PS.RD_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMADD231PS.RD_SAE.Z instruction to the active function.
func (c *Context) VFMADD231PS_RD_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VFMADD231PS_RD_SAE_Z(z, z1, k, z2))
}

// VFMADD231PS_RD_SAE_Z: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
// VFMADD231PS.RD_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMADD231PS.RD_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMADD231PS_RD_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMADD231PS_RD_SAE_Z(z, z1, k, z2) }

// VFMADD231PS_RN_SAE: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
// VFMADD231PS.RN_SAE zmm zmm k zmm
// VFMADD231PS.RN_SAE zmm zmm zmm
//
// Construct and append a VFMADD231PS.RN_SAE instruction to the active function.
func (c *Context) VFMADD231PS_RN_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMADD231PS_RN_SAE(ops...))
}

// VFMADD231PS_RN_SAE: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
// VFMADD231PS.RN_SAE zmm zmm k zmm
// VFMADD231PS.RN_SAE zmm zmm zmm
//
// Construct and append a VFMADD231PS.RN_SAE instruction to the active function.
// Operates on the global context.
func VFMADD231PS_RN_SAE(ops ...operand.Op) { ctx.VFMADD231PS_RN_SAE(ops...) }

// VFMADD231PS_RN_SAE_Z: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
// VFMADD231PS.RN_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMADD231PS.RN_SAE.Z instruction to the active function.
func (c *Context) VFMADD231PS_RN_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VFMADD231PS_RN_SAE_Z(z, z1, k, z2))
}

// VFMADD231PS_RN_SAE_Z: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
// VFMADD231PS.RN_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMADD231PS.RN_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMADD231PS_RN_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMADD231PS_RN_SAE_Z(z, z1, k, z2) }

// VFMADD231PS_RU_SAE: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
// VFMADD231PS.RU_SAE zmm zmm k zmm
// VFMADD231PS.RU_SAE zmm zmm zmm
//
// Construct and append a VFMADD231PS.RU_SAE instruction to the active function.
func (c *Context) VFMADD231PS_RU_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMADD231PS_RU_SAE(ops...))
}

// VFMADD231PS_RU_SAE: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
// VFMADD231PS.RU_SAE zmm zmm k zmm
// VFMADD231PS.RU_SAE zmm zmm zmm
//
// Construct and append a VFMADD231PS.RU_SAE instruction to the active function.
// Operates on the global context.
func VFMADD231PS_RU_SAE(ops ...operand.Op) { ctx.VFMADD231PS_RU_SAE(ops...) }

// VFMADD231PS_RU_SAE_Z: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
// VFMADD231PS.RU_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMADD231PS.RU_SAE.Z instruction to the active function.
func (c *Context) VFMADD231PS_RU_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VFMADD231PS_RU_SAE_Z(z, z1, k, z2))
}

// VFMADD231PS_RU_SAE_Z: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
// VFMADD231PS.RU_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMADD231PS.RU_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMADD231PS_RU_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMADD231PS_RU_SAE_Z(z, z1, k, z2) }

// VFMADD231PS_RZ_SAE: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
// VFMADD231PS.RZ_SAE zmm zmm k zmm
// VFMADD231PS.RZ_SAE zmm zmm zmm
//
// Construct and append a VFMADD231PS.RZ_SAE instruction to the active function.
func (c *Context) VFMADD231PS_RZ_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMADD231PS_RZ_SAE(ops...))
}

// VFMADD231PS_RZ_SAE: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
// VFMADD231PS.RZ_SAE zmm zmm k zmm
// VFMADD231PS.RZ_SAE zmm zmm zmm
//
// Construct and append a VFMADD231PS.RZ_SAE instruction to the active function.
// Operates on the global context.
func VFMADD231PS_RZ_SAE(ops ...operand.Op) { ctx.VFMADD231PS_RZ_SAE(ops...) }

// VFMADD231PS_RZ_SAE_Z: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
// VFMADD231PS.RZ_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMADD231PS.RZ_SAE.Z instruction to the active function.
func (c *Context) VFMADD231PS_RZ_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VFMADD231PS_RZ_SAE_Z(z, z1, k, z2))
}

// VFMADD231PS_RZ_SAE_Z: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
// VFMADD231PS.RZ_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMADD231PS.RZ_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMADD231PS_RZ_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMADD231PS_RZ_SAE_Z(z, z1, k, z2) }

// VFMADD231PS_Z: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
// VFMADD231PS.Z m128 xmm k xmm
// VFMADD231PS.Z m256 ymm k ymm
// VFMADD231PS.Z xmm xmm k xmm
// VFMADD231PS.Z ymm ymm k ymm
// VFMADD231PS.Z m512 zmm k zmm
// VFMADD231PS.Z zmm zmm k zmm
//
// Construct and append a VFMADD231PS.Z instruction to the active function.
func (c *Context) VFMADD231PS_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VFMADD231PS_Z(mxyz, xyz, k, xyz1))
}

// VFMADD231PS_Z: Fused Multiply-Add of Packed Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
// VFMADD231PS.Z m128 xmm k xmm
// VFMADD231PS.Z m256 ymm k ymm
// VFMADD231PS.Z xmm xmm k xmm
// VFMADD231PS.Z ymm ymm k ymm
// VFMADD231PS.Z m512 zmm k zmm
// VFMADD231PS.Z zmm zmm k zmm
//
// Construct and append a VFMADD231PS.Z instruction to the active function.
// Operates on the global context.
func VFMADD231PS_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VFMADD231PS_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VFMADD231SD: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values.
//
// Forms:
//
// VFMADD231SD m64 xmm xmm
// VFMADD231SD xmm xmm xmm
// VFMADD231SD m64 xmm k xmm
// VFMADD231SD xmm xmm k xmm
//
// Construct and append a VFMADD231SD instruction to the active function.
func (c *Context) VFMADD231SD(ops ...operand.Op) {
	c.addinstruction(x86.VFMADD231SD(ops...))
}

// VFMADD231SD: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values.
//
// Forms:
//
// VFMADD231SD m64 xmm xmm
// VFMADD231SD xmm xmm xmm
// VFMADD231SD m64 xmm k xmm
// VFMADD231SD xmm xmm k xmm
//
// Construct and append a VFMADD231SD instruction to the active function.
// Operates on the global context.
func VFMADD231SD(ops ...operand.Op) { ctx.VFMADD231SD(ops...) }

// VFMADD231SD_RD_SAE: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
// VFMADD231SD.RD_SAE xmm xmm k xmm
// VFMADD231SD.RD_SAE xmm xmm xmm
//
// Construct and append a VFMADD231SD.RD_SAE instruction to the active function.
func (c *Context) VFMADD231SD_RD_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMADD231SD_RD_SAE(ops...))
}

// VFMADD231SD_RD_SAE: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
// VFMADD231SD.RD_SAE xmm xmm k xmm
// VFMADD231SD.RD_SAE xmm xmm xmm
//
// Construct and append a VFMADD231SD.RD_SAE instruction to the active function.
// Operates on the global context.
func VFMADD231SD_RD_SAE(ops ...operand.Op) { ctx.VFMADD231SD_RD_SAE(ops...) }

// VFMADD231SD_RD_SAE_Z: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
// VFMADD231SD.RD_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMADD231SD.RD_SAE.Z instruction to the active function.
func (c *Context) VFMADD231SD_RD_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VFMADD231SD_RD_SAE_Z(x, x1, k, x2))
}

// VFMADD231SD_RD_SAE_Z: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
// VFMADD231SD.RD_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMADD231SD.RD_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMADD231SD_RD_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFMADD231SD_RD_SAE_Z(x, x1, k, x2) }

// VFMADD231SD_RN_SAE: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
// VFMADD231SD.RN_SAE xmm xmm k xmm
// VFMADD231SD.RN_SAE xmm xmm xmm
//
// Construct and append a VFMADD231SD.RN_SAE instruction to the active function.
func (c *Context) VFMADD231SD_RN_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMADD231SD_RN_SAE(ops...))
}

// VFMADD231SD_RN_SAE: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
// VFMADD231SD.RN_SAE xmm xmm k xmm
// VFMADD231SD.RN_SAE xmm xmm xmm
//
// Construct and append a VFMADD231SD.RN_SAE instruction to the active function.
// Operates on the global context.
func VFMADD231SD_RN_SAE(ops ...operand.Op) { ctx.VFMADD231SD_RN_SAE(ops...) }

// VFMADD231SD_RN_SAE_Z: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
// VFMADD231SD.RN_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMADD231SD.RN_SAE.Z instruction to the active function.
func (c *Context) VFMADD231SD_RN_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VFMADD231SD_RN_SAE_Z(x, x1, k, x2))
}

// VFMADD231SD_RN_SAE_Z: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
// VFMADD231SD.RN_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMADD231SD.RN_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMADD231SD_RN_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFMADD231SD_RN_SAE_Z(x, x1, k, x2) }

// VFMADD231SD_RU_SAE: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
// VFMADD231SD.RU_SAE xmm xmm k xmm
// VFMADD231SD.RU_SAE xmm xmm xmm
//
// Construct and append a VFMADD231SD.RU_SAE instruction to the active function.
func (c *Context) VFMADD231SD_RU_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMADD231SD_RU_SAE(ops...))
}

// VFMADD231SD_RU_SAE: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
// VFMADD231SD.RU_SAE xmm xmm k xmm
// VFMADD231SD.RU_SAE xmm xmm xmm
//
// Construct and append a VFMADD231SD.RU_SAE instruction to the active function.
// Operates on the global context.
func VFMADD231SD_RU_SAE(ops ...operand.Op) { ctx.VFMADD231SD_RU_SAE(ops...) }

// VFMADD231SD_RU_SAE_Z: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
// VFMADD231SD.RU_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMADD231SD.RU_SAE.Z instruction to the active function.
func (c *Context) VFMADD231SD_RU_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VFMADD231SD_RU_SAE_Z(x, x1, k, x2))
}

// VFMADD231SD_RU_SAE_Z: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
// VFMADD231SD.RU_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMADD231SD.RU_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMADD231SD_RU_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFMADD231SD_RU_SAE_Z(x, x1, k, x2) }

// VFMADD231SD_RZ_SAE: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
// VFMADD231SD.RZ_SAE xmm xmm k xmm
// VFMADD231SD.RZ_SAE xmm xmm xmm
//
// Construct and append a VFMADD231SD.RZ_SAE instruction to the active function.
func (c *Context) VFMADD231SD_RZ_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMADD231SD_RZ_SAE(ops...))
}

// VFMADD231SD_RZ_SAE: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
// VFMADD231SD.RZ_SAE xmm xmm k xmm
// VFMADD231SD.RZ_SAE xmm xmm xmm
//
// Construct and append a VFMADD231SD.RZ_SAE instruction to the active function.
// Operates on the global context.
func VFMADD231SD_RZ_SAE(ops ...operand.Op) { ctx.VFMADD231SD_RZ_SAE(ops...) }

// VFMADD231SD_RZ_SAE_Z: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
// VFMADD231SD.RZ_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMADD231SD.RZ_SAE.Z instruction to the active function.
func (c *Context) VFMADD231SD_RZ_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VFMADD231SD_RZ_SAE_Z(x, x1, k, x2))
}

// VFMADD231SD_RZ_SAE_Z: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
// VFMADD231SD.RZ_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMADD231SD.RZ_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMADD231SD_RZ_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFMADD231SD_RZ_SAE_Z(x, x1, k, x2) }

// VFMADD231SD_Z: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
// VFMADD231SD.Z m64 xmm k xmm
// VFMADD231SD.Z xmm xmm k xmm
//
// Construct and append a VFMADD231SD.Z instruction to the active function.
func (c *Context) VFMADD231SD_Z(mx, x, k, x1 operand.Op) {
	c.addinstruction(x86.VFMADD231SD_Z(mx, x, k, x1))
}

// VFMADD231SD_Z: Fused Multiply-Add of Scalar Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
// VFMADD231SD.Z m64 xmm k xmm
// VFMADD231SD.Z xmm xmm k xmm
//
// Construct and append a VFMADD231SD.Z instruction to the active function.
// Operates on the global context.
func VFMADD231SD_Z(mx, x, k, x1 operand.Op) { ctx.VFMADD231SD_Z(mx, x, k, x1) }
|
|
|
|
// VFMADD231SS: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values.
//
// Forms:
//
// VFMADD231SS m32 xmm xmm
// VFMADD231SS xmm xmm xmm
// VFMADD231SS m32 xmm k xmm
// VFMADD231SS xmm xmm k xmm
//
// Construct and append a VFMADD231SS instruction to the active function.
func (c *Context) VFMADD231SS(ops ...operand.Op) {
	c.addinstruction(x86.VFMADD231SS(ops...))
}

// VFMADD231SS: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values.
//
// Forms:
//
// VFMADD231SS m32 xmm xmm
// VFMADD231SS xmm xmm xmm
// VFMADD231SS m32 xmm k xmm
// VFMADD231SS xmm xmm k xmm
//
// Construct and append a VFMADD231SS instruction to the active function.
// Operates on the global context.
func VFMADD231SS(ops ...operand.Op) { ctx.VFMADD231SS(ops...) }

// VFMADD231SS_RD_SAE: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
// VFMADD231SS.RD_SAE xmm xmm k xmm
// VFMADD231SS.RD_SAE xmm xmm xmm
//
// Construct and append a VFMADD231SS.RD_SAE instruction to the active function.
func (c *Context) VFMADD231SS_RD_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMADD231SS_RD_SAE(ops...))
}

// VFMADD231SS_RD_SAE: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
// VFMADD231SS.RD_SAE xmm xmm k xmm
// VFMADD231SS.RD_SAE xmm xmm xmm
//
// Construct and append a VFMADD231SS.RD_SAE instruction to the active function.
// Operates on the global context.
func VFMADD231SS_RD_SAE(ops ...operand.Op) { ctx.VFMADD231SS_RD_SAE(ops...) }

// VFMADD231SS_RD_SAE_Z: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
// VFMADD231SS.RD_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMADD231SS.RD_SAE.Z instruction to the active function.
func (c *Context) VFMADD231SS_RD_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VFMADD231SS_RD_SAE_Z(x, x1, k, x2))
}

// VFMADD231SS_RD_SAE_Z: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
// VFMADD231SS.RD_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMADD231SS.RD_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMADD231SS_RD_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFMADD231SS_RD_SAE_Z(x, x1, k, x2) }

// VFMADD231SS_RN_SAE: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
// VFMADD231SS.RN_SAE xmm xmm k xmm
// VFMADD231SS.RN_SAE xmm xmm xmm
//
// Construct and append a VFMADD231SS.RN_SAE instruction to the active function.
func (c *Context) VFMADD231SS_RN_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMADD231SS_RN_SAE(ops...))
}

// VFMADD231SS_RN_SAE: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
// VFMADD231SS.RN_SAE xmm xmm k xmm
// VFMADD231SS.RN_SAE xmm xmm xmm
//
// Construct and append a VFMADD231SS.RN_SAE instruction to the active function.
// Operates on the global context.
func VFMADD231SS_RN_SAE(ops ...operand.Op) { ctx.VFMADD231SS_RN_SAE(ops...) }

// VFMADD231SS_RN_SAE_Z: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
// VFMADD231SS.RN_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMADD231SS.RN_SAE.Z instruction to the active function.
func (c *Context) VFMADD231SS_RN_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VFMADD231SS_RN_SAE_Z(x, x1, k, x2))
}

// VFMADD231SS_RN_SAE_Z: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
// VFMADD231SS.RN_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMADD231SS.RN_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMADD231SS_RN_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFMADD231SS_RN_SAE_Z(x, x1, k, x2) }

// VFMADD231SS_RU_SAE: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
// VFMADD231SS.RU_SAE xmm xmm k xmm
// VFMADD231SS.RU_SAE xmm xmm xmm
//
// Construct and append a VFMADD231SS.RU_SAE instruction to the active function.
func (c *Context) VFMADD231SS_RU_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMADD231SS_RU_SAE(ops...))
}

// VFMADD231SS_RU_SAE: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
// VFMADD231SS.RU_SAE xmm xmm k xmm
// VFMADD231SS.RU_SAE xmm xmm xmm
//
// Construct and append a VFMADD231SS.RU_SAE instruction to the active function.
// Operates on the global context.
func VFMADD231SS_RU_SAE(ops ...operand.Op) { ctx.VFMADD231SS_RU_SAE(ops...) }

// VFMADD231SS_RU_SAE_Z: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
// VFMADD231SS.RU_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMADD231SS.RU_SAE.Z instruction to the active function.
func (c *Context) VFMADD231SS_RU_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VFMADD231SS_RU_SAE_Z(x, x1, k, x2))
}

// VFMADD231SS_RU_SAE_Z: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
// VFMADD231SS.RU_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMADD231SS.RU_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMADD231SS_RU_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFMADD231SS_RU_SAE_Z(x, x1, k, x2) }

// VFMADD231SS_RZ_SAE: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
// VFMADD231SS.RZ_SAE xmm xmm k xmm
// VFMADD231SS.RZ_SAE xmm xmm xmm
//
// Construct and append a VFMADD231SS.RZ_SAE instruction to the active function.
func (c *Context) VFMADD231SS_RZ_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMADD231SS_RZ_SAE(ops...))
}

// VFMADD231SS_RZ_SAE: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
// VFMADD231SS.RZ_SAE xmm xmm k xmm
// VFMADD231SS.RZ_SAE xmm xmm xmm
//
// Construct and append a VFMADD231SS.RZ_SAE instruction to the active function.
// Operates on the global context.
func VFMADD231SS_RZ_SAE(ops ...operand.Op) { ctx.VFMADD231SS_RZ_SAE(ops...) }

// VFMADD231SS_RZ_SAE_Z: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
// VFMADD231SS.RZ_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMADD231SS.RZ_SAE.Z instruction to the active function.
func (c *Context) VFMADD231SS_RZ_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VFMADD231SS_RZ_SAE_Z(x, x1, k, x2))
}

// VFMADD231SS_RZ_SAE_Z: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
// VFMADD231SS.RZ_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMADD231SS.RZ_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMADD231SS_RZ_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFMADD231SS_RZ_SAE_Z(x, x1, k, x2) }

// VFMADD231SS_Z: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
// VFMADD231SS.Z m32 xmm k xmm
// VFMADD231SS.Z xmm xmm k xmm
//
// Construct and append a VFMADD231SS.Z instruction to the active function.
func (c *Context) VFMADD231SS_Z(mx, x, k, x1 operand.Op) {
	c.addinstruction(x86.VFMADD231SS_Z(mx, x, k, x1))
}

// VFMADD231SS_Z: Fused Multiply-Add of Scalar Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
// VFMADD231SS.Z m32 xmm k xmm
// VFMADD231SS.Z xmm xmm k xmm
//
// Construct and append a VFMADD231SS.Z instruction to the active function.
// Operates on the global context.
func VFMADD231SS_Z(mx, x, k, x1 operand.Op) { ctx.VFMADD231SS_Z(mx, x, k, x1) }
|
|
|
|
// VFMADDSUB132PD: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values.
//
// Forms:
//
//	VFMADDSUB132PD m128 xmm xmm
//	VFMADDSUB132PD m256 ymm ymm
//	VFMADDSUB132PD xmm xmm xmm
//	VFMADDSUB132PD ymm ymm ymm
//	VFMADDSUB132PD m128 xmm k xmm
//	VFMADDSUB132PD m256 ymm k ymm
//	VFMADDSUB132PD xmm xmm k xmm
//	VFMADDSUB132PD ymm ymm k ymm
//	VFMADDSUB132PD m512 zmm k zmm
//	VFMADDSUB132PD m512 zmm zmm
//	VFMADDSUB132PD zmm zmm k zmm
//	VFMADDSUB132PD zmm zmm zmm
//
// Construct and append a VFMADDSUB132PD instruction to the active function.
func (c *Context) VFMADDSUB132PD(ops ...operand.Op) {
	// The operand count and types select among the forms above;
	// construction errors are recorded on the context rather than returned.
	c.addinstruction(x86.VFMADDSUB132PD(ops...))
}

// VFMADDSUB132PD: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values.
//
// Forms:
//
//	VFMADDSUB132PD m128 xmm xmm
//	VFMADDSUB132PD m256 ymm ymm
//	VFMADDSUB132PD xmm xmm xmm
//	VFMADDSUB132PD ymm ymm ymm
//	VFMADDSUB132PD m128 xmm k xmm
//	VFMADDSUB132PD m256 ymm k ymm
//	VFMADDSUB132PD xmm xmm k xmm
//	VFMADDSUB132PD ymm ymm k ymm
//	VFMADDSUB132PD m512 zmm k zmm
//	VFMADDSUB132PD m512 zmm zmm
//	VFMADDSUB132PD zmm zmm k zmm
//	VFMADDSUB132PD zmm zmm zmm
//
// Construct and append a VFMADDSUB132PD instruction to the active function.
// Operates on the global context.
func VFMADDSUB132PD(ops ...operand.Op) { ctx.VFMADDSUB132PD(ops...) }

// VFMADDSUB132PD_BCST: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VFMADDSUB132PD.BCST m64 xmm k xmm
//	VFMADDSUB132PD.BCST m64 xmm xmm
//	VFMADDSUB132PD.BCST m64 ymm k ymm
//	VFMADDSUB132PD.BCST m64 ymm ymm
//	VFMADDSUB132PD.BCST m64 zmm k zmm
//	VFMADDSUB132PD.BCST m64 zmm zmm
//
// Construct and append a VFMADDSUB132PD.BCST instruction to the active function.
func (c *Context) VFMADDSUB132PD_BCST(ops ...operand.Op) {
	// The operand count and types select among the forms above;
	// construction errors are recorded on the context rather than returned.
	c.addinstruction(x86.VFMADDSUB132PD_BCST(ops...))
}

// VFMADDSUB132PD_BCST: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VFMADDSUB132PD.BCST m64 xmm k xmm
//	VFMADDSUB132PD.BCST m64 xmm xmm
//	VFMADDSUB132PD.BCST m64 ymm k ymm
//	VFMADDSUB132PD.BCST m64 ymm ymm
//	VFMADDSUB132PD.BCST m64 zmm k zmm
//	VFMADDSUB132PD.BCST m64 zmm zmm
//
// Construct and append a VFMADDSUB132PD.BCST instruction to the active function.
// Operates on the global context.
func VFMADDSUB132PD_BCST(ops ...operand.Op) { ctx.VFMADDSUB132PD_BCST(ops...) }

// VFMADDSUB132PD_BCST_Z: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VFMADDSUB132PD.BCST.Z m64 xmm k xmm
//	VFMADDSUB132PD.BCST.Z m64 ymm k ymm
//	VFMADDSUB132PD.BCST.Z m64 zmm k zmm
//
// Construct and append a VFMADDSUB132PD.BCST.Z instruction to the active function.
func (c *Context) VFMADDSUB132PD_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	// Construction errors are recorded on the context rather than returned.
	c.addinstruction(x86.VFMADDSUB132PD_BCST_Z(m, xyz, k, xyz1))
}

// VFMADDSUB132PD_BCST_Z: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VFMADDSUB132PD.BCST.Z m64 xmm k xmm
//	VFMADDSUB132PD.BCST.Z m64 ymm k ymm
//	VFMADDSUB132PD.BCST.Z m64 zmm k zmm
//
// Construct and append a VFMADDSUB132PD.BCST.Z instruction to the active function.
// Operates on the global context.
func VFMADDSUB132PD_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VFMADDSUB132PD_BCST_Z(m, xyz, k, xyz1) }

// VFMADDSUB132PD_RD_SAE: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VFMADDSUB132PD.RD_SAE zmm zmm k zmm
//	VFMADDSUB132PD.RD_SAE zmm zmm zmm
//
// Construct and append a VFMADDSUB132PD.RD_SAE instruction to the active function.
func (c *Context) VFMADDSUB132PD_RD_SAE(ops ...operand.Op) {
	// The operand count selects between the masked and unmasked forms;
	// construction errors are recorded on the context rather than returned.
	c.addinstruction(x86.VFMADDSUB132PD_RD_SAE(ops...))
}

// VFMADDSUB132PD_RD_SAE: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VFMADDSUB132PD.RD_SAE zmm zmm k zmm
//	VFMADDSUB132PD.RD_SAE zmm zmm zmm
//
// Construct and append a VFMADDSUB132PD.RD_SAE instruction to the active function.
// Operates on the global context.
func VFMADDSUB132PD_RD_SAE(ops ...operand.Op) { ctx.VFMADDSUB132PD_RD_SAE(ops...) }

// VFMADDSUB132PD_RD_SAE_Z: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VFMADDSUB132PD.RD_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMADDSUB132PD.RD_SAE.Z instruction to the active function.
func (c *Context) VFMADDSUB132PD_RD_SAE_Z(z, z1, k, z2 operand.Op) {
	// Construction errors are recorded on the context rather than returned.
	c.addinstruction(x86.VFMADDSUB132PD_RD_SAE_Z(z, z1, k, z2))
}

// VFMADDSUB132PD_RD_SAE_Z: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VFMADDSUB132PD.RD_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMADDSUB132PD.RD_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMADDSUB132PD_RD_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMADDSUB132PD_RD_SAE_Z(z, z1, k, z2) }

// VFMADDSUB132PD_RN_SAE: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VFMADDSUB132PD.RN_SAE zmm zmm k zmm
//	VFMADDSUB132PD.RN_SAE zmm zmm zmm
//
// Construct and append a VFMADDSUB132PD.RN_SAE instruction to the active function.
func (c *Context) VFMADDSUB132PD_RN_SAE(ops ...operand.Op) {
	// The operand count selects between the masked and unmasked forms;
	// construction errors are recorded on the context rather than returned.
	c.addinstruction(x86.VFMADDSUB132PD_RN_SAE(ops...))
}

// VFMADDSUB132PD_RN_SAE: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VFMADDSUB132PD.RN_SAE zmm zmm k zmm
//	VFMADDSUB132PD.RN_SAE zmm zmm zmm
//
// Construct and append a VFMADDSUB132PD.RN_SAE instruction to the active function.
// Operates on the global context.
func VFMADDSUB132PD_RN_SAE(ops ...operand.Op) { ctx.VFMADDSUB132PD_RN_SAE(ops...) }

// VFMADDSUB132PD_RN_SAE_Z: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VFMADDSUB132PD.RN_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMADDSUB132PD.RN_SAE.Z instruction to the active function.
func (c *Context) VFMADDSUB132PD_RN_SAE_Z(z, z1, k, z2 operand.Op) {
	// Construction errors are recorded on the context rather than returned.
	c.addinstruction(x86.VFMADDSUB132PD_RN_SAE_Z(z, z1, k, z2))
}

// VFMADDSUB132PD_RN_SAE_Z: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VFMADDSUB132PD.RN_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMADDSUB132PD.RN_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMADDSUB132PD_RN_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMADDSUB132PD_RN_SAE_Z(z, z1, k, z2) }

// VFMADDSUB132PD_RU_SAE: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VFMADDSUB132PD.RU_SAE zmm zmm k zmm
//	VFMADDSUB132PD.RU_SAE zmm zmm zmm
//
// Construct and append a VFMADDSUB132PD.RU_SAE instruction to the active function.
func (c *Context) VFMADDSUB132PD_RU_SAE(ops ...operand.Op) {
	// The operand count selects between the masked and unmasked forms;
	// construction errors are recorded on the context rather than returned.
	c.addinstruction(x86.VFMADDSUB132PD_RU_SAE(ops...))
}

// VFMADDSUB132PD_RU_SAE: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VFMADDSUB132PD.RU_SAE zmm zmm k zmm
//	VFMADDSUB132PD.RU_SAE zmm zmm zmm
//
// Construct and append a VFMADDSUB132PD.RU_SAE instruction to the active function.
// Operates on the global context.
func VFMADDSUB132PD_RU_SAE(ops ...operand.Op) { ctx.VFMADDSUB132PD_RU_SAE(ops...) }

// VFMADDSUB132PD_RU_SAE_Z: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VFMADDSUB132PD.RU_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMADDSUB132PD.RU_SAE.Z instruction to the active function.
func (c *Context) VFMADDSUB132PD_RU_SAE_Z(z, z1, k, z2 operand.Op) {
	// Construction errors are recorded on the context rather than returned.
	c.addinstruction(x86.VFMADDSUB132PD_RU_SAE_Z(z, z1, k, z2))
}

// VFMADDSUB132PD_RU_SAE_Z: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VFMADDSUB132PD.RU_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMADDSUB132PD.RU_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMADDSUB132PD_RU_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMADDSUB132PD_RU_SAE_Z(z, z1, k, z2) }

// VFMADDSUB132PD_RZ_SAE: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VFMADDSUB132PD.RZ_SAE zmm zmm k zmm
//	VFMADDSUB132PD.RZ_SAE zmm zmm zmm
//
// Construct and append a VFMADDSUB132PD.RZ_SAE instruction to the active function.
func (c *Context) VFMADDSUB132PD_RZ_SAE(ops ...operand.Op) {
	// The operand count selects between the masked and unmasked forms;
	// construction errors are recorded on the context rather than returned.
	c.addinstruction(x86.VFMADDSUB132PD_RZ_SAE(ops...))
}

// VFMADDSUB132PD_RZ_SAE: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VFMADDSUB132PD.RZ_SAE zmm zmm k zmm
//	VFMADDSUB132PD.RZ_SAE zmm zmm zmm
//
// Construct and append a VFMADDSUB132PD.RZ_SAE instruction to the active function.
// Operates on the global context.
func VFMADDSUB132PD_RZ_SAE(ops ...operand.Op) { ctx.VFMADDSUB132PD_RZ_SAE(ops...) }

// VFMADDSUB132PD_RZ_SAE_Z: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VFMADDSUB132PD.RZ_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMADDSUB132PD.RZ_SAE.Z instruction to the active function.
func (c *Context) VFMADDSUB132PD_RZ_SAE_Z(z, z1, k, z2 operand.Op) {
	// Construction errors are recorded on the context rather than returned.
	c.addinstruction(x86.VFMADDSUB132PD_RZ_SAE_Z(z, z1, k, z2))
}

// VFMADDSUB132PD_RZ_SAE_Z: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VFMADDSUB132PD.RZ_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMADDSUB132PD.RZ_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMADDSUB132PD_RZ_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMADDSUB132PD_RZ_SAE_Z(z, z1, k, z2) }

// VFMADDSUB132PD_Z: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VFMADDSUB132PD.Z m128 xmm k xmm
//	VFMADDSUB132PD.Z m256 ymm k ymm
//	VFMADDSUB132PD.Z xmm xmm k xmm
//	VFMADDSUB132PD.Z ymm ymm k ymm
//	VFMADDSUB132PD.Z m512 zmm k zmm
//	VFMADDSUB132PD.Z zmm zmm k zmm
//
// Construct and append a VFMADDSUB132PD.Z instruction to the active function.
func (c *Context) VFMADDSUB132PD_Z(mxyz, xyz, k, xyz1 operand.Op) {
	// Construction errors are recorded on the context rather than returned.
	c.addinstruction(x86.VFMADDSUB132PD_Z(mxyz, xyz, k, xyz1))
}

// VFMADDSUB132PD_Z: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VFMADDSUB132PD.Z m128 xmm k xmm
//	VFMADDSUB132PD.Z m256 ymm k ymm
//	VFMADDSUB132PD.Z xmm xmm k xmm
//	VFMADDSUB132PD.Z ymm ymm k ymm
//	VFMADDSUB132PD.Z m512 zmm k zmm
//	VFMADDSUB132PD.Z zmm zmm k zmm
//
// Construct and append a VFMADDSUB132PD.Z instruction to the active function.
// Operates on the global context.
func VFMADDSUB132PD_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VFMADDSUB132PD_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VFMADDSUB132PS: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	VFMADDSUB132PS m128 xmm xmm
//	VFMADDSUB132PS m256 ymm ymm
//	VFMADDSUB132PS xmm xmm xmm
//	VFMADDSUB132PS ymm ymm ymm
//	VFMADDSUB132PS m128 xmm k xmm
//	VFMADDSUB132PS m256 ymm k ymm
//	VFMADDSUB132PS xmm xmm k xmm
//	VFMADDSUB132PS ymm ymm k ymm
//	VFMADDSUB132PS m512 zmm k zmm
//	VFMADDSUB132PS m512 zmm zmm
//	VFMADDSUB132PS zmm zmm k zmm
//	VFMADDSUB132PS zmm zmm zmm
//
// Construct and append a VFMADDSUB132PS instruction to the active function.
func (c *Context) VFMADDSUB132PS(ops ...operand.Op) {
	// The operand count and types select among the forms above;
	// construction errors are recorded on the context rather than returned.
	c.addinstruction(x86.VFMADDSUB132PS(ops...))
}

// VFMADDSUB132PS: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	VFMADDSUB132PS m128 xmm xmm
//	VFMADDSUB132PS m256 ymm ymm
//	VFMADDSUB132PS xmm xmm xmm
//	VFMADDSUB132PS ymm ymm ymm
//	VFMADDSUB132PS m128 xmm k xmm
//	VFMADDSUB132PS m256 ymm k ymm
//	VFMADDSUB132PS xmm xmm k xmm
//	VFMADDSUB132PS ymm ymm k ymm
//	VFMADDSUB132PS m512 zmm k zmm
//	VFMADDSUB132PS m512 zmm zmm
//	VFMADDSUB132PS zmm zmm k zmm
//	VFMADDSUB132PS zmm zmm zmm
//
// Construct and append a VFMADDSUB132PS instruction to the active function.
// Operates on the global context.
func VFMADDSUB132PS(ops ...operand.Op) { ctx.VFMADDSUB132PS(ops...) }

// VFMADDSUB132PS_BCST: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VFMADDSUB132PS.BCST m32 xmm k xmm
//	VFMADDSUB132PS.BCST m32 xmm xmm
//	VFMADDSUB132PS.BCST m32 ymm k ymm
//	VFMADDSUB132PS.BCST m32 ymm ymm
//	VFMADDSUB132PS.BCST m32 zmm k zmm
//	VFMADDSUB132PS.BCST m32 zmm zmm
//
// Construct and append a VFMADDSUB132PS.BCST instruction to the active function.
func (c *Context) VFMADDSUB132PS_BCST(ops ...operand.Op) {
	// The operand count and types select among the forms above;
	// construction errors are recorded on the context rather than returned.
	c.addinstruction(x86.VFMADDSUB132PS_BCST(ops...))
}

// VFMADDSUB132PS_BCST: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VFMADDSUB132PS.BCST m32 xmm k xmm
//	VFMADDSUB132PS.BCST m32 xmm xmm
//	VFMADDSUB132PS.BCST m32 ymm k ymm
//	VFMADDSUB132PS.BCST m32 ymm ymm
//	VFMADDSUB132PS.BCST m32 zmm k zmm
//	VFMADDSUB132PS.BCST m32 zmm zmm
//
// Construct and append a VFMADDSUB132PS.BCST instruction to the active function.
// Operates on the global context.
func VFMADDSUB132PS_BCST(ops ...operand.Op) { ctx.VFMADDSUB132PS_BCST(ops...) }

// VFMADDSUB132PS_BCST_Z: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VFMADDSUB132PS.BCST.Z m32 xmm k xmm
//	VFMADDSUB132PS.BCST.Z m32 ymm k ymm
//	VFMADDSUB132PS.BCST.Z m32 zmm k zmm
//
// Construct and append a VFMADDSUB132PS.BCST.Z instruction to the active function.
func (c *Context) VFMADDSUB132PS_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	// Construction errors are recorded on the context rather than returned.
	c.addinstruction(x86.VFMADDSUB132PS_BCST_Z(m, xyz, k, xyz1))
}

// VFMADDSUB132PS_BCST_Z: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VFMADDSUB132PS.BCST.Z m32 xmm k xmm
//	VFMADDSUB132PS.BCST.Z m32 ymm k ymm
//	VFMADDSUB132PS.BCST.Z m32 zmm k zmm
//
// Construct and append a VFMADDSUB132PS.BCST.Z instruction to the active function.
// Operates on the global context.
func VFMADDSUB132PS_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VFMADDSUB132PS_BCST_Z(m, xyz, k, xyz1) }

// VFMADDSUB132PS_RD_SAE: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VFMADDSUB132PS.RD_SAE zmm zmm k zmm
//	VFMADDSUB132PS.RD_SAE zmm zmm zmm
//
// Construct and append a VFMADDSUB132PS.RD_SAE instruction to the active function.
func (c *Context) VFMADDSUB132PS_RD_SAE(ops ...operand.Op) {
	// The operand count selects between the masked and unmasked forms;
	// construction errors are recorded on the context rather than returned.
	c.addinstruction(x86.VFMADDSUB132PS_RD_SAE(ops...))
}

// VFMADDSUB132PS_RD_SAE: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VFMADDSUB132PS.RD_SAE zmm zmm k zmm
//	VFMADDSUB132PS.RD_SAE zmm zmm zmm
//
// Construct and append a VFMADDSUB132PS.RD_SAE instruction to the active function.
// Operates on the global context.
func VFMADDSUB132PS_RD_SAE(ops ...operand.Op) { ctx.VFMADDSUB132PS_RD_SAE(ops...) }

// VFMADDSUB132PS_RD_SAE_Z: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VFMADDSUB132PS.RD_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMADDSUB132PS.RD_SAE.Z instruction to the active function.
func (c *Context) VFMADDSUB132PS_RD_SAE_Z(z, z1, k, z2 operand.Op) {
	// Construction errors are recorded on the context rather than returned.
	c.addinstruction(x86.VFMADDSUB132PS_RD_SAE_Z(z, z1, k, z2))
}

// VFMADDSUB132PS_RD_SAE_Z: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VFMADDSUB132PS.RD_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMADDSUB132PS.RD_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMADDSUB132PS_RD_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMADDSUB132PS_RD_SAE_Z(z, z1, k, z2) }

// VFMADDSUB132PS_RN_SAE: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VFMADDSUB132PS.RN_SAE zmm zmm k zmm
//	VFMADDSUB132PS.RN_SAE zmm zmm zmm
//
// Construct and append a VFMADDSUB132PS.RN_SAE instruction to the active function.
func (c *Context) VFMADDSUB132PS_RN_SAE(ops ...operand.Op) {
	// The operand count selects between the masked and unmasked forms;
	// construction errors are recorded on the context rather than returned.
	c.addinstruction(x86.VFMADDSUB132PS_RN_SAE(ops...))
}

// VFMADDSUB132PS_RN_SAE: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VFMADDSUB132PS.RN_SAE zmm zmm k zmm
//	VFMADDSUB132PS.RN_SAE zmm zmm zmm
//
// Construct and append a VFMADDSUB132PS.RN_SAE instruction to the active function.
// Operates on the global context.
func VFMADDSUB132PS_RN_SAE(ops ...operand.Op) { ctx.VFMADDSUB132PS_RN_SAE(ops...) }

// VFMADDSUB132PS_RN_SAE_Z: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VFMADDSUB132PS.RN_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMADDSUB132PS.RN_SAE.Z instruction to the active function.
func (c *Context) VFMADDSUB132PS_RN_SAE_Z(z, z1, k, z2 operand.Op) {
	// Construction errors are recorded on the context rather than returned.
	c.addinstruction(x86.VFMADDSUB132PS_RN_SAE_Z(z, z1, k, z2))
}

// VFMADDSUB132PS_RN_SAE_Z: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VFMADDSUB132PS.RN_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMADDSUB132PS.RN_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMADDSUB132PS_RN_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMADDSUB132PS_RN_SAE_Z(z, z1, k, z2) }

// VFMADDSUB132PS_RU_SAE: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VFMADDSUB132PS.RU_SAE zmm zmm k zmm
//	VFMADDSUB132PS.RU_SAE zmm zmm zmm
//
// Construct and append a VFMADDSUB132PS.RU_SAE instruction to the active function.
func (c *Context) VFMADDSUB132PS_RU_SAE(ops ...operand.Op) {
	// The operand count selects between the masked and unmasked forms;
	// construction errors are recorded on the context rather than returned.
	c.addinstruction(x86.VFMADDSUB132PS_RU_SAE(ops...))
}

// VFMADDSUB132PS_RU_SAE: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VFMADDSUB132PS.RU_SAE zmm zmm k zmm
//	VFMADDSUB132PS.RU_SAE zmm zmm zmm
//
// Construct and append a VFMADDSUB132PS.RU_SAE instruction to the active function.
// Operates on the global context.
func VFMADDSUB132PS_RU_SAE(ops ...operand.Op) { ctx.VFMADDSUB132PS_RU_SAE(ops...) }

// VFMADDSUB132PS_RU_SAE_Z: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VFMADDSUB132PS.RU_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMADDSUB132PS.RU_SAE.Z instruction to the active function.
func (c *Context) VFMADDSUB132PS_RU_SAE_Z(z, z1, k, z2 operand.Op) {
	// Construction errors are recorded on the context rather than returned.
	c.addinstruction(x86.VFMADDSUB132PS_RU_SAE_Z(z, z1, k, z2))
}

// VFMADDSUB132PS_RU_SAE_Z: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VFMADDSUB132PS.RU_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMADDSUB132PS.RU_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMADDSUB132PS_RU_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMADDSUB132PS_RU_SAE_Z(z, z1, k, z2) }

// VFMADDSUB132PS_RZ_SAE: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VFMADDSUB132PS.RZ_SAE zmm zmm k zmm
//	VFMADDSUB132PS.RZ_SAE zmm zmm zmm
//
// Construct and append a VFMADDSUB132PS.RZ_SAE instruction to the active function.
func (c *Context) VFMADDSUB132PS_RZ_SAE(ops ...operand.Op) {
	// The operand count selects between the masked and unmasked forms;
	// construction errors are recorded on the context rather than returned.
	c.addinstruction(x86.VFMADDSUB132PS_RZ_SAE(ops...))
}

// VFMADDSUB132PS_RZ_SAE: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VFMADDSUB132PS.RZ_SAE zmm zmm k zmm
//	VFMADDSUB132PS.RZ_SAE zmm zmm zmm
//
// Construct and append a VFMADDSUB132PS.RZ_SAE instruction to the active function.
// Operates on the global context.
func VFMADDSUB132PS_RZ_SAE(ops ...operand.Op) { ctx.VFMADDSUB132PS_RZ_SAE(ops...) }

// VFMADDSUB132PS_RZ_SAE_Z: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VFMADDSUB132PS.RZ_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMADDSUB132PS.RZ_SAE.Z instruction to the active function.
func (c *Context) VFMADDSUB132PS_RZ_SAE_Z(z, z1, k, z2 operand.Op) {
	// Construction errors are recorded on the context rather than returned.
	c.addinstruction(x86.VFMADDSUB132PS_RZ_SAE_Z(z, z1, k, z2))
}

// VFMADDSUB132PS_RZ_SAE_Z: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VFMADDSUB132PS.RZ_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMADDSUB132PS.RZ_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMADDSUB132PS_RZ_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMADDSUB132PS_RZ_SAE_Z(z, z1, k, z2) }

// VFMADDSUB132PS_Z: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VFMADDSUB132PS.Z m128 xmm k xmm
//	VFMADDSUB132PS.Z m256 ymm k ymm
//	VFMADDSUB132PS.Z xmm xmm k xmm
//	VFMADDSUB132PS.Z ymm ymm k ymm
//	VFMADDSUB132PS.Z m512 zmm k zmm
//	VFMADDSUB132PS.Z zmm zmm k zmm
//
// Construct and append a VFMADDSUB132PS.Z instruction to the active function.
func (c *Context) VFMADDSUB132PS_Z(mxyz, xyz, k, xyz1 operand.Op) {
	// Construction errors are recorded on the context rather than returned.
	c.addinstruction(x86.VFMADDSUB132PS_Z(mxyz, xyz, k, xyz1))
}

// VFMADDSUB132PS_Z: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VFMADDSUB132PS.Z m128 xmm k xmm
//	VFMADDSUB132PS.Z m256 ymm k ymm
//	VFMADDSUB132PS.Z xmm xmm k xmm
//	VFMADDSUB132PS.Z ymm ymm k ymm
//	VFMADDSUB132PS.Z m512 zmm k zmm
//	VFMADDSUB132PS.Z zmm zmm k zmm
//
// Construct and append a VFMADDSUB132PS.Z instruction to the active function.
// Operates on the global context.
func VFMADDSUB132PS_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VFMADDSUB132PS_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VFMADDSUB213PD: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values.
//
// Forms:
//
//	VFMADDSUB213PD m128 xmm xmm
//	VFMADDSUB213PD m256 ymm ymm
//	VFMADDSUB213PD xmm xmm xmm
//	VFMADDSUB213PD ymm ymm ymm
//	VFMADDSUB213PD m128 xmm k xmm
//	VFMADDSUB213PD m256 ymm k ymm
//	VFMADDSUB213PD xmm xmm k xmm
//	VFMADDSUB213PD ymm ymm k ymm
//	VFMADDSUB213PD m512 zmm k zmm
//	VFMADDSUB213PD m512 zmm zmm
//	VFMADDSUB213PD zmm zmm k zmm
//	VFMADDSUB213PD zmm zmm zmm
//
// Construct and append a VFMADDSUB213PD instruction to the active function.
func (c *Context) VFMADDSUB213PD(ops ...operand.Op) {
	// The operand count and types select among the forms above;
	// construction errors are recorded on the context rather than returned.
	c.addinstruction(x86.VFMADDSUB213PD(ops...))
}

// VFMADDSUB213PD: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values.
//
// Forms:
//
//	VFMADDSUB213PD m128 xmm xmm
//	VFMADDSUB213PD m256 ymm ymm
//	VFMADDSUB213PD xmm xmm xmm
//	VFMADDSUB213PD ymm ymm ymm
//	VFMADDSUB213PD m128 xmm k xmm
//	VFMADDSUB213PD m256 ymm k ymm
//	VFMADDSUB213PD xmm xmm k xmm
//	VFMADDSUB213PD ymm ymm k ymm
//	VFMADDSUB213PD m512 zmm k zmm
//	VFMADDSUB213PD m512 zmm zmm
//	VFMADDSUB213PD zmm zmm k zmm
//	VFMADDSUB213PD zmm zmm zmm
//
// Construct and append a VFMADDSUB213PD instruction to the active function.
// Operates on the global context.
func VFMADDSUB213PD(ops ...operand.Op) { ctx.VFMADDSUB213PD(ops...) }

// VFMADDSUB213PD_BCST: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VFMADDSUB213PD.BCST m64 xmm k xmm
//	VFMADDSUB213PD.BCST m64 xmm xmm
//	VFMADDSUB213PD.BCST m64 ymm k ymm
//	VFMADDSUB213PD.BCST m64 ymm ymm
//	VFMADDSUB213PD.BCST m64 zmm k zmm
//	VFMADDSUB213PD.BCST m64 zmm zmm
//
// Construct and append a VFMADDSUB213PD.BCST instruction to the active function.
func (c *Context) VFMADDSUB213PD_BCST(ops ...operand.Op) {
	// The operand count and types select among the forms above;
	// construction errors are recorded on the context rather than returned.
	c.addinstruction(x86.VFMADDSUB213PD_BCST(ops...))
}

// VFMADDSUB213PD_BCST: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VFMADDSUB213PD.BCST m64 xmm k xmm
//	VFMADDSUB213PD.BCST m64 xmm xmm
//	VFMADDSUB213PD.BCST m64 ymm k ymm
//	VFMADDSUB213PD.BCST m64 ymm ymm
//	VFMADDSUB213PD.BCST m64 zmm k zmm
//	VFMADDSUB213PD.BCST m64 zmm zmm
//
// Construct and append a VFMADDSUB213PD.BCST instruction to the active function.
// Operates on the global context.
func VFMADDSUB213PD_BCST(ops ...operand.Op) { ctx.VFMADDSUB213PD_BCST(ops...) }

// VFMADDSUB213PD_BCST_Z: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VFMADDSUB213PD.BCST.Z m64 xmm k xmm
//	VFMADDSUB213PD.BCST.Z m64 ymm k ymm
//	VFMADDSUB213PD.BCST.Z m64 zmm k zmm
//
// Construct and append a VFMADDSUB213PD.BCST.Z instruction to the active function.
func (c *Context) VFMADDSUB213PD_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	// Construction errors are recorded on the context rather than returned.
	c.addinstruction(x86.VFMADDSUB213PD_BCST_Z(m, xyz, k, xyz1))
}

// VFMADDSUB213PD_BCST_Z: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VFMADDSUB213PD.BCST.Z m64 xmm k xmm
//	VFMADDSUB213PD.BCST.Z m64 ymm k ymm
//	VFMADDSUB213PD.BCST.Z m64 zmm k zmm
//
// Construct and append a VFMADDSUB213PD.BCST.Z instruction to the active function.
// Operates on the global context.
func VFMADDSUB213PD_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VFMADDSUB213PD_BCST_Z(m, xyz, k, xyz1) }

// VFMADDSUB213PD_RD_SAE: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VFMADDSUB213PD.RD_SAE zmm zmm k zmm
//	VFMADDSUB213PD.RD_SAE zmm zmm zmm
//
// Construct and append a VFMADDSUB213PD.RD_SAE instruction to the active function.
func (c *Context) VFMADDSUB213PD_RD_SAE(ops ...operand.Op) {
	// The operand count selects between the masked and unmasked forms;
	// construction errors are recorded on the context rather than returned.
	c.addinstruction(x86.VFMADDSUB213PD_RD_SAE(ops...))
}

// VFMADDSUB213PD_RD_SAE: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VFMADDSUB213PD.RD_SAE zmm zmm k zmm
//	VFMADDSUB213PD.RD_SAE zmm zmm zmm
//
// Construct and append a VFMADDSUB213PD.RD_SAE instruction to the active function.
// Operates on the global context.
func VFMADDSUB213PD_RD_SAE(ops ...operand.Op) { ctx.VFMADDSUB213PD_RD_SAE(ops...) }

// VFMADDSUB213PD_RD_SAE_Z: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VFMADDSUB213PD.RD_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMADDSUB213PD.RD_SAE.Z instruction to the active function.
func (c *Context) VFMADDSUB213PD_RD_SAE_Z(z, z1, k, z2 operand.Op) {
	// Construction errors are recorded on the context rather than returned.
	c.addinstruction(x86.VFMADDSUB213PD_RD_SAE_Z(z, z1, k, z2))
}

// VFMADDSUB213PD_RD_SAE_Z: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VFMADDSUB213PD.RD_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMADDSUB213PD.RD_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMADDSUB213PD_RD_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMADDSUB213PD_RD_SAE_Z(z, z1, k, z2) }

// VFMADDSUB213PD_RN_SAE: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VFMADDSUB213PD.RN_SAE zmm zmm k zmm
//	VFMADDSUB213PD.RN_SAE zmm zmm zmm
//
// Construct and append a VFMADDSUB213PD.RN_SAE instruction to the active function.
func (c *Context) VFMADDSUB213PD_RN_SAE(ops ...operand.Op) {
	// The operand count selects between the masked and unmasked forms;
	// construction errors are recorded on the context rather than returned.
	c.addinstruction(x86.VFMADDSUB213PD_RN_SAE(ops...))
}

// VFMADDSUB213PD_RN_SAE: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VFMADDSUB213PD.RN_SAE zmm zmm k zmm
//	VFMADDSUB213PD.RN_SAE zmm zmm zmm
//
// Construct and append a VFMADDSUB213PD.RN_SAE instruction to the active function.
// Operates on the global context.
func VFMADDSUB213PD_RN_SAE(ops ...operand.Op) { ctx.VFMADDSUB213PD_RN_SAE(ops...) }

// VFMADDSUB213PD_RN_SAE_Z: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VFMADDSUB213PD.RN_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMADDSUB213PD.RN_SAE.Z instruction to the active function.
func (c *Context) VFMADDSUB213PD_RN_SAE_Z(z, z1, k, z2 operand.Op) {
	// Construction errors are recorded on the context rather than returned.
	c.addinstruction(x86.VFMADDSUB213PD_RN_SAE_Z(z, z1, k, z2))
}
|
|
|
|
// VFMADDSUB213PD_RN_SAE_Z: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB213PD.RN_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADDSUB213PD.RN_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADDSUB213PD_RN_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMADDSUB213PD_RN_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMADDSUB213PD_RU_SAE: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB213PD.RU_SAE zmm zmm k zmm
|
|
// VFMADDSUB213PD.RU_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADDSUB213PD.RU_SAE instruction to the active function.
|
|
func (c *Context) VFMADDSUB213PD_RU_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMADDSUB213PD_RU_SAE(ops...))
|
|
}
|
|
|
|
// VFMADDSUB213PD_RU_SAE: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB213PD.RU_SAE zmm zmm k zmm
|
|
// VFMADDSUB213PD.RU_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADDSUB213PD.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADDSUB213PD_RU_SAE(ops ...operand.Op) { ctx.VFMADDSUB213PD_RU_SAE(ops...) }
|
|
|
|
// VFMADDSUB213PD_RU_SAE_Z: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB213PD.RU_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADDSUB213PD.RU_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMADDSUB213PD_RU_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFMADDSUB213PD_RU_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFMADDSUB213PD_RU_SAE_Z: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB213PD.RU_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADDSUB213PD.RU_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADDSUB213PD_RU_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMADDSUB213PD_RU_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMADDSUB213PD_RZ_SAE: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB213PD.RZ_SAE zmm zmm k zmm
|
|
// VFMADDSUB213PD.RZ_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADDSUB213PD.RZ_SAE instruction to the active function.
|
|
func (c *Context) VFMADDSUB213PD_RZ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMADDSUB213PD_RZ_SAE(ops...))
|
|
}
|
|
|
|
// VFMADDSUB213PD_RZ_SAE: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB213PD.RZ_SAE zmm zmm k zmm
|
|
// VFMADDSUB213PD.RZ_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADDSUB213PD.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADDSUB213PD_RZ_SAE(ops ...operand.Op) { ctx.VFMADDSUB213PD_RZ_SAE(ops...) }
|
|
|
|
// VFMADDSUB213PD_RZ_SAE_Z: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB213PD.RZ_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADDSUB213PD.RZ_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMADDSUB213PD_RZ_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFMADDSUB213PD_RZ_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFMADDSUB213PD_RZ_SAE_Z: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB213PD.RZ_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADDSUB213PD.RZ_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADDSUB213PD_RZ_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMADDSUB213PD_RZ_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMADDSUB213PD_Z: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB213PD.Z m128 xmm k xmm
|
|
// VFMADDSUB213PD.Z m256 ymm k ymm
|
|
// VFMADDSUB213PD.Z xmm xmm k xmm
|
|
// VFMADDSUB213PD.Z ymm ymm k ymm
|
|
// VFMADDSUB213PD.Z m512 zmm k zmm
|
|
// VFMADDSUB213PD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADDSUB213PD.Z instruction to the active function.
|
|
func (c *Context) VFMADDSUB213PD_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VFMADDSUB213PD_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VFMADDSUB213PD_Z: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB213PD.Z m128 xmm k xmm
|
|
// VFMADDSUB213PD.Z m256 ymm k ymm
|
|
// VFMADDSUB213PD.Z xmm xmm k xmm
|
|
// VFMADDSUB213PD.Z ymm ymm k ymm
|
|
// VFMADDSUB213PD.Z m512 zmm k zmm
|
|
// VFMADDSUB213PD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADDSUB213PD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADDSUB213PD_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VFMADDSUB213PD_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VFMADDSUB213PS: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB213PS m128 xmm xmm
|
|
// VFMADDSUB213PS m256 ymm ymm
|
|
// VFMADDSUB213PS xmm xmm xmm
|
|
// VFMADDSUB213PS ymm ymm ymm
|
|
// VFMADDSUB213PS m128 xmm k xmm
|
|
// VFMADDSUB213PS m256 ymm k ymm
|
|
// VFMADDSUB213PS xmm xmm k xmm
|
|
// VFMADDSUB213PS ymm ymm k ymm
|
|
// VFMADDSUB213PS m512 zmm k zmm
|
|
// VFMADDSUB213PS m512 zmm zmm
|
|
// VFMADDSUB213PS zmm zmm k zmm
|
|
// VFMADDSUB213PS zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADDSUB213PS instruction to the active function.
|
|
func (c *Context) VFMADDSUB213PS(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMADDSUB213PS(ops...))
|
|
}
|
|
|
|
// VFMADDSUB213PS: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB213PS m128 xmm xmm
|
|
// VFMADDSUB213PS m256 ymm ymm
|
|
// VFMADDSUB213PS xmm xmm xmm
|
|
// VFMADDSUB213PS ymm ymm ymm
|
|
// VFMADDSUB213PS m128 xmm k xmm
|
|
// VFMADDSUB213PS m256 ymm k ymm
|
|
// VFMADDSUB213PS xmm xmm k xmm
|
|
// VFMADDSUB213PS ymm ymm k ymm
|
|
// VFMADDSUB213PS m512 zmm k zmm
|
|
// VFMADDSUB213PS m512 zmm zmm
|
|
// VFMADDSUB213PS zmm zmm k zmm
|
|
// VFMADDSUB213PS zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADDSUB213PS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADDSUB213PS(ops ...operand.Op) { ctx.VFMADDSUB213PS(ops...) }
|
|
|
|
// VFMADDSUB213PS_BCST: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB213PS.BCST m32 xmm k xmm
|
|
// VFMADDSUB213PS.BCST m32 xmm xmm
|
|
// VFMADDSUB213PS.BCST m32 ymm k ymm
|
|
// VFMADDSUB213PS.BCST m32 ymm ymm
|
|
// VFMADDSUB213PS.BCST m32 zmm k zmm
|
|
// VFMADDSUB213PS.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VFMADDSUB213PS.BCST instruction to the active function.
|
|
func (c *Context) VFMADDSUB213PS_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMADDSUB213PS_BCST(ops...))
|
|
}
|
|
|
|
// VFMADDSUB213PS_BCST: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB213PS.BCST m32 xmm k xmm
|
|
// VFMADDSUB213PS.BCST m32 xmm xmm
|
|
// VFMADDSUB213PS.BCST m32 ymm k ymm
|
|
// VFMADDSUB213PS.BCST m32 ymm ymm
|
|
// VFMADDSUB213PS.BCST m32 zmm k zmm
|
|
// VFMADDSUB213PS.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VFMADDSUB213PS.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADDSUB213PS_BCST(ops ...operand.Op) { ctx.VFMADDSUB213PS_BCST(ops...) }
|
|
|
|
// VFMADDSUB213PS_BCST_Z: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB213PS.BCST.Z m32 xmm k xmm
|
|
// VFMADDSUB213PS.BCST.Z m32 ymm k ymm
|
|
// VFMADDSUB213PS.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VFMADDSUB213PS.BCST.Z instruction to the active function.
|
|
func (c *Context) VFMADDSUB213PS_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VFMADDSUB213PS_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VFMADDSUB213PS_BCST_Z: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB213PS.BCST.Z m32 xmm k xmm
|
|
// VFMADDSUB213PS.BCST.Z m32 ymm k ymm
|
|
// VFMADDSUB213PS.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VFMADDSUB213PS.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADDSUB213PS_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VFMADDSUB213PS_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VFMADDSUB213PS_RD_SAE: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB213PS.RD_SAE zmm zmm k zmm
|
|
// VFMADDSUB213PS.RD_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADDSUB213PS.RD_SAE instruction to the active function.
|
|
func (c *Context) VFMADDSUB213PS_RD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMADDSUB213PS_RD_SAE(ops...))
|
|
}
|
|
|
|
// VFMADDSUB213PS_RD_SAE: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB213PS.RD_SAE zmm zmm k zmm
|
|
// VFMADDSUB213PS.RD_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADDSUB213PS.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADDSUB213PS_RD_SAE(ops ...operand.Op) { ctx.VFMADDSUB213PS_RD_SAE(ops...) }
|
|
|
|
// VFMADDSUB213PS_RD_SAE_Z: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB213PS.RD_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADDSUB213PS.RD_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMADDSUB213PS_RD_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFMADDSUB213PS_RD_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFMADDSUB213PS_RD_SAE_Z: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB213PS.RD_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADDSUB213PS.RD_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADDSUB213PS_RD_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMADDSUB213PS_RD_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMADDSUB213PS_RN_SAE: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB213PS.RN_SAE zmm zmm k zmm
|
|
// VFMADDSUB213PS.RN_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADDSUB213PS.RN_SAE instruction to the active function.
|
|
func (c *Context) VFMADDSUB213PS_RN_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMADDSUB213PS_RN_SAE(ops...))
|
|
}
|
|
|
|
// VFMADDSUB213PS_RN_SAE: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB213PS.RN_SAE zmm zmm k zmm
|
|
// VFMADDSUB213PS.RN_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADDSUB213PS.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADDSUB213PS_RN_SAE(ops ...operand.Op) { ctx.VFMADDSUB213PS_RN_SAE(ops...) }
|
|
|
|
// VFMADDSUB213PS_RN_SAE_Z: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB213PS.RN_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADDSUB213PS.RN_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMADDSUB213PS_RN_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFMADDSUB213PS_RN_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFMADDSUB213PS_RN_SAE_Z: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB213PS.RN_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADDSUB213PS.RN_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADDSUB213PS_RN_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMADDSUB213PS_RN_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMADDSUB213PS_RU_SAE: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB213PS.RU_SAE zmm zmm k zmm
|
|
// VFMADDSUB213PS.RU_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADDSUB213PS.RU_SAE instruction to the active function.
|
|
func (c *Context) VFMADDSUB213PS_RU_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMADDSUB213PS_RU_SAE(ops...))
|
|
}
|
|
|
|
// VFMADDSUB213PS_RU_SAE: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB213PS.RU_SAE zmm zmm k zmm
|
|
// VFMADDSUB213PS.RU_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADDSUB213PS.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADDSUB213PS_RU_SAE(ops ...operand.Op) { ctx.VFMADDSUB213PS_RU_SAE(ops...) }
|
|
|
|
// VFMADDSUB213PS_RU_SAE_Z: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB213PS.RU_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADDSUB213PS.RU_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMADDSUB213PS_RU_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFMADDSUB213PS_RU_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFMADDSUB213PS_RU_SAE_Z: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB213PS.RU_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADDSUB213PS.RU_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADDSUB213PS_RU_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMADDSUB213PS_RU_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMADDSUB213PS_RZ_SAE: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB213PS.RZ_SAE zmm zmm k zmm
|
|
// VFMADDSUB213PS.RZ_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADDSUB213PS.RZ_SAE instruction to the active function.
|
|
func (c *Context) VFMADDSUB213PS_RZ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMADDSUB213PS_RZ_SAE(ops...))
|
|
}
|
|
|
|
// VFMADDSUB213PS_RZ_SAE: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB213PS.RZ_SAE zmm zmm k zmm
|
|
// VFMADDSUB213PS.RZ_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADDSUB213PS.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADDSUB213PS_RZ_SAE(ops ...operand.Op) { ctx.VFMADDSUB213PS_RZ_SAE(ops...) }
|
|
|
|
// VFMADDSUB213PS_RZ_SAE_Z: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB213PS.RZ_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADDSUB213PS.RZ_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMADDSUB213PS_RZ_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFMADDSUB213PS_RZ_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFMADDSUB213PS_RZ_SAE_Z: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB213PS.RZ_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADDSUB213PS.RZ_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADDSUB213PS_RZ_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMADDSUB213PS_RZ_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMADDSUB213PS_Z: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB213PS.Z m128 xmm k xmm
|
|
// VFMADDSUB213PS.Z m256 ymm k ymm
|
|
// VFMADDSUB213PS.Z xmm xmm k xmm
|
|
// VFMADDSUB213PS.Z ymm ymm k ymm
|
|
// VFMADDSUB213PS.Z m512 zmm k zmm
|
|
// VFMADDSUB213PS.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADDSUB213PS.Z instruction to the active function.
|
|
func (c *Context) VFMADDSUB213PS_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VFMADDSUB213PS_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VFMADDSUB213PS_Z: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB213PS.Z m128 xmm k xmm
|
|
// VFMADDSUB213PS.Z m256 ymm k ymm
|
|
// VFMADDSUB213PS.Z xmm xmm k xmm
|
|
// VFMADDSUB213PS.Z ymm ymm k ymm
|
|
// VFMADDSUB213PS.Z m512 zmm k zmm
|
|
// VFMADDSUB213PS.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADDSUB213PS.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADDSUB213PS_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VFMADDSUB213PS_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VFMADDSUB231PD: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB231PD m128 xmm xmm
|
|
// VFMADDSUB231PD m256 ymm ymm
|
|
// VFMADDSUB231PD xmm xmm xmm
|
|
// VFMADDSUB231PD ymm ymm ymm
|
|
// VFMADDSUB231PD m128 xmm k xmm
|
|
// VFMADDSUB231PD m256 ymm k ymm
|
|
// VFMADDSUB231PD xmm xmm k xmm
|
|
// VFMADDSUB231PD ymm ymm k ymm
|
|
// VFMADDSUB231PD m512 zmm k zmm
|
|
// VFMADDSUB231PD m512 zmm zmm
|
|
// VFMADDSUB231PD zmm zmm k zmm
|
|
// VFMADDSUB231PD zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADDSUB231PD instruction to the active function.
|
|
func (c *Context) VFMADDSUB231PD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMADDSUB231PD(ops...))
|
|
}
|
|
|
|
// VFMADDSUB231PD: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB231PD m128 xmm xmm
|
|
// VFMADDSUB231PD m256 ymm ymm
|
|
// VFMADDSUB231PD xmm xmm xmm
|
|
// VFMADDSUB231PD ymm ymm ymm
|
|
// VFMADDSUB231PD m128 xmm k xmm
|
|
// VFMADDSUB231PD m256 ymm k ymm
|
|
// VFMADDSUB231PD xmm xmm k xmm
|
|
// VFMADDSUB231PD ymm ymm k ymm
|
|
// VFMADDSUB231PD m512 zmm k zmm
|
|
// VFMADDSUB231PD m512 zmm zmm
|
|
// VFMADDSUB231PD zmm zmm k zmm
|
|
// VFMADDSUB231PD zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADDSUB231PD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADDSUB231PD(ops ...operand.Op) { ctx.VFMADDSUB231PD(ops...) }
|
|
|
|
// VFMADDSUB231PD_BCST: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB231PD.BCST m64 xmm k xmm
|
|
// VFMADDSUB231PD.BCST m64 xmm xmm
|
|
// VFMADDSUB231PD.BCST m64 ymm k ymm
|
|
// VFMADDSUB231PD.BCST m64 ymm ymm
|
|
// VFMADDSUB231PD.BCST m64 zmm k zmm
|
|
// VFMADDSUB231PD.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VFMADDSUB231PD.BCST instruction to the active function.
|
|
func (c *Context) VFMADDSUB231PD_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMADDSUB231PD_BCST(ops...))
|
|
}
|
|
|
|
// VFMADDSUB231PD_BCST: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB231PD.BCST m64 xmm k xmm
|
|
// VFMADDSUB231PD.BCST m64 xmm xmm
|
|
// VFMADDSUB231PD.BCST m64 ymm k ymm
|
|
// VFMADDSUB231PD.BCST m64 ymm ymm
|
|
// VFMADDSUB231PD.BCST m64 zmm k zmm
|
|
// VFMADDSUB231PD.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VFMADDSUB231PD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADDSUB231PD_BCST(ops ...operand.Op) { ctx.VFMADDSUB231PD_BCST(ops...) }
|
|
|
|
// VFMADDSUB231PD_BCST_Z: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB231PD.BCST.Z m64 xmm k xmm
|
|
// VFMADDSUB231PD.BCST.Z m64 ymm k ymm
|
|
// VFMADDSUB231PD.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VFMADDSUB231PD.BCST.Z instruction to the active function.
|
|
func (c *Context) VFMADDSUB231PD_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VFMADDSUB231PD_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VFMADDSUB231PD_BCST_Z: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB231PD.BCST.Z m64 xmm k xmm
|
|
// VFMADDSUB231PD.BCST.Z m64 ymm k ymm
|
|
// VFMADDSUB231PD.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VFMADDSUB231PD.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADDSUB231PD_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VFMADDSUB231PD_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VFMADDSUB231PD_RD_SAE: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB231PD.RD_SAE zmm zmm k zmm
|
|
// VFMADDSUB231PD.RD_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADDSUB231PD.RD_SAE instruction to the active function.
|
|
func (c *Context) VFMADDSUB231PD_RD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMADDSUB231PD_RD_SAE(ops...))
|
|
}
|
|
|
|
// VFMADDSUB231PD_RD_SAE: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB231PD.RD_SAE zmm zmm k zmm
|
|
// VFMADDSUB231PD.RD_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADDSUB231PD.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADDSUB231PD_RD_SAE(ops ...operand.Op) { ctx.VFMADDSUB231PD_RD_SAE(ops...) }
|
|
|
|
// VFMADDSUB231PD_RD_SAE_Z: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB231PD.RD_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADDSUB231PD.RD_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMADDSUB231PD_RD_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFMADDSUB231PD_RD_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFMADDSUB231PD_RD_SAE_Z: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB231PD.RD_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADDSUB231PD.RD_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADDSUB231PD_RD_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMADDSUB231PD_RD_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMADDSUB231PD_RN_SAE: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB231PD.RN_SAE zmm zmm k zmm
|
|
// VFMADDSUB231PD.RN_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADDSUB231PD.RN_SAE instruction to the active function.
|
|
func (c *Context) VFMADDSUB231PD_RN_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMADDSUB231PD_RN_SAE(ops...))
|
|
}
|
|
|
|
// VFMADDSUB231PD_RN_SAE: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB231PD.RN_SAE zmm zmm k zmm
|
|
// VFMADDSUB231PD.RN_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADDSUB231PD.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADDSUB231PD_RN_SAE(ops ...operand.Op) { ctx.VFMADDSUB231PD_RN_SAE(ops...) }
|
|
|
|
// VFMADDSUB231PD_RN_SAE_Z: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB231PD.RN_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADDSUB231PD.RN_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMADDSUB231PD_RN_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFMADDSUB231PD_RN_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFMADDSUB231PD_RN_SAE_Z: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB231PD.RN_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADDSUB231PD.RN_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADDSUB231PD_RN_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMADDSUB231PD_RN_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMADDSUB231PD_RU_SAE: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB231PD.RU_SAE zmm zmm k zmm
|
|
// VFMADDSUB231PD.RU_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADDSUB231PD.RU_SAE instruction to the active function.
|
|
func (c *Context) VFMADDSUB231PD_RU_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMADDSUB231PD_RU_SAE(ops...))
|
|
}
|
|
|
|
// VFMADDSUB231PD_RU_SAE: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB231PD.RU_SAE zmm zmm k zmm
|
|
// VFMADDSUB231PD.RU_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADDSUB231PD.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADDSUB231PD_RU_SAE(ops ...operand.Op) { ctx.VFMADDSUB231PD_RU_SAE(ops...) }
|
|
|
|
// VFMADDSUB231PD_RU_SAE_Z: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB231PD.RU_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADDSUB231PD.RU_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMADDSUB231PD_RU_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFMADDSUB231PD_RU_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFMADDSUB231PD_RU_SAE_Z: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB231PD.RU_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADDSUB231PD.RU_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADDSUB231PD_RU_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMADDSUB231PD_RU_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMADDSUB231PD_RZ_SAE: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB231PD.RZ_SAE zmm zmm k zmm
|
|
// VFMADDSUB231PD.RZ_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADDSUB231PD.RZ_SAE instruction to the active function.
|
|
func (c *Context) VFMADDSUB231PD_RZ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMADDSUB231PD_RZ_SAE(ops...))
|
|
}
|
|
|
|
// VFMADDSUB231PD_RZ_SAE: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB231PD.RZ_SAE zmm zmm k zmm
|
|
// VFMADDSUB231PD.RZ_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADDSUB231PD.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADDSUB231PD_RZ_SAE(ops ...operand.Op) { ctx.VFMADDSUB231PD_RZ_SAE(ops...) }
|
|
|
|
// VFMADDSUB231PD_RZ_SAE_Z: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB231PD.RZ_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADDSUB231PD.RZ_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMADDSUB231PD_RZ_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFMADDSUB231PD_RZ_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFMADDSUB231PD_RZ_SAE_Z: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB231PD.RZ_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADDSUB231PD.RZ_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADDSUB231PD_RZ_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMADDSUB231PD_RZ_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMADDSUB231PD_Z: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB231PD.Z m128 xmm k xmm
|
|
// VFMADDSUB231PD.Z m256 ymm k ymm
|
|
// VFMADDSUB231PD.Z xmm xmm k xmm
|
|
// VFMADDSUB231PD.Z ymm ymm k ymm
|
|
// VFMADDSUB231PD.Z m512 zmm k zmm
|
|
// VFMADDSUB231PD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADDSUB231PD.Z instruction to the active function.
|
|
func (c *Context) VFMADDSUB231PD_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VFMADDSUB231PD_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VFMADDSUB231PD_Z: Fused Multiply-Alternating Add/Subtract of Packed Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB231PD.Z m128 xmm k xmm
|
|
// VFMADDSUB231PD.Z m256 ymm k ymm
|
|
// VFMADDSUB231PD.Z xmm xmm k xmm
|
|
// VFMADDSUB231PD.Z ymm ymm k ymm
|
|
// VFMADDSUB231PD.Z m512 zmm k zmm
|
|
// VFMADDSUB231PD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADDSUB231PD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADDSUB231PD_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VFMADDSUB231PD_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VFMADDSUB231PS: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB231PS m128 xmm xmm
|
|
// VFMADDSUB231PS m256 ymm ymm
|
|
// VFMADDSUB231PS xmm xmm xmm
|
|
// VFMADDSUB231PS ymm ymm ymm
|
|
// VFMADDSUB231PS m128 xmm k xmm
|
|
// VFMADDSUB231PS m256 ymm k ymm
|
|
// VFMADDSUB231PS xmm xmm k xmm
|
|
// VFMADDSUB231PS ymm ymm k ymm
|
|
// VFMADDSUB231PS m512 zmm k zmm
|
|
// VFMADDSUB231PS m512 zmm zmm
|
|
// VFMADDSUB231PS zmm zmm k zmm
|
|
// VFMADDSUB231PS zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADDSUB231PS instruction to the active function.
|
|
func (c *Context) VFMADDSUB231PS(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMADDSUB231PS(ops...))
|
|
}
|
|
|
|
// VFMADDSUB231PS: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB231PS m128 xmm xmm
|
|
// VFMADDSUB231PS m256 ymm ymm
|
|
// VFMADDSUB231PS xmm xmm xmm
|
|
// VFMADDSUB231PS ymm ymm ymm
|
|
// VFMADDSUB231PS m128 xmm k xmm
|
|
// VFMADDSUB231PS m256 ymm k ymm
|
|
// VFMADDSUB231PS xmm xmm k xmm
|
|
// VFMADDSUB231PS ymm ymm k ymm
|
|
// VFMADDSUB231PS m512 zmm k zmm
|
|
// VFMADDSUB231PS m512 zmm zmm
|
|
// VFMADDSUB231PS zmm zmm k zmm
|
|
// VFMADDSUB231PS zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADDSUB231PS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADDSUB231PS(ops ...operand.Op) { ctx.VFMADDSUB231PS(ops...) }
|
|
|
|
// VFMADDSUB231PS_BCST: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB231PS.BCST m32 xmm k xmm
|
|
// VFMADDSUB231PS.BCST m32 xmm xmm
|
|
// VFMADDSUB231PS.BCST m32 ymm k ymm
|
|
// VFMADDSUB231PS.BCST m32 ymm ymm
|
|
// VFMADDSUB231PS.BCST m32 zmm k zmm
|
|
// VFMADDSUB231PS.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VFMADDSUB231PS.BCST instruction to the active function.
|
|
func (c *Context) VFMADDSUB231PS_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMADDSUB231PS_BCST(ops...))
|
|
}
|
|
|
|
// VFMADDSUB231PS_BCST: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB231PS.BCST m32 xmm k xmm
|
|
// VFMADDSUB231PS.BCST m32 xmm xmm
|
|
// VFMADDSUB231PS.BCST m32 ymm k ymm
|
|
// VFMADDSUB231PS.BCST m32 ymm ymm
|
|
// VFMADDSUB231PS.BCST m32 zmm k zmm
|
|
// VFMADDSUB231PS.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VFMADDSUB231PS.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADDSUB231PS_BCST(ops ...operand.Op) { ctx.VFMADDSUB231PS_BCST(ops...) }
|
|
|
|
// VFMADDSUB231PS_BCST_Z: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB231PS.BCST.Z m32 xmm k xmm
|
|
// VFMADDSUB231PS.BCST.Z m32 ymm k ymm
|
|
// VFMADDSUB231PS.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VFMADDSUB231PS.BCST.Z instruction to the active function.
|
|
func (c *Context) VFMADDSUB231PS_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VFMADDSUB231PS_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VFMADDSUB231PS_BCST_Z: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB231PS.BCST.Z m32 xmm k xmm
|
|
// VFMADDSUB231PS.BCST.Z m32 ymm k ymm
|
|
// VFMADDSUB231PS.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VFMADDSUB231PS.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADDSUB231PS_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VFMADDSUB231PS_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VFMADDSUB231PS_RD_SAE: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB231PS.RD_SAE zmm zmm k zmm
|
|
// VFMADDSUB231PS.RD_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADDSUB231PS.RD_SAE instruction to the active function.
|
|
func (c *Context) VFMADDSUB231PS_RD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMADDSUB231PS_RD_SAE(ops...))
|
|
}
|
|
|
|
// VFMADDSUB231PS_RD_SAE: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB231PS.RD_SAE zmm zmm k zmm
|
|
// VFMADDSUB231PS.RD_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADDSUB231PS.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADDSUB231PS_RD_SAE(ops ...operand.Op) { ctx.VFMADDSUB231PS_RD_SAE(ops...) }
|
|
|
|
// VFMADDSUB231PS_RD_SAE_Z: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB231PS.RD_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADDSUB231PS.RD_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMADDSUB231PS_RD_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFMADDSUB231PS_RD_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFMADDSUB231PS_RD_SAE_Z: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB231PS.RD_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADDSUB231PS.RD_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADDSUB231PS_RD_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMADDSUB231PS_RD_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMADDSUB231PS_RN_SAE: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB231PS.RN_SAE zmm zmm k zmm
|
|
// VFMADDSUB231PS.RN_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADDSUB231PS.RN_SAE instruction to the active function.
|
|
func (c *Context) VFMADDSUB231PS_RN_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMADDSUB231PS_RN_SAE(ops...))
|
|
}
|
|
|
|
// VFMADDSUB231PS_RN_SAE: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB231PS.RN_SAE zmm zmm k zmm
|
|
// VFMADDSUB231PS.RN_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADDSUB231PS.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADDSUB231PS_RN_SAE(ops ...operand.Op) { ctx.VFMADDSUB231PS_RN_SAE(ops...) }
|
|
|
|
// VFMADDSUB231PS_RN_SAE_Z: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB231PS.RN_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADDSUB231PS.RN_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMADDSUB231PS_RN_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFMADDSUB231PS_RN_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFMADDSUB231PS_RN_SAE_Z: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB231PS.RN_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADDSUB231PS.RN_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADDSUB231PS_RN_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMADDSUB231PS_RN_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMADDSUB231PS_RU_SAE: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB231PS.RU_SAE zmm zmm k zmm
|
|
// VFMADDSUB231PS.RU_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADDSUB231PS.RU_SAE instruction to the active function.
|
|
func (c *Context) VFMADDSUB231PS_RU_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMADDSUB231PS_RU_SAE(ops...))
|
|
}
|
|
|
|
// VFMADDSUB231PS_RU_SAE: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB231PS.RU_SAE zmm zmm k zmm
|
|
// VFMADDSUB231PS.RU_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADDSUB231PS.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADDSUB231PS_RU_SAE(ops ...operand.Op) { ctx.VFMADDSUB231PS_RU_SAE(ops...) }
|
|
|
|
// VFMADDSUB231PS_RU_SAE_Z: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB231PS.RU_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADDSUB231PS.RU_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMADDSUB231PS_RU_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFMADDSUB231PS_RU_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFMADDSUB231PS_RU_SAE_Z: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB231PS.RU_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADDSUB231PS.RU_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADDSUB231PS_RU_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMADDSUB231PS_RU_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMADDSUB231PS_RZ_SAE: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB231PS.RZ_SAE zmm zmm k zmm
|
|
// VFMADDSUB231PS.RZ_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADDSUB231PS.RZ_SAE instruction to the active function.
|
|
func (c *Context) VFMADDSUB231PS_RZ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMADDSUB231PS_RZ_SAE(ops...))
|
|
}
|
|
|
|
// VFMADDSUB231PS_RZ_SAE: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB231PS.RZ_SAE zmm zmm k zmm
|
|
// VFMADDSUB231PS.RZ_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMADDSUB231PS.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADDSUB231PS_RZ_SAE(ops ...operand.Op) { ctx.VFMADDSUB231PS_RZ_SAE(ops...) }
|
|
|
|
// VFMADDSUB231PS_RZ_SAE_Z: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB231PS.RZ_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADDSUB231PS.RZ_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMADDSUB231PS_RZ_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFMADDSUB231PS_RZ_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFMADDSUB231PS_RZ_SAE_Z: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB231PS.RZ_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADDSUB231PS.RZ_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADDSUB231PS_RZ_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMADDSUB231PS_RZ_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMADDSUB231PS_Z: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB231PS.Z m128 xmm k xmm
|
|
// VFMADDSUB231PS.Z m256 ymm k ymm
|
|
// VFMADDSUB231PS.Z xmm xmm k xmm
|
|
// VFMADDSUB231PS.Z ymm ymm k ymm
|
|
// VFMADDSUB231PS.Z m512 zmm k zmm
|
|
// VFMADDSUB231PS.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADDSUB231PS.Z instruction to the active function.
|
|
func (c *Context) VFMADDSUB231PS_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VFMADDSUB231PS_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VFMADDSUB231PS_Z: Fused Multiply-Alternating Add/Subtract of Packed Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMADDSUB231PS.Z m128 xmm k xmm
|
|
// VFMADDSUB231PS.Z m256 ymm k ymm
|
|
// VFMADDSUB231PS.Z xmm xmm k xmm
|
|
// VFMADDSUB231PS.Z ymm ymm k ymm
|
|
// VFMADDSUB231PS.Z m512 zmm k zmm
|
|
// VFMADDSUB231PS.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMADDSUB231PS.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMADDSUB231PS_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VFMADDSUB231PS_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VFMSUB132PD: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB132PD m128 xmm xmm
|
|
// VFMSUB132PD m256 ymm ymm
|
|
// VFMSUB132PD xmm xmm xmm
|
|
// VFMSUB132PD ymm ymm ymm
|
|
// VFMSUB132PD m128 xmm k xmm
|
|
// VFMSUB132PD m256 ymm k ymm
|
|
// VFMSUB132PD xmm xmm k xmm
|
|
// VFMSUB132PD ymm ymm k ymm
|
|
// VFMSUB132PD m512 zmm k zmm
|
|
// VFMSUB132PD m512 zmm zmm
|
|
// VFMSUB132PD zmm zmm k zmm
|
|
// VFMSUB132PD zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB132PD instruction to the active function.
|
|
func (c *Context) VFMSUB132PD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMSUB132PD(ops...))
|
|
}
|
|
|
|
// VFMSUB132PD: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB132PD m128 xmm xmm
|
|
// VFMSUB132PD m256 ymm ymm
|
|
// VFMSUB132PD xmm xmm xmm
|
|
// VFMSUB132PD ymm ymm ymm
|
|
// VFMSUB132PD m128 xmm k xmm
|
|
// VFMSUB132PD m256 ymm k ymm
|
|
// VFMSUB132PD xmm xmm k xmm
|
|
// VFMSUB132PD ymm ymm k ymm
|
|
// VFMSUB132PD m512 zmm k zmm
|
|
// VFMSUB132PD m512 zmm zmm
|
|
// VFMSUB132PD zmm zmm k zmm
|
|
// VFMSUB132PD zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB132PD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB132PD(ops ...operand.Op) { ctx.VFMSUB132PD(ops...) }
|
|
|
|
// VFMSUB132PD_BCST: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB132PD.BCST m64 xmm k xmm
|
|
// VFMSUB132PD.BCST m64 xmm xmm
|
|
// VFMSUB132PD.BCST m64 ymm k ymm
|
|
// VFMSUB132PD.BCST m64 ymm ymm
|
|
// VFMSUB132PD.BCST m64 zmm k zmm
|
|
// VFMSUB132PD.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB132PD.BCST instruction to the active function.
|
|
func (c *Context) VFMSUB132PD_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMSUB132PD_BCST(ops...))
|
|
}
|
|
|
|
// VFMSUB132PD_BCST: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB132PD.BCST m64 xmm k xmm
|
|
// VFMSUB132PD.BCST m64 xmm xmm
|
|
// VFMSUB132PD.BCST m64 ymm k ymm
|
|
// VFMSUB132PD.BCST m64 ymm ymm
|
|
// VFMSUB132PD.BCST m64 zmm k zmm
|
|
// VFMSUB132PD.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB132PD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB132PD_BCST(ops ...operand.Op) { ctx.VFMSUB132PD_BCST(ops...) }
|
|
|
|
// VFMSUB132PD_BCST_Z: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB132PD.BCST.Z m64 xmm k xmm
|
|
// VFMSUB132PD.BCST.Z m64 ymm k ymm
|
|
// VFMSUB132PD.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB132PD.BCST.Z instruction to the active function.
|
|
func (c *Context) VFMSUB132PD_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VFMSUB132PD_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VFMSUB132PD_BCST_Z: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB132PD.BCST.Z m64 xmm k xmm
|
|
// VFMSUB132PD.BCST.Z m64 ymm k ymm
|
|
// VFMSUB132PD.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB132PD.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB132PD_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VFMSUB132PD_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VFMSUB132PD_RD_SAE: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB132PD.RD_SAE zmm zmm k zmm
|
|
// VFMSUB132PD.RD_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB132PD.RD_SAE instruction to the active function.
|
|
func (c *Context) VFMSUB132PD_RD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMSUB132PD_RD_SAE(ops...))
|
|
}
|
|
|
|
// VFMSUB132PD_RD_SAE: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB132PD.RD_SAE zmm zmm k zmm
|
|
// VFMSUB132PD.RD_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB132PD.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB132PD_RD_SAE(ops ...operand.Op) { ctx.VFMSUB132PD_RD_SAE(ops...) }
|
|
|
|
// VFMSUB132PD_RD_SAE_Z: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB132PD.RD_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB132PD.RD_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMSUB132PD_RD_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFMSUB132PD_RD_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFMSUB132PD_RD_SAE_Z: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB132PD.RD_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB132PD.RD_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB132PD_RD_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMSUB132PD_RD_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMSUB132PD_RN_SAE: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB132PD.RN_SAE zmm zmm k zmm
|
|
// VFMSUB132PD.RN_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB132PD.RN_SAE instruction to the active function.
|
|
func (c *Context) VFMSUB132PD_RN_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMSUB132PD_RN_SAE(ops...))
|
|
}
|
|
|
|
// VFMSUB132PD_RN_SAE: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB132PD.RN_SAE zmm zmm k zmm
|
|
// VFMSUB132PD.RN_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB132PD.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB132PD_RN_SAE(ops ...operand.Op) { ctx.VFMSUB132PD_RN_SAE(ops...) }
|
|
|
|
// VFMSUB132PD_RN_SAE_Z: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB132PD.RN_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB132PD.RN_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMSUB132PD_RN_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFMSUB132PD_RN_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFMSUB132PD_RN_SAE_Z: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB132PD.RN_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB132PD.RN_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB132PD_RN_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMSUB132PD_RN_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMSUB132PD_RU_SAE: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB132PD.RU_SAE zmm zmm k zmm
|
|
// VFMSUB132PD.RU_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB132PD.RU_SAE instruction to the active function.
|
|
func (c *Context) VFMSUB132PD_RU_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMSUB132PD_RU_SAE(ops...))
|
|
}
|
|
|
|
// VFMSUB132PD_RU_SAE: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB132PD.RU_SAE zmm zmm k zmm
|
|
// VFMSUB132PD.RU_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB132PD.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB132PD_RU_SAE(ops ...operand.Op) { ctx.VFMSUB132PD_RU_SAE(ops...) }
|
|
|
|
// VFMSUB132PD_RU_SAE_Z: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB132PD.RU_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB132PD.RU_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMSUB132PD_RU_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFMSUB132PD_RU_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFMSUB132PD_RU_SAE_Z: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB132PD.RU_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB132PD.RU_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB132PD_RU_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMSUB132PD_RU_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMSUB132PD_RZ_SAE: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB132PD.RZ_SAE zmm zmm k zmm
|
|
// VFMSUB132PD.RZ_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB132PD.RZ_SAE instruction to the active function.
|
|
func (c *Context) VFMSUB132PD_RZ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMSUB132PD_RZ_SAE(ops...))
|
|
}
|
|
|
|
// VFMSUB132PD_RZ_SAE: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB132PD.RZ_SAE zmm zmm k zmm
|
|
// VFMSUB132PD.RZ_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB132PD.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB132PD_RZ_SAE(ops ...operand.Op) { ctx.VFMSUB132PD_RZ_SAE(ops...) }
|
|
|
|
// VFMSUB132PD_RZ_SAE_Z: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB132PD.RZ_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB132PD.RZ_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMSUB132PD_RZ_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFMSUB132PD_RZ_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFMSUB132PD_RZ_SAE_Z: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB132PD.RZ_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB132PD.RZ_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB132PD_RZ_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMSUB132PD_RZ_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMSUB132PD_Z: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB132PD.Z m128 xmm k xmm
|
|
// VFMSUB132PD.Z m256 ymm k ymm
|
|
// VFMSUB132PD.Z xmm xmm k xmm
|
|
// VFMSUB132PD.Z ymm ymm k ymm
|
|
// VFMSUB132PD.Z m512 zmm k zmm
|
|
// VFMSUB132PD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB132PD.Z instruction to the active function.
|
|
func (c *Context) VFMSUB132PD_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VFMSUB132PD_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VFMSUB132PD_Z: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB132PD.Z m128 xmm k xmm
|
|
// VFMSUB132PD.Z m256 ymm k ymm
|
|
// VFMSUB132PD.Z xmm xmm k xmm
|
|
// VFMSUB132PD.Z ymm ymm k ymm
|
|
// VFMSUB132PD.Z m512 zmm k zmm
|
|
// VFMSUB132PD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB132PD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB132PD_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VFMSUB132PD_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VFMSUB132PS: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB132PS m128 xmm xmm
|
|
// VFMSUB132PS m256 ymm ymm
|
|
// VFMSUB132PS xmm xmm xmm
|
|
// VFMSUB132PS ymm ymm ymm
|
|
// VFMSUB132PS m128 xmm k xmm
|
|
// VFMSUB132PS m256 ymm k ymm
|
|
// VFMSUB132PS xmm xmm k xmm
|
|
// VFMSUB132PS ymm ymm k ymm
|
|
// VFMSUB132PS m512 zmm k zmm
|
|
// VFMSUB132PS m512 zmm zmm
|
|
// VFMSUB132PS zmm zmm k zmm
|
|
// VFMSUB132PS zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB132PS instruction to the active function.
|
|
func (c *Context) VFMSUB132PS(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMSUB132PS(ops...))
|
|
}
|
|
|
|
// VFMSUB132PS: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB132PS m128 xmm xmm
|
|
// VFMSUB132PS m256 ymm ymm
|
|
// VFMSUB132PS xmm xmm xmm
|
|
// VFMSUB132PS ymm ymm ymm
|
|
// VFMSUB132PS m128 xmm k xmm
|
|
// VFMSUB132PS m256 ymm k ymm
|
|
// VFMSUB132PS xmm xmm k xmm
|
|
// VFMSUB132PS ymm ymm k ymm
|
|
// VFMSUB132PS m512 zmm k zmm
|
|
// VFMSUB132PS m512 zmm zmm
|
|
// VFMSUB132PS zmm zmm k zmm
|
|
// VFMSUB132PS zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB132PS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB132PS(ops ...operand.Op) { ctx.VFMSUB132PS(ops...) }
|
|
|
|
// VFMSUB132PS_BCST: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB132PS.BCST m32 xmm k xmm
|
|
// VFMSUB132PS.BCST m32 xmm xmm
|
|
// VFMSUB132PS.BCST m32 ymm k ymm
|
|
// VFMSUB132PS.BCST m32 ymm ymm
|
|
// VFMSUB132PS.BCST m32 zmm k zmm
|
|
// VFMSUB132PS.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB132PS.BCST instruction to the active function.
|
|
func (c *Context) VFMSUB132PS_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMSUB132PS_BCST(ops...))
|
|
}
|
|
|
|
// VFMSUB132PS_BCST: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB132PS.BCST m32 xmm k xmm
|
|
// VFMSUB132PS.BCST m32 xmm xmm
|
|
// VFMSUB132PS.BCST m32 ymm k ymm
|
|
// VFMSUB132PS.BCST m32 ymm ymm
|
|
// VFMSUB132PS.BCST m32 zmm k zmm
|
|
// VFMSUB132PS.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB132PS.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB132PS_BCST(ops ...operand.Op) { ctx.VFMSUB132PS_BCST(ops...) }
|
|
|
|
// VFMSUB132PS_BCST_Z: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB132PS.BCST.Z m32 xmm k xmm
|
|
// VFMSUB132PS.BCST.Z m32 ymm k ymm
|
|
// VFMSUB132PS.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB132PS.BCST.Z instruction to the active function.
|
|
func (c *Context) VFMSUB132PS_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VFMSUB132PS_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VFMSUB132PS_BCST_Z: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB132PS.BCST.Z m32 xmm k xmm
|
|
// VFMSUB132PS.BCST.Z m32 ymm k ymm
|
|
// VFMSUB132PS.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB132PS.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB132PS_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VFMSUB132PS_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VFMSUB132PS_RD_SAE: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB132PS.RD_SAE zmm zmm k zmm
|
|
// VFMSUB132PS.RD_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB132PS.RD_SAE instruction to the active function.
|
|
func (c *Context) VFMSUB132PS_RD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMSUB132PS_RD_SAE(ops...))
|
|
}
|
|
|
|
// VFMSUB132PS_RD_SAE: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB132PS.RD_SAE zmm zmm k zmm
|
|
// VFMSUB132PS.RD_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB132PS.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB132PS_RD_SAE(ops ...operand.Op) { ctx.VFMSUB132PS_RD_SAE(ops...) }
|
|
|
|
// VFMSUB132PS_RD_SAE_Z: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB132PS.RD_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB132PS.RD_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMSUB132PS_RD_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFMSUB132PS_RD_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFMSUB132PS_RD_SAE_Z: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB132PS.RD_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB132PS.RD_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB132PS_RD_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMSUB132PS_RD_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMSUB132PS_RN_SAE: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
// VFMSUB132PS.RN_SAE zmm zmm k zmm
// VFMSUB132PS.RN_SAE zmm zmm zmm
//
// Construct and append a VFMSUB132PS.RN_SAE instruction to the active function.
func (c *Context) VFMSUB132PS_RN_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMSUB132PS_RN_SAE(ops...))
}

// VFMSUB132PS_RN_SAE: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
// VFMSUB132PS.RN_SAE zmm zmm k zmm
// VFMSUB132PS.RN_SAE zmm zmm zmm
//
// Construct and append a VFMSUB132PS.RN_SAE instruction to the active function.
// Operates on the global context.
func VFMSUB132PS_RN_SAE(ops ...operand.Op) { ctx.VFMSUB132PS_RN_SAE(ops...) }
|
|
|
|
// VFMSUB132PS_RN_SAE_Z: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
// VFMSUB132PS.RN_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMSUB132PS.RN_SAE.Z instruction to the active function.
func (c *Context) VFMSUB132PS_RN_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VFMSUB132PS_RN_SAE_Z(z, z1, k, z2))
}

// VFMSUB132PS_RN_SAE_Z: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
// VFMSUB132PS.RN_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMSUB132PS.RN_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMSUB132PS_RN_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMSUB132PS_RN_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMSUB132PS_RU_SAE: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
// VFMSUB132PS.RU_SAE zmm zmm k zmm
// VFMSUB132PS.RU_SAE zmm zmm zmm
//
// Construct and append a VFMSUB132PS.RU_SAE instruction to the active function.
func (c *Context) VFMSUB132PS_RU_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMSUB132PS_RU_SAE(ops...))
}

// VFMSUB132PS_RU_SAE: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
// VFMSUB132PS.RU_SAE zmm zmm k zmm
// VFMSUB132PS.RU_SAE zmm zmm zmm
//
// Construct and append a VFMSUB132PS.RU_SAE instruction to the active function.
// Operates on the global context.
func VFMSUB132PS_RU_SAE(ops ...operand.Op) { ctx.VFMSUB132PS_RU_SAE(ops...) }
|
|
|
|
// VFMSUB132PS_RU_SAE_Z: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
// VFMSUB132PS.RU_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMSUB132PS.RU_SAE.Z instruction to the active function.
func (c *Context) VFMSUB132PS_RU_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VFMSUB132PS_RU_SAE_Z(z, z1, k, z2))
}

// VFMSUB132PS_RU_SAE_Z: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
// VFMSUB132PS.RU_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMSUB132PS.RU_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMSUB132PS_RU_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMSUB132PS_RU_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMSUB132PS_RZ_SAE: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
// VFMSUB132PS.RZ_SAE zmm zmm k zmm
// VFMSUB132PS.RZ_SAE zmm zmm zmm
//
// Construct and append a VFMSUB132PS.RZ_SAE instruction to the active function.
func (c *Context) VFMSUB132PS_RZ_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMSUB132PS_RZ_SAE(ops...))
}

// VFMSUB132PS_RZ_SAE: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
// VFMSUB132PS.RZ_SAE zmm zmm k zmm
// VFMSUB132PS.RZ_SAE zmm zmm zmm
//
// Construct and append a VFMSUB132PS.RZ_SAE instruction to the active function.
// Operates on the global context.
func VFMSUB132PS_RZ_SAE(ops ...operand.Op) { ctx.VFMSUB132PS_RZ_SAE(ops...) }
|
|
|
|
// VFMSUB132PS_RZ_SAE_Z: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
// VFMSUB132PS.RZ_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMSUB132PS.RZ_SAE.Z instruction to the active function.
func (c *Context) VFMSUB132PS_RZ_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VFMSUB132PS_RZ_SAE_Z(z, z1, k, z2))
}

// VFMSUB132PS_RZ_SAE_Z: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
// VFMSUB132PS.RZ_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMSUB132PS.RZ_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMSUB132PS_RZ_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMSUB132PS_RZ_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMSUB132PS_Z: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
// VFMSUB132PS.Z m128 xmm k xmm
// VFMSUB132PS.Z m256 ymm k ymm
// VFMSUB132PS.Z xmm xmm k xmm
// VFMSUB132PS.Z ymm ymm k ymm
// VFMSUB132PS.Z m512 zmm k zmm
// VFMSUB132PS.Z zmm zmm k zmm
//
// Construct and append a VFMSUB132PS.Z instruction to the active function.
func (c *Context) VFMSUB132PS_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VFMSUB132PS_Z(mxyz, xyz, k, xyz1))
}

// VFMSUB132PS_Z: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
// VFMSUB132PS.Z m128 xmm k xmm
// VFMSUB132PS.Z m256 ymm k ymm
// VFMSUB132PS.Z xmm xmm k xmm
// VFMSUB132PS.Z ymm ymm k ymm
// VFMSUB132PS.Z m512 zmm k zmm
// VFMSUB132PS.Z zmm zmm k zmm
//
// Construct and append a VFMSUB132PS.Z instruction to the active function.
// Operates on the global context.
func VFMSUB132PS_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VFMSUB132PS_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VFMSUB132SD: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values.
//
// Forms:
//
// VFMSUB132SD m64 xmm xmm
// VFMSUB132SD xmm xmm xmm
// VFMSUB132SD m64 xmm k xmm
// VFMSUB132SD xmm xmm k xmm
//
// Construct and append a VFMSUB132SD instruction to the active function.
func (c *Context) VFMSUB132SD(ops ...operand.Op) {
	c.addinstruction(x86.VFMSUB132SD(ops...))
}

// VFMSUB132SD: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values.
//
// Forms:
//
// VFMSUB132SD m64 xmm xmm
// VFMSUB132SD xmm xmm xmm
// VFMSUB132SD m64 xmm k xmm
// VFMSUB132SD xmm xmm k xmm
//
// Construct and append a VFMSUB132SD instruction to the active function.
// Operates on the global context.
func VFMSUB132SD(ops ...operand.Op) { ctx.VFMSUB132SD(ops...) }
|
|
|
|
// VFMSUB132SD_RD_SAE: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
// VFMSUB132SD.RD_SAE xmm xmm k xmm
// VFMSUB132SD.RD_SAE xmm xmm xmm
//
// Construct and append a VFMSUB132SD.RD_SAE instruction to the active function.
func (c *Context) VFMSUB132SD_RD_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMSUB132SD_RD_SAE(ops...))
}

// VFMSUB132SD_RD_SAE: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
// VFMSUB132SD.RD_SAE xmm xmm k xmm
// VFMSUB132SD.RD_SAE xmm xmm xmm
//
// Construct and append a VFMSUB132SD.RD_SAE instruction to the active function.
// Operates on the global context.
func VFMSUB132SD_RD_SAE(ops ...operand.Op) { ctx.VFMSUB132SD_RD_SAE(ops...) }
|
|
|
|
// VFMSUB132SD_RD_SAE_Z: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
// VFMSUB132SD.RD_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMSUB132SD.RD_SAE.Z instruction to the active function.
func (c *Context) VFMSUB132SD_RD_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VFMSUB132SD_RD_SAE_Z(x, x1, k, x2))
}

// VFMSUB132SD_RD_SAE_Z: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
// VFMSUB132SD.RD_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMSUB132SD.RD_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMSUB132SD_RD_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFMSUB132SD_RD_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFMSUB132SD_RN_SAE: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
// VFMSUB132SD.RN_SAE xmm xmm k xmm
// VFMSUB132SD.RN_SAE xmm xmm xmm
//
// Construct and append a VFMSUB132SD.RN_SAE instruction to the active function.
func (c *Context) VFMSUB132SD_RN_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMSUB132SD_RN_SAE(ops...))
}

// VFMSUB132SD_RN_SAE: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
// VFMSUB132SD.RN_SAE xmm xmm k xmm
// VFMSUB132SD.RN_SAE xmm xmm xmm
//
// Construct and append a VFMSUB132SD.RN_SAE instruction to the active function.
// Operates on the global context.
func VFMSUB132SD_RN_SAE(ops ...operand.Op) { ctx.VFMSUB132SD_RN_SAE(ops...) }
|
|
|
|
// VFMSUB132SD_RN_SAE_Z: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
// VFMSUB132SD.RN_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMSUB132SD.RN_SAE.Z instruction to the active function.
func (c *Context) VFMSUB132SD_RN_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VFMSUB132SD_RN_SAE_Z(x, x1, k, x2))
}

// VFMSUB132SD_RN_SAE_Z: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
// VFMSUB132SD.RN_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMSUB132SD.RN_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMSUB132SD_RN_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFMSUB132SD_RN_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFMSUB132SD_RU_SAE: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
// VFMSUB132SD.RU_SAE xmm xmm k xmm
// VFMSUB132SD.RU_SAE xmm xmm xmm
//
// Construct and append a VFMSUB132SD.RU_SAE instruction to the active function.
func (c *Context) VFMSUB132SD_RU_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMSUB132SD_RU_SAE(ops...))
}

// VFMSUB132SD_RU_SAE: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
// VFMSUB132SD.RU_SAE xmm xmm k xmm
// VFMSUB132SD.RU_SAE xmm xmm xmm
//
// Construct and append a VFMSUB132SD.RU_SAE instruction to the active function.
// Operates on the global context.
func VFMSUB132SD_RU_SAE(ops ...operand.Op) { ctx.VFMSUB132SD_RU_SAE(ops...) }
|
|
|
|
// VFMSUB132SD_RU_SAE_Z: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
// VFMSUB132SD.RU_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMSUB132SD.RU_SAE.Z instruction to the active function.
func (c *Context) VFMSUB132SD_RU_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VFMSUB132SD_RU_SAE_Z(x, x1, k, x2))
}

// VFMSUB132SD_RU_SAE_Z: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
// VFMSUB132SD.RU_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMSUB132SD.RU_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMSUB132SD_RU_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFMSUB132SD_RU_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFMSUB132SD_RZ_SAE: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
// VFMSUB132SD.RZ_SAE xmm xmm k xmm
// VFMSUB132SD.RZ_SAE xmm xmm xmm
//
// Construct and append a VFMSUB132SD.RZ_SAE instruction to the active function.
func (c *Context) VFMSUB132SD_RZ_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMSUB132SD_RZ_SAE(ops...))
}

// VFMSUB132SD_RZ_SAE: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
// VFMSUB132SD.RZ_SAE xmm xmm k xmm
// VFMSUB132SD.RZ_SAE xmm xmm xmm
//
// Construct and append a VFMSUB132SD.RZ_SAE instruction to the active function.
// Operates on the global context.
func VFMSUB132SD_RZ_SAE(ops ...operand.Op) { ctx.VFMSUB132SD_RZ_SAE(ops...) }
|
|
|
|
// VFMSUB132SD_RZ_SAE_Z: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
// VFMSUB132SD.RZ_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMSUB132SD.RZ_SAE.Z instruction to the active function.
func (c *Context) VFMSUB132SD_RZ_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VFMSUB132SD_RZ_SAE_Z(x, x1, k, x2))
}

// VFMSUB132SD_RZ_SAE_Z: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
// VFMSUB132SD.RZ_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMSUB132SD.RZ_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMSUB132SD_RZ_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFMSUB132SD_RZ_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFMSUB132SD_Z: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
// VFMSUB132SD.Z m64 xmm k xmm
// VFMSUB132SD.Z xmm xmm k xmm
//
// Construct and append a VFMSUB132SD.Z instruction to the active function.
func (c *Context) VFMSUB132SD_Z(mx, x, k, x1 operand.Op) {
	c.addinstruction(x86.VFMSUB132SD_Z(mx, x, k, x1))
}

// VFMSUB132SD_Z: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
// VFMSUB132SD.Z m64 xmm k xmm
// VFMSUB132SD.Z xmm xmm k xmm
//
// Construct and append a VFMSUB132SD.Z instruction to the active function.
// Operates on the global context.
func VFMSUB132SD_Z(mx, x, k, x1 operand.Op) { ctx.VFMSUB132SD_Z(mx, x, k, x1) }
|
|
|
|
// VFMSUB132SS: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values.
//
// Forms:
//
// VFMSUB132SS m32 xmm xmm
// VFMSUB132SS xmm xmm xmm
// VFMSUB132SS m32 xmm k xmm
// VFMSUB132SS xmm xmm k xmm
//
// Construct and append a VFMSUB132SS instruction to the active function.
func (c *Context) VFMSUB132SS(ops ...operand.Op) {
	c.addinstruction(x86.VFMSUB132SS(ops...))
}

// VFMSUB132SS: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values.
//
// Forms:
//
// VFMSUB132SS m32 xmm xmm
// VFMSUB132SS xmm xmm xmm
// VFMSUB132SS m32 xmm k xmm
// VFMSUB132SS xmm xmm k xmm
//
// Construct and append a VFMSUB132SS instruction to the active function.
// Operates on the global context.
func VFMSUB132SS(ops ...operand.Op) { ctx.VFMSUB132SS(ops...) }
|
|
|
|
// VFMSUB132SS_RD_SAE: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
// VFMSUB132SS.RD_SAE xmm xmm k xmm
// VFMSUB132SS.RD_SAE xmm xmm xmm
//
// Construct and append a VFMSUB132SS.RD_SAE instruction to the active function.
func (c *Context) VFMSUB132SS_RD_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMSUB132SS_RD_SAE(ops...))
}

// VFMSUB132SS_RD_SAE: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
// VFMSUB132SS.RD_SAE xmm xmm k xmm
// VFMSUB132SS.RD_SAE xmm xmm xmm
//
// Construct and append a VFMSUB132SS.RD_SAE instruction to the active function.
// Operates on the global context.
func VFMSUB132SS_RD_SAE(ops ...operand.Op) { ctx.VFMSUB132SS_RD_SAE(ops...) }
|
|
|
|
// VFMSUB132SS_RD_SAE_Z: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
// VFMSUB132SS.RD_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMSUB132SS.RD_SAE.Z instruction to the active function.
func (c *Context) VFMSUB132SS_RD_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VFMSUB132SS_RD_SAE_Z(x, x1, k, x2))
}

// VFMSUB132SS_RD_SAE_Z: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
// VFMSUB132SS.RD_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMSUB132SS.RD_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMSUB132SS_RD_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFMSUB132SS_RD_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFMSUB132SS_RN_SAE: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
// VFMSUB132SS.RN_SAE xmm xmm k xmm
// VFMSUB132SS.RN_SAE xmm xmm xmm
//
// Construct and append a VFMSUB132SS.RN_SAE instruction to the active function.
func (c *Context) VFMSUB132SS_RN_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMSUB132SS_RN_SAE(ops...))
}

// VFMSUB132SS_RN_SAE: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
// VFMSUB132SS.RN_SAE xmm xmm k xmm
// VFMSUB132SS.RN_SAE xmm xmm xmm
//
// Construct and append a VFMSUB132SS.RN_SAE instruction to the active function.
// Operates on the global context.
func VFMSUB132SS_RN_SAE(ops ...operand.Op) { ctx.VFMSUB132SS_RN_SAE(ops...) }
|
|
|
|
// VFMSUB132SS_RN_SAE_Z: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
// VFMSUB132SS.RN_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMSUB132SS.RN_SAE.Z instruction to the active function.
func (c *Context) VFMSUB132SS_RN_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VFMSUB132SS_RN_SAE_Z(x, x1, k, x2))
}

// VFMSUB132SS_RN_SAE_Z: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
// VFMSUB132SS.RN_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMSUB132SS.RN_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMSUB132SS_RN_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFMSUB132SS_RN_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFMSUB132SS_RU_SAE: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
// VFMSUB132SS.RU_SAE xmm xmm k xmm
// VFMSUB132SS.RU_SAE xmm xmm xmm
//
// Construct and append a VFMSUB132SS.RU_SAE instruction to the active function.
func (c *Context) VFMSUB132SS_RU_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMSUB132SS_RU_SAE(ops...))
}

// VFMSUB132SS_RU_SAE: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
// VFMSUB132SS.RU_SAE xmm xmm k xmm
// VFMSUB132SS.RU_SAE xmm xmm xmm
//
// Construct and append a VFMSUB132SS.RU_SAE instruction to the active function.
// Operates on the global context.
func VFMSUB132SS_RU_SAE(ops ...operand.Op) { ctx.VFMSUB132SS_RU_SAE(ops...) }
|
|
|
|
// VFMSUB132SS_RU_SAE_Z: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
// VFMSUB132SS.RU_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMSUB132SS.RU_SAE.Z instruction to the active function.
func (c *Context) VFMSUB132SS_RU_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VFMSUB132SS_RU_SAE_Z(x, x1, k, x2))
}

// VFMSUB132SS_RU_SAE_Z: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
// VFMSUB132SS.RU_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMSUB132SS.RU_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMSUB132SS_RU_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFMSUB132SS_RU_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFMSUB132SS_RZ_SAE: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
// VFMSUB132SS.RZ_SAE xmm xmm k xmm
// VFMSUB132SS.RZ_SAE xmm xmm xmm
//
// Construct and append a VFMSUB132SS.RZ_SAE instruction to the active function.
func (c *Context) VFMSUB132SS_RZ_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMSUB132SS_RZ_SAE(ops...))
}

// VFMSUB132SS_RZ_SAE: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
// VFMSUB132SS.RZ_SAE xmm xmm k xmm
// VFMSUB132SS.RZ_SAE xmm xmm xmm
//
// Construct and append a VFMSUB132SS.RZ_SAE instruction to the active function.
// Operates on the global context.
func VFMSUB132SS_RZ_SAE(ops ...operand.Op) { ctx.VFMSUB132SS_RZ_SAE(ops...) }
|
|
|
|
// VFMSUB132SS_RZ_SAE_Z: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
// VFMSUB132SS.RZ_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMSUB132SS.RZ_SAE.Z instruction to the active function.
func (c *Context) VFMSUB132SS_RZ_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VFMSUB132SS_RZ_SAE_Z(x, x1, k, x2))
}

// VFMSUB132SS_RZ_SAE_Z: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
// VFMSUB132SS.RZ_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMSUB132SS.RZ_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMSUB132SS_RZ_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFMSUB132SS_RZ_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFMSUB132SS_Z: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
// VFMSUB132SS.Z m32 xmm k xmm
// VFMSUB132SS.Z xmm xmm k xmm
//
// Construct and append a VFMSUB132SS.Z instruction to the active function.
func (c *Context) VFMSUB132SS_Z(mx, x, k, x1 operand.Op) {
	c.addinstruction(x86.VFMSUB132SS_Z(mx, x, k, x1))
}

// VFMSUB132SS_Z: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
// VFMSUB132SS.Z m32 xmm k xmm
// VFMSUB132SS.Z xmm xmm k xmm
//
// Construct and append a VFMSUB132SS.Z instruction to the active function.
// Operates on the global context.
func VFMSUB132SS_Z(mx, x, k, x1 operand.Op) { ctx.VFMSUB132SS_Z(mx, x, k, x1) }
|
|
|
|
// VFMSUB213PD: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values.
//
// Forms:
//
// VFMSUB213PD m128 xmm xmm
// VFMSUB213PD m256 ymm ymm
// VFMSUB213PD xmm xmm xmm
// VFMSUB213PD ymm ymm ymm
// VFMSUB213PD m128 xmm k xmm
// VFMSUB213PD m256 ymm k ymm
// VFMSUB213PD xmm xmm k xmm
// VFMSUB213PD ymm ymm k ymm
// VFMSUB213PD m512 zmm k zmm
// VFMSUB213PD m512 zmm zmm
// VFMSUB213PD zmm zmm k zmm
// VFMSUB213PD zmm zmm zmm
//
// Construct and append a VFMSUB213PD instruction to the active function.
func (c *Context) VFMSUB213PD(ops ...operand.Op) {
	c.addinstruction(x86.VFMSUB213PD(ops...))
}

// VFMSUB213PD: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values.
//
// Forms:
//
// VFMSUB213PD m128 xmm xmm
// VFMSUB213PD m256 ymm ymm
// VFMSUB213PD xmm xmm xmm
// VFMSUB213PD ymm ymm ymm
// VFMSUB213PD m128 xmm k xmm
// VFMSUB213PD m256 ymm k ymm
// VFMSUB213PD xmm xmm k xmm
// VFMSUB213PD ymm ymm k ymm
// VFMSUB213PD m512 zmm k zmm
// VFMSUB213PD m512 zmm zmm
// VFMSUB213PD zmm zmm k zmm
// VFMSUB213PD zmm zmm zmm
//
// Construct and append a VFMSUB213PD instruction to the active function.
// Operates on the global context.
func VFMSUB213PD(ops ...operand.Op) { ctx.VFMSUB213PD(ops...) }
|
|
|
|
// VFMSUB213PD_BCST: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
// VFMSUB213PD.BCST m64 xmm k xmm
// VFMSUB213PD.BCST m64 xmm xmm
// VFMSUB213PD.BCST m64 ymm k ymm
// VFMSUB213PD.BCST m64 ymm ymm
// VFMSUB213PD.BCST m64 zmm k zmm
// VFMSUB213PD.BCST m64 zmm zmm
//
// Construct and append a VFMSUB213PD.BCST instruction to the active function.
func (c *Context) VFMSUB213PD_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VFMSUB213PD_BCST(ops...))
}

// VFMSUB213PD_BCST: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
// VFMSUB213PD.BCST m64 xmm k xmm
// VFMSUB213PD.BCST m64 xmm xmm
// VFMSUB213PD.BCST m64 ymm k ymm
// VFMSUB213PD.BCST m64 ymm ymm
// VFMSUB213PD.BCST m64 zmm k zmm
// VFMSUB213PD.BCST m64 zmm zmm
//
// Construct and append a VFMSUB213PD.BCST instruction to the active function.
// Operates on the global context.
func VFMSUB213PD_BCST(ops ...operand.Op) { ctx.VFMSUB213PD_BCST(ops...) }
|
|
|
|
// VFMSUB213PD_BCST_Z: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
// VFMSUB213PD.BCST.Z m64 xmm k xmm
// VFMSUB213PD.BCST.Z m64 ymm k ymm
// VFMSUB213PD.BCST.Z m64 zmm k zmm
//
// Construct and append a VFMSUB213PD.BCST.Z instruction to the active function.
func (c *Context) VFMSUB213PD_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VFMSUB213PD_BCST_Z(m, xyz, k, xyz1))
}

// VFMSUB213PD_BCST_Z: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
// VFMSUB213PD.BCST.Z m64 xmm k xmm
// VFMSUB213PD.BCST.Z m64 ymm k ymm
// VFMSUB213PD.BCST.Z m64 zmm k zmm
//
// Construct and append a VFMSUB213PD.BCST.Z instruction to the active function.
// Operates on the global context.
func VFMSUB213PD_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VFMSUB213PD_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VFMSUB213PD_RD_SAE: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
// VFMSUB213PD.RD_SAE zmm zmm k zmm
// VFMSUB213PD.RD_SAE zmm zmm zmm
//
// Construct and append a VFMSUB213PD.RD_SAE instruction to the active function.
func (c *Context) VFMSUB213PD_RD_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMSUB213PD_RD_SAE(ops...))
}

// VFMSUB213PD_RD_SAE: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
// VFMSUB213PD.RD_SAE zmm zmm k zmm
// VFMSUB213PD.RD_SAE zmm zmm zmm
//
// Construct and append a VFMSUB213PD.RD_SAE instruction to the active function.
// Operates on the global context.
func VFMSUB213PD_RD_SAE(ops ...operand.Op) { ctx.VFMSUB213PD_RD_SAE(ops...) }
|
|
|
|
// VFMSUB213PD_RD_SAE_Z: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
// VFMSUB213PD.RD_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMSUB213PD.RD_SAE.Z instruction to the active function.
func (c *Context) VFMSUB213PD_RD_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VFMSUB213PD_RD_SAE_Z(z, z1, k, z2))
}

// VFMSUB213PD_RD_SAE_Z: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
// VFMSUB213PD.RD_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMSUB213PD.RD_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMSUB213PD_RD_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMSUB213PD_RD_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMSUB213PD_RN_SAE: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
// VFMSUB213PD.RN_SAE zmm zmm k zmm
// VFMSUB213PD.RN_SAE zmm zmm zmm
//
// Construct and append a VFMSUB213PD.RN_SAE instruction to the active function.
func (c *Context) VFMSUB213PD_RN_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMSUB213PD_RN_SAE(ops...))
}

// VFMSUB213PD_RN_SAE: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
// VFMSUB213PD.RN_SAE zmm zmm k zmm
// VFMSUB213PD.RN_SAE zmm zmm zmm
//
// Construct and append a VFMSUB213PD.RN_SAE instruction to the active function.
// Operates on the global context.
func VFMSUB213PD_RN_SAE(ops ...operand.Op) { ctx.VFMSUB213PD_RN_SAE(ops...) }
|
|
|
|
// VFMSUB213PD_RN_SAE_Z: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
// VFMSUB213PD.RN_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMSUB213PD.RN_SAE.Z instruction to the active function.
func (c *Context) VFMSUB213PD_RN_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VFMSUB213PD_RN_SAE_Z(z, z1, k, z2))
}

// VFMSUB213PD_RN_SAE_Z: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
// VFMSUB213PD.RN_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMSUB213PD.RN_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMSUB213PD_RN_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMSUB213PD_RN_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMSUB213PD_RU_SAE: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
// VFMSUB213PD.RU_SAE zmm zmm k zmm
// VFMSUB213PD.RU_SAE zmm zmm zmm
//
// Construct and append a VFMSUB213PD.RU_SAE instruction to the active function.
func (c *Context) VFMSUB213PD_RU_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMSUB213PD_RU_SAE(ops...))
}

// VFMSUB213PD_RU_SAE: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
// VFMSUB213PD.RU_SAE zmm zmm k zmm
// VFMSUB213PD.RU_SAE zmm zmm zmm
//
// Construct and append a VFMSUB213PD.RU_SAE instruction to the active function.
// Operates on the global context.
func VFMSUB213PD_RU_SAE(ops ...operand.Op) { ctx.VFMSUB213PD_RU_SAE(ops...) }
|
|
|
|
// VFMSUB213PD_RU_SAE_Z: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213PD.RU_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB213PD.RU_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMSUB213PD_RU_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFMSUB213PD_RU_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFMSUB213PD_RU_SAE_Z: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213PD.RU_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB213PD.RU_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB213PD_RU_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMSUB213PD_RU_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMSUB213PD_RZ_SAE: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213PD.RZ_SAE zmm zmm k zmm
|
|
// VFMSUB213PD.RZ_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB213PD.RZ_SAE instruction to the active function.
|
|
func (c *Context) VFMSUB213PD_RZ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMSUB213PD_RZ_SAE(ops...))
|
|
}
|
|
|
|
// VFMSUB213PD_RZ_SAE: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213PD.RZ_SAE zmm zmm k zmm
|
|
// VFMSUB213PD.RZ_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB213PD.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB213PD_RZ_SAE(ops ...operand.Op) { ctx.VFMSUB213PD_RZ_SAE(ops...) }
|
|
|
|
// VFMSUB213PD_RZ_SAE_Z: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213PD.RZ_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB213PD.RZ_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMSUB213PD_RZ_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFMSUB213PD_RZ_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFMSUB213PD_RZ_SAE_Z: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213PD.RZ_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB213PD.RZ_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB213PD_RZ_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMSUB213PD_RZ_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMSUB213PD_Z: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213PD.Z m128 xmm k xmm
|
|
// VFMSUB213PD.Z m256 ymm k ymm
|
|
// VFMSUB213PD.Z xmm xmm k xmm
|
|
// VFMSUB213PD.Z ymm ymm k ymm
|
|
// VFMSUB213PD.Z m512 zmm k zmm
|
|
// VFMSUB213PD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB213PD.Z instruction to the active function.
|
|
func (c *Context) VFMSUB213PD_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VFMSUB213PD_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VFMSUB213PD_Z: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213PD.Z m128 xmm k xmm
|
|
// VFMSUB213PD.Z m256 ymm k ymm
|
|
// VFMSUB213PD.Z xmm xmm k xmm
|
|
// VFMSUB213PD.Z ymm ymm k ymm
|
|
// VFMSUB213PD.Z m512 zmm k zmm
|
|
// VFMSUB213PD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB213PD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB213PD_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VFMSUB213PD_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VFMSUB213PS: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213PS m128 xmm xmm
|
|
// VFMSUB213PS m256 ymm ymm
|
|
// VFMSUB213PS xmm xmm xmm
|
|
// VFMSUB213PS ymm ymm ymm
|
|
// VFMSUB213PS m128 xmm k xmm
|
|
// VFMSUB213PS m256 ymm k ymm
|
|
// VFMSUB213PS xmm xmm k xmm
|
|
// VFMSUB213PS ymm ymm k ymm
|
|
// VFMSUB213PS m512 zmm k zmm
|
|
// VFMSUB213PS m512 zmm zmm
|
|
// VFMSUB213PS zmm zmm k zmm
|
|
// VFMSUB213PS zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB213PS instruction to the active function.
|
|
func (c *Context) VFMSUB213PS(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMSUB213PS(ops...))
|
|
}
|
|
|
|
// VFMSUB213PS: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213PS m128 xmm xmm
|
|
// VFMSUB213PS m256 ymm ymm
|
|
// VFMSUB213PS xmm xmm xmm
|
|
// VFMSUB213PS ymm ymm ymm
|
|
// VFMSUB213PS m128 xmm k xmm
|
|
// VFMSUB213PS m256 ymm k ymm
|
|
// VFMSUB213PS xmm xmm k xmm
|
|
// VFMSUB213PS ymm ymm k ymm
|
|
// VFMSUB213PS m512 zmm k zmm
|
|
// VFMSUB213PS m512 zmm zmm
|
|
// VFMSUB213PS zmm zmm k zmm
|
|
// VFMSUB213PS zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB213PS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB213PS(ops ...operand.Op) { ctx.VFMSUB213PS(ops...) }
|
|
|
|
// VFMSUB213PS_BCST: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213PS.BCST m32 xmm k xmm
|
|
// VFMSUB213PS.BCST m32 xmm xmm
|
|
// VFMSUB213PS.BCST m32 ymm k ymm
|
|
// VFMSUB213PS.BCST m32 ymm ymm
|
|
// VFMSUB213PS.BCST m32 zmm k zmm
|
|
// VFMSUB213PS.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB213PS.BCST instruction to the active function.
|
|
func (c *Context) VFMSUB213PS_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMSUB213PS_BCST(ops...))
|
|
}
|
|
|
|
// VFMSUB213PS_BCST: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213PS.BCST m32 xmm k xmm
|
|
// VFMSUB213PS.BCST m32 xmm xmm
|
|
// VFMSUB213PS.BCST m32 ymm k ymm
|
|
// VFMSUB213PS.BCST m32 ymm ymm
|
|
// VFMSUB213PS.BCST m32 zmm k zmm
|
|
// VFMSUB213PS.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB213PS.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB213PS_BCST(ops ...operand.Op) { ctx.VFMSUB213PS_BCST(ops...) }
|
|
|
|
// VFMSUB213PS_BCST_Z: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213PS.BCST.Z m32 xmm k xmm
|
|
// VFMSUB213PS.BCST.Z m32 ymm k ymm
|
|
// VFMSUB213PS.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB213PS.BCST.Z instruction to the active function.
|
|
func (c *Context) VFMSUB213PS_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VFMSUB213PS_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VFMSUB213PS_BCST_Z: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213PS.BCST.Z m32 xmm k xmm
|
|
// VFMSUB213PS.BCST.Z m32 ymm k ymm
|
|
// VFMSUB213PS.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB213PS.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB213PS_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VFMSUB213PS_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VFMSUB213PS_RD_SAE: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213PS.RD_SAE zmm zmm k zmm
|
|
// VFMSUB213PS.RD_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB213PS.RD_SAE instruction to the active function.
|
|
func (c *Context) VFMSUB213PS_RD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMSUB213PS_RD_SAE(ops...))
|
|
}
|
|
|
|
// VFMSUB213PS_RD_SAE: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213PS.RD_SAE zmm zmm k zmm
|
|
// VFMSUB213PS.RD_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB213PS.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB213PS_RD_SAE(ops ...operand.Op) { ctx.VFMSUB213PS_RD_SAE(ops...) }
|
|
|
|
// VFMSUB213PS_RD_SAE_Z: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213PS.RD_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB213PS.RD_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMSUB213PS_RD_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFMSUB213PS_RD_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFMSUB213PS_RD_SAE_Z: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213PS.RD_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB213PS.RD_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB213PS_RD_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMSUB213PS_RD_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMSUB213PS_RN_SAE: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213PS.RN_SAE zmm zmm k zmm
|
|
// VFMSUB213PS.RN_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB213PS.RN_SAE instruction to the active function.
|
|
func (c *Context) VFMSUB213PS_RN_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMSUB213PS_RN_SAE(ops...))
|
|
}
|
|
|
|
// VFMSUB213PS_RN_SAE: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213PS.RN_SAE zmm zmm k zmm
|
|
// VFMSUB213PS.RN_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB213PS.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB213PS_RN_SAE(ops ...operand.Op) { ctx.VFMSUB213PS_RN_SAE(ops...) }
|
|
|
|
// VFMSUB213PS_RN_SAE_Z: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213PS.RN_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB213PS.RN_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMSUB213PS_RN_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFMSUB213PS_RN_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFMSUB213PS_RN_SAE_Z: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213PS.RN_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB213PS.RN_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB213PS_RN_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMSUB213PS_RN_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMSUB213PS_RU_SAE: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213PS.RU_SAE zmm zmm k zmm
|
|
// VFMSUB213PS.RU_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB213PS.RU_SAE instruction to the active function.
|
|
func (c *Context) VFMSUB213PS_RU_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMSUB213PS_RU_SAE(ops...))
|
|
}
|
|
|
|
// VFMSUB213PS_RU_SAE: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213PS.RU_SAE zmm zmm k zmm
|
|
// VFMSUB213PS.RU_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB213PS.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB213PS_RU_SAE(ops ...operand.Op) { ctx.VFMSUB213PS_RU_SAE(ops...) }
|
|
|
|
// VFMSUB213PS_RU_SAE_Z: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213PS.RU_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB213PS.RU_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMSUB213PS_RU_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFMSUB213PS_RU_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFMSUB213PS_RU_SAE_Z: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213PS.RU_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB213PS.RU_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB213PS_RU_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMSUB213PS_RU_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMSUB213PS_RZ_SAE: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213PS.RZ_SAE zmm zmm k zmm
|
|
// VFMSUB213PS.RZ_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB213PS.RZ_SAE instruction to the active function.
|
|
func (c *Context) VFMSUB213PS_RZ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMSUB213PS_RZ_SAE(ops...))
|
|
}
|
|
|
|
// VFMSUB213PS_RZ_SAE: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213PS.RZ_SAE zmm zmm k zmm
|
|
// VFMSUB213PS.RZ_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB213PS.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB213PS_RZ_SAE(ops ...operand.Op) { ctx.VFMSUB213PS_RZ_SAE(ops...) }
|
|
|
|
// VFMSUB213PS_RZ_SAE_Z: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213PS.RZ_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB213PS.RZ_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMSUB213PS_RZ_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFMSUB213PS_RZ_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFMSUB213PS_RZ_SAE_Z: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213PS.RZ_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB213PS.RZ_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB213PS_RZ_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMSUB213PS_RZ_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMSUB213PS_Z: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213PS.Z m128 xmm k xmm
|
|
// VFMSUB213PS.Z m256 ymm k ymm
|
|
// VFMSUB213PS.Z xmm xmm k xmm
|
|
// VFMSUB213PS.Z ymm ymm k ymm
|
|
// VFMSUB213PS.Z m512 zmm k zmm
|
|
// VFMSUB213PS.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB213PS.Z instruction to the active function.
|
|
func (c *Context) VFMSUB213PS_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VFMSUB213PS_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VFMSUB213PS_Z: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213PS.Z m128 xmm k xmm
|
|
// VFMSUB213PS.Z m256 ymm k ymm
|
|
// VFMSUB213PS.Z xmm xmm k xmm
|
|
// VFMSUB213PS.Z ymm ymm k ymm
|
|
// VFMSUB213PS.Z m512 zmm k zmm
|
|
// VFMSUB213PS.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB213PS.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB213PS_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VFMSUB213PS_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VFMSUB213SD: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213SD m64 xmm xmm
|
|
// VFMSUB213SD xmm xmm xmm
|
|
// VFMSUB213SD m64 xmm k xmm
|
|
// VFMSUB213SD xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMSUB213SD instruction to the active function.
|
|
func (c *Context) VFMSUB213SD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMSUB213SD(ops...))
|
|
}
|
|
|
|
// VFMSUB213SD: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213SD m64 xmm xmm
|
|
// VFMSUB213SD xmm xmm xmm
|
|
// VFMSUB213SD m64 xmm k xmm
|
|
// VFMSUB213SD xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMSUB213SD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB213SD(ops ...operand.Op) { ctx.VFMSUB213SD(ops...) }
|
|
|
|
// VFMSUB213SD_RD_SAE: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213SD.RD_SAE xmm xmm k xmm
|
|
// VFMSUB213SD.RD_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFMSUB213SD.RD_SAE instruction to the active function.
|
|
func (c *Context) VFMSUB213SD_RD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMSUB213SD_RD_SAE(ops...))
|
|
}
|
|
|
|
// VFMSUB213SD_RD_SAE: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213SD.RD_SAE xmm xmm k xmm
|
|
// VFMSUB213SD.RD_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFMSUB213SD.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB213SD_RD_SAE(ops ...operand.Op) { ctx.VFMSUB213SD_RD_SAE(ops...) }
|
|
|
|
// VFMSUB213SD_RD_SAE_Z: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213SD.RD_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMSUB213SD.RD_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMSUB213SD_RD_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VFMSUB213SD_RD_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VFMSUB213SD_RD_SAE_Z: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213SD.RD_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMSUB213SD.RD_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB213SD_RD_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFMSUB213SD_RD_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFMSUB213SD_RN_SAE: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213SD.RN_SAE xmm xmm k xmm
|
|
// VFMSUB213SD.RN_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFMSUB213SD.RN_SAE instruction to the active function.
|
|
func (c *Context) VFMSUB213SD_RN_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMSUB213SD_RN_SAE(ops...))
|
|
}
|
|
|
|
// VFMSUB213SD_RN_SAE: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213SD.RN_SAE xmm xmm k xmm
|
|
// VFMSUB213SD.RN_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFMSUB213SD.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB213SD_RN_SAE(ops ...operand.Op) { ctx.VFMSUB213SD_RN_SAE(ops...) }
|
|
|
|
// VFMSUB213SD_RN_SAE_Z: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213SD.RN_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMSUB213SD.RN_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMSUB213SD_RN_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VFMSUB213SD_RN_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VFMSUB213SD_RN_SAE_Z: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213SD.RN_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMSUB213SD.RN_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB213SD_RN_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFMSUB213SD_RN_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFMSUB213SD_RU_SAE: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213SD.RU_SAE xmm xmm k xmm
|
|
// VFMSUB213SD.RU_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFMSUB213SD.RU_SAE instruction to the active function.
|
|
func (c *Context) VFMSUB213SD_RU_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMSUB213SD_RU_SAE(ops...))
|
|
}
|
|
|
|
// VFMSUB213SD_RU_SAE: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213SD.RU_SAE xmm xmm k xmm
|
|
// VFMSUB213SD.RU_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFMSUB213SD.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB213SD_RU_SAE(ops ...operand.Op) { ctx.VFMSUB213SD_RU_SAE(ops...) }
|
|
|
|
// VFMSUB213SD_RU_SAE_Z: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213SD.RU_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMSUB213SD.RU_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMSUB213SD_RU_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VFMSUB213SD_RU_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VFMSUB213SD_RU_SAE_Z: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213SD.RU_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMSUB213SD.RU_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB213SD_RU_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFMSUB213SD_RU_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFMSUB213SD_RZ_SAE: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213SD.RZ_SAE xmm xmm k xmm
|
|
// VFMSUB213SD.RZ_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFMSUB213SD.RZ_SAE instruction to the active function.
|
|
func (c *Context) VFMSUB213SD_RZ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMSUB213SD_RZ_SAE(ops...))
|
|
}
|
|
|
|
// VFMSUB213SD_RZ_SAE: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213SD.RZ_SAE xmm xmm k xmm
|
|
// VFMSUB213SD.RZ_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFMSUB213SD.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB213SD_RZ_SAE(ops ...operand.Op) { ctx.VFMSUB213SD_RZ_SAE(ops...) }
|
|
|
|
// VFMSUB213SD_RZ_SAE_Z: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213SD.RZ_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMSUB213SD.RZ_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMSUB213SD_RZ_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VFMSUB213SD_RZ_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VFMSUB213SD_RZ_SAE_Z: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213SD.RZ_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMSUB213SD.RZ_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB213SD_RZ_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFMSUB213SD_RZ_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFMSUB213SD_Z: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213SD.Z m64 xmm k xmm
|
|
// VFMSUB213SD.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMSUB213SD.Z instruction to the active function.
|
|
func (c *Context) VFMSUB213SD_Z(mx, x, k, x1 operand.Op) {
|
|
c.addinstruction(x86.VFMSUB213SD_Z(mx, x, k, x1))
|
|
}
|
|
|
|
// VFMSUB213SD_Z: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213SD.Z m64 xmm k xmm
|
|
// VFMSUB213SD.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMSUB213SD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB213SD_Z(mx, x, k, x1 operand.Op) { ctx.VFMSUB213SD_Z(mx, x, k, x1) }
|
|
|
|
// VFMSUB213SS: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213SS m32 xmm xmm
|
|
// VFMSUB213SS xmm xmm xmm
|
|
// VFMSUB213SS m32 xmm k xmm
|
|
// VFMSUB213SS xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMSUB213SS instruction to the active function.
|
|
func (c *Context) VFMSUB213SS(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMSUB213SS(ops...))
|
|
}
|
|
|
|
// VFMSUB213SS: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213SS m32 xmm xmm
|
|
// VFMSUB213SS xmm xmm xmm
|
|
// VFMSUB213SS m32 xmm k xmm
|
|
// VFMSUB213SS xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMSUB213SS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB213SS(ops ...operand.Op) { ctx.VFMSUB213SS(ops...) }
|
|
|
|
// VFMSUB213SS_RD_SAE: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213SS.RD_SAE xmm xmm k xmm
|
|
// VFMSUB213SS.RD_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFMSUB213SS.RD_SAE instruction to the active function.
|
|
func (c *Context) VFMSUB213SS_RD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMSUB213SS_RD_SAE(ops...))
|
|
}
|
|
|
|
// VFMSUB213SS_RD_SAE: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213SS.RD_SAE xmm xmm k xmm
|
|
// VFMSUB213SS.RD_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFMSUB213SS.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB213SS_RD_SAE(ops ...operand.Op) { ctx.VFMSUB213SS_RD_SAE(ops...) }
|
|
|
|
// VFMSUB213SS_RD_SAE_Z: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213SS.RD_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMSUB213SS.RD_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMSUB213SS_RD_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VFMSUB213SS_RD_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VFMSUB213SS_RD_SAE_Z: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213SS.RD_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMSUB213SS.RD_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB213SS_RD_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFMSUB213SS_RD_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFMSUB213SS_RN_SAE: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213SS.RN_SAE xmm xmm k xmm
|
|
// VFMSUB213SS.RN_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFMSUB213SS.RN_SAE instruction to the active function.
|
|
func (c *Context) VFMSUB213SS_RN_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMSUB213SS_RN_SAE(ops...))
|
|
}
|
|
|
|
// VFMSUB213SS_RN_SAE: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213SS.RN_SAE xmm xmm k xmm
|
|
// VFMSUB213SS.RN_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFMSUB213SS.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB213SS_RN_SAE(ops ...operand.Op) { ctx.VFMSUB213SS_RN_SAE(ops...) }
|
|
|
|
// VFMSUB213SS_RN_SAE_Z: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213SS.RN_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMSUB213SS.RN_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMSUB213SS_RN_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VFMSUB213SS_RN_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VFMSUB213SS_RN_SAE_Z: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213SS.RN_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMSUB213SS.RN_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB213SS_RN_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFMSUB213SS_RN_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFMSUB213SS_RU_SAE: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213SS.RU_SAE xmm xmm k xmm
|
|
// VFMSUB213SS.RU_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFMSUB213SS.RU_SAE instruction to the active function.
|
|
func (c *Context) VFMSUB213SS_RU_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMSUB213SS_RU_SAE(ops...))
|
|
}
|
|
|
|
// VFMSUB213SS_RU_SAE: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213SS.RU_SAE xmm xmm k xmm
|
|
// VFMSUB213SS.RU_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFMSUB213SS.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB213SS_RU_SAE(ops ...operand.Op) { ctx.VFMSUB213SS_RU_SAE(ops...) }
|
|
|
|
// VFMSUB213SS_RU_SAE_Z: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213SS.RU_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMSUB213SS.RU_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMSUB213SS_RU_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VFMSUB213SS_RU_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VFMSUB213SS_RU_SAE_Z: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213SS.RU_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMSUB213SS.RU_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB213SS_RU_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFMSUB213SS_RU_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFMSUB213SS_RZ_SAE: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213SS.RZ_SAE xmm xmm k xmm
|
|
// VFMSUB213SS.RZ_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFMSUB213SS.RZ_SAE instruction to the active function.
|
|
func (c *Context) VFMSUB213SS_RZ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMSUB213SS_RZ_SAE(ops...))
|
|
}
|
|
|
|
// VFMSUB213SS_RZ_SAE: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213SS.RZ_SAE xmm xmm k xmm
|
|
// VFMSUB213SS.RZ_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFMSUB213SS.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB213SS_RZ_SAE(ops ...operand.Op) { ctx.VFMSUB213SS_RZ_SAE(ops...) }
|
|
|
|
// VFMSUB213SS_RZ_SAE_Z: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213SS.RZ_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMSUB213SS.RZ_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMSUB213SS_RZ_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VFMSUB213SS_RZ_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VFMSUB213SS_RZ_SAE_Z: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213SS.RZ_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMSUB213SS.RZ_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB213SS_RZ_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFMSUB213SS_RZ_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFMSUB213SS_Z: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213SS.Z m32 xmm k xmm
|
|
// VFMSUB213SS.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMSUB213SS.Z instruction to the active function.
|
|
func (c *Context) VFMSUB213SS_Z(mx, x, k, x1 operand.Op) {
|
|
c.addinstruction(x86.VFMSUB213SS_Z(mx, x, k, x1))
|
|
}
|
|
|
|
// VFMSUB213SS_Z: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB213SS.Z m32 xmm k xmm
|
|
// VFMSUB213SS.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMSUB213SS.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB213SS_Z(mx, x, k, x1 operand.Op) { ctx.VFMSUB213SS_Z(mx, x, k, x1) }
|
|
|
|
// VFMSUB231PD: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231PD m128 xmm xmm
|
|
// VFMSUB231PD m256 ymm ymm
|
|
// VFMSUB231PD xmm xmm xmm
|
|
// VFMSUB231PD ymm ymm ymm
|
|
// VFMSUB231PD m128 xmm k xmm
|
|
// VFMSUB231PD m256 ymm k ymm
|
|
// VFMSUB231PD xmm xmm k xmm
|
|
// VFMSUB231PD ymm ymm k ymm
|
|
// VFMSUB231PD m512 zmm k zmm
|
|
// VFMSUB231PD m512 zmm zmm
|
|
// VFMSUB231PD zmm zmm k zmm
|
|
// VFMSUB231PD zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB231PD instruction to the active function.
|
|
func (c *Context) VFMSUB231PD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMSUB231PD(ops...))
|
|
}
|
|
|
|
// VFMSUB231PD: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231PD m128 xmm xmm
|
|
// VFMSUB231PD m256 ymm ymm
|
|
// VFMSUB231PD xmm xmm xmm
|
|
// VFMSUB231PD ymm ymm ymm
|
|
// VFMSUB231PD m128 xmm k xmm
|
|
// VFMSUB231PD m256 ymm k ymm
|
|
// VFMSUB231PD xmm xmm k xmm
|
|
// VFMSUB231PD ymm ymm k ymm
|
|
// VFMSUB231PD m512 zmm k zmm
|
|
// VFMSUB231PD m512 zmm zmm
|
|
// VFMSUB231PD zmm zmm k zmm
|
|
// VFMSUB231PD zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB231PD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB231PD(ops ...operand.Op) { ctx.VFMSUB231PD(ops...) }
|
|
|
|
// VFMSUB231PD_BCST: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231PD.BCST m64 xmm k xmm
|
|
// VFMSUB231PD.BCST m64 xmm xmm
|
|
// VFMSUB231PD.BCST m64 ymm k ymm
|
|
// VFMSUB231PD.BCST m64 ymm ymm
|
|
// VFMSUB231PD.BCST m64 zmm k zmm
|
|
// VFMSUB231PD.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB231PD.BCST instruction to the active function.
|
|
func (c *Context) VFMSUB231PD_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMSUB231PD_BCST(ops...))
|
|
}
|
|
|
|
// VFMSUB231PD_BCST: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231PD.BCST m64 xmm k xmm
|
|
// VFMSUB231PD.BCST m64 xmm xmm
|
|
// VFMSUB231PD.BCST m64 ymm k ymm
|
|
// VFMSUB231PD.BCST m64 ymm ymm
|
|
// VFMSUB231PD.BCST m64 zmm k zmm
|
|
// VFMSUB231PD.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB231PD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB231PD_BCST(ops ...operand.Op) { ctx.VFMSUB231PD_BCST(ops...) }
|
|
|
|
// VFMSUB231PD_BCST_Z: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231PD.BCST.Z m64 xmm k xmm
|
|
// VFMSUB231PD.BCST.Z m64 ymm k ymm
|
|
// VFMSUB231PD.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB231PD.BCST.Z instruction to the active function.
|
|
func (c *Context) VFMSUB231PD_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VFMSUB231PD_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VFMSUB231PD_BCST_Z: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231PD.BCST.Z m64 xmm k xmm
|
|
// VFMSUB231PD.BCST.Z m64 ymm k ymm
|
|
// VFMSUB231PD.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB231PD.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB231PD_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VFMSUB231PD_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VFMSUB231PD_RD_SAE: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231PD.RD_SAE zmm zmm k zmm
|
|
// VFMSUB231PD.RD_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB231PD.RD_SAE instruction to the active function.
|
|
func (c *Context) VFMSUB231PD_RD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMSUB231PD_RD_SAE(ops...))
|
|
}
|
|
|
|
// VFMSUB231PD_RD_SAE: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231PD.RD_SAE zmm zmm k zmm
|
|
// VFMSUB231PD.RD_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB231PD.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB231PD_RD_SAE(ops ...operand.Op) { ctx.VFMSUB231PD_RD_SAE(ops...) }
|
|
|
|
// VFMSUB231PD_RD_SAE_Z: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231PD.RD_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB231PD.RD_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMSUB231PD_RD_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFMSUB231PD_RD_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFMSUB231PD_RD_SAE_Z: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231PD.RD_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB231PD.RD_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB231PD_RD_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMSUB231PD_RD_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMSUB231PD_RN_SAE: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231PD.RN_SAE zmm zmm k zmm
|
|
// VFMSUB231PD.RN_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB231PD.RN_SAE instruction to the active function.
|
|
func (c *Context) VFMSUB231PD_RN_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMSUB231PD_RN_SAE(ops...))
|
|
}
|
|
|
|
// VFMSUB231PD_RN_SAE: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231PD.RN_SAE zmm zmm k zmm
|
|
// VFMSUB231PD.RN_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB231PD.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB231PD_RN_SAE(ops ...operand.Op) { ctx.VFMSUB231PD_RN_SAE(ops...) }
|
|
|
|
// VFMSUB231PD_RN_SAE_Z: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231PD.RN_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB231PD.RN_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMSUB231PD_RN_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFMSUB231PD_RN_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFMSUB231PD_RN_SAE_Z: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231PD.RN_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB231PD.RN_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB231PD_RN_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMSUB231PD_RN_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMSUB231PD_RU_SAE: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231PD.RU_SAE zmm zmm k zmm
|
|
// VFMSUB231PD.RU_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB231PD.RU_SAE instruction to the active function.
|
|
func (c *Context) VFMSUB231PD_RU_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMSUB231PD_RU_SAE(ops...))
|
|
}
|
|
|
|
// VFMSUB231PD_RU_SAE: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231PD.RU_SAE zmm zmm k zmm
|
|
// VFMSUB231PD.RU_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB231PD.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB231PD_RU_SAE(ops ...operand.Op) { ctx.VFMSUB231PD_RU_SAE(ops...) }
|
|
|
|
// VFMSUB231PD_RU_SAE_Z: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231PD.RU_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB231PD.RU_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMSUB231PD_RU_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFMSUB231PD_RU_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFMSUB231PD_RU_SAE_Z: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231PD.RU_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB231PD.RU_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB231PD_RU_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMSUB231PD_RU_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMSUB231PD_RZ_SAE: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231PD.RZ_SAE zmm zmm k zmm
|
|
// VFMSUB231PD.RZ_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB231PD.RZ_SAE instruction to the active function.
|
|
func (c *Context) VFMSUB231PD_RZ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMSUB231PD_RZ_SAE(ops...))
|
|
}
|
|
|
|
// VFMSUB231PD_RZ_SAE: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231PD.RZ_SAE zmm zmm k zmm
|
|
// VFMSUB231PD.RZ_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB231PD.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB231PD_RZ_SAE(ops ...operand.Op) { ctx.VFMSUB231PD_RZ_SAE(ops...) }
|
|
|
|
// VFMSUB231PD_RZ_SAE_Z: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231PD.RZ_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB231PD.RZ_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMSUB231PD_RZ_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFMSUB231PD_RZ_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFMSUB231PD_RZ_SAE_Z: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231PD.RZ_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB231PD.RZ_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB231PD_RZ_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMSUB231PD_RZ_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMSUB231PD_Z: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231PD.Z m128 xmm k xmm
|
|
// VFMSUB231PD.Z m256 ymm k ymm
|
|
// VFMSUB231PD.Z xmm xmm k xmm
|
|
// VFMSUB231PD.Z ymm ymm k ymm
|
|
// VFMSUB231PD.Z m512 zmm k zmm
|
|
// VFMSUB231PD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB231PD.Z instruction to the active function.
|
|
func (c *Context) VFMSUB231PD_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VFMSUB231PD_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VFMSUB231PD_Z: Fused Multiply-Subtract of Packed Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231PD.Z m128 xmm k xmm
|
|
// VFMSUB231PD.Z m256 ymm k ymm
|
|
// VFMSUB231PD.Z xmm xmm k xmm
|
|
// VFMSUB231PD.Z ymm ymm k ymm
|
|
// VFMSUB231PD.Z m512 zmm k zmm
|
|
// VFMSUB231PD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB231PD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB231PD_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VFMSUB231PD_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VFMSUB231PS: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231PS m128 xmm xmm
|
|
// VFMSUB231PS m256 ymm ymm
|
|
// VFMSUB231PS xmm xmm xmm
|
|
// VFMSUB231PS ymm ymm ymm
|
|
// VFMSUB231PS m128 xmm k xmm
|
|
// VFMSUB231PS m256 ymm k ymm
|
|
// VFMSUB231PS xmm xmm k xmm
|
|
// VFMSUB231PS ymm ymm k ymm
|
|
// VFMSUB231PS m512 zmm k zmm
|
|
// VFMSUB231PS m512 zmm zmm
|
|
// VFMSUB231PS zmm zmm k zmm
|
|
// VFMSUB231PS zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB231PS instruction to the active function.
|
|
func (c *Context) VFMSUB231PS(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMSUB231PS(ops...))
|
|
}
|
|
|
|
// VFMSUB231PS: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231PS m128 xmm xmm
|
|
// VFMSUB231PS m256 ymm ymm
|
|
// VFMSUB231PS xmm xmm xmm
|
|
// VFMSUB231PS ymm ymm ymm
|
|
// VFMSUB231PS m128 xmm k xmm
|
|
// VFMSUB231PS m256 ymm k ymm
|
|
// VFMSUB231PS xmm xmm k xmm
|
|
// VFMSUB231PS ymm ymm k ymm
|
|
// VFMSUB231PS m512 zmm k zmm
|
|
// VFMSUB231PS m512 zmm zmm
|
|
// VFMSUB231PS zmm zmm k zmm
|
|
// VFMSUB231PS zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB231PS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB231PS(ops ...operand.Op) { ctx.VFMSUB231PS(ops...) }
|
|
|
|
// VFMSUB231PS_BCST: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231PS.BCST m32 xmm k xmm
|
|
// VFMSUB231PS.BCST m32 xmm xmm
|
|
// VFMSUB231PS.BCST m32 ymm k ymm
|
|
// VFMSUB231PS.BCST m32 ymm ymm
|
|
// VFMSUB231PS.BCST m32 zmm k zmm
|
|
// VFMSUB231PS.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB231PS.BCST instruction to the active function.
|
|
func (c *Context) VFMSUB231PS_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMSUB231PS_BCST(ops...))
|
|
}
|
|
|
|
// VFMSUB231PS_BCST: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231PS.BCST m32 xmm k xmm
|
|
// VFMSUB231PS.BCST m32 xmm xmm
|
|
// VFMSUB231PS.BCST m32 ymm k ymm
|
|
// VFMSUB231PS.BCST m32 ymm ymm
|
|
// VFMSUB231PS.BCST m32 zmm k zmm
|
|
// VFMSUB231PS.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB231PS.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB231PS_BCST(ops ...operand.Op) { ctx.VFMSUB231PS_BCST(ops...) }
|
|
|
|
// VFMSUB231PS_BCST_Z: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231PS.BCST.Z m32 xmm k xmm
|
|
// VFMSUB231PS.BCST.Z m32 ymm k ymm
|
|
// VFMSUB231PS.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB231PS.BCST.Z instruction to the active function.
|
|
func (c *Context) VFMSUB231PS_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VFMSUB231PS_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VFMSUB231PS_BCST_Z: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231PS.BCST.Z m32 xmm k xmm
|
|
// VFMSUB231PS.BCST.Z m32 ymm k ymm
|
|
// VFMSUB231PS.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB231PS.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB231PS_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VFMSUB231PS_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VFMSUB231PS_RD_SAE: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231PS.RD_SAE zmm zmm k zmm
|
|
// VFMSUB231PS.RD_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB231PS.RD_SAE instruction to the active function.
|
|
func (c *Context) VFMSUB231PS_RD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMSUB231PS_RD_SAE(ops...))
|
|
}
|
|
|
|
// VFMSUB231PS_RD_SAE: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231PS.RD_SAE zmm zmm k zmm
|
|
// VFMSUB231PS.RD_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB231PS.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB231PS_RD_SAE(ops ...operand.Op) { ctx.VFMSUB231PS_RD_SAE(ops...) }
|
|
|
|
// VFMSUB231PS_RD_SAE_Z: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231PS.RD_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB231PS.RD_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMSUB231PS_RD_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFMSUB231PS_RD_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFMSUB231PS_RD_SAE_Z: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231PS.RD_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB231PS.RD_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB231PS_RD_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMSUB231PS_RD_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMSUB231PS_RN_SAE: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231PS.RN_SAE zmm zmm k zmm
|
|
// VFMSUB231PS.RN_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB231PS.RN_SAE instruction to the active function.
|
|
func (c *Context) VFMSUB231PS_RN_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMSUB231PS_RN_SAE(ops...))
|
|
}
|
|
|
|
// VFMSUB231PS_RN_SAE: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231PS.RN_SAE zmm zmm k zmm
|
|
// VFMSUB231PS.RN_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB231PS.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB231PS_RN_SAE(ops ...operand.Op) { ctx.VFMSUB231PS_RN_SAE(ops...) }
|
|
|
|
// VFMSUB231PS_RN_SAE_Z: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231PS.RN_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB231PS.RN_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMSUB231PS_RN_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFMSUB231PS_RN_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFMSUB231PS_RN_SAE_Z: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231PS.RN_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB231PS.RN_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB231PS_RN_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMSUB231PS_RN_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMSUB231PS_RU_SAE: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231PS.RU_SAE zmm zmm k zmm
|
|
// VFMSUB231PS.RU_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB231PS.RU_SAE instruction to the active function.
|
|
func (c *Context) VFMSUB231PS_RU_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMSUB231PS_RU_SAE(ops...))
|
|
}
|
|
|
|
// VFMSUB231PS_RU_SAE: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231PS.RU_SAE zmm zmm k zmm
|
|
// VFMSUB231PS.RU_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB231PS.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB231PS_RU_SAE(ops ...operand.Op) { ctx.VFMSUB231PS_RU_SAE(ops...) }
|
|
|
|
// VFMSUB231PS_RU_SAE_Z: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231PS.RU_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB231PS.RU_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMSUB231PS_RU_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFMSUB231PS_RU_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFMSUB231PS_RU_SAE_Z: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231PS.RU_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB231PS.RU_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB231PS_RU_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMSUB231PS_RU_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMSUB231PS_RZ_SAE: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231PS.RZ_SAE zmm zmm k zmm
|
|
// VFMSUB231PS.RZ_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB231PS.RZ_SAE instruction to the active function.
|
|
func (c *Context) VFMSUB231PS_RZ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMSUB231PS_RZ_SAE(ops...))
|
|
}
|
|
|
|
// VFMSUB231PS_RZ_SAE: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231PS.RZ_SAE zmm zmm k zmm
|
|
// VFMSUB231PS.RZ_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUB231PS.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB231PS_RZ_SAE(ops ...operand.Op) { ctx.VFMSUB231PS_RZ_SAE(ops...) }
|
|
|
|
// VFMSUB231PS_RZ_SAE_Z: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231PS.RZ_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB231PS.RZ_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMSUB231PS_RZ_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFMSUB231PS_RZ_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFMSUB231PS_RZ_SAE_Z: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231PS.RZ_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB231PS.RZ_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB231PS_RZ_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMSUB231PS_RZ_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMSUB231PS_Z: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231PS.Z m128 xmm k xmm
|
|
// VFMSUB231PS.Z m256 ymm k ymm
|
|
// VFMSUB231PS.Z xmm xmm k xmm
|
|
// VFMSUB231PS.Z ymm ymm k ymm
|
|
// VFMSUB231PS.Z m512 zmm k zmm
|
|
// VFMSUB231PS.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB231PS.Z instruction to the active function.
|
|
func (c *Context) VFMSUB231PS_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VFMSUB231PS_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VFMSUB231PS_Z: Fused Multiply-Subtract of Packed Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231PS.Z m128 xmm k xmm
|
|
// VFMSUB231PS.Z m256 ymm k ymm
|
|
// VFMSUB231PS.Z xmm xmm k xmm
|
|
// VFMSUB231PS.Z ymm ymm k ymm
|
|
// VFMSUB231PS.Z m512 zmm k zmm
|
|
// VFMSUB231PS.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUB231PS.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB231PS_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VFMSUB231PS_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VFMSUB231SD: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231SD m64 xmm xmm
|
|
// VFMSUB231SD xmm xmm xmm
|
|
// VFMSUB231SD m64 xmm k xmm
|
|
// VFMSUB231SD xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMSUB231SD instruction to the active function.
|
|
func (c *Context) VFMSUB231SD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMSUB231SD(ops...))
|
|
}
|
|
|
|
// VFMSUB231SD: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231SD m64 xmm xmm
|
|
// VFMSUB231SD xmm xmm xmm
|
|
// VFMSUB231SD m64 xmm k xmm
|
|
// VFMSUB231SD xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMSUB231SD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB231SD(ops ...operand.Op) { ctx.VFMSUB231SD(ops...) }
|
|
|
|
// VFMSUB231SD_RD_SAE: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231SD.RD_SAE xmm xmm k xmm
|
|
// VFMSUB231SD.RD_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFMSUB231SD.RD_SAE instruction to the active function.
|
|
func (c *Context) VFMSUB231SD_RD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMSUB231SD_RD_SAE(ops...))
|
|
}
|
|
|
|
// VFMSUB231SD_RD_SAE: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231SD.RD_SAE xmm xmm k xmm
|
|
// VFMSUB231SD.RD_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFMSUB231SD.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB231SD_RD_SAE(ops ...operand.Op) { ctx.VFMSUB231SD_RD_SAE(ops...) }
|
|
|
|
// VFMSUB231SD_RD_SAE_Z: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231SD.RD_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMSUB231SD.RD_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMSUB231SD_RD_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VFMSUB231SD_RD_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VFMSUB231SD_RD_SAE_Z: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231SD.RD_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMSUB231SD.RD_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB231SD_RD_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFMSUB231SD_RD_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFMSUB231SD_RN_SAE: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231SD.RN_SAE xmm xmm k xmm
|
|
// VFMSUB231SD.RN_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFMSUB231SD.RN_SAE instruction to the active function.
|
|
func (c *Context) VFMSUB231SD_RN_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMSUB231SD_RN_SAE(ops...))
|
|
}
|
|
|
|
// VFMSUB231SD_RN_SAE: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231SD.RN_SAE xmm xmm k xmm
|
|
// VFMSUB231SD.RN_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFMSUB231SD.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB231SD_RN_SAE(ops ...operand.Op) { ctx.VFMSUB231SD_RN_SAE(ops...) }
|
|
|
|
// VFMSUB231SD_RN_SAE_Z: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231SD.RN_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMSUB231SD.RN_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMSUB231SD_RN_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VFMSUB231SD_RN_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VFMSUB231SD_RN_SAE_Z: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231SD.RN_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFMSUB231SD.RN_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB231SD_RN_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFMSUB231SD_RN_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFMSUB231SD_RU_SAE: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231SD.RU_SAE xmm xmm k xmm
|
|
// VFMSUB231SD.RU_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFMSUB231SD.RU_SAE instruction to the active function.
|
|
func (c *Context) VFMSUB231SD_RU_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMSUB231SD_RU_SAE(ops...))
|
|
}
|
|
|
|
// VFMSUB231SD_RU_SAE: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUB231SD.RU_SAE xmm xmm k xmm
|
|
// VFMSUB231SD.RU_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFMSUB231SD.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUB231SD_RU_SAE(ops ...operand.Op) { ctx.VFMSUB231SD_RU_SAE(ops...) }
|
|
|
|
// VFMSUB231SD_RU_SAE_Z: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VFMSUB231SD.RU_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMSUB231SD.RU_SAE.Z instruction to the active function.
func (c *Context) VFMSUB231SD_RU_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VFMSUB231SD_RU_SAE_Z(x, x1, k, x2))
}

// VFMSUB231SD_RU_SAE_Z: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VFMSUB231SD.RU_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMSUB231SD.RU_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMSUB231SD_RU_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFMSUB231SD_RU_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFMSUB231SD_RZ_SAE: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VFMSUB231SD.RZ_SAE xmm xmm k xmm
//	VFMSUB231SD.RZ_SAE xmm xmm xmm
//
// Construct and append a VFMSUB231SD.RZ_SAE instruction to the active function.
func (c *Context) VFMSUB231SD_RZ_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMSUB231SD_RZ_SAE(ops...))
}

// VFMSUB231SD_RZ_SAE: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VFMSUB231SD.RZ_SAE xmm xmm k xmm
//	VFMSUB231SD.RZ_SAE xmm xmm xmm
//
// Construct and append a VFMSUB231SD.RZ_SAE instruction to the active function.
// Operates on the global context.
func VFMSUB231SD_RZ_SAE(ops ...operand.Op) { ctx.VFMSUB231SD_RZ_SAE(ops...) }
|
|
|
|
// VFMSUB231SD_RZ_SAE_Z: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VFMSUB231SD.RZ_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMSUB231SD.RZ_SAE.Z instruction to the active function.
func (c *Context) VFMSUB231SD_RZ_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VFMSUB231SD_RZ_SAE_Z(x, x1, k, x2))
}

// VFMSUB231SD_RZ_SAE_Z: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VFMSUB231SD.RZ_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMSUB231SD.RZ_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMSUB231SD_RZ_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFMSUB231SD_RZ_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFMSUB231SD_Z: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VFMSUB231SD.Z m64 xmm k xmm
//	VFMSUB231SD.Z xmm xmm k xmm
//
// Construct and append a VFMSUB231SD.Z instruction to the active function.
func (c *Context) VFMSUB231SD_Z(mx, x, k, x1 operand.Op) {
	c.addinstruction(x86.VFMSUB231SD_Z(mx, x, k, x1))
}

// VFMSUB231SD_Z: Fused Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VFMSUB231SD.Z m64 xmm k xmm
//	VFMSUB231SD.Z xmm xmm k xmm
//
// Construct and append a VFMSUB231SD.Z instruction to the active function.
// Operates on the global context.
func VFMSUB231SD_Z(mx, x, k, x1 operand.Op) { ctx.VFMSUB231SD_Z(mx, x, k, x1) }
|
|
|
|
// VFMSUB231SS: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values.
//
// Forms:
//
//	VFMSUB231SS m32 xmm xmm
//	VFMSUB231SS xmm xmm xmm
//	VFMSUB231SS m32 xmm k xmm
//	VFMSUB231SS xmm xmm k xmm
//
// Construct and append a VFMSUB231SS instruction to the active function.
func (c *Context) VFMSUB231SS(ops ...operand.Op) {
	c.addinstruction(x86.VFMSUB231SS(ops...))
}

// VFMSUB231SS: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values.
//
// Forms:
//
//	VFMSUB231SS m32 xmm xmm
//	VFMSUB231SS xmm xmm xmm
//	VFMSUB231SS m32 xmm k xmm
//	VFMSUB231SS xmm xmm k xmm
//
// Construct and append a VFMSUB231SS instruction to the active function.
// Operates on the global context.
func VFMSUB231SS(ops ...operand.Op) { ctx.VFMSUB231SS(ops...) }
|
|
|
|
// VFMSUB231SS_RD_SAE: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VFMSUB231SS.RD_SAE xmm xmm k xmm
//	VFMSUB231SS.RD_SAE xmm xmm xmm
//
// Construct and append a VFMSUB231SS.RD_SAE instruction to the active function.
func (c *Context) VFMSUB231SS_RD_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMSUB231SS_RD_SAE(ops...))
}

// VFMSUB231SS_RD_SAE: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VFMSUB231SS.RD_SAE xmm xmm k xmm
//	VFMSUB231SS.RD_SAE xmm xmm xmm
//
// Construct and append a VFMSUB231SS.RD_SAE instruction to the active function.
// Operates on the global context.
func VFMSUB231SS_RD_SAE(ops ...operand.Op) { ctx.VFMSUB231SS_RD_SAE(ops...) }
|
|
|
|
// VFMSUB231SS_RD_SAE_Z: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VFMSUB231SS.RD_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMSUB231SS.RD_SAE.Z instruction to the active function.
func (c *Context) VFMSUB231SS_RD_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VFMSUB231SS_RD_SAE_Z(x, x1, k, x2))
}

// VFMSUB231SS_RD_SAE_Z: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VFMSUB231SS.RD_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMSUB231SS.RD_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMSUB231SS_RD_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFMSUB231SS_RD_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFMSUB231SS_RN_SAE: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VFMSUB231SS.RN_SAE xmm xmm k xmm
//	VFMSUB231SS.RN_SAE xmm xmm xmm
//
// Construct and append a VFMSUB231SS.RN_SAE instruction to the active function.
func (c *Context) VFMSUB231SS_RN_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMSUB231SS_RN_SAE(ops...))
}

// VFMSUB231SS_RN_SAE: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VFMSUB231SS.RN_SAE xmm xmm k xmm
//	VFMSUB231SS.RN_SAE xmm xmm xmm
//
// Construct and append a VFMSUB231SS.RN_SAE instruction to the active function.
// Operates on the global context.
func VFMSUB231SS_RN_SAE(ops ...operand.Op) { ctx.VFMSUB231SS_RN_SAE(ops...) }
|
|
|
|
// VFMSUB231SS_RN_SAE_Z: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VFMSUB231SS.RN_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMSUB231SS.RN_SAE.Z instruction to the active function.
func (c *Context) VFMSUB231SS_RN_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VFMSUB231SS_RN_SAE_Z(x, x1, k, x2))
}

// VFMSUB231SS_RN_SAE_Z: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VFMSUB231SS.RN_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMSUB231SS.RN_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMSUB231SS_RN_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFMSUB231SS_RN_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFMSUB231SS_RU_SAE: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VFMSUB231SS.RU_SAE xmm xmm k xmm
//	VFMSUB231SS.RU_SAE xmm xmm xmm
//
// Construct and append a VFMSUB231SS.RU_SAE instruction to the active function.
func (c *Context) VFMSUB231SS_RU_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMSUB231SS_RU_SAE(ops...))
}

// VFMSUB231SS_RU_SAE: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VFMSUB231SS.RU_SAE xmm xmm k xmm
//	VFMSUB231SS.RU_SAE xmm xmm xmm
//
// Construct and append a VFMSUB231SS.RU_SAE instruction to the active function.
// Operates on the global context.
func VFMSUB231SS_RU_SAE(ops ...operand.Op) { ctx.VFMSUB231SS_RU_SAE(ops...) }
|
|
|
|
// VFMSUB231SS_RU_SAE_Z: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VFMSUB231SS.RU_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMSUB231SS.RU_SAE.Z instruction to the active function.
func (c *Context) VFMSUB231SS_RU_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VFMSUB231SS_RU_SAE_Z(x, x1, k, x2))
}

// VFMSUB231SS_RU_SAE_Z: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VFMSUB231SS.RU_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMSUB231SS.RU_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMSUB231SS_RU_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFMSUB231SS_RU_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFMSUB231SS_RZ_SAE: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VFMSUB231SS.RZ_SAE xmm xmm k xmm
//	VFMSUB231SS.RZ_SAE xmm xmm xmm
//
// Construct and append a VFMSUB231SS.RZ_SAE instruction to the active function.
func (c *Context) VFMSUB231SS_RZ_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMSUB231SS_RZ_SAE(ops...))
}

// VFMSUB231SS_RZ_SAE: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VFMSUB231SS.RZ_SAE xmm xmm k xmm
//	VFMSUB231SS.RZ_SAE xmm xmm xmm
//
// Construct and append a VFMSUB231SS.RZ_SAE instruction to the active function.
// Operates on the global context.
func VFMSUB231SS_RZ_SAE(ops ...operand.Op) { ctx.VFMSUB231SS_RZ_SAE(ops...) }
|
|
|
|
// VFMSUB231SS_RZ_SAE_Z: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VFMSUB231SS.RZ_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMSUB231SS.RZ_SAE.Z instruction to the active function.
func (c *Context) VFMSUB231SS_RZ_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VFMSUB231SS_RZ_SAE_Z(x, x1, k, x2))
}

// VFMSUB231SS_RZ_SAE_Z: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VFMSUB231SS.RZ_SAE.Z xmm xmm k xmm
//
// Construct and append a VFMSUB231SS.RZ_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMSUB231SS_RZ_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFMSUB231SS_RZ_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFMSUB231SS_Z: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VFMSUB231SS.Z m32 xmm k xmm
//	VFMSUB231SS.Z xmm xmm k xmm
//
// Construct and append a VFMSUB231SS.Z instruction to the active function.
func (c *Context) VFMSUB231SS_Z(mx, x, k, x1 operand.Op) {
	c.addinstruction(x86.VFMSUB231SS_Z(mx, x, k, x1))
}

// VFMSUB231SS_Z: Fused Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VFMSUB231SS.Z m32 xmm k xmm
//	VFMSUB231SS.Z xmm xmm k xmm
//
// Construct and append a VFMSUB231SS.Z instruction to the active function.
// Operates on the global context.
func VFMSUB231SS_Z(mx, x, k, x1 operand.Op) { ctx.VFMSUB231SS_Z(mx, x, k, x1) }
|
|
|
|
// VFMSUBADD132PD: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values.
//
// Forms:
//
//	VFMSUBADD132PD m128 xmm xmm
//	VFMSUBADD132PD m256 ymm ymm
//	VFMSUBADD132PD xmm xmm xmm
//	VFMSUBADD132PD ymm ymm ymm
//	VFMSUBADD132PD m128 xmm k xmm
//	VFMSUBADD132PD m256 ymm k ymm
//	VFMSUBADD132PD xmm xmm k xmm
//	VFMSUBADD132PD ymm ymm k ymm
//	VFMSUBADD132PD m512 zmm k zmm
//	VFMSUBADD132PD m512 zmm zmm
//	VFMSUBADD132PD zmm zmm k zmm
//	VFMSUBADD132PD zmm zmm zmm
//
// Construct and append a VFMSUBADD132PD instruction to the active function.
func (c *Context) VFMSUBADD132PD(ops ...operand.Op) {
	c.addinstruction(x86.VFMSUBADD132PD(ops...))
}

// VFMSUBADD132PD: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values.
//
// Forms:
//
//	VFMSUBADD132PD m128 xmm xmm
//	VFMSUBADD132PD m256 ymm ymm
//	VFMSUBADD132PD xmm xmm xmm
//	VFMSUBADD132PD ymm ymm ymm
//	VFMSUBADD132PD m128 xmm k xmm
//	VFMSUBADD132PD m256 ymm k ymm
//	VFMSUBADD132PD xmm xmm k xmm
//	VFMSUBADD132PD ymm ymm k ymm
//	VFMSUBADD132PD m512 zmm k zmm
//	VFMSUBADD132PD m512 zmm zmm
//	VFMSUBADD132PD zmm zmm k zmm
//	VFMSUBADD132PD zmm zmm zmm
//
// Construct and append a VFMSUBADD132PD instruction to the active function.
// Operates on the global context.
func VFMSUBADD132PD(ops ...operand.Op) { ctx.VFMSUBADD132PD(ops...) }
|
|
|
|
// VFMSUBADD132PD_BCST: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VFMSUBADD132PD.BCST m64 xmm k xmm
//	VFMSUBADD132PD.BCST m64 xmm xmm
//	VFMSUBADD132PD.BCST m64 ymm k ymm
//	VFMSUBADD132PD.BCST m64 ymm ymm
//	VFMSUBADD132PD.BCST m64 zmm k zmm
//	VFMSUBADD132PD.BCST m64 zmm zmm
//
// Construct and append a VFMSUBADD132PD.BCST instruction to the active function.
func (c *Context) VFMSUBADD132PD_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VFMSUBADD132PD_BCST(ops...))
}

// VFMSUBADD132PD_BCST: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VFMSUBADD132PD.BCST m64 xmm k xmm
//	VFMSUBADD132PD.BCST m64 xmm xmm
//	VFMSUBADD132PD.BCST m64 ymm k ymm
//	VFMSUBADD132PD.BCST m64 ymm ymm
//	VFMSUBADD132PD.BCST m64 zmm k zmm
//	VFMSUBADD132PD.BCST m64 zmm zmm
//
// Construct and append a VFMSUBADD132PD.BCST instruction to the active function.
// Operates on the global context.
func VFMSUBADD132PD_BCST(ops ...operand.Op) { ctx.VFMSUBADD132PD_BCST(ops...) }
|
|
|
|
// VFMSUBADD132PD_BCST_Z: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VFMSUBADD132PD.BCST.Z m64 xmm k xmm
//	VFMSUBADD132PD.BCST.Z m64 ymm k ymm
//	VFMSUBADD132PD.BCST.Z m64 zmm k zmm
//
// Construct and append a VFMSUBADD132PD.BCST.Z instruction to the active function.
func (c *Context) VFMSUBADD132PD_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VFMSUBADD132PD_BCST_Z(m, xyz, k, xyz1))
}

// VFMSUBADD132PD_BCST_Z: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VFMSUBADD132PD.BCST.Z m64 xmm k xmm
//	VFMSUBADD132PD.BCST.Z m64 ymm k ymm
//	VFMSUBADD132PD.BCST.Z m64 zmm k zmm
//
// Construct and append a VFMSUBADD132PD.BCST.Z instruction to the active function.
// Operates on the global context.
func VFMSUBADD132PD_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VFMSUBADD132PD_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VFMSUBADD132PD_RD_SAE: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VFMSUBADD132PD.RD_SAE zmm zmm k zmm
//	VFMSUBADD132PD.RD_SAE zmm zmm zmm
//
// Construct and append a VFMSUBADD132PD.RD_SAE instruction to the active function.
func (c *Context) VFMSUBADD132PD_RD_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMSUBADD132PD_RD_SAE(ops...))
}

// VFMSUBADD132PD_RD_SAE: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VFMSUBADD132PD.RD_SAE zmm zmm k zmm
//	VFMSUBADD132PD.RD_SAE zmm zmm zmm
//
// Construct and append a VFMSUBADD132PD.RD_SAE instruction to the active function.
// Operates on the global context.
func VFMSUBADD132PD_RD_SAE(ops ...operand.Op) { ctx.VFMSUBADD132PD_RD_SAE(ops...) }
|
|
|
|
// VFMSUBADD132PD_RD_SAE_Z: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VFMSUBADD132PD.RD_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMSUBADD132PD.RD_SAE.Z instruction to the active function.
func (c *Context) VFMSUBADD132PD_RD_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VFMSUBADD132PD_RD_SAE_Z(z, z1, k, z2))
}

// VFMSUBADD132PD_RD_SAE_Z: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VFMSUBADD132PD.RD_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMSUBADD132PD.RD_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMSUBADD132PD_RD_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMSUBADD132PD_RD_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMSUBADD132PD_RN_SAE: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VFMSUBADD132PD.RN_SAE zmm zmm k zmm
//	VFMSUBADD132PD.RN_SAE zmm zmm zmm
//
// Construct and append a VFMSUBADD132PD.RN_SAE instruction to the active function.
func (c *Context) VFMSUBADD132PD_RN_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMSUBADD132PD_RN_SAE(ops...))
}

// VFMSUBADD132PD_RN_SAE: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VFMSUBADD132PD.RN_SAE zmm zmm k zmm
//	VFMSUBADD132PD.RN_SAE zmm zmm zmm
//
// Construct and append a VFMSUBADD132PD.RN_SAE instruction to the active function.
// Operates on the global context.
func VFMSUBADD132PD_RN_SAE(ops ...operand.Op) { ctx.VFMSUBADD132PD_RN_SAE(ops...) }
|
|
|
|
// VFMSUBADD132PD_RN_SAE_Z: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VFMSUBADD132PD.RN_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMSUBADD132PD.RN_SAE.Z instruction to the active function.
func (c *Context) VFMSUBADD132PD_RN_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VFMSUBADD132PD_RN_SAE_Z(z, z1, k, z2))
}

// VFMSUBADD132PD_RN_SAE_Z: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VFMSUBADD132PD.RN_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMSUBADD132PD.RN_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMSUBADD132PD_RN_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMSUBADD132PD_RN_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMSUBADD132PD_RU_SAE: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VFMSUBADD132PD.RU_SAE zmm zmm k zmm
//	VFMSUBADD132PD.RU_SAE zmm zmm zmm
//
// Construct and append a VFMSUBADD132PD.RU_SAE instruction to the active function.
func (c *Context) VFMSUBADD132PD_RU_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMSUBADD132PD_RU_SAE(ops...))
}

// VFMSUBADD132PD_RU_SAE: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VFMSUBADD132PD.RU_SAE zmm zmm k zmm
//	VFMSUBADD132PD.RU_SAE zmm zmm zmm
//
// Construct and append a VFMSUBADD132PD.RU_SAE instruction to the active function.
// Operates on the global context.
func VFMSUBADD132PD_RU_SAE(ops ...operand.Op) { ctx.VFMSUBADD132PD_RU_SAE(ops...) }
|
|
|
|
// VFMSUBADD132PD_RU_SAE_Z: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VFMSUBADD132PD.RU_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMSUBADD132PD.RU_SAE.Z instruction to the active function.
func (c *Context) VFMSUBADD132PD_RU_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VFMSUBADD132PD_RU_SAE_Z(z, z1, k, z2))
}

// VFMSUBADD132PD_RU_SAE_Z: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VFMSUBADD132PD.RU_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMSUBADD132PD.RU_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMSUBADD132PD_RU_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMSUBADD132PD_RU_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMSUBADD132PD_RZ_SAE: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VFMSUBADD132PD.RZ_SAE zmm zmm k zmm
//	VFMSUBADD132PD.RZ_SAE zmm zmm zmm
//
// Construct and append a VFMSUBADD132PD.RZ_SAE instruction to the active function.
func (c *Context) VFMSUBADD132PD_RZ_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMSUBADD132PD_RZ_SAE(ops...))
}

// VFMSUBADD132PD_RZ_SAE: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VFMSUBADD132PD.RZ_SAE zmm zmm k zmm
//	VFMSUBADD132PD.RZ_SAE zmm zmm zmm
//
// Construct and append a VFMSUBADD132PD.RZ_SAE instruction to the active function.
// Operates on the global context.
func VFMSUBADD132PD_RZ_SAE(ops ...operand.Op) { ctx.VFMSUBADD132PD_RZ_SAE(ops...) }
|
|
|
|
// VFMSUBADD132PD_RZ_SAE_Z: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VFMSUBADD132PD.RZ_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMSUBADD132PD.RZ_SAE.Z instruction to the active function.
func (c *Context) VFMSUBADD132PD_RZ_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VFMSUBADD132PD_RZ_SAE_Z(z, z1, k, z2))
}

// VFMSUBADD132PD_RZ_SAE_Z: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VFMSUBADD132PD.RZ_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMSUBADD132PD.RZ_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMSUBADD132PD_RZ_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMSUBADD132PD_RZ_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMSUBADD132PD_Z: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VFMSUBADD132PD.Z m128 xmm k xmm
//	VFMSUBADD132PD.Z m256 ymm k ymm
//	VFMSUBADD132PD.Z xmm xmm k xmm
//	VFMSUBADD132PD.Z ymm ymm k ymm
//	VFMSUBADD132PD.Z m512 zmm k zmm
//	VFMSUBADD132PD.Z zmm zmm k zmm
//
// Construct and append a VFMSUBADD132PD.Z instruction to the active function.
func (c *Context) VFMSUBADD132PD_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VFMSUBADD132PD_Z(mxyz, xyz, k, xyz1))
}

// VFMSUBADD132PD_Z: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VFMSUBADD132PD.Z m128 xmm k xmm
//	VFMSUBADD132PD.Z m256 ymm k ymm
//	VFMSUBADD132PD.Z xmm xmm k xmm
//	VFMSUBADD132PD.Z ymm ymm k ymm
//	VFMSUBADD132PD.Z m512 zmm k zmm
//	VFMSUBADD132PD.Z zmm zmm k zmm
//
// Construct and append a VFMSUBADD132PD.Z instruction to the active function.
// Operates on the global context.
func VFMSUBADD132PD_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VFMSUBADD132PD_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VFMSUBADD132PS: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	VFMSUBADD132PS m128 xmm xmm
//	VFMSUBADD132PS m256 ymm ymm
//	VFMSUBADD132PS xmm xmm xmm
//	VFMSUBADD132PS ymm ymm ymm
//	VFMSUBADD132PS m128 xmm k xmm
//	VFMSUBADD132PS m256 ymm k ymm
//	VFMSUBADD132PS xmm xmm k xmm
//	VFMSUBADD132PS ymm ymm k ymm
//	VFMSUBADD132PS m512 zmm k zmm
//	VFMSUBADD132PS m512 zmm zmm
//	VFMSUBADD132PS zmm zmm k zmm
//	VFMSUBADD132PS zmm zmm zmm
//
// Construct and append a VFMSUBADD132PS instruction to the active function.
func (c *Context) VFMSUBADD132PS(ops ...operand.Op) {
	c.addinstruction(x86.VFMSUBADD132PS(ops...))
}

// VFMSUBADD132PS: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	VFMSUBADD132PS m128 xmm xmm
//	VFMSUBADD132PS m256 ymm ymm
//	VFMSUBADD132PS xmm xmm xmm
//	VFMSUBADD132PS ymm ymm ymm
//	VFMSUBADD132PS m128 xmm k xmm
//	VFMSUBADD132PS m256 ymm k ymm
//	VFMSUBADD132PS xmm xmm k xmm
//	VFMSUBADD132PS ymm ymm k ymm
//	VFMSUBADD132PS m512 zmm k zmm
//	VFMSUBADD132PS m512 zmm zmm
//	VFMSUBADD132PS zmm zmm k zmm
//	VFMSUBADD132PS zmm zmm zmm
//
// Construct and append a VFMSUBADD132PS instruction to the active function.
// Operates on the global context.
func VFMSUBADD132PS(ops ...operand.Op) { ctx.VFMSUBADD132PS(ops...) }
|
|
|
|
// VFMSUBADD132PS_BCST: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VFMSUBADD132PS.BCST m32 xmm k xmm
//	VFMSUBADD132PS.BCST m32 xmm xmm
//	VFMSUBADD132PS.BCST m32 ymm k ymm
//	VFMSUBADD132PS.BCST m32 ymm ymm
//	VFMSUBADD132PS.BCST m32 zmm k zmm
//	VFMSUBADD132PS.BCST m32 zmm zmm
//
// Construct and append a VFMSUBADD132PS.BCST instruction to the active function.
func (c *Context) VFMSUBADD132PS_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VFMSUBADD132PS_BCST(ops...))
}

// VFMSUBADD132PS_BCST: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VFMSUBADD132PS.BCST m32 xmm k xmm
//	VFMSUBADD132PS.BCST m32 xmm xmm
//	VFMSUBADD132PS.BCST m32 ymm k ymm
//	VFMSUBADD132PS.BCST m32 ymm ymm
//	VFMSUBADD132PS.BCST m32 zmm k zmm
//	VFMSUBADD132PS.BCST m32 zmm zmm
//
// Construct and append a VFMSUBADD132PS.BCST instruction to the active function.
// Operates on the global context.
func VFMSUBADD132PS_BCST(ops ...operand.Op) { ctx.VFMSUBADD132PS_BCST(ops...) }
|
|
|
|
// VFMSUBADD132PS_BCST_Z: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VFMSUBADD132PS.BCST.Z m32 xmm k xmm
//	VFMSUBADD132PS.BCST.Z m32 ymm k ymm
//	VFMSUBADD132PS.BCST.Z m32 zmm k zmm
//
// Construct and append a VFMSUBADD132PS.BCST.Z instruction to the active function.
func (c *Context) VFMSUBADD132PS_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VFMSUBADD132PS_BCST_Z(m, xyz, k, xyz1))
}

// VFMSUBADD132PS_BCST_Z: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VFMSUBADD132PS.BCST.Z m32 xmm k xmm
//	VFMSUBADD132PS.BCST.Z m32 ymm k ymm
//	VFMSUBADD132PS.BCST.Z m32 zmm k zmm
//
// Construct and append a VFMSUBADD132PS.BCST.Z instruction to the active function.
// Operates on the global context.
func VFMSUBADD132PS_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VFMSUBADD132PS_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VFMSUBADD132PS_RD_SAE: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VFMSUBADD132PS.RD_SAE zmm zmm k zmm
//	VFMSUBADD132PS.RD_SAE zmm zmm zmm
//
// Construct and append a VFMSUBADD132PS.RD_SAE instruction to the active function.
func (c *Context) VFMSUBADD132PS_RD_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMSUBADD132PS_RD_SAE(ops...))
}

// VFMSUBADD132PS_RD_SAE: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VFMSUBADD132PS.RD_SAE zmm zmm k zmm
//	VFMSUBADD132PS.RD_SAE zmm zmm zmm
//
// Construct and append a VFMSUBADD132PS.RD_SAE instruction to the active function.
// Operates on the global context.
func VFMSUBADD132PS_RD_SAE(ops ...operand.Op) { ctx.VFMSUBADD132PS_RD_SAE(ops...) }
|
|
|
|
// VFMSUBADD132PS_RD_SAE_Z: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VFMSUBADD132PS.RD_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMSUBADD132PS.RD_SAE.Z instruction to the active function.
func (c *Context) VFMSUBADD132PS_RD_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VFMSUBADD132PS_RD_SAE_Z(z, z1, k, z2))
}

// VFMSUBADD132PS_RD_SAE_Z: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VFMSUBADD132PS.RD_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMSUBADD132PS.RD_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMSUBADD132PS_RD_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMSUBADD132PS_RD_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMSUBADD132PS_RN_SAE: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VFMSUBADD132PS.RN_SAE zmm zmm k zmm
//	VFMSUBADD132PS.RN_SAE zmm zmm zmm
//
// Construct and append a VFMSUBADD132PS.RN_SAE instruction to the active function.
func (c *Context) VFMSUBADD132PS_RN_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMSUBADD132PS_RN_SAE(ops...))
}

// VFMSUBADD132PS_RN_SAE: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VFMSUBADD132PS.RN_SAE zmm zmm k zmm
//	VFMSUBADD132PS.RN_SAE zmm zmm zmm
//
// Construct and append a VFMSUBADD132PS.RN_SAE instruction to the active function.
// Operates on the global context.
func VFMSUBADD132PS_RN_SAE(ops ...operand.Op) { ctx.VFMSUBADD132PS_RN_SAE(ops...) }
|
|
|
|
// VFMSUBADD132PS_RN_SAE_Z: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VFMSUBADD132PS.RN_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMSUBADD132PS.RN_SAE.Z instruction to the active function.
func (c *Context) VFMSUBADD132PS_RN_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VFMSUBADD132PS_RN_SAE_Z(z, z1, k, z2))
}

// VFMSUBADD132PS_RN_SAE_Z: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VFMSUBADD132PS.RN_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMSUBADD132PS.RN_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMSUBADD132PS_RN_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMSUBADD132PS_RN_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMSUBADD132PS_RU_SAE: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VFMSUBADD132PS.RU_SAE zmm zmm k zmm
//	VFMSUBADD132PS.RU_SAE zmm zmm zmm
//
// Construct and append a VFMSUBADD132PS.RU_SAE instruction to the active function.
func (c *Context) VFMSUBADD132PS_RU_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMSUBADD132PS_RU_SAE(ops...))
}

// VFMSUBADD132PS_RU_SAE: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VFMSUBADD132PS.RU_SAE zmm zmm k zmm
//	VFMSUBADD132PS.RU_SAE zmm zmm zmm
//
// Construct and append a VFMSUBADD132PS.RU_SAE instruction to the active function.
// Operates on the global context.
func VFMSUBADD132PS_RU_SAE(ops ...operand.Op) { ctx.VFMSUBADD132PS_RU_SAE(ops...) }
|
|
|
|
// VFMSUBADD132PS_RU_SAE_Z: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VFMSUBADD132PS.RU_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMSUBADD132PS.RU_SAE.Z instruction to the active function.
func (c *Context) VFMSUBADD132PS_RU_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VFMSUBADD132PS_RU_SAE_Z(z, z1, k, z2))
}

// VFMSUBADD132PS_RU_SAE_Z: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VFMSUBADD132PS.RU_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMSUBADD132PS.RU_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMSUBADD132PS_RU_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMSUBADD132PS_RU_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMSUBADD132PS_RZ_SAE: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VFMSUBADD132PS.RZ_SAE zmm zmm k zmm
//	VFMSUBADD132PS.RZ_SAE zmm zmm zmm
//
// Construct and append a VFMSUBADD132PS.RZ_SAE instruction to the active function.
func (c *Context) VFMSUBADD132PS_RZ_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMSUBADD132PS_RZ_SAE(ops...))
}

// VFMSUBADD132PS_RZ_SAE: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VFMSUBADD132PS.RZ_SAE zmm zmm k zmm
//	VFMSUBADD132PS.RZ_SAE zmm zmm zmm
//
// Construct and append a VFMSUBADD132PS.RZ_SAE instruction to the active function.
// Operates on the global context.
func VFMSUBADD132PS_RZ_SAE(ops ...operand.Op) { ctx.VFMSUBADD132PS_RZ_SAE(ops...) }
|
|
|
|
// VFMSUBADD132PS_RZ_SAE_Z: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VFMSUBADD132PS.RZ_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMSUBADD132PS.RZ_SAE.Z instruction to the active function.
func (c *Context) VFMSUBADD132PS_RZ_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VFMSUBADD132PS_RZ_SAE_Z(z, z1, k, z2))
}

// VFMSUBADD132PS_RZ_SAE_Z: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VFMSUBADD132PS.RZ_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMSUBADD132PS.RZ_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMSUBADD132PS_RZ_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMSUBADD132PS_RZ_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMSUBADD132PS_Z: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VFMSUBADD132PS.Z m128 xmm k xmm
//	VFMSUBADD132PS.Z m256 ymm k ymm
//	VFMSUBADD132PS.Z xmm xmm k xmm
//	VFMSUBADD132PS.Z ymm ymm k ymm
//	VFMSUBADD132PS.Z m512 zmm k zmm
//	VFMSUBADD132PS.Z zmm zmm k zmm
//
// Construct and append a VFMSUBADD132PS.Z instruction to the active function.
func (c *Context) VFMSUBADD132PS_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VFMSUBADD132PS_Z(mxyz, xyz, k, xyz1))
}

// VFMSUBADD132PS_Z: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VFMSUBADD132PS.Z m128 xmm k xmm
//	VFMSUBADD132PS.Z m256 ymm k ymm
//	VFMSUBADD132PS.Z xmm xmm k xmm
//	VFMSUBADD132PS.Z ymm ymm k ymm
//	VFMSUBADD132PS.Z m512 zmm k zmm
//	VFMSUBADD132PS.Z zmm zmm k zmm
//
// Construct and append a VFMSUBADD132PS.Z instruction to the active function.
// Operates on the global context.
func VFMSUBADD132PS_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VFMSUBADD132PS_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VFMSUBADD213PD: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values.
//
// Forms:
//
//	VFMSUBADD213PD m128 xmm xmm
//	VFMSUBADD213PD m256 ymm ymm
//	VFMSUBADD213PD xmm xmm xmm
//	VFMSUBADD213PD ymm ymm ymm
//	VFMSUBADD213PD m128 xmm k xmm
//	VFMSUBADD213PD m256 ymm k ymm
//	VFMSUBADD213PD xmm xmm k xmm
//	VFMSUBADD213PD ymm ymm k ymm
//	VFMSUBADD213PD m512 zmm k zmm
//	VFMSUBADD213PD m512 zmm zmm
//	VFMSUBADD213PD zmm zmm k zmm
//	VFMSUBADD213PD zmm zmm zmm
//
// Construct and append a VFMSUBADD213PD instruction to the active function.
func (c *Context) VFMSUBADD213PD(ops ...operand.Op) {
	c.addinstruction(x86.VFMSUBADD213PD(ops...))
}

// VFMSUBADD213PD: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values.
//
// Forms:
//
//	VFMSUBADD213PD m128 xmm xmm
//	VFMSUBADD213PD m256 ymm ymm
//	VFMSUBADD213PD xmm xmm xmm
//	VFMSUBADD213PD ymm ymm ymm
//	VFMSUBADD213PD m128 xmm k xmm
//	VFMSUBADD213PD m256 ymm k ymm
//	VFMSUBADD213PD xmm xmm k xmm
//	VFMSUBADD213PD ymm ymm k ymm
//	VFMSUBADD213PD m512 zmm k zmm
//	VFMSUBADD213PD m512 zmm zmm
//	VFMSUBADD213PD zmm zmm k zmm
//	VFMSUBADD213PD zmm zmm zmm
//
// Construct and append a VFMSUBADD213PD instruction to the active function.
// Operates on the global context.
func VFMSUBADD213PD(ops ...operand.Op) { ctx.VFMSUBADD213PD(ops...) }
|
|
|
|
// VFMSUBADD213PD_BCST: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VFMSUBADD213PD.BCST m64 xmm k xmm
//	VFMSUBADD213PD.BCST m64 xmm xmm
//	VFMSUBADD213PD.BCST m64 ymm k ymm
//	VFMSUBADD213PD.BCST m64 ymm ymm
//	VFMSUBADD213PD.BCST m64 zmm k zmm
//	VFMSUBADD213PD.BCST m64 zmm zmm
//
// Construct and append a VFMSUBADD213PD.BCST instruction to the active function.
func (c *Context) VFMSUBADD213PD_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VFMSUBADD213PD_BCST(ops...))
}

// VFMSUBADD213PD_BCST: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VFMSUBADD213PD.BCST m64 xmm k xmm
//	VFMSUBADD213PD.BCST m64 xmm xmm
//	VFMSUBADD213PD.BCST m64 ymm k ymm
//	VFMSUBADD213PD.BCST m64 ymm ymm
//	VFMSUBADD213PD.BCST m64 zmm k zmm
//	VFMSUBADD213PD.BCST m64 zmm zmm
//
// Construct and append a VFMSUBADD213PD.BCST instruction to the active function.
// Operates on the global context.
func VFMSUBADD213PD_BCST(ops ...operand.Op) { ctx.VFMSUBADD213PD_BCST(ops...) }
|
|
|
|
// VFMSUBADD213PD_BCST_Z: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VFMSUBADD213PD.BCST.Z m64 xmm k xmm
//	VFMSUBADD213PD.BCST.Z m64 ymm k ymm
//	VFMSUBADD213PD.BCST.Z m64 zmm k zmm
//
// Construct and append a VFMSUBADD213PD.BCST.Z instruction to the active function.
func (c *Context) VFMSUBADD213PD_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VFMSUBADD213PD_BCST_Z(m, xyz, k, xyz1))
}

// VFMSUBADD213PD_BCST_Z: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VFMSUBADD213PD.BCST.Z m64 xmm k xmm
//	VFMSUBADD213PD.BCST.Z m64 ymm k ymm
//	VFMSUBADD213PD.BCST.Z m64 zmm k zmm
//
// Construct and append a VFMSUBADD213PD.BCST.Z instruction to the active function.
// Operates on the global context.
func VFMSUBADD213PD_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VFMSUBADD213PD_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VFMSUBADD213PD_RD_SAE: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VFMSUBADD213PD.RD_SAE zmm zmm k zmm
//	VFMSUBADD213PD.RD_SAE zmm zmm zmm
//
// Construct and append a VFMSUBADD213PD.RD_SAE instruction to the active function.
func (c *Context) VFMSUBADD213PD_RD_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMSUBADD213PD_RD_SAE(ops...))
}

// VFMSUBADD213PD_RD_SAE: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VFMSUBADD213PD.RD_SAE zmm zmm k zmm
//	VFMSUBADD213PD.RD_SAE zmm zmm zmm
//
// Construct and append a VFMSUBADD213PD.RD_SAE instruction to the active function.
// Operates on the global context.
func VFMSUBADD213PD_RD_SAE(ops ...operand.Op) { ctx.VFMSUBADD213PD_RD_SAE(ops...) }
|
|
|
|
// VFMSUBADD213PD_RD_SAE_Z: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VFMSUBADD213PD.RD_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMSUBADD213PD.RD_SAE.Z instruction to the active function.
func (c *Context) VFMSUBADD213PD_RD_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VFMSUBADD213PD_RD_SAE_Z(z, z1, k, z2))
}

// VFMSUBADD213PD_RD_SAE_Z: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VFMSUBADD213PD.RD_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMSUBADD213PD.RD_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMSUBADD213PD_RD_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMSUBADD213PD_RD_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMSUBADD213PD_RN_SAE: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VFMSUBADD213PD.RN_SAE zmm zmm k zmm
//	VFMSUBADD213PD.RN_SAE zmm zmm zmm
//
// Construct and append a VFMSUBADD213PD.RN_SAE instruction to the active function.
func (c *Context) VFMSUBADD213PD_RN_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMSUBADD213PD_RN_SAE(ops...))
}

// VFMSUBADD213PD_RN_SAE: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VFMSUBADD213PD.RN_SAE zmm zmm k zmm
//	VFMSUBADD213PD.RN_SAE zmm zmm zmm
//
// Construct and append a VFMSUBADD213PD.RN_SAE instruction to the active function.
// Operates on the global context.
func VFMSUBADD213PD_RN_SAE(ops ...operand.Op) { ctx.VFMSUBADD213PD_RN_SAE(ops...) }
|
|
|
|
// VFMSUBADD213PD_RN_SAE_Z: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VFMSUBADD213PD.RN_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMSUBADD213PD.RN_SAE.Z instruction to the active function.
func (c *Context) VFMSUBADD213PD_RN_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VFMSUBADD213PD_RN_SAE_Z(z, z1, k, z2))
}

// VFMSUBADD213PD_RN_SAE_Z: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VFMSUBADD213PD.RN_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMSUBADD213PD.RN_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMSUBADD213PD_RN_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMSUBADD213PD_RN_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMSUBADD213PD_RU_SAE: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VFMSUBADD213PD.RU_SAE zmm zmm k zmm
//	VFMSUBADD213PD.RU_SAE zmm zmm zmm
//
// Construct and append a VFMSUBADD213PD.RU_SAE instruction to the active function.
func (c *Context) VFMSUBADD213PD_RU_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMSUBADD213PD_RU_SAE(ops...))
}

// VFMSUBADD213PD_RU_SAE: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VFMSUBADD213PD.RU_SAE zmm zmm k zmm
//	VFMSUBADD213PD.RU_SAE zmm zmm zmm
//
// Construct and append a VFMSUBADD213PD.RU_SAE instruction to the active function.
// Operates on the global context.
func VFMSUBADD213PD_RU_SAE(ops ...operand.Op) { ctx.VFMSUBADD213PD_RU_SAE(ops...) }
|
|
|
|
// VFMSUBADD213PD_RU_SAE_Z: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VFMSUBADD213PD.RU_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMSUBADD213PD.RU_SAE.Z instruction to the active function.
func (c *Context) VFMSUBADD213PD_RU_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VFMSUBADD213PD_RU_SAE_Z(z, z1, k, z2))
}

// VFMSUBADD213PD_RU_SAE_Z: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VFMSUBADD213PD.RU_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMSUBADD213PD.RU_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMSUBADD213PD_RU_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMSUBADD213PD_RU_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMSUBADD213PD_RZ_SAE: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VFMSUBADD213PD.RZ_SAE zmm zmm k zmm
//	VFMSUBADD213PD.RZ_SAE zmm zmm zmm
//
// Construct and append a VFMSUBADD213PD.RZ_SAE instruction to the active function.
func (c *Context) VFMSUBADD213PD_RZ_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMSUBADD213PD_RZ_SAE(ops...))
}

// VFMSUBADD213PD_RZ_SAE: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VFMSUBADD213PD.RZ_SAE zmm zmm k zmm
//	VFMSUBADD213PD.RZ_SAE zmm zmm zmm
//
// Construct and append a VFMSUBADD213PD.RZ_SAE instruction to the active function.
// Operates on the global context.
func VFMSUBADD213PD_RZ_SAE(ops ...operand.Op) { ctx.VFMSUBADD213PD_RZ_SAE(ops...) }
|
|
|
|
// VFMSUBADD213PD_RZ_SAE_Z: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VFMSUBADD213PD.RZ_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMSUBADD213PD.RZ_SAE.Z instruction to the active function.
func (c *Context) VFMSUBADD213PD_RZ_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VFMSUBADD213PD_RZ_SAE_Z(z, z1, k, z2))
}

// VFMSUBADD213PD_RZ_SAE_Z: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VFMSUBADD213PD.RZ_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMSUBADD213PD.RZ_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMSUBADD213PD_RZ_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMSUBADD213PD_RZ_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMSUBADD213PD_Z: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VFMSUBADD213PD.Z m128 xmm k xmm
//	VFMSUBADD213PD.Z m256 ymm k ymm
//	VFMSUBADD213PD.Z xmm xmm k xmm
//	VFMSUBADD213PD.Z ymm ymm k ymm
//	VFMSUBADD213PD.Z m512 zmm k zmm
//	VFMSUBADD213PD.Z zmm zmm k zmm
//
// Construct and append a VFMSUBADD213PD.Z instruction to the active function.
func (c *Context) VFMSUBADD213PD_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VFMSUBADD213PD_Z(mxyz, xyz, k, xyz1))
}

// VFMSUBADD213PD_Z: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VFMSUBADD213PD.Z m128 xmm k xmm
//	VFMSUBADD213PD.Z m256 ymm k ymm
//	VFMSUBADD213PD.Z xmm xmm k xmm
//	VFMSUBADD213PD.Z ymm ymm k ymm
//	VFMSUBADD213PD.Z m512 zmm k zmm
//	VFMSUBADD213PD.Z zmm zmm k zmm
//
// Construct and append a VFMSUBADD213PD.Z instruction to the active function.
// Operates on the global context.
func VFMSUBADD213PD_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VFMSUBADD213PD_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VFMSUBADD213PS: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	VFMSUBADD213PS m128 xmm xmm
//	VFMSUBADD213PS m256 ymm ymm
//	VFMSUBADD213PS xmm xmm xmm
//	VFMSUBADD213PS ymm ymm ymm
//	VFMSUBADD213PS m128 xmm k xmm
//	VFMSUBADD213PS m256 ymm k ymm
//	VFMSUBADD213PS xmm xmm k xmm
//	VFMSUBADD213PS ymm ymm k ymm
//	VFMSUBADD213PS m512 zmm k zmm
//	VFMSUBADD213PS m512 zmm zmm
//	VFMSUBADD213PS zmm zmm k zmm
//	VFMSUBADD213PS zmm zmm zmm
//
// Construct and append a VFMSUBADD213PS instruction to the active function.
func (c *Context) VFMSUBADD213PS(ops ...operand.Op) {
	c.addinstruction(x86.VFMSUBADD213PS(ops...))
}

// VFMSUBADD213PS: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	VFMSUBADD213PS m128 xmm xmm
//	VFMSUBADD213PS m256 ymm ymm
//	VFMSUBADD213PS xmm xmm xmm
//	VFMSUBADD213PS ymm ymm ymm
//	VFMSUBADD213PS m128 xmm k xmm
//	VFMSUBADD213PS m256 ymm k ymm
//	VFMSUBADD213PS xmm xmm k xmm
//	VFMSUBADD213PS ymm ymm k ymm
//	VFMSUBADD213PS m512 zmm k zmm
//	VFMSUBADD213PS m512 zmm zmm
//	VFMSUBADD213PS zmm zmm k zmm
//	VFMSUBADD213PS zmm zmm zmm
//
// Construct and append a VFMSUBADD213PS instruction to the active function.
// Operates on the global context.
func VFMSUBADD213PS(ops ...operand.Op) { ctx.VFMSUBADD213PS(ops...) }
|
|
|
|
// VFMSUBADD213PS_BCST: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VFMSUBADD213PS.BCST m32 xmm k xmm
//	VFMSUBADD213PS.BCST m32 xmm xmm
//	VFMSUBADD213PS.BCST m32 ymm k ymm
//	VFMSUBADD213PS.BCST m32 ymm ymm
//	VFMSUBADD213PS.BCST m32 zmm k zmm
//	VFMSUBADD213PS.BCST m32 zmm zmm
//
// Construct and append a VFMSUBADD213PS.BCST instruction to the active function.
func (c *Context) VFMSUBADD213PS_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VFMSUBADD213PS_BCST(ops...))
}

// VFMSUBADD213PS_BCST: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VFMSUBADD213PS.BCST m32 xmm k xmm
//	VFMSUBADD213PS.BCST m32 xmm xmm
//	VFMSUBADD213PS.BCST m32 ymm k ymm
//	VFMSUBADD213PS.BCST m32 ymm ymm
//	VFMSUBADD213PS.BCST m32 zmm k zmm
//	VFMSUBADD213PS.BCST m32 zmm zmm
//
// Construct and append a VFMSUBADD213PS.BCST instruction to the active function.
// Operates on the global context.
func VFMSUBADD213PS_BCST(ops ...operand.Op) { ctx.VFMSUBADD213PS_BCST(ops...) }
|
|
|
|
// VFMSUBADD213PS_BCST_Z: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VFMSUBADD213PS.BCST.Z m32 xmm k xmm
//	VFMSUBADD213PS.BCST.Z m32 ymm k ymm
//	VFMSUBADD213PS.BCST.Z m32 zmm k zmm
//
// Construct and append a VFMSUBADD213PS.BCST.Z instruction to the active function.
func (c *Context) VFMSUBADD213PS_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VFMSUBADD213PS_BCST_Z(m, xyz, k, xyz1))
}

// VFMSUBADD213PS_BCST_Z: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VFMSUBADD213PS.BCST.Z m32 xmm k xmm
//	VFMSUBADD213PS.BCST.Z m32 ymm k ymm
//	VFMSUBADD213PS.BCST.Z m32 zmm k zmm
//
// Construct and append a VFMSUBADD213PS.BCST.Z instruction to the active function.
// Operates on the global context.
func VFMSUBADD213PS_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VFMSUBADD213PS_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VFMSUBADD213PS_RD_SAE: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VFMSUBADD213PS.RD_SAE zmm zmm k zmm
//	VFMSUBADD213PS.RD_SAE zmm zmm zmm
//
// Construct and append a VFMSUBADD213PS.RD_SAE instruction to the active function.
func (c *Context) VFMSUBADD213PS_RD_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMSUBADD213PS_RD_SAE(ops...))
}

// VFMSUBADD213PS_RD_SAE: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VFMSUBADD213PS.RD_SAE zmm zmm k zmm
//	VFMSUBADD213PS.RD_SAE zmm zmm zmm
//
// Construct and append a VFMSUBADD213PS.RD_SAE instruction to the active function.
// Operates on the global context.
func VFMSUBADD213PS_RD_SAE(ops ...operand.Op) { ctx.VFMSUBADD213PS_RD_SAE(ops...) }
|
|
|
|
// VFMSUBADD213PS_RD_SAE_Z: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VFMSUBADD213PS.RD_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMSUBADD213PS.RD_SAE.Z instruction to the active function.
func (c *Context) VFMSUBADD213PS_RD_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VFMSUBADD213PS_RD_SAE_Z(z, z1, k, z2))
}

// VFMSUBADD213PS_RD_SAE_Z: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VFMSUBADD213PS.RD_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMSUBADD213PS.RD_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMSUBADD213PS_RD_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMSUBADD213PS_RD_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMSUBADD213PS_RN_SAE: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VFMSUBADD213PS.RN_SAE zmm zmm k zmm
//	VFMSUBADD213PS.RN_SAE zmm zmm zmm
//
// Construct and append a VFMSUBADD213PS.RN_SAE instruction to the active function.
func (c *Context) VFMSUBADD213PS_RN_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMSUBADD213PS_RN_SAE(ops...))
}

// VFMSUBADD213PS_RN_SAE: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VFMSUBADD213PS.RN_SAE zmm zmm k zmm
//	VFMSUBADD213PS.RN_SAE zmm zmm zmm
//
// Construct and append a VFMSUBADD213PS.RN_SAE instruction to the active function.
// Operates on the global context.
func VFMSUBADD213PS_RN_SAE(ops ...operand.Op) { ctx.VFMSUBADD213PS_RN_SAE(ops...) }
|
|
|
|
// VFMSUBADD213PS_RN_SAE_Z: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VFMSUBADD213PS.RN_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMSUBADD213PS.RN_SAE.Z instruction to the active function.
func (c *Context) VFMSUBADD213PS_RN_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VFMSUBADD213PS_RN_SAE_Z(z, z1, k, z2))
}

// VFMSUBADD213PS_RN_SAE_Z: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VFMSUBADD213PS.RN_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMSUBADD213PS.RN_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMSUBADD213PS_RN_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMSUBADD213PS_RN_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMSUBADD213PS_RU_SAE: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VFMSUBADD213PS.RU_SAE zmm zmm k zmm
//	VFMSUBADD213PS.RU_SAE zmm zmm zmm
//
// Construct and append a VFMSUBADD213PS.RU_SAE instruction to the active function.
func (c *Context) VFMSUBADD213PS_RU_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMSUBADD213PS_RU_SAE(ops...))
}

// VFMSUBADD213PS_RU_SAE: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VFMSUBADD213PS.RU_SAE zmm zmm k zmm
//	VFMSUBADD213PS.RU_SAE zmm zmm zmm
//
// Construct and append a VFMSUBADD213PS.RU_SAE instruction to the active function.
// Operates on the global context.
func VFMSUBADD213PS_RU_SAE(ops ...operand.Op) { ctx.VFMSUBADD213PS_RU_SAE(ops...) }
|
|
|
|
// VFMSUBADD213PS_RU_SAE_Z: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VFMSUBADD213PS.RU_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMSUBADD213PS.RU_SAE.Z instruction to the active function.
func (c *Context) VFMSUBADD213PS_RU_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VFMSUBADD213PS_RU_SAE_Z(z, z1, k, z2))
}

// VFMSUBADD213PS_RU_SAE_Z: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VFMSUBADD213PS.RU_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMSUBADD213PS.RU_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMSUBADD213PS_RU_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMSUBADD213PS_RU_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMSUBADD213PS_RZ_SAE: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VFMSUBADD213PS.RZ_SAE zmm zmm k zmm
//	VFMSUBADD213PS.RZ_SAE zmm zmm zmm
//
// Construct and append a VFMSUBADD213PS.RZ_SAE instruction to the active function.
func (c *Context) VFMSUBADD213PS_RZ_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFMSUBADD213PS_RZ_SAE(ops...))
}

// VFMSUBADD213PS_RZ_SAE: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VFMSUBADD213PS.RZ_SAE zmm zmm k zmm
//	VFMSUBADD213PS.RZ_SAE zmm zmm zmm
//
// Construct and append a VFMSUBADD213PS.RZ_SAE instruction to the active function.
// Operates on the global context.
func VFMSUBADD213PS_RZ_SAE(ops ...operand.Op) { ctx.VFMSUBADD213PS_RZ_SAE(ops...) }
|
|
|
|
// VFMSUBADD213PS_RZ_SAE_Z: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VFMSUBADD213PS.RZ_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMSUBADD213PS.RZ_SAE.Z instruction to the active function.
func (c *Context) VFMSUBADD213PS_RZ_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VFMSUBADD213PS_RZ_SAE_Z(z, z1, k, z2))
}

// VFMSUBADD213PS_RZ_SAE_Z: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VFMSUBADD213PS.RZ_SAE.Z zmm zmm k zmm
//
// Construct and append a VFMSUBADD213PS.RZ_SAE.Z instruction to the active function.
// Operates on the global context.
func VFMSUBADD213PS_RZ_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMSUBADD213PS_RZ_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMSUBADD213PS_Z: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VFMSUBADD213PS.Z m128 xmm k xmm
//	VFMSUBADD213PS.Z m256 ymm k ymm
//	VFMSUBADD213PS.Z xmm xmm k xmm
//	VFMSUBADD213PS.Z ymm ymm k ymm
//	VFMSUBADD213PS.Z m512 zmm k zmm
//	VFMSUBADD213PS.Z zmm zmm k zmm
//
// Construct and append a VFMSUBADD213PS.Z instruction to the active function.
func (c *Context) VFMSUBADD213PS_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VFMSUBADD213PS_Z(mxyz, xyz, k, xyz1))
}

// VFMSUBADD213PS_Z: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VFMSUBADD213PS.Z m128 xmm k xmm
//	VFMSUBADD213PS.Z m256 ymm k ymm
//	VFMSUBADD213PS.Z xmm xmm k xmm
//	VFMSUBADD213PS.Z ymm ymm k ymm
//	VFMSUBADD213PS.Z m512 zmm k zmm
//	VFMSUBADD213PS.Z zmm zmm k zmm
//
// Construct and append a VFMSUBADD213PS.Z instruction to the active function.
// Operates on the global context.
func VFMSUBADD213PS_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VFMSUBADD213PS_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VFMSUBADD231PD: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD231PD m128 xmm xmm
|
|
// VFMSUBADD231PD m256 ymm ymm
|
|
// VFMSUBADD231PD xmm xmm xmm
|
|
// VFMSUBADD231PD ymm ymm ymm
|
|
// VFMSUBADD231PD m128 xmm k xmm
|
|
// VFMSUBADD231PD m256 ymm k ymm
|
|
// VFMSUBADD231PD xmm xmm k xmm
|
|
// VFMSUBADD231PD ymm ymm k ymm
|
|
// VFMSUBADD231PD m512 zmm k zmm
|
|
// VFMSUBADD231PD m512 zmm zmm
|
|
// VFMSUBADD231PD zmm zmm k zmm
|
|
// VFMSUBADD231PD zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUBADD231PD instruction to the active function.
|
|
func (c *Context) VFMSUBADD231PD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMSUBADD231PD(ops...))
|
|
}
|
|
|
|
// VFMSUBADD231PD: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD231PD m128 xmm xmm
|
|
// VFMSUBADD231PD m256 ymm ymm
|
|
// VFMSUBADD231PD xmm xmm xmm
|
|
// VFMSUBADD231PD ymm ymm ymm
|
|
// VFMSUBADD231PD m128 xmm k xmm
|
|
// VFMSUBADD231PD m256 ymm k ymm
|
|
// VFMSUBADD231PD xmm xmm k xmm
|
|
// VFMSUBADD231PD ymm ymm k ymm
|
|
// VFMSUBADD231PD m512 zmm k zmm
|
|
// VFMSUBADD231PD m512 zmm zmm
|
|
// VFMSUBADD231PD zmm zmm k zmm
|
|
// VFMSUBADD231PD zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUBADD231PD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUBADD231PD(ops ...operand.Op) { ctx.VFMSUBADD231PD(ops...) }
|
|
|
|
// VFMSUBADD231PD_BCST: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD231PD.BCST m64 xmm k xmm
|
|
// VFMSUBADD231PD.BCST m64 xmm xmm
|
|
// VFMSUBADD231PD.BCST m64 ymm k ymm
|
|
// VFMSUBADD231PD.BCST m64 ymm ymm
|
|
// VFMSUBADD231PD.BCST m64 zmm k zmm
|
|
// VFMSUBADD231PD.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VFMSUBADD231PD.BCST instruction to the active function.
|
|
func (c *Context) VFMSUBADD231PD_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMSUBADD231PD_BCST(ops...))
|
|
}
|
|
|
|
// VFMSUBADD231PD_BCST: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD231PD.BCST m64 xmm k xmm
|
|
// VFMSUBADD231PD.BCST m64 xmm xmm
|
|
// VFMSUBADD231PD.BCST m64 ymm k ymm
|
|
// VFMSUBADD231PD.BCST m64 ymm ymm
|
|
// VFMSUBADD231PD.BCST m64 zmm k zmm
|
|
// VFMSUBADD231PD.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VFMSUBADD231PD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUBADD231PD_BCST(ops ...operand.Op) { ctx.VFMSUBADD231PD_BCST(ops...) }
|
|
|
|
// VFMSUBADD231PD_BCST_Z: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD231PD.BCST.Z m64 xmm k xmm
|
|
// VFMSUBADD231PD.BCST.Z m64 ymm k ymm
|
|
// VFMSUBADD231PD.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUBADD231PD.BCST.Z instruction to the active function.
|
|
func (c *Context) VFMSUBADD231PD_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VFMSUBADD231PD_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VFMSUBADD231PD_BCST_Z: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD231PD.BCST.Z m64 xmm k xmm
|
|
// VFMSUBADD231PD.BCST.Z m64 ymm k ymm
|
|
// VFMSUBADD231PD.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUBADD231PD.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUBADD231PD_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VFMSUBADD231PD_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VFMSUBADD231PD_RD_SAE: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD231PD.RD_SAE zmm zmm k zmm
|
|
// VFMSUBADD231PD.RD_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUBADD231PD.RD_SAE instruction to the active function.
|
|
func (c *Context) VFMSUBADD231PD_RD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMSUBADD231PD_RD_SAE(ops...))
|
|
}
|
|
|
|
// VFMSUBADD231PD_RD_SAE: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD231PD.RD_SAE zmm zmm k zmm
|
|
// VFMSUBADD231PD.RD_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUBADD231PD.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUBADD231PD_RD_SAE(ops ...operand.Op) { ctx.VFMSUBADD231PD_RD_SAE(ops...) }
|
|
|
|
// VFMSUBADD231PD_RD_SAE_Z: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD231PD.RD_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUBADD231PD.RD_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMSUBADD231PD_RD_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFMSUBADD231PD_RD_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFMSUBADD231PD_RD_SAE_Z: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD231PD.RD_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUBADD231PD.RD_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUBADD231PD_RD_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMSUBADD231PD_RD_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMSUBADD231PD_RN_SAE: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD231PD.RN_SAE zmm zmm k zmm
|
|
// VFMSUBADD231PD.RN_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUBADD231PD.RN_SAE instruction to the active function.
|
|
func (c *Context) VFMSUBADD231PD_RN_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMSUBADD231PD_RN_SAE(ops...))
|
|
}
|
|
|
|
// VFMSUBADD231PD_RN_SAE: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD231PD.RN_SAE zmm zmm k zmm
|
|
// VFMSUBADD231PD.RN_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUBADD231PD.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUBADD231PD_RN_SAE(ops ...operand.Op) { ctx.VFMSUBADD231PD_RN_SAE(ops...) }
|
|
|
|
// VFMSUBADD231PD_RN_SAE_Z: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD231PD.RN_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUBADD231PD.RN_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMSUBADD231PD_RN_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFMSUBADD231PD_RN_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFMSUBADD231PD_RN_SAE_Z: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD231PD.RN_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUBADD231PD.RN_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUBADD231PD_RN_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMSUBADD231PD_RN_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMSUBADD231PD_RU_SAE: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD231PD.RU_SAE zmm zmm k zmm
|
|
// VFMSUBADD231PD.RU_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUBADD231PD.RU_SAE instruction to the active function.
|
|
func (c *Context) VFMSUBADD231PD_RU_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMSUBADD231PD_RU_SAE(ops...))
|
|
}
|
|
|
|
// VFMSUBADD231PD_RU_SAE: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD231PD.RU_SAE zmm zmm k zmm
|
|
// VFMSUBADD231PD.RU_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUBADD231PD.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUBADD231PD_RU_SAE(ops ...operand.Op) { ctx.VFMSUBADD231PD_RU_SAE(ops...) }
|
|
|
|
// VFMSUBADD231PD_RU_SAE_Z: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD231PD.RU_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUBADD231PD.RU_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMSUBADD231PD_RU_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFMSUBADD231PD_RU_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFMSUBADD231PD_RU_SAE_Z: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD231PD.RU_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUBADD231PD.RU_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUBADD231PD_RU_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMSUBADD231PD_RU_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMSUBADD231PD_RZ_SAE: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD231PD.RZ_SAE zmm zmm k zmm
|
|
// VFMSUBADD231PD.RZ_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUBADD231PD.RZ_SAE instruction to the active function.
|
|
func (c *Context) VFMSUBADD231PD_RZ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMSUBADD231PD_RZ_SAE(ops...))
|
|
}
|
|
|
|
// VFMSUBADD231PD_RZ_SAE: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD231PD.RZ_SAE zmm zmm k zmm
|
|
// VFMSUBADD231PD.RZ_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUBADD231PD.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUBADD231PD_RZ_SAE(ops ...operand.Op) { ctx.VFMSUBADD231PD_RZ_SAE(ops...) }
|
|
|
|
// VFMSUBADD231PD_RZ_SAE_Z: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD231PD.RZ_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUBADD231PD.RZ_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMSUBADD231PD_RZ_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFMSUBADD231PD_RZ_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFMSUBADD231PD_RZ_SAE_Z: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD231PD.RZ_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUBADD231PD.RZ_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUBADD231PD_RZ_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMSUBADD231PD_RZ_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMSUBADD231PD_Z: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD231PD.Z m128 xmm k xmm
|
|
// VFMSUBADD231PD.Z m256 ymm k ymm
|
|
// VFMSUBADD231PD.Z xmm xmm k xmm
|
|
// VFMSUBADD231PD.Z ymm ymm k ymm
|
|
// VFMSUBADD231PD.Z m512 zmm k zmm
|
|
// VFMSUBADD231PD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUBADD231PD.Z instruction to the active function.
|
|
func (c *Context) VFMSUBADD231PD_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VFMSUBADD231PD_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VFMSUBADD231PD_Z: Fused Multiply-Alternating Subtract/Add of Packed Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD231PD.Z m128 xmm k xmm
|
|
// VFMSUBADD231PD.Z m256 ymm k ymm
|
|
// VFMSUBADD231PD.Z xmm xmm k xmm
|
|
// VFMSUBADD231PD.Z ymm ymm k ymm
|
|
// VFMSUBADD231PD.Z m512 zmm k zmm
|
|
// VFMSUBADD231PD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUBADD231PD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUBADD231PD_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VFMSUBADD231PD_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VFMSUBADD231PS: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD231PS m128 xmm xmm
|
|
// VFMSUBADD231PS m256 ymm ymm
|
|
// VFMSUBADD231PS xmm xmm xmm
|
|
// VFMSUBADD231PS ymm ymm ymm
|
|
// VFMSUBADD231PS m128 xmm k xmm
|
|
// VFMSUBADD231PS m256 ymm k ymm
|
|
// VFMSUBADD231PS xmm xmm k xmm
|
|
// VFMSUBADD231PS ymm ymm k ymm
|
|
// VFMSUBADD231PS m512 zmm k zmm
|
|
// VFMSUBADD231PS m512 zmm zmm
|
|
// VFMSUBADD231PS zmm zmm k zmm
|
|
// VFMSUBADD231PS zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUBADD231PS instruction to the active function.
|
|
func (c *Context) VFMSUBADD231PS(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMSUBADD231PS(ops...))
|
|
}
|
|
|
|
// VFMSUBADD231PS: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD231PS m128 xmm xmm
|
|
// VFMSUBADD231PS m256 ymm ymm
|
|
// VFMSUBADD231PS xmm xmm xmm
|
|
// VFMSUBADD231PS ymm ymm ymm
|
|
// VFMSUBADD231PS m128 xmm k xmm
|
|
// VFMSUBADD231PS m256 ymm k ymm
|
|
// VFMSUBADD231PS xmm xmm k xmm
|
|
// VFMSUBADD231PS ymm ymm k ymm
|
|
// VFMSUBADD231PS m512 zmm k zmm
|
|
// VFMSUBADD231PS m512 zmm zmm
|
|
// VFMSUBADD231PS zmm zmm k zmm
|
|
// VFMSUBADD231PS zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUBADD231PS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUBADD231PS(ops ...operand.Op) { ctx.VFMSUBADD231PS(ops...) }
|
|
|
|
// VFMSUBADD231PS_BCST: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD231PS.BCST m32 xmm k xmm
|
|
// VFMSUBADD231PS.BCST m32 xmm xmm
|
|
// VFMSUBADD231PS.BCST m32 ymm k ymm
|
|
// VFMSUBADD231PS.BCST m32 ymm ymm
|
|
// VFMSUBADD231PS.BCST m32 zmm k zmm
|
|
// VFMSUBADD231PS.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VFMSUBADD231PS.BCST instruction to the active function.
|
|
func (c *Context) VFMSUBADD231PS_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMSUBADD231PS_BCST(ops...))
|
|
}
|
|
|
|
// VFMSUBADD231PS_BCST: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD231PS.BCST m32 xmm k xmm
|
|
// VFMSUBADD231PS.BCST m32 xmm xmm
|
|
// VFMSUBADD231PS.BCST m32 ymm k ymm
|
|
// VFMSUBADD231PS.BCST m32 ymm ymm
|
|
// VFMSUBADD231PS.BCST m32 zmm k zmm
|
|
// VFMSUBADD231PS.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VFMSUBADD231PS.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUBADD231PS_BCST(ops ...operand.Op) { ctx.VFMSUBADD231PS_BCST(ops...) }
|
|
|
|
// VFMSUBADD231PS_BCST_Z: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD231PS.BCST.Z m32 xmm k xmm
|
|
// VFMSUBADD231PS.BCST.Z m32 ymm k ymm
|
|
// VFMSUBADD231PS.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUBADD231PS.BCST.Z instruction to the active function.
|
|
func (c *Context) VFMSUBADD231PS_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VFMSUBADD231PS_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VFMSUBADD231PS_BCST_Z: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD231PS.BCST.Z m32 xmm k xmm
|
|
// VFMSUBADD231PS.BCST.Z m32 ymm k ymm
|
|
// VFMSUBADD231PS.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUBADD231PS.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUBADD231PS_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VFMSUBADD231PS_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VFMSUBADD231PS_RD_SAE: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD231PS.RD_SAE zmm zmm k zmm
|
|
// VFMSUBADD231PS.RD_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUBADD231PS.RD_SAE instruction to the active function.
|
|
func (c *Context) VFMSUBADD231PS_RD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMSUBADD231PS_RD_SAE(ops...))
|
|
}
|
|
|
|
// VFMSUBADD231PS_RD_SAE: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD231PS.RD_SAE zmm zmm k zmm
|
|
// VFMSUBADD231PS.RD_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUBADD231PS.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUBADD231PS_RD_SAE(ops ...operand.Op) { ctx.VFMSUBADD231PS_RD_SAE(ops...) }
|
|
|
|
// VFMSUBADD231PS_RD_SAE_Z: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD231PS.RD_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUBADD231PS.RD_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMSUBADD231PS_RD_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFMSUBADD231PS_RD_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFMSUBADD231PS_RD_SAE_Z: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD231PS.RD_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUBADD231PS.RD_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUBADD231PS_RD_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMSUBADD231PS_RD_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMSUBADD231PS_RN_SAE: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD231PS.RN_SAE zmm zmm k zmm
|
|
// VFMSUBADD231PS.RN_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUBADD231PS.RN_SAE instruction to the active function.
|
|
func (c *Context) VFMSUBADD231PS_RN_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMSUBADD231PS_RN_SAE(ops...))
|
|
}
|
|
|
|
// VFMSUBADD231PS_RN_SAE: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD231PS.RN_SAE zmm zmm k zmm
|
|
// VFMSUBADD231PS.RN_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUBADD231PS.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUBADD231PS_RN_SAE(ops ...operand.Op) { ctx.VFMSUBADD231PS_RN_SAE(ops...) }
|
|
|
|
// VFMSUBADD231PS_RN_SAE_Z: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD231PS.RN_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUBADD231PS.RN_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMSUBADD231PS_RN_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFMSUBADD231PS_RN_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFMSUBADD231PS_RN_SAE_Z: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD231PS.RN_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUBADD231PS.RN_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUBADD231PS_RN_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMSUBADD231PS_RN_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMSUBADD231PS_RU_SAE: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD231PS.RU_SAE zmm zmm k zmm
|
|
// VFMSUBADD231PS.RU_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUBADD231PS.RU_SAE instruction to the active function.
|
|
func (c *Context) VFMSUBADD231PS_RU_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMSUBADD231PS_RU_SAE(ops...))
|
|
}
|
|
|
|
// VFMSUBADD231PS_RU_SAE: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD231PS.RU_SAE zmm zmm k zmm
|
|
// VFMSUBADD231PS.RU_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUBADD231PS.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUBADD231PS_RU_SAE(ops ...operand.Op) { ctx.VFMSUBADD231PS_RU_SAE(ops...) }
|
|
|
|
// VFMSUBADD231PS_RU_SAE_Z: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD231PS.RU_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUBADD231PS.RU_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMSUBADD231PS_RU_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFMSUBADD231PS_RU_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFMSUBADD231PS_RU_SAE_Z: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD231PS.RU_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUBADD231PS.RU_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUBADD231PS_RU_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMSUBADD231PS_RU_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMSUBADD231PS_RZ_SAE: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD231PS.RZ_SAE zmm zmm k zmm
|
|
// VFMSUBADD231PS.RZ_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUBADD231PS.RZ_SAE instruction to the active function.
|
|
func (c *Context) VFMSUBADD231PS_RZ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFMSUBADD231PS_RZ_SAE(ops...))
|
|
}
|
|
|
|
// VFMSUBADD231PS_RZ_SAE: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD231PS.RZ_SAE zmm zmm k zmm
|
|
// VFMSUBADD231PS.RZ_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFMSUBADD231PS.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUBADD231PS_RZ_SAE(ops ...operand.Op) { ctx.VFMSUBADD231PS_RZ_SAE(ops...) }
|
|
|
|
// VFMSUBADD231PS_RZ_SAE_Z: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD231PS.RZ_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUBADD231PS.RZ_SAE.Z instruction to the active function.
|
|
func (c *Context) VFMSUBADD231PS_RZ_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFMSUBADD231PS_RZ_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFMSUBADD231PS_RZ_SAE_Z: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD231PS.RZ_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUBADD231PS.RZ_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUBADD231PS_RZ_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFMSUBADD231PS_RZ_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFMSUBADD231PS_Z: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD231PS.Z m128 xmm k xmm
|
|
// VFMSUBADD231PS.Z m256 ymm k ymm
|
|
// VFMSUBADD231PS.Z xmm xmm k xmm
|
|
// VFMSUBADD231PS.Z ymm ymm k ymm
|
|
// VFMSUBADD231PS.Z m512 zmm k zmm
|
|
// VFMSUBADD231PS.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUBADD231PS.Z instruction to the active function.
|
|
func (c *Context) VFMSUBADD231PS_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VFMSUBADD231PS_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VFMSUBADD231PS_Z: Fused Multiply-Alternating Subtract/Add of Packed Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFMSUBADD231PS.Z m128 xmm k xmm
|
|
// VFMSUBADD231PS.Z m256 ymm k ymm
|
|
// VFMSUBADD231PS.Z xmm xmm k xmm
|
|
// VFMSUBADD231PS.Z ymm ymm k ymm
|
|
// VFMSUBADD231PS.Z m512 zmm k zmm
|
|
// VFMSUBADD231PS.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFMSUBADD231PS.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFMSUBADD231PS_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VFMSUBADD231PS_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VFNMADD132PD: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD132PD m128 xmm xmm
|
|
// VFNMADD132PD m256 ymm ymm
|
|
// VFNMADD132PD xmm xmm xmm
|
|
// VFNMADD132PD ymm ymm ymm
|
|
// VFNMADD132PD m128 xmm k xmm
|
|
// VFNMADD132PD m256 ymm k ymm
|
|
// VFNMADD132PD xmm xmm k xmm
|
|
// VFNMADD132PD ymm ymm k ymm
|
|
// VFNMADD132PD m512 zmm k zmm
|
|
// VFNMADD132PD m512 zmm zmm
|
|
// VFNMADD132PD zmm zmm k zmm
|
|
// VFNMADD132PD zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMADD132PD instruction to the active function.
|
|
func (c *Context) VFNMADD132PD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMADD132PD(ops...))
|
|
}
|
|
|
|
// VFNMADD132PD: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD132PD m128 xmm xmm
|
|
// VFNMADD132PD m256 ymm ymm
|
|
// VFNMADD132PD xmm xmm xmm
|
|
// VFNMADD132PD ymm ymm ymm
|
|
// VFNMADD132PD m128 xmm k xmm
|
|
// VFNMADD132PD m256 ymm k ymm
|
|
// VFNMADD132PD xmm xmm k xmm
|
|
// VFNMADD132PD ymm ymm k ymm
|
|
// VFNMADD132PD m512 zmm k zmm
|
|
// VFNMADD132PD m512 zmm zmm
|
|
// VFNMADD132PD zmm zmm k zmm
|
|
// VFNMADD132PD zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMADD132PD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMADD132PD(ops ...operand.Op) { ctx.VFNMADD132PD(ops...) }
|
|
|
|
// VFNMADD132PD_BCST: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD132PD.BCST m64 xmm k xmm
|
|
// VFNMADD132PD.BCST m64 xmm xmm
|
|
// VFNMADD132PD.BCST m64 ymm k ymm
|
|
// VFNMADD132PD.BCST m64 ymm ymm
|
|
// VFNMADD132PD.BCST m64 zmm k zmm
|
|
// VFNMADD132PD.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VFNMADD132PD.BCST instruction to the active function.
|
|
func (c *Context) VFNMADD132PD_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMADD132PD_BCST(ops...))
|
|
}
|
|
|
|
// VFNMADD132PD_BCST: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD132PD.BCST m64 xmm k xmm
|
|
// VFNMADD132PD.BCST m64 xmm xmm
|
|
// VFNMADD132PD.BCST m64 ymm k ymm
|
|
// VFNMADD132PD.BCST m64 ymm ymm
|
|
// VFNMADD132PD.BCST m64 zmm k zmm
|
|
// VFNMADD132PD.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VFNMADD132PD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMADD132PD_BCST(ops ...operand.Op) { ctx.VFNMADD132PD_BCST(ops...) }
|
|
|
|
// VFNMADD132PD_BCST_Z: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD132PD.BCST.Z m64 xmm k xmm
|
|
// VFNMADD132PD.BCST.Z m64 ymm k ymm
|
|
// VFNMADD132PD.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VFNMADD132PD.BCST.Z instruction to the active function.
|
|
func (c *Context) VFNMADD132PD_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VFNMADD132PD_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VFNMADD132PD_BCST_Z: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD132PD.BCST.Z m64 xmm k xmm
|
|
// VFNMADD132PD.BCST.Z m64 ymm k ymm
|
|
// VFNMADD132PD.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VFNMADD132PD.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMADD132PD_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VFNMADD132PD_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VFNMADD132PD_RD_SAE: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD132PD.RD_SAE zmm zmm k zmm
|
|
// VFNMADD132PD.RD_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMADD132PD.RD_SAE instruction to the active function.
|
|
func (c *Context) VFNMADD132PD_RD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMADD132PD_RD_SAE(ops...))
|
|
}
|
|
|
|
// VFNMADD132PD_RD_SAE: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD132PD.RD_SAE zmm zmm k zmm
|
|
// VFNMADD132PD.RD_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMADD132PD.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMADD132PD_RD_SAE(ops ...operand.Op) { ctx.VFNMADD132PD_RD_SAE(ops...) }
|
|
|
|
// VFNMADD132PD_RD_SAE_Z: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD132PD.RD_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMADD132PD.RD_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMADD132PD_RD_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFNMADD132PD_RD_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFNMADD132PD_RD_SAE_Z: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD132PD.RD_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMADD132PD.RD_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMADD132PD_RD_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFNMADD132PD_RD_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFNMADD132PD_RN_SAE: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VFNMADD132PD.RN_SAE zmm zmm k zmm
//	VFNMADD132PD.RN_SAE zmm zmm zmm
//
// Construct and append a VFNMADD132PD.RN_SAE instruction to the active function.
func (c *Context) VFNMADD132PD_RN_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFNMADD132PD_RN_SAE(ops...))
}

// VFNMADD132PD_RN_SAE: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VFNMADD132PD.RN_SAE zmm zmm k zmm
//	VFNMADD132PD.RN_SAE zmm zmm zmm
//
// Construct and append a VFNMADD132PD.RN_SAE instruction to the active function.
// Operates on the global context.
func VFNMADD132PD_RN_SAE(ops ...operand.Op) { ctx.VFNMADD132PD_RN_SAE(ops...) }
|
|
|
|
// VFNMADD132PD_RN_SAE_Z: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VFNMADD132PD.RN_SAE.Z zmm zmm k zmm
//
// Construct and append a VFNMADD132PD.RN_SAE.Z instruction to the active function.
func (c *Context) VFNMADD132PD_RN_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VFNMADD132PD_RN_SAE_Z(z, z1, k, z2))
}

// VFNMADD132PD_RN_SAE_Z: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VFNMADD132PD.RN_SAE.Z zmm zmm k zmm
//
// Construct and append a VFNMADD132PD.RN_SAE.Z instruction to the active function.
// Operates on the global context.
func VFNMADD132PD_RN_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFNMADD132PD_RN_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFNMADD132PD_RU_SAE: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VFNMADD132PD.RU_SAE zmm zmm k zmm
//	VFNMADD132PD.RU_SAE zmm zmm zmm
//
// Construct and append a VFNMADD132PD.RU_SAE instruction to the active function.
func (c *Context) VFNMADD132PD_RU_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFNMADD132PD_RU_SAE(ops...))
}

// VFNMADD132PD_RU_SAE: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VFNMADD132PD.RU_SAE zmm zmm k zmm
//	VFNMADD132PD.RU_SAE zmm zmm zmm
//
// Construct and append a VFNMADD132PD.RU_SAE instruction to the active function.
// Operates on the global context.
func VFNMADD132PD_RU_SAE(ops ...operand.Op) { ctx.VFNMADD132PD_RU_SAE(ops...) }
|
|
|
|
// VFNMADD132PD_RU_SAE_Z: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VFNMADD132PD.RU_SAE.Z zmm zmm k zmm
//
// Construct and append a VFNMADD132PD.RU_SAE.Z instruction to the active function.
func (c *Context) VFNMADD132PD_RU_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VFNMADD132PD_RU_SAE_Z(z, z1, k, z2))
}

// VFNMADD132PD_RU_SAE_Z: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VFNMADD132PD.RU_SAE.Z zmm zmm k zmm
//
// Construct and append a VFNMADD132PD.RU_SAE.Z instruction to the active function.
// Operates on the global context.
func VFNMADD132PD_RU_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFNMADD132PD_RU_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFNMADD132PD_RZ_SAE: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VFNMADD132PD.RZ_SAE zmm zmm k zmm
//	VFNMADD132PD.RZ_SAE zmm zmm zmm
//
// Construct and append a VFNMADD132PD.RZ_SAE instruction to the active function.
func (c *Context) VFNMADD132PD_RZ_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFNMADD132PD_RZ_SAE(ops...))
}

// VFNMADD132PD_RZ_SAE: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VFNMADD132PD.RZ_SAE zmm zmm k zmm
//	VFNMADD132PD.RZ_SAE zmm zmm zmm
//
// Construct and append a VFNMADD132PD.RZ_SAE instruction to the active function.
// Operates on the global context.
func VFNMADD132PD_RZ_SAE(ops ...operand.Op) { ctx.VFNMADD132PD_RZ_SAE(ops...) }
|
|
|
|
// VFNMADD132PD_RZ_SAE_Z: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VFNMADD132PD.RZ_SAE.Z zmm zmm k zmm
//
// Construct and append a VFNMADD132PD.RZ_SAE.Z instruction to the active function.
func (c *Context) VFNMADD132PD_RZ_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VFNMADD132PD_RZ_SAE_Z(z, z1, k, z2))
}

// VFNMADD132PD_RZ_SAE_Z: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VFNMADD132PD.RZ_SAE.Z zmm zmm k zmm
//
// Construct and append a VFNMADD132PD.RZ_SAE.Z instruction to the active function.
// Operates on the global context.
func VFNMADD132PD_RZ_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFNMADD132PD_RZ_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFNMADD132PD_Z: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VFNMADD132PD.Z m128 xmm k xmm
//	VFNMADD132PD.Z m256 ymm k ymm
//	VFNMADD132PD.Z xmm xmm k xmm
//	VFNMADD132PD.Z ymm ymm k ymm
//	VFNMADD132PD.Z m512 zmm k zmm
//	VFNMADD132PD.Z zmm zmm k zmm
//
// Construct and append a VFNMADD132PD.Z instruction to the active function.
func (c *Context) VFNMADD132PD_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VFNMADD132PD_Z(mxyz, xyz, k, xyz1))
}

// VFNMADD132PD_Z: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VFNMADD132PD.Z m128 xmm k xmm
//	VFNMADD132PD.Z m256 ymm k ymm
//	VFNMADD132PD.Z xmm xmm k xmm
//	VFNMADD132PD.Z ymm ymm k ymm
//	VFNMADD132PD.Z m512 zmm k zmm
//	VFNMADD132PD.Z zmm zmm k zmm
//
// Construct and append a VFNMADD132PD.Z instruction to the active function.
// Operates on the global context.
func VFNMADD132PD_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VFNMADD132PD_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VFNMADD132PS: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	VFNMADD132PS m128 xmm xmm
//	VFNMADD132PS m256 ymm ymm
//	VFNMADD132PS xmm xmm xmm
//	VFNMADD132PS ymm ymm ymm
//	VFNMADD132PS m128 xmm k xmm
//	VFNMADD132PS m256 ymm k ymm
//	VFNMADD132PS xmm xmm k xmm
//	VFNMADD132PS ymm ymm k ymm
//	VFNMADD132PS m512 zmm k zmm
//	VFNMADD132PS m512 zmm zmm
//	VFNMADD132PS zmm zmm k zmm
//	VFNMADD132PS zmm zmm zmm
//
// Construct and append a VFNMADD132PS instruction to the active function.
func (c *Context) VFNMADD132PS(ops ...operand.Op) {
	c.addinstruction(x86.VFNMADD132PS(ops...))
}

// VFNMADD132PS: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	VFNMADD132PS m128 xmm xmm
//	VFNMADD132PS m256 ymm ymm
//	VFNMADD132PS xmm xmm xmm
//	VFNMADD132PS ymm ymm ymm
//	VFNMADD132PS m128 xmm k xmm
//	VFNMADD132PS m256 ymm k ymm
//	VFNMADD132PS xmm xmm k xmm
//	VFNMADD132PS ymm ymm k ymm
//	VFNMADD132PS m512 zmm k zmm
//	VFNMADD132PS m512 zmm zmm
//	VFNMADD132PS zmm zmm k zmm
//	VFNMADD132PS zmm zmm zmm
//
// Construct and append a VFNMADD132PS instruction to the active function.
// Operates on the global context.
func VFNMADD132PS(ops ...operand.Op) { ctx.VFNMADD132PS(ops...) }
|
|
|
|
// VFNMADD132PS_BCST: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VFNMADD132PS.BCST m32 xmm k xmm
//	VFNMADD132PS.BCST m32 xmm xmm
//	VFNMADD132PS.BCST m32 ymm k ymm
//	VFNMADD132PS.BCST m32 ymm ymm
//	VFNMADD132PS.BCST m32 zmm k zmm
//	VFNMADD132PS.BCST m32 zmm zmm
//
// Construct and append a VFNMADD132PS.BCST instruction to the active function.
func (c *Context) VFNMADD132PS_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VFNMADD132PS_BCST(ops...))
}

// VFNMADD132PS_BCST: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VFNMADD132PS.BCST m32 xmm k xmm
//	VFNMADD132PS.BCST m32 xmm xmm
//	VFNMADD132PS.BCST m32 ymm k ymm
//	VFNMADD132PS.BCST m32 ymm ymm
//	VFNMADD132PS.BCST m32 zmm k zmm
//	VFNMADD132PS.BCST m32 zmm zmm
//
// Construct and append a VFNMADD132PS.BCST instruction to the active function.
// Operates on the global context.
func VFNMADD132PS_BCST(ops ...operand.Op) { ctx.VFNMADD132PS_BCST(ops...) }
|
|
|
|
// VFNMADD132PS_BCST_Z: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VFNMADD132PS.BCST.Z m32 xmm k xmm
//	VFNMADD132PS.BCST.Z m32 ymm k ymm
//	VFNMADD132PS.BCST.Z m32 zmm k zmm
//
// Construct and append a VFNMADD132PS.BCST.Z instruction to the active function.
func (c *Context) VFNMADD132PS_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VFNMADD132PS_BCST_Z(m, xyz, k, xyz1))
}

// VFNMADD132PS_BCST_Z: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VFNMADD132PS.BCST.Z m32 xmm k xmm
//	VFNMADD132PS.BCST.Z m32 ymm k ymm
//	VFNMADD132PS.BCST.Z m32 zmm k zmm
//
// Construct and append a VFNMADD132PS.BCST.Z instruction to the active function.
// Operates on the global context.
func VFNMADD132PS_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VFNMADD132PS_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VFNMADD132PS_RD_SAE: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VFNMADD132PS.RD_SAE zmm zmm k zmm
//	VFNMADD132PS.RD_SAE zmm zmm zmm
//
// Construct and append a VFNMADD132PS.RD_SAE instruction to the active function.
func (c *Context) VFNMADD132PS_RD_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFNMADD132PS_RD_SAE(ops...))
}

// VFNMADD132PS_RD_SAE: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VFNMADD132PS.RD_SAE zmm zmm k zmm
//	VFNMADD132PS.RD_SAE zmm zmm zmm
//
// Construct and append a VFNMADD132PS.RD_SAE instruction to the active function.
// Operates on the global context.
func VFNMADD132PS_RD_SAE(ops ...operand.Op) { ctx.VFNMADD132PS_RD_SAE(ops...) }
|
|
|
|
// VFNMADD132PS_RD_SAE_Z: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VFNMADD132PS.RD_SAE.Z zmm zmm k zmm
//
// Construct and append a VFNMADD132PS.RD_SAE.Z instruction to the active function.
func (c *Context) VFNMADD132PS_RD_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VFNMADD132PS_RD_SAE_Z(z, z1, k, z2))
}

// VFNMADD132PS_RD_SAE_Z: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VFNMADD132PS.RD_SAE.Z zmm zmm k zmm
//
// Construct and append a VFNMADD132PS.RD_SAE.Z instruction to the active function.
// Operates on the global context.
func VFNMADD132PS_RD_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFNMADD132PS_RD_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFNMADD132PS_RN_SAE: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VFNMADD132PS.RN_SAE zmm zmm k zmm
//	VFNMADD132PS.RN_SAE zmm zmm zmm
//
// Construct and append a VFNMADD132PS.RN_SAE instruction to the active function.
func (c *Context) VFNMADD132PS_RN_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFNMADD132PS_RN_SAE(ops...))
}

// VFNMADD132PS_RN_SAE: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VFNMADD132PS.RN_SAE zmm zmm k zmm
//	VFNMADD132PS.RN_SAE zmm zmm zmm
//
// Construct and append a VFNMADD132PS.RN_SAE instruction to the active function.
// Operates on the global context.
func VFNMADD132PS_RN_SAE(ops ...operand.Op) { ctx.VFNMADD132PS_RN_SAE(ops...) }
|
|
|
|
// VFNMADD132PS_RN_SAE_Z: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VFNMADD132PS.RN_SAE.Z zmm zmm k zmm
//
// Construct and append a VFNMADD132PS.RN_SAE.Z instruction to the active function.
func (c *Context) VFNMADD132PS_RN_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VFNMADD132PS_RN_SAE_Z(z, z1, k, z2))
}

// VFNMADD132PS_RN_SAE_Z: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VFNMADD132PS.RN_SAE.Z zmm zmm k zmm
//
// Construct and append a VFNMADD132PS.RN_SAE.Z instruction to the active function.
// Operates on the global context.
func VFNMADD132PS_RN_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFNMADD132PS_RN_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFNMADD132PS_RU_SAE: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VFNMADD132PS.RU_SAE zmm zmm k zmm
//	VFNMADD132PS.RU_SAE zmm zmm zmm
//
// Construct and append a VFNMADD132PS.RU_SAE instruction to the active function.
func (c *Context) VFNMADD132PS_RU_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFNMADD132PS_RU_SAE(ops...))
}

// VFNMADD132PS_RU_SAE: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VFNMADD132PS.RU_SAE zmm zmm k zmm
//	VFNMADD132PS.RU_SAE zmm zmm zmm
//
// Construct and append a VFNMADD132PS.RU_SAE instruction to the active function.
// Operates on the global context.
func VFNMADD132PS_RU_SAE(ops ...operand.Op) { ctx.VFNMADD132PS_RU_SAE(ops...) }
|
|
|
|
// VFNMADD132PS_RU_SAE_Z: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VFNMADD132PS.RU_SAE.Z zmm zmm k zmm
//
// Construct and append a VFNMADD132PS.RU_SAE.Z instruction to the active function.
func (c *Context) VFNMADD132PS_RU_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VFNMADD132PS_RU_SAE_Z(z, z1, k, z2))
}

// VFNMADD132PS_RU_SAE_Z: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VFNMADD132PS.RU_SAE.Z zmm zmm k zmm
//
// Construct and append a VFNMADD132PS.RU_SAE.Z instruction to the active function.
// Operates on the global context.
func VFNMADD132PS_RU_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFNMADD132PS_RU_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFNMADD132PS_RZ_SAE: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VFNMADD132PS.RZ_SAE zmm zmm k zmm
//	VFNMADD132PS.RZ_SAE zmm zmm zmm
//
// Construct and append a VFNMADD132PS.RZ_SAE instruction to the active function.
func (c *Context) VFNMADD132PS_RZ_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFNMADD132PS_RZ_SAE(ops...))
}

// VFNMADD132PS_RZ_SAE: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VFNMADD132PS.RZ_SAE zmm zmm k zmm
//	VFNMADD132PS.RZ_SAE zmm zmm zmm
//
// Construct and append a VFNMADD132PS.RZ_SAE instruction to the active function.
// Operates on the global context.
func VFNMADD132PS_RZ_SAE(ops ...operand.Op) { ctx.VFNMADD132PS_RZ_SAE(ops...) }
|
|
|
|
// VFNMADD132PS_RZ_SAE_Z: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VFNMADD132PS.RZ_SAE.Z zmm zmm k zmm
//
// Construct and append a VFNMADD132PS.RZ_SAE.Z instruction to the active function.
func (c *Context) VFNMADD132PS_RZ_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VFNMADD132PS_RZ_SAE_Z(z, z1, k, z2))
}

// VFNMADD132PS_RZ_SAE_Z: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VFNMADD132PS.RZ_SAE.Z zmm zmm k zmm
//
// Construct and append a VFNMADD132PS.RZ_SAE.Z instruction to the active function.
// Operates on the global context.
func VFNMADD132PS_RZ_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFNMADD132PS_RZ_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFNMADD132PS_Z: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VFNMADD132PS.Z m128 xmm k xmm
//	VFNMADD132PS.Z m256 ymm k ymm
//	VFNMADD132PS.Z xmm xmm k xmm
//	VFNMADD132PS.Z ymm ymm k ymm
//	VFNMADD132PS.Z m512 zmm k zmm
//	VFNMADD132PS.Z zmm zmm k zmm
//
// Construct and append a VFNMADD132PS.Z instruction to the active function.
func (c *Context) VFNMADD132PS_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VFNMADD132PS_Z(mxyz, xyz, k, xyz1))
}

// VFNMADD132PS_Z: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VFNMADD132PS.Z m128 xmm k xmm
//	VFNMADD132PS.Z m256 ymm k ymm
//	VFNMADD132PS.Z xmm xmm k xmm
//	VFNMADD132PS.Z ymm ymm k ymm
//	VFNMADD132PS.Z m512 zmm k zmm
//	VFNMADD132PS.Z zmm zmm k zmm
//
// Construct and append a VFNMADD132PS.Z instruction to the active function.
// Operates on the global context.
func VFNMADD132PS_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VFNMADD132PS_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VFNMADD132SD: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values.
//
// Forms:
//
//	VFNMADD132SD m64 xmm xmm
//	VFNMADD132SD xmm xmm xmm
//	VFNMADD132SD m64 xmm k xmm
//	VFNMADD132SD xmm xmm k xmm
//
// Construct and append a VFNMADD132SD instruction to the active function.
func (c *Context) VFNMADD132SD(ops ...operand.Op) {
	c.addinstruction(x86.VFNMADD132SD(ops...))
}

// VFNMADD132SD: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values.
//
// Forms:
//
//	VFNMADD132SD m64 xmm xmm
//	VFNMADD132SD xmm xmm xmm
//	VFNMADD132SD m64 xmm k xmm
//	VFNMADD132SD xmm xmm k xmm
//
// Construct and append a VFNMADD132SD instruction to the active function.
// Operates on the global context.
func VFNMADD132SD(ops ...operand.Op) { ctx.VFNMADD132SD(ops...) }
|
|
|
|
// VFNMADD132SD_RD_SAE: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VFNMADD132SD.RD_SAE xmm xmm k xmm
//	VFNMADD132SD.RD_SAE xmm xmm xmm
//
// Construct and append a VFNMADD132SD.RD_SAE instruction to the active function.
func (c *Context) VFNMADD132SD_RD_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFNMADD132SD_RD_SAE(ops...))
}

// VFNMADD132SD_RD_SAE: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VFNMADD132SD.RD_SAE xmm xmm k xmm
//	VFNMADD132SD.RD_SAE xmm xmm xmm
//
// Construct and append a VFNMADD132SD.RD_SAE instruction to the active function.
// Operates on the global context.
func VFNMADD132SD_RD_SAE(ops ...operand.Op) { ctx.VFNMADD132SD_RD_SAE(ops...) }
|
|
|
|
// VFNMADD132SD_RD_SAE_Z: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VFNMADD132SD.RD_SAE.Z xmm xmm k xmm
//
// Construct and append a VFNMADD132SD.RD_SAE.Z instruction to the active function.
func (c *Context) VFNMADD132SD_RD_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VFNMADD132SD_RD_SAE_Z(x, x1, k, x2))
}

// VFNMADD132SD_RD_SAE_Z: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VFNMADD132SD.RD_SAE.Z xmm xmm k xmm
//
// Construct and append a VFNMADD132SD.RD_SAE.Z instruction to the active function.
// Operates on the global context.
func VFNMADD132SD_RD_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFNMADD132SD_RD_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFNMADD132SD_RN_SAE: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VFNMADD132SD.RN_SAE xmm xmm k xmm
//	VFNMADD132SD.RN_SAE xmm xmm xmm
//
// Construct and append a VFNMADD132SD.RN_SAE instruction to the active function.
func (c *Context) VFNMADD132SD_RN_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFNMADD132SD_RN_SAE(ops...))
}

// VFNMADD132SD_RN_SAE: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VFNMADD132SD.RN_SAE xmm xmm k xmm
//	VFNMADD132SD.RN_SAE xmm xmm xmm
//
// Construct and append a VFNMADD132SD.RN_SAE instruction to the active function.
// Operates on the global context.
func VFNMADD132SD_RN_SAE(ops ...operand.Op) { ctx.VFNMADD132SD_RN_SAE(ops...) }
|
|
|
|
// VFNMADD132SD_RN_SAE_Z: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VFNMADD132SD.RN_SAE.Z xmm xmm k xmm
//
// Construct and append a VFNMADD132SD.RN_SAE.Z instruction to the active function.
func (c *Context) VFNMADD132SD_RN_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VFNMADD132SD_RN_SAE_Z(x, x1, k, x2))
}

// VFNMADD132SD_RN_SAE_Z: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VFNMADD132SD.RN_SAE.Z xmm xmm k xmm
//
// Construct and append a VFNMADD132SD.RN_SAE.Z instruction to the active function.
// Operates on the global context.
func VFNMADD132SD_RN_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFNMADD132SD_RN_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFNMADD132SD_RU_SAE: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VFNMADD132SD.RU_SAE xmm xmm k xmm
//	VFNMADD132SD.RU_SAE xmm xmm xmm
//
// Construct and append a VFNMADD132SD.RU_SAE instruction to the active function.
func (c *Context) VFNMADD132SD_RU_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFNMADD132SD_RU_SAE(ops...))
}

// VFNMADD132SD_RU_SAE: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VFNMADD132SD.RU_SAE xmm xmm k xmm
//	VFNMADD132SD.RU_SAE xmm xmm xmm
//
// Construct and append a VFNMADD132SD.RU_SAE instruction to the active function.
// Operates on the global context.
func VFNMADD132SD_RU_SAE(ops ...operand.Op) { ctx.VFNMADD132SD_RU_SAE(ops...) }
|
|
|
|
// VFNMADD132SD_RU_SAE_Z: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VFNMADD132SD.RU_SAE.Z xmm xmm k xmm
//
// Construct and append a VFNMADD132SD.RU_SAE.Z instruction to the active function.
func (c *Context) VFNMADD132SD_RU_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VFNMADD132SD_RU_SAE_Z(x, x1, k, x2))
}

// VFNMADD132SD_RU_SAE_Z: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VFNMADD132SD.RU_SAE.Z xmm xmm k xmm
//
// Construct and append a VFNMADD132SD.RU_SAE.Z instruction to the active function.
// Operates on the global context.
func VFNMADD132SD_RU_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFNMADD132SD_RU_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFNMADD132SD_RZ_SAE: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VFNMADD132SD.RZ_SAE xmm xmm k xmm
//	VFNMADD132SD.RZ_SAE xmm xmm xmm
//
// Construct and append a VFNMADD132SD.RZ_SAE instruction to the active function.
func (c *Context) VFNMADD132SD_RZ_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFNMADD132SD_RZ_SAE(ops...))
}

// VFNMADD132SD_RZ_SAE: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VFNMADD132SD.RZ_SAE xmm xmm k xmm
//	VFNMADD132SD.RZ_SAE xmm xmm xmm
//
// Construct and append a VFNMADD132SD.RZ_SAE instruction to the active function.
// Operates on the global context.
func VFNMADD132SD_RZ_SAE(ops ...operand.Op) { ctx.VFNMADD132SD_RZ_SAE(ops...) }
|
|
|
|
// VFNMADD132SD_RZ_SAE_Z: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VFNMADD132SD.RZ_SAE.Z xmm xmm k xmm
//
// Construct and append a VFNMADD132SD.RZ_SAE.Z instruction to the active function.
func (c *Context) VFNMADD132SD_RZ_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VFNMADD132SD_RZ_SAE_Z(x, x1, k, x2))
}

// VFNMADD132SD_RZ_SAE_Z: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VFNMADD132SD.RZ_SAE.Z xmm xmm k xmm
//
// Construct and append a VFNMADD132SD.RZ_SAE.Z instruction to the active function.
// Operates on the global context.
func VFNMADD132SD_RZ_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFNMADD132SD_RZ_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFNMADD132SD_Z: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VFNMADD132SD.Z m64 xmm k xmm
//	VFNMADD132SD.Z xmm xmm k xmm
//
// Construct and append a VFNMADD132SD.Z instruction to the active function.
func (c *Context) VFNMADD132SD_Z(mx, x, k, x1 operand.Op) {
	c.addinstruction(x86.VFNMADD132SD_Z(mx, x, k, x1))
}

// VFNMADD132SD_Z: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VFNMADD132SD.Z m64 xmm k xmm
//	VFNMADD132SD.Z xmm xmm k xmm
//
// Construct and append a VFNMADD132SD.Z instruction to the active function.
// Operates on the global context.
func VFNMADD132SD_Z(mx, x, k, x1 operand.Op) { ctx.VFNMADD132SD_Z(mx, x, k, x1) }
|
|
|
|
// VFNMADD132SS: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values.
//
// Forms:
//
//	VFNMADD132SS m32 xmm xmm
//	VFNMADD132SS xmm xmm xmm
//	VFNMADD132SS m32 xmm k xmm
//	VFNMADD132SS xmm xmm k xmm
//
// Construct and append a VFNMADD132SS instruction to the active function.
func (c *Context) VFNMADD132SS(ops ...operand.Op) {
	c.addinstruction(x86.VFNMADD132SS(ops...))
}

// VFNMADD132SS: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values.
//
// Forms:
//
//	VFNMADD132SS m32 xmm xmm
//	VFNMADD132SS xmm xmm xmm
//	VFNMADD132SS m32 xmm k xmm
//	VFNMADD132SS xmm xmm k xmm
//
// Construct and append a VFNMADD132SS instruction to the active function.
// Operates on the global context.
func VFNMADD132SS(ops ...operand.Op) { ctx.VFNMADD132SS(ops...) }
|
|
|
|
// VFNMADD132SS_RD_SAE: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VFNMADD132SS.RD_SAE xmm xmm k xmm
//	VFNMADD132SS.RD_SAE xmm xmm xmm
//
// Construct and append a VFNMADD132SS.RD_SAE instruction to the active function.
func (c *Context) VFNMADD132SS_RD_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFNMADD132SS_RD_SAE(ops...))
}

// VFNMADD132SS_RD_SAE: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VFNMADD132SS.RD_SAE xmm xmm k xmm
//	VFNMADD132SS.RD_SAE xmm xmm xmm
//
// Construct and append a VFNMADD132SS.RD_SAE instruction to the active function.
// Operates on the global context.
func VFNMADD132SS_RD_SAE(ops ...operand.Op) { ctx.VFNMADD132SS_RD_SAE(ops...) }
|
|
|
|
// VFNMADD132SS_RD_SAE_Z: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VFNMADD132SS.RD_SAE.Z xmm xmm k xmm
//
// Construct and append a VFNMADD132SS.RD_SAE.Z instruction to the active function.
func (c *Context) VFNMADD132SS_RD_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VFNMADD132SS_RD_SAE_Z(x, x1, k, x2))
}

// VFNMADD132SS_RD_SAE_Z: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VFNMADD132SS.RD_SAE.Z xmm xmm k xmm
//
// Construct and append a VFNMADD132SS.RD_SAE.Z instruction to the active function.
// Operates on the global context.
func VFNMADD132SS_RD_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFNMADD132SS_RD_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFNMADD132SS_RN_SAE: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VFNMADD132SS.RN_SAE xmm xmm k xmm
//	VFNMADD132SS.RN_SAE xmm xmm xmm
//
// Construct and append a VFNMADD132SS.RN_SAE instruction to the active function.
func (c *Context) VFNMADD132SS_RN_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFNMADD132SS_RN_SAE(ops...))
}

// VFNMADD132SS_RN_SAE: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VFNMADD132SS.RN_SAE xmm xmm k xmm
//	VFNMADD132SS.RN_SAE xmm xmm xmm
//
// Construct and append a VFNMADD132SS.RN_SAE instruction to the active function.
// Operates on the global context.
func VFNMADD132SS_RN_SAE(ops ...operand.Op) { ctx.VFNMADD132SS_RN_SAE(ops...) }
|
|
|
|
// VFNMADD132SS_RN_SAE_Z: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VFNMADD132SS.RN_SAE.Z xmm xmm k xmm
//
// Construct and append a VFNMADD132SS.RN_SAE.Z instruction to the active function.
func (c *Context) VFNMADD132SS_RN_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VFNMADD132SS_RN_SAE_Z(x, x1, k, x2))
}

// VFNMADD132SS_RN_SAE_Z: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VFNMADD132SS.RN_SAE.Z xmm xmm k xmm
//
// Construct and append a VFNMADD132SS.RN_SAE.Z instruction to the active function.
// Operates on the global context.
func VFNMADD132SS_RN_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFNMADD132SS_RN_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFNMADD132SS_RU_SAE: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD132SS.RU_SAE xmm xmm k xmm
|
|
// VFNMADD132SS.RU_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMADD132SS.RU_SAE instruction to the active function.
|
|
func (c *Context) VFNMADD132SS_RU_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMADD132SS_RU_SAE(ops...))
|
|
}
|
|
|
|
// VFNMADD132SS_RU_SAE: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD132SS.RU_SAE xmm xmm k xmm
|
|
// VFNMADD132SS.RU_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMADD132SS.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMADD132SS_RU_SAE(ops ...operand.Op) { ctx.VFNMADD132SS_RU_SAE(ops...) }
|
|
|
|
// VFNMADD132SS_RU_SAE_Z: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD132SS.RU_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMADD132SS.RU_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMADD132SS_RU_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VFNMADD132SS_RU_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VFNMADD132SS_RU_SAE_Z: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD132SS.RU_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMADD132SS.RU_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMADD132SS_RU_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFNMADD132SS_RU_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFNMADD132SS_RZ_SAE: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD132SS.RZ_SAE xmm xmm k xmm
|
|
// VFNMADD132SS.RZ_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMADD132SS.RZ_SAE instruction to the active function.
|
|
func (c *Context) VFNMADD132SS_RZ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMADD132SS_RZ_SAE(ops...))
|
|
}
|
|
|
|
// VFNMADD132SS_RZ_SAE: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD132SS.RZ_SAE xmm xmm k xmm
|
|
// VFNMADD132SS.RZ_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMADD132SS.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMADD132SS_RZ_SAE(ops ...operand.Op) { ctx.VFNMADD132SS_RZ_SAE(ops...) }
|
|
|
|
// VFNMADD132SS_RZ_SAE_Z: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD132SS.RZ_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMADD132SS.RZ_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMADD132SS_RZ_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VFNMADD132SS_RZ_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VFNMADD132SS_RZ_SAE_Z: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD132SS.RZ_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMADD132SS.RZ_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMADD132SS_RZ_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFNMADD132SS_RZ_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFNMADD132SS_Z: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD132SS.Z m32 xmm k xmm
|
|
// VFNMADD132SS.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMADD132SS.Z instruction to the active function.
|
|
func (c *Context) VFNMADD132SS_Z(mx, x, k, x1 operand.Op) {
|
|
c.addinstruction(x86.VFNMADD132SS_Z(mx, x, k, x1))
|
|
}
|
|
|
|
// VFNMADD132SS_Z: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD132SS.Z m32 xmm k xmm
|
|
// VFNMADD132SS.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMADD132SS.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMADD132SS_Z(mx, x, k, x1 operand.Op) { ctx.VFNMADD132SS_Z(mx, x, k, x1) }
|
|
|
|
// VFNMADD213PD: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values.
//
// Forms:
//
//	VFNMADD213PD m128 xmm xmm
//	VFNMADD213PD m256 ymm ymm
//	VFNMADD213PD xmm xmm xmm
//	VFNMADD213PD ymm ymm ymm
//	VFNMADD213PD m128 xmm k xmm
//	VFNMADD213PD m256 ymm k ymm
//	VFNMADD213PD xmm xmm k xmm
//	VFNMADD213PD ymm ymm k ymm
//	VFNMADD213PD m512 zmm k zmm
//	VFNMADD213PD m512 zmm zmm
//	VFNMADD213PD zmm zmm k zmm
//	VFNMADD213PD zmm zmm zmm
//
// Construct and append a VFNMADD213PD instruction to the active function.
func (c *Context) VFNMADD213PD(ops ...operand.Op) {
	c.addinstruction(x86.VFNMADD213PD(ops...))
}

// VFNMADD213PD: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values.
//
// Forms:
//
//	VFNMADD213PD m128 xmm xmm
//	VFNMADD213PD m256 ymm ymm
//	VFNMADD213PD xmm xmm xmm
//	VFNMADD213PD ymm ymm ymm
//	VFNMADD213PD m128 xmm k xmm
//	VFNMADD213PD m256 ymm k ymm
//	VFNMADD213PD xmm xmm k xmm
//	VFNMADD213PD ymm ymm k ymm
//	VFNMADD213PD m512 zmm k zmm
//	VFNMADD213PD m512 zmm zmm
//	VFNMADD213PD zmm zmm k zmm
//	VFNMADD213PD zmm zmm zmm
//
// Construct and append a VFNMADD213PD instruction to the active function.
// Operates on the global context.
func VFNMADD213PD(ops ...operand.Op) { ctx.VFNMADD213PD(ops...) }

// VFNMADD213PD_BCST: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VFNMADD213PD.BCST m64 xmm k xmm
//	VFNMADD213PD.BCST m64 xmm xmm
//	VFNMADD213PD.BCST m64 ymm k ymm
//	VFNMADD213PD.BCST m64 ymm ymm
//	VFNMADD213PD.BCST m64 zmm k zmm
//	VFNMADD213PD.BCST m64 zmm zmm
//
// Construct and append a VFNMADD213PD.BCST instruction to the active function.
func (c *Context) VFNMADD213PD_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VFNMADD213PD_BCST(ops...))
}

// VFNMADD213PD_BCST: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VFNMADD213PD.BCST m64 xmm k xmm
//	VFNMADD213PD.BCST m64 xmm xmm
//	VFNMADD213PD.BCST m64 ymm k ymm
//	VFNMADD213PD.BCST m64 ymm ymm
//	VFNMADD213PD.BCST m64 zmm k zmm
//	VFNMADD213PD.BCST m64 zmm zmm
//
// Construct and append a VFNMADD213PD.BCST instruction to the active function.
// Operates on the global context.
func VFNMADD213PD_BCST(ops ...operand.Op) { ctx.VFNMADD213PD_BCST(ops...) }

// VFNMADD213PD_BCST_Z: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VFNMADD213PD.BCST.Z m64 xmm k xmm
//	VFNMADD213PD.BCST.Z m64 ymm k ymm
//	VFNMADD213PD.BCST.Z m64 zmm k zmm
//
// Construct and append a VFNMADD213PD.BCST.Z instruction to the active function.
func (c *Context) VFNMADD213PD_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VFNMADD213PD_BCST_Z(m, xyz, k, xyz1))
}

// VFNMADD213PD_BCST_Z: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VFNMADD213PD.BCST.Z m64 xmm k xmm
//	VFNMADD213PD.BCST.Z m64 ymm k ymm
//	VFNMADD213PD.BCST.Z m64 zmm k zmm
//
// Construct and append a VFNMADD213PD.BCST.Z instruction to the active function.
// Operates on the global context.
func VFNMADD213PD_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VFNMADD213PD_BCST_Z(m, xyz, k, xyz1) }

// VFNMADD213PD_RD_SAE: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VFNMADD213PD.RD_SAE zmm zmm k zmm
//	VFNMADD213PD.RD_SAE zmm zmm zmm
//
// Construct and append a VFNMADD213PD.RD_SAE instruction to the active function.
func (c *Context) VFNMADD213PD_RD_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFNMADD213PD_RD_SAE(ops...))
}

// VFNMADD213PD_RD_SAE: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VFNMADD213PD.RD_SAE zmm zmm k zmm
//	VFNMADD213PD.RD_SAE zmm zmm zmm
//
// Construct and append a VFNMADD213PD.RD_SAE instruction to the active function.
// Operates on the global context.
func VFNMADD213PD_RD_SAE(ops ...operand.Op) { ctx.VFNMADD213PD_RD_SAE(ops...) }

// VFNMADD213PD_RD_SAE_Z: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VFNMADD213PD.RD_SAE.Z zmm zmm k zmm
//
// Construct and append a VFNMADD213PD.RD_SAE.Z instruction to the active function.
func (c *Context) VFNMADD213PD_RD_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VFNMADD213PD_RD_SAE_Z(z, z1, k, z2))
}

// VFNMADD213PD_RD_SAE_Z: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VFNMADD213PD.RD_SAE.Z zmm zmm k zmm
//
// Construct and append a VFNMADD213PD.RD_SAE.Z instruction to the active function.
// Operates on the global context.
func VFNMADD213PD_RD_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFNMADD213PD_RD_SAE_Z(z, z1, k, z2) }

// VFNMADD213PD_RN_SAE: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VFNMADD213PD.RN_SAE zmm zmm k zmm
//	VFNMADD213PD.RN_SAE zmm zmm zmm
//
// Construct and append a VFNMADD213PD.RN_SAE instruction to the active function.
func (c *Context) VFNMADD213PD_RN_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFNMADD213PD_RN_SAE(ops...))
}

// VFNMADD213PD_RN_SAE: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VFNMADD213PD.RN_SAE zmm zmm k zmm
//	VFNMADD213PD.RN_SAE zmm zmm zmm
//
// Construct and append a VFNMADD213PD.RN_SAE instruction to the active function.
// Operates on the global context.
func VFNMADD213PD_RN_SAE(ops ...operand.Op) { ctx.VFNMADD213PD_RN_SAE(ops...) }

// VFNMADD213PD_RN_SAE_Z: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VFNMADD213PD.RN_SAE.Z zmm zmm k zmm
//
// Construct and append a VFNMADD213PD.RN_SAE.Z instruction to the active function.
func (c *Context) VFNMADD213PD_RN_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VFNMADD213PD_RN_SAE_Z(z, z1, k, z2))
}

// VFNMADD213PD_RN_SAE_Z: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VFNMADD213PD.RN_SAE.Z zmm zmm k zmm
//
// Construct and append a VFNMADD213PD.RN_SAE.Z instruction to the active function.
// Operates on the global context.
func VFNMADD213PD_RN_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFNMADD213PD_RN_SAE_Z(z, z1, k, z2) }

// VFNMADD213PD_RU_SAE: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VFNMADD213PD.RU_SAE zmm zmm k zmm
//	VFNMADD213PD.RU_SAE zmm zmm zmm
//
// Construct and append a VFNMADD213PD.RU_SAE instruction to the active function.
func (c *Context) VFNMADD213PD_RU_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFNMADD213PD_RU_SAE(ops...))
}

// VFNMADD213PD_RU_SAE: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VFNMADD213PD.RU_SAE zmm zmm k zmm
//	VFNMADD213PD.RU_SAE zmm zmm zmm
//
// Construct and append a VFNMADD213PD.RU_SAE instruction to the active function.
// Operates on the global context.
func VFNMADD213PD_RU_SAE(ops ...operand.Op) { ctx.VFNMADD213PD_RU_SAE(ops...) }

// VFNMADD213PD_RU_SAE_Z: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VFNMADD213PD.RU_SAE.Z zmm zmm k zmm
//
// Construct and append a VFNMADD213PD.RU_SAE.Z instruction to the active function.
func (c *Context) VFNMADD213PD_RU_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VFNMADD213PD_RU_SAE_Z(z, z1, k, z2))
}

// VFNMADD213PD_RU_SAE_Z: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VFNMADD213PD.RU_SAE.Z zmm zmm k zmm
//
// Construct and append a VFNMADD213PD.RU_SAE.Z instruction to the active function.
// Operates on the global context.
func VFNMADD213PD_RU_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFNMADD213PD_RU_SAE_Z(z, z1, k, z2) }

// VFNMADD213PD_RZ_SAE: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VFNMADD213PD.RZ_SAE zmm zmm k zmm
//	VFNMADD213PD.RZ_SAE zmm zmm zmm
//
// Construct and append a VFNMADD213PD.RZ_SAE instruction to the active function.
func (c *Context) VFNMADD213PD_RZ_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFNMADD213PD_RZ_SAE(ops...))
}

// VFNMADD213PD_RZ_SAE: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VFNMADD213PD.RZ_SAE zmm zmm k zmm
//	VFNMADD213PD.RZ_SAE zmm zmm zmm
//
// Construct and append a VFNMADD213PD.RZ_SAE instruction to the active function.
// Operates on the global context.
func VFNMADD213PD_RZ_SAE(ops ...operand.Op) { ctx.VFNMADD213PD_RZ_SAE(ops...) }

// VFNMADD213PD_RZ_SAE_Z: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VFNMADD213PD.RZ_SAE.Z zmm zmm k zmm
//
// Construct and append a VFNMADD213PD.RZ_SAE.Z instruction to the active function.
func (c *Context) VFNMADD213PD_RZ_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VFNMADD213PD_RZ_SAE_Z(z, z1, k, z2))
}

// VFNMADD213PD_RZ_SAE_Z: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VFNMADD213PD.RZ_SAE.Z zmm zmm k zmm
//
// Construct and append a VFNMADD213PD.RZ_SAE.Z instruction to the active function.
// Operates on the global context.
func VFNMADD213PD_RZ_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFNMADD213PD_RZ_SAE_Z(z, z1, k, z2) }

// VFNMADD213PD_Z: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VFNMADD213PD.Z m128 xmm k xmm
//	VFNMADD213PD.Z m256 ymm k ymm
//	VFNMADD213PD.Z xmm xmm k xmm
//	VFNMADD213PD.Z ymm ymm k ymm
//	VFNMADD213PD.Z m512 zmm k zmm
//	VFNMADD213PD.Z zmm zmm k zmm
//
// Construct and append a VFNMADD213PD.Z instruction to the active function.
func (c *Context) VFNMADD213PD_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VFNMADD213PD_Z(mxyz, xyz, k, xyz1))
}

// VFNMADD213PD_Z: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VFNMADD213PD.Z m128 xmm k xmm
//	VFNMADD213PD.Z m256 ymm k ymm
//	VFNMADD213PD.Z xmm xmm k xmm
//	VFNMADD213PD.Z ymm ymm k ymm
//	VFNMADD213PD.Z m512 zmm k zmm
//	VFNMADD213PD.Z zmm zmm k zmm
//
// Construct and append a VFNMADD213PD.Z instruction to the active function.
// Operates on the global context.
func VFNMADD213PD_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VFNMADD213PD_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VFNMADD213PS: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	VFNMADD213PS m128 xmm xmm
//	VFNMADD213PS m256 ymm ymm
//	VFNMADD213PS xmm xmm xmm
//	VFNMADD213PS ymm ymm ymm
//	VFNMADD213PS m128 xmm k xmm
//	VFNMADD213PS m256 ymm k ymm
//	VFNMADD213PS xmm xmm k xmm
//	VFNMADD213PS ymm ymm k ymm
//	VFNMADD213PS m512 zmm k zmm
//	VFNMADD213PS m512 zmm zmm
//	VFNMADD213PS zmm zmm k zmm
//	VFNMADD213PS zmm zmm zmm
//
// Construct and append a VFNMADD213PS instruction to the active function.
func (c *Context) VFNMADD213PS(ops ...operand.Op) {
	c.addinstruction(x86.VFNMADD213PS(ops...))
}

// VFNMADD213PS: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	VFNMADD213PS m128 xmm xmm
//	VFNMADD213PS m256 ymm ymm
//	VFNMADD213PS xmm xmm xmm
//	VFNMADD213PS ymm ymm ymm
//	VFNMADD213PS m128 xmm k xmm
//	VFNMADD213PS m256 ymm k ymm
//	VFNMADD213PS xmm xmm k xmm
//	VFNMADD213PS ymm ymm k ymm
//	VFNMADD213PS m512 zmm k zmm
//	VFNMADD213PS m512 zmm zmm
//	VFNMADD213PS zmm zmm k zmm
//	VFNMADD213PS zmm zmm zmm
//
// Construct and append a VFNMADD213PS instruction to the active function.
// Operates on the global context.
func VFNMADD213PS(ops ...operand.Op) { ctx.VFNMADD213PS(ops...) }

// VFNMADD213PS_BCST: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VFNMADD213PS.BCST m32 xmm k xmm
//	VFNMADD213PS.BCST m32 xmm xmm
//	VFNMADD213PS.BCST m32 ymm k ymm
//	VFNMADD213PS.BCST m32 ymm ymm
//	VFNMADD213PS.BCST m32 zmm k zmm
//	VFNMADD213PS.BCST m32 zmm zmm
//
// Construct and append a VFNMADD213PS.BCST instruction to the active function.
func (c *Context) VFNMADD213PS_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VFNMADD213PS_BCST(ops...))
}

// VFNMADD213PS_BCST: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VFNMADD213PS.BCST m32 xmm k xmm
//	VFNMADD213PS.BCST m32 xmm xmm
//	VFNMADD213PS.BCST m32 ymm k ymm
//	VFNMADD213PS.BCST m32 ymm ymm
//	VFNMADD213PS.BCST m32 zmm k zmm
//	VFNMADD213PS.BCST m32 zmm zmm
//
// Construct and append a VFNMADD213PS.BCST instruction to the active function.
// Operates on the global context.
func VFNMADD213PS_BCST(ops ...operand.Op) { ctx.VFNMADD213PS_BCST(ops...) }

// VFNMADD213PS_BCST_Z: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VFNMADD213PS.BCST.Z m32 xmm k xmm
//	VFNMADD213PS.BCST.Z m32 ymm k ymm
//	VFNMADD213PS.BCST.Z m32 zmm k zmm
//
// Construct and append a VFNMADD213PS.BCST.Z instruction to the active function.
func (c *Context) VFNMADD213PS_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VFNMADD213PS_BCST_Z(m, xyz, k, xyz1))
}

// VFNMADD213PS_BCST_Z: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VFNMADD213PS.BCST.Z m32 xmm k xmm
//	VFNMADD213PS.BCST.Z m32 ymm k ymm
//	VFNMADD213PS.BCST.Z m32 zmm k zmm
//
// Construct and append a VFNMADD213PS.BCST.Z instruction to the active function.
// Operates on the global context.
func VFNMADD213PS_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VFNMADD213PS_BCST_Z(m, xyz, k, xyz1) }

// VFNMADD213PS_RD_SAE: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VFNMADD213PS.RD_SAE zmm zmm k zmm
//	VFNMADD213PS.RD_SAE zmm zmm zmm
//
// Construct and append a VFNMADD213PS.RD_SAE instruction to the active function.
func (c *Context) VFNMADD213PS_RD_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFNMADD213PS_RD_SAE(ops...))
}

// VFNMADD213PS_RD_SAE: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VFNMADD213PS.RD_SAE zmm zmm k zmm
//	VFNMADD213PS.RD_SAE zmm zmm zmm
//
// Construct and append a VFNMADD213PS.RD_SAE instruction to the active function.
// Operates on the global context.
func VFNMADD213PS_RD_SAE(ops ...operand.Op) { ctx.VFNMADD213PS_RD_SAE(ops...) }

// VFNMADD213PS_RD_SAE_Z: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VFNMADD213PS.RD_SAE.Z zmm zmm k zmm
//
// Construct and append a VFNMADD213PS.RD_SAE.Z instruction to the active function.
func (c *Context) VFNMADD213PS_RD_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VFNMADD213PS_RD_SAE_Z(z, z1, k, z2))
}

// VFNMADD213PS_RD_SAE_Z: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VFNMADD213PS.RD_SAE.Z zmm zmm k zmm
//
// Construct and append a VFNMADD213PS.RD_SAE.Z instruction to the active function.
// Operates on the global context.
func VFNMADD213PS_RD_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFNMADD213PS_RD_SAE_Z(z, z1, k, z2) }

// VFNMADD213PS_RN_SAE: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VFNMADD213PS.RN_SAE zmm zmm k zmm
//	VFNMADD213PS.RN_SAE zmm zmm zmm
//
// Construct and append a VFNMADD213PS.RN_SAE instruction to the active function.
func (c *Context) VFNMADD213PS_RN_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFNMADD213PS_RN_SAE(ops...))
}

// VFNMADD213PS_RN_SAE: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VFNMADD213PS.RN_SAE zmm zmm k zmm
//	VFNMADD213PS.RN_SAE zmm zmm zmm
//
// Construct and append a VFNMADD213PS.RN_SAE instruction to the active function.
// Operates on the global context.
func VFNMADD213PS_RN_SAE(ops ...operand.Op) { ctx.VFNMADD213PS_RN_SAE(ops...) }

// VFNMADD213PS_RN_SAE_Z: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VFNMADD213PS.RN_SAE.Z zmm zmm k zmm
//
// Construct and append a VFNMADD213PS.RN_SAE.Z instruction to the active function.
func (c *Context) VFNMADD213PS_RN_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VFNMADD213PS_RN_SAE_Z(z, z1, k, z2))
}

// VFNMADD213PS_RN_SAE_Z: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VFNMADD213PS.RN_SAE.Z zmm zmm k zmm
//
// Construct and append a VFNMADD213PS.RN_SAE.Z instruction to the active function.
// Operates on the global context.
func VFNMADD213PS_RN_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFNMADD213PS_RN_SAE_Z(z, z1, k, z2) }

// VFNMADD213PS_RU_SAE: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VFNMADD213PS.RU_SAE zmm zmm k zmm
//	VFNMADD213PS.RU_SAE zmm zmm zmm
//
// Construct and append a VFNMADD213PS.RU_SAE instruction to the active function.
func (c *Context) VFNMADD213PS_RU_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFNMADD213PS_RU_SAE(ops...))
}

// VFNMADD213PS_RU_SAE: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VFNMADD213PS.RU_SAE zmm zmm k zmm
//	VFNMADD213PS.RU_SAE zmm zmm zmm
//
// Construct and append a VFNMADD213PS.RU_SAE instruction to the active function.
// Operates on the global context.
func VFNMADD213PS_RU_SAE(ops ...operand.Op) { ctx.VFNMADD213PS_RU_SAE(ops...) }

// VFNMADD213PS_RU_SAE_Z: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VFNMADD213PS.RU_SAE.Z zmm zmm k zmm
//
// Construct and append a VFNMADD213PS.RU_SAE.Z instruction to the active function.
func (c *Context) VFNMADD213PS_RU_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VFNMADD213PS_RU_SAE_Z(z, z1, k, z2))
}

// VFNMADD213PS_RU_SAE_Z: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VFNMADD213PS.RU_SAE.Z zmm zmm k zmm
//
// Construct and append a VFNMADD213PS.RU_SAE.Z instruction to the active function.
// Operates on the global context.
func VFNMADD213PS_RU_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFNMADD213PS_RU_SAE_Z(z, z1, k, z2) }

// VFNMADD213PS_RZ_SAE: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VFNMADD213PS.RZ_SAE zmm zmm k zmm
//	VFNMADD213PS.RZ_SAE zmm zmm zmm
//
// Construct and append a VFNMADD213PS.RZ_SAE instruction to the active function.
func (c *Context) VFNMADD213PS_RZ_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFNMADD213PS_RZ_SAE(ops...))
}

// VFNMADD213PS_RZ_SAE: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VFNMADD213PS.RZ_SAE zmm zmm k zmm
//	VFNMADD213PS.RZ_SAE zmm zmm zmm
//
// Construct and append a VFNMADD213PS.RZ_SAE instruction to the active function.
// Operates on the global context.
func VFNMADD213PS_RZ_SAE(ops ...operand.Op) { ctx.VFNMADD213PS_RZ_SAE(ops...) }

// VFNMADD213PS_RZ_SAE_Z: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VFNMADD213PS.RZ_SAE.Z zmm zmm k zmm
//
// Construct and append a VFNMADD213PS.RZ_SAE.Z instruction to the active function.
func (c *Context) VFNMADD213PS_RZ_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VFNMADD213PS_RZ_SAE_Z(z, z1, k, z2))
}

// VFNMADD213PS_RZ_SAE_Z: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VFNMADD213PS.RZ_SAE.Z zmm zmm k zmm
//
// Construct and append a VFNMADD213PS.RZ_SAE.Z instruction to the active function.
// Operates on the global context.
func VFNMADD213PS_RZ_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFNMADD213PS_RZ_SAE_Z(z, z1, k, z2) }

// VFNMADD213PS_Z: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VFNMADD213PS.Z m128 xmm k xmm
//	VFNMADD213PS.Z m256 ymm k ymm
//	VFNMADD213PS.Z xmm xmm k xmm
//	VFNMADD213PS.Z ymm ymm k ymm
//	VFNMADD213PS.Z m512 zmm k zmm
//	VFNMADD213PS.Z zmm zmm k zmm
//
// Construct and append a VFNMADD213PS.Z instruction to the active function.
func (c *Context) VFNMADD213PS_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VFNMADD213PS_Z(mxyz, xyz, k, xyz1))
}

// VFNMADD213PS_Z: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VFNMADD213PS.Z m128 xmm k xmm
//	VFNMADD213PS.Z m256 ymm k ymm
//	VFNMADD213PS.Z xmm xmm k xmm
//	VFNMADD213PS.Z ymm ymm k ymm
//	VFNMADD213PS.Z m512 zmm k zmm
//	VFNMADD213PS.Z zmm zmm k zmm
//
// Construct and append a VFNMADD213PS.Z instruction to the active function.
// Operates on the global context.
func VFNMADD213PS_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VFNMADD213PS_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VFNMADD213SD: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values.
//
// Forms:
//
//	VFNMADD213SD m64 xmm xmm
//	VFNMADD213SD xmm xmm xmm
//	VFNMADD213SD m64 xmm k xmm
//	VFNMADD213SD xmm xmm k xmm
//
// Construct and append a VFNMADD213SD instruction to the active function.
func (c *Context) VFNMADD213SD(ops ...operand.Op) {
	c.addinstruction(x86.VFNMADD213SD(ops...))
}

// VFNMADD213SD: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values.
//
// Forms:
//
//	VFNMADD213SD m64 xmm xmm
//	VFNMADD213SD xmm xmm xmm
//	VFNMADD213SD m64 xmm k xmm
//	VFNMADD213SD xmm xmm k xmm
//
// Construct and append a VFNMADD213SD instruction to the active function.
// Operates on the global context.
func VFNMADD213SD(ops ...operand.Op) { ctx.VFNMADD213SD(ops...) }

// VFNMADD213SD_RD_SAE: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VFNMADD213SD.RD_SAE xmm xmm k xmm
//	VFNMADD213SD.RD_SAE xmm xmm xmm
//
// Construct and append a VFNMADD213SD.RD_SAE instruction to the active function.
func (c *Context) VFNMADD213SD_RD_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFNMADD213SD_RD_SAE(ops...))
}

// VFNMADD213SD_RD_SAE: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VFNMADD213SD.RD_SAE xmm xmm k xmm
//	VFNMADD213SD.RD_SAE xmm xmm xmm
//
// Construct and append a VFNMADD213SD.RD_SAE instruction to the active function.
// Operates on the global context.
func VFNMADD213SD_RD_SAE(ops ...operand.Op) { ctx.VFNMADD213SD_RD_SAE(ops...) }

// VFNMADD213SD_RD_SAE_Z: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VFNMADD213SD.RD_SAE.Z xmm xmm k xmm
//
// Construct and append a VFNMADD213SD.RD_SAE.Z instruction to the active function.
func (c *Context) VFNMADD213SD_RD_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VFNMADD213SD_RD_SAE_Z(x, x1, k, x2))
}

// VFNMADD213SD_RD_SAE_Z: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VFNMADD213SD.RD_SAE.Z xmm xmm k xmm
//
// Construct and append a VFNMADD213SD.RD_SAE.Z instruction to the active function.
// Operates on the global context.
func VFNMADD213SD_RD_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFNMADD213SD_RD_SAE_Z(x, x1, k, x2) }

// VFNMADD213SD_RN_SAE: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VFNMADD213SD.RN_SAE xmm xmm k xmm
//	VFNMADD213SD.RN_SAE xmm xmm xmm
//
// Construct and append a VFNMADD213SD.RN_SAE instruction to the active function.
func (c *Context) VFNMADD213SD_RN_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFNMADD213SD_RN_SAE(ops...))
}

// VFNMADD213SD_RN_SAE: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VFNMADD213SD.RN_SAE xmm xmm k xmm
//	VFNMADD213SD.RN_SAE xmm xmm xmm
//
// Construct and append a VFNMADD213SD.RN_SAE instruction to the active function.
// Operates on the global context.
func VFNMADD213SD_RN_SAE(ops ...operand.Op) { ctx.VFNMADD213SD_RN_SAE(ops...) }

// VFNMADD213SD_RN_SAE_Z: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VFNMADD213SD.RN_SAE.Z xmm xmm k xmm
//
// Construct and append a VFNMADD213SD.RN_SAE.Z instruction to the active function.
func (c *Context) VFNMADD213SD_RN_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VFNMADD213SD_RN_SAE_Z(x, x1, k, x2))
}

// VFNMADD213SD_RN_SAE_Z: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VFNMADD213SD.RN_SAE.Z xmm xmm k xmm
//
// Construct and append a VFNMADD213SD.RN_SAE.Z instruction to the active function.
// Operates on the global context.
func VFNMADD213SD_RN_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFNMADD213SD_RN_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFNMADD213SD_RU_SAE: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD213SD.RU_SAE xmm xmm k xmm
|
|
// VFNMADD213SD.RU_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMADD213SD.RU_SAE instruction to the active function.
|
|
func (c *Context) VFNMADD213SD_RU_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMADD213SD_RU_SAE(ops...))
|
|
}
|
|
|
|
// VFNMADD213SD_RU_SAE: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD213SD.RU_SAE xmm xmm k xmm
|
|
// VFNMADD213SD.RU_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMADD213SD.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMADD213SD_RU_SAE(ops ...operand.Op) { ctx.VFNMADD213SD_RU_SAE(ops...) }
|
|
|
|
// VFNMADD213SD_RU_SAE_Z: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD213SD.RU_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMADD213SD.RU_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMADD213SD_RU_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VFNMADD213SD_RU_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VFNMADD213SD_RU_SAE_Z: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD213SD.RU_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMADD213SD.RU_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMADD213SD_RU_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFNMADD213SD_RU_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFNMADD213SD_RZ_SAE: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD213SD.RZ_SAE xmm xmm k xmm
|
|
// VFNMADD213SD.RZ_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMADD213SD.RZ_SAE instruction to the active function.
|
|
func (c *Context) VFNMADD213SD_RZ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMADD213SD_RZ_SAE(ops...))
|
|
}
|
|
|
|
// VFNMADD213SD_RZ_SAE: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD213SD.RZ_SAE xmm xmm k xmm
|
|
// VFNMADD213SD.RZ_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMADD213SD.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMADD213SD_RZ_SAE(ops ...operand.Op) { ctx.VFNMADD213SD_RZ_SAE(ops...) }
|
|
|
|
// VFNMADD213SD_RZ_SAE_Z: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD213SD.RZ_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMADD213SD.RZ_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMADD213SD_RZ_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VFNMADD213SD_RZ_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VFNMADD213SD_RZ_SAE_Z: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD213SD.RZ_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMADD213SD.RZ_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMADD213SD_RZ_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFNMADD213SD_RZ_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFNMADD213SD_Z: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD213SD.Z m64 xmm k xmm
|
|
// VFNMADD213SD.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMADD213SD.Z instruction to the active function.
|
|
func (c *Context) VFNMADD213SD_Z(mx, x, k, x1 operand.Op) {
|
|
c.addinstruction(x86.VFNMADD213SD_Z(mx, x, k, x1))
|
|
}
|
|
|
|
// VFNMADD213SD_Z: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD213SD.Z m64 xmm k xmm
|
|
// VFNMADD213SD.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMADD213SD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMADD213SD_Z(mx, x, k, x1 operand.Op) { ctx.VFNMADD213SD_Z(mx, x, k, x1) }
|
|
|
|
// VFNMADD213SS: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD213SS m32 xmm xmm
|
|
// VFNMADD213SS xmm xmm xmm
|
|
// VFNMADD213SS m32 xmm k xmm
|
|
// VFNMADD213SS xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMADD213SS instruction to the active function.
|
|
func (c *Context) VFNMADD213SS(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMADD213SS(ops...))
|
|
}
|
|
|
|
// VFNMADD213SS: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD213SS m32 xmm xmm
|
|
// VFNMADD213SS xmm xmm xmm
|
|
// VFNMADD213SS m32 xmm k xmm
|
|
// VFNMADD213SS xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMADD213SS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMADD213SS(ops ...operand.Op) { ctx.VFNMADD213SS(ops...) }
|
|
|
|
// VFNMADD213SS_RD_SAE: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD213SS.RD_SAE xmm xmm k xmm
|
|
// VFNMADD213SS.RD_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMADD213SS.RD_SAE instruction to the active function.
|
|
func (c *Context) VFNMADD213SS_RD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMADD213SS_RD_SAE(ops...))
|
|
}
|
|
|
|
// VFNMADD213SS_RD_SAE: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD213SS.RD_SAE xmm xmm k xmm
|
|
// VFNMADD213SS.RD_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMADD213SS.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMADD213SS_RD_SAE(ops ...operand.Op) { ctx.VFNMADD213SS_RD_SAE(ops...) }
|
|
|
|
// VFNMADD213SS_RD_SAE_Z: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD213SS.RD_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMADD213SS.RD_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMADD213SS_RD_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VFNMADD213SS_RD_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VFNMADD213SS_RD_SAE_Z: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD213SS.RD_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMADD213SS.RD_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMADD213SS_RD_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFNMADD213SS_RD_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFNMADD213SS_RN_SAE: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD213SS.RN_SAE xmm xmm k xmm
|
|
// VFNMADD213SS.RN_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMADD213SS.RN_SAE instruction to the active function.
|
|
func (c *Context) VFNMADD213SS_RN_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMADD213SS_RN_SAE(ops...))
|
|
}
|
|
|
|
// VFNMADD213SS_RN_SAE: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD213SS.RN_SAE xmm xmm k xmm
|
|
// VFNMADD213SS.RN_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMADD213SS.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMADD213SS_RN_SAE(ops ...operand.Op) { ctx.VFNMADD213SS_RN_SAE(ops...) }
|
|
|
|
// VFNMADD213SS_RN_SAE_Z: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD213SS.RN_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMADD213SS.RN_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMADD213SS_RN_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VFNMADD213SS_RN_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VFNMADD213SS_RN_SAE_Z: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD213SS.RN_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMADD213SS.RN_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMADD213SS_RN_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFNMADD213SS_RN_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFNMADD213SS_RU_SAE: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD213SS.RU_SAE xmm xmm k xmm
|
|
// VFNMADD213SS.RU_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMADD213SS.RU_SAE instruction to the active function.
|
|
func (c *Context) VFNMADD213SS_RU_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMADD213SS_RU_SAE(ops...))
|
|
}
|
|
|
|
// VFNMADD213SS_RU_SAE: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD213SS.RU_SAE xmm xmm k xmm
|
|
// VFNMADD213SS.RU_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMADD213SS.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMADD213SS_RU_SAE(ops ...operand.Op) { ctx.VFNMADD213SS_RU_SAE(ops...) }
|
|
|
|
// VFNMADD213SS_RU_SAE_Z: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD213SS.RU_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMADD213SS.RU_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMADD213SS_RU_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VFNMADD213SS_RU_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VFNMADD213SS_RU_SAE_Z: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD213SS.RU_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMADD213SS.RU_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMADD213SS_RU_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFNMADD213SS_RU_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFNMADD213SS_RZ_SAE: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD213SS.RZ_SAE xmm xmm k xmm
|
|
// VFNMADD213SS.RZ_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMADD213SS.RZ_SAE instruction to the active function.
|
|
func (c *Context) VFNMADD213SS_RZ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMADD213SS_RZ_SAE(ops...))
|
|
}
|
|
|
|
// VFNMADD213SS_RZ_SAE: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD213SS.RZ_SAE xmm xmm k xmm
|
|
// VFNMADD213SS.RZ_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMADD213SS.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMADD213SS_RZ_SAE(ops ...operand.Op) { ctx.VFNMADD213SS_RZ_SAE(ops...) }
|
|
|
|
// VFNMADD213SS_RZ_SAE_Z: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD213SS.RZ_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMADD213SS.RZ_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMADD213SS_RZ_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VFNMADD213SS_RZ_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VFNMADD213SS_RZ_SAE_Z: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD213SS.RZ_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMADD213SS.RZ_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMADD213SS_RZ_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFNMADD213SS_RZ_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFNMADD213SS_Z: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD213SS.Z m32 xmm k xmm
|
|
// VFNMADD213SS.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMADD213SS.Z instruction to the active function.
|
|
func (c *Context) VFNMADD213SS_Z(mx, x, k, x1 operand.Op) {
|
|
c.addinstruction(x86.VFNMADD213SS_Z(mx, x, k, x1))
|
|
}
|
|
|
|
// VFNMADD213SS_Z: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD213SS.Z m32 xmm k xmm
|
|
// VFNMADD213SS.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMADD213SS.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMADD213SS_Z(mx, x, k, x1 operand.Op) { ctx.VFNMADD213SS_Z(mx, x, k, x1) }
|
|
|
|
// VFNMADD231PD: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD231PD m128 xmm xmm
|
|
// VFNMADD231PD m256 ymm ymm
|
|
// VFNMADD231PD xmm xmm xmm
|
|
// VFNMADD231PD ymm ymm ymm
|
|
// VFNMADD231PD m128 xmm k xmm
|
|
// VFNMADD231PD m256 ymm k ymm
|
|
// VFNMADD231PD xmm xmm k xmm
|
|
// VFNMADD231PD ymm ymm k ymm
|
|
// VFNMADD231PD m512 zmm k zmm
|
|
// VFNMADD231PD m512 zmm zmm
|
|
// VFNMADD231PD zmm zmm k zmm
|
|
// VFNMADD231PD zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMADD231PD instruction to the active function.
|
|
func (c *Context) VFNMADD231PD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMADD231PD(ops...))
|
|
}
|
|
|
|
// VFNMADD231PD: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD231PD m128 xmm xmm
|
|
// VFNMADD231PD m256 ymm ymm
|
|
// VFNMADD231PD xmm xmm xmm
|
|
// VFNMADD231PD ymm ymm ymm
|
|
// VFNMADD231PD m128 xmm k xmm
|
|
// VFNMADD231PD m256 ymm k ymm
|
|
// VFNMADD231PD xmm xmm k xmm
|
|
// VFNMADD231PD ymm ymm k ymm
|
|
// VFNMADD231PD m512 zmm k zmm
|
|
// VFNMADD231PD m512 zmm zmm
|
|
// VFNMADD231PD zmm zmm k zmm
|
|
// VFNMADD231PD zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMADD231PD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMADD231PD(ops ...operand.Op) { ctx.VFNMADD231PD(ops...) }
|
|
|
|
// VFNMADD231PD_BCST: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD231PD.BCST m64 xmm k xmm
|
|
// VFNMADD231PD.BCST m64 xmm xmm
|
|
// VFNMADD231PD.BCST m64 ymm k ymm
|
|
// VFNMADD231PD.BCST m64 ymm ymm
|
|
// VFNMADD231PD.BCST m64 zmm k zmm
|
|
// VFNMADD231PD.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VFNMADD231PD.BCST instruction to the active function.
|
|
func (c *Context) VFNMADD231PD_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMADD231PD_BCST(ops...))
|
|
}
|
|
|
|
// VFNMADD231PD_BCST: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD231PD.BCST m64 xmm k xmm
|
|
// VFNMADD231PD.BCST m64 xmm xmm
|
|
// VFNMADD231PD.BCST m64 ymm k ymm
|
|
// VFNMADD231PD.BCST m64 ymm ymm
|
|
// VFNMADD231PD.BCST m64 zmm k zmm
|
|
// VFNMADD231PD.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VFNMADD231PD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMADD231PD_BCST(ops ...operand.Op) { ctx.VFNMADD231PD_BCST(ops...) }
|
|
|
|
// VFNMADD231PD_BCST_Z: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD231PD.BCST.Z m64 xmm k xmm
|
|
// VFNMADD231PD.BCST.Z m64 ymm k ymm
|
|
// VFNMADD231PD.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VFNMADD231PD.BCST.Z instruction to the active function.
|
|
func (c *Context) VFNMADD231PD_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VFNMADD231PD_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VFNMADD231PD_BCST_Z: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD231PD.BCST.Z m64 xmm k xmm
|
|
// VFNMADD231PD.BCST.Z m64 ymm k ymm
|
|
// VFNMADD231PD.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VFNMADD231PD.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMADD231PD_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VFNMADD231PD_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VFNMADD231PD_RD_SAE: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD231PD.RD_SAE zmm zmm k zmm
|
|
// VFNMADD231PD.RD_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMADD231PD.RD_SAE instruction to the active function.
|
|
func (c *Context) VFNMADD231PD_RD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMADD231PD_RD_SAE(ops...))
|
|
}
|
|
|
|
// VFNMADD231PD_RD_SAE: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD231PD.RD_SAE zmm zmm k zmm
|
|
// VFNMADD231PD.RD_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMADD231PD.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMADD231PD_RD_SAE(ops ...operand.Op) { ctx.VFNMADD231PD_RD_SAE(ops...) }
|
|
|
|
// VFNMADD231PD_RD_SAE_Z: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD231PD.RD_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMADD231PD.RD_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMADD231PD_RD_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFNMADD231PD_RD_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFNMADD231PD_RD_SAE_Z: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD231PD.RD_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMADD231PD.RD_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMADD231PD_RD_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFNMADD231PD_RD_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFNMADD231PD_RN_SAE: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD231PD.RN_SAE zmm zmm k zmm
|
|
// VFNMADD231PD.RN_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMADD231PD.RN_SAE instruction to the active function.
|
|
func (c *Context) VFNMADD231PD_RN_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMADD231PD_RN_SAE(ops...))
|
|
}
|
|
|
|
// VFNMADD231PD_RN_SAE: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD231PD.RN_SAE zmm zmm k zmm
|
|
// VFNMADD231PD.RN_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMADD231PD.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMADD231PD_RN_SAE(ops ...operand.Op) { ctx.VFNMADD231PD_RN_SAE(ops...) }
|
|
|
|
// VFNMADD231PD_RN_SAE_Z: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD231PD.RN_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMADD231PD.RN_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMADD231PD_RN_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFNMADD231PD_RN_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFNMADD231PD_RN_SAE_Z: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD231PD.RN_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMADD231PD.RN_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMADD231PD_RN_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFNMADD231PD_RN_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFNMADD231PD_RU_SAE: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD231PD.RU_SAE zmm zmm k zmm
|
|
// VFNMADD231PD.RU_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMADD231PD.RU_SAE instruction to the active function.
|
|
func (c *Context) VFNMADD231PD_RU_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMADD231PD_RU_SAE(ops...))
|
|
}
|
|
|
|
// VFNMADD231PD_RU_SAE: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD231PD.RU_SAE zmm zmm k zmm
|
|
// VFNMADD231PD.RU_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMADD231PD.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMADD231PD_RU_SAE(ops ...operand.Op) { ctx.VFNMADD231PD_RU_SAE(ops...) }
|
|
|
|
// VFNMADD231PD_RU_SAE_Z: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD231PD.RU_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMADD231PD.RU_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMADD231PD_RU_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFNMADD231PD_RU_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFNMADD231PD_RU_SAE_Z: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD231PD.RU_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMADD231PD.RU_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMADD231PD_RU_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFNMADD231PD_RU_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFNMADD231PD_RZ_SAE: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD231PD.RZ_SAE zmm zmm k zmm
|
|
// VFNMADD231PD.RZ_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMADD231PD.RZ_SAE instruction to the active function.
|
|
func (c *Context) VFNMADD231PD_RZ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMADD231PD_RZ_SAE(ops...))
|
|
}
|
|
|
|
// VFNMADD231PD_RZ_SAE: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD231PD.RZ_SAE zmm zmm k zmm
|
|
// VFNMADD231PD.RZ_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMADD231PD.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMADD231PD_RZ_SAE(ops ...operand.Op) { ctx.VFNMADD231PD_RZ_SAE(ops...) }
|
|
|
|
// VFNMADD231PD_RZ_SAE_Z: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD231PD.RZ_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMADD231PD.RZ_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMADD231PD_RZ_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFNMADD231PD_RZ_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFNMADD231PD_RZ_SAE_Z: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD231PD.RZ_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMADD231PD.RZ_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMADD231PD_RZ_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFNMADD231PD_RZ_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFNMADD231PD_Z: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD231PD.Z m128 xmm k xmm
|
|
// VFNMADD231PD.Z m256 ymm k ymm
|
|
// VFNMADD231PD.Z xmm xmm k xmm
|
|
// VFNMADD231PD.Z ymm ymm k ymm
|
|
// VFNMADD231PD.Z m512 zmm k zmm
|
|
// VFNMADD231PD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMADD231PD.Z instruction to the active function.
|
|
func (c *Context) VFNMADD231PD_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VFNMADD231PD_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VFNMADD231PD_Z: Fused Negative Multiply-Add of Packed Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD231PD.Z m128 xmm k xmm
|
|
// VFNMADD231PD.Z m256 ymm k ymm
|
|
// VFNMADD231PD.Z xmm xmm k xmm
|
|
// VFNMADD231PD.Z ymm ymm k ymm
|
|
// VFNMADD231PD.Z m512 zmm k zmm
|
|
// VFNMADD231PD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMADD231PD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMADD231PD_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VFNMADD231PD_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VFNMADD231PS: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD231PS m128 xmm xmm
|
|
// VFNMADD231PS m256 ymm ymm
|
|
// VFNMADD231PS xmm xmm xmm
|
|
// VFNMADD231PS ymm ymm ymm
|
|
// VFNMADD231PS m128 xmm k xmm
|
|
// VFNMADD231PS m256 ymm k ymm
|
|
// VFNMADD231PS xmm xmm k xmm
|
|
// VFNMADD231PS ymm ymm k ymm
|
|
// VFNMADD231PS m512 zmm k zmm
|
|
// VFNMADD231PS m512 zmm zmm
|
|
// VFNMADD231PS zmm zmm k zmm
|
|
// VFNMADD231PS zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMADD231PS instruction to the active function.
|
|
func (c *Context) VFNMADD231PS(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMADD231PS(ops...))
|
|
}
|
|
|
|
// VFNMADD231PS: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD231PS m128 xmm xmm
|
|
// VFNMADD231PS m256 ymm ymm
|
|
// VFNMADD231PS xmm xmm xmm
|
|
// VFNMADD231PS ymm ymm ymm
|
|
// VFNMADD231PS m128 xmm k xmm
|
|
// VFNMADD231PS m256 ymm k ymm
|
|
// VFNMADD231PS xmm xmm k xmm
|
|
// VFNMADD231PS ymm ymm k ymm
|
|
// VFNMADD231PS m512 zmm k zmm
|
|
// VFNMADD231PS m512 zmm zmm
|
|
// VFNMADD231PS zmm zmm k zmm
|
|
// VFNMADD231PS zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMADD231PS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMADD231PS(ops ...operand.Op) { ctx.VFNMADD231PS(ops...) }
|
|
|
|
// VFNMADD231PS_BCST: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMADD231PS.BCST m32 xmm k xmm
|
|
// VFNMADD231PS.BCST m32 xmm xmm
|
|
// VFNMADD231PS.BCST m32 ymm k ymm
|
|
// VFNMADD231PS.BCST m32 ymm ymm
|
|
// VFNMADD231PS.BCST m32 zmm k zmm
|
|
// VFNMADD231PS.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VFNMADD231PS.BCST instruction to the active function.
|
|
func (c *Context) VFNMADD231PS_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMADD231PS_BCST(ops...))
|
|
}
|
|
|
|
// VFNMADD231PS_BCST: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VFNMADD231PS.BCST m32 xmm k xmm
//	VFNMADD231PS.BCST m32 xmm xmm
//	VFNMADD231PS.BCST m32 ymm k ymm
//	VFNMADD231PS.BCST m32 ymm ymm
//	VFNMADD231PS.BCST m32 zmm k zmm
//	VFNMADD231PS.BCST m32 zmm zmm
//
// Construct and append a VFNMADD231PS.BCST instruction to the active function.
// Operates on the global context.
func VFNMADD231PS_BCST(ops ...operand.Op) { ctx.VFNMADD231PS_BCST(ops...) }

// VFNMADD231PS_BCST_Z: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VFNMADD231PS.BCST.Z m32 xmm k xmm
//	VFNMADD231PS.BCST.Z m32 ymm k ymm
//	VFNMADD231PS.BCST.Z m32 zmm k zmm
//
// Construct and append a VFNMADD231PS.BCST.Z instruction to the active function.
func (c *Context) VFNMADD231PS_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VFNMADD231PS_BCST_Z(m, xyz, k, xyz1))
}

// VFNMADD231PS_BCST_Z: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VFNMADD231PS.BCST.Z m32 xmm k xmm
//	VFNMADD231PS.BCST.Z m32 ymm k ymm
//	VFNMADD231PS.BCST.Z m32 zmm k zmm
//
// Construct and append a VFNMADD231PS.BCST.Z instruction to the active function.
// Operates on the global context.
func VFNMADD231PS_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VFNMADD231PS_BCST_Z(m, xyz, k, xyz1) }

// VFNMADD231PS_RD_SAE: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VFNMADD231PS.RD_SAE zmm zmm k zmm
//	VFNMADD231PS.RD_SAE zmm zmm zmm
//
// Construct and append a VFNMADD231PS.RD_SAE instruction to the active function.
func (c *Context) VFNMADD231PS_RD_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFNMADD231PS_RD_SAE(ops...))
}

// VFNMADD231PS_RD_SAE: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VFNMADD231PS.RD_SAE zmm zmm k zmm
//	VFNMADD231PS.RD_SAE zmm zmm zmm
//
// Construct and append a VFNMADD231PS.RD_SAE instruction to the active function.
// Operates on the global context.
func VFNMADD231PS_RD_SAE(ops ...operand.Op) { ctx.VFNMADD231PS_RD_SAE(ops...) }

// VFNMADD231PS_RD_SAE_Z: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VFNMADD231PS.RD_SAE.Z zmm zmm k zmm
//
// Construct and append a VFNMADD231PS.RD_SAE.Z instruction to the active function.
func (c *Context) VFNMADD231PS_RD_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VFNMADD231PS_RD_SAE_Z(z, z1, k, z2))
}

// VFNMADD231PS_RD_SAE_Z: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VFNMADD231PS.RD_SAE.Z zmm zmm k zmm
//
// Construct and append a VFNMADD231PS.RD_SAE.Z instruction to the active function.
// Operates on the global context.
func VFNMADD231PS_RD_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFNMADD231PS_RD_SAE_Z(z, z1, k, z2) }

// VFNMADD231PS_RN_SAE: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VFNMADD231PS.RN_SAE zmm zmm k zmm
//	VFNMADD231PS.RN_SAE zmm zmm zmm
//
// Construct and append a VFNMADD231PS.RN_SAE instruction to the active function.
func (c *Context) VFNMADD231PS_RN_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFNMADD231PS_RN_SAE(ops...))
}

// VFNMADD231PS_RN_SAE: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VFNMADD231PS.RN_SAE zmm zmm k zmm
//	VFNMADD231PS.RN_SAE zmm zmm zmm
//
// Construct and append a VFNMADD231PS.RN_SAE instruction to the active function.
// Operates on the global context.
func VFNMADD231PS_RN_SAE(ops ...operand.Op) { ctx.VFNMADD231PS_RN_SAE(ops...) }

// VFNMADD231PS_RN_SAE_Z: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VFNMADD231PS.RN_SAE.Z zmm zmm k zmm
//
// Construct and append a VFNMADD231PS.RN_SAE.Z instruction to the active function.
func (c *Context) VFNMADD231PS_RN_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VFNMADD231PS_RN_SAE_Z(z, z1, k, z2))
}

// VFNMADD231PS_RN_SAE_Z: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VFNMADD231PS.RN_SAE.Z zmm zmm k zmm
//
// Construct and append a VFNMADD231PS.RN_SAE.Z instruction to the active function.
// Operates on the global context.
func VFNMADD231PS_RN_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFNMADD231PS_RN_SAE_Z(z, z1, k, z2) }

// VFNMADD231PS_RU_SAE: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VFNMADD231PS.RU_SAE zmm zmm k zmm
//	VFNMADD231PS.RU_SAE zmm zmm zmm
//
// Construct and append a VFNMADD231PS.RU_SAE instruction to the active function.
func (c *Context) VFNMADD231PS_RU_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFNMADD231PS_RU_SAE(ops...))
}

// VFNMADD231PS_RU_SAE: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VFNMADD231PS.RU_SAE zmm zmm k zmm
//	VFNMADD231PS.RU_SAE zmm zmm zmm
//
// Construct and append a VFNMADD231PS.RU_SAE instruction to the active function.
// Operates on the global context.
func VFNMADD231PS_RU_SAE(ops ...operand.Op) { ctx.VFNMADD231PS_RU_SAE(ops...) }

// VFNMADD231PS_RU_SAE_Z: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VFNMADD231PS.RU_SAE.Z zmm zmm k zmm
//
// Construct and append a VFNMADD231PS.RU_SAE.Z instruction to the active function.
func (c *Context) VFNMADD231PS_RU_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VFNMADD231PS_RU_SAE_Z(z, z1, k, z2))
}

// VFNMADD231PS_RU_SAE_Z: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VFNMADD231PS.RU_SAE.Z zmm zmm k zmm
//
// Construct and append a VFNMADD231PS.RU_SAE.Z instruction to the active function.
// Operates on the global context.
func VFNMADD231PS_RU_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFNMADD231PS_RU_SAE_Z(z, z1, k, z2) }

// VFNMADD231PS_RZ_SAE: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VFNMADD231PS.RZ_SAE zmm zmm k zmm
//	VFNMADD231PS.RZ_SAE zmm zmm zmm
//
// Construct and append a VFNMADD231PS.RZ_SAE instruction to the active function.
func (c *Context) VFNMADD231PS_RZ_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFNMADD231PS_RZ_SAE(ops...))
}

// VFNMADD231PS_RZ_SAE: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VFNMADD231PS.RZ_SAE zmm zmm k zmm
//	VFNMADD231PS.RZ_SAE zmm zmm zmm
//
// Construct and append a VFNMADD231PS.RZ_SAE instruction to the active function.
// Operates on the global context.
func VFNMADD231PS_RZ_SAE(ops ...operand.Op) { ctx.VFNMADD231PS_RZ_SAE(ops...) }

// VFNMADD231PS_RZ_SAE_Z: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VFNMADD231PS.RZ_SAE.Z zmm zmm k zmm
//
// Construct and append a VFNMADD231PS.RZ_SAE.Z instruction to the active function.
func (c *Context) VFNMADD231PS_RZ_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VFNMADD231PS_RZ_SAE_Z(z, z1, k, z2))
}

// VFNMADD231PS_RZ_SAE_Z: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VFNMADD231PS.RZ_SAE.Z zmm zmm k zmm
//
// Construct and append a VFNMADD231PS.RZ_SAE.Z instruction to the active function.
// Operates on the global context.
func VFNMADD231PS_RZ_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFNMADD231PS_RZ_SAE_Z(z, z1, k, z2) }

// VFNMADD231PS_Z: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VFNMADD231PS.Z m128 xmm k xmm
//	VFNMADD231PS.Z m256 ymm k ymm
//	VFNMADD231PS.Z xmm xmm k xmm
//	VFNMADD231PS.Z ymm ymm k ymm
//	VFNMADD231PS.Z m512 zmm k zmm
//	VFNMADD231PS.Z zmm zmm k zmm
//
// Construct and append a VFNMADD231PS.Z instruction to the active function.
func (c *Context) VFNMADD231PS_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VFNMADD231PS_Z(mxyz, xyz, k, xyz1))
}

// VFNMADD231PS_Z: Fused Negative Multiply-Add of Packed Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VFNMADD231PS.Z m128 xmm k xmm
//	VFNMADD231PS.Z m256 ymm k ymm
//	VFNMADD231PS.Z xmm xmm k xmm
//	VFNMADD231PS.Z ymm ymm k ymm
//	VFNMADD231PS.Z m512 zmm k zmm
//	VFNMADD231PS.Z zmm zmm k zmm
//
// Construct and append a VFNMADD231PS.Z instruction to the active function.
// Operates on the global context.
func VFNMADD231PS_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VFNMADD231PS_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VFNMADD231SD: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values.
//
// Forms:
//
//	VFNMADD231SD m64 xmm xmm
//	VFNMADD231SD xmm xmm xmm
//	VFNMADD231SD m64 xmm k xmm
//	VFNMADD231SD xmm xmm k xmm
//
// Construct and append a VFNMADD231SD instruction to the active function.
func (c *Context) VFNMADD231SD(ops ...operand.Op) {
	c.addinstruction(x86.VFNMADD231SD(ops...))
}

// VFNMADD231SD: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values.
//
// Forms:
//
//	VFNMADD231SD m64 xmm xmm
//	VFNMADD231SD xmm xmm xmm
//	VFNMADD231SD m64 xmm k xmm
//	VFNMADD231SD xmm xmm k xmm
//
// Construct and append a VFNMADD231SD instruction to the active function.
// Operates on the global context.
func VFNMADD231SD(ops ...operand.Op) { ctx.VFNMADD231SD(ops...) }

// VFNMADD231SD_RD_SAE: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VFNMADD231SD.RD_SAE xmm xmm k xmm
//	VFNMADD231SD.RD_SAE xmm xmm xmm
//
// Construct and append a VFNMADD231SD.RD_SAE instruction to the active function.
func (c *Context) VFNMADD231SD_RD_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFNMADD231SD_RD_SAE(ops...))
}

// VFNMADD231SD_RD_SAE: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VFNMADD231SD.RD_SAE xmm xmm k xmm
//	VFNMADD231SD.RD_SAE xmm xmm xmm
//
// Construct and append a VFNMADD231SD.RD_SAE instruction to the active function.
// Operates on the global context.
func VFNMADD231SD_RD_SAE(ops ...operand.Op) { ctx.VFNMADD231SD_RD_SAE(ops...) }

// VFNMADD231SD_RD_SAE_Z: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VFNMADD231SD.RD_SAE.Z xmm xmm k xmm
//
// Construct and append a VFNMADD231SD.RD_SAE.Z instruction to the active function.
func (c *Context) VFNMADD231SD_RD_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VFNMADD231SD_RD_SAE_Z(x, x1, k, x2))
}

// VFNMADD231SD_RD_SAE_Z: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VFNMADD231SD.RD_SAE.Z xmm xmm k xmm
//
// Construct and append a VFNMADD231SD.RD_SAE.Z instruction to the active function.
// Operates on the global context.
func VFNMADD231SD_RD_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFNMADD231SD_RD_SAE_Z(x, x1, k, x2) }

// VFNMADD231SD_RN_SAE: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VFNMADD231SD.RN_SAE xmm xmm k xmm
//	VFNMADD231SD.RN_SAE xmm xmm xmm
//
// Construct and append a VFNMADD231SD.RN_SAE instruction to the active function.
func (c *Context) VFNMADD231SD_RN_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFNMADD231SD_RN_SAE(ops...))
}

// VFNMADD231SD_RN_SAE: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VFNMADD231SD.RN_SAE xmm xmm k xmm
//	VFNMADD231SD.RN_SAE xmm xmm xmm
//
// Construct and append a VFNMADD231SD.RN_SAE instruction to the active function.
// Operates on the global context.
func VFNMADD231SD_RN_SAE(ops ...operand.Op) { ctx.VFNMADD231SD_RN_SAE(ops...) }

// VFNMADD231SD_RN_SAE_Z: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VFNMADD231SD.RN_SAE.Z xmm xmm k xmm
//
// Construct and append a VFNMADD231SD.RN_SAE.Z instruction to the active function.
func (c *Context) VFNMADD231SD_RN_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VFNMADD231SD_RN_SAE_Z(x, x1, k, x2))
}

// VFNMADD231SD_RN_SAE_Z: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VFNMADD231SD.RN_SAE.Z xmm xmm k xmm
//
// Construct and append a VFNMADD231SD.RN_SAE.Z instruction to the active function.
// Operates on the global context.
func VFNMADD231SD_RN_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFNMADD231SD_RN_SAE_Z(x, x1, k, x2) }

// VFNMADD231SD_RU_SAE: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VFNMADD231SD.RU_SAE xmm xmm k xmm
//	VFNMADD231SD.RU_SAE xmm xmm xmm
//
// Construct and append a VFNMADD231SD.RU_SAE instruction to the active function.
func (c *Context) VFNMADD231SD_RU_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFNMADD231SD_RU_SAE(ops...))
}

// VFNMADD231SD_RU_SAE: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VFNMADD231SD.RU_SAE xmm xmm k xmm
//	VFNMADD231SD.RU_SAE xmm xmm xmm
//
// Construct and append a VFNMADD231SD.RU_SAE instruction to the active function.
// Operates on the global context.
func VFNMADD231SD_RU_SAE(ops ...operand.Op) { ctx.VFNMADD231SD_RU_SAE(ops...) }

// VFNMADD231SD_RU_SAE_Z: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VFNMADD231SD.RU_SAE.Z xmm xmm k xmm
//
// Construct and append a VFNMADD231SD.RU_SAE.Z instruction to the active function.
func (c *Context) VFNMADD231SD_RU_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VFNMADD231SD_RU_SAE_Z(x, x1, k, x2))
}

// VFNMADD231SD_RU_SAE_Z: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VFNMADD231SD.RU_SAE.Z xmm xmm k xmm
//
// Construct and append a VFNMADD231SD.RU_SAE.Z instruction to the active function.
// Operates on the global context.
func VFNMADD231SD_RU_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFNMADD231SD_RU_SAE_Z(x, x1, k, x2) }

// VFNMADD231SD_RZ_SAE: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VFNMADD231SD.RZ_SAE xmm xmm k xmm
//	VFNMADD231SD.RZ_SAE xmm xmm xmm
//
// Construct and append a VFNMADD231SD.RZ_SAE instruction to the active function.
func (c *Context) VFNMADD231SD_RZ_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFNMADD231SD_RZ_SAE(ops...))
}

// VFNMADD231SD_RZ_SAE: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VFNMADD231SD.RZ_SAE xmm xmm k xmm
//	VFNMADD231SD.RZ_SAE xmm xmm xmm
//
// Construct and append a VFNMADD231SD.RZ_SAE instruction to the active function.
// Operates on the global context.
func VFNMADD231SD_RZ_SAE(ops ...operand.Op) { ctx.VFNMADD231SD_RZ_SAE(ops...) }

// VFNMADD231SD_RZ_SAE_Z: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VFNMADD231SD.RZ_SAE.Z xmm xmm k xmm
//
// Construct and append a VFNMADD231SD.RZ_SAE.Z instruction to the active function.
func (c *Context) VFNMADD231SD_RZ_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VFNMADD231SD_RZ_SAE_Z(x, x1, k, x2))
}

// VFNMADD231SD_RZ_SAE_Z: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VFNMADD231SD.RZ_SAE.Z xmm xmm k xmm
//
// Construct and append a VFNMADD231SD.RZ_SAE.Z instruction to the active function.
// Operates on the global context.
func VFNMADD231SD_RZ_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFNMADD231SD_RZ_SAE_Z(x, x1, k, x2) }

// VFNMADD231SD_Z: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VFNMADD231SD.Z m64 xmm k xmm
//	VFNMADD231SD.Z xmm xmm k xmm
//
// Construct and append a VFNMADD231SD.Z instruction to the active function.
func (c *Context) VFNMADD231SD_Z(mx, x, k, x1 operand.Op) {
	c.addinstruction(x86.VFNMADD231SD_Z(mx, x, k, x1))
}

// VFNMADD231SD_Z: Fused Negative Multiply-Add of Scalar Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VFNMADD231SD.Z m64 xmm k xmm
//	VFNMADD231SD.Z xmm xmm k xmm
//
// Construct and append a VFNMADD231SD.Z instruction to the active function.
// Operates on the global context.
func VFNMADD231SD_Z(mx, x, k, x1 operand.Op) { ctx.VFNMADD231SD_Z(mx, x, k, x1) }
|
|
|
|
// VFNMADD231SS: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values.
//
// Forms:
//
//	VFNMADD231SS m32 xmm xmm
//	VFNMADD231SS xmm xmm xmm
//	VFNMADD231SS m32 xmm k xmm
//	VFNMADD231SS xmm xmm k xmm
//
// Construct and append a VFNMADD231SS instruction to the active function.
func (c *Context) VFNMADD231SS(ops ...operand.Op) {
	c.addinstruction(x86.VFNMADD231SS(ops...))
}

// VFNMADD231SS: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values.
//
// Forms:
//
//	VFNMADD231SS m32 xmm xmm
//	VFNMADD231SS xmm xmm xmm
//	VFNMADD231SS m32 xmm k xmm
//	VFNMADD231SS xmm xmm k xmm
//
// Construct and append a VFNMADD231SS instruction to the active function.
// Operates on the global context.
func VFNMADD231SS(ops ...operand.Op) { ctx.VFNMADD231SS(ops...) }

// VFNMADD231SS_RD_SAE: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VFNMADD231SS.RD_SAE xmm xmm k xmm
//	VFNMADD231SS.RD_SAE xmm xmm xmm
//
// Construct and append a VFNMADD231SS.RD_SAE instruction to the active function.
func (c *Context) VFNMADD231SS_RD_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFNMADD231SS_RD_SAE(ops...))
}

// VFNMADD231SS_RD_SAE: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VFNMADD231SS.RD_SAE xmm xmm k xmm
//	VFNMADD231SS.RD_SAE xmm xmm xmm
//
// Construct and append a VFNMADD231SS.RD_SAE instruction to the active function.
// Operates on the global context.
func VFNMADD231SS_RD_SAE(ops ...operand.Op) { ctx.VFNMADD231SS_RD_SAE(ops...) }

// VFNMADD231SS_RD_SAE_Z: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VFNMADD231SS.RD_SAE.Z xmm xmm k xmm
//
// Construct and append a VFNMADD231SS.RD_SAE.Z instruction to the active function.
func (c *Context) VFNMADD231SS_RD_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VFNMADD231SS_RD_SAE_Z(x, x1, k, x2))
}

// VFNMADD231SS_RD_SAE_Z: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VFNMADD231SS.RD_SAE.Z xmm xmm k xmm
//
// Construct and append a VFNMADD231SS.RD_SAE.Z instruction to the active function.
// Operates on the global context.
func VFNMADD231SS_RD_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFNMADD231SS_RD_SAE_Z(x, x1, k, x2) }

// VFNMADD231SS_RN_SAE: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VFNMADD231SS.RN_SAE xmm xmm k xmm
//	VFNMADD231SS.RN_SAE xmm xmm xmm
//
// Construct and append a VFNMADD231SS.RN_SAE instruction to the active function.
func (c *Context) VFNMADD231SS_RN_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFNMADD231SS_RN_SAE(ops...))
}

// VFNMADD231SS_RN_SAE: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VFNMADD231SS.RN_SAE xmm xmm k xmm
//	VFNMADD231SS.RN_SAE xmm xmm xmm
//
// Construct and append a VFNMADD231SS.RN_SAE instruction to the active function.
// Operates on the global context.
func VFNMADD231SS_RN_SAE(ops ...operand.Op) { ctx.VFNMADD231SS_RN_SAE(ops...) }

// VFNMADD231SS_RN_SAE_Z: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VFNMADD231SS.RN_SAE.Z xmm xmm k xmm
//
// Construct and append a VFNMADD231SS.RN_SAE.Z instruction to the active function.
func (c *Context) VFNMADD231SS_RN_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VFNMADD231SS_RN_SAE_Z(x, x1, k, x2))
}

// VFNMADD231SS_RN_SAE_Z: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VFNMADD231SS.RN_SAE.Z xmm xmm k xmm
//
// Construct and append a VFNMADD231SS.RN_SAE.Z instruction to the active function.
// Operates on the global context.
func VFNMADD231SS_RN_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFNMADD231SS_RN_SAE_Z(x, x1, k, x2) }

// VFNMADD231SS_RU_SAE: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VFNMADD231SS.RU_SAE xmm xmm k xmm
//	VFNMADD231SS.RU_SAE xmm xmm xmm
//
// Construct and append a VFNMADD231SS.RU_SAE instruction to the active function.
func (c *Context) VFNMADD231SS_RU_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFNMADD231SS_RU_SAE(ops...))
}

// VFNMADD231SS_RU_SAE: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VFNMADD231SS.RU_SAE xmm xmm k xmm
//	VFNMADD231SS.RU_SAE xmm xmm xmm
//
// Construct and append a VFNMADD231SS.RU_SAE instruction to the active function.
// Operates on the global context.
func VFNMADD231SS_RU_SAE(ops ...operand.Op) { ctx.VFNMADD231SS_RU_SAE(ops...) }

// VFNMADD231SS_RU_SAE_Z: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VFNMADD231SS.RU_SAE.Z xmm xmm k xmm
//
// Construct and append a VFNMADD231SS.RU_SAE.Z instruction to the active function.
func (c *Context) VFNMADD231SS_RU_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VFNMADD231SS_RU_SAE_Z(x, x1, k, x2))
}

// VFNMADD231SS_RU_SAE_Z: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VFNMADD231SS.RU_SAE.Z xmm xmm k xmm
//
// Construct and append a VFNMADD231SS.RU_SAE.Z instruction to the active function.
// Operates on the global context.
func VFNMADD231SS_RU_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFNMADD231SS_RU_SAE_Z(x, x1, k, x2) }

// VFNMADD231SS_RZ_SAE: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VFNMADD231SS.RZ_SAE xmm xmm k xmm
//	VFNMADD231SS.RZ_SAE xmm xmm xmm
//
// Construct and append a VFNMADD231SS.RZ_SAE instruction to the active function.
func (c *Context) VFNMADD231SS_RZ_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFNMADD231SS_RZ_SAE(ops...))
}

// VFNMADD231SS_RZ_SAE: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VFNMADD231SS.RZ_SAE xmm xmm k xmm
//	VFNMADD231SS.RZ_SAE xmm xmm xmm
//
// Construct and append a VFNMADD231SS.RZ_SAE instruction to the active function.
// Operates on the global context.
func VFNMADD231SS_RZ_SAE(ops ...operand.Op) { ctx.VFNMADD231SS_RZ_SAE(ops...) }

// VFNMADD231SS_RZ_SAE_Z: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VFNMADD231SS.RZ_SAE.Z xmm xmm k xmm
//
// Construct and append a VFNMADD231SS.RZ_SAE.Z instruction to the active function.
func (c *Context) VFNMADD231SS_RZ_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VFNMADD231SS_RZ_SAE_Z(x, x1, k, x2))
}

// VFNMADD231SS_RZ_SAE_Z: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VFNMADD231SS.RZ_SAE.Z xmm xmm k xmm
//
// Construct and append a VFNMADD231SS.RZ_SAE.Z instruction to the active function.
// Operates on the global context.
func VFNMADD231SS_RZ_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFNMADD231SS_RZ_SAE_Z(x, x1, k, x2) }

// VFNMADD231SS_Z: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VFNMADD231SS.Z m32 xmm k xmm
//	VFNMADD231SS.Z xmm xmm k xmm
//
// Construct and append a VFNMADD231SS.Z instruction to the active function.
func (c *Context) VFNMADD231SS_Z(mx, x, k, x1 operand.Op) {
	c.addinstruction(x86.VFNMADD231SS_Z(mx, x, k, x1))
}

// VFNMADD231SS_Z: Fused Negative Multiply-Add of Scalar Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VFNMADD231SS.Z m32 xmm k xmm
//	VFNMADD231SS.Z xmm xmm k xmm
//
// Construct and append a VFNMADD231SS.Z instruction to the active function.
// Operates on the global context.
func VFNMADD231SS_Z(mx, x, k, x1 operand.Op) { ctx.VFNMADD231SS_Z(mx, x, k, x1) }
|
|
|
|
// VFNMSUB132PD: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values.
//
// Forms:
//
//	VFNMSUB132PD m128 xmm xmm
//	VFNMSUB132PD m256 ymm ymm
//	VFNMSUB132PD xmm xmm xmm
//	VFNMSUB132PD ymm ymm ymm
//	VFNMSUB132PD m128 xmm k xmm
//	VFNMSUB132PD m256 ymm k ymm
//	VFNMSUB132PD xmm xmm k xmm
//	VFNMSUB132PD ymm ymm k ymm
//	VFNMSUB132PD m512 zmm k zmm
//	VFNMSUB132PD m512 zmm zmm
//	VFNMSUB132PD zmm zmm k zmm
//	VFNMSUB132PD zmm zmm zmm
//
// Construct and append a VFNMSUB132PD instruction to the active function.
func (c *Context) VFNMSUB132PD(ops ...operand.Op) {
	c.addinstruction(x86.VFNMSUB132PD(ops...))
}

// VFNMSUB132PD: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values.
//
// Forms:
//
//	VFNMSUB132PD m128 xmm xmm
//	VFNMSUB132PD m256 ymm ymm
//	VFNMSUB132PD xmm xmm xmm
//	VFNMSUB132PD ymm ymm ymm
//	VFNMSUB132PD m128 xmm k xmm
//	VFNMSUB132PD m256 ymm k ymm
//	VFNMSUB132PD xmm xmm k xmm
//	VFNMSUB132PD ymm ymm k ymm
//	VFNMSUB132PD m512 zmm k zmm
//	VFNMSUB132PD m512 zmm zmm
//	VFNMSUB132PD zmm zmm k zmm
//	VFNMSUB132PD zmm zmm zmm
//
// Construct and append a VFNMSUB132PD instruction to the active function.
// Operates on the global context.
func VFNMSUB132PD(ops ...operand.Op) { ctx.VFNMSUB132PD(ops...) }

// VFNMSUB132PD_BCST: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VFNMSUB132PD.BCST m64 xmm k xmm
//	VFNMSUB132PD.BCST m64 xmm xmm
//	VFNMSUB132PD.BCST m64 ymm k ymm
//	VFNMSUB132PD.BCST m64 ymm ymm
//	VFNMSUB132PD.BCST m64 zmm k zmm
//	VFNMSUB132PD.BCST m64 zmm zmm
//
// Construct and append a VFNMSUB132PD.BCST instruction to the active function.
func (c *Context) VFNMSUB132PD_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VFNMSUB132PD_BCST(ops...))
}

// VFNMSUB132PD_BCST: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VFNMSUB132PD.BCST m64 xmm k xmm
//	VFNMSUB132PD.BCST m64 xmm xmm
//	VFNMSUB132PD.BCST m64 ymm k ymm
//	VFNMSUB132PD.BCST m64 ymm ymm
//	VFNMSUB132PD.BCST m64 zmm k zmm
//	VFNMSUB132PD.BCST m64 zmm zmm
//
// Construct and append a VFNMSUB132PD.BCST instruction to the active function.
// Operates on the global context.
func VFNMSUB132PD_BCST(ops ...operand.Op) { ctx.VFNMSUB132PD_BCST(ops...) }

// VFNMSUB132PD_BCST_Z: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VFNMSUB132PD.BCST.Z m64 xmm k xmm
//	VFNMSUB132PD.BCST.Z m64 ymm k ymm
//	VFNMSUB132PD.BCST.Z m64 zmm k zmm
//
// Construct and append a VFNMSUB132PD.BCST.Z instruction to the active function.
func (c *Context) VFNMSUB132PD_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VFNMSUB132PD_BCST_Z(m, xyz, k, xyz1))
}

// VFNMSUB132PD_BCST_Z: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VFNMSUB132PD.BCST.Z m64 xmm k xmm
//	VFNMSUB132PD.BCST.Z m64 ymm k ymm
//	VFNMSUB132PD.BCST.Z m64 zmm k zmm
//
// Construct and append a VFNMSUB132PD.BCST.Z instruction to the active function.
// Operates on the global context.
func VFNMSUB132PD_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VFNMSUB132PD_BCST_Z(m, xyz, k, xyz1) }

// VFNMSUB132PD_RD_SAE: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VFNMSUB132PD.RD_SAE zmm zmm k zmm
//	VFNMSUB132PD.RD_SAE zmm zmm zmm
//
// Construct and append a VFNMSUB132PD.RD_SAE instruction to the active function.
func (c *Context) VFNMSUB132PD_RD_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFNMSUB132PD_RD_SAE(ops...))
}

// VFNMSUB132PD_RD_SAE: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VFNMSUB132PD.RD_SAE zmm zmm k zmm
//	VFNMSUB132PD.RD_SAE zmm zmm zmm
//
// Construct and append a VFNMSUB132PD.RD_SAE instruction to the active function.
// Operates on the global context.
func VFNMSUB132PD_RD_SAE(ops ...operand.Op) { ctx.VFNMSUB132PD_RD_SAE(ops...) }

// VFNMSUB132PD_RD_SAE_Z: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VFNMSUB132PD.RD_SAE.Z zmm zmm k zmm
//
// Construct and append a VFNMSUB132PD.RD_SAE.Z instruction to the active function.
func (c *Context) VFNMSUB132PD_RD_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VFNMSUB132PD_RD_SAE_Z(z, z1, k, z2))
}

// VFNMSUB132PD_RD_SAE_Z: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VFNMSUB132PD.RD_SAE.Z zmm zmm k zmm
//
// Construct and append a VFNMSUB132PD.RD_SAE.Z instruction to the active function.
// Operates on the global context.
func VFNMSUB132PD_RD_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFNMSUB132PD_RD_SAE_Z(z, z1, k, z2) }

// VFNMSUB132PD_RN_SAE: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VFNMSUB132PD.RN_SAE zmm zmm k zmm
//	VFNMSUB132PD.RN_SAE zmm zmm zmm
//
// Construct and append a VFNMSUB132PD.RN_SAE instruction to the active function.
func (c *Context) VFNMSUB132PD_RN_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VFNMSUB132PD_RN_SAE(ops...))
}

// VFNMSUB132PD_RN_SAE: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VFNMSUB132PD.RN_SAE zmm zmm k zmm
//	VFNMSUB132PD.RN_SAE zmm zmm zmm
//
// Construct and append a VFNMSUB132PD.RN_SAE instruction to the active function.
// Operates on the global context.
func VFNMSUB132PD_RN_SAE(ops ...operand.Op) { ctx.VFNMSUB132PD_RN_SAE(ops...) }

// VFNMSUB132PD_RN_SAE_Z: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VFNMSUB132PD.RN_SAE.Z zmm zmm k zmm
//
// Construct and append a VFNMSUB132PD.RN_SAE.Z instruction to the active function.
func (c *Context) VFNMSUB132PD_RN_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VFNMSUB132PD_RN_SAE_Z(z, z1, k, z2))
}

// VFNMSUB132PD_RN_SAE_Z: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VFNMSUB132PD.RN_SAE.Z zmm zmm k zmm
//
// Construct and append a VFNMSUB132PD.RN_SAE.Z instruction to the active function.
// Operates on the global context.
func VFNMSUB132PD_RN_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFNMSUB132PD_RN_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFNMSUB132PD_RU_SAE: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132PD.RU_SAE zmm zmm k zmm
|
|
// VFNMSUB132PD.RU_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB132PD.RU_SAE instruction to the active function.
|
|
func (c *Context) VFNMSUB132PD_RU_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB132PD_RU_SAE(ops...))
|
|
}
|
|
|
|
// VFNMSUB132PD_RU_SAE: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132PD.RU_SAE zmm zmm k zmm
|
|
// VFNMSUB132PD.RU_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB132PD.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB132PD_RU_SAE(ops ...operand.Op) { ctx.VFNMSUB132PD_RU_SAE(ops...) }
|
|
|
|
// VFNMSUB132PD_RU_SAE_Z: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132PD.RU_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB132PD.RU_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB132PD_RU_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB132PD_RU_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFNMSUB132PD_RU_SAE_Z: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132PD.RU_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB132PD.RU_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB132PD_RU_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFNMSUB132PD_RU_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFNMSUB132PD_RZ_SAE: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132PD.RZ_SAE zmm zmm k zmm
|
|
// VFNMSUB132PD.RZ_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB132PD.RZ_SAE instruction to the active function.
|
|
func (c *Context) VFNMSUB132PD_RZ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB132PD_RZ_SAE(ops...))
|
|
}
|
|
|
|
// VFNMSUB132PD_RZ_SAE: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132PD.RZ_SAE zmm zmm k zmm
|
|
// VFNMSUB132PD.RZ_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB132PD.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB132PD_RZ_SAE(ops ...operand.Op) { ctx.VFNMSUB132PD_RZ_SAE(ops...) }
|
|
|
|
// VFNMSUB132PD_RZ_SAE_Z: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132PD.RZ_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB132PD.RZ_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB132PD_RZ_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB132PD_RZ_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFNMSUB132PD_RZ_SAE_Z: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132PD.RZ_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB132PD.RZ_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB132PD_RZ_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFNMSUB132PD_RZ_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFNMSUB132PD_Z: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132PD.Z m128 xmm k xmm
|
|
// VFNMSUB132PD.Z m256 ymm k ymm
|
|
// VFNMSUB132PD.Z xmm xmm k xmm
|
|
// VFNMSUB132PD.Z ymm ymm k ymm
|
|
// VFNMSUB132PD.Z m512 zmm k zmm
|
|
// VFNMSUB132PD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB132PD.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB132PD_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB132PD_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VFNMSUB132PD_Z: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132PD.Z m128 xmm k xmm
|
|
// VFNMSUB132PD.Z m256 ymm k ymm
|
|
// VFNMSUB132PD.Z xmm xmm k xmm
|
|
// VFNMSUB132PD.Z ymm ymm k ymm
|
|
// VFNMSUB132PD.Z m512 zmm k zmm
|
|
// VFNMSUB132PD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB132PD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB132PD_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VFNMSUB132PD_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VFNMSUB132PS: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132PS m128 xmm xmm
|
|
// VFNMSUB132PS m256 ymm ymm
|
|
// VFNMSUB132PS xmm xmm xmm
|
|
// VFNMSUB132PS ymm ymm ymm
|
|
// VFNMSUB132PS m128 xmm k xmm
|
|
// VFNMSUB132PS m256 ymm k ymm
|
|
// VFNMSUB132PS xmm xmm k xmm
|
|
// VFNMSUB132PS ymm ymm k ymm
|
|
// VFNMSUB132PS m512 zmm k zmm
|
|
// VFNMSUB132PS m512 zmm zmm
|
|
// VFNMSUB132PS zmm zmm k zmm
|
|
// VFNMSUB132PS zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB132PS instruction to the active function.
|
|
func (c *Context) VFNMSUB132PS(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB132PS(ops...))
|
|
}
|
|
|
|
// VFNMSUB132PS: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132PS m128 xmm xmm
|
|
// VFNMSUB132PS m256 ymm ymm
|
|
// VFNMSUB132PS xmm xmm xmm
|
|
// VFNMSUB132PS ymm ymm ymm
|
|
// VFNMSUB132PS m128 xmm k xmm
|
|
// VFNMSUB132PS m256 ymm k ymm
|
|
// VFNMSUB132PS xmm xmm k xmm
|
|
// VFNMSUB132PS ymm ymm k ymm
|
|
// VFNMSUB132PS m512 zmm k zmm
|
|
// VFNMSUB132PS m512 zmm zmm
|
|
// VFNMSUB132PS zmm zmm k zmm
|
|
// VFNMSUB132PS zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB132PS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB132PS(ops ...operand.Op) { ctx.VFNMSUB132PS(ops...) }
|
|
|
|
// VFNMSUB132PS_BCST: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132PS.BCST m32 xmm k xmm
|
|
// VFNMSUB132PS.BCST m32 xmm xmm
|
|
// VFNMSUB132PS.BCST m32 ymm k ymm
|
|
// VFNMSUB132PS.BCST m32 ymm ymm
|
|
// VFNMSUB132PS.BCST m32 zmm k zmm
|
|
// VFNMSUB132PS.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB132PS.BCST instruction to the active function.
|
|
func (c *Context) VFNMSUB132PS_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB132PS_BCST(ops...))
|
|
}
|
|
|
|
// VFNMSUB132PS_BCST: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132PS.BCST m32 xmm k xmm
|
|
// VFNMSUB132PS.BCST m32 xmm xmm
|
|
// VFNMSUB132PS.BCST m32 ymm k ymm
|
|
// VFNMSUB132PS.BCST m32 ymm ymm
|
|
// VFNMSUB132PS.BCST m32 zmm k zmm
|
|
// VFNMSUB132PS.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB132PS.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB132PS_BCST(ops ...operand.Op) { ctx.VFNMSUB132PS_BCST(ops...) }
|
|
|
|
// VFNMSUB132PS_BCST_Z: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132PS.BCST.Z m32 xmm k xmm
|
|
// VFNMSUB132PS.BCST.Z m32 ymm k ymm
|
|
// VFNMSUB132PS.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB132PS.BCST.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB132PS_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB132PS_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VFNMSUB132PS_BCST_Z: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132PS.BCST.Z m32 xmm k xmm
|
|
// VFNMSUB132PS.BCST.Z m32 ymm k ymm
|
|
// VFNMSUB132PS.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB132PS.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB132PS_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VFNMSUB132PS_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VFNMSUB132PS_RD_SAE: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132PS.RD_SAE zmm zmm k zmm
|
|
// VFNMSUB132PS.RD_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB132PS.RD_SAE instruction to the active function.
|
|
func (c *Context) VFNMSUB132PS_RD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB132PS_RD_SAE(ops...))
|
|
}
|
|
|
|
// VFNMSUB132PS_RD_SAE: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132PS.RD_SAE zmm zmm k zmm
|
|
// VFNMSUB132PS.RD_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB132PS.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB132PS_RD_SAE(ops ...operand.Op) { ctx.VFNMSUB132PS_RD_SAE(ops...) }
|
|
|
|
// VFNMSUB132PS_RD_SAE_Z: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132PS.RD_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB132PS.RD_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB132PS_RD_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB132PS_RD_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFNMSUB132PS_RD_SAE_Z: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132PS.RD_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB132PS.RD_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB132PS_RD_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFNMSUB132PS_RD_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFNMSUB132PS_RN_SAE: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132PS.RN_SAE zmm zmm k zmm
|
|
// VFNMSUB132PS.RN_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB132PS.RN_SAE instruction to the active function.
|
|
func (c *Context) VFNMSUB132PS_RN_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB132PS_RN_SAE(ops...))
|
|
}
|
|
|
|
// VFNMSUB132PS_RN_SAE: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132PS.RN_SAE zmm zmm k zmm
|
|
// VFNMSUB132PS.RN_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB132PS.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB132PS_RN_SAE(ops ...operand.Op) { ctx.VFNMSUB132PS_RN_SAE(ops...) }
|
|
|
|
// VFNMSUB132PS_RN_SAE_Z: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132PS.RN_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB132PS.RN_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB132PS_RN_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB132PS_RN_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFNMSUB132PS_RN_SAE_Z: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132PS.RN_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB132PS.RN_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB132PS_RN_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFNMSUB132PS_RN_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFNMSUB132PS_RU_SAE: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132PS.RU_SAE zmm zmm k zmm
|
|
// VFNMSUB132PS.RU_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB132PS.RU_SAE instruction to the active function.
|
|
func (c *Context) VFNMSUB132PS_RU_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB132PS_RU_SAE(ops...))
|
|
}
|
|
|
|
// VFNMSUB132PS_RU_SAE: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132PS.RU_SAE zmm zmm k zmm
|
|
// VFNMSUB132PS.RU_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB132PS.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB132PS_RU_SAE(ops ...operand.Op) { ctx.VFNMSUB132PS_RU_SAE(ops...) }
|
|
|
|
// VFNMSUB132PS_RU_SAE_Z: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132PS.RU_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB132PS.RU_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB132PS_RU_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB132PS_RU_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFNMSUB132PS_RU_SAE_Z: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132PS.RU_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB132PS.RU_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB132PS_RU_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFNMSUB132PS_RU_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFNMSUB132PS_RZ_SAE: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132PS.RZ_SAE zmm zmm k zmm
|
|
// VFNMSUB132PS.RZ_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB132PS.RZ_SAE instruction to the active function.
|
|
func (c *Context) VFNMSUB132PS_RZ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB132PS_RZ_SAE(ops...))
|
|
}
|
|
|
|
// VFNMSUB132PS_RZ_SAE: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132PS.RZ_SAE zmm zmm k zmm
|
|
// VFNMSUB132PS.RZ_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB132PS.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB132PS_RZ_SAE(ops ...operand.Op) { ctx.VFNMSUB132PS_RZ_SAE(ops...) }
|
|
|
|
// VFNMSUB132PS_RZ_SAE_Z: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132PS.RZ_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB132PS.RZ_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB132PS_RZ_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB132PS_RZ_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFNMSUB132PS_RZ_SAE_Z: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132PS.RZ_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB132PS.RZ_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB132PS_RZ_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFNMSUB132PS_RZ_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFNMSUB132PS_Z: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132PS.Z m128 xmm k xmm
|
|
// VFNMSUB132PS.Z m256 ymm k ymm
|
|
// VFNMSUB132PS.Z xmm xmm k xmm
|
|
// VFNMSUB132PS.Z ymm ymm k ymm
|
|
// VFNMSUB132PS.Z m512 zmm k zmm
|
|
// VFNMSUB132PS.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB132PS.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB132PS_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB132PS_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VFNMSUB132PS_Z: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132PS.Z m128 xmm k xmm
|
|
// VFNMSUB132PS.Z m256 ymm k ymm
|
|
// VFNMSUB132PS.Z xmm xmm k xmm
|
|
// VFNMSUB132PS.Z ymm ymm k ymm
|
|
// VFNMSUB132PS.Z m512 zmm k zmm
|
|
// VFNMSUB132PS.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB132PS.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB132PS_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VFNMSUB132PS_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VFNMSUB132SD: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132SD m64 xmm xmm
|
|
// VFNMSUB132SD xmm xmm xmm
|
|
// VFNMSUB132SD m64 xmm k xmm
|
|
// VFNMSUB132SD xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB132SD instruction to the active function.
|
|
func (c *Context) VFNMSUB132SD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB132SD(ops...))
|
|
}
|
|
|
|
// VFNMSUB132SD: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132SD m64 xmm xmm
|
|
// VFNMSUB132SD xmm xmm xmm
|
|
// VFNMSUB132SD m64 xmm k xmm
|
|
// VFNMSUB132SD xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB132SD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB132SD(ops ...operand.Op) { ctx.VFNMSUB132SD(ops...) }
|
|
|
|
// VFNMSUB132SD_RD_SAE: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132SD.RD_SAE xmm xmm k xmm
|
|
// VFNMSUB132SD.RD_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMSUB132SD.RD_SAE instruction to the active function.
|
|
func (c *Context) VFNMSUB132SD_RD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB132SD_RD_SAE(ops...))
|
|
}
|
|
|
|
// VFNMSUB132SD_RD_SAE: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132SD.RD_SAE xmm xmm k xmm
|
|
// VFNMSUB132SD.RD_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMSUB132SD.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB132SD_RD_SAE(ops ...operand.Op) { ctx.VFNMSUB132SD_RD_SAE(ops...) }
|
|
|
|
// VFNMSUB132SD_RD_SAE_Z: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132SD.RD_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB132SD.RD_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB132SD_RD_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB132SD_RD_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VFNMSUB132SD_RD_SAE_Z: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132SD.RD_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB132SD.RD_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB132SD_RD_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFNMSUB132SD_RD_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFNMSUB132SD_RN_SAE: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132SD.RN_SAE xmm xmm k xmm
|
|
// VFNMSUB132SD.RN_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMSUB132SD.RN_SAE instruction to the active function.
|
|
func (c *Context) VFNMSUB132SD_RN_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB132SD_RN_SAE(ops...))
|
|
}
|
|
|
|
// VFNMSUB132SD_RN_SAE: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132SD.RN_SAE xmm xmm k xmm
|
|
// VFNMSUB132SD.RN_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMSUB132SD.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB132SD_RN_SAE(ops ...operand.Op) { ctx.VFNMSUB132SD_RN_SAE(ops...) }
|
|
|
|
// VFNMSUB132SD_RN_SAE_Z: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132SD.RN_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB132SD.RN_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB132SD_RN_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB132SD_RN_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VFNMSUB132SD_RN_SAE_Z: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132SD.RN_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB132SD.RN_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB132SD_RN_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFNMSUB132SD_RN_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFNMSUB132SD_RU_SAE: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132SD.RU_SAE xmm xmm k xmm
|
|
// VFNMSUB132SD.RU_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMSUB132SD.RU_SAE instruction to the active function.
|
|
func (c *Context) VFNMSUB132SD_RU_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB132SD_RU_SAE(ops...))
|
|
}
|
|
|
|
// VFNMSUB132SD_RU_SAE: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132SD.RU_SAE xmm xmm k xmm
|
|
// VFNMSUB132SD.RU_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMSUB132SD.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB132SD_RU_SAE(ops ...operand.Op) { ctx.VFNMSUB132SD_RU_SAE(ops...) }
|
|
|
|
// VFNMSUB132SD_RU_SAE_Z: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132SD.RU_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB132SD.RU_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB132SD_RU_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB132SD_RU_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VFNMSUB132SD_RU_SAE_Z: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132SD.RU_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB132SD.RU_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB132SD_RU_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFNMSUB132SD_RU_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFNMSUB132SD_RZ_SAE: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132SD.RZ_SAE xmm xmm k xmm
|
|
// VFNMSUB132SD.RZ_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMSUB132SD.RZ_SAE instruction to the active function.
|
|
func (c *Context) VFNMSUB132SD_RZ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB132SD_RZ_SAE(ops...))
|
|
}
|
|
|
|
// VFNMSUB132SD_RZ_SAE: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132SD.RZ_SAE xmm xmm k xmm
|
|
// VFNMSUB132SD.RZ_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMSUB132SD.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB132SD_RZ_SAE(ops ...operand.Op) { ctx.VFNMSUB132SD_RZ_SAE(ops...) }
|
|
|
|
// VFNMSUB132SD_RZ_SAE_Z: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132SD.RZ_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB132SD.RZ_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB132SD_RZ_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB132SD_RZ_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VFNMSUB132SD_RZ_SAE_Z: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132SD.RZ_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB132SD.RZ_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB132SD_RZ_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFNMSUB132SD_RZ_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFNMSUB132SD_Z: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132SD.Z m64 xmm k xmm
|
|
// VFNMSUB132SD.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB132SD.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB132SD_Z(mx, x, k, x1 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB132SD_Z(mx, x, k, x1))
|
|
}
|
|
|
|
// VFNMSUB132SD_Z: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132SD.Z m64 xmm k xmm
|
|
// VFNMSUB132SD.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB132SD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB132SD_Z(mx, x, k, x1 operand.Op) { ctx.VFNMSUB132SD_Z(mx, x, k, x1) }
|
|
|
|
// VFNMSUB132SS: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132SS m32 xmm xmm
|
|
// VFNMSUB132SS xmm xmm xmm
|
|
// VFNMSUB132SS m32 xmm k xmm
|
|
// VFNMSUB132SS xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB132SS instruction to the active function.
|
|
func (c *Context) VFNMSUB132SS(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB132SS(ops...))
|
|
}
|
|
|
|
// VFNMSUB132SS: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132SS m32 xmm xmm
|
|
// VFNMSUB132SS xmm xmm xmm
|
|
// VFNMSUB132SS m32 xmm k xmm
|
|
// VFNMSUB132SS xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB132SS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB132SS(ops ...operand.Op) { ctx.VFNMSUB132SS(ops...) }
|
|
|
|
// VFNMSUB132SS_RD_SAE: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132SS.RD_SAE xmm xmm k xmm
|
|
// VFNMSUB132SS.RD_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMSUB132SS.RD_SAE instruction to the active function.
|
|
func (c *Context) VFNMSUB132SS_RD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB132SS_RD_SAE(ops...))
|
|
}
|
|
|
|
// VFNMSUB132SS_RD_SAE: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132SS.RD_SAE xmm xmm k xmm
|
|
// VFNMSUB132SS.RD_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMSUB132SS.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB132SS_RD_SAE(ops ...operand.Op) { ctx.VFNMSUB132SS_RD_SAE(ops...) }
|
|
|
|
// VFNMSUB132SS_RD_SAE_Z: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132SS.RD_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB132SS.RD_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB132SS_RD_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB132SS_RD_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VFNMSUB132SS_RD_SAE_Z: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132SS.RD_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB132SS.RD_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB132SS_RD_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFNMSUB132SS_RD_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFNMSUB132SS_RN_SAE: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132SS.RN_SAE xmm xmm k xmm
|
|
// VFNMSUB132SS.RN_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMSUB132SS.RN_SAE instruction to the active function.
|
|
func (c *Context) VFNMSUB132SS_RN_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB132SS_RN_SAE(ops...))
|
|
}
|
|
|
|
// VFNMSUB132SS_RN_SAE: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132SS.RN_SAE xmm xmm k xmm
|
|
// VFNMSUB132SS.RN_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMSUB132SS.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB132SS_RN_SAE(ops ...operand.Op) { ctx.VFNMSUB132SS_RN_SAE(ops...) }
|
|
|
|
// VFNMSUB132SS_RN_SAE_Z: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132SS.RN_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB132SS.RN_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB132SS_RN_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB132SS_RN_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VFNMSUB132SS_RN_SAE_Z: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132SS.RN_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB132SS.RN_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB132SS_RN_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFNMSUB132SS_RN_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFNMSUB132SS_RU_SAE: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132SS.RU_SAE xmm xmm k xmm
|
|
// VFNMSUB132SS.RU_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMSUB132SS.RU_SAE instruction to the active function.
|
|
func (c *Context) VFNMSUB132SS_RU_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB132SS_RU_SAE(ops...))
|
|
}
|
|
|
|
// VFNMSUB132SS_RU_SAE: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132SS.RU_SAE xmm xmm k xmm
|
|
// VFNMSUB132SS.RU_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMSUB132SS.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB132SS_RU_SAE(ops ...operand.Op) { ctx.VFNMSUB132SS_RU_SAE(ops...) }
|
|
|
|
// VFNMSUB132SS_RU_SAE_Z: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132SS.RU_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB132SS.RU_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB132SS_RU_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB132SS_RU_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VFNMSUB132SS_RU_SAE_Z: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132SS.RU_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB132SS.RU_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB132SS_RU_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFNMSUB132SS_RU_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFNMSUB132SS_RZ_SAE: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132SS.RZ_SAE xmm xmm k xmm
|
|
// VFNMSUB132SS.RZ_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMSUB132SS.RZ_SAE instruction to the active function.
|
|
func (c *Context) VFNMSUB132SS_RZ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB132SS_RZ_SAE(ops...))
|
|
}
|
|
|
|
// VFNMSUB132SS_RZ_SAE: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132SS.RZ_SAE xmm xmm k xmm
|
|
// VFNMSUB132SS.RZ_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMSUB132SS.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB132SS_RZ_SAE(ops ...operand.Op) { ctx.VFNMSUB132SS_RZ_SAE(ops...) }
|
|
|
|
// VFNMSUB132SS_RZ_SAE_Z: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132SS.RZ_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB132SS.RZ_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB132SS_RZ_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB132SS_RZ_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VFNMSUB132SS_RZ_SAE_Z: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132SS.RZ_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB132SS.RZ_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB132SS_RZ_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFNMSUB132SS_RZ_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFNMSUB132SS_Z: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132SS.Z m32 xmm k xmm
|
|
// VFNMSUB132SS.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB132SS.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB132SS_Z(mx, x, k, x1 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB132SS_Z(mx, x, k, x1))
|
|
}
|
|
|
|
// VFNMSUB132SS_Z: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB132SS.Z m32 xmm k xmm
|
|
// VFNMSUB132SS.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB132SS.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB132SS_Z(mx, x, k, x1 operand.Op) { ctx.VFNMSUB132SS_Z(mx, x, k, x1) }
|
|
|
|
// VFNMSUB213PD: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213PD m128 xmm xmm
|
|
// VFNMSUB213PD m256 ymm ymm
|
|
// VFNMSUB213PD xmm xmm xmm
|
|
// VFNMSUB213PD ymm ymm ymm
|
|
// VFNMSUB213PD m128 xmm k xmm
|
|
// VFNMSUB213PD m256 ymm k ymm
|
|
// VFNMSUB213PD xmm xmm k xmm
|
|
// VFNMSUB213PD ymm ymm k ymm
|
|
// VFNMSUB213PD m512 zmm k zmm
|
|
// VFNMSUB213PD m512 zmm zmm
|
|
// VFNMSUB213PD zmm zmm k zmm
|
|
// VFNMSUB213PD zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB213PD instruction to the active function.
|
|
func (c *Context) VFNMSUB213PD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB213PD(ops...))
|
|
}
|
|
|
|
// VFNMSUB213PD: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213PD m128 xmm xmm
|
|
// VFNMSUB213PD m256 ymm ymm
|
|
// VFNMSUB213PD xmm xmm xmm
|
|
// VFNMSUB213PD ymm ymm ymm
|
|
// VFNMSUB213PD m128 xmm k xmm
|
|
// VFNMSUB213PD m256 ymm k ymm
|
|
// VFNMSUB213PD xmm xmm k xmm
|
|
// VFNMSUB213PD ymm ymm k ymm
|
|
// VFNMSUB213PD m512 zmm k zmm
|
|
// VFNMSUB213PD m512 zmm zmm
|
|
// VFNMSUB213PD zmm zmm k zmm
|
|
// VFNMSUB213PD zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB213PD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB213PD(ops ...operand.Op) { ctx.VFNMSUB213PD(ops...) }
|
|
|
|
// VFNMSUB213PD_BCST: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213PD.BCST m64 xmm k xmm
|
|
// VFNMSUB213PD.BCST m64 xmm xmm
|
|
// VFNMSUB213PD.BCST m64 ymm k ymm
|
|
// VFNMSUB213PD.BCST m64 ymm ymm
|
|
// VFNMSUB213PD.BCST m64 zmm k zmm
|
|
// VFNMSUB213PD.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB213PD.BCST instruction to the active function.
|
|
func (c *Context) VFNMSUB213PD_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB213PD_BCST(ops...))
|
|
}
|
|
|
|
// VFNMSUB213PD_BCST: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213PD.BCST m64 xmm k xmm
|
|
// VFNMSUB213PD.BCST m64 xmm xmm
|
|
// VFNMSUB213PD.BCST m64 ymm k ymm
|
|
// VFNMSUB213PD.BCST m64 ymm ymm
|
|
// VFNMSUB213PD.BCST m64 zmm k zmm
|
|
// VFNMSUB213PD.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB213PD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB213PD_BCST(ops ...operand.Op) { ctx.VFNMSUB213PD_BCST(ops...) }
|
|
|
|
// VFNMSUB213PD_BCST_Z: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213PD.BCST.Z m64 xmm k xmm
|
|
// VFNMSUB213PD.BCST.Z m64 ymm k ymm
|
|
// VFNMSUB213PD.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB213PD.BCST.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB213PD_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB213PD_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VFNMSUB213PD_BCST_Z: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213PD.BCST.Z m64 xmm k xmm
|
|
// VFNMSUB213PD.BCST.Z m64 ymm k ymm
|
|
// VFNMSUB213PD.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB213PD.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB213PD_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VFNMSUB213PD_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VFNMSUB213PD_RD_SAE: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213PD.RD_SAE zmm zmm k zmm
|
|
// VFNMSUB213PD.RD_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB213PD.RD_SAE instruction to the active function.
|
|
func (c *Context) VFNMSUB213PD_RD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB213PD_RD_SAE(ops...))
|
|
}
|
|
|
|
// VFNMSUB213PD_RD_SAE: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213PD.RD_SAE zmm zmm k zmm
|
|
// VFNMSUB213PD.RD_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB213PD.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB213PD_RD_SAE(ops ...operand.Op) { ctx.VFNMSUB213PD_RD_SAE(ops...) }
|
|
|
|
// VFNMSUB213PD_RD_SAE_Z: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213PD.RD_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB213PD.RD_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB213PD_RD_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB213PD_RD_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFNMSUB213PD_RD_SAE_Z: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213PD.RD_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB213PD.RD_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB213PD_RD_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFNMSUB213PD_RD_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFNMSUB213PD_RN_SAE: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213PD.RN_SAE zmm zmm k zmm
|
|
// VFNMSUB213PD.RN_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB213PD.RN_SAE instruction to the active function.
|
|
func (c *Context) VFNMSUB213PD_RN_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB213PD_RN_SAE(ops...))
|
|
}
|
|
|
|
// VFNMSUB213PD_RN_SAE: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213PD.RN_SAE zmm zmm k zmm
|
|
// VFNMSUB213PD.RN_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB213PD.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB213PD_RN_SAE(ops ...operand.Op) { ctx.VFNMSUB213PD_RN_SAE(ops...) }
|
|
|
|
// VFNMSUB213PD_RN_SAE_Z: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213PD.RN_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB213PD.RN_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB213PD_RN_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB213PD_RN_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFNMSUB213PD_RN_SAE_Z: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213PD.RN_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB213PD.RN_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB213PD_RN_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFNMSUB213PD_RN_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFNMSUB213PD_RU_SAE: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213PD.RU_SAE zmm zmm k zmm
|
|
// VFNMSUB213PD.RU_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB213PD.RU_SAE instruction to the active function.
|
|
func (c *Context) VFNMSUB213PD_RU_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB213PD_RU_SAE(ops...))
|
|
}
|
|
|
|
// VFNMSUB213PD_RU_SAE: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213PD.RU_SAE zmm zmm k zmm
|
|
// VFNMSUB213PD.RU_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB213PD.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB213PD_RU_SAE(ops ...operand.Op) { ctx.VFNMSUB213PD_RU_SAE(ops...) }
|
|
|
|
// VFNMSUB213PD_RU_SAE_Z: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213PD.RU_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB213PD.RU_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB213PD_RU_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB213PD_RU_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFNMSUB213PD_RU_SAE_Z: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213PD.RU_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB213PD.RU_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB213PD_RU_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFNMSUB213PD_RU_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFNMSUB213PD_RZ_SAE: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213PD.RZ_SAE zmm zmm k zmm
|
|
// VFNMSUB213PD.RZ_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB213PD.RZ_SAE instruction to the active function.
|
|
func (c *Context) VFNMSUB213PD_RZ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB213PD_RZ_SAE(ops...))
|
|
}
|
|
|
|
// VFNMSUB213PD_RZ_SAE: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213PD.RZ_SAE zmm zmm k zmm
|
|
// VFNMSUB213PD.RZ_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB213PD.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB213PD_RZ_SAE(ops ...operand.Op) { ctx.VFNMSUB213PD_RZ_SAE(ops...) }
|
|
|
|
// VFNMSUB213PD_RZ_SAE_Z: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213PD.RZ_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB213PD.RZ_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB213PD_RZ_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB213PD_RZ_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFNMSUB213PD_RZ_SAE_Z: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213PD.RZ_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB213PD.RZ_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB213PD_RZ_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFNMSUB213PD_RZ_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFNMSUB213PD_Z: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213PD.Z m128 xmm k xmm
|
|
// VFNMSUB213PD.Z m256 ymm k ymm
|
|
// VFNMSUB213PD.Z xmm xmm k xmm
|
|
// VFNMSUB213PD.Z ymm ymm k ymm
|
|
// VFNMSUB213PD.Z m512 zmm k zmm
|
|
// VFNMSUB213PD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB213PD.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB213PD_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB213PD_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VFNMSUB213PD_Z: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213PD.Z m128 xmm k xmm
|
|
// VFNMSUB213PD.Z m256 ymm k ymm
|
|
// VFNMSUB213PD.Z xmm xmm k xmm
|
|
// VFNMSUB213PD.Z ymm ymm k ymm
|
|
// VFNMSUB213PD.Z m512 zmm k zmm
|
|
// VFNMSUB213PD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB213PD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB213PD_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VFNMSUB213PD_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VFNMSUB213PS: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213PS m128 xmm xmm
|
|
// VFNMSUB213PS m256 ymm ymm
|
|
// VFNMSUB213PS xmm xmm xmm
|
|
// VFNMSUB213PS ymm ymm ymm
|
|
// VFNMSUB213PS m128 xmm k xmm
|
|
// VFNMSUB213PS m256 ymm k ymm
|
|
// VFNMSUB213PS xmm xmm k xmm
|
|
// VFNMSUB213PS ymm ymm k ymm
|
|
// VFNMSUB213PS m512 zmm k zmm
|
|
// VFNMSUB213PS m512 zmm zmm
|
|
// VFNMSUB213PS zmm zmm k zmm
|
|
// VFNMSUB213PS zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB213PS instruction to the active function.
|
|
func (c *Context) VFNMSUB213PS(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB213PS(ops...))
|
|
}
|
|
|
|
// VFNMSUB213PS: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213PS m128 xmm xmm
|
|
// VFNMSUB213PS m256 ymm ymm
|
|
// VFNMSUB213PS xmm xmm xmm
|
|
// VFNMSUB213PS ymm ymm ymm
|
|
// VFNMSUB213PS m128 xmm k xmm
|
|
// VFNMSUB213PS m256 ymm k ymm
|
|
// VFNMSUB213PS xmm xmm k xmm
|
|
// VFNMSUB213PS ymm ymm k ymm
|
|
// VFNMSUB213PS m512 zmm k zmm
|
|
// VFNMSUB213PS m512 zmm zmm
|
|
// VFNMSUB213PS zmm zmm k zmm
|
|
// VFNMSUB213PS zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB213PS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB213PS(ops ...operand.Op) { ctx.VFNMSUB213PS(ops...) }
|
|
|
|
// VFNMSUB213PS_BCST: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213PS.BCST m32 xmm k xmm
|
|
// VFNMSUB213PS.BCST m32 xmm xmm
|
|
// VFNMSUB213PS.BCST m32 ymm k ymm
|
|
// VFNMSUB213PS.BCST m32 ymm ymm
|
|
// VFNMSUB213PS.BCST m32 zmm k zmm
|
|
// VFNMSUB213PS.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB213PS.BCST instruction to the active function.
|
|
func (c *Context) VFNMSUB213PS_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB213PS_BCST(ops...))
|
|
}
|
|
|
|
// VFNMSUB213PS_BCST: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213PS.BCST m32 xmm k xmm
|
|
// VFNMSUB213PS.BCST m32 xmm xmm
|
|
// VFNMSUB213PS.BCST m32 ymm k ymm
|
|
// VFNMSUB213PS.BCST m32 ymm ymm
|
|
// VFNMSUB213PS.BCST m32 zmm k zmm
|
|
// VFNMSUB213PS.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB213PS.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB213PS_BCST(ops ...operand.Op) { ctx.VFNMSUB213PS_BCST(ops...) }
|
|
|
|
// VFNMSUB213PS_BCST_Z: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213PS.BCST.Z m32 xmm k xmm
|
|
// VFNMSUB213PS.BCST.Z m32 ymm k ymm
|
|
// VFNMSUB213PS.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB213PS.BCST.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB213PS_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB213PS_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VFNMSUB213PS_BCST_Z: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213PS.BCST.Z m32 xmm k xmm
|
|
// VFNMSUB213PS.BCST.Z m32 ymm k ymm
|
|
// VFNMSUB213PS.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB213PS.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB213PS_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VFNMSUB213PS_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VFNMSUB213PS_RD_SAE: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213PS.RD_SAE zmm zmm k zmm
|
|
// VFNMSUB213PS.RD_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB213PS.RD_SAE instruction to the active function.
|
|
func (c *Context) VFNMSUB213PS_RD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB213PS_RD_SAE(ops...))
|
|
}
|
|
|
|
// VFNMSUB213PS_RD_SAE: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213PS.RD_SAE zmm zmm k zmm
|
|
// VFNMSUB213PS.RD_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB213PS.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB213PS_RD_SAE(ops ...operand.Op) { ctx.VFNMSUB213PS_RD_SAE(ops...) }
|
|
|
|
// VFNMSUB213PS_RD_SAE_Z: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213PS.RD_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB213PS.RD_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB213PS_RD_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB213PS_RD_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFNMSUB213PS_RD_SAE_Z: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213PS.RD_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB213PS.RD_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB213PS_RD_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFNMSUB213PS_RD_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFNMSUB213PS_RN_SAE: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213PS.RN_SAE zmm zmm k zmm
|
|
// VFNMSUB213PS.RN_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB213PS.RN_SAE instruction to the active function.
|
|
func (c *Context) VFNMSUB213PS_RN_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB213PS_RN_SAE(ops...))
|
|
}
|
|
|
|
// VFNMSUB213PS_RN_SAE: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213PS.RN_SAE zmm zmm k zmm
|
|
// VFNMSUB213PS.RN_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB213PS.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB213PS_RN_SAE(ops ...operand.Op) { ctx.VFNMSUB213PS_RN_SAE(ops...) }
|
|
|
|
// VFNMSUB213PS_RN_SAE_Z: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213PS.RN_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB213PS.RN_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB213PS_RN_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB213PS_RN_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFNMSUB213PS_RN_SAE_Z: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213PS.RN_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB213PS.RN_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB213PS_RN_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFNMSUB213PS_RN_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFNMSUB213PS_RU_SAE: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213PS.RU_SAE zmm zmm k zmm
|
|
// VFNMSUB213PS.RU_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB213PS.RU_SAE instruction to the active function.
|
|
func (c *Context) VFNMSUB213PS_RU_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB213PS_RU_SAE(ops...))
|
|
}
|
|
|
|
// VFNMSUB213PS_RU_SAE: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213PS.RU_SAE zmm zmm k zmm
|
|
// VFNMSUB213PS.RU_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB213PS.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB213PS_RU_SAE(ops ...operand.Op) { ctx.VFNMSUB213PS_RU_SAE(ops...) }
|
|
|
|
// VFNMSUB213PS_RU_SAE_Z: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213PS.RU_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB213PS.RU_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB213PS_RU_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB213PS_RU_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFNMSUB213PS_RU_SAE_Z: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213PS.RU_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB213PS.RU_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB213PS_RU_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFNMSUB213PS_RU_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFNMSUB213PS_RZ_SAE: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213PS.RZ_SAE zmm zmm k zmm
|
|
// VFNMSUB213PS.RZ_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB213PS.RZ_SAE instruction to the active function.
|
|
func (c *Context) VFNMSUB213PS_RZ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB213PS_RZ_SAE(ops...))
|
|
}
|
|
|
|
// VFNMSUB213PS_RZ_SAE: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213PS.RZ_SAE zmm zmm k zmm
|
|
// VFNMSUB213PS.RZ_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB213PS.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB213PS_RZ_SAE(ops ...operand.Op) { ctx.VFNMSUB213PS_RZ_SAE(ops...) }
|
|
|
|
// VFNMSUB213PS_RZ_SAE_Z: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213PS.RZ_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB213PS.RZ_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB213PS_RZ_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB213PS_RZ_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFNMSUB213PS_RZ_SAE_Z: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213PS.RZ_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB213PS.RZ_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB213PS_RZ_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFNMSUB213PS_RZ_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFNMSUB213PS_Z: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213PS.Z m128 xmm k xmm
|
|
// VFNMSUB213PS.Z m256 ymm k ymm
|
|
// VFNMSUB213PS.Z xmm xmm k xmm
|
|
// VFNMSUB213PS.Z ymm ymm k ymm
|
|
// VFNMSUB213PS.Z m512 zmm k zmm
|
|
// VFNMSUB213PS.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB213PS.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB213PS_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB213PS_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VFNMSUB213PS_Z: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213PS.Z m128 xmm k xmm
|
|
// VFNMSUB213PS.Z m256 ymm k ymm
|
|
// VFNMSUB213PS.Z xmm xmm k xmm
|
|
// VFNMSUB213PS.Z ymm ymm k ymm
|
|
// VFNMSUB213PS.Z m512 zmm k zmm
|
|
// VFNMSUB213PS.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB213PS.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB213PS_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VFNMSUB213PS_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VFNMSUB213SD: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213SD m64 xmm xmm
|
|
// VFNMSUB213SD xmm xmm xmm
|
|
// VFNMSUB213SD m64 xmm k xmm
|
|
// VFNMSUB213SD xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB213SD instruction to the active function.
|
|
func (c *Context) VFNMSUB213SD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB213SD(ops...))
|
|
}
|
|
|
|
// VFNMSUB213SD: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213SD m64 xmm xmm
|
|
// VFNMSUB213SD xmm xmm xmm
|
|
// VFNMSUB213SD m64 xmm k xmm
|
|
// VFNMSUB213SD xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB213SD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB213SD(ops ...operand.Op) { ctx.VFNMSUB213SD(ops...) }
|
|
|
|
// VFNMSUB213SD_RD_SAE: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213SD.RD_SAE xmm xmm k xmm
|
|
// VFNMSUB213SD.RD_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMSUB213SD.RD_SAE instruction to the active function.
|
|
func (c *Context) VFNMSUB213SD_RD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB213SD_RD_SAE(ops...))
|
|
}
|
|
|
|
// VFNMSUB213SD_RD_SAE: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213SD.RD_SAE xmm xmm k xmm
|
|
// VFNMSUB213SD.RD_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMSUB213SD.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB213SD_RD_SAE(ops ...operand.Op) { ctx.VFNMSUB213SD_RD_SAE(ops...) }
|
|
|
|
// VFNMSUB213SD_RD_SAE_Z: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213SD.RD_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB213SD.RD_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB213SD_RD_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB213SD_RD_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VFNMSUB213SD_RD_SAE_Z: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213SD.RD_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB213SD.RD_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB213SD_RD_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFNMSUB213SD_RD_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFNMSUB213SD_RN_SAE: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213SD.RN_SAE xmm xmm k xmm
|
|
// VFNMSUB213SD.RN_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMSUB213SD.RN_SAE instruction to the active function.
|
|
func (c *Context) VFNMSUB213SD_RN_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB213SD_RN_SAE(ops...))
|
|
}
|
|
|
|
// VFNMSUB213SD_RN_SAE: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213SD.RN_SAE xmm xmm k xmm
|
|
// VFNMSUB213SD.RN_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMSUB213SD.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB213SD_RN_SAE(ops ...operand.Op) { ctx.VFNMSUB213SD_RN_SAE(ops...) }
|
|
|
|
// VFNMSUB213SD_RN_SAE_Z: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213SD.RN_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB213SD.RN_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB213SD_RN_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB213SD_RN_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VFNMSUB213SD_RN_SAE_Z: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213SD.RN_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB213SD.RN_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB213SD_RN_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFNMSUB213SD_RN_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFNMSUB213SD_RU_SAE: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213SD.RU_SAE xmm xmm k xmm
|
|
// VFNMSUB213SD.RU_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMSUB213SD.RU_SAE instruction to the active function.
|
|
func (c *Context) VFNMSUB213SD_RU_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB213SD_RU_SAE(ops...))
|
|
}
|
|
|
|
// VFNMSUB213SD_RU_SAE: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213SD.RU_SAE xmm xmm k xmm
|
|
// VFNMSUB213SD.RU_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMSUB213SD.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB213SD_RU_SAE(ops ...operand.Op) { ctx.VFNMSUB213SD_RU_SAE(ops...) }
|
|
|
|
// VFNMSUB213SD_RU_SAE_Z: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213SD.RU_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB213SD.RU_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB213SD_RU_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB213SD_RU_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VFNMSUB213SD_RU_SAE_Z: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213SD.RU_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB213SD.RU_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB213SD_RU_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFNMSUB213SD_RU_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFNMSUB213SD_RZ_SAE: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213SD.RZ_SAE xmm xmm k xmm
|
|
// VFNMSUB213SD.RZ_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMSUB213SD.RZ_SAE instruction to the active function.
|
|
func (c *Context) VFNMSUB213SD_RZ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB213SD_RZ_SAE(ops...))
|
|
}
|
|
|
|
// VFNMSUB213SD_RZ_SAE: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213SD.RZ_SAE xmm xmm k xmm
|
|
// VFNMSUB213SD.RZ_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMSUB213SD.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB213SD_RZ_SAE(ops ...operand.Op) { ctx.VFNMSUB213SD_RZ_SAE(ops...) }
|
|
|
|
// VFNMSUB213SD_RZ_SAE_Z: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213SD.RZ_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB213SD.RZ_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB213SD_RZ_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB213SD_RZ_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VFNMSUB213SD_RZ_SAE_Z: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213SD.RZ_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB213SD.RZ_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB213SD_RZ_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFNMSUB213SD_RZ_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFNMSUB213SD_Z: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213SD.Z m64 xmm k xmm
|
|
// VFNMSUB213SD.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB213SD.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB213SD_Z(mx, x, k, x1 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB213SD_Z(mx, x, k, x1))
|
|
}
|
|
|
|
// VFNMSUB213SD_Z: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213SD.Z m64 xmm k xmm
|
|
// VFNMSUB213SD.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB213SD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB213SD_Z(mx, x, k, x1 operand.Op) { ctx.VFNMSUB213SD_Z(mx, x, k, x1) }
|
|
|
|
// VFNMSUB213SS: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213SS m32 xmm xmm
|
|
// VFNMSUB213SS xmm xmm xmm
|
|
// VFNMSUB213SS m32 xmm k xmm
|
|
// VFNMSUB213SS xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB213SS instruction to the active function.
|
|
func (c *Context) VFNMSUB213SS(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB213SS(ops...))
|
|
}
|
|
|
|
// VFNMSUB213SS: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213SS m32 xmm xmm
|
|
// VFNMSUB213SS xmm xmm xmm
|
|
// VFNMSUB213SS m32 xmm k xmm
|
|
// VFNMSUB213SS xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB213SS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB213SS(ops ...operand.Op) { ctx.VFNMSUB213SS(ops...) }
|
|
|
|
// VFNMSUB213SS_RD_SAE: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213SS.RD_SAE xmm xmm k xmm
|
|
// VFNMSUB213SS.RD_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMSUB213SS.RD_SAE instruction to the active function.
|
|
func (c *Context) VFNMSUB213SS_RD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB213SS_RD_SAE(ops...))
|
|
}
|
|
|
|
// VFNMSUB213SS_RD_SAE: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213SS.RD_SAE xmm xmm k xmm
|
|
// VFNMSUB213SS.RD_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMSUB213SS.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB213SS_RD_SAE(ops ...operand.Op) { ctx.VFNMSUB213SS_RD_SAE(ops...) }
|
|
|
|
// VFNMSUB213SS_RD_SAE_Z: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213SS.RD_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB213SS.RD_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB213SS_RD_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB213SS_RD_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VFNMSUB213SS_RD_SAE_Z: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213SS.RD_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB213SS.RD_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB213SS_RD_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFNMSUB213SS_RD_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFNMSUB213SS_RN_SAE: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213SS.RN_SAE xmm xmm k xmm
|
|
// VFNMSUB213SS.RN_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMSUB213SS.RN_SAE instruction to the active function.
|
|
func (c *Context) VFNMSUB213SS_RN_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB213SS_RN_SAE(ops...))
|
|
}
|
|
|
|
// VFNMSUB213SS_RN_SAE: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213SS.RN_SAE xmm xmm k xmm
|
|
// VFNMSUB213SS.RN_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMSUB213SS.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB213SS_RN_SAE(ops ...operand.Op) { ctx.VFNMSUB213SS_RN_SAE(ops...) }
|
|
|
|
// VFNMSUB213SS_RN_SAE_Z: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213SS.RN_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB213SS.RN_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB213SS_RN_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB213SS_RN_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VFNMSUB213SS_RN_SAE_Z: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213SS.RN_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB213SS.RN_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB213SS_RN_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFNMSUB213SS_RN_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFNMSUB213SS_RU_SAE: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213SS.RU_SAE xmm xmm k xmm
|
|
// VFNMSUB213SS.RU_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMSUB213SS.RU_SAE instruction to the active function.
|
|
func (c *Context) VFNMSUB213SS_RU_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB213SS_RU_SAE(ops...))
|
|
}
|
|
|
|
// VFNMSUB213SS_RU_SAE: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213SS.RU_SAE xmm xmm k xmm
|
|
// VFNMSUB213SS.RU_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMSUB213SS.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB213SS_RU_SAE(ops ...operand.Op) { ctx.VFNMSUB213SS_RU_SAE(ops...) }
|
|
|
|
// VFNMSUB213SS_RU_SAE_Z: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213SS.RU_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB213SS.RU_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB213SS_RU_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB213SS_RU_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VFNMSUB213SS_RU_SAE_Z: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213SS.RU_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB213SS.RU_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB213SS_RU_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFNMSUB213SS_RU_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFNMSUB213SS_RZ_SAE: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213SS.RZ_SAE xmm xmm k xmm
|
|
// VFNMSUB213SS.RZ_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMSUB213SS.RZ_SAE instruction to the active function.
|
|
func (c *Context) VFNMSUB213SS_RZ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB213SS_RZ_SAE(ops...))
|
|
}
|
|
|
|
// VFNMSUB213SS_RZ_SAE: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213SS.RZ_SAE xmm xmm k xmm
|
|
// VFNMSUB213SS.RZ_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMSUB213SS.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB213SS_RZ_SAE(ops ...operand.Op) { ctx.VFNMSUB213SS_RZ_SAE(ops...) }
|
|
|
|
// VFNMSUB213SS_RZ_SAE_Z: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213SS.RZ_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB213SS.RZ_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB213SS_RZ_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB213SS_RZ_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VFNMSUB213SS_RZ_SAE_Z: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213SS.RZ_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB213SS.RZ_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB213SS_RZ_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFNMSUB213SS_RZ_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFNMSUB213SS_Z: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213SS.Z m32 xmm k xmm
|
|
// VFNMSUB213SS.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB213SS.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB213SS_Z(mx, x, k, x1 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB213SS_Z(mx, x, k, x1))
|
|
}
|
|
|
|
// VFNMSUB213SS_Z: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB213SS.Z m32 xmm k xmm
|
|
// VFNMSUB213SS.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB213SS.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB213SS_Z(mx, x, k, x1 operand.Op) { ctx.VFNMSUB213SS_Z(mx, x, k, x1) }
|
|
|
|
// VFNMSUB231PD: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231PD m128 xmm xmm
|
|
// VFNMSUB231PD m256 ymm ymm
|
|
// VFNMSUB231PD xmm xmm xmm
|
|
// VFNMSUB231PD ymm ymm ymm
|
|
// VFNMSUB231PD m128 xmm k xmm
|
|
// VFNMSUB231PD m256 ymm k ymm
|
|
// VFNMSUB231PD xmm xmm k xmm
|
|
// VFNMSUB231PD ymm ymm k ymm
|
|
// VFNMSUB231PD m512 zmm k zmm
|
|
// VFNMSUB231PD m512 zmm zmm
|
|
// VFNMSUB231PD zmm zmm k zmm
|
|
// VFNMSUB231PD zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB231PD instruction to the active function.
|
|
func (c *Context) VFNMSUB231PD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB231PD(ops...))
|
|
}
|
|
|
|
// VFNMSUB231PD: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231PD m128 xmm xmm
|
|
// VFNMSUB231PD m256 ymm ymm
|
|
// VFNMSUB231PD xmm xmm xmm
|
|
// VFNMSUB231PD ymm ymm ymm
|
|
// VFNMSUB231PD m128 xmm k xmm
|
|
// VFNMSUB231PD m256 ymm k ymm
|
|
// VFNMSUB231PD xmm xmm k xmm
|
|
// VFNMSUB231PD ymm ymm k ymm
|
|
// VFNMSUB231PD m512 zmm k zmm
|
|
// VFNMSUB231PD m512 zmm zmm
|
|
// VFNMSUB231PD zmm zmm k zmm
|
|
// VFNMSUB231PD zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB231PD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB231PD(ops ...operand.Op) { ctx.VFNMSUB231PD(ops...) }
|
|
|
|
// VFNMSUB231PD_BCST: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231PD.BCST m64 xmm k xmm
|
|
// VFNMSUB231PD.BCST m64 xmm xmm
|
|
// VFNMSUB231PD.BCST m64 ymm k ymm
|
|
// VFNMSUB231PD.BCST m64 ymm ymm
|
|
// VFNMSUB231PD.BCST m64 zmm k zmm
|
|
// VFNMSUB231PD.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB231PD.BCST instruction to the active function.
|
|
func (c *Context) VFNMSUB231PD_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB231PD_BCST(ops...))
|
|
}
|
|
|
|
// VFNMSUB231PD_BCST: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231PD.BCST m64 xmm k xmm
|
|
// VFNMSUB231PD.BCST m64 xmm xmm
|
|
// VFNMSUB231PD.BCST m64 ymm k ymm
|
|
// VFNMSUB231PD.BCST m64 ymm ymm
|
|
// VFNMSUB231PD.BCST m64 zmm k zmm
|
|
// VFNMSUB231PD.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB231PD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB231PD_BCST(ops ...operand.Op) { ctx.VFNMSUB231PD_BCST(ops...) }
|
|
|
|
// VFNMSUB231PD_BCST_Z: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231PD.BCST.Z m64 xmm k xmm
|
|
// VFNMSUB231PD.BCST.Z m64 ymm k ymm
|
|
// VFNMSUB231PD.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB231PD.BCST.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB231PD_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB231PD_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VFNMSUB231PD_BCST_Z: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231PD.BCST.Z m64 xmm k xmm
|
|
// VFNMSUB231PD.BCST.Z m64 ymm k ymm
|
|
// VFNMSUB231PD.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB231PD.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB231PD_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VFNMSUB231PD_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VFNMSUB231PD_RD_SAE: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231PD.RD_SAE zmm zmm k zmm
|
|
// VFNMSUB231PD.RD_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB231PD.RD_SAE instruction to the active function.
|
|
func (c *Context) VFNMSUB231PD_RD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB231PD_RD_SAE(ops...))
|
|
}
|
|
|
|
// VFNMSUB231PD_RD_SAE: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231PD.RD_SAE zmm zmm k zmm
|
|
// VFNMSUB231PD.RD_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB231PD.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB231PD_RD_SAE(ops ...operand.Op) { ctx.VFNMSUB231PD_RD_SAE(ops...) }
|
|
|
|
// VFNMSUB231PD_RD_SAE_Z: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231PD.RD_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB231PD.RD_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB231PD_RD_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB231PD_RD_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFNMSUB231PD_RD_SAE_Z: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231PD.RD_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB231PD.RD_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB231PD_RD_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFNMSUB231PD_RD_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFNMSUB231PD_RN_SAE: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231PD.RN_SAE zmm zmm k zmm
|
|
// VFNMSUB231PD.RN_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB231PD.RN_SAE instruction to the active function.
|
|
func (c *Context) VFNMSUB231PD_RN_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB231PD_RN_SAE(ops...))
|
|
}
|
|
|
|
// VFNMSUB231PD_RN_SAE: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231PD.RN_SAE zmm zmm k zmm
|
|
// VFNMSUB231PD.RN_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB231PD.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB231PD_RN_SAE(ops ...operand.Op) { ctx.VFNMSUB231PD_RN_SAE(ops...) }
|
|
|
|
// VFNMSUB231PD_RN_SAE_Z: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231PD.RN_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB231PD.RN_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB231PD_RN_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB231PD_RN_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFNMSUB231PD_RN_SAE_Z: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231PD.RN_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB231PD.RN_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB231PD_RN_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFNMSUB231PD_RN_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFNMSUB231PD_RU_SAE: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231PD.RU_SAE zmm zmm k zmm
|
|
// VFNMSUB231PD.RU_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB231PD.RU_SAE instruction to the active function.
|
|
func (c *Context) VFNMSUB231PD_RU_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB231PD_RU_SAE(ops...))
|
|
}
|
|
|
|
// VFNMSUB231PD_RU_SAE: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231PD.RU_SAE zmm zmm k zmm
|
|
// VFNMSUB231PD.RU_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB231PD.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB231PD_RU_SAE(ops ...operand.Op) { ctx.VFNMSUB231PD_RU_SAE(ops...) }
|
|
|
|
// VFNMSUB231PD_RU_SAE_Z: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231PD.RU_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB231PD.RU_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB231PD_RU_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB231PD_RU_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFNMSUB231PD_RU_SAE_Z: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231PD.RU_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB231PD.RU_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB231PD_RU_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFNMSUB231PD_RU_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFNMSUB231PD_RZ_SAE: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231PD.RZ_SAE zmm zmm k zmm
|
|
// VFNMSUB231PD.RZ_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB231PD.RZ_SAE instruction to the active function.
|
|
func (c *Context) VFNMSUB231PD_RZ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB231PD_RZ_SAE(ops...))
|
|
}
|
|
|
|
// VFNMSUB231PD_RZ_SAE: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231PD.RZ_SAE zmm zmm k zmm
|
|
// VFNMSUB231PD.RZ_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB231PD.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB231PD_RZ_SAE(ops ...operand.Op) { ctx.VFNMSUB231PD_RZ_SAE(ops...) }
|
|
|
|
// VFNMSUB231PD_RZ_SAE_Z: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231PD.RZ_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB231PD.RZ_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB231PD_RZ_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB231PD_RZ_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFNMSUB231PD_RZ_SAE_Z: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231PD.RZ_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB231PD.RZ_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB231PD_RZ_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFNMSUB231PD_RZ_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFNMSUB231PD_Z: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231PD.Z m128 xmm k xmm
|
|
// VFNMSUB231PD.Z m256 ymm k ymm
|
|
// VFNMSUB231PD.Z xmm xmm k xmm
|
|
// VFNMSUB231PD.Z ymm ymm k ymm
|
|
// VFNMSUB231PD.Z m512 zmm k zmm
|
|
// VFNMSUB231PD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB231PD.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB231PD_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB231PD_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VFNMSUB231PD_Z: Fused Negative Multiply-Subtract of Packed Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231PD.Z m128 xmm k xmm
|
|
// VFNMSUB231PD.Z m256 ymm k ymm
|
|
// VFNMSUB231PD.Z xmm xmm k xmm
|
|
// VFNMSUB231PD.Z ymm ymm k ymm
|
|
// VFNMSUB231PD.Z m512 zmm k zmm
|
|
// VFNMSUB231PD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB231PD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB231PD_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VFNMSUB231PD_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VFNMSUB231PS: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231PS m128 xmm xmm
|
|
// VFNMSUB231PS m256 ymm ymm
|
|
// VFNMSUB231PS xmm xmm xmm
|
|
// VFNMSUB231PS ymm ymm ymm
|
|
// VFNMSUB231PS m128 xmm k xmm
|
|
// VFNMSUB231PS m256 ymm k ymm
|
|
// VFNMSUB231PS xmm xmm k xmm
|
|
// VFNMSUB231PS ymm ymm k ymm
|
|
// VFNMSUB231PS m512 zmm k zmm
|
|
// VFNMSUB231PS m512 zmm zmm
|
|
// VFNMSUB231PS zmm zmm k zmm
|
|
// VFNMSUB231PS zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB231PS instruction to the active function.
|
|
func (c *Context) VFNMSUB231PS(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB231PS(ops...))
|
|
}
|
|
|
|
// VFNMSUB231PS: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231PS m128 xmm xmm
|
|
// VFNMSUB231PS m256 ymm ymm
|
|
// VFNMSUB231PS xmm xmm xmm
|
|
// VFNMSUB231PS ymm ymm ymm
|
|
// VFNMSUB231PS m128 xmm k xmm
|
|
// VFNMSUB231PS m256 ymm k ymm
|
|
// VFNMSUB231PS xmm xmm k xmm
|
|
// VFNMSUB231PS ymm ymm k ymm
|
|
// VFNMSUB231PS m512 zmm k zmm
|
|
// VFNMSUB231PS m512 zmm zmm
|
|
// VFNMSUB231PS zmm zmm k zmm
|
|
// VFNMSUB231PS zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB231PS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB231PS(ops ...operand.Op) { ctx.VFNMSUB231PS(ops...) }
|
|
|
|
// VFNMSUB231PS_BCST: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231PS.BCST m32 xmm k xmm
|
|
// VFNMSUB231PS.BCST m32 xmm xmm
|
|
// VFNMSUB231PS.BCST m32 ymm k ymm
|
|
// VFNMSUB231PS.BCST m32 ymm ymm
|
|
// VFNMSUB231PS.BCST m32 zmm k zmm
|
|
// VFNMSUB231PS.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB231PS.BCST instruction to the active function.
|
|
func (c *Context) VFNMSUB231PS_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB231PS_BCST(ops...))
|
|
}
|
|
|
|
// VFNMSUB231PS_BCST: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231PS.BCST m32 xmm k xmm
|
|
// VFNMSUB231PS.BCST m32 xmm xmm
|
|
// VFNMSUB231PS.BCST m32 ymm k ymm
|
|
// VFNMSUB231PS.BCST m32 ymm ymm
|
|
// VFNMSUB231PS.BCST m32 zmm k zmm
|
|
// VFNMSUB231PS.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB231PS.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB231PS_BCST(ops ...operand.Op) { ctx.VFNMSUB231PS_BCST(ops...) }
|
|
|
|
// VFNMSUB231PS_BCST_Z: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231PS.BCST.Z m32 xmm k xmm
|
|
// VFNMSUB231PS.BCST.Z m32 ymm k ymm
|
|
// VFNMSUB231PS.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB231PS.BCST.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB231PS_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB231PS_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VFNMSUB231PS_BCST_Z: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231PS.BCST.Z m32 xmm k xmm
|
|
// VFNMSUB231PS.BCST.Z m32 ymm k ymm
|
|
// VFNMSUB231PS.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB231PS.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB231PS_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VFNMSUB231PS_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VFNMSUB231PS_RD_SAE: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231PS.RD_SAE zmm zmm k zmm
|
|
// VFNMSUB231PS.RD_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB231PS.RD_SAE instruction to the active function.
|
|
func (c *Context) VFNMSUB231PS_RD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB231PS_RD_SAE(ops...))
|
|
}
|
|
|
|
// VFNMSUB231PS_RD_SAE: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231PS.RD_SAE zmm zmm k zmm
|
|
// VFNMSUB231PS.RD_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB231PS.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB231PS_RD_SAE(ops ...operand.Op) { ctx.VFNMSUB231PS_RD_SAE(ops...) }
|
|
|
|
// VFNMSUB231PS_RD_SAE_Z: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231PS.RD_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB231PS.RD_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB231PS_RD_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB231PS_RD_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFNMSUB231PS_RD_SAE_Z: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231PS.RD_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB231PS.RD_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB231PS_RD_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFNMSUB231PS_RD_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFNMSUB231PS_RN_SAE: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231PS.RN_SAE zmm zmm k zmm
|
|
// VFNMSUB231PS.RN_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB231PS.RN_SAE instruction to the active function.
|
|
func (c *Context) VFNMSUB231PS_RN_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB231PS_RN_SAE(ops...))
|
|
}
|
|
|
|
// VFNMSUB231PS_RN_SAE: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231PS.RN_SAE zmm zmm k zmm
|
|
// VFNMSUB231PS.RN_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB231PS.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB231PS_RN_SAE(ops ...operand.Op) { ctx.VFNMSUB231PS_RN_SAE(ops...) }
|
|
|
|
// VFNMSUB231PS_RN_SAE_Z: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231PS.RN_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB231PS.RN_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB231PS_RN_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB231PS_RN_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFNMSUB231PS_RN_SAE_Z: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231PS.RN_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB231PS.RN_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB231PS_RN_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFNMSUB231PS_RN_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFNMSUB231PS_RU_SAE: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231PS.RU_SAE zmm zmm k zmm
|
|
// VFNMSUB231PS.RU_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB231PS.RU_SAE instruction to the active function.
|
|
func (c *Context) VFNMSUB231PS_RU_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB231PS_RU_SAE(ops...))
|
|
}
|
|
|
|
// VFNMSUB231PS_RU_SAE: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231PS.RU_SAE zmm zmm k zmm
|
|
// VFNMSUB231PS.RU_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB231PS.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB231PS_RU_SAE(ops ...operand.Op) { ctx.VFNMSUB231PS_RU_SAE(ops...) }
|
|
|
|
// VFNMSUB231PS_RU_SAE_Z: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231PS.RU_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB231PS.RU_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB231PS_RU_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB231PS_RU_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFNMSUB231PS_RU_SAE_Z: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231PS.RU_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB231PS.RU_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB231PS_RU_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFNMSUB231PS_RU_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFNMSUB231PS_RZ_SAE: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231PS.RZ_SAE zmm zmm k zmm
|
|
// VFNMSUB231PS.RZ_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB231PS.RZ_SAE instruction to the active function.
|
|
func (c *Context) VFNMSUB231PS_RZ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB231PS_RZ_SAE(ops...))
|
|
}
|
|
|
|
// VFNMSUB231PS_RZ_SAE: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231PS.RZ_SAE zmm zmm k zmm
|
|
// VFNMSUB231PS.RZ_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VFNMSUB231PS.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB231PS_RZ_SAE(ops ...operand.Op) { ctx.VFNMSUB231PS_RZ_SAE(ops...) }
|
|
|
|
// VFNMSUB231PS_RZ_SAE_Z: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231PS.RZ_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB231PS.RZ_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB231PS_RZ_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB231PS_RZ_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VFNMSUB231PS_RZ_SAE_Z: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231PS.RZ_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB231PS.RZ_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB231PS_RZ_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VFNMSUB231PS_RZ_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VFNMSUB231PS_Z: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231PS.Z m128 xmm k xmm
|
|
// VFNMSUB231PS.Z m256 ymm k ymm
|
|
// VFNMSUB231PS.Z xmm xmm k xmm
|
|
// VFNMSUB231PS.Z ymm ymm k ymm
|
|
// VFNMSUB231PS.Z m512 zmm k zmm
|
|
// VFNMSUB231PS.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB231PS.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB231PS_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB231PS_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VFNMSUB231PS_Z: Fused Negative Multiply-Subtract of Packed Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231PS.Z m128 xmm k xmm
|
|
// VFNMSUB231PS.Z m256 ymm k ymm
|
|
// VFNMSUB231PS.Z xmm xmm k xmm
|
|
// VFNMSUB231PS.Z ymm ymm k ymm
|
|
// VFNMSUB231PS.Z m512 zmm k zmm
|
|
// VFNMSUB231PS.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VFNMSUB231PS.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB231PS_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VFNMSUB231PS_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VFNMSUB231SD: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231SD m64 xmm xmm
|
|
// VFNMSUB231SD xmm xmm xmm
|
|
// VFNMSUB231SD m64 xmm k xmm
|
|
// VFNMSUB231SD xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB231SD instruction to the active function.
|
|
func (c *Context) VFNMSUB231SD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB231SD(ops...))
|
|
}
|
|
|
|
// VFNMSUB231SD: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231SD m64 xmm xmm
|
|
// VFNMSUB231SD xmm xmm xmm
|
|
// VFNMSUB231SD m64 xmm k xmm
|
|
// VFNMSUB231SD xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB231SD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB231SD(ops ...operand.Op) { ctx.VFNMSUB231SD(ops...) }
|
|
|
|
// VFNMSUB231SD_RD_SAE: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231SD.RD_SAE xmm xmm k xmm
|
|
// VFNMSUB231SD.RD_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMSUB231SD.RD_SAE instruction to the active function.
|
|
func (c *Context) VFNMSUB231SD_RD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB231SD_RD_SAE(ops...))
|
|
}
|
|
|
|
// VFNMSUB231SD_RD_SAE: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231SD.RD_SAE xmm xmm k xmm
|
|
// VFNMSUB231SD.RD_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMSUB231SD.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB231SD_RD_SAE(ops ...operand.Op) { ctx.VFNMSUB231SD_RD_SAE(ops...) }
|
|
|
|
// VFNMSUB231SD_RD_SAE_Z: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231SD.RD_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB231SD.RD_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB231SD_RD_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB231SD_RD_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VFNMSUB231SD_RD_SAE_Z: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231SD.RD_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB231SD.RD_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB231SD_RD_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFNMSUB231SD_RD_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFNMSUB231SD_RN_SAE: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231SD.RN_SAE xmm xmm k xmm
|
|
// VFNMSUB231SD.RN_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMSUB231SD.RN_SAE instruction to the active function.
|
|
func (c *Context) VFNMSUB231SD_RN_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB231SD_RN_SAE(ops...))
|
|
}
|
|
|
|
// VFNMSUB231SD_RN_SAE: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231SD.RN_SAE xmm xmm k xmm
|
|
// VFNMSUB231SD.RN_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMSUB231SD.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB231SD_RN_SAE(ops ...operand.Op) { ctx.VFNMSUB231SD_RN_SAE(ops...) }
|
|
|
|
// VFNMSUB231SD_RN_SAE_Z: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231SD.RN_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB231SD.RN_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB231SD_RN_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB231SD_RN_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VFNMSUB231SD_RN_SAE_Z: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231SD.RN_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB231SD.RN_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB231SD_RN_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFNMSUB231SD_RN_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFNMSUB231SD_RU_SAE: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231SD.RU_SAE xmm xmm k xmm
|
|
// VFNMSUB231SD.RU_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMSUB231SD.RU_SAE instruction to the active function.
|
|
func (c *Context) VFNMSUB231SD_RU_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB231SD_RU_SAE(ops...))
|
|
}
|
|
|
|
// VFNMSUB231SD_RU_SAE: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231SD.RU_SAE xmm xmm k xmm
|
|
// VFNMSUB231SD.RU_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMSUB231SD.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB231SD_RU_SAE(ops ...operand.Op) { ctx.VFNMSUB231SD_RU_SAE(ops...) }
|
|
|
|
// VFNMSUB231SD_RU_SAE_Z: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231SD.RU_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB231SD.RU_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB231SD_RU_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB231SD_RU_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VFNMSUB231SD_RU_SAE_Z: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231SD.RU_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB231SD.RU_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB231SD_RU_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFNMSUB231SD_RU_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFNMSUB231SD_RZ_SAE: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231SD.RZ_SAE xmm xmm k xmm
|
|
// VFNMSUB231SD.RZ_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMSUB231SD.RZ_SAE instruction to the active function.
|
|
func (c *Context) VFNMSUB231SD_RZ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB231SD_RZ_SAE(ops...))
|
|
}
|
|
|
|
// VFNMSUB231SD_RZ_SAE: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231SD.RZ_SAE xmm xmm k xmm
|
|
// VFNMSUB231SD.RZ_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMSUB231SD.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB231SD_RZ_SAE(ops ...operand.Op) { ctx.VFNMSUB231SD_RZ_SAE(ops...) }
|
|
|
|
// VFNMSUB231SD_RZ_SAE_Z: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231SD.RZ_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB231SD.RZ_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB231SD_RZ_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB231SD_RZ_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VFNMSUB231SD_RZ_SAE_Z: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231SD.RZ_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB231SD.RZ_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB231SD_RZ_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFNMSUB231SD_RZ_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFNMSUB231SD_Z: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231SD.Z m64 xmm k xmm
|
|
// VFNMSUB231SD.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB231SD.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB231SD_Z(mx, x, k, x1 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB231SD_Z(mx, x, k, x1))
|
|
}
|
|
|
|
// VFNMSUB231SD_Z: Fused Negative Multiply-Subtract of Scalar Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231SD.Z m64 xmm k xmm
|
|
// VFNMSUB231SD.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB231SD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB231SD_Z(mx, x, k, x1 operand.Op) { ctx.VFNMSUB231SD_Z(mx, x, k, x1) }
|
|
|
|
// VFNMSUB231SS: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231SS m32 xmm xmm
|
|
// VFNMSUB231SS xmm xmm xmm
|
|
// VFNMSUB231SS m32 xmm k xmm
|
|
// VFNMSUB231SS xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB231SS instruction to the active function.
|
|
func (c *Context) VFNMSUB231SS(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB231SS(ops...))
|
|
}
|
|
|
|
// VFNMSUB231SS: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231SS m32 xmm xmm
|
|
// VFNMSUB231SS xmm xmm xmm
|
|
// VFNMSUB231SS m32 xmm k xmm
|
|
// VFNMSUB231SS xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB231SS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB231SS(ops ...operand.Op) { ctx.VFNMSUB231SS(ops...) }
|
|
|
|
// VFNMSUB231SS_RD_SAE: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231SS.RD_SAE xmm xmm k xmm
|
|
// VFNMSUB231SS.RD_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMSUB231SS.RD_SAE instruction to the active function.
|
|
func (c *Context) VFNMSUB231SS_RD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB231SS_RD_SAE(ops...))
|
|
}
|
|
|
|
// VFNMSUB231SS_RD_SAE: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231SS.RD_SAE xmm xmm k xmm
|
|
// VFNMSUB231SS.RD_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMSUB231SS.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB231SS_RD_SAE(ops ...operand.Op) { ctx.VFNMSUB231SS_RD_SAE(ops...) }
|
|
|
|
// VFNMSUB231SS_RD_SAE_Z: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231SS.RD_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB231SS.RD_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB231SS_RD_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB231SS_RD_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VFNMSUB231SS_RD_SAE_Z: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231SS.RD_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB231SS.RD_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB231SS_RD_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFNMSUB231SS_RD_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFNMSUB231SS_RN_SAE: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231SS.RN_SAE xmm xmm k xmm
|
|
// VFNMSUB231SS.RN_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMSUB231SS.RN_SAE instruction to the active function.
|
|
func (c *Context) VFNMSUB231SS_RN_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB231SS_RN_SAE(ops...))
|
|
}
|
|
|
|
// VFNMSUB231SS_RN_SAE: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231SS.RN_SAE xmm xmm k xmm
|
|
// VFNMSUB231SS.RN_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMSUB231SS.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB231SS_RN_SAE(ops ...operand.Op) { ctx.VFNMSUB231SS_RN_SAE(ops...) }
|
|
|
|
// VFNMSUB231SS_RN_SAE_Z: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231SS.RN_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB231SS.RN_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB231SS_RN_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB231SS_RN_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VFNMSUB231SS_RN_SAE_Z: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231SS.RN_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB231SS.RN_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB231SS_RN_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFNMSUB231SS_RN_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFNMSUB231SS_RU_SAE: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231SS.RU_SAE xmm xmm k xmm
|
|
// VFNMSUB231SS.RU_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMSUB231SS.RU_SAE instruction to the active function.
|
|
func (c *Context) VFNMSUB231SS_RU_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB231SS_RU_SAE(ops...))
|
|
}
|
|
|
|
// VFNMSUB231SS_RU_SAE: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231SS.RU_SAE xmm xmm k xmm
|
|
// VFNMSUB231SS.RU_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMSUB231SS.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB231SS_RU_SAE(ops ...operand.Op) { ctx.VFNMSUB231SS_RU_SAE(ops...) }
|
|
|
|
// VFNMSUB231SS_RU_SAE_Z: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231SS.RU_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB231SS.RU_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB231SS_RU_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB231SS_RU_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VFNMSUB231SS_RU_SAE_Z: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231SS.RU_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB231SS.RU_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB231SS_RU_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFNMSUB231SS_RU_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFNMSUB231SS_RZ_SAE: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231SS.RZ_SAE xmm xmm k xmm
|
|
// VFNMSUB231SS.RZ_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMSUB231SS.RZ_SAE instruction to the active function.
|
|
func (c *Context) VFNMSUB231SS_RZ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB231SS_RZ_SAE(ops...))
|
|
}
|
|
|
|
// VFNMSUB231SS_RZ_SAE: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231SS.RZ_SAE xmm xmm k xmm
|
|
// VFNMSUB231SS.RZ_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VFNMSUB231SS.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB231SS_RZ_SAE(ops ...operand.Op) { ctx.VFNMSUB231SS_RZ_SAE(ops...) }
|
|
|
|
// VFNMSUB231SS_RZ_SAE_Z: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231SS.RZ_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB231SS.RZ_SAE.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB231SS_RZ_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB231SS_RZ_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VFNMSUB231SS_RZ_SAE_Z: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231SS.RZ_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB231SS.RZ_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB231SS_RZ_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VFNMSUB231SS_RZ_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VFNMSUB231SS_Z: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231SS.Z m32 xmm k xmm
|
|
// VFNMSUB231SS.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB231SS.Z instruction to the active function.
|
|
func (c *Context) VFNMSUB231SS_Z(mx, x, k, x1 operand.Op) {
|
|
c.addinstruction(x86.VFNMSUB231SS_Z(mx, x, k, x1))
|
|
}
|
|
|
|
// VFNMSUB231SS_Z: Fused Negative Multiply-Subtract of Scalar Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFNMSUB231SS.Z m32 xmm k xmm
|
|
// VFNMSUB231SS.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VFNMSUB231SS.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFNMSUB231SS_Z(mx, x, k, x1 operand.Op) { ctx.VFNMSUB231SS_Z(mx, x, k, x1) }
|
|
|
|
// VFPCLASSPDX: Test Class of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFPCLASSPDX imm8 m128 k k
|
|
// VFPCLASSPDX imm8 m128 k
|
|
// VFPCLASSPDX imm8 xmm k k
|
|
// VFPCLASSPDX imm8 xmm k
|
|
//
|
|
// Construct and append a VFPCLASSPDX instruction to the active function.
|
|
func (c *Context) VFPCLASSPDX(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFPCLASSPDX(ops...))
|
|
}
|
|
|
|
// VFPCLASSPDX: Test Class of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFPCLASSPDX imm8 m128 k k
|
|
// VFPCLASSPDX imm8 m128 k
|
|
// VFPCLASSPDX imm8 xmm k k
|
|
// VFPCLASSPDX imm8 xmm k
|
|
//
|
|
// Construct and append a VFPCLASSPDX instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFPCLASSPDX(ops ...operand.Op) { ctx.VFPCLASSPDX(ops...) }
|
|
|
|
// VFPCLASSPDX_BCST: Test Class of Packed Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFPCLASSPDX.BCST imm8 m64 k k
|
|
// VFPCLASSPDX.BCST imm8 m64 k
|
|
//
|
|
// Construct and append a VFPCLASSPDX.BCST instruction to the active function.
|
|
func (c *Context) VFPCLASSPDX_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFPCLASSPDX_BCST(ops...))
|
|
}
|
|
|
|
// VFPCLASSPDX_BCST: Test Class of Packed Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFPCLASSPDX.BCST imm8 m64 k k
|
|
// VFPCLASSPDX.BCST imm8 m64 k
|
|
//
|
|
// Construct and append a VFPCLASSPDX.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFPCLASSPDX_BCST(ops ...operand.Op) { ctx.VFPCLASSPDX_BCST(ops...) }
|
|
|
|
// VFPCLASSPDY: Test Class of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFPCLASSPDY imm8 m256 k k
|
|
// VFPCLASSPDY imm8 m256 k
|
|
// VFPCLASSPDY imm8 ymm k k
|
|
// VFPCLASSPDY imm8 ymm k
|
|
//
|
|
// Construct and append a VFPCLASSPDY instruction to the active function.
|
|
func (c *Context) VFPCLASSPDY(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFPCLASSPDY(ops...))
|
|
}
|
|
|
|
// VFPCLASSPDY: Test Class of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFPCLASSPDY imm8 m256 k k
|
|
// VFPCLASSPDY imm8 m256 k
|
|
// VFPCLASSPDY imm8 ymm k k
|
|
// VFPCLASSPDY imm8 ymm k
|
|
//
|
|
// Construct and append a VFPCLASSPDY instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFPCLASSPDY(ops ...operand.Op) { ctx.VFPCLASSPDY(ops...) }
|
|
|
|
// VFPCLASSPDY_BCST: Test Class of Packed Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFPCLASSPDY.BCST imm8 m64 k k
|
|
// VFPCLASSPDY.BCST imm8 m64 k
|
|
//
|
|
// Construct and append a VFPCLASSPDY.BCST instruction to the active function.
|
|
func (c *Context) VFPCLASSPDY_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFPCLASSPDY_BCST(ops...))
|
|
}
|
|
|
|
// VFPCLASSPDY_BCST: Test Class of Packed Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFPCLASSPDY.BCST imm8 m64 k k
|
|
// VFPCLASSPDY.BCST imm8 m64 k
|
|
//
|
|
// Construct and append a VFPCLASSPDY.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFPCLASSPDY_BCST(ops ...operand.Op) { ctx.VFPCLASSPDY_BCST(ops...) }
|
|
|
|
// VFPCLASSPDZ: Test Class of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFPCLASSPDZ imm8 m512 k k
|
|
// VFPCLASSPDZ imm8 m512 k
|
|
// VFPCLASSPDZ imm8 zmm k k
|
|
// VFPCLASSPDZ imm8 zmm k
|
|
//
|
|
// Construct and append a VFPCLASSPDZ instruction to the active function.
|
|
func (c *Context) VFPCLASSPDZ(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFPCLASSPDZ(ops...))
|
|
}
|
|
|
|
// VFPCLASSPDZ: Test Class of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFPCLASSPDZ imm8 m512 k k
|
|
// VFPCLASSPDZ imm8 m512 k
|
|
// VFPCLASSPDZ imm8 zmm k k
|
|
// VFPCLASSPDZ imm8 zmm k
|
|
//
|
|
// Construct and append a VFPCLASSPDZ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFPCLASSPDZ(ops ...operand.Op) { ctx.VFPCLASSPDZ(ops...) }
|
|
|
|
// VFPCLASSPDZ_BCST: Test Class of Packed Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFPCLASSPDZ.BCST imm8 m64 k k
|
|
// VFPCLASSPDZ.BCST imm8 m64 k
|
|
//
|
|
// Construct and append a VFPCLASSPDZ.BCST instruction to the active function.
|
|
func (c *Context) VFPCLASSPDZ_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFPCLASSPDZ_BCST(ops...))
|
|
}
|
|
|
|
// VFPCLASSPDZ_BCST: Test Class of Packed Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFPCLASSPDZ.BCST imm8 m64 k k
|
|
// VFPCLASSPDZ.BCST imm8 m64 k
|
|
//
|
|
// Construct and append a VFPCLASSPDZ.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFPCLASSPDZ_BCST(ops ...operand.Op) { ctx.VFPCLASSPDZ_BCST(ops...) }
|
|
|
|
// VFPCLASSPSX: Test Class of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFPCLASSPSX imm8 m128 k k
|
|
// VFPCLASSPSX imm8 m128 k
|
|
// VFPCLASSPSX imm8 xmm k k
|
|
// VFPCLASSPSX imm8 xmm k
|
|
//
|
|
// Construct and append a VFPCLASSPSX instruction to the active function.
|
|
func (c *Context) VFPCLASSPSX(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFPCLASSPSX(ops...))
|
|
}
|
|
|
|
// VFPCLASSPSX: Test Class of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFPCLASSPSX imm8 m128 k k
|
|
// VFPCLASSPSX imm8 m128 k
|
|
// VFPCLASSPSX imm8 xmm k k
|
|
// VFPCLASSPSX imm8 xmm k
|
|
//
|
|
// Construct and append a VFPCLASSPSX instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFPCLASSPSX(ops ...operand.Op) { ctx.VFPCLASSPSX(ops...) }
|
|
|
|
// VFPCLASSPSX_BCST: Test Class of Packed Single-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFPCLASSPSX.BCST imm8 m32 k k
|
|
// VFPCLASSPSX.BCST imm8 m32 k
|
|
//
|
|
// Construct and append a VFPCLASSPSX.BCST instruction to the active function.
|
|
func (c *Context) VFPCLASSPSX_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFPCLASSPSX_BCST(ops...))
|
|
}
|
|
|
|
// VFPCLASSPSX_BCST: Test Class of Packed Single-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFPCLASSPSX.BCST imm8 m32 k k
|
|
// VFPCLASSPSX.BCST imm8 m32 k
|
|
//
|
|
// Construct and append a VFPCLASSPSX.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFPCLASSPSX_BCST(ops ...operand.Op) { ctx.VFPCLASSPSX_BCST(ops...) }
|
|
|
|
// VFPCLASSPSY: Test Class of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFPCLASSPSY imm8 m256 k k
|
|
// VFPCLASSPSY imm8 m256 k
|
|
// VFPCLASSPSY imm8 ymm k k
|
|
// VFPCLASSPSY imm8 ymm k
|
|
//
|
|
// Construct and append a VFPCLASSPSY instruction to the active function.
|
|
func (c *Context) VFPCLASSPSY(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFPCLASSPSY(ops...))
|
|
}
|
|
|
|
// VFPCLASSPSY: Test Class of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFPCLASSPSY imm8 m256 k k
|
|
// VFPCLASSPSY imm8 m256 k
|
|
// VFPCLASSPSY imm8 ymm k k
|
|
// VFPCLASSPSY imm8 ymm k
|
|
//
|
|
// Construct and append a VFPCLASSPSY instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFPCLASSPSY(ops ...operand.Op) { ctx.VFPCLASSPSY(ops...) }
|
|
|
|
// VFPCLASSPSY_BCST: Test Class of Packed Single-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFPCLASSPSY.BCST imm8 m32 k k
|
|
// VFPCLASSPSY.BCST imm8 m32 k
|
|
//
|
|
// Construct and append a VFPCLASSPSY.BCST instruction to the active function.
|
|
func (c *Context) VFPCLASSPSY_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFPCLASSPSY_BCST(ops...))
|
|
}
|
|
|
|
// VFPCLASSPSY_BCST: Test Class of Packed Single-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFPCLASSPSY.BCST imm8 m32 k k
|
|
// VFPCLASSPSY.BCST imm8 m32 k
|
|
//
|
|
// Construct and append a VFPCLASSPSY.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFPCLASSPSY_BCST(ops ...operand.Op) { ctx.VFPCLASSPSY_BCST(ops...) }
|
|
|
|
// VFPCLASSPSZ: Test Class of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFPCLASSPSZ imm8 m512 k k
|
|
// VFPCLASSPSZ imm8 m512 k
|
|
// VFPCLASSPSZ imm8 zmm k k
|
|
// VFPCLASSPSZ imm8 zmm k
|
|
//
|
|
// Construct and append a VFPCLASSPSZ instruction to the active function.
|
|
func (c *Context) VFPCLASSPSZ(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFPCLASSPSZ(ops...))
|
|
}
|
|
|
|
// VFPCLASSPSZ: Test Class of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFPCLASSPSZ imm8 m512 k k
|
|
// VFPCLASSPSZ imm8 m512 k
|
|
// VFPCLASSPSZ imm8 zmm k k
|
|
// VFPCLASSPSZ imm8 zmm k
|
|
//
|
|
// Construct and append a VFPCLASSPSZ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFPCLASSPSZ(ops ...operand.Op) { ctx.VFPCLASSPSZ(ops...) }
|
|
|
|
// VFPCLASSPSZ_BCST: Test Class of Packed Single-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFPCLASSPSZ.BCST imm8 m32 k k
|
|
// VFPCLASSPSZ.BCST imm8 m32 k
|
|
//
|
|
// Construct and append a VFPCLASSPSZ.BCST instruction to the active function.
|
|
func (c *Context) VFPCLASSPSZ_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFPCLASSPSZ_BCST(ops...))
|
|
}
|
|
|
|
// VFPCLASSPSZ_BCST: Test Class of Packed Single-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFPCLASSPSZ.BCST imm8 m32 k k
|
|
// VFPCLASSPSZ.BCST imm8 m32 k
|
|
//
|
|
// Construct and append a VFPCLASSPSZ.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFPCLASSPSZ_BCST(ops ...operand.Op) { ctx.VFPCLASSPSZ_BCST(ops...) }
|
|
|
|
// VFPCLASSSD: Test Class of Scalar Double-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFPCLASSSD imm8 m64 k k
|
|
// VFPCLASSSD imm8 m64 k
|
|
// VFPCLASSSD imm8 xmm k k
|
|
// VFPCLASSSD imm8 xmm k
|
|
//
|
|
// Construct and append a VFPCLASSSD instruction to the active function.
|
|
func (c *Context) VFPCLASSSD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFPCLASSSD(ops...))
|
|
}
|
|
|
|
// VFPCLASSSD: Test Class of Scalar Double-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFPCLASSSD imm8 m64 k k
|
|
// VFPCLASSSD imm8 m64 k
|
|
// VFPCLASSSD imm8 xmm k k
|
|
// VFPCLASSSD imm8 xmm k
|
|
//
|
|
// Construct and append a VFPCLASSSD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFPCLASSSD(ops ...operand.Op) { ctx.VFPCLASSSD(ops...) }
|
|
|
|
// VFPCLASSSS: Test Class of Scalar Single-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFPCLASSSS imm8 m32 k k
|
|
// VFPCLASSSS imm8 m32 k
|
|
// VFPCLASSSS imm8 xmm k k
|
|
// VFPCLASSSS imm8 xmm k
|
|
//
|
|
// Construct and append a VFPCLASSSS instruction to the active function.
|
|
func (c *Context) VFPCLASSSS(ops ...operand.Op) {
|
|
c.addinstruction(x86.VFPCLASSSS(ops...))
|
|
}
|
|
|
|
// VFPCLASSSS: Test Class of Scalar Single-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VFPCLASSSS imm8 m32 k k
|
|
// VFPCLASSSS imm8 m32 k
|
|
// VFPCLASSSS imm8 xmm k k
|
|
// VFPCLASSSS imm8 xmm k
|
|
//
|
|
// Construct and append a VFPCLASSSS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VFPCLASSSS(ops ...operand.Op) { ctx.VFPCLASSSS(ops...) }
|
|
|
|
// VGATHERDPD: Gather Packed Double-Precision Floating-Point Values Using Signed Doubleword Indices.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGATHERDPD xmm vm32x xmm
|
|
// VGATHERDPD ymm vm32x ymm
|
|
// VGATHERDPD vm32x k xmm
|
|
// VGATHERDPD vm32x k ymm
|
|
// VGATHERDPD vm32y k zmm
|
|
//
|
|
// Construct and append a VGATHERDPD instruction to the active function.
|
|
func (c *Context) VGATHERDPD(vxy, kv, xyz operand.Op) {
|
|
c.addinstruction(x86.VGATHERDPD(vxy, kv, xyz))
|
|
}
|
|
|
|
// VGATHERDPD: Gather Packed Double-Precision Floating-Point Values Using Signed Doubleword Indices.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGATHERDPD xmm vm32x xmm
|
|
// VGATHERDPD ymm vm32x ymm
|
|
// VGATHERDPD vm32x k xmm
|
|
// VGATHERDPD vm32x k ymm
|
|
// VGATHERDPD vm32y k zmm
|
|
//
|
|
// Construct and append a VGATHERDPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VGATHERDPD(vxy, kv, xyz operand.Op) { ctx.VGATHERDPD(vxy, kv, xyz) }
|
|
|
|
// VGATHERDPS: Gather Packed Single-Precision Floating-Point Values Using Signed Doubleword Indices.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGATHERDPS xmm vm32x xmm
|
|
// VGATHERDPS ymm vm32y ymm
|
|
// VGATHERDPS vm32x k xmm
|
|
// VGATHERDPS vm32y k ymm
|
|
// VGATHERDPS vm32z k zmm
|
|
//
|
|
// Construct and append a VGATHERDPS instruction to the active function.
|
|
func (c *Context) VGATHERDPS(vxy, kv, xyz operand.Op) {
|
|
c.addinstruction(x86.VGATHERDPS(vxy, kv, xyz))
|
|
}
|
|
|
|
// VGATHERDPS: Gather Packed Single-Precision Floating-Point Values Using Signed Doubleword Indices.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGATHERDPS xmm vm32x xmm
|
|
// VGATHERDPS ymm vm32y ymm
|
|
// VGATHERDPS vm32x k xmm
|
|
// VGATHERDPS vm32y k ymm
|
|
// VGATHERDPS vm32z k zmm
|
|
//
|
|
// Construct and append a VGATHERDPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VGATHERDPS(vxy, kv, xyz operand.Op) { ctx.VGATHERDPS(vxy, kv, xyz) }
|
|
|
|
// VGATHERQPD: Gather Packed Double-Precision Floating-Point Values Using Signed Quadword Indices.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGATHERQPD xmm vm64x xmm
|
|
// VGATHERQPD ymm vm64y ymm
|
|
// VGATHERQPD vm64x k xmm
|
|
// VGATHERQPD vm64y k ymm
|
|
// VGATHERQPD vm64z k zmm
|
|
//
|
|
// Construct and append a VGATHERQPD instruction to the active function.
|
|
func (c *Context) VGATHERQPD(vxy, kv, xyz operand.Op) {
|
|
c.addinstruction(x86.VGATHERQPD(vxy, kv, xyz))
|
|
}
|
|
|
|
// VGATHERQPD: Gather Packed Double-Precision Floating-Point Values Using Signed Quadword Indices.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGATHERQPD xmm vm64x xmm
|
|
// VGATHERQPD ymm vm64y ymm
|
|
// VGATHERQPD vm64x k xmm
|
|
// VGATHERQPD vm64y k ymm
|
|
// VGATHERQPD vm64z k zmm
|
|
//
|
|
// Construct and append a VGATHERQPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VGATHERQPD(vxy, kv, xyz operand.Op) { ctx.VGATHERQPD(vxy, kv, xyz) }
|
|
|
|
// VGATHERQPS: Gather Packed Single-Precision Floating-Point Values Using Signed Quadword Indices.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGATHERQPS xmm vm64x xmm
|
|
// VGATHERQPS xmm vm64y xmm
|
|
// VGATHERQPS vm64x k xmm
|
|
// VGATHERQPS vm64y k xmm
|
|
// VGATHERQPS vm64z k ymm
|
|
//
|
|
// Construct and append a VGATHERQPS instruction to the active function.
|
|
func (c *Context) VGATHERQPS(vx, kv, xy operand.Op) {
|
|
c.addinstruction(x86.VGATHERQPS(vx, kv, xy))
|
|
}
|
|
|
|
// VGATHERQPS: Gather Packed Single-Precision Floating-Point Values Using Signed Quadword Indices.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGATHERQPS xmm vm64x xmm
|
|
// VGATHERQPS xmm vm64y xmm
|
|
// VGATHERQPS vm64x k xmm
|
|
// VGATHERQPS vm64y k xmm
|
|
// VGATHERQPS vm64z k ymm
|
|
//
|
|
// Construct and append a VGATHERQPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VGATHERQPS(vx, kv, xy operand.Op) { ctx.VGATHERQPS(vx, kv, xy) }
|
|
|
|
// VGETEXPPD: Extract Exponents of Packed Double-Precision Floating-Point Values as Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETEXPPD m128 k xmm
|
|
// VGETEXPPD m128 xmm
|
|
// VGETEXPPD m256 k ymm
|
|
// VGETEXPPD m256 ymm
|
|
// VGETEXPPD xmm k xmm
|
|
// VGETEXPPD xmm xmm
|
|
// VGETEXPPD ymm k ymm
|
|
// VGETEXPPD ymm ymm
|
|
// VGETEXPPD m512 k zmm
|
|
// VGETEXPPD m512 zmm
|
|
// VGETEXPPD zmm k zmm
|
|
// VGETEXPPD zmm zmm
|
|
//
|
|
// Construct and append a VGETEXPPD instruction to the active function.
|
|
func (c *Context) VGETEXPPD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VGETEXPPD(ops...))
|
|
}
|
|
|
|
// VGETEXPPD: Extract Exponents of Packed Double-Precision Floating-Point Values as Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETEXPPD m128 k xmm
|
|
// VGETEXPPD m128 xmm
|
|
// VGETEXPPD m256 k ymm
|
|
// VGETEXPPD m256 ymm
|
|
// VGETEXPPD xmm k xmm
|
|
// VGETEXPPD xmm xmm
|
|
// VGETEXPPD ymm k ymm
|
|
// VGETEXPPD ymm ymm
|
|
// VGETEXPPD m512 k zmm
|
|
// VGETEXPPD m512 zmm
|
|
// VGETEXPPD zmm k zmm
|
|
// VGETEXPPD zmm zmm
|
|
//
|
|
// Construct and append a VGETEXPPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VGETEXPPD(ops ...operand.Op) { ctx.VGETEXPPD(ops...) }
|
|
|
|
// VGETEXPPD_BCST: Extract Exponents of Packed Double-Precision Floating-Point Values as Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETEXPPD.BCST m64 k xmm
|
|
// VGETEXPPD.BCST m64 k ymm
|
|
// VGETEXPPD.BCST m64 xmm
|
|
// VGETEXPPD.BCST m64 ymm
|
|
// VGETEXPPD.BCST m64 k zmm
|
|
// VGETEXPPD.BCST m64 zmm
|
|
//
|
|
// Construct and append a VGETEXPPD.BCST instruction to the active function.
|
|
func (c *Context) VGETEXPPD_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VGETEXPPD_BCST(ops...))
|
|
}
|
|
|
|
// VGETEXPPD_BCST: Extract Exponents of Packed Double-Precision Floating-Point Values as Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETEXPPD.BCST m64 k xmm
|
|
// VGETEXPPD.BCST m64 k ymm
|
|
// VGETEXPPD.BCST m64 xmm
|
|
// VGETEXPPD.BCST m64 ymm
|
|
// VGETEXPPD.BCST m64 k zmm
|
|
// VGETEXPPD.BCST m64 zmm
|
|
//
|
|
// Construct and append a VGETEXPPD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VGETEXPPD_BCST(ops ...operand.Op) { ctx.VGETEXPPD_BCST(ops...) }
|
|
|
|
// VGETEXPPD_BCST_Z: Extract Exponents of Packed Double-Precision Floating-Point Values as Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETEXPPD.BCST.Z m64 k xmm
|
|
// VGETEXPPD.BCST.Z m64 k ymm
|
|
// VGETEXPPD.BCST.Z m64 k zmm
|
|
//
|
|
// Construct and append a VGETEXPPD.BCST.Z instruction to the active function.
|
|
func (c *Context) VGETEXPPD_BCST_Z(m, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VGETEXPPD_BCST_Z(m, k, xyz))
|
|
}
|
|
|
|
// VGETEXPPD_BCST_Z: Extract Exponents of Packed Double-Precision Floating-Point Values as Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETEXPPD.BCST.Z m64 k xmm
|
|
// VGETEXPPD.BCST.Z m64 k ymm
|
|
// VGETEXPPD.BCST.Z m64 k zmm
|
|
//
|
|
// Construct and append a VGETEXPPD.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VGETEXPPD_BCST_Z(m, k, xyz operand.Op) { ctx.VGETEXPPD_BCST_Z(m, k, xyz) }
|
|
|
|
// VGETEXPPD_SAE: Extract Exponents of Packed Double-Precision Floating-Point Values as Double-Precision Floating-Point Values (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETEXPPD.SAE zmm k zmm
|
|
// VGETEXPPD.SAE zmm zmm
|
|
//
|
|
// Construct and append a VGETEXPPD.SAE instruction to the active function.
|
|
func (c *Context) VGETEXPPD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VGETEXPPD_SAE(ops...))
|
|
}
|
|
|
|
// VGETEXPPD_SAE: Extract Exponents of Packed Double-Precision Floating-Point Values as Double-Precision Floating-Point Values (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETEXPPD.SAE zmm k zmm
|
|
// VGETEXPPD.SAE zmm zmm
|
|
//
|
|
// Construct and append a VGETEXPPD.SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VGETEXPPD_SAE(ops ...operand.Op) { ctx.VGETEXPPD_SAE(ops...) }
|
|
|
|
// VGETEXPPD_SAE_Z: Extract Exponents of Packed Double-Precision Floating-Point Values as Double-Precision Floating-Point Values (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETEXPPD.SAE.Z zmm k zmm
|
|
//
|
|
// Construct and append a VGETEXPPD.SAE.Z instruction to the active function.
|
|
func (c *Context) VGETEXPPD_SAE_Z(z, k, z1 operand.Op) {
|
|
c.addinstruction(x86.VGETEXPPD_SAE_Z(z, k, z1))
|
|
}
|
|
|
|
// VGETEXPPD_SAE_Z: Extract Exponents of Packed Double-Precision Floating-Point Values as Double-Precision Floating-Point Values (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETEXPPD.SAE.Z zmm k zmm
|
|
//
|
|
// Construct and append a VGETEXPPD.SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VGETEXPPD_SAE_Z(z, k, z1 operand.Op) { ctx.VGETEXPPD_SAE_Z(z, k, z1) }
|
|
|
|
// VGETEXPPD_Z: Extract Exponents of Packed Double-Precision Floating-Point Values as Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETEXPPD.Z m128 k xmm
|
|
// VGETEXPPD.Z m256 k ymm
|
|
// VGETEXPPD.Z xmm k xmm
|
|
// VGETEXPPD.Z ymm k ymm
|
|
// VGETEXPPD.Z m512 k zmm
|
|
// VGETEXPPD.Z zmm k zmm
|
|
//
|
|
// Construct and append a VGETEXPPD.Z instruction to the active function.
|
|
func (c *Context) VGETEXPPD_Z(mxyz, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VGETEXPPD_Z(mxyz, k, xyz))
|
|
}
|
|
|
|
// VGETEXPPD_Z: Extract Exponents of Packed Double-Precision Floating-Point Values as Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETEXPPD.Z m128 k xmm
|
|
// VGETEXPPD.Z m256 k ymm
|
|
// VGETEXPPD.Z xmm k xmm
|
|
// VGETEXPPD.Z ymm k ymm
|
|
// VGETEXPPD.Z m512 k zmm
|
|
// VGETEXPPD.Z zmm k zmm
|
|
//
|
|
// Construct and append a VGETEXPPD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VGETEXPPD_Z(mxyz, k, xyz operand.Op) { ctx.VGETEXPPD_Z(mxyz, k, xyz) }
|
|
|
|
// VGETEXPPS: Extract Exponents of Packed Single-Precision Floating-Point Values as Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETEXPPS m128 k xmm
|
|
// VGETEXPPS m128 xmm
|
|
// VGETEXPPS m256 k ymm
|
|
// VGETEXPPS m256 ymm
|
|
// VGETEXPPS xmm k xmm
|
|
// VGETEXPPS xmm xmm
|
|
// VGETEXPPS ymm k ymm
|
|
// VGETEXPPS ymm ymm
|
|
// VGETEXPPS m512 k zmm
|
|
// VGETEXPPS m512 zmm
|
|
// VGETEXPPS zmm k zmm
|
|
// VGETEXPPS zmm zmm
|
|
//
|
|
// Construct and append a VGETEXPPS instruction to the active function.
|
|
func (c *Context) VGETEXPPS(ops ...operand.Op) {
|
|
c.addinstruction(x86.VGETEXPPS(ops...))
|
|
}
|
|
|
|
// VGETEXPPS: Extract Exponents of Packed Single-Precision Floating-Point Values as Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETEXPPS m128 k xmm
|
|
// VGETEXPPS m128 xmm
|
|
// VGETEXPPS m256 k ymm
|
|
// VGETEXPPS m256 ymm
|
|
// VGETEXPPS xmm k xmm
|
|
// VGETEXPPS xmm xmm
|
|
// VGETEXPPS ymm k ymm
|
|
// VGETEXPPS ymm ymm
|
|
// VGETEXPPS m512 k zmm
|
|
// VGETEXPPS m512 zmm
|
|
// VGETEXPPS zmm k zmm
|
|
// VGETEXPPS zmm zmm
|
|
//
|
|
// Construct and append a VGETEXPPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VGETEXPPS(ops ...operand.Op) { ctx.VGETEXPPS(ops...) }
|
|
|
|
// VGETEXPPS_BCST: Extract Exponents of Packed Single-Precision Floating-Point Values as Single-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETEXPPS.BCST m32 k xmm
|
|
// VGETEXPPS.BCST m32 k ymm
|
|
// VGETEXPPS.BCST m32 xmm
|
|
// VGETEXPPS.BCST m32 ymm
|
|
// VGETEXPPS.BCST m32 k zmm
|
|
// VGETEXPPS.BCST m32 zmm
|
|
//
|
|
// Construct and append a VGETEXPPS.BCST instruction to the active function.
|
|
func (c *Context) VGETEXPPS_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VGETEXPPS_BCST(ops...))
|
|
}
|
|
|
|
// VGETEXPPS_BCST: Extract Exponents of Packed Single-Precision Floating-Point Values as Single-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETEXPPS.BCST m32 k xmm
|
|
// VGETEXPPS.BCST m32 k ymm
|
|
// VGETEXPPS.BCST m32 xmm
|
|
// VGETEXPPS.BCST m32 ymm
|
|
// VGETEXPPS.BCST m32 k zmm
|
|
// VGETEXPPS.BCST m32 zmm
|
|
//
|
|
// Construct and append a VGETEXPPS.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VGETEXPPS_BCST(ops ...operand.Op) { ctx.VGETEXPPS_BCST(ops...) }
|
|
|
|
// VGETEXPPS_BCST_Z: Extract Exponents of Packed Single-Precision Floating-Point Values as Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETEXPPS.BCST.Z m32 k xmm
|
|
// VGETEXPPS.BCST.Z m32 k ymm
|
|
// VGETEXPPS.BCST.Z m32 k zmm
|
|
//
|
|
// Construct and append a VGETEXPPS.BCST.Z instruction to the active function.
|
|
func (c *Context) VGETEXPPS_BCST_Z(m, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VGETEXPPS_BCST_Z(m, k, xyz))
|
|
}
|
|
|
|
// VGETEXPPS_BCST_Z: Extract Exponents of Packed Single-Precision Floating-Point Values as Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETEXPPS.BCST.Z m32 k xmm
|
|
// VGETEXPPS.BCST.Z m32 k ymm
|
|
// VGETEXPPS.BCST.Z m32 k zmm
|
|
//
|
|
// Construct and append a VGETEXPPS.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VGETEXPPS_BCST_Z(m, k, xyz operand.Op) { ctx.VGETEXPPS_BCST_Z(m, k, xyz) }
|
|
|
|
// VGETEXPPS_SAE: Extract Exponents of Packed Single-Precision Floating-Point Values as Single-Precision Floating-Point Values (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETEXPPS.SAE zmm k zmm
|
|
// VGETEXPPS.SAE zmm zmm
|
|
//
|
|
// Construct and append a VGETEXPPS.SAE instruction to the active function.
|
|
func (c *Context) VGETEXPPS_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VGETEXPPS_SAE(ops...))
|
|
}
|
|
|
|
// VGETEXPPS_SAE: Extract Exponents of Packed Single-Precision Floating-Point Values as Single-Precision Floating-Point Values (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETEXPPS.SAE zmm k zmm
|
|
// VGETEXPPS.SAE zmm zmm
|
|
//
|
|
// Construct and append a VGETEXPPS.SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VGETEXPPS_SAE(ops ...operand.Op) { ctx.VGETEXPPS_SAE(ops...) }
|
|
|
|
// VGETEXPPS_SAE_Z: Extract Exponents of Packed Single-Precision Floating-Point Values as Single-Precision Floating-Point Values (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETEXPPS.SAE.Z zmm k zmm
|
|
//
|
|
// Construct and append a VGETEXPPS.SAE.Z instruction to the active function.
|
|
func (c *Context) VGETEXPPS_SAE_Z(z, k, z1 operand.Op) {
|
|
c.addinstruction(x86.VGETEXPPS_SAE_Z(z, k, z1))
|
|
}
|
|
|
|
// VGETEXPPS_SAE_Z: Extract Exponents of Packed Single-Precision Floating-Point Values as Single-Precision Floating-Point Values (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETEXPPS.SAE.Z zmm k zmm
|
|
//
|
|
// Construct and append a VGETEXPPS.SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VGETEXPPS_SAE_Z(z, k, z1 operand.Op) { ctx.VGETEXPPS_SAE_Z(z, k, z1) }
|
|
|
|
// VGETEXPPS_Z: Extract Exponents of Packed Single-Precision Floating-Point Values as Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETEXPPS.Z m128 k xmm
|
|
// VGETEXPPS.Z m256 k ymm
|
|
// VGETEXPPS.Z xmm k xmm
|
|
// VGETEXPPS.Z ymm k ymm
|
|
// VGETEXPPS.Z m512 k zmm
|
|
// VGETEXPPS.Z zmm k zmm
|
|
//
|
|
// Construct and append a VGETEXPPS.Z instruction to the active function.
|
|
func (c *Context) VGETEXPPS_Z(mxyz, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VGETEXPPS_Z(mxyz, k, xyz))
|
|
}
|
|
|
|
// VGETEXPPS_Z: Extract Exponents of Packed Single-Precision Floating-Point Values as Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETEXPPS.Z m128 k xmm
|
|
// VGETEXPPS.Z m256 k ymm
|
|
// VGETEXPPS.Z xmm k xmm
|
|
// VGETEXPPS.Z ymm k ymm
|
|
// VGETEXPPS.Z m512 k zmm
|
|
// VGETEXPPS.Z zmm k zmm
|
|
//
|
|
// Construct and append a VGETEXPPS.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VGETEXPPS_Z(mxyz, k, xyz operand.Op) { ctx.VGETEXPPS_Z(mxyz, k, xyz) }
|
|
|
|
// VGETEXPSD: Extract Exponent of Scalar Double-Precision Floating-Point Value as Double-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETEXPSD m64 xmm k xmm
|
|
// VGETEXPSD m64 xmm xmm
|
|
// VGETEXPSD xmm xmm k xmm
|
|
// VGETEXPSD xmm xmm xmm
|
|
//
|
|
// Construct and append a VGETEXPSD instruction to the active function.
|
|
func (c *Context) VGETEXPSD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VGETEXPSD(ops...))
|
|
}
|
|
|
|
// VGETEXPSD: Extract Exponent of Scalar Double-Precision Floating-Point Value as Double-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETEXPSD m64 xmm k xmm
|
|
// VGETEXPSD m64 xmm xmm
|
|
// VGETEXPSD xmm xmm k xmm
|
|
// VGETEXPSD xmm xmm xmm
|
|
//
|
|
// Construct and append a VGETEXPSD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VGETEXPSD(ops ...operand.Op) { ctx.VGETEXPSD(ops...) }
|
|
|
|
// VGETEXPSD_SAE: Extract Exponent of Scalar Double-Precision Floating-Point Value as Double-Precision Floating-Point Value (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETEXPSD.SAE xmm xmm k xmm
|
|
// VGETEXPSD.SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VGETEXPSD.SAE instruction to the active function.
|
|
func (c *Context) VGETEXPSD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VGETEXPSD_SAE(ops...))
|
|
}
|
|
|
|
// VGETEXPSD_SAE: Extract Exponent of Scalar Double-Precision Floating-Point Value as Double-Precision Floating-Point Value (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETEXPSD.SAE xmm xmm k xmm
|
|
// VGETEXPSD.SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VGETEXPSD.SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VGETEXPSD_SAE(ops ...operand.Op) { ctx.VGETEXPSD_SAE(ops...) }
|
|
|
|
// VGETEXPSD_SAE_Z: Extract Exponent of Scalar Double-Precision Floating-Point Value as Double-Precision Floating-Point Value (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETEXPSD.SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VGETEXPSD.SAE.Z instruction to the active function.
|
|
func (c *Context) VGETEXPSD_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VGETEXPSD_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VGETEXPSD_SAE_Z: Extract Exponent of Scalar Double-Precision Floating-Point Value as Double-Precision Floating-Point Value (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETEXPSD.SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VGETEXPSD.SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VGETEXPSD_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VGETEXPSD_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VGETEXPSD_Z: Extract Exponent of Scalar Double-Precision Floating-Point Value as Double-Precision Floating-Point Value (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETEXPSD.Z m64 xmm k xmm
|
|
// VGETEXPSD.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VGETEXPSD.Z instruction to the active function.
|
|
func (c *Context) VGETEXPSD_Z(mx, x, k, x1 operand.Op) {
|
|
c.addinstruction(x86.VGETEXPSD_Z(mx, x, k, x1))
|
|
}
|
|
|
|
// VGETEXPSD_Z: Extract Exponent of Scalar Double-Precision Floating-Point Value as Double-Precision Floating-Point Value (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETEXPSD.Z m64 xmm k xmm
|
|
// VGETEXPSD.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VGETEXPSD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VGETEXPSD_Z(mx, x, k, x1 operand.Op) { ctx.VGETEXPSD_Z(mx, x, k, x1) }
|
|
|
|
// VGETEXPSS: Extract Exponent of Scalar Single-Precision Floating-Point Value as Single-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETEXPSS m32 xmm k xmm
|
|
// VGETEXPSS m32 xmm xmm
|
|
// VGETEXPSS xmm xmm k xmm
|
|
// VGETEXPSS xmm xmm xmm
|
|
//
|
|
// Construct and append a VGETEXPSS instruction to the active function.
|
|
func (c *Context) VGETEXPSS(ops ...operand.Op) {
|
|
c.addinstruction(x86.VGETEXPSS(ops...))
|
|
}
|
|
|
|
// VGETEXPSS: Extract Exponent of Scalar Single-Precision Floating-Point Value as Single-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETEXPSS m32 xmm k xmm
|
|
// VGETEXPSS m32 xmm xmm
|
|
// VGETEXPSS xmm xmm k xmm
|
|
// VGETEXPSS xmm xmm xmm
|
|
//
|
|
// Construct and append a VGETEXPSS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VGETEXPSS(ops ...operand.Op) { ctx.VGETEXPSS(ops...) }
|
|
|
|
// VGETEXPSS_SAE: Extract Exponent of Scalar Single-Precision Floating-Point Value as Single-Precision Floating-Point Value (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETEXPSS.SAE xmm xmm k xmm
|
|
// VGETEXPSS.SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VGETEXPSS.SAE instruction to the active function.
|
|
func (c *Context) VGETEXPSS_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VGETEXPSS_SAE(ops...))
|
|
}
|
|
|
|
// VGETEXPSS_SAE: Extract Exponent of Scalar Single-Precision Floating-Point Value as Single-Precision Floating-Point Value (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETEXPSS.SAE xmm xmm k xmm
|
|
// VGETEXPSS.SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VGETEXPSS.SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VGETEXPSS_SAE(ops ...operand.Op) { ctx.VGETEXPSS_SAE(ops...) }
|
|
|
|
// VGETEXPSS_SAE_Z: Extract Exponent of Scalar Single-Precision Floating-Point Value as Single-Precision Floating-Point Value (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETEXPSS.SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VGETEXPSS.SAE.Z instruction to the active function.
|
|
func (c *Context) VGETEXPSS_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VGETEXPSS_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VGETEXPSS_SAE_Z: Extract Exponent of Scalar Single-Precision Floating-Point Value as Single-Precision Floating-Point Value (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETEXPSS.SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VGETEXPSS.SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VGETEXPSS_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VGETEXPSS_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VGETEXPSS_Z: Extract Exponent of Scalar Single-Precision Floating-Point Value as Single-Precision Floating-Point Value (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETEXPSS.Z m32 xmm k xmm
|
|
// VGETEXPSS.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VGETEXPSS.Z instruction to the active function.
|
|
func (c *Context) VGETEXPSS_Z(mx, x, k, x1 operand.Op) {
|
|
c.addinstruction(x86.VGETEXPSS_Z(mx, x, k, x1))
|
|
}
|
|
|
|
// VGETEXPSS_Z: Extract Exponent of Scalar Single-Precision Floating-Point Value as Single-Precision Floating-Point Value (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETEXPSS.Z m32 xmm k xmm
|
|
// VGETEXPSS.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VGETEXPSS.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VGETEXPSS_Z(mx, x, k, x1 operand.Op) { ctx.VGETEXPSS_Z(mx, x, k, x1) }
|
|
|
|
// VGETMANTPD: Extract Normalized Mantissas from Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETMANTPD imm8 m128 k xmm
|
|
// VGETMANTPD imm8 m128 xmm
|
|
// VGETMANTPD imm8 m256 k ymm
|
|
// VGETMANTPD imm8 m256 ymm
|
|
// VGETMANTPD imm8 xmm k xmm
|
|
// VGETMANTPD imm8 xmm xmm
|
|
// VGETMANTPD imm8 ymm k ymm
|
|
// VGETMANTPD imm8 ymm ymm
|
|
// VGETMANTPD imm8 m512 k zmm
|
|
// VGETMANTPD imm8 m512 zmm
|
|
// VGETMANTPD imm8 zmm k zmm
|
|
// VGETMANTPD imm8 zmm zmm
|
|
//
|
|
// Construct and append a VGETMANTPD instruction to the active function.
|
|
func (c *Context) VGETMANTPD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VGETMANTPD(ops...))
|
|
}
|
|
|
|
// VGETMANTPD: Extract Normalized Mantissas from Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETMANTPD imm8 m128 k xmm
|
|
// VGETMANTPD imm8 m128 xmm
|
|
// VGETMANTPD imm8 m256 k ymm
|
|
// VGETMANTPD imm8 m256 ymm
|
|
// VGETMANTPD imm8 xmm k xmm
|
|
// VGETMANTPD imm8 xmm xmm
|
|
// VGETMANTPD imm8 ymm k ymm
|
|
// VGETMANTPD imm8 ymm ymm
|
|
// VGETMANTPD imm8 m512 k zmm
|
|
// VGETMANTPD imm8 m512 zmm
|
|
// VGETMANTPD imm8 zmm k zmm
|
|
// VGETMANTPD imm8 zmm zmm
|
|
//
|
|
// Construct and append a VGETMANTPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VGETMANTPD(ops ...operand.Op) { ctx.VGETMANTPD(ops...) }
|
|
|
|
// VGETMANTPD_BCST: Extract Normalized Mantissas from Packed Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETMANTPD.BCST imm8 m64 k xmm
|
|
// VGETMANTPD.BCST imm8 m64 k ymm
|
|
// VGETMANTPD.BCST imm8 m64 xmm
|
|
// VGETMANTPD.BCST imm8 m64 ymm
|
|
// VGETMANTPD.BCST imm8 m64 k zmm
|
|
// VGETMANTPD.BCST imm8 m64 zmm
|
|
//
|
|
// Construct and append a VGETMANTPD.BCST instruction to the active function.
|
|
func (c *Context) VGETMANTPD_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VGETMANTPD_BCST(ops...))
|
|
}
|
|
|
|
// VGETMANTPD_BCST: Extract Normalized Mantissas from Packed Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETMANTPD.BCST imm8 m64 k xmm
|
|
// VGETMANTPD.BCST imm8 m64 k ymm
|
|
// VGETMANTPD.BCST imm8 m64 xmm
|
|
// VGETMANTPD.BCST imm8 m64 ymm
|
|
// VGETMANTPD.BCST imm8 m64 k zmm
|
|
// VGETMANTPD.BCST imm8 m64 zmm
|
|
//
|
|
// Construct and append a VGETMANTPD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VGETMANTPD_BCST(ops ...operand.Op) { ctx.VGETMANTPD_BCST(ops...) }
|
|
|
|
// VGETMANTPD_BCST_Z: Extract Normalized Mantissas from Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETMANTPD.BCST.Z imm8 m64 k xmm
|
|
// VGETMANTPD.BCST.Z imm8 m64 k ymm
|
|
// VGETMANTPD.BCST.Z imm8 m64 k zmm
|
|
//
|
|
// Construct and append a VGETMANTPD.BCST.Z instruction to the active function.
|
|
func (c *Context) VGETMANTPD_BCST_Z(i, m, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VGETMANTPD_BCST_Z(i, m, k, xyz))
|
|
}
|
|
|
|
// VGETMANTPD_BCST_Z: Extract Normalized Mantissas from Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETMANTPD.BCST.Z imm8 m64 k xmm
|
|
// VGETMANTPD.BCST.Z imm8 m64 k ymm
|
|
// VGETMANTPD.BCST.Z imm8 m64 k zmm
|
|
//
|
|
// Construct and append a VGETMANTPD.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VGETMANTPD_BCST_Z(i, m, k, xyz operand.Op) { ctx.VGETMANTPD_BCST_Z(i, m, k, xyz) }
|
|
|
|
// VGETMANTPD_SAE: Extract Normalized Mantissas from Packed Double-Precision Floating-Point Values (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETMANTPD.SAE imm8 zmm k zmm
|
|
// VGETMANTPD.SAE imm8 zmm zmm
|
|
//
|
|
// Construct and append a VGETMANTPD.SAE instruction to the active function.
|
|
func (c *Context) VGETMANTPD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VGETMANTPD_SAE(ops...))
|
|
}
|
|
|
|
// VGETMANTPD_SAE: Extract Normalized Mantissas from Packed Double-Precision Floating-Point Values (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETMANTPD.SAE imm8 zmm k zmm
|
|
// VGETMANTPD.SAE imm8 zmm zmm
|
|
//
|
|
// Construct and append a VGETMANTPD.SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VGETMANTPD_SAE(ops ...operand.Op) { ctx.VGETMANTPD_SAE(ops...) }
|
|
|
|
// VGETMANTPD_SAE_Z: Extract Normalized Mantissas from Packed Double-Precision Floating-Point Values (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETMANTPD.SAE.Z imm8 zmm k zmm
|
|
//
|
|
// Construct and append a VGETMANTPD.SAE.Z instruction to the active function.
|
|
func (c *Context) VGETMANTPD_SAE_Z(i, z, k, z1 operand.Op) {
|
|
c.addinstruction(x86.VGETMANTPD_SAE_Z(i, z, k, z1))
|
|
}
|
|
|
|
// VGETMANTPD_SAE_Z: Extract Normalized Mantissas from Packed Double-Precision Floating-Point Values (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETMANTPD.SAE.Z imm8 zmm k zmm
|
|
//
|
|
// Construct and append a VGETMANTPD.SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VGETMANTPD_SAE_Z(i, z, k, z1 operand.Op) { ctx.VGETMANTPD_SAE_Z(i, z, k, z1) }
|
|
|
|
// VGETMANTPD_Z: Extract Normalized Mantissas from Packed Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETMANTPD.Z imm8 m128 k xmm
|
|
// VGETMANTPD.Z imm8 m256 k ymm
|
|
// VGETMANTPD.Z imm8 xmm k xmm
|
|
// VGETMANTPD.Z imm8 ymm k ymm
|
|
// VGETMANTPD.Z imm8 m512 k zmm
|
|
// VGETMANTPD.Z imm8 zmm k zmm
|
|
//
|
|
// Construct and append a VGETMANTPD.Z instruction to the active function.
|
|
func (c *Context) VGETMANTPD_Z(i, mxyz, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VGETMANTPD_Z(i, mxyz, k, xyz))
|
|
}
|
|
|
|
// VGETMANTPD_Z: Extract Normalized Mantissas from Packed Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETMANTPD.Z imm8 m128 k xmm
|
|
// VGETMANTPD.Z imm8 m256 k ymm
|
|
// VGETMANTPD.Z imm8 xmm k xmm
|
|
// VGETMANTPD.Z imm8 ymm k ymm
|
|
// VGETMANTPD.Z imm8 m512 k zmm
|
|
// VGETMANTPD.Z imm8 zmm k zmm
|
|
//
|
|
// Construct and append a VGETMANTPD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VGETMANTPD_Z(i, mxyz, k, xyz operand.Op) { ctx.VGETMANTPD_Z(i, mxyz, k, xyz) }
|
|
|
|
// VGETMANTPS: Extract Normalized Mantissas from Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETMANTPS imm8 m128 k xmm
|
|
// VGETMANTPS imm8 m128 xmm
|
|
// VGETMANTPS imm8 m256 k ymm
|
|
// VGETMANTPS imm8 m256 ymm
|
|
// VGETMANTPS imm8 xmm k xmm
|
|
// VGETMANTPS imm8 xmm xmm
|
|
// VGETMANTPS imm8 ymm k ymm
|
|
// VGETMANTPS imm8 ymm ymm
|
|
// VGETMANTPS imm8 m512 k zmm
|
|
// VGETMANTPS imm8 m512 zmm
|
|
// VGETMANTPS imm8 zmm k zmm
|
|
// VGETMANTPS imm8 zmm zmm
|
|
//
|
|
// Construct and append a VGETMANTPS instruction to the active function.
|
|
func (c *Context) VGETMANTPS(ops ...operand.Op) {
|
|
c.addinstruction(x86.VGETMANTPS(ops...))
|
|
}
|
|
|
|
// VGETMANTPS: Extract Normalized Mantissas from Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETMANTPS imm8 m128 k xmm
|
|
// VGETMANTPS imm8 m128 xmm
|
|
// VGETMANTPS imm8 m256 k ymm
|
|
// VGETMANTPS imm8 m256 ymm
|
|
// VGETMANTPS imm8 xmm k xmm
|
|
// VGETMANTPS imm8 xmm xmm
|
|
// VGETMANTPS imm8 ymm k ymm
|
|
// VGETMANTPS imm8 ymm ymm
|
|
// VGETMANTPS imm8 m512 k zmm
|
|
// VGETMANTPS imm8 m512 zmm
|
|
// VGETMANTPS imm8 zmm k zmm
|
|
// VGETMANTPS imm8 zmm zmm
|
|
//
|
|
// Construct and append a VGETMANTPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VGETMANTPS(ops ...operand.Op) { ctx.VGETMANTPS(ops...) }
|
|
|
|
// VGETMANTPS_BCST: Extract Normalized Mantissas from Packed Single-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETMANTPS.BCST imm8 m32 k xmm
|
|
// VGETMANTPS.BCST imm8 m32 k ymm
|
|
// VGETMANTPS.BCST imm8 m32 xmm
|
|
// VGETMANTPS.BCST imm8 m32 ymm
|
|
// VGETMANTPS.BCST imm8 m32 k zmm
|
|
// VGETMANTPS.BCST imm8 m32 zmm
|
|
//
|
|
// Construct and append a VGETMANTPS.BCST instruction to the active function.
|
|
func (c *Context) VGETMANTPS_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VGETMANTPS_BCST(ops...))
|
|
}
|
|
|
|
// VGETMANTPS_BCST: Extract Normalized Mantissas from Packed Single-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETMANTPS.BCST imm8 m32 k xmm
|
|
// VGETMANTPS.BCST imm8 m32 k ymm
|
|
// VGETMANTPS.BCST imm8 m32 xmm
|
|
// VGETMANTPS.BCST imm8 m32 ymm
|
|
// VGETMANTPS.BCST imm8 m32 k zmm
|
|
// VGETMANTPS.BCST imm8 m32 zmm
|
|
//
|
|
// Construct and append a VGETMANTPS.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VGETMANTPS_BCST(ops ...operand.Op) { ctx.VGETMANTPS_BCST(ops...) }
|
|
|
|
// VGETMANTPS_BCST_Z: Extract Normalized Mantissas from Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETMANTPS.BCST.Z imm8 m32 k xmm
|
|
// VGETMANTPS.BCST.Z imm8 m32 k ymm
|
|
// VGETMANTPS.BCST.Z imm8 m32 k zmm
|
|
//
|
|
// Construct and append a VGETMANTPS.BCST.Z instruction to the active function.
|
|
func (c *Context) VGETMANTPS_BCST_Z(i, m, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VGETMANTPS_BCST_Z(i, m, k, xyz))
|
|
}
|
|
|
|
// VGETMANTPS_BCST_Z: Extract Normalized Mantissas from Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETMANTPS.BCST.Z imm8 m32 k xmm
|
|
// VGETMANTPS.BCST.Z imm8 m32 k ymm
|
|
// VGETMANTPS.BCST.Z imm8 m32 k zmm
|
|
//
|
|
// Construct and append a VGETMANTPS.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VGETMANTPS_BCST_Z(i, m, k, xyz operand.Op) { ctx.VGETMANTPS_BCST_Z(i, m, k, xyz) }
|
|
|
|
// VGETMANTPS_SAE: Extract Normalized Mantissas from Packed Single-Precision Floating-Point Values (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETMANTPS.SAE imm8 zmm k zmm
|
|
// VGETMANTPS.SAE imm8 zmm zmm
|
|
//
|
|
// Construct and append a VGETMANTPS.SAE instruction to the active function.
|
|
func (c *Context) VGETMANTPS_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VGETMANTPS_SAE(ops...))
|
|
}
|
|
|
|
// VGETMANTPS_SAE: Extract Normalized Mantissas from Packed Single-Precision Floating-Point Values (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETMANTPS.SAE imm8 zmm k zmm
|
|
// VGETMANTPS.SAE imm8 zmm zmm
|
|
//
|
|
// Construct and append a VGETMANTPS.SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VGETMANTPS_SAE(ops ...operand.Op) { ctx.VGETMANTPS_SAE(ops...) }
|
|
|
|
// VGETMANTPS_SAE_Z: Extract Normalized Mantissas from Packed Single-Precision Floating-Point Values (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETMANTPS.SAE.Z imm8 zmm k zmm
|
|
//
|
|
// Construct and append a VGETMANTPS.SAE.Z instruction to the active function.
|
|
func (c *Context) VGETMANTPS_SAE_Z(i, z, k, z1 operand.Op) {
|
|
c.addinstruction(x86.VGETMANTPS_SAE_Z(i, z, k, z1))
|
|
}
|
|
|
|
// VGETMANTPS_SAE_Z: Extract Normalized Mantissas from Packed Single-Precision Floating-Point Values (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETMANTPS.SAE.Z imm8 zmm k zmm
|
|
//
|
|
// Construct and append a VGETMANTPS.SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VGETMANTPS_SAE_Z(i, z, k, z1 operand.Op) { ctx.VGETMANTPS_SAE_Z(i, z, k, z1) }
|
|
|
|
// VGETMANTPS_Z: Extract Normalized Mantissas from Packed Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETMANTPS.Z imm8 m128 k xmm
|
|
// VGETMANTPS.Z imm8 m256 k ymm
|
|
// VGETMANTPS.Z imm8 xmm k xmm
|
|
// VGETMANTPS.Z imm8 ymm k ymm
|
|
// VGETMANTPS.Z imm8 m512 k zmm
|
|
// VGETMANTPS.Z imm8 zmm k zmm
|
|
//
|
|
// Construct and append a VGETMANTPS.Z instruction to the active function.
|
|
func (c *Context) VGETMANTPS_Z(i, mxyz, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VGETMANTPS_Z(i, mxyz, k, xyz))
|
|
}
|
|
|
|
// VGETMANTPS_Z: Extract Normalized Mantissas from Packed Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETMANTPS.Z imm8 m128 k xmm
|
|
// VGETMANTPS.Z imm8 m256 k ymm
|
|
// VGETMANTPS.Z imm8 xmm k xmm
|
|
// VGETMANTPS.Z imm8 ymm k ymm
|
|
// VGETMANTPS.Z imm8 m512 k zmm
|
|
// VGETMANTPS.Z imm8 zmm k zmm
|
|
//
|
|
// Construct and append a VGETMANTPS.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VGETMANTPS_Z(i, mxyz, k, xyz operand.Op) { ctx.VGETMANTPS_Z(i, mxyz, k, xyz) }
|
|
|
|
// VGETMANTSD: Extract Normalized Mantissa from Scalar Double-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETMANTSD imm8 m64 xmm k xmm
|
|
// VGETMANTSD imm8 m64 xmm xmm
|
|
// VGETMANTSD imm8 xmm xmm k xmm
|
|
// VGETMANTSD imm8 xmm xmm xmm
|
|
//
|
|
// Construct and append a VGETMANTSD instruction to the active function.
|
|
func (c *Context) VGETMANTSD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VGETMANTSD(ops...))
|
|
}
|
|
|
|
// VGETMANTSD: Extract Normalized Mantissa from Scalar Double-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETMANTSD imm8 m64 xmm k xmm
|
|
// VGETMANTSD imm8 m64 xmm xmm
|
|
// VGETMANTSD imm8 xmm xmm k xmm
|
|
// VGETMANTSD imm8 xmm xmm xmm
|
|
//
|
|
// Construct and append a VGETMANTSD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VGETMANTSD(ops ...operand.Op) { ctx.VGETMANTSD(ops...) }
|
|
|
|
// VGETMANTSD_SAE: Extract Normalized Mantissa from Scalar Double-Precision Floating-Point Value (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETMANTSD.SAE imm8 xmm xmm k xmm
|
|
// VGETMANTSD.SAE imm8 xmm xmm xmm
|
|
//
|
|
// Construct and append a VGETMANTSD.SAE instruction to the active function.
|
|
func (c *Context) VGETMANTSD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VGETMANTSD_SAE(ops...))
|
|
}
|
|
|
|
// VGETMANTSD_SAE: Extract Normalized Mantissa from Scalar Double-Precision Floating-Point Value (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETMANTSD.SAE imm8 xmm xmm k xmm
|
|
// VGETMANTSD.SAE imm8 xmm xmm xmm
|
|
//
|
|
// Construct and append a VGETMANTSD.SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VGETMANTSD_SAE(ops ...operand.Op) { ctx.VGETMANTSD_SAE(ops...) }
|
|
|
|
// VGETMANTSD_SAE_Z: Extract Normalized Mantissa from Scalar Double-Precision Floating-Point Value (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETMANTSD.SAE.Z imm8 xmm xmm k xmm
|
|
//
|
|
// Construct and append a VGETMANTSD.SAE.Z instruction to the active function.
|
|
func (c *Context) VGETMANTSD_SAE_Z(i, x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VGETMANTSD_SAE_Z(i, x, x1, k, x2))
|
|
}
|
|
|
|
// VGETMANTSD_SAE_Z: Extract Normalized Mantissa from Scalar Double-Precision Floating-Point Value (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETMANTSD.SAE.Z imm8 xmm xmm k xmm
|
|
//
|
|
// Construct and append a VGETMANTSD.SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VGETMANTSD_SAE_Z(i, x, x1, k, x2 operand.Op) { ctx.VGETMANTSD_SAE_Z(i, x, x1, k, x2) }
|
|
|
|
// VGETMANTSD_Z: Extract Normalized Mantissa from Scalar Double-Precision Floating-Point Value (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETMANTSD.Z imm8 m64 xmm k xmm
|
|
// VGETMANTSD.Z imm8 xmm xmm k xmm
|
|
//
|
|
// Construct and append a VGETMANTSD.Z instruction to the active function.
|
|
func (c *Context) VGETMANTSD_Z(i, mx, x, k, x1 operand.Op) {
|
|
c.addinstruction(x86.VGETMANTSD_Z(i, mx, x, k, x1))
|
|
}
|
|
|
|
// VGETMANTSD_Z: Extract Normalized Mantissa from Scalar Double-Precision Floating-Point Value (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETMANTSD.Z imm8 m64 xmm k xmm
|
|
// VGETMANTSD.Z imm8 xmm xmm k xmm
|
|
//
|
|
// Construct and append a VGETMANTSD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VGETMANTSD_Z(i, mx, x, k, x1 operand.Op) { ctx.VGETMANTSD_Z(i, mx, x, k, x1) }
|
|
|
|
// VGETMANTSS: Extract Normalized Mantissa from Scalar Single-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETMANTSS imm8 m32 xmm k xmm
|
|
// VGETMANTSS imm8 m32 xmm xmm
|
|
// VGETMANTSS imm8 xmm xmm k xmm
|
|
// VGETMANTSS imm8 xmm xmm xmm
|
|
//
|
|
// Construct and append a VGETMANTSS instruction to the active function.
|
|
func (c *Context) VGETMANTSS(ops ...operand.Op) {
|
|
c.addinstruction(x86.VGETMANTSS(ops...))
|
|
}
|
|
|
|
// VGETMANTSS: Extract Normalized Mantissa from Scalar Single-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETMANTSS imm8 m32 xmm k xmm
|
|
// VGETMANTSS imm8 m32 xmm xmm
|
|
// VGETMANTSS imm8 xmm xmm k xmm
|
|
// VGETMANTSS imm8 xmm xmm xmm
|
|
//
|
|
// Construct and append a VGETMANTSS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VGETMANTSS(ops ...operand.Op) { ctx.VGETMANTSS(ops...) }
|
|
|
|
// VGETMANTSS_SAE: Extract Normalized Mantissa from Scalar Single-Precision Floating-Point Value (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETMANTSS.SAE imm8 xmm xmm k xmm
|
|
// VGETMANTSS.SAE imm8 xmm xmm xmm
|
|
//
|
|
// Construct and append a VGETMANTSS.SAE instruction to the active function.
|
|
func (c *Context) VGETMANTSS_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VGETMANTSS_SAE(ops...))
|
|
}
|
|
|
|
// VGETMANTSS_SAE: Extract Normalized Mantissa from Scalar Single-Precision Floating-Point Value (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETMANTSS.SAE imm8 xmm xmm k xmm
|
|
// VGETMANTSS.SAE imm8 xmm xmm xmm
|
|
//
|
|
// Construct and append a VGETMANTSS.SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VGETMANTSS_SAE(ops ...operand.Op) { ctx.VGETMANTSS_SAE(ops...) }
|
|
|
|
// VGETMANTSS_SAE_Z: Extract Normalized Mantissa from Scalar Single-Precision Floating-Point Value (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETMANTSS.SAE.Z imm8 xmm xmm k xmm
|
|
//
|
|
// Construct and append a VGETMANTSS.SAE.Z instruction to the active function.
|
|
func (c *Context) VGETMANTSS_SAE_Z(i, x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VGETMANTSS_SAE_Z(i, x, x1, k, x2))
|
|
}
|
|
|
|
// VGETMANTSS_SAE_Z: Extract Normalized Mantissa from Scalar Single-Precision Floating-Point Value (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETMANTSS.SAE.Z imm8 xmm xmm k xmm
|
|
//
|
|
// Construct and append a VGETMANTSS.SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VGETMANTSS_SAE_Z(i, x, x1, k, x2 operand.Op) { ctx.VGETMANTSS_SAE_Z(i, x, x1, k, x2) }
|
|
|
|
// VGETMANTSS_Z: Extract Normalized Mantissa from Scalar Single-Precision Floating-Point Value (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETMANTSS.Z imm8 m32 xmm k xmm
|
|
// VGETMANTSS.Z imm8 xmm xmm k xmm
|
|
//
|
|
// Construct and append a VGETMANTSS.Z instruction to the active function.
|
|
func (c *Context) VGETMANTSS_Z(i, mx, x, k, x1 operand.Op) {
|
|
c.addinstruction(x86.VGETMANTSS_Z(i, mx, x, k, x1))
|
|
}
|
|
|
|
// VGETMANTSS_Z: Extract Normalized Mantissa from Scalar Single-Precision Floating-Point Value (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VGETMANTSS.Z imm8 m32 xmm k xmm
|
|
// VGETMANTSS.Z imm8 xmm xmm k xmm
|
|
//
|
|
// Construct and append a VGETMANTSS.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VGETMANTSS_Z(i, mx, x, k, x1 operand.Op) { ctx.VGETMANTSS_Z(i, mx, x, k, x1) }
|
|
|
|
// VHADDPD: Packed Double-FP Horizontal Add.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VHADDPD m128 xmm xmm
|
|
// VHADDPD m256 ymm ymm
|
|
// VHADDPD xmm xmm xmm
|
|
// VHADDPD ymm ymm ymm
|
|
//
|
|
// Construct and append a VHADDPD instruction to the active function.
|
|
func (c *Context) VHADDPD(mxy, xy, xy1 operand.Op) {
|
|
c.addinstruction(x86.VHADDPD(mxy, xy, xy1))
|
|
}
|
|
|
|
// VHADDPD: Packed Double-FP Horizontal Add.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VHADDPD m128 xmm xmm
|
|
// VHADDPD m256 ymm ymm
|
|
// VHADDPD xmm xmm xmm
|
|
// VHADDPD ymm ymm ymm
|
|
//
|
|
// Construct and append a VHADDPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VHADDPD(mxy, xy, xy1 operand.Op) { ctx.VHADDPD(mxy, xy, xy1) }
|
|
|
|
// VHADDPS: Packed Single-FP Horizontal Add.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VHADDPS m128 xmm xmm
|
|
// VHADDPS m256 ymm ymm
|
|
// VHADDPS xmm xmm xmm
|
|
// VHADDPS ymm ymm ymm
|
|
//
|
|
// Construct and append a VHADDPS instruction to the active function.
|
|
func (c *Context) VHADDPS(mxy, xy, xy1 operand.Op) {
|
|
c.addinstruction(x86.VHADDPS(mxy, xy, xy1))
|
|
}
|
|
|
|
// VHADDPS: Packed Single-FP Horizontal Add.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VHADDPS m128 xmm xmm
|
|
// VHADDPS m256 ymm ymm
|
|
// VHADDPS xmm xmm xmm
|
|
// VHADDPS ymm ymm ymm
|
|
//
|
|
// Construct and append a VHADDPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VHADDPS(mxy, xy, xy1 operand.Op) { ctx.VHADDPS(mxy, xy, xy1) }
|
|
|
|
// VHSUBPD: Packed Double-FP Horizontal Subtract.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VHSUBPD m128 xmm xmm
|
|
// VHSUBPD m256 ymm ymm
|
|
// VHSUBPD xmm xmm xmm
|
|
// VHSUBPD ymm ymm ymm
|
|
//
|
|
// Construct and append a VHSUBPD instruction to the active function.
|
|
func (c *Context) VHSUBPD(mxy, xy, xy1 operand.Op) {
|
|
c.addinstruction(x86.VHSUBPD(mxy, xy, xy1))
|
|
}
|
|
|
|
// VHSUBPD: Packed Double-FP Horizontal Subtract.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VHSUBPD m128 xmm xmm
|
|
// VHSUBPD m256 ymm ymm
|
|
// VHSUBPD xmm xmm xmm
|
|
// VHSUBPD ymm ymm ymm
|
|
//
|
|
// Construct and append a VHSUBPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VHSUBPD(mxy, xy, xy1 operand.Op) { ctx.VHSUBPD(mxy, xy, xy1) }
|
|
|
|
// VHSUBPS: Packed Single-FP Horizontal Subtract.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VHSUBPS m128 xmm xmm
|
|
// VHSUBPS m256 ymm ymm
|
|
// VHSUBPS xmm xmm xmm
|
|
// VHSUBPS ymm ymm ymm
|
|
//
|
|
// Construct and append a VHSUBPS instruction to the active function.
|
|
func (c *Context) VHSUBPS(mxy, xy, xy1 operand.Op) {
|
|
c.addinstruction(x86.VHSUBPS(mxy, xy, xy1))
|
|
}
|
|
|
|
// VHSUBPS: Packed Single-FP Horizontal Subtract.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VHSUBPS m128 xmm xmm
|
|
// VHSUBPS m256 ymm ymm
|
|
// VHSUBPS xmm xmm xmm
|
|
// VHSUBPS ymm ymm ymm
|
|
//
|
|
// Construct and append a VHSUBPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VHSUBPS(mxy, xy, xy1 operand.Op) { ctx.VHSUBPS(mxy, xy, xy1) }
|
|
|
|
// VINSERTF128: Insert Packed Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VINSERTF128 imm8 m128 ymm ymm
|
|
// VINSERTF128 imm8 xmm ymm ymm
|
|
//
|
|
// Construct and append a VINSERTF128 instruction to the active function.
|
|
func (c *Context) VINSERTF128(i, mx, y, y1 operand.Op) {
|
|
c.addinstruction(x86.VINSERTF128(i, mx, y, y1))
|
|
}
|
|
|
|
// VINSERTF128: Insert Packed Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VINSERTF128 imm8 m128 ymm ymm
|
|
// VINSERTF128 imm8 xmm ymm ymm
|
|
//
|
|
// Construct and append a VINSERTF128 instruction to the active function.
|
|
// Operates on the global context.
|
|
func VINSERTF128(i, mx, y, y1 operand.Op) { ctx.VINSERTF128(i, mx, y, y1) }
|
|
|
|
// VINSERTF32X4: Insert 128 Bits of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VINSERTF32X4 imm8 m128 ymm k ymm
|
|
// VINSERTF32X4 imm8 m128 ymm ymm
|
|
// VINSERTF32X4 imm8 xmm ymm k ymm
|
|
// VINSERTF32X4 imm8 xmm ymm ymm
|
|
// VINSERTF32X4 imm8 m128 zmm k zmm
|
|
// VINSERTF32X4 imm8 m128 zmm zmm
|
|
// VINSERTF32X4 imm8 xmm zmm k zmm
|
|
// VINSERTF32X4 imm8 xmm zmm zmm
|
|
//
|
|
// Construct and append a VINSERTF32X4 instruction to the active function.
|
|
func (c *Context) VINSERTF32X4(ops ...operand.Op) {
|
|
c.addinstruction(x86.VINSERTF32X4(ops...))
|
|
}
|
|
|
|
// VINSERTF32X4: Insert 128 Bits of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VINSERTF32X4 imm8 m128 ymm k ymm
|
|
// VINSERTF32X4 imm8 m128 ymm ymm
|
|
// VINSERTF32X4 imm8 xmm ymm k ymm
|
|
// VINSERTF32X4 imm8 xmm ymm ymm
|
|
// VINSERTF32X4 imm8 m128 zmm k zmm
|
|
// VINSERTF32X4 imm8 m128 zmm zmm
|
|
// VINSERTF32X4 imm8 xmm zmm k zmm
|
|
// VINSERTF32X4 imm8 xmm zmm zmm
|
|
//
|
|
// Construct and append a VINSERTF32X4 instruction to the active function.
|
|
// Operates on the global context.
|
|
func VINSERTF32X4(ops ...operand.Op) { ctx.VINSERTF32X4(ops...) }
|
|
|
|
// VINSERTF32X4_Z: Insert 128 Bits of Packed Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VINSERTF32X4.Z imm8 m128 ymm k ymm
|
|
// VINSERTF32X4.Z imm8 xmm ymm k ymm
|
|
// VINSERTF32X4.Z imm8 m128 zmm k zmm
|
|
// VINSERTF32X4.Z imm8 xmm zmm k zmm
|
|
//
|
|
// Construct and append a VINSERTF32X4.Z instruction to the active function.
|
|
func (c *Context) VINSERTF32X4_Z(i, mx, yz, k, yz1 operand.Op) {
|
|
c.addinstruction(x86.VINSERTF32X4_Z(i, mx, yz, k, yz1))
|
|
}
|
|
|
|
// VINSERTF32X4_Z: Insert 128 Bits of Packed Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VINSERTF32X4.Z imm8 m128 ymm k ymm
|
|
// VINSERTF32X4.Z imm8 xmm ymm k ymm
|
|
// VINSERTF32X4.Z imm8 m128 zmm k zmm
|
|
// VINSERTF32X4.Z imm8 xmm zmm k zmm
|
|
//
|
|
// Construct and append a VINSERTF32X4.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VINSERTF32X4_Z(i, mx, yz, k, yz1 operand.Op) { ctx.VINSERTF32X4_Z(i, mx, yz, k, yz1) }
|
|
|
|
// VINSERTF32X8: Insert 256 Bits of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VINSERTF32X8 imm8 m256 zmm k zmm
|
|
// VINSERTF32X8 imm8 m256 zmm zmm
|
|
// VINSERTF32X8 imm8 ymm zmm k zmm
|
|
// VINSERTF32X8 imm8 ymm zmm zmm
|
|
//
|
|
// Construct and append a VINSERTF32X8 instruction to the active function.
|
|
func (c *Context) VINSERTF32X8(ops ...operand.Op) {
|
|
c.addinstruction(x86.VINSERTF32X8(ops...))
|
|
}
|
|
|
|
// VINSERTF32X8: Insert 256 Bits of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VINSERTF32X8 imm8 m256 zmm k zmm
|
|
// VINSERTF32X8 imm8 m256 zmm zmm
|
|
// VINSERTF32X8 imm8 ymm zmm k zmm
|
|
// VINSERTF32X8 imm8 ymm zmm zmm
|
|
//
|
|
// Construct and append a VINSERTF32X8 instruction to the active function.
|
|
// Operates on the global context.
|
|
func VINSERTF32X8(ops ...operand.Op) { ctx.VINSERTF32X8(ops...) }
|
|
|
|
// VINSERTF32X8_Z: Insert 256 Bits of Packed Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VINSERTF32X8.Z imm8 m256 zmm k zmm
|
|
// VINSERTF32X8.Z imm8 ymm zmm k zmm
|
|
//
|
|
// Construct and append a VINSERTF32X8.Z instruction to the active function.
|
|
func (c *Context) VINSERTF32X8_Z(i, my, z, k, z1 operand.Op) {
|
|
c.addinstruction(x86.VINSERTF32X8_Z(i, my, z, k, z1))
|
|
}
|
|
|
|
// VINSERTF32X8_Z: Insert 256 Bits of Packed Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VINSERTF32X8.Z imm8 m256 zmm k zmm
|
|
// VINSERTF32X8.Z imm8 ymm zmm k zmm
|
|
//
|
|
// Construct and append a VINSERTF32X8.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VINSERTF32X8_Z(i, my, z, k, z1 operand.Op) { ctx.VINSERTF32X8_Z(i, my, z, k, z1) }
|
|
|
|
// VINSERTF64X2: Insert 128 Bits of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VINSERTF64X2 imm8 m128 ymm k ymm
|
|
// VINSERTF64X2 imm8 m128 ymm ymm
|
|
// VINSERTF64X2 imm8 xmm ymm k ymm
|
|
// VINSERTF64X2 imm8 xmm ymm ymm
|
|
// VINSERTF64X2 imm8 m128 zmm k zmm
|
|
// VINSERTF64X2 imm8 m128 zmm zmm
|
|
// VINSERTF64X2 imm8 xmm zmm k zmm
|
|
// VINSERTF64X2 imm8 xmm zmm zmm
|
|
//
|
|
// Construct and append a VINSERTF64X2 instruction to the active function.
|
|
func (c *Context) VINSERTF64X2(ops ...operand.Op) {
|
|
c.addinstruction(x86.VINSERTF64X2(ops...))
|
|
}
|
|
|
|
// VINSERTF64X2: Insert 128 Bits of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VINSERTF64X2 imm8 m128 ymm k ymm
|
|
// VINSERTF64X2 imm8 m128 ymm ymm
|
|
// VINSERTF64X2 imm8 xmm ymm k ymm
|
|
// VINSERTF64X2 imm8 xmm ymm ymm
|
|
// VINSERTF64X2 imm8 m128 zmm k zmm
|
|
// VINSERTF64X2 imm8 m128 zmm zmm
|
|
// VINSERTF64X2 imm8 xmm zmm k zmm
|
|
// VINSERTF64X2 imm8 xmm zmm zmm
|
|
//
|
|
// Construct and append a VINSERTF64X2 instruction to the active function.
|
|
// Operates on the global context.
|
|
func VINSERTF64X2(ops ...operand.Op) { ctx.VINSERTF64X2(ops...) }
|
|
|
|
// VINSERTF64X2_Z: Insert 128 Bits of Packed Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VINSERTF64X2.Z imm8 m128 ymm k ymm
|
|
// VINSERTF64X2.Z imm8 xmm ymm k ymm
|
|
// VINSERTF64X2.Z imm8 m128 zmm k zmm
|
|
// VINSERTF64X2.Z imm8 xmm zmm k zmm
|
|
//
|
|
// Construct and append a VINSERTF64X2.Z instruction to the active function.
|
|
func (c *Context) VINSERTF64X2_Z(i, mx, yz, k, yz1 operand.Op) {
|
|
c.addinstruction(x86.VINSERTF64X2_Z(i, mx, yz, k, yz1))
|
|
}
|
|
|
|
// VINSERTF64X2_Z: Insert 128 Bits of Packed Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VINSERTF64X2.Z imm8 m128 ymm k ymm
|
|
// VINSERTF64X2.Z imm8 xmm ymm k ymm
|
|
// VINSERTF64X2.Z imm8 m128 zmm k zmm
|
|
// VINSERTF64X2.Z imm8 xmm zmm k zmm
|
|
//
|
|
// Construct and append a VINSERTF64X2.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VINSERTF64X2_Z(i, mx, yz, k, yz1 operand.Op) { ctx.VINSERTF64X2_Z(i, mx, yz, k, yz1) }
|
|
|
|
// VINSERTF64X4: Insert 256 Bits of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VINSERTF64X4 imm8 m256 zmm k zmm
|
|
// VINSERTF64X4 imm8 m256 zmm zmm
|
|
// VINSERTF64X4 imm8 ymm zmm k zmm
|
|
// VINSERTF64X4 imm8 ymm zmm zmm
|
|
//
|
|
// Construct and append a VINSERTF64X4 instruction to the active function.
|
|
func (c *Context) VINSERTF64X4(ops ...operand.Op) {
|
|
c.addinstruction(x86.VINSERTF64X4(ops...))
|
|
}
|
|
|
|
// VINSERTF64X4: Insert 256 Bits of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VINSERTF64X4 imm8 m256 zmm k zmm
|
|
// VINSERTF64X4 imm8 m256 zmm zmm
|
|
// VINSERTF64X4 imm8 ymm zmm k zmm
|
|
// VINSERTF64X4 imm8 ymm zmm zmm
|
|
//
|
|
// Construct and append a VINSERTF64X4 instruction to the active function.
|
|
// Operates on the global context.
|
|
func VINSERTF64X4(ops ...operand.Op) { ctx.VINSERTF64X4(ops...) }
|
|
|
|
// VINSERTF64X4_Z: Insert 256 Bits of Packed Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VINSERTF64X4.Z imm8 m256 zmm k zmm
|
|
// VINSERTF64X4.Z imm8 ymm zmm k zmm
|
|
//
|
|
// Construct and append a VINSERTF64X4.Z instruction to the active function.
|
|
func (c *Context) VINSERTF64X4_Z(i, my, z, k, z1 operand.Op) {
|
|
c.addinstruction(x86.VINSERTF64X4_Z(i, my, z, k, z1))
|
|
}
|
|
|
|
// VINSERTF64X4_Z: Insert 256 Bits of Packed Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VINSERTF64X4.Z imm8 m256 zmm k zmm
|
|
// VINSERTF64X4.Z imm8 ymm zmm k zmm
|
|
//
|
|
// Construct and append a VINSERTF64X4.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VINSERTF64X4_Z(i, my, z, k, z1 operand.Op) { ctx.VINSERTF64X4_Z(i, my, z, k, z1) }
|
|
|
|
// VINSERTI128: Insert Packed Integer Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VINSERTI128 imm8 m128 ymm ymm
|
|
// VINSERTI128 imm8 xmm ymm ymm
|
|
//
|
|
// Construct and append a VINSERTI128 instruction to the active function.
|
|
func (c *Context) VINSERTI128(i, mx, y, y1 operand.Op) {
|
|
c.addinstruction(x86.VINSERTI128(i, mx, y, y1))
|
|
}
|
|
|
|
// VINSERTI128: Insert Packed Integer Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VINSERTI128 imm8 m128 ymm ymm
|
|
// VINSERTI128 imm8 xmm ymm ymm
|
|
//
|
|
// Construct and append a VINSERTI128 instruction to the active function.
|
|
// Operates on the global context.
|
|
func VINSERTI128(i, mx, y, y1 operand.Op) { ctx.VINSERTI128(i, mx, y, y1) }
|
|
|
|
// VINSERTI32X4: Insert 128 Bits of Packed Doubleword Integer Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VINSERTI32X4 imm8 m128 ymm k ymm
|
|
// VINSERTI32X4 imm8 m128 ymm ymm
|
|
// VINSERTI32X4 imm8 xmm ymm k ymm
|
|
// VINSERTI32X4 imm8 xmm ymm ymm
|
|
// VINSERTI32X4 imm8 m128 zmm k zmm
|
|
// VINSERTI32X4 imm8 m128 zmm zmm
|
|
// VINSERTI32X4 imm8 xmm zmm k zmm
|
|
// VINSERTI32X4 imm8 xmm zmm zmm
|
|
//
|
|
// Construct and append a VINSERTI32X4 instruction to the active function.
|
|
func (c *Context) VINSERTI32X4(ops ...operand.Op) {
|
|
c.addinstruction(x86.VINSERTI32X4(ops...))
|
|
}
|
|
|
|
// VINSERTI32X4: Insert 128 Bits of Packed Doubleword Integer Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VINSERTI32X4 imm8 m128 ymm k ymm
|
|
// VINSERTI32X4 imm8 m128 ymm ymm
|
|
// VINSERTI32X4 imm8 xmm ymm k ymm
|
|
// VINSERTI32X4 imm8 xmm ymm ymm
|
|
// VINSERTI32X4 imm8 m128 zmm k zmm
|
|
// VINSERTI32X4 imm8 m128 zmm zmm
|
|
// VINSERTI32X4 imm8 xmm zmm k zmm
|
|
// VINSERTI32X4 imm8 xmm zmm zmm
|
|
//
|
|
// Construct and append a VINSERTI32X4 instruction to the active function.
|
|
// Operates on the global context.
|
|
func VINSERTI32X4(ops ...operand.Op) { ctx.VINSERTI32X4(ops...) }
|
|
|
|
// VINSERTI32X4_Z: Insert 128 Bits of Packed Doubleword Integer Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VINSERTI32X4.Z imm8 m128 ymm k ymm
|
|
// VINSERTI32X4.Z imm8 xmm ymm k ymm
|
|
// VINSERTI32X4.Z imm8 m128 zmm k zmm
|
|
// VINSERTI32X4.Z imm8 xmm zmm k zmm
|
|
//
|
|
// Construct and append a VINSERTI32X4.Z instruction to the active function.
|
|
func (c *Context) VINSERTI32X4_Z(i, mx, yz, k, yz1 operand.Op) {
|
|
c.addinstruction(x86.VINSERTI32X4_Z(i, mx, yz, k, yz1))
|
|
}
|
|
|
|
// VINSERTI32X4_Z: Insert 128 Bits of Packed Doubleword Integer Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VINSERTI32X4.Z imm8 m128 ymm k ymm
|
|
// VINSERTI32X4.Z imm8 xmm ymm k ymm
|
|
// VINSERTI32X4.Z imm8 m128 zmm k zmm
|
|
// VINSERTI32X4.Z imm8 xmm zmm k zmm
|
|
//
|
|
// Construct and append a VINSERTI32X4.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VINSERTI32X4_Z(i, mx, yz, k, yz1 operand.Op) { ctx.VINSERTI32X4_Z(i, mx, yz, k, yz1) }
|
|
|
|
// VINSERTI32X8: Insert 256 Bits of Packed Doubleword Integer Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VINSERTI32X8 imm8 m256 zmm k zmm
|
|
// VINSERTI32X8 imm8 m256 zmm zmm
|
|
// VINSERTI32X8 imm8 ymm zmm k zmm
|
|
// VINSERTI32X8 imm8 ymm zmm zmm
|
|
//
|
|
// Construct and append a VINSERTI32X8 instruction to the active function.
|
|
func (c *Context) VINSERTI32X8(ops ...operand.Op) {
|
|
c.addinstruction(x86.VINSERTI32X8(ops...))
|
|
}
|
|
|
|
// VINSERTI32X8: Insert 256 Bits of Packed Doubleword Integer Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VINSERTI32X8 imm8 m256 zmm k zmm
|
|
// VINSERTI32X8 imm8 m256 zmm zmm
|
|
// VINSERTI32X8 imm8 ymm zmm k zmm
|
|
// VINSERTI32X8 imm8 ymm zmm zmm
|
|
//
|
|
// Construct and append a VINSERTI32X8 instruction to the active function.
|
|
// Operates on the global context.
|
|
func VINSERTI32X8(ops ...operand.Op) { ctx.VINSERTI32X8(ops...) }
|
|
|
|
// VINSERTI32X8_Z: Insert 256 Bits of Packed Doubleword Integer Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VINSERTI32X8.Z imm8 m256 zmm k zmm
|
|
// VINSERTI32X8.Z imm8 ymm zmm k zmm
|
|
//
|
|
// Construct and append a VINSERTI32X8.Z instruction to the active function.
|
|
func (c *Context) VINSERTI32X8_Z(i, my, z, k, z1 operand.Op) {
|
|
c.addinstruction(x86.VINSERTI32X8_Z(i, my, z, k, z1))
|
|
}
|
|
|
|
// VINSERTI32X8_Z: Insert 256 Bits of Packed Doubleword Integer Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VINSERTI32X8.Z imm8 m256 zmm k zmm
|
|
// VINSERTI32X8.Z imm8 ymm zmm k zmm
|
|
//
|
|
// Construct and append a VINSERTI32X8.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VINSERTI32X8_Z(i, my, z, k, z1 operand.Op) { ctx.VINSERTI32X8_Z(i, my, z, k, z1) }
|
|
|
|
// VINSERTI64X2: Insert 128 Bits of Packed Quadword Integer Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VINSERTI64X2 imm8 m128 ymm k ymm
|
|
// VINSERTI64X2 imm8 m128 ymm ymm
|
|
// VINSERTI64X2 imm8 xmm ymm k ymm
|
|
// VINSERTI64X2 imm8 xmm ymm ymm
|
|
// VINSERTI64X2 imm8 m128 zmm k zmm
|
|
// VINSERTI64X2 imm8 m128 zmm zmm
|
|
// VINSERTI64X2 imm8 xmm zmm k zmm
|
|
// VINSERTI64X2 imm8 xmm zmm zmm
|
|
//
|
|
// Construct and append a VINSERTI64X2 instruction to the active function.
|
|
func (c *Context) VINSERTI64X2(ops ...operand.Op) {
|
|
c.addinstruction(x86.VINSERTI64X2(ops...))
|
|
}
|
|
|
|
// VINSERTI64X2: Insert 128 Bits of Packed Quadword Integer Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VINSERTI64X2 imm8 m128 ymm k ymm
|
|
// VINSERTI64X2 imm8 m128 ymm ymm
|
|
// VINSERTI64X2 imm8 xmm ymm k ymm
|
|
// VINSERTI64X2 imm8 xmm ymm ymm
|
|
// VINSERTI64X2 imm8 m128 zmm k zmm
|
|
// VINSERTI64X2 imm8 m128 zmm zmm
|
|
// VINSERTI64X2 imm8 xmm zmm k zmm
|
|
// VINSERTI64X2 imm8 xmm zmm zmm
|
|
//
|
|
// Construct and append a VINSERTI64X2 instruction to the active function.
|
|
// Operates on the global context.
|
|
func VINSERTI64X2(ops ...operand.Op) { ctx.VINSERTI64X2(ops...) }
|
|
|
|
// VINSERTI64X2_Z: Insert 128 Bits of Packed Quadword Integer Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VINSERTI64X2.Z imm8 m128 ymm k ymm
|
|
// VINSERTI64X2.Z imm8 xmm ymm k ymm
|
|
// VINSERTI64X2.Z imm8 m128 zmm k zmm
|
|
// VINSERTI64X2.Z imm8 xmm zmm k zmm
|
|
//
|
|
// Construct and append a VINSERTI64X2.Z instruction to the active function.
|
|
func (c *Context) VINSERTI64X2_Z(i, mx, yz, k, yz1 operand.Op) {
|
|
c.addinstruction(x86.VINSERTI64X2_Z(i, mx, yz, k, yz1))
|
|
}
|
|
|
|
// VINSERTI64X2_Z: Insert 128 Bits of Packed Quadword Integer Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VINSERTI64X2.Z imm8 m128 ymm k ymm
|
|
// VINSERTI64X2.Z imm8 xmm ymm k ymm
|
|
// VINSERTI64X2.Z imm8 m128 zmm k zmm
|
|
// VINSERTI64X2.Z imm8 xmm zmm k zmm
|
|
//
|
|
// Construct and append a VINSERTI64X2.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VINSERTI64X2_Z(i, mx, yz, k, yz1 operand.Op) { ctx.VINSERTI64X2_Z(i, mx, yz, k, yz1) }
|
|
|
|
// VINSERTI64X4: Insert 256 Bits of Packed Quadword Integer Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VINSERTI64X4 imm8 m256 zmm k zmm
|
|
// VINSERTI64X4 imm8 m256 zmm zmm
|
|
// VINSERTI64X4 imm8 ymm zmm k zmm
|
|
// VINSERTI64X4 imm8 ymm zmm zmm
|
|
//
|
|
// Construct and append a VINSERTI64X4 instruction to the active function.
|
|
func (c *Context) VINSERTI64X4(ops ...operand.Op) {
|
|
c.addinstruction(x86.VINSERTI64X4(ops...))
|
|
}
|
|
|
|
// VINSERTI64X4: Insert 256 Bits of Packed Quadword Integer Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VINSERTI64X4 imm8 m256 zmm k zmm
|
|
// VINSERTI64X4 imm8 m256 zmm zmm
|
|
// VINSERTI64X4 imm8 ymm zmm k zmm
|
|
// VINSERTI64X4 imm8 ymm zmm zmm
|
|
//
|
|
// Construct and append a VINSERTI64X4 instruction to the active function.
|
|
// Operates on the global context.
|
|
func VINSERTI64X4(ops ...operand.Op) { ctx.VINSERTI64X4(ops...) }
|
|
|
|
// VINSERTI64X4_Z: Insert 256 Bits of Packed Quadword Integer Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VINSERTI64X4.Z imm8 m256 zmm k zmm
|
|
// VINSERTI64X4.Z imm8 ymm zmm k zmm
|
|
//
|
|
// Construct and append a VINSERTI64X4.Z instruction to the active function.
|
|
func (c *Context) VINSERTI64X4_Z(i, my, z, k, z1 operand.Op) {
|
|
c.addinstruction(x86.VINSERTI64X4_Z(i, my, z, k, z1))
|
|
}
|
|
|
|
// VINSERTI64X4_Z: Insert 256 Bits of Packed Quadword Integer Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VINSERTI64X4.Z imm8 m256 zmm k zmm
|
|
// VINSERTI64X4.Z imm8 ymm zmm k zmm
|
|
//
|
|
// Construct and append a VINSERTI64X4.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VINSERTI64X4_Z(i, my, z, k, z1 operand.Op) { ctx.VINSERTI64X4_Z(i, my, z, k, z1) }
|
|
|
|
// VINSERTPS: Insert Packed Single Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VINSERTPS imm8 m32 xmm xmm
|
|
// VINSERTPS imm8 xmm xmm xmm
|
|
//
|
|
// Construct and append a VINSERTPS instruction to the active function.
|
|
func (c *Context) VINSERTPS(i, mx, x, x1 operand.Op) {
|
|
c.addinstruction(x86.VINSERTPS(i, mx, x, x1))
|
|
}
|
|
|
|
// VINSERTPS: Insert Packed Single Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VINSERTPS imm8 m32 xmm xmm
|
|
// VINSERTPS imm8 xmm xmm xmm
|
|
//
|
|
// Construct and append a VINSERTPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VINSERTPS(i, mx, x, x1 operand.Op) { ctx.VINSERTPS(i, mx, x, x1) }
|
|
|
|
// VLDDQU: Load Unaligned Integer 128 Bits.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VLDDQU m128 xmm
|
|
// VLDDQU m256 ymm
|
|
//
|
|
// Construct and append a VLDDQU instruction to the active function.
|
|
func (c *Context) VLDDQU(m, xy operand.Op) {
|
|
c.addinstruction(x86.VLDDQU(m, xy))
|
|
}
|
|
|
|
// VLDDQU: Load Unaligned Integer 128 Bits.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VLDDQU m128 xmm
|
|
// VLDDQU m256 ymm
|
|
//
|
|
// Construct and append a VLDDQU instruction to the active function.
|
|
// Operates on the global context.
|
|
func VLDDQU(m, xy operand.Op) { ctx.VLDDQU(m, xy) }
|
|
|
|
// VLDMXCSR: Load MXCSR Register.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VLDMXCSR m32
|
|
//
|
|
// Construct and append a VLDMXCSR instruction to the active function.
|
|
func (c *Context) VLDMXCSR(m operand.Op) {
|
|
c.addinstruction(x86.VLDMXCSR(m))
|
|
}
|
|
|
|
// VLDMXCSR: Load MXCSR Register.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VLDMXCSR m32
|
|
//
|
|
// Construct and append a VLDMXCSR instruction to the active function.
|
|
// Operates on the global context.
|
|
func VLDMXCSR(m operand.Op) { ctx.VLDMXCSR(m) }
|
|
|
|
// VMASKMOVDQU: Store Selected Bytes of Double Quadword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMASKMOVDQU xmm xmm
|
|
//
|
|
// Construct and append a VMASKMOVDQU instruction to the active function.
|
|
func (c *Context) VMASKMOVDQU(x, x1 operand.Op) {
|
|
c.addinstruction(x86.VMASKMOVDQU(x, x1))
|
|
}
|
|
|
|
// VMASKMOVDQU: Store Selected Bytes of Double Quadword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMASKMOVDQU xmm xmm
|
|
//
|
|
// Construct and append a VMASKMOVDQU instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMASKMOVDQU(x, x1 operand.Op) { ctx.VMASKMOVDQU(x, x1) }
|
|
|
|
// VMASKMOVPD: Conditional Move Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMASKMOVPD m128 xmm xmm
|
|
// VMASKMOVPD m256 ymm ymm
|
|
// VMASKMOVPD xmm xmm m128
|
|
// VMASKMOVPD ymm ymm m256
|
|
//
|
|
// Construct and append a VMASKMOVPD instruction to the active function.
|
|
func (c *Context) VMASKMOVPD(mxy, xy, mxy1 operand.Op) {
|
|
c.addinstruction(x86.VMASKMOVPD(mxy, xy, mxy1))
|
|
}
|
|
|
|
// VMASKMOVPD: Conditional Move Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMASKMOVPD m128 xmm xmm
|
|
// VMASKMOVPD m256 ymm ymm
|
|
// VMASKMOVPD xmm xmm m128
|
|
// VMASKMOVPD ymm ymm m256
|
|
//
|
|
// Construct and append a VMASKMOVPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMASKMOVPD(mxy, xy, mxy1 operand.Op) { ctx.VMASKMOVPD(mxy, xy, mxy1) }
|
|
|
|
// VMASKMOVPS: Conditional Move Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMASKMOVPS m128 xmm xmm
|
|
// VMASKMOVPS m256 ymm ymm
|
|
// VMASKMOVPS xmm xmm m128
|
|
// VMASKMOVPS ymm ymm m256
|
|
//
|
|
// Construct and append a VMASKMOVPS instruction to the active function.
|
|
func (c *Context) VMASKMOVPS(mxy, xy, mxy1 operand.Op) {
|
|
c.addinstruction(x86.VMASKMOVPS(mxy, xy, mxy1))
|
|
}
|
|
|
|
// VMASKMOVPS: Conditional Move Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMASKMOVPS m128 xmm xmm
|
|
// VMASKMOVPS m256 ymm ymm
|
|
// VMASKMOVPS xmm xmm m128
|
|
// VMASKMOVPS ymm ymm m256
|
|
//
|
|
// Construct and append a VMASKMOVPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMASKMOVPS(mxy, xy, mxy1 operand.Op) { ctx.VMASKMOVPS(mxy, xy, mxy1) }
|
|
|
|
// VMAXPD: Return Maximum Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMAXPD m128 xmm xmm
|
|
// VMAXPD m256 ymm ymm
|
|
// VMAXPD xmm xmm xmm
|
|
// VMAXPD ymm ymm ymm
|
|
// VMAXPD m128 xmm k xmm
|
|
// VMAXPD m256 ymm k ymm
|
|
// VMAXPD xmm xmm k xmm
|
|
// VMAXPD ymm ymm k ymm
|
|
// VMAXPD m512 zmm k zmm
|
|
// VMAXPD m512 zmm zmm
|
|
// VMAXPD zmm zmm k zmm
|
|
// VMAXPD zmm zmm zmm
|
|
//
|
|
// Construct and append a VMAXPD instruction to the active function.
|
|
func (c *Context) VMAXPD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VMAXPD(ops...))
|
|
}
|
|
|
|
// VMAXPD: Return Maximum Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMAXPD m128 xmm xmm
|
|
// VMAXPD m256 ymm ymm
|
|
// VMAXPD xmm xmm xmm
|
|
// VMAXPD ymm ymm ymm
|
|
// VMAXPD m128 xmm k xmm
|
|
// VMAXPD m256 ymm k ymm
|
|
// VMAXPD xmm xmm k xmm
|
|
// VMAXPD ymm ymm k ymm
|
|
// VMAXPD m512 zmm k zmm
|
|
// VMAXPD m512 zmm zmm
|
|
// VMAXPD zmm zmm k zmm
|
|
// VMAXPD zmm zmm zmm
|
|
//
|
|
// Construct and append a VMAXPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMAXPD(ops ...operand.Op) { ctx.VMAXPD(ops...) }
|
|
|
|
// VMAXPD_BCST: Return Maximum Packed Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMAXPD.BCST m64 xmm k xmm
|
|
// VMAXPD.BCST m64 xmm xmm
|
|
// VMAXPD.BCST m64 ymm k ymm
|
|
// VMAXPD.BCST m64 ymm ymm
|
|
// VMAXPD.BCST m64 zmm k zmm
|
|
// VMAXPD.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VMAXPD.BCST instruction to the active function.
|
|
func (c *Context) VMAXPD_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VMAXPD_BCST(ops...))
|
|
}
|
|
|
|
// VMAXPD_BCST: Return Maximum Packed Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMAXPD.BCST m64 xmm k xmm
|
|
// VMAXPD.BCST m64 xmm xmm
|
|
// VMAXPD.BCST m64 ymm k ymm
|
|
// VMAXPD.BCST m64 ymm ymm
|
|
// VMAXPD.BCST m64 zmm k zmm
|
|
// VMAXPD.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VMAXPD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMAXPD_BCST(ops ...operand.Op) { ctx.VMAXPD_BCST(ops...) }
|
|
|
|
// VMAXPD_BCST_Z: Return Maximum Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMAXPD.BCST.Z m64 xmm k xmm
|
|
// VMAXPD.BCST.Z m64 ymm k ymm
|
|
// VMAXPD.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VMAXPD.BCST.Z instruction to the active function.
|
|
func (c *Context) VMAXPD_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VMAXPD_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VMAXPD_BCST_Z: Return Maximum Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMAXPD.BCST.Z m64 xmm k xmm
|
|
// VMAXPD.BCST.Z m64 ymm k ymm
|
|
// VMAXPD.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VMAXPD.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMAXPD_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VMAXPD_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VMAXPD_SAE: Return Maximum Packed Double-Precision Floating-Point Values (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMAXPD.SAE zmm zmm k zmm
|
|
// VMAXPD.SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VMAXPD.SAE instruction to the active function.
|
|
func (c *Context) VMAXPD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VMAXPD_SAE(ops...))
|
|
}
|
|
|
|
// VMAXPD_SAE: Return Maximum Packed Double-Precision Floating-Point Values (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMAXPD.SAE zmm zmm k zmm
|
|
// VMAXPD.SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VMAXPD.SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMAXPD_SAE(ops ...operand.Op) { ctx.VMAXPD_SAE(ops...) }
|
|
|
|
// VMAXPD_SAE_Z: Return Maximum Packed Double-Precision Floating-Point Values (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMAXPD.SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VMAXPD.SAE.Z instruction to the active function.
|
|
func (c *Context) VMAXPD_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VMAXPD_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VMAXPD_SAE_Z: Return Maximum Packed Double-Precision Floating-Point Values (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMAXPD.SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VMAXPD.SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMAXPD_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VMAXPD_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VMAXPD_Z: Return Maximum Packed Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMAXPD.Z m128 xmm k xmm
|
|
// VMAXPD.Z m256 ymm k ymm
|
|
// VMAXPD.Z xmm xmm k xmm
|
|
// VMAXPD.Z ymm ymm k ymm
|
|
// VMAXPD.Z m512 zmm k zmm
|
|
// VMAXPD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VMAXPD.Z instruction to the active function.
|
|
func (c *Context) VMAXPD_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VMAXPD_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VMAXPD_Z: Return Maximum Packed Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMAXPD.Z m128 xmm k xmm
|
|
// VMAXPD.Z m256 ymm k ymm
|
|
// VMAXPD.Z xmm xmm k xmm
|
|
// VMAXPD.Z ymm ymm k ymm
|
|
// VMAXPD.Z m512 zmm k zmm
|
|
// VMAXPD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VMAXPD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMAXPD_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VMAXPD_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VMAXPS: Return Maximum Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMAXPS m128 xmm xmm
|
|
// VMAXPS m256 ymm ymm
|
|
// VMAXPS xmm xmm xmm
|
|
// VMAXPS ymm ymm ymm
|
|
// VMAXPS m128 xmm k xmm
|
|
// VMAXPS m256 ymm k ymm
|
|
// VMAXPS xmm xmm k xmm
|
|
// VMAXPS ymm ymm k ymm
|
|
// VMAXPS m512 zmm k zmm
|
|
// VMAXPS m512 zmm zmm
|
|
// VMAXPS zmm zmm k zmm
|
|
// VMAXPS zmm zmm zmm
|
|
//
|
|
// Construct and append a VMAXPS instruction to the active function.
|
|
func (c *Context) VMAXPS(ops ...operand.Op) {
|
|
c.addinstruction(x86.VMAXPS(ops...))
|
|
}
|
|
|
|
// VMAXPS: Return Maximum Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMAXPS m128 xmm xmm
|
|
// VMAXPS m256 ymm ymm
|
|
// VMAXPS xmm xmm xmm
|
|
// VMAXPS ymm ymm ymm
|
|
// VMAXPS m128 xmm k xmm
|
|
// VMAXPS m256 ymm k ymm
|
|
// VMAXPS xmm xmm k xmm
|
|
// VMAXPS ymm ymm k ymm
|
|
// VMAXPS m512 zmm k zmm
|
|
// VMAXPS m512 zmm zmm
|
|
// VMAXPS zmm zmm k zmm
|
|
// VMAXPS zmm zmm zmm
|
|
//
|
|
// Construct and append a VMAXPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMAXPS(ops ...operand.Op) { ctx.VMAXPS(ops...) }
|
|
|
|
// VMAXPS_BCST: Return Maximum Packed Single-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMAXPS.BCST m32 xmm k xmm
|
|
// VMAXPS.BCST m32 xmm xmm
|
|
// VMAXPS.BCST m32 ymm k ymm
|
|
// VMAXPS.BCST m32 ymm ymm
|
|
// VMAXPS.BCST m32 zmm k zmm
|
|
// VMAXPS.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VMAXPS.BCST instruction to the active function.
|
|
func (c *Context) VMAXPS_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VMAXPS_BCST(ops...))
|
|
}
|
|
|
|
// VMAXPS_BCST: Return Maximum Packed Single-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMAXPS.BCST m32 xmm k xmm
|
|
// VMAXPS.BCST m32 xmm xmm
|
|
// VMAXPS.BCST m32 ymm k ymm
|
|
// VMAXPS.BCST m32 ymm ymm
|
|
// VMAXPS.BCST m32 zmm k zmm
|
|
// VMAXPS.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VMAXPS.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMAXPS_BCST(ops ...operand.Op) { ctx.VMAXPS_BCST(ops...) }
|
|
|
|
// VMAXPS_BCST_Z: Return Maximum Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMAXPS.BCST.Z m32 xmm k xmm
|
|
// VMAXPS.BCST.Z m32 ymm k ymm
|
|
// VMAXPS.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VMAXPS.BCST.Z instruction to the active function.
|
|
func (c *Context) VMAXPS_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VMAXPS_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VMAXPS_BCST_Z: Return Maximum Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMAXPS.BCST.Z m32 xmm k xmm
|
|
// VMAXPS.BCST.Z m32 ymm k ymm
|
|
// VMAXPS.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VMAXPS.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMAXPS_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VMAXPS_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VMAXPS_SAE: Return Maximum Packed Single-Precision Floating-Point Values (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMAXPS.SAE zmm zmm k zmm
|
|
// VMAXPS.SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VMAXPS.SAE instruction to the active function.
|
|
func (c *Context) VMAXPS_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VMAXPS_SAE(ops...))
|
|
}
|
|
|
|
// VMAXPS_SAE: Return Maximum Packed Single-Precision Floating-Point Values (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMAXPS.SAE zmm zmm k zmm
|
|
// VMAXPS.SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VMAXPS.SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMAXPS_SAE(ops ...operand.Op) { ctx.VMAXPS_SAE(ops...) }
|
|
|
|
// VMAXPS_SAE_Z: Return Maximum Packed Single-Precision Floating-Point Values (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMAXPS.SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VMAXPS.SAE.Z instruction to the active function.
|
|
func (c *Context) VMAXPS_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VMAXPS_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VMAXPS_SAE_Z: Return Maximum Packed Single-Precision Floating-Point Values (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMAXPS.SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VMAXPS.SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMAXPS_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VMAXPS_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VMAXPS_Z: Return Maximum Packed Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMAXPS.Z m128 xmm k xmm
|
|
// VMAXPS.Z m256 ymm k ymm
|
|
// VMAXPS.Z xmm xmm k xmm
|
|
// VMAXPS.Z ymm ymm k ymm
|
|
// VMAXPS.Z m512 zmm k zmm
|
|
// VMAXPS.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VMAXPS.Z instruction to the active function.
|
|
func (c *Context) VMAXPS_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VMAXPS_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VMAXPS_Z: Return Maximum Packed Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMAXPS.Z m128 xmm k xmm
|
|
// VMAXPS.Z m256 ymm k ymm
|
|
// VMAXPS.Z xmm xmm k xmm
|
|
// VMAXPS.Z ymm ymm k ymm
|
|
// VMAXPS.Z m512 zmm k zmm
|
|
// VMAXPS.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VMAXPS.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMAXPS_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VMAXPS_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VMAXSD: Return Maximum Scalar Double-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMAXSD m64 xmm xmm
|
|
// VMAXSD xmm xmm xmm
|
|
// VMAXSD m64 xmm k xmm
|
|
// VMAXSD xmm xmm k xmm
|
|
//
|
|
// Construct and append a VMAXSD instruction to the active function.
|
|
func (c *Context) VMAXSD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VMAXSD(ops...))
|
|
}
|
|
|
|
// VMAXSD: Return Maximum Scalar Double-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMAXSD m64 xmm xmm
|
|
// VMAXSD xmm xmm xmm
|
|
// VMAXSD m64 xmm k xmm
|
|
// VMAXSD xmm xmm k xmm
|
|
//
|
|
// Construct and append a VMAXSD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMAXSD(ops ...operand.Op) { ctx.VMAXSD(ops...) }
|
|
|
|
// VMAXSD_SAE: Return Maximum Scalar Double-Precision Floating-Point Value (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMAXSD.SAE xmm xmm k xmm
|
|
// VMAXSD.SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VMAXSD.SAE instruction to the active function.
|
|
func (c *Context) VMAXSD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VMAXSD_SAE(ops...))
|
|
}
|
|
|
|
// VMAXSD_SAE: Return Maximum Scalar Double-Precision Floating-Point Value (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMAXSD.SAE xmm xmm k xmm
|
|
// VMAXSD.SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VMAXSD.SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMAXSD_SAE(ops ...operand.Op) { ctx.VMAXSD_SAE(ops...) }
|
|
|
|
// VMAXSD_SAE_Z: Return Maximum Scalar Double-Precision Floating-Point Value (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMAXSD.SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VMAXSD.SAE.Z instruction to the active function.
|
|
func (c *Context) VMAXSD_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VMAXSD_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VMAXSD_SAE_Z: Return Maximum Scalar Double-Precision Floating-Point Value (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMAXSD.SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VMAXSD.SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMAXSD_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VMAXSD_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VMAXSD_Z: Return Maximum Scalar Double-Precision Floating-Point Value (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMAXSD.Z m64 xmm k xmm
|
|
// VMAXSD.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VMAXSD.Z instruction to the active function.
|
|
func (c *Context) VMAXSD_Z(mx, x, k, x1 operand.Op) {
|
|
c.addinstruction(x86.VMAXSD_Z(mx, x, k, x1))
|
|
}
|
|
|
|
// VMAXSD_Z: Return Maximum Scalar Double-Precision Floating-Point Value (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMAXSD.Z m64 xmm k xmm
|
|
// VMAXSD.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VMAXSD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMAXSD_Z(mx, x, k, x1 operand.Op) { ctx.VMAXSD_Z(mx, x, k, x1) }
|
|
|
|
// VMAXSS: Return Maximum Scalar Single-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMAXSS m32 xmm xmm
|
|
// VMAXSS xmm xmm xmm
|
|
// VMAXSS m32 xmm k xmm
|
|
// VMAXSS xmm xmm k xmm
|
|
//
|
|
// Construct and append a VMAXSS instruction to the active function.
|
|
func (c *Context) VMAXSS(ops ...operand.Op) {
|
|
c.addinstruction(x86.VMAXSS(ops...))
|
|
}
|
|
|
|
// VMAXSS: Return Maximum Scalar Single-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMAXSS m32 xmm xmm
|
|
// VMAXSS xmm xmm xmm
|
|
// VMAXSS m32 xmm k xmm
|
|
// VMAXSS xmm xmm k xmm
|
|
//
|
|
// Construct and append a VMAXSS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMAXSS(ops ...operand.Op) { ctx.VMAXSS(ops...) }
|
|
|
|
// VMAXSS_SAE: Return Maximum Scalar Single-Precision Floating-Point Value (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMAXSS.SAE xmm xmm k xmm
|
|
// VMAXSS.SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VMAXSS.SAE instruction to the active function.
|
|
func (c *Context) VMAXSS_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VMAXSS_SAE(ops...))
|
|
}
|
|
|
|
// VMAXSS_SAE: Return Maximum Scalar Single-Precision Floating-Point Value (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMAXSS.SAE xmm xmm k xmm
|
|
// VMAXSS.SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VMAXSS.SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMAXSS_SAE(ops ...operand.Op) { ctx.VMAXSS_SAE(ops...) }
|
|
|
|
// VMAXSS_SAE_Z: Return Maximum Scalar Single-Precision Floating-Point Value (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMAXSS.SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VMAXSS.SAE.Z instruction to the active function.
|
|
func (c *Context) VMAXSS_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VMAXSS_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VMAXSS_SAE_Z: Return Maximum Scalar Single-Precision Floating-Point Value (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMAXSS.SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VMAXSS.SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMAXSS_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VMAXSS_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VMAXSS_Z: Return Maximum Scalar Single-Precision Floating-Point Value (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMAXSS.Z m32 xmm k xmm
|
|
// VMAXSS.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VMAXSS.Z instruction to the active function.
|
|
func (c *Context) VMAXSS_Z(mx, x, k, x1 operand.Op) {
|
|
c.addinstruction(x86.VMAXSS_Z(mx, x, k, x1))
|
|
}
|
|
|
|
// VMAXSS_Z: Return Maximum Scalar Single-Precision Floating-Point Value (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMAXSS.Z m32 xmm k xmm
|
|
// VMAXSS.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VMAXSS.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMAXSS_Z(mx, x, k, x1 operand.Op) { ctx.VMAXSS_Z(mx, x, k, x1) }
|
|
|
|
// VMINPD: Return Minimum Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMINPD m128 xmm xmm
|
|
// VMINPD m256 ymm ymm
|
|
// VMINPD xmm xmm xmm
|
|
// VMINPD ymm ymm ymm
|
|
// VMINPD m128 xmm k xmm
|
|
// VMINPD m256 ymm k ymm
|
|
// VMINPD xmm xmm k xmm
|
|
// VMINPD ymm ymm k ymm
|
|
// VMINPD m512 zmm k zmm
|
|
// VMINPD m512 zmm zmm
|
|
// VMINPD zmm zmm k zmm
|
|
// VMINPD zmm zmm zmm
|
|
//
|
|
// Construct and append a VMINPD instruction to the active function.
|
|
func (c *Context) VMINPD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VMINPD(ops...))
|
|
}
|
|
|
|
// VMINPD: Return Minimum Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMINPD m128 xmm xmm
|
|
// VMINPD m256 ymm ymm
|
|
// VMINPD xmm xmm xmm
|
|
// VMINPD ymm ymm ymm
|
|
// VMINPD m128 xmm k xmm
|
|
// VMINPD m256 ymm k ymm
|
|
// VMINPD xmm xmm k xmm
|
|
// VMINPD ymm ymm k ymm
|
|
// VMINPD m512 zmm k zmm
|
|
// VMINPD m512 zmm zmm
|
|
// VMINPD zmm zmm k zmm
|
|
// VMINPD zmm zmm zmm
|
|
//
|
|
// Construct and append a VMINPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMINPD(ops ...operand.Op) { ctx.VMINPD(ops...) }
|
|
|
|
// VMINPD_BCST: Return Minimum Packed Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMINPD.BCST m64 xmm k xmm
|
|
// VMINPD.BCST m64 xmm xmm
|
|
// VMINPD.BCST m64 ymm k ymm
|
|
// VMINPD.BCST m64 ymm ymm
|
|
// VMINPD.BCST m64 zmm k zmm
|
|
// VMINPD.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VMINPD.BCST instruction to the active function.
|
|
func (c *Context) VMINPD_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VMINPD_BCST(ops...))
|
|
}
|
|
|
|
// VMINPD_BCST: Return Minimum Packed Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMINPD.BCST m64 xmm k xmm
|
|
// VMINPD.BCST m64 xmm xmm
|
|
// VMINPD.BCST m64 ymm k ymm
|
|
// VMINPD.BCST m64 ymm ymm
|
|
// VMINPD.BCST m64 zmm k zmm
|
|
// VMINPD.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VMINPD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMINPD_BCST(ops ...operand.Op) { ctx.VMINPD_BCST(ops...) }
|
|
|
|
// VMINPD_BCST_Z: Return Minimum Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMINPD.BCST.Z m64 xmm k xmm
|
|
// VMINPD.BCST.Z m64 ymm k ymm
|
|
// VMINPD.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VMINPD.BCST.Z instruction to the active function.
|
|
func (c *Context) VMINPD_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VMINPD_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VMINPD_BCST_Z: Return Minimum Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMINPD.BCST.Z m64 xmm k xmm
|
|
// VMINPD.BCST.Z m64 ymm k ymm
|
|
// VMINPD.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VMINPD.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMINPD_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VMINPD_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VMINPD_SAE: Return Minimum Packed Double-Precision Floating-Point Values (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMINPD.SAE zmm zmm k zmm
|
|
// VMINPD.SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VMINPD.SAE instruction to the active function.
|
|
func (c *Context) VMINPD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VMINPD_SAE(ops...))
|
|
}
|
|
|
|
// VMINPD_SAE: Return Minimum Packed Double-Precision Floating-Point Values (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMINPD.SAE zmm zmm k zmm
|
|
// VMINPD.SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VMINPD.SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMINPD_SAE(ops ...operand.Op) { ctx.VMINPD_SAE(ops...) }
|
|
|
|
// VMINPD_SAE_Z: Return Minimum Packed Double-Precision Floating-Point Values (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMINPD.SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VMINPD.SAE.Z instruction to the active function.
|
|
func (c *Context) VMINPD_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VMINPD_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VMINPD_SAE_Z: Return Minimum Packed Double-Precision Floating-Point Values (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMINPD.SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VMINPD.SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMINPD_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VMINPD_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VMINPD_Z: Return Minimum Packed Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMINPD.Z m128 xmm k xmm
|
|
// VMINPD.Z m256 ymm k ymm
|
|
// VMINPD.Z xmm xmm k xmm
|
|
// VMINPD.Z ymm ymm k ymm
|
|
// VMINPD.Z m512 zmm k zmm
|
|
// VMINPD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VMINPD.Z instruction to the active function.
|
|
func (c *Context) VMINPD_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VMINPD_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VMINPD_Z: Return Minimum Packed Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMINPD.Z m128 xmm k xmm
|
|
// VMINPD.Z m256 ymm k ymm
|
|
// VMINPD.Z xmm xmm k xmm
|
|
// VMINPD.Z ymm ymm k ymm
|
|
// VMINPD.Z m512 zmm k zmm
|
|
// VMINPD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VMINPD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMINPD_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VMINPD_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VMINPS: Return Minimum Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMINPS m128 xmm xmm
|
|
// VMINPS m256 ymm ymm
|
|
// VMINPS xmm xmm xmm
|
|
// VMINPS ymm ymm ymm
|
|
// VMINPS m128 xmm k xmm
|
|
// VMINPS m256 ymm k ymm
|
|
// VMINPS xmm xmm k xmm
|
|
// VMINPS ymm ymm k ymm
|
|
// VMINPS m512 zmm k zmm
|
|
// VMINPS m512 zmm zmm
|
|
// VMINPS zmm zmm k zmm
|
|
// VMINPS zmm zmm zmm
|
|
//
|
|
// Construct and append a VMINPS instruction to the active function.
|
|
func (c *Context) VMINPS(ops ...operand.Op) {
|
|
c.addinstruction(x86.VMINPS(ops...))
|
|
}
|
|
|
|
// VMINPS: Return Minimum Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMINPS m128 xmm xmm
|
|
// VMINPS m256 ymm ymm
|
|
// VMINPS xmm xmm xmm
|
|
// VMINPS ymm ymm ymm
|
|
// VMINPS m128 xmm k xmm
|
|
// VMINPS m256 ymm k ymm
|
|
// VMINPS xmm xmm k xmm
|
|
// VMINPS ymm ymm k ymm
|
|
// VMINPS m512 zmm k zmm
|
|
// VMINPS m512 zmm zmm
|
|
// VMINPS zmm zmm k zmm
|
|
// VMINPS zmm zmm zmm
|
|
//
|
|
// Construct and append a VMINPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMINPS(ops ...operand.Op) { ctx.VMINPS(ops...) }
|
|
|
|
// VMINPS_BCST: Return Minimum Packed Single-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMINPS.BCST m32 xmm k xmm
|
|
// VMINPS.BCST m32 xmm xmm
|
|
// VMINPS.BCST m32 ymm k ymm
|
|
// VMINPS.BCST m32 ymm ymm
|
|
// VMINPS.BCST m32 zmm k zmm
|
|
// VMINPS.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VMINPS.BCST instruction to the active function.
|
|
func (c *Context) VMINPS_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VMINPS_BCST(ops...))
|
|
}
|
|
|
|
// VMINPS_BCST: Return Minimum Packed Single-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMINPS.BCST m32 xmm k xmm
|
|
// VMINPS.BCST m32 xmm xmm
|
|
// VMINPS.BCST m32 ymm k ymm
|
|
// VMINPS.BCST m32 ymm ymm
|
|
// VMINPS.BCST m32 zmm k zmm
|
|
// VMINPS.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VMINPS.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMINPS_BCST(ops ...operand.Op) { ctx.VMINPS_BCST(ops...) }
|
|
|
|
// VMINPS_BCST_Z: Return Minimum Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMINPS.BCST.Z m32 xmm k xmm
|
|
// VMINPS.BCST.Z m32 ymm k ymm
|
|
// VMINPS.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VMINPS.BCST.Z instruction to the active function.
|
|
func (c *Context) VMINPS_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VMINPS_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VMINPS_BCST_Z: Return Minimum Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMINPS.BCST.Z m32 xmm k xmm
|
|
// VMINPS.BCST.Z m32 ymm k ymm
|
|
// VMINPS.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VMINPS.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMINPS_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VMINPS_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VMINPS_SAE: Return Minimum Packed Single-Precision Floating-Point Values (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMINPS.SAE zmm zmm k zmm
|
|
// VMINPS.SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VMINPS.SAE instruction to the active function.
|
|
func (c *Context) VMINPS_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VMINPS_SAE(ops...))
|
|
}
|
|
|
|
// VMINPS_SAE: Return Minimum Packed Single-Precision Floating-Point Values (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMINPS.SAE zmm zmm k zmm
|
|
// VMINPS.SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VMINPS.SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMINPS_SAE(ops ...operand.Op) { ctx.VMINPS_SAE(ops...) }
|
|
|
|
// VMINPS_SAE_Z: Return Minimum Packed Single-Precision Floating-Point Values (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMINPS.SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VMINPS.SAE.Z instruction to the active function.
|
|
func (c *Context) VMINPS_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VMINPS_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VMINPS_SAE_Z: Return Minimum Packed Single-Precision Floating-Point Values (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMINPS.SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VMINPS.SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMINPS_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VMINPS_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VMINPS_Z: Return Minimum Packed Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMINPS.Z m128 xmm k xmm
|
|
// VMINPS.Z m256 ymm k ymm
|
|
// VMINPS.Z xmm xmm k xmm
|
|
// VMINPS.Z ymm ymm k ymm
|
|
// VMINPS.Z m512 zmm k zmm
|
|
// VMINPS.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VMINPS.Z instruction to the active function.
|
|
func (c *Context) VMINPS_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VMINPS_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VMINPS_Z: Return Minimum Packed Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMINPS.Z m128 xmm k xmm
|
|
// VMINPS.Z m256 ymm k ymm
|
|
// VMINPS.Z xmm xmm k xmm
|
|
// VMINPS.Z ymm ymm k ymm
|
|
// VMINPS.Z m512 zmm k zmm
|
|
// VMINPS.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VMINPS.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMINPS_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VMINPS_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VMINSD: Return Minimum Scalar Double-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMINSD m64 xmm xmm
|
|
// VMINSD xmm xmm xmm
|
|
// VMINSD m64 xmm k xmm
|
|
// VMINSD xmm xmm k xmm
|
|
//
|
|
// Construct and append a VMINSD instruction to the active function.
|
|
func (c *Context) VMINSD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VMINSD(ops...))
|
|
}
|
|
|
|
// VMINSD: Return Minimum Scalar Double-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMINSD m64 xmm xmm
|
|
// VMINSD xmm xmm xmm
|
|
// VMINSD m64 xmm k xmm
|
|
// VMINSD xmm xmm k xmm
|
|
//
|
|
// Construct and append a VMINSD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMINSD(ops ...operand.Op) { ctx.VMINSD(ops...) }
|
|
|
|
// VMINSD_SAE: Return Minimum Scalar Double-Precision Floating-Point Value (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMINSD.SAE xmm xmm k xmm
|
|
// VMINSD.SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VMINSD.SAE instruction to the active function.
|
|
func (c *Context) VMINSD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VMINSD_SAE(ops...))
|
|
}
|
|
|
|
// VMINSD_SAE: Return Minimum Scalar Double-Precision Floating-Point Value (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMINSD.SAE xmm xmm k xmm
|
|
// VMINSD.SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VMINSD.SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMINSD_SAE(ops ...operand.Op) { ctx.VMINSD_SAE(ops...) }
|
|
|
|
// VMINSD_SAE_Z: Return Minimum Scalar Double-Precision Floating-Point Value (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMINSD.SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VMINSD.SAE.Z instruction to the active function.
|
|
func (c *Context) VMINSD_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VMINSD_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VMINSD_SAE_Z: Return Minimum Scalar Double-Precision Floating-Point Value (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMINSD.SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VMINSD.SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMINSD_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VMINSD_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VMINSD_Z: Return Minimum Scalar Double-Precision Floating-Point Value (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMINSD.Z m64 xmm k xmm
|
|
// VMINSD.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VMINSD.Z instruction to the active function.
|
|
func (c *Context) VMINSD_Z(mx, x, k, x1 operand.Op) {
|
|
c.addinstruction(x86.VMINSD_Z(mx, x, k, x1))
|
|
}
|
|
|
|
// VMINSD_Z: Return Minimum Scalar Double-Precision Floating-Point Value (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMINSD.Z m64 xmm k xmm
|
|
// VMINSD.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VMINSD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMINSD_Z(mx, x, k, x1 operand.Op) { ctx.VMINSD_Z(mx, x, k, x1) }
|
|
|
|
// VMINSS: Return Minimum Scalar Single-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMINSS m32 xmm xmm
|
|
// VMINSS xmm xmm xmm
|
|
// VMINSS m32 xmm k xmm
|
|
// VMINSS xmm xmm k xmm
|
|
//
|
|
// Construct and append a VMINSS instruction to the active function.
|
|
func (c *Context) VMINSS(ops ...operand.Op) {
|
|
c.addinstruction(x86.VMINSS(ops...))
|
|
}
|
|
|
|
// VMINSS: Return Minimum Scalar Single-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMINSS m32 xmm xmm
|
|
// VMINSS xmm xmm xmm
|
|
// VMINSS m32 xmm k xmm
|
|
// VMINSS xmm xmm k xmm
|
|
//
|
|
// Construct and append a VMINSS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMINSS(ops ...operand.Op) { ctx.VMINSS(ops...) }
|
|
|
|
// VMINSS_SAE: Return Minimum Scalar Single-Precision Floating-Point Value (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMINSS.SAE xmm xmm k xmm
|
|
// VMINSS.SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VMINSS.SAE instruction to the active function.
|
|
func (c *Context) VMINSS_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VMINSS_SAE(ops...))
|
|
}
|
|
|
|
// VMINSS_SAE: Return Minimum Scalar Single-Precision Floating-Point Value (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMINSS.SAE xmm xmm k xmm
|
|
// VMINSS.SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VMINSS.SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMINSS_SAE(ops ...operand.Op) { ctx.VMINSS_SAE(ops...) }
|
|
|
|
// VMINSS_SAE_Z: Return Minimum Scalar Single-Precision Floating-Point Value (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMINSS.SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VMINSS.SAE.Z instruction to the active function.
|
|
func (c *Context) VMINSS_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VMINSS_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VMINSS_SAE_Z: Return Minimum Scalar Single-Precision Floating-Point Value (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMINSS.SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VMINSS.SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMINSS_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VMINSS_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VMINSS_Z: Return Minimum Scalar Single-Precision Floating-Point Value (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMINSS.Z m32 xmm k xmm
|
|
// VMINSS.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VMINSS.Z instruction to the active function.
|
|
func (c *Context) VMINSS_Z(mx, x, k, x1 operand.Op) {
|
|
c.addinstruction(x86.VMINSS_Z(mx, x, k, x1))
|
|
}
|
|
|
|
// VMINSS_Z: Return Minimum Scalar Single-Precision Floating-Point Value (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMINSS.Z m32 xmm k xmm
|
|
// VMINSS.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VMINSS.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMINSS_Z(mx, x, k, x1 operand.Op) { ctx.VMINSS_Z(mx, x, k, x1) }
|
|
|
|
// VMOVAPD: Move Aligned Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMOVAPD m128 xmm
|
|
// VMOVAPD m256 ymm
|
|
// VMOVAPD xmm m128
|
|
// VMOVAPD xmm xmm
|
|
// VMOVAPD ymm m256
|
|
// VMOVAPD ymm ymm
|
|
// VMOVAPD m128 k xmm
|
|
// VMOVAPD m256 k ymm
|
|
// VMOVAPD xmm k m128
|
|
// VMOVAPD xmm k xmm
|
|
// VMOVAPD ymm k m256
|
|
// VMOVAPD ymm k ymm
|
|
// VMOVAPD m512 k zmm
|
|
// VMOVAPD m512 zmm
|
|
// VMOVAPD zmm k m512
|
|
// VMOVAPD zmm k zmm
|
|
// VMOVAPD zmm m512
|
|
// VMOVAPD zmm zmm
|
|
//
|
|
// Construct and append a VMOVAPD instruction to the active function.
|
|
func (c *Context) VMOVAPD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VMOVAPD(ops...))
|
|
}
|
|
|
|
// VMOVAPD: Move Aligned Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMOVAPD m128 xmm
|
|
// VMOVAPD m256 ymm
|
|
// VMOVAPD xmm m128
|
|
// VMOVAPD xmm xmm
|
|
// VMOVAPD ymm m256
|
|
// VMOVAPD ymm ymm
|
|
// VMOVAPD m128 k xmm
|
|
// VMOVAPD m256 k ymm
|
|
// VMOVAPD xmm k m128
|
|
// VMOVAPD xmm k xmm
|
|
// VMOVAPD ymm k m256
|
|
// VMOVAPD ymm k ymm
|
|
// VMOVAPD m512 k zmm
|
|
// VMOVAPD m512 zmm
|
|
// VMOVAPD zmm k m512
|
|
// VMOVAPD zmm k zmm
|
|
// VMOVAPD zmm m512
|
|
// VMOVAPD zmm zmm
|
|
//
|
|
// Construct and append a VMOVAPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMOVAPD(ops ...operand.Op) { ctx.VMOVAPD(ops...) }
|
|
|
|
// VMOVAPD_Z: Move Aligned Packed Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMOVAPD.Z m128 k xmm
|
|
// VMOVAPD.Z m256 k ymm
|
|
// VMOVAPD.Z xmm k m128
|
|
// VMOVAPD.Z xmm k xmm
|
|
// VMOVAPD.Z ymm k m256
|
|
// VMOVAPD.Z ymm k ymm
|
|
// VMOVAPD.Z m512 k zmm
|
|
// VMOVAPD.Z zmm k m512
|
|
// VMOVAPD.Z zmm k zmm
|
|
//
|
|
// Construct and append a VMOVAPD.Z instruction to the active function.
|
|
func (c *Context) VMOVAPD_Z(mxyz, k, mxyz1 operand.Op) {
|
|
c.addinstruction(x86.VMOVAPD_Z(mxyz, k, mxyz1))
|
|
}
|
|
|
|
// VMOVAPD_Z: Move Aligned Packed Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMOVAPD.Z m128 k xmm
|
|
// VMOVAPD.Z m256 k ymm
|
|
// VMOVAPD.Z xmm k m128
|
|
// VMOVAPD.Z xmm k xmm
|
|
// VMOVAPD.Z ymm k m256
|
|
// VMOVAPD.Z ymm k ymm
|
|
// VMOVAPD.Z m512 k zmm
|
|
// VMOVAPD.Z zmm k m512
|
|
// VMOVAPD.Z zmm k zmm
|
|
//
|
|
// Construct and append a VMOVAPD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMOVAPD_Z(mxyz, k, mxyz1 operand.Op) { ctx.VMOVAPD_Z(mxyz, k, mxyz1) }
|
|
|
|
// VMOVAPS: Move Aligned Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMOVAPS m128 xmm
|
|
// VMOVAPS m256 ymm
|
|
// VMOVAPS xmm m128
|
|
// VMOVAPS xmm xmm
|
|
// VMOVAPS ymm m256
|
|
// VMOVAPS ymm ymm
|
|
// VMOVAPS m128 k xmm
|
|
// VMOVAPS m256 k ymm
|
|
// VMOVAPS xmm k m128
|
|
// VMOVAPS xmm k xmm
|
|
// VMOVAPS ymm k m256
|
|
// VMOVAPS ymm k ymm
|
|
// VMOVAPS m512 k zmm
|
|
// VMOVAPS m512 zmm
|
|
// VMOVAPS zmm k m512
|
|
// VMOVAPS zmm k zmm
|
|
// VMOVAPS zmm m512
|
|
// VMOVAPS zmm zmm
|
|
//
|
|
// Construct and append a VMOVAPS instruction to the active function.
|
|
func (c *Context) VMOVAPS(ops ...operand.Op) {
|
|
c.addinstruction(x86.VMOVAPS(ops...))
|
|
}
|
|
|
|
// VMOVAPS: Move Aligned Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMOVAPS m128 xmm
|
|
// VMOVAPS m256 ymm
|
|
// VMOVAPS xmm m128
|
|
// VMOVAPS xmm xmm
|
|
// VMOVAPS ymm m256
|
|
// VMOVAPS ymm ymm
|
|
// VMOVAPS m128 k xmm
|
|
// VMOVAPS m256 k ymm
|
|
// VMOVAPS xmm k m128
|
|
// VMOVAPS xmm k xmm
|
|
// VMOVAPS ymm k m256
|
|
// VMOVAPS ymm k ymm
|
|
// VMOVAPS m512 k zmm
|
|
// VMOVAPS m512 zmm
|
|
// VMOVAPS zmm k m512
|
|
// VMOVAPS zmm k zmm
|
|
// VMOVAPS zmm m512
|
|
// VMOVAPS zmm zmm
|
|
//
|
|
// Construct and append a VMOVAPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMOVAPS(ops ...operand.Op) { ctx.VMOVAPS(ops...) }
|
|
|
|
// VMOVAPS_Z: Move Aligned Packed Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMOVAPS.Z m128 k xmm
|
|
// VMOVAPS.Z m256 k ymm
|
|
// VMOVAPS.Z xmm k m128
|
|
// VMOVAPS.Z xmm k xmm
|
|
// VMOVAPS.Z ymm k m256
|
|
// VMOVAPS.Z ymm k ymm
|
|
// VMOVAPS.Z m512 k zmm
|
|
// VMOVAPS.Z zmm k m512
|
|
// VMOVAPS.Z zmm k zmm
|
|
//
|
|
// Construct and append a VMOVAPS.Z instruction to the active function.
|
|
func (c *Context) VMOVAPS_Z(mxyz, k, mxyz1 operand.Op) {
|
|
c.addinstruction(x86.VMOVAPS_Z(mxyz, k, mxyz1))
|
|
}
|
|
|
|
// VMOVAPS_Z: Move Aligned Packed Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMOVAPS.Z m128 k xmm
|
|
// VMOVAPS.Z m256 k ymm
|
|
// VMOVAPS.Z xmm k m128
|
|
// VMOVAPS.Z xmm k xmm
|
|
// VMOVAPS.Z ymm k m256
|
|
// VMOVAPS.Z ymm k ymm
|
|
// VMOVAPS.Z m512 k zmm
|
|
// VMOVAPS.Z zmm k m512
|
|
// VMOVAPS.Z zmm k zmm
|
|
//
|
|
// Construct and append a VMOVAPS.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMOVAPS_Z(mxyz, k, mxyz1 operand.Op) { ctx.VMOVAPS_Z(mxyz, k, mxyz1) }
|
|
|
|
// VMOVD: Move Doubleword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMOVD m32 xmm
|
|
// VMOVD r32 xmm
|
|
// VMOVD xmm m32
|
|
// VMOVD xmm r32
|
|
//
|
|
// Construct and append a VMOVD instruction to the active function.
|
|
func (c *Context) VMOVD(mrx, mrx1 operand.Op) {
|
|
c.addinstruction(x86.VMOVD(mrx, mrx1))
|
|
}
|
|
|
|
// VMOVD: Move Doubleword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMOVD m32 xmm
|
|
// VMOVD r32 xmm
|
|
// VMOVD xmm m32
|
|
// VMOVD xmm r32
|
|
//
|
|
// Construct and append a VMOVD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMOVD(mrx, mrx1 operand.Op) { ctx.VMOVD(mrx, mrx1) }
|
|
|
|
// VMOVDDUP: Move One Double-FP and Duplicate.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMOVDDUP m256 ymm
|
|
// VMOVDDUP m64 xmm
|
|
// VMOVDDUP xmm xmm
|
|
// VMOVDDUP ymm ymm
|
|
// VMOVDDUP m256 k ymm
|
|
// VMOVDDUP m64 k xmm
|
|
// VMOVDDUP xmm k xmm
|
|
// VMOVDDUP ymm k ymm
|
|
// VMOVDDUP m512 k zmm
|
|
// VMOVDDUP m512 zmm
|
|
// VMOVDDUP zmm k zmm
|
|
// VMOVDDUP zmm zmm
|
|
//
|
|
// Construct and append a VMOVDDUP instruction to the active function.
|
|
func (c *Context) VMOVDDUP(ops ...operand.Op) {
|
|
c.addinstruction(x86.VMOVDDUP(ops...))
|
|
}
|
|
|
|
// VMOVDDUP: Move One Double-FP and Duplicate.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMOVDDUP m256 ymm
|
|
// VMOVDDUP m64 xmm
|
|
// VMOVDDUP xmm xmm
|
|
// VMOVDDUP ymm ymm
|
|
// VMOVDDUP m256 k ymm
|
|
// VMOVDDUP m64 k xmm
|
|
// VMOVDDUP xmm k xmm
|
|
// VMOVDDUP ymm k ymm
|
|
// VMOVDDUP m512 k zmm
|
|
// VMOVDDUP m512 zmm
|
|
// VMOVDDUP zmm k zmm
|
|
// VMOVDDUP zmm zmm
|
|
//
|
|
// Construct and append a VMOVDDUP instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMOVDDUP(ops ...operand.Op) { ctx.VMOVDDUP(ops...) }
|
|
|
|
// VMOVDDUP_Z: Move One Double-FP and Duplicate (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMOVDDUP.Z m256 k ymm
|
|
// VMOVDDUP.Z m64 k xmm
|
|
// VMOVDDUP.Z xmm k xmm
|
|
// VMOVDDUP.Z ymm k ymm
|
|
// VMOVDDUP.Z m512 k zmm
|
|
// VMOVDDUP.Z zmm k zmm
|
|
//
|
|
// Construct and append a VMOVDDUP.Z instruction to the active function.
|
|
func (c *Context) VMOVDDUP_Z(mxyz, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VMOVDDUP_Z(mxyz, k, xyz))
|
|
}
|
|
|
|
// VMOVDDUP_Z: Move One Double-FP and Duplicate (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMOVDDUP.Z m256 k ymm
|
|
// VMOVDDUP.Z m64 k xmm
|
|
// VMOVDDUP.Z xmm k xmm
|
|
// VMOVDDUP.Z ymm k ymm
|
|
// VMOVDDUP.Z m512 k zmm
|
|
// VMOVDDUP.Z zmm k zmm
|
|
//
|
|
// Construct and append a VMOVDDUP.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMOVDDUP_Z(mxyz, k, xyz operand.Op) { ctx.VMOVDDUP_Z(mxyz, k, xyz) }
|
|
|
|
// VMOVDQA: Move Aligned Double Quadword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMOVDQA m128 xmm
|
|
// VMOVDQA m256 ymm
|
|
// VMOVDQA xmm m128
|
|
// VMOVDQA xmm xmm
|
|
// VMOVDQA ymm m256
|
|
// VMOVDQA ymm ymm
|
|
//
|
|
// Construct and append a VMOVDQA instruction to the active function.
|
|
func (c *Context) VMOVDQA(mxy, mxy1 operand.Op) {
|
|
c.addinstruction(x86.VMOVDQA(mxy, mxy1))
|
|
}
|
|
|
|
// VMOVDQA: Move Aligned Double Quadword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMOVDQA m128 xmm
|
|
// VMOVDQA m256 ymm
|
|
// VMOVDQA xmm m128
|
|
// VMOVDQA xmm xmm
|
|
// VMOVDQA ymm m256
|
|
// VMOVDQA ymm ymm
|
|
//
|
|
// Construct and append a VMOVDQA instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMOVDQA(mxy, mxy1 operand.Op) { ctx.VMOVDQA(mxy, mxy1) }
|
|
|
|
// VMOVDQA32: Move Aligned Doubleword Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMOVDQA32 m128 k xmm
|
|
// VMOVDQA32 m128 xmm
|
|
// VMOVDQA32 m256 k ymm
|
|
// VMOVDQA32 m256 ymm
|
|
// VMOVDQA32 xmm k m128
|
|
// VMOVDQA32 xmm k xmm
|
|
// VMOVDQA32 xmm m128
|
|
// VMOVDQA32 xmm xmm
|
|
// VMOVDQA32 ymm k m256
|
|
// VMOVDQA32 ymm k ymm
|
|
// VMOVDQA32 ymm m256
|
|
// VMOVDQA32 ymm ymm
|
|
// VMOVDQA32 m512 k zmm
|
|
// VMOVDQA32 m512 zmm
|
|
// VMOVDQA32 zmm k m512
|
|
// VMOVDQA32 zmm k zmm
|
|
// VMOVDQA32 zmm m512
|
|
// VMOVDQA32 zmm zmm
|
|
//
|
|
// Construct and append a VMOVDQA32 instruction to the active function.
|
|
func (c *Context) VMOVDQA32(ops ...operand.Op) {
|
|
c.addinstruction(x86.VMOVDQA32(ops...))
|
|
}
|
|
|
|
// VMOVDQA32: Move Aligned Doubleword Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMOVDQA32 m128 k xmm
|
|
// VMOVDQA32 m128 xmm
|
|
// VMOVDQA32 m256 k ymm
|
|
// VMOVDQA32 m256 ymm
|
|
// VMOVDQA32 xmm k m128
|
|
// VMOVDQA32 xmm k xmm
|
|
// VMOVDQA32 xmm m128
|
|
// VMOVDQA32 xmm xmm
|
|
// VMOVDQA32 ymm k m256
|
|
// VMOVDQA32 ymm k ymm
|
|
// VMOVDQA32 ymm m256
|
|
// VMOVDQA32 ymm ymm
|
|
// VMOVDQA32 m512 k zmm
|
|
// VMOVDQA32 m512 zmm
|
|
// VMOVDQA32 zmm k m512
|
|
// VMOVDQA32 zmm k zmm
|
|
// VMOVDQA32 zmm m512
|
|
// VMOVDQA32 zmm zmm
|
|
//
|
|
// Construct and append a VMOVDQA32 instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMOVDQA32(ops ...operand.Op) { ctx.VMOVDQA32(ops...) }
|
|
|
|
// VMOVDQA32_Z: Move Aligned Doubleword Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMOVDQA32.Z m128 k xmm
|
|
// VMOVDQA32.Z m256 k ymm
|
|
// VMOVDQA32.Z xmm k m128
|
|
// VMOVDQA32.Z xmm k xmm
|
|
// VMOVDQA32.Z ymm k m256
|
|
// VMOVDQA32.Z ymm k ymm
|
|
// VMOVDQA32.Z m512 k zmm
|
|
// VMOVDQA32.Z zmm k m512
|
|
// VMOVDQA32.Z zmm k zmm
|
|
//
|
|
// Construct and append a VMOVDQA32.Z instruction to the active function.
|
|
func (c *Context) VMOVDQA32_Z(mxyz, k, mxyz1 operand.Op) {
|
|
c.addinstruction(x86.VMOVDQA32_Z(mxyz, k, mxyz1))
|
|
}
|
|
|
|
// VMOVDQA32_Z: Move Aligned Doubleword Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMOVDQA32.Z m128 k xmm
|
|
// VMOVDQA32.Z m256 k ymm
|
|
// VMOVDQA32.Z xmm k m128
|
|
// VMOVDQA32.Z xmm k xmm
|
|
// VMOVDQA32.Z ymm k m256
|
|
// VMOVDQA32.Z ymm k ymm
|
|
// VMOVDQA32.Z m512 k zmm
|
|
// VMOVDQA32.Z zmm k m512
|
|
// VMOVDQA32.Z zmm k zmm
|
|
//
|
|
// Construct and append a VMOVDQA32.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMOVDQA32_Z(mxyz, k, mxyz1 operand.Op) { ctx.VMOVDQA32_Z(mxyz, k, mxyz1) }
|
|
|
|
// VMOVDQA64: Move Aligned Quadword Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMOVDQA64 m128 k xmm
|
|
// VMOVDQA64 m128 xmm
|
|
// VMOVDQA64 m256 k ymm
|
|
// VMOVDQA64 m256 ymm
|
|
// VMOVDQA64 xmm k m128
|
|
// VMOVDQA64 xmm k xmm
|
|
// VMOVDQA64 xmm m128
|
|
// VMOVDQA64 xmm xmm
|
|
// VMOVDQA64 ymm k m256
|
|
// VMOVDQA64 ymm k ymm
|
|
// VMOVDQA64 ymm m256
|
|
// VMOVDQA64 ymm ymm
|
|
// VMOVDQA64 m512 k zmm
|
|
// VMOVDQA64 m512 zmm
|
|
// VMOVDQA64 zmm k m512
|
|
// VMOVDQA64 zmm k zmm
|
|
// VMOVDQA64 zmm m512
|
|
// VMOVDQA64 zmm zmm
|
|
//
|
|
// Construct and append a VMOVDQA64 instruction to the active function.
|
|
func (c *Context) VMOVDQA64(ops ...operand.Op) {
|
|
c.addinstruction(x86.VMOVDQA64(ops...))
|
|
}
|
|
|
|
// VMOVDQA64: Move Aligned Quadword Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMOVDQA64 m128 k xmm
|
|
// VMOVDQA64 m128 xmm
|
|
// VMOVDQA64 m256 k ymm
|
|
// VMOVDQA64 m256 ymm
|
|
// VMOVDQA64 xmm k m128
|
|
// VMOVDQA64 xmm k xmm
|
|
// VMOVDQA64 xmm m128
|
|
// VMOVDQA64 xmm xmm
|
|
// VMOVDQA64 ymm k m256
|
|
// VMOVDQA64 ymm k ymm
|
|
// VMOVDQA64 ymm m256
|
|
// VMOVDQA64 ymm ymm
|
|
// VMOVDQA64 m512 k zmm
|
|
// VMOVDQA64 m512 zmm
|
|
// VMOVDQA64 zmm k m512
|
|
// VMOVDQA64 zmm k zmm
|
|
// VMOVDQA64 zmm m512
|
|
// VMOVDQA64 zmm zmm
|
|
//
|
|
// Construct and append a VMOVDQA64 instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMOVDQA64(ops ...operand.Op) { ctx.VMOVDQA64(ops...) }
|
|
|
|
// VMOVDQA64_Z: Move Aligned Quadword Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMOVDQA64.Z m128 k xmm
|
|
// VMOVDQA64.Z m256 k ymm
|
|
// VMOVDQA64.Z xmm k m128
|
|
// VMOVDQA64.Z xmm k xmm
|
|
// VMOVDQA64.Z ymm k m256
|
|
// VMOVDQA64.Z ymm k ymm
|
|
// VMOVDQA64.Z m512 k zmm
|
|
// VMOVDQA64.Z zmm k m512
|
|
// VMOVDQA64.Z zmm k zmm
|
|
//
|
|
// Construct and append a VMOVDQA64.Z instruction to the active function.
|
|
func (c *Context) VMOVDQA64_Z(mxyz, k, mxyz1 operand.Op) {
|
|
c.addinstruction(x86.VMOVDQA64_Z(mxyz, k, mxyz1))
|
|
}
|
|
|
|
// VMOVDQA64_Z: Move Aligned Quadword Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMOVDQA64.Z m128 k xmm
|
|
// VMOVDQA64.Z m256 k ymm
|
|
// VMOVDQA64.Z xmm k m128
|
|
// VMOVDQA64.Z xmm k xmm
|
|
// VMOVDQA64.Z ymm k m256
|
|
// VMOVDQA64.Z ymm k ymm
|
|
// VMOVDQA64.Z m512 k zmm
|
|
// VMOVDQA64.Z zmm k m512
|
|
// VMOVDQA64.Z zmm k zmm
|
|
//
|
|
// Construct and append a VMOVDQA64.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMOVDQA64_Z(mxyz, k, mxyz1 operand.Op) { ctx.VMOVDQA64_Z(mxyz, k, mxyz1) }
|
|
|
|
// VMOVDQU: Move Unaligned Double Quadword.
//
// Forms:
//
//	VMOVDQU m128 xmm
//	VMOVDQU m256 ymm
//	VMOVDQU xmm m128
//	VMOVDQU xmm xmm
//	VMOVDQU ymm m256
//	VMOVDQU ymm ymm
//
// Construct and append a VMOVDQU instruction to the active function.
func (c *Context) VMOVDQU(mxy, mxy1 operand.Op) {
	c.addinstruction(x86.VMOVDQU(mxy, mxy1))
}

// VMOVDQU: Move Unaligned Double Quadword.
//
// Forms:
//
//	VMOVDQU m128 xmm
//	VMOVDQU m256 ymm
//	VMOVDQU xmm m128
//	VMOVDQU xmm xmm
//	VMOVDQU ymm m256
//	VMOVDQU ymm ymm
//
// Construct and append a VMOVDQU instruction to the active function.
// Operates on the global context.
func VMOVDQU(mxy, mxy1 operand.Op) { ctx.VMOVDQU(mxy, mxy1) }
|
|
|
|
// VMOVDQU16: Move Unaligned Word Values.
//
// Forms:
//
//	VMOVDQU16 m128 k xmm
//	VMOVDQU16 m128 xmm
//	VMOVDQU16 m256 k ymm
//	VMOVDQU16 m256 ymm
//	VMOVDQU16 xmm k m128
//	VMOVDQU16 xmm k xmm
//	VMOVDQU16 xmm m128
//	VMOVDQU16 xmm xmm
//	VMOVDQU16 ymm k m256
//	VMOVDQU16 ymm k ymm
//	VMOVDQU16 ymm m256
//	VMOVDQU16 ymm ymm
//	VMOVDQU16 m512 k zmm
//	VMOVDQU16 m512 zmm
//	VMOVDQU16 zmm k m512
//	VMOVDQU16 zmm k zmm
//	VMOVDQU16 zmm m512
//	VMOVDQU16 zmm zmm
//
// Construct and append a VMOVDQU16 instruction to the active function.
func (c *Context) VMOVDQU16(ops ...operand.Op) {
	c.addinstruction(x86.VMOVDQU16(ops...))
}

// VMOVDQU16: Move Unaligned Word Values.
//
// Forms:
//
//	VMOVDQU16 m128 k xmm
//	VMOVDQU16 m128 xmm
//	VMOVDQU16 m256 k ymm
//	VMOVDQU16 m256 ymm
//	VMOVDQU16 xmm k m128
//	VMOVDQU16 xmm k xmm
//	VMOVDQU16 xmm m128
//	VMOVDQU16 xmm xmm
//	VMOVDQU16 ymm k m256
//	VMOVDQU16 ymm k ymm
//	VMOVDQU16 ymm m256
//	VMOVDQU16 ymm ymm
//	VMOVDQU16 m512 k zmm
//	VMOVDQU16 m512 zmm
//	VMOVDQU16 zmm k m512
//	VMOVDQU16 zmm k zmm
//	VMOVDQU16 zmm m512
//	VMOVDQU16 zmm zmm
//
// Construct and append a VMOVDQU16 instruction to the active function.
// Operates on the global context.
func VMOVDQU16(ops ...operand.Op) { ctx.VMOVDQU16(ops...) }
|
|
|
|
// VMOVDQU16_Z: Move Unaligned Word Values (Zeroing Masking).
//
// Forms:
//
//	VMOVDQU16.Z m128 k xmm
//	VMOVDQU16.Z m256 k ymm
//	VMOVDQU16.Z xmm k m128
//	VMOVDQU16.Z xmm k xmm
//	VMOVDQU16.Z ymm k m256
//	VMOVDQU16.Z ymm k ymm
//	VMOVDQU16.Z m512 k zmm
//	VMOVDQU16.Z zmm k m512
//	VMOVDQU16.Z zmm k zmm
//
// Construct and append a VMOVDQU16.Z instruction to the active function.
func (c *Context) VMOVDQU16_Z(mxyz, k, mxyz1 operand.Op) {
	c.addinstruction(x86.VMOVDQU16_Z(mxyz, k, mxyz1))
}

// VMOVDQU16_Z: Move Unaligned Word Values (Zeroing Masking).
//
// Forms:
//
//	VMOVDQU16.Z m128 k xmm
//	VMOVDQU16.Z m256 k ymm
//	VMOVDQU16.Z xmm k m128
//	VMOVDQU16.Z xmm k xmm
//	VMOVDQU16.Z ymm k m256
//	VMOVDQU16.Z ymm k ymm
//	VMOVDQU16.Z m512 k zmm
//	VMOVDQU16.Z zmm k m512
//	VMOVDQU16.Z zmm k zmm
//
// Construct and append a VMOVDQU16.Z instruction to the active function.
// Operates on the global context.
func VMOVDQU16_Z(mxyz, k, mxyz1 operand.Op) { ctx.VMOVDQU16_Z(mxyz, k, mxyz1) }
|
|
|
|
// VMOVDQU32: Move Unaligned Doubleword Values.
//
// Forms:
//
//	VMOVDQU32 m128 k xmm
//	VMOVDQU32 m128 xmm
//	VMOVDQU32 m256 k ymm
//	VMOVDQU32 m256 ymm
//	VMOVDQU32 xmm k m128
//	VMOVDQU32 xmm k xmm
//	VMOVDQU32 xmm m128
//	VMOVDQU32 xmm xmm
//	VMOVDQU32 ymm k m256
//	VMOVDQU32 ymm k ymm
//	VMOVDQU32 ymm m256
//	VMOVDQU32 ymm ymm
//	VMOVDQU32 m512 k zmm
//	VMOVDQU32 m512 zmm
//	VMOVDQU32 zmm k m512
//	VMOVDQU32 zmm k zmm
//	VMOVDQU32 zmm m512
//	VMOVDQU32 zmm zmm
//
// Construct and append a VMOVDQU32 instruction to the active function.
func (c *Context) VMOVDQU32(ops ...operand.Op) {
	c.addinstruction(x86.VMOVDQU32(ops...))
}

// VMOVDQU32: Move Unaligned Doubleword Values.
//
// Forms:
//
//	VMOVDQU32 m128 k xmm
//	VMOVDQU32 m128 xmm
//	VMOVDQU32 m256 k ymm
//	VMOVDQU32 m256 ymm
//	VMOVDQU32 xmm k m128
//	VMOVDQU32 xmm k xmm
//	VMOVDQU32 xmm m128
//	VMOVDQU32 xmm xmm
//	VMOVDQU32 ymm k m256
//	VMOVDQU32 ymm k ymm
//	VMOVDQU32 ymm m256
//	VMOVDQU32 ymm ymm
//	VMOVDQU32 m512 k zmm
//	VMOVDQU32 m512 zmm
//	VMOVDQU32 zmm k m512
//	VMOVDQU32 zmm k zmm
//	VMOVDQU32 zmm m512
//	VMOVDQU32 zmm zmm
//
// Construct and append a VMOVDQU32 instruction to the active function.
// Operates on the global context.
func VMOVDQU32(ops ...operand.Op) { ctx.VMOVDQU32(ops...) }
|
|
|
|
// VMOVDQU32_Z: Move Unaligned Doubleword Values (Zeroing Masking).
//
// Forms:
//
//	VMOVDQU32.Z m128 k xmm
//	VMOVDQU32.Z m256 k ymm
//	VMOVDQU32.Z xmm k m128
//	VMOVDQU32.Z xmm k xmm
//	VMOVDQU32.Z ymm k m256
//	VMOVDQU32.Z ymm k ymm
//	VMOVDQU32.Z m512 k zmm
//	VMOVDQU32.Z zmm k m512
//	VMOVDQU32.Z zmm k zmm
//
// Construct and append a VMOVDQU32.Z instruction to the active function.
func (c *Context) VMOVDQU32_Z(mxyz, k, mxyz1 operand.Op) {
	c.addinstruction(x86.VMOVDQU32_Z(mxyz, k, mxyz1))
}

// VMOVDQU32_Z: Move Unaligned Doubleword Values (Zeroing Masking).
//
// Forms:
//
//	VMOVDQU32.Z m128 k xmm
//	VMOVDQU32.Z m256 k ymm
//	VMOVDQU32.Z xmm k m128
//	VMOVDQU32.Z xmm k xmm
//	VMOVDQU32.Z ymm k m256
//	VMOVDQU32.Z ymm k ymm
//	VMOVDQU32.Z m512 k zmm
//	VMOVDQU32.Z zmm k m512
//	VMOVDQU32.Z zmm k zmm
//
// Construct and append a VMOVDQU32.Z instruction to the active function.
// Operates on the global context.
func VMOVDQU32_Z(mxyz, k, mxyz1 operand.Op) { ctx.VMOVDQU32_Z(mxyz, k, mxyz1) }
|
|
|
|
// VMOVDQU64: Move Unaligned Quadword Values.
//
// Forms:
//
//	VMOVDQU64 m128 k xmm
//	VMOVDQU64 m128 xmm
//	VMOVDQU64 m256 k ymm
//	VMOVDQU64 m256 ymm
//	VMOVDQU64 xmm k m128
//	VMOVDQU64 xmm k xmm
//	VMOVDQU64 xmm m128
//	VMOVDQU64 xmm xmm
//	VMOVDQU64 ymm k m256
//	VMOVDQU64 ymm k ymm
//	VMOVDQU64 ymm m256
//	VMOVDQU64 ymm ymm
//	VMOVDQU64 m512 k zmm
//	VMOVDQU64 m512 zmm
//	VMOVDQU64 zmm k m512
//	VMOVDQU64 zmm k zmm
//	VMOVDQU64 zmm m512
//	VMOVDQU64 zmm zmm
//
// Construct and append a VMOVDQU64 instruction to the active function.
func (c *Context) VMOVDQU64(ops ...operand.Op) {
	c.addinstruction(x86.VMOVDQU64(ops...))
}

// VMOVDQU64: Move Unaligned Quadword Values.
//
// Forms:
//
//	VMOVDQU64 m128 k xmm
//	VMOVDQU64 m128 xmm
//	VMOVDQU64 m256 k ymm
//	VMOVDQU64 m256 ymm
//	VMOVDQU64 xmm k m128
//	VMOVDQU64 xmm k xmm
//	VMOVDQU64 xmm m128
//	VMOVDQU64 xmm xmm
//	VMOVDQU64 ymm k m256
//	VMOVDQU64 ymm k ymm
//	VMOVDQU64 ymm m256
//	VMOVDQU64 ymm ymm
//	VMOVDQU64 m512 k zmm
//	VMOVDQU64 m512 zmm
//	VMOVDQU64 zmm k m512
//	VMOVDQU64 zmm k zmm
//	VMOVDQU64 zmm m512
//	VMOVDQU64 zmm zmm
//
// Construct and append a VMOVDQU64 instruction to the active function.
// Operates on the global context.
func VMOVDQU64(ops ...operand.Op) { ctx.VMOVDQU64(ops...) }
|
|
|
|
// VMOVDQU64_Z: Move Unaligned Quadword Values (Zeroing Masking).
//
// Forms:
//
//	VMOVDQU64.Z m128 k xmm
//	VMOVDQU64.Z m256 k ymm
//	VMOVDQU64.Z xmm k m128
//	VMOVDQU64.Z xmm k xmm
//	VMOVDQU64.Z ymm k m256
//	VMOVDQU64.Z ymm k ymm
//	VMOVDQU64.Z m512 k zmm
//	VMOVDQU64.Z zmm k m512
//	VMOVDQU64.Z zmm k zmm
//
// Construct and append a VMOVDQU64.Z instruction to the active function.
func (c *Context) VMOVDQU64_Z(mxyz, k, mxyz1 operand.Op) {
	c.addinstruction(x86.VMOVDQU64_Z(mxyz, k, mxyz1))
}

// VMOVDQU64_Z: Move Unaligned Quadword Values (Zeroing Masking).
//
// Forms:
//
//	VMOVDQU64.Z m128 k xmm
//	VMOVDQU64.Z m256 k ymm
//	VMOVDQU64.Z xmm k m128
//	VMOVDQU64.Z xmm k xmm
//	VMOVDQU64.Z ymm k m256
//	VMOVDQU64.Z ymm k ymm
//	VMOVDQU64.Z m512 k zmm
//	VMOVDQU64.Z zmm k m512
//	VMOVDQU64.Z zmm k zmm
//
// Construct and append a VMOVDQU64.Z instruction to the active function.
// Operates on the global context.
func VMOVDQU64_Z(mxyz, k, mxyz1 operand.Op) { ctx.VMOVDQU64_Z(mxyz, k, mxyz1) }
|
|
|
|
// VMOVDQU8: Move Unaligned Byte Values.
//
// Forms:
//
//	VMOVDQU8 m128 k xmm
//	VMOVDQU8 m128 xmm
//	VMOVDQU8 m256 k ymm
//	VMOVDQU8 m256 ymm
//	VMOVDQU8 xmm k m128
//	VMOVDQU8 xmm k xmm
//	VMOVDQU8 xmm m128
//	VMOVDQU8 xmm xmm
//	VMOVDQU8 ymm k m256
//	VMOVDQU8 ymm k ymm
//	VMOVDQU8 ymm m256
//	VMOVDQU8 ymm ymm
//	VMOVDQU8 m512 k zmm
//	VMOVDQU8 m512 zmm
//	VMOVDQU8 zmm k m512
//	VMOVDQU8 zmm k zmm
//	VMOVDQU8 zmm m512
//	VMOVDQU8 zmm zmm
//
// Construct and append a VMOVDQU8 instruction to the active function.
func (c *Context) VMOVDQU8(ops ...operand.Op) {
	c.addinstruction(x86.VMOVDQU8(ops...))
}

// VMOVDQU8: Move Unaligned Byte Values.
//
// Forms:
//
//	VMOVDQU8 m128 k xmm
//	VMOVDQU8 m128 xmm
//	VMOVDQU8 m256 k ymm
//	VMOVDQU8 m256 ymm
//	VMOVDQU8 xmm k m128
//	VMOVDQU8 xmm k xmm
//	VMOVDQU8 xmm m128
//	VMOVDQU8 xmm xmm
//	VMOVDQU8 ymm k m256
//	VMOVDQU8 ymm k ymm
//	VMOVDQU8 ymm m256
//	VMOVDQU8 ymm ymm
//	VMOVDQU8 m512 k zmm
//	VMOVDQU8 m512 zmm
//	VMOVDQU8 zmm k m512
//	VMOVDQU8 zmm k zmm
//	VMOVDQU8 zmm m512
//	VMOVDQU8 zmm zmm
//
// Construct and append a VMOVDQU8 instruction to the active function.
// Operates on the global context.
func VMOVDQU8(ops ...operand.Op) { ctx.VMOVDQU8(ops...) }
|
|
|
|
// VMOVDQU8_Z: Move Unaligned Byte Values (Zeroing Masking).
//
// Forms:
//
//	VMOVDQU8.Z m128 k xmm
//	VMOVDQU8.Z m256 k ymm
//	VMOVDQU8.Z xmm k m128
//	VMOVDQU8.Z xmm k xmm
//	VMOVDQU8.Z ymm k m256
//	VMOVDQU8.Z ymm k ymm
//	VMOVDQU8.Z m512 k zmm
//	VMOVDQU8.Z zmm k m512
//	VMOVDQU8.Z zmm k zmm
//
// Construct and append a VMOVDQU8.Z instruction to the active function.
func (c *Context) VMOVDQU8_Z(mxyz, k, mxyz1 operand.Op) {
	c.addinstruction(x86.VMOVDQU8_Z(mxyz, k, mxyz1))
}

// VMOVDQU8_Z: Move Unaligned Byte Values (Zeroing Masking).
//
// Forms:
//
//	VMOVDQU8.Z m128 k xmm
//	VMOVDQU8.Z m256 k ymm
//	VMOVDQU8.Z xmm k m128
//	VMOVDQU8.Z xmm k xmm
//	VMOVDQU8.Z ymm k m256
//	VMOVDQU8.Z ymm k ymm
//	VMOVDQU8.Z m512 k zmm
//	VMOVDQU8.Z zmm k m512
//	VMOVDQU8.Z zmm k zmm
//
// Construct and append a VMOVDQU8.Z instruction to the active function.
// Operates on the global context.
func VMOVDQU8_Z(mxyz, k, mxyz1 operand.Op) { ctx.VMOVDQU8_Z(mxyz, k, mxyz1) }
|
|
|
|
// VMOVHLPS: Move Packed Single-Precision Floating-Point Values High to Low.
//
// Forms:
//
//	VMOVHLPS xmm xmm xmm
//
// Construct and append a VMOVHLPS instruction to the active function.
func (c *Context) VMOVHLPS(x, x1, x2 operand.Op) {
	c.addinstruction(x86.VMOVHLPS(x, x1, x2))
}

// VMOVHLPS: Move Packed Single-Precision Floating-Point Values High to Low.
//
// Forms:
//
//	VMOVHLPS xmm xmm xmm
//
// Construct and append a VMOVHLPS instruction to the active function.
// Operates on the global context.
func VMOVHLPS(x, x1, x2 operand.Op) { ctx.VMOVHLPS(x, x1, x2) }
|
|
|
|
// VMOVHPD: Move High Packed Double-Precision Floating-Point Value.
//
// Forms:
//
//	VMOVHPD m64 xmm xmm
//	VMOVHPD xmm m64
//
// Construct and append a VMOVHPD instruction to the active function.
func (c *Context) VMOVHPD(ops ...operand.Op) {
	c.addinstruction(x86.VMOVHPD(ops...))
}

// VMOVHPD: Move High Packed Double-Precision Floating-Point Value.
//
// Forms:
//
//	VMOVHPD m64 xmm xmm
//	VMOVHPD xmm m64
//
// Construct and append a VMOVHPD instruction to the active function.
// Operates on the global context.
func VMOVHPD(ops ...operand.Op) { ctx.VMOVHPD(ops...) }
|
|
|
|
// VMOVHPS: Move High Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	VMOVHPS m64 xmm xmm
//	VMOVHPS xmm m64
//
// Construct and append a VMOVHPS instruction to the active function.
func (c *Context) VMOVHPS(ops ...operand.Op) {
	c.addinstruction(x86.VMOVHPS(ops...))
}

// VMOVHPS: Move High Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	VMOVHPS m64 xmm xmm
//	VMOVHPS xmm m64
//
// Construct and append a VMOVHPS instruction to the active function.
// Operates on the global context.
func VMOVHPS(ops ...operand.Op) { ctx.VMOVHPS(ops...) }
|
|
|
|
// VMOVLHPS: Move Packed Single-Precision Floating-Point Values Low to High.
//
// Forms:
//
//	VMOVLHPS xmm xmm xmm
//
// Construct and append a VMOVLHPS instruction to the active function.
func (c *Context) VMOVLHPS(x, x1, x2 operand.Op) {
	c.addinstruction(x86.VMOVLHPS(x, x1, x2))
}

// VMOVLHPS: Move Packed Single-Precision Floating-Point Values Low to High.
//
// Forms:
//
//	VMOVLHPS xmm xmm xmm
//
// Construct and append a VMOVLHPS instruction to the active function.
// Operates on the global context.
func VMOVLHPS(x, x1, x2 operand.Op) { ctx.VMOVLHPS(x, x1, x2) }
|
|
|
|
// VMOVLPD: Move Low Packed Double-Precision Floating-Point Value.
//
// Forms:
//
//	VMOVLPD m64 xmm xmm
//	VMOVLPD xmm m64
//
// Construct and append a VMOVLPD instruction to the active function.
func (c *Context) VMOVLPD(ops ...operand.Op) {
	c.addinstruction(x86.VMOVLPD(ops...))
}

// VMOVLPD: Move Low Packed Double-Precision Floating-Point Value.
//
// Forms:
//
//	VMOVLPD m64 xmm xmm
//	VMOVLPD xmm m64
//
// Construct and append a VMOVLPD instruction to the active function.
// Operates on the global context.
func VMOVLPD(ops ...operand.Op) { ctx.VMOVLPD(ops...) }
|
|
|
|
// VMOVLPS: Move Low Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	VMOVLPS m64 xmm xmm
//	VMOVLPS xmm m64
//
// Construct and append a VMOVLPS instruction to the active function.
func (c *Context) VMOVLPS(ops ...operand.Op) {
	c.addinstruction(x86.VMOVLPS(ops...))
}

// VMOVLPS: Move Low Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	VMOVLPS m64 xmm xmm
//	VMOVLPS xmm m64
//
// Construct and append a VMOVLPS instruction to the active function.
// Operates on the global context.
func VMOVLPS(ops ...operand.Op) { ctx.VMOVLPS(ops...) }
|
|
|
|
// VMOVMSKPD: Extract Packed Double-Precision Floating-Point Sign Mask.
//
// Forms:
//
//	VMOVMSKPD xmm r32
//	VMOVMSKPD ymm r32
//
// Construct and append a VMOVMSKPD instruction to the active function.
func (c *Context) VMOVMSKPD(xy, r operand.Op) {
	c.addinstruction(x86.VMOVMSKPD(xy, r))
}

// VMOVMSKPD: Extract Packed Double-Precision Floating-Point Sign Mask.
//
// Forms:
//
//	VMOVMSKPD xmm r32
//	VMOVMSKPD ymm r32
//
// Construct and append a VMOVMSKPD instruction to the active function.
// Operates on the global context.
func VMOVMSKPD(xy, r operand.Op) { ctx.VMOVMSKPD(xy, r) }
|
|
|
|
// VMOVMSKPS: Extract Packed Single-Precision Floating-Point Sign Mask.
//
// Forms:
//
//	VMOVMSKPS xmm r32
//	VMOVMSKPS ymm r32
//
// Construct and append a VMOVMSKPS instruction to the active function.
func (c *Context) VMOVMSKPS(xy, r operand.Op) {
	c.addinstruction(x86.VMOVMSKPS(xy, r))
}

// VMOVMSKPS: Extract Packed Single-Precision Floating-Point Sign Mask.
//
// Forms:
//
//	VMOVMSKPS xmm r32
//	VMOVMSKPS ymm r32
//
// Construct and append a VMOVMSKPS instruction to the active function.
// Operates on the global context.
func VMOVMSKPS(xy, r operand.Op) { ctx.VMOVMSKPS(xy, r) }
|
|
|
|
// VMOVNTDQ: Store Double Quadword Using Non-Temporal Hint.
//
// Forms:
//
//	VMOVNTDQ xmm m128
//	VMOVNTDQ ymm m256
//	VMOVNTDQ zmm m512
//
// Construct and append a VMOVNTDQ instruction to the active function.
func (c *Context) VMOVNTDQ(xyz, m operand.Op) {
	c.addinstruction(x86.VMOVNTDQ(xyz, m))
}

// VMOVNTDQ: Store Double Quadword Using Non-Temporal Hint.
//
// Forms:
//
//	VMOVNTDQ xmm m128
//	VMOVNTDQ ymm m256
//	VMOVNTDQ zmm m512
//
// Construct and append a VMOVNTDQ instruction to the active function.
// Operates on the global context.
func VMOVNTDQ(xyz, m operand.Op) { ctx.VMOVNTDQ(xyz, m) }
|
|
|
|
// VMOVNTDQA: Load Double Quadword Non-Temporal Aligned Hint.
//
// Forms:
//
//	VMOVNTDQA m256 ymm
//	VMOVNTDQA m128 xmm
//	VMOVNTDQA m512 zmm
//
// Construct and append a VMOVNTDQA instruction to the active function.
func (c *Context) VMOVNTDQA(m, xyz operand.Op) {
	c.addinstruction(x86.VMOVNTDQA(m, xyz))
}

// VMOVNTDQA: Load Double Quadword Non-Temporal Aligned Hint.
//
// Forms:
//
//	VMOVNTDQA m256 ymm
//	VMOVNTDQA m128 xmm
//	VMOVNTDQA m512 zmm
//
// Construct and append a VMOVNTDQA instruction to the active function.
// Operates on the global context.
func VMOVNTDQA(m, xyz operand.Op) { ctx.VMOVNTDQA(m, xyz) }
|
|
|
|
// VMOVNTPD: Store Packed Double-Precision Floating-Point Values Using Non-Temporal Hint.
//
// Forms:
//
//	VMOVNTPD xmm m128
//	VMOVNTPD ymm m256
//	VMOVNTPD zmm m512
//
// Construct and append a VMOVNTPD instruction to the active function.
func (c *Context) VMOVNTPD(xyz, m operand.Op) {
	c.addinstruction(x86.VMOVNTPD(xyz, m))
}

// VMOVNTPD: Store Packed Double-Precision Floating-Point Values Using Non-Temporal Hint.
//
// Forms:
//
//	VMOVNTPD xmm m128
//	VMOVNTPD ymm m256
//	VMOVNTPD zmm m512
//
// Construct and append a VMOVNTPD instruction to the active function.
// Operates on the global context.
func VMOVNTPD(xyz, m operand.Op) { ctx.VMOVNTPD(xyz, m) }
|
|
|
|
// VMOVNTPS: Store Packed Single-Precision Floating-Point Values Using Non-Temporal Hint.
//
// Forms:
//
//	VMOVNTPS xmm m128
//	VMOVNTPS ymm m256
//	VMOVNTPS zmm m512
//
// Construct and append a VMOVNTPS instruction to the active function.
func (c *Context) VMOVNTPS(xyz, m operand.Op) {
	c.addinstruction(x86.VMOVNTPS(xyz, m))
}

// VMOVNTPS: Store Packed Single-Precision Floating-Point Values Using Non-Temporal Hint.
//
// Forms:
//
//	VMOVNTPS xmm m128
//	VMOVNTPS ymm m256
//	VMOVNTPS zmm m512
//
// Construct and append a VMOVNTPS instruction to the active function.
// Operates on the global context.
func VMOVNTPS(xyz, m operand.Op) { ctx.VMOVNTPS(xyz, m) }
|
|
|
|
// VMOVQ: Move Quadword.
//
// Forms:
//
//	VMOVQ m64 xmm
//	VMOVQ r64 xmm
//	VMOVQ xmm m64
//	VMOVQ xmm r64
//	VMOVQ xmm xmm
//
// Construct and append a VMOVQ instruction to the active function.
func (c *Context) VMOVQ(mrx, mrx1 operand.Op) {
	c.addinstruction(x86.VMOVQ(mrx, mrx1))
}

// VMOVQ: Move Quadword.
//
// Forms:
//
//	VMOVQ m64 xmm
//	VMOVQ r64 xmm
//	VMOVQ xmm m64
//	VMOVQ xmm r64
//	VMOVQ xmm xmm
//
// Construct and append a VMOVQ instruction to the active function.
// Operates on the global context.
func VMOVQ(mrx, mrx1 operand.Op) { ctx.VMOVQ(mrx, mrx1) }
|
|
|
|
// VMOVSD: Move Scalar Double-Precision Floating-Point Value.
//
// Forms:
//
//	VMOVSD m64 xmm
//	VMOVSD xmm m64
//	VMOVSD xmm xmm xmm
//	VMOVSD m64 k xmm
//	VMOVSD xmm k m64
//	VMOVSD xmm xmm k xmm
//
// Construct and append a VMOVSD instruction to the active function.
func (c *Context) VMOVSD(ops ...operand.Op) {
	c.addinstruction(x86.VMOVSD(ops...))
}

// VMOVSD: Move Scalar Double-Precision Floating-Point Value.
//
// Forms:
//
//	VMOVSD m64 xmm
//	VMOVSD xmm m64
//	VMOVSD xmm xmm xmm
//	VMOVSD m64 k xmm
//	VMOVSD xmm k m64
//	VMOVSD xmm xmm k xmm
//
// Construct and append a VMOVSD instruction to the active function.
// Operates on the global context.
func VMOVSD(ops ...operand.Op) { ctx.VMOVSD(ops...) }
|
|
|
|
// VMOVSD_Z: Move Scalar Double-Precision Floating-Point Value (Zeroing Masking).
//
// Forms:
//
//	VMOVSD.Z m64 k xmm
//	VMOVSD.Z xmm xmm k xmm
//
// Construct and append a VMOVSD.Z instruction to the active function.
func (c *Context) VMOVSD_Z(ops ...operand.Op) {
	c.addinstruction(x86.VMOVSD_Z(ops...))
}

// VMOVSD_Z: Move Scalar Double-Precision Floating-Point Value (Zeroing Masking).
//
// Forms:
//
//	VMOVSD.Z m64 k xmm
//	VMOVSD.Z xmm xmm k xmm
//
// Construct and append a VMOVSD.Z instruction to the active function.
// Operates on the global context.
func VMOVSD_Z(ops ...operand.Op) { ctx.VMOVSD_Z(ops...) }
|
|
|
|
// VMOVSHDUP: Move Packed Single-FP High and Duplicate.
//
// Forms:
//
//	VMOVSHDUP m128 xmm
//	VMOVSHDUP m256 ymm
//	VMOVSHDUP xmm xmm
//	VMOVSHDUP ymm ymm
//	VMOVSHDUP m128 k xmm
//	VMOVSHDUP m256 k ymm
//	VMOVSHDUP xmm k xmm
//	VMOVSHDUP ymm k ymm
//	VMOVSHDUP m512 k zmm
//	VMOVSHDUP m512 zmm
//	VMOVSHDUP zmm k zmm
//	VMOVSHDUP zmm zmm
//
// Construct and append a VMOVSHDUP instruction to the active function.
func (c *Context) VMOVSHDUP(ops ...operand.Op) {
	c.addinstruction(x86.VMOVSHDUP(ops...))
}

// VMOVSHDUP: Move Packed Single-FP High and Duplicate.
//
// Forms:
//
//	VMOVSHDUP m128 xmm
//	VMOVSHDUP m256 ymm
//	VMOVSHDUP xmm xmm
//	VMOVSHDUP ymm ymm
//	VMOVSHDUP m128 k xmm
//	VMOVSHDUP m256 k ymm
//	VMOVSHDUP xmm k xmm
//	VMOVSHDUP ymm k ymm
//	VMOVSHDUP m512 k zmm
//	VMOVSHDUP m512 zmm
//	VMOVSHDUP zmm k zmm
//	VMOVSHDUP zmm zmm
//
// Construct and append a VMOVSHDUP instruction to the active function.
// Operates on the global context.
func VMOVSHDUP(ops ...operand.Op) { ctx.VMOVSHDUP(ops...) }
|
|
|
|
// VMOVSHDUP_Z: Move Packed Single-FP High and Duplicate (Zeroing Masking).
//
// Forms:
//
//	VMOVSHDUP.Z m128 k xmm
//	VMOVSHDUP.Z m256 k ymm
//	VMOVSHDUP.Z xmm k xmm
//	VMOVSHDUP.Z ymm k ymm
//	VMOVSHDUP.Z m512 k zmm
//	VMOVSHDUP.Z zmm k zmm
//
// Construct and append a VMOVSHDUP.Z instruction to the active function.
func (c *Context) VMOVSHDUP_Z(mxyz, k, xyz operand.Op) {
	c.addinstruction(x86.VMOVSHDUP_Z(mxyz, k, xyz))
}

// VMOVSHDUP_Z: Move Packed Single-FP High and Duplicate (Zeroing Masking).
//
// Forms:
//
//	VMOVSHDUP.Z m128 k xmm
//	VMOVSHDUP.Z m256 k ymm
//	VMOVSHDUP.Z xmm k xmm
//	VMOVSHDUP.Z ymm k ymm
//	VMOVSHDUP.Z m512 k zmm
//	VMOVSHDUP.Z zmm k zmm
//
// Construct and append a VMOVSHDUP.Z instruction to the active function.
// Operates on the global context.
func VMOVSHDUP_Z(mxyz, k, xyz operand.Op) { ctx.VMOVSHDUP_Z(mxyz, k, xyz) }
|
|
|
|
// VMOVSLDUP: Move Packed Single-FP Low and Duplicate.
//
// Forms:
//
//	VMOVSLDUP m128 xmm
//	VMOVSLDUP m256 ymm
//	VMOVSLDUP xmm xmm
//	VMOVSLDUP ymm ymm
//	VMOVSLDUP m128 k xmm
//	VMOVSLDUP m256 k ymm
//	VMOVSLDUP xmm k xmm
//	VMOVSLDUP ymm k ymm
//	VMOVSLDUP m512 k zmm
//	VMOVSLDUP m512 zmm
//	VMOVSLDUP zmm k zmm
//	VMOVSLDUP zmm zmm
//
// Construct and append a VMOVSLDUP instruction to the active function.
func (c *Context) VMOVSLDUP(ops ...operand.Op) {
	c.addinstruction(x86.VMOVSLDUP(ops...))
}

// VMOVSLDUP: Move Packed Single-FP Low and Duplicate.
//
// Forms:
//
//	VMOVSLDUP m128 xmm
//	VMOVSLDUP m256 ymm
//	VMOVSLDUP xmm xmm
//	VMOVSLDUP ymm ymm
//	VMOVSLDUP m128 k xmm
//	VMOVSLDUP m256 k ymm
//	VMOVSLDUP xmm k xmm
//	VMOVSLDUP ymm k ymm
//	VMOVSLDUP m512 k zmm
//	VMOVSLDUP m512 zmm
//	VMOVSLDUP zmm k zmm
//	VMOVSLDUP zmm zmm
//
// Construct and append a VMOVSLDUP instruction to the active function.
// Operates on the global context.
func VMOVSLDUP(ops ...operand.Op) { ctx.VMOVSLDUP(ops...) }
|
|
|
|
// VMOVSLDUP_Z: Move Packed Single-FP Low and Duplicate (Zeroing Masking).
//
// Forms:
//
//	VMOVSLDUP.Z m128 k xmm
//	VMOVSLDUP.Z m256 k ymm
//	VMOVSLDUP.Z xmm k xmm
//	VMOVSLDUP.Z ymm k ymm
//	VMOVSLDUP.Z m512 k zmm
//	VMOVSLDUP.Z zmm k zmm
//
// Construct and append a VMOVSLDUP.Z instruction to the active function.
func (c *Context) VMOVSLDUP_Z(mxyz, k, xyz operand.Op) {
	c.addinstruction(x86.VMOVSLDUP_Z(mxyz, k, xyz))
}

// VMOVSLDUP_Z: Move Packed Single-FP Low and Duplicate (Zeroing Masking).
//
// Forms:
//
//	VMOVSLDUP.Z m128 k xmm
//	VMOVSLDUP.Z m256 k ymm
//	VMOVSLDUP.Z xmm k xmm
//	VMOVSLDUP.Z ymm k ymm
//	VMOVSLDUP.Z m512 k zmm
//	VMOVSLDUP.Z zmm k zmm
//
// Construct and append a VMOVSLDUP.Z instruction to the active function.
// Operates on the global context.
func VMOVSLDUP_Z(mxyz, k, xyz operand.Op) { ctx.VMOVSLDUP_Z(mxyz, k, xyz) }
|
|
|
|
// VMOVSS: Move Scalar Single-Precision Floating-Point Values.
//
// Forms:
//
//	VMOVSS m32 xmm
//	VMOVSS xmm m32
//	VMOVSS xmm xmm xmm
//	VMOVSS m32 k xmm
//	VMOVSS xmm k m32
//	VMOVSS xmm xmm k xmm
//
// Construct and append a VMOVSS instruction to the active function.
func (c *Context) VMOVSS(ops ...operand.Op) {
	c.addinstruction(x86.VMOVSS(ops...))
}

// VMOVSS: Move Scalar Single-Precision Floating-Point Values.
//
// Forms:
//
//	VMOVSS m32 xmm
//	VMOVSS xmm m32
//	VMOVSS xmm xmm xmm
//	VMOVSS m32 k xmm
//	VMOVSS xmm k m32
//	VMOVSS xmm xmm k xmm
//
// Construct and append a VMOVSS instruction to the active function.
// Operates on the global context.
func VMOVSS(ops ...operand.Op) { ctx.VMOVSS(ops...) }
|
|
|
|
// VMOVSS_Z: Move Scalar Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VMOVSS.Z m32 k xmm
//	VMOVSS.Z xmm xmm k xmm
//
// Construct and append a VMOVSS.Z instruction to the active function.
func (c *Context) VMOVSS_Z(ops ...operand.Op) {
	c.addinstruction(x86.VMOVSS_Z(ops...))
}

// VMOVSS_Z: Move Scalar Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VMOVSS.Z m32 k xmm
//	VMOVSS.Z xmm xmm k xmm
//
// Construct and append a VMOVSS.Z instruction to the active function.
// Operates on the global context.
func VMOVSS_Z(ops ...operand.Op) { ctx.VMOVSS_Z(ops...) }
|
|
|
|
// VMOVUPD: Move Unaligned Packed Double-Precision Floating-Point Values.
//
// Forms:
//
//	VMOVUPD m128 xmm
//	VMOVUPD m256 ymm
//	VMOVUPD xmm m128
//	VMOVUPD xmm xmm
//	VMOVUPD ymm m256
//	VMOVUPD ymm ymm
//	VMOVUPD m128 k xmm
//	VMOVUPD m256 k ymm
//	VMOVUPD xmm k m128
//	VMOVUPD xmm k xmm
//	VMOVUPD ymm k m256
//	VMOVUPD ymm k ymm
//	VMOVUPD m512 k zmm
//	VMOVUPD m512 zmm
//	VMOVUPD zmm k m512
//	VMOVUPD zmm k zmm
//	VMOVUPD zmm m512
//	VMOVUPD zmm zmm
//
// Construct and append a VMOVUPD instruction to the active function.
func (c *Context) VMOVUPD(ops ...operand.Op) {
	c.addinstruction(x86.VMOVUPD(ops...))
}

// VMOVUPD: Move Unaligned Packed Double-Precision Floating-Point Values.
//
// Forms:
//
//	VMOVUPD m128 xmm
//	VMOVUPD m256 ymm
//	VMOVUPD xmm m128
//	VMOVUPD xmm xmm
//	VMOVUPD ymm m256
//	VMOVUPD ymm ymm
//	VMOVUPD m128 k xmm
//	VMOVUPD m256 k ymm
//	VMOVUPD xmm k m128
//	VMOVUPD xmm k xmm
//	VMOVUPD ymm k m256
//	VMOVUPD ymm k ymm
//	VMOVUPD m512 k zmm
//	VMOVUPD m512 zmm
//	VMOVUPD zmm k m512
//	VMOVUPD zmm k zmm
//	VMOVUPD zmm m512
//	VMOVUPD zmm zmm
//
// Construct and append a VMOVUPD instruction to the active function.
// Operates on the global context.
func VMOVUPD(ops ...operand.Op) { ctx.VMOVUPD(ops...) }
|
|
|
|
// VMOVUPD_Z: Move Unaligned Packed Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VMOVUPD.Z m128 k xmm
//	VMOVUPD.Z m256 k ymm
//	VMOVUPD.Z xmm k m128
//	VMOVUPD.Z xmm k xmm
//	VMOVUPD.Z ymm k m256
//	VMOVUPD.Z ymm k ymm
//	VMOVUPD.Z m512 k zmm
//	VMOVUPD.Z zmm k m512
//	VMOVUPD.Z zmm k zmm
//
// Construct and append a VMOVUPD.Z instruction to the active function.
func (c *Context) VMOVUPD_Z(mxyz, k, mxyz1 operand.Op) {
	c.addinstruction(x86.VMOVUPD_Z(mxyz, k, mxyz1))
}

// VMOVUPD_Z: Move Unaligned Packed Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VMOVUPD.Z m128 k xmm
//	VMOVUPD.Z m256 k ymm
//	VMOVUPD.Z xmm k m128
//	VMOVUPD.Z xmm k xmm
//	VMOVUPD.Z ymm k m256
//	VMOVUPD.Z ymm k ymm
//	VMOVUPD.Z m512 k zmm
//	VMOVUPD.Z zmm k m512
//	VMOVUPD.Z zmm k zmm
//
// Construct and append a VMOVUPD.Z instruction to the active function.
// Operates on the global context.
func VMOVUPD_Z(mxyz, k, mxyz1 operand.Op) { ctx.VMOVUPD_Z(mxyz, k, mxyz1) }
|
|
|
|
// VMOVUPS: Move Unaligned Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	VMOVUPS m128 xmm
//	VMOVUPS m256 ymm
//	VMOVUPS xmm m128
//	VMOVUPS xmm xmm
//	VMOVUPS ymm m256
//	VMOVUPS ymm ymm
//	VMOVUPS m128 k xmm
//	VMOVUPS m256 k ymm
//	VMOVUPS xmm k m128
//	VMOVUPS xmm k xmm
//	VMOVUPS ymm k m256
//	VMOVUPS ymm k ymm
//	VMOVUPS m512 k zmm
//	VMOVUPS m512 zmm
//	VMOVUPS zmm k m512
//	VMOVUPS zmm k zmm
//	VMOVUPS zmm m512
//	VMOVUPS zmm zmm
//
// Construct and append a VMOVUPS instruction to the active function.
func (c *Context) VMOVUPS(ops ...operand.Op) {
	c.addinstruction(x86.VMOVUPS(ops...))
}

// VMOVUPS: Move Unaligned Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	VMOVUPS m128 xmm
//	VMOVUPS m256 ymm
//	VMOVUPS xmm m128
//	VMOVUPS xmm xmm
//	VMOVUPS ymm m256
//	VMOVUPS ymm ymm
//	VMOVUPS m128 k xmm
//	VMOVUPS m256 k ymm
//	VMOVUPS xmm k m128
//	VMOVUPS xmm k xmm
//	VMOVUPS ymm k m256
//	VMOVUPS ymm k ymm
//	VMOVUPS m512 k zmm
//	VMOVUPS m512 zmm
//	VMOVUPS zmm k m512
//	VMOVUPS zmm k zmm
//	VMOVUPS zmm m512
//	VMOVUPS zmm zmm
//
// Construct and append a VMOVUPS instruction to the active function.
// Operates on the global context.
func VMOVUPS(ops ...operand.Op) { ctx.VMOVUPS(ops...) }
|
|
|
|
// VMOVUPS_Z: Move Unaligned Packed Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VMOVUPS.Z m128 k xmm
//	VMOVUPS.Z m256 k ymm
//	VMOVUPS.Z xmm k m128
//	VMOVUPS.Z xmm k xmm
//	VMOVUPS.Z ymm k m256
//	VMOVUPS.Z ymm k ymm
//	VMOVUPS.Z m512 k zmm
//	VMOVUPS.Z zmm k m512
//	VMOVUPS.Z zmm k zmm
//
// Construct and append a VMOVUPS.Z instruction to the active function.
func (c *Context) VMOVUPS_Z(mxyz, k, mxyz1 operand.Op) {
	c.addinstruction(x86.VMOVUPS_Z(mxyz, k, mxyz1))
}

// VMOVUPS_Z: Move Unaligned Packed Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VMOVUPS.Z m128 k xmm
//	VMOVUPS.Z m256 k ymm
//	VMOVUPS.Z xmm k m128
//	VMOVUPS.Z xmm k xmm
//	VMOVUPS.Z ymm k m256
//	VMOVUPS.Z ymm k ymm
//	VMOVUPS.Z m512 k zmm
//	VMOVUPS.Z zmm k m512
//	VMOVUPS.Z zmm k zmm
//
// Construct and append a VMOVUPS.Z instruction to the active function.
// Operates on the global context.
func VMOVUPS_Z(mxyz, k, mxyz1 operand.Op) { ctx.VMOVUPS_Z(mxyz, k, mxyz1) }
|
|
|
|
// VMPSADBW: Compute Multiple Packed Sums of Absolute Difference.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMPSADBW imm8 m256 ymm ymm
|
|
// VMPSADBW imm8 ymm ymm ymm
|
|
// VMPSADBW imm8 m128 xmm xmm
|
|
// VMPSADBW imm8 xmm xmm xmm
|
|
//
|
|
// Construct and append a VMPSADBW instruction to the active function.
|
|
func (c *Context) VMPSADBW(i, mxy, xy, xy1 operand.Op) {
|
|
c.addinstruction(x86.VMPSADBW(i, mxy, xy, xy1))
|
|
}
|
|
|
|
// VMPSADBW: Compute Multiple Packed Sums of Absolute Difference.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMPSADBW imm8 m256 ymm ymm
|
|
// VMPSADBW imm8 ymm ymm ymm
|
|
// VMPSADBW imm8 m128 xmm xmm
|
|
// VMPSADBW imm8 xmm xmm xmm
|
|
//
|
|
// Construct and append a VMPSADBW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMPSADBW(i, mxy, xy, xy1 operand.Op) { ctx.VMPSADBW(i, mxy, xy, xy1) }
|
|
|
|
// VMULPD: Multiply Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULPD m128 xmm xmm
|
|
// VMULPD m256 ymm ymm
|
|
// VMULPD xmm xmm xmm
|
|
// VMULPD ymm ymm ymm
|
|
// VMULPD m128 xmm k xmm
|
|
// VMULPD m256 ymm k ymm
|
|
// VMULPD xmm xmm k xmm
|
|
// VMULPD ymm ymm k ymm
|
|
// VMULPD m512 zmm k zmm
|
|
// VMULPD m512 zmm zmm
|
|
// VMULPD zmm zmm k zmm
|
|
// VMULPD zmm zmm zmm
|
|
//
|
|
// Construct and append a VMULPD instruction to the active function.
|
|
func (c *Context) VMULPD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VMULPD(ops...))
|
|
}
|
|
|
|
// VMULPD: Multiply Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULPD m128 xmm xmm
|
|
// VMULPD m256 ymm ymm
|
|
// VMULPD xmm xmm xmm
|
|
// VMULPD ymm ymm ymm
|
|
// VMULPD m128 xmm k xmm
|
|
// VMULPD m256 ymm k ymm
|
|
// VMULPD xmm xmm k xmm
|
|
// VMULPD ymm ymm k ymm
|
|
// VMULPD m512 zmm k zmm
|
|
// VMULPD m512 zmm zmm
|
|
// VMULPD zmm zmm k zmm
|
|
// VMULPD zmm zmm zmm
|
|
//
|
|
// Construct and append a VMULPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMULPD(ops ...operand.Op) { ctx.VMULPD(ops...) }
|
|
|
|
// VMULPD_BCST: Multiply Packed Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULPD.BCST m64 xmm k xmm
|
|
// VMULPD.BCST m64 xmm xmm
|
|
// VMULPD.BCST m64 ymm k ymm
|
|
// VMULPD.BCST m64 ymm ymm
|
|
// VMULPD.BCST m64 zmm k zmm
|
|
// VMULPD.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VMULPD.BCST instruction to the active function.
|
|
func (c *Context) VMULPD_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VMULPD_BCST(ops...))
|
|
}
|
|
|
|
// VMULPD_BCST: Multiply Packed Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULPD.BCST m64 xmm k xmm
|
|
// VMULPD.BCST m64 xmm xmm
|
|
// VMULPD.BCST m64 ymm k ymm
|
|
// VMULPD.BCST m64 ymm ymm
|
|
// VMULPD.BCST m64 zmm k zmm
|
|
// VMULPD.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VMULPD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMULPD_BCST(ops ...operand.Op) { ctx.VMULPD_BCST(ops...) }
|
|
|
|
// VMULPD_BCST_Z: Multiply Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULPD.BCST.Z m64 xmm k xmm
|
|
// VMULPD.BCST.Z m64 ymm k ymm
|
|
// VMULPD.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VMULPD.BCST.Z instruction to the active function.
|
|
func (c *Context) VMULPD_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VMULPD_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VMULPD_BCST_Z: Multiply Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULPD.BCST.Z m64 xmm k xmm
|
|
// VMULPD.BCST.Z m64 ymm k ymm
|
|
// VMULPD.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VMULPD.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMULPD_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VMULPD_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VMULPD_RD_SAE: Multiply Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULPD.RD_SAE zmm zmm k zmm
|
|
// VMULPD.RD_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VMULPD.RD_SAE instruction to the active function.
|
|
func (c *Context) VMULPD_RD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VMULPD_RD_SAE(ops...))
|
|
}
|
|
|
|
// VMULPD_RD_SAE: Multiply Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULPD.RD_SAE zmm zmm k zmm
|
|
// VMULPD.RD_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VMULPD.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMULPD_RD_SAE(ops ...operand.Op) { ctx.VMULPD_RD_SAE(ops...) }
|
|
|
|
// VMULPD_RD_SAE_Z: Multiply Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULPD.RD_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VMULPD.RD_SAE.Z instruction to the active function.
|
|
func (c *Context) VMULPD_RD_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VMULPD_RD_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VMULPD_RD_SAE_Z: Multiply Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULPD.RD_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VMULPD.RD_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMULPD_RD_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VMULPD_RD_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VMULPD_RN_SAE: Multiply Packed Double-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULPD.RN_SAE zmm zmm k zmm
|
|
// VMULPD.RN_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VMULPD.RN_SAE instruction to the active function.
|
|
func (c *Context) VMULPD_RN_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VMULPD_RN_SAE(ops...))
|
|
}
|
|
|
|
// VMULPD_RN_SAE: Multiply Packed Double-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULPD.RN_SAE zmm zmm k zmm
|
|
// VMULPD.RN_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VMULPD.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMULPD_RN_SAE(ops ...operand.Op) { ctx.VMULPD_RN_SAE(ops...) }
|
|
|
|
// VMULPD_RN_SAE_Z: Multiply Packed Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULPD.RN_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VMULPD.RN_SAE.Z instruction to the active function.
|
|
func (c *Context) VMULPD_RN_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VMULPD_RN_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VMULPD_RN_SAE_Z: Multiply Packed Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULPD.RN_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VMULPD.RN_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMULPD_RN_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VMULPD_RN_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VMULPD_RU_SAE: Multiply Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULPD.RU_SAE zmm zmm k zmm
|
|
// VMULPD.RU_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VMULPD.RU_SAE instruction to the active function.
|
|
func (c *Context) VMULPD_RU_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VMULPD_RU_SAE(ops...))
|
|
}
|
|
|
|
// VMULPD_RU_SAE: Multiply Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULPD.RU_SAE zmm zmm k zmm
|
|
// VMULPD.RU_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VMULPD.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMULPD_RU_SAE(ops ...operand.Op) { ctx.VMULPD_RU_SAE(ops...) }
|
|
|
|
// VMULPD_RU_SAE_Z: Multiply Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULPD.RU_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VMULPD.RU_SAE.Z instruction to the active function.
|
|
func (c *Context) VMULPD_RU_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VMULPD_RU_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VMULPD_RU_SAE_Z: Multiply Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULPD.RU_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VMULPD.RU_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMULPD_RU_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VMULPD_RU_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VMULPD_RZ_SAE: Multiply Packed Double-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULPD.RZ_SAE zmm zmm k zmm
|
|
// VMULPD.RZ_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VMULPD.RZ_SAE instruction to the active function.
|
|
func (c *Context) VMULPD_RZ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VMULPD_RZ_SAE(ops...))
|
|
}
|
|
|
|
// VMULPD_RZ_SAE: Multiply Packed Double-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULPD.RZ_SAE zmm zmm k zmm
|
|
// VMULPD.RZ_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VMULPD.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMULPD_RZ_SAE(ops ...operand.Op) { ctx.VMULPD_RZ_SAE(ops...) }
|
|
|
|
// VMULPD_RZ_SAE_Z: Multiply Packed Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULPD.RZ_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VMULPD.RZ_SAE.Z instruction to the active function.
|
|
func (c *Context) VMULPD_RZ_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VMULPD_RZ_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VMULPD_RZ_SAE_Z: Multiply Packed Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULPD.RZ_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VMULPD.RZ_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMULPD_RZ_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VMULPD_RZ_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VMULPD_Z: Multiply Packed Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULPD.Z m128 xmm k xmm
|
|
// VMULPD.Z m256 ymm k ymm
|
|
// VMULPD.Z xmm xmm k xmm
|
|
// VMULPD.Z ymm ymm k ymm
|
|
// VMULPD.Z m512 zmm k zmm
|
|
// VMULPD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VMULPD.Z instruction to the active function.
|
|
func (c *Context) VMULPD_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VMULPD_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VMULPD_Z: Multiply Packed Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULPD.Z m128 xmm k xmm
|
|
// VMULPD.Z m256 ymm k ymm
|
|
// VMULPD.Z xmm xmm k xmm
|
|
// VMULPD.Z ymm ymm k ymm
|
|
// VMULPD.Z m512 zmm k zmm
|
|
// VMULPD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VMULPD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMULPD_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VMULPD_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VMULPS: Multiply Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULPS m128 xmm xmm
|
|
// VMULPS m256 ymm ymm
|
|
// VMULPS xmm xmm xmm
|
|
// VMULPS ymm ymm ymm
|
|
// VMULPS m128 xmm k xmm
|
|
// VMULPS m256 ymm k ymm
|
|
// VMULPS xmm xmm k xmm
|
|
// VMULPS ymm ymm k ymm
|
|
// VMULPS m512 zmm k zmm
|
|
// VMULPS m512 zmm zmm
|
|
// VMULPS zmm zmm k zmm
|
|
// VMULPS zmm zmm zmm
|
|
//
|
|
// Construct and append a VMULPS instruction to the active function.
|
|
func (c *Context) VMULPS(ops ...operand.Op) {
|
|
c.addinstruction(x86.VMULPS(ops...))
|
|
}
|
|
|
|
// VMULPS: Multiply Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULPS m128 xmm xmm
|
|
// VMULPS m256 ymm ymm
|
|
// VMULPS xmm xmm xmm
|
|
// VMULPS ymm ymm ymm
|
|
// VMULPS m128 xmm k xmm
|
|
// VMULPS m256 ymm k ymm
|
|
// VMULPS xmm xmm k xmm
|
|
// VMULPS ymm ymm k ymm
|
|
// VMULPS m512 zmm k zmm
|
|
// VMULPS m512 zmm zmm
|
|
// VMULPS zmm zmm k zmm
|
|
// VMULPS zmm zmm zmm
|
|
//
|
|
// Construct and append a VMULPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMULPS(ops ...operand.Op) { ctx.VMULPS(ops...) }
|
|
|
|
// VMULPS_BCST: Multiply Packed Single-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULPS.BCST m32 xmm k xmm
|
|
// VMULPS.BCST m32 xmm xmm
|
|
// VMULPS.BCST m32 ymm k ymm
|
|
// VMULPS.BCST m32 ymm ymm
|
|
// VMULPS.BCST m32 zmm k zmm
|
|
// VMULPS.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VMULPS.BCST instruction to the active function.
|
|
func (c *Context) VMULPS_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VMULPS_BCST(ops...))
|
|
}
|
|
|
|
// VMULPS_BCST: Multiply Packed Single-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULPS.BCST m32 xmm k xmm
|
|
// VMULPS.BCST m32 xmm xmm
|
|
// VMULPS.BCST m32 ymm k ymm
|
|
// VMULPS.BCST m32 ymm ymm
|
|
// VMULPS.BCST m32 zmm k zmm
|
|
// VMULPS.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VMULPS.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMULPS_BCST(ops ...operand.Op) { ctx.VMULPS_BCST(ops...) }
|
|
|
|
// VMULPS_BCST_Z: Multiply Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULPS.BCST.Z m32 xmm k xmm
|
|
// VMULPS.BCST.Z m32 ymm k ymm
|
|
// VMULPS.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VMULPS.BCST.Z instruction to the active function.
|
|
func (c *Context) VMULPS_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VMULPS_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VMULPS_BCST_Z: Multiply Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULPS.BCST.Z m32 xmm k xmm
|
|
// VMULPS.BCST.Z m32 ymm k ymm
|
|
// VMULPS.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VMULPS.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMULPS_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VMULPS_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VMULPS_RD_SAE: Multiply Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULPS.RD_SAE zmm zmm k zmm
|
|
// VMULPS.RD_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VMULPS.RD_SAE instruction to the active function.
|
|
func (c *Context) VMULPS_RD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VMULPS_RD_SAE(ops...))
|
|
}
|
|
|
|
// VMULPS_RD_SAE: Multiply Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULPS.RD_SAE zmm zmm k zmm
|
|
// VMULPS.RD_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VMULPS.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMULPS_RD_SAE(ops ...operand.Op) { ctx.VMULPS_RD_SAE(ops...) }
|
|
|
|
// VMULPS_RD_SAE_Z: Multiply Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULPS.RD_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VMULPS.RD_SAE.Z instruction to the active function.
|
|
func (c *Context) VMULPS_RD_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VMULPS_RD_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VMULPS_RD_SAE_Z: Multiply Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULPS.RD_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VMULPS.RD_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMULPS_RD_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VMULPS_RD_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VMULPS_RN_SAE: Multiply Packed Single-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULPS.RN_SAE zmm zmm k zmm
|
|
// VMULPS.RN_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VMULPS.RN_SAE instruction to the active function.
|
|
func (c *Context) VMULPS_RN_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VMULPS_RN_SAE(ops...))
|
|
}
|
|
|
|
// VMULPS_RN_SAE: Multiply Packed Single-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULPS.RN_SAE zmm zmm k zmm
|
|
// VMULPS.RN_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VMULPS.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMULPS_RN_SAE(ops ...operand.Op) { ctx.VMULPS_RN_SAE(ops...) }
|
|
|
|
// VMULPS_RN_SAE_Z: Multiply Packed Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULPS.RN_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VMULPS.RN_SAE.Z instruction to the active function.
|
|
func (c *Context) VMULPS_RN_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VMULPS_RN_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VMULPS_RN_SAE_Z: Multiply Packed Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULPS.RN_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VMULPS.RN_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMULPS_RN_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VMULPS_RN_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VMULPS_RU_SAE: Multiply Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULPS.RU_SAE zmm zmm k zmm
|
|
// VMULPS.RU_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VMULPS.RU_SAE instruction to the active function.
|
|
func (c *Context) VMULPS_RU_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VMULPS_RU_SAE(ops...))
|
|
}
|
|
|
|
// VMULPS_RU_SAE: Multiply Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULPS.RU_SAE zmm zmm k zmm
|
|
// VMULPS.RU_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VMULPS.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMULPS_RU_SAE(ops ...operand.Op) { ctx.VMULPS_RU_SAE(ops...) }
|
|
|
|
// VMULPS_RU_SAE_Z: Multiply Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULPS.RU_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VMULPS.RU_SAE.Z instruction to the active function.
|
|
func (c *Context) VMULPS_RU_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VMULPS_RU_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VMULPS_RU_SAE_Z: Multiply Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULPS.RU_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VMULPS.RU_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMULPS_RU_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VMULPS_RU_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VMULPS_RZ_SAE: Multiply Packed Single-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULPS.RZ_SAE zmm zmm k zmm
|
|
// VMULPS.RZ_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VMULPS.RZ_SAE instruction to the active function.
|
|
func (c *Context) VMULPS_RZ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VMULPS_RZ_SAE(ops...))
|
|
}
|
|
|
|
// VMULPS_RZ_SAE: Multiply Packed Single-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULPS.RZ_SAE zmm zmm k zmm
|
|
// VMULPS.RZ_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VMULPS.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMULPS_RZ_SAE(ops ...operand.Op) { ctx.VMULPS_RZ_SAE(ops...) }
|
|
|
|
// VMULPS_RZ_SAE_Z: Multiply Packed Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULPS.RZ_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VMULPS.RZ_SAE.Z instruction to the active function.
|
|
func (c *Context) VMULPS_RZ_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VMULPS_RZ_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VMULPS_RZ_SAE_Z: Multiply Packed Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULPS.RZ_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VMULPS.RZ_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMULPS_RZ_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VMULPS_RZ_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VMULPS_Z: Multiply Packed Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULPS.Z m128 xmm k xmm
|
|
// VMULPS.Z m256 ymm k ymm
|
|
// VMULPS.Z xmm xmm k xmm
|
|
// VMULPS.Z ymm ymm k ymm
|
|
// VMULPS.Z m512 zmm k zmm
|
|
// VMULPS.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VMULPS.Z instruction to the active function.
|
|
func (c *Context) VMULPS_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VMULPS_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VMULPS_Z: Multiply Packed Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULPS.Z m128 xmm k xmm
|
|
// VMULPS.Z m256 ymm k ymm
|
|
// VMULPS.Z xmm xmm k xmm
|
|
// VMULPS.Z ymm ymm k ymm
|
|
// VMULPS.Z m512 zmm k zmm
|
|
// VMULPS.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VMULPS.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMULPS_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VMULPS_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VMULSD: Multiply Scalar Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULSD m64 xmm xmm
|
|
// VMULSD xmm xmm xmm
|
|
// VMULSD m64 xmm k xmm
|
|
// VMULSD xmm xmm k xmm
|
|
//
|
|
// Construct and append a VMULSD instruction to the active function.
|
|
func (c *Context) VMULSD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VMULSD(ops...))
|
|
}
|
|
|
|
// VMULSD: Multiply Scalar Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULSD m64 xmm xmm
|
|
// VMULSD xmm xmm xmm
|
|
// VMULSD m64 xmm k xmm
|
|
// VMULSD xmm xmm k xmm
|
|
//
|
|
// Construct and append a VMULSD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMULSD(ops ...operand.Op) { ctx.VMULSD(ops...) }
|
|
|
|
// VMULSD_RD_SAE: Multiply Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULSD.RD_SAE xmm xmm k xmm
|
|
// VMULSD.RD_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VMULSD.RD_SAE instruction to the active function.
|
|
func (c *Context) VMULSD_RD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VMULSD_RD_SAE(ops...))
|
|
}
|
|
|
|
// VMULSD_RD_SAE: Multiply Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULSD.RD_SAE xmm xmm k xmm
|
|
// VMULSD.RD_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VMULSD.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMULSD_RD_SAE(ops ...operand.Op) { ctx.VMULSD_RD_SAE(ops...) }
|
|
|
|
// VMULSD_RD_SAE_Z: Multiply Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULSD.RD_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VMULSD.RD_SAE.Z instruction to the active function.
|
|
func (c *Context) VMULSD_RD_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VMULSD_RD_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VMULSD_RD_SAE_Z: Multiply Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULSD.RD_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VMULSD.RD_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMULSD_RD_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VMULSD_RD_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VMULSD_RN_SAE: Multiply Scalar Double-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULSD.RN_SAE xmm xmm k xmm
|
|
// VMULSD.RN_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VMULSD.RN_SAE instruction to the active function.
|
|
func (c *Context) VMULSD_RN_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VMULSD_RN_SAE(ops...))
|
|
}
|
|
|
|
// VMULSD_RN_SAE: Multiply Scalar Double-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULSD.RN_SAE xmm xmm k xmm
|
|
// VMULSD.RN_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VMULSD.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMULSD_RN_SAE(ops ...operand.Op) { ctx.VMULSD_RN_SAE(ops...) }
|
|
|
|
// VMULSD_RN_SAE_Z: Multiply Scalar Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULSD.RN_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VMULSD.RN_SAE.Z instruction to the active function.
|
|
func (c *Context) VMULSD_RN_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VMULSD_RN_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VMULSD_RN_SAE_Z: Multiply Scalar Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULSD.RN_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VMULSD.RN_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMULSD_RN_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VMULSD_RN_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VMULSD_RU_SAE: Multiply Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULSD.RU_SAE xmm xmm k xmm
|
|
// VMULSD.RU_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VMULSD.RU_SAE instruction to the active function.
|
|
func (c *Context) VMULSD_RU_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VMULSD_RU_SAE(ops...))
|
|
}
|
|
|
|
// VMULSD_RU_SAE: Multiply Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULSD.RU_SAE xmm xmm k xmm
|
|
// VMULSD.RU_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VMULSD.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMULSD_RU_SAE(ops ...operand.Op) { ctx.VMULSD_RU_SAE(ops...) }
|
|
|
|
// VMULSD_RU_SAE_Z: Multiply Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULSD.RU_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VMULSD.RU_SAE.Z instruction to the active function.
|
|
func (c *Context) VMULSD_RU_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VMULSD_RU_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VMULSD_RU_SAE_Z: Multiply Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULSD.RU_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VMULSD.RU_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMULSD_RU_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VMULSD_RU_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VMULSD_RZ_SAE: Multiply Scalar Double-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULSD.RZ_SAE xmm xmm k xmm
|
|
// VMULSD.RZ_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VMULSD.RZ_SAE instruction to the active function.
|
|
func (c *Context) VMULSD_RZ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VMULSD_RZ_SAE(ops...))
|
|
}
|
|
|
|
// VMULSD_RZ_SAE: Multiply Scalar Double-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULSD.RZ_SAE xmm xmm k xmm
|
|
// VMULSD.RZ_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VMULSD.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMULSD_RZ_SAE(ops ...operand.Op) { ctx.VMULSD_RZ_SAE(ops...) }
|
|
|
|
// VMULSD_RZ_SAE_Z: Multiply Scalar Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULSD.RZ_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VMULSD.RZ_SAE.Z instruction to the active function.
|
|
func (c *Context) VMULSD_RZ_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VMULSD_RZ_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VMULSD_RZ_SAE_Z: Multiply Scalar Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULSD.RZ_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VMULSD.RZ_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMULSD_RZ_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VMULSD_RZ_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VMULSD_Z: Multiply Scalar Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULSD.Z m64 xmm k xmm
|
|
// VMULSD.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VMULSD.Z instruction to the active function.
|
|
func (c *Context) VMULSD_Z(mx, x, k, x1 operand.Op) {
|
|
c.addinstruction(x86.VMULSD_Z(mx, x, k, x1))
|
|
}
|
|
|
|
// VMULSD_Z: Multiply Scalar Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VMULSD.Z m64 xmm k xmm
|
|
// VMULSD.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VMULSD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VMULSD_Z(mx, x, k, x1 operand.Op) { ctx.VMULSD_Z(mx, x, k, x1) }
|
|
|
|
// VMULSS: Multiply Scalar Single-Precision Floating-Point Values.
//
// Forms:
//
//	VMULSS m32 xmm xmm
//	VMULSS xmm xmm xmm
//	VMULSS m32 xmm k xmm
//	VMULSS xmm xmm k xmm
//
// Construct and append a VMULSS instruction to the active function.
func (c *Context) VMULSS(ops ...operand.Op) {
	c.addinstruction(x86.VMULSS(ops...))
}

// VMULSS: Multiply Scalar Single-Precision Floating-Point Values.
//
// Forms:
//
//	VMULSS m32 xmm xmm
//	VMULSS xmm xmm xmm
//	VMULSS m32 xmm k xmm
//	VMULSS xmm xmm k xmm
//
// Construct and append a VMULSS instruction to the active function.
// Operates on the global context.
func VMULSS(ops ...operand.Op) { ctx.VMULSS(ops...) }

// VMULSS_RD_SAE: Multiply Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VMULSS.RD_SAE xmm xmm k xmm
//	VMULSS.RD_SAE xmm xmm xmm
//
// Construct and append a VMULSS.RD_SAE instruction to the active function.
func (c *Context) VMULSS_RD_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VMULSS_RD_SAE(ops...))
}

// VMULSS_RD_SAE: Multiply Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VMULSS.RD_SAE xmm xmm k xmm
//	VMULSS.RD_SAE xmm xmm xmm
//
// Construct and append a VMULSS.RD_SAE instruction to the active function.
// Operates on the global context.
func VMULSS_RD_SAE(ops ...operand.Op) { ctx.VMULSS_RD_SAE(ops...) }

// VMULSS_RD_SAE_Z: Multiply Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VMULSS.RD_SAE.Z xmm xmm k xmm
//
// Construct and append a VMULSS.RD_SAE.Z instruction to the active function.
func (c *Context) VMULSS_RD_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VMULSS_RD_SAE_Z(x, x1, k, x2))
}

// VMULSS_RD_SAE_Z: Multiply Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VMULSS.RD_SAE.Z xmm xmm k xmm
//
// Construct and append a VMULSS.RD_SAE.Z instruction to the active function.
// Operates on the global context.
func VMULSS_RD_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VMULSS_RD_SAE_Z(x, x1, k, x2) }

// VMULSS_RN_SAE: Multiply Scalar Single-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VMULSS.RN_SAE xmm xmm k xmm
//	VMULSS.RN_SAE xmm xmm xmm
//
// Construct and append a VMULSS.RN_SAE instruction to the active function.
func (c *Context) VMULSS_RN_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VMULSS_RN_SAE(ops...))
}

// VMULSS_RN_SAE: Multiply Scalar Single-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VMULSS.RN_SAE xmm xmm k xmm
//	VMULSS.RN_SAE xmm xmm xmm
//
// Construct and append a VMULSS.RN_SAE instruction to the active function.
// Operates on the global context.
func VMULSS_RN_SAE(ops ...operand.Op) { ctx.VMULSS_RN_SAE(ops...) }

// VMULSS_RN_SAE_Z: Multiply Scalar Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VMULSS.RN_SAE.Z xmm xmm k xmm
//
// Construct and append a VMULSS.RN_SAE.Z instruction to the active function.
func (c *Context) VMULSS_RN_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VMULSS_RN_SAE_Z(x, x1, k, x2))
}

// VMULSS_RN_SAE_Z: Multiply Scalar Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VMULSS.RN_SAE.Z xmm xmm k xmm
//
// Construct and append a VMULSS.RN_SAE.Z instruction to the active function.
// Operates on the global context.
func VMULSS_RN_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VMULSS_RN_SAE_Z(x, x1, k, x2) }

// VMULSS_RU_SAE: Multiply Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VMULSS.RU_SAE xmm xmm k xmm
//	VMULSS.RU_SAE xmm xmm xmm
//
// Construct and append a VMULSS.RU_SAE instruction to the active function.
func (c *Context) VMULSS_RU_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VMULSS_RU_SAE(ops...))
}

// VMULSS_RU_SAE: Multiply Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VMULSS.RU_SAE xmm xmm k xmm
//	VMULSS.RU_SAE xmm xmm xmm
//
// Construct and append a VMULSS.RU_SAE instruction to the active function.
// Operates on the global context.
func VMULSS_RU_SAE(ops ...operand.Op) { ctx.VMULSS_RU_SAE(ops...) }

// VMULSS_RU_SAE_Z: Multiply Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VMULSS.RU_SAE.Z xmm xmm k xmm
//
// Construct and append a VMULSS.RU_SAE.Z instruction to the active function.
func (c *Context) VMULSS_RU_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VMULSS_RU_SAE_Z(x, x1, k, x2))
}

// VMULSS_RU_SAE_Z: Multiply Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VMULSS.RU_SAE.Z xmm xmm k xmm
//
// Construct and append a VMULSS.RU_SAE.Z instruction to the active function.
// Operates on the global context.
func VMULSS_RU_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VMULSS_RU_SAE_Z(x, x1, k, x2) }

// VMULSS_RZ_SAE: Multiply Scalar Single-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VMULSS.RZ_SAE xmm xmm k xmm
//	VMULSS.RZ_SAE xmm xmm xmm
//
// Construct and append a VMULSS.RZ_SAE instruction to the active function.
func (c *Context) VMULSS_RZ_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VMULSS_RZ_SAE(ops...))
}

// VMULSS_RZ_SAE: Multiply Scalar Single-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VMULSS.RZ_SAE xmm xmm k xmm
//	VMULSS.RZ_SAE xmm xmm xmm
//
// Construct and append a VMULSS.RZ_SAE instruction to the active function.
// Operates on the global context.
func VMULSS_RZ_SAE(ops ...operand.Op) { ctx.VMULSS_RZ_SAE(ops...) }

// VMULSS_RZ_SAE_Z: Multiply Scalar Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VMULSS.RZ_SAE.Z xmm xmm k xmm
//
// Construct and append a VMULSS.RZ_SAE.Z instruction to the active function.
func (c *Context) VMULSS_RZ_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VMULSS_RZ_SAE_Z(x, x1, k, x2))
}

// VMULSS_RZ_SAE_Z: Multiply Scalar Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VMULSS.RZ_SAE.Z xmm xmm k xmm
//
// Construct and append a VMULSS.RZ_SAE.Z instruction to the active function.
// Operates on the global context.
func VMULSS_RZ_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VMULSS_RZ_SAE_Z(x, x1, k, x2) }

// VMULSS_Z: Multiply Scalar Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VMULSS.Z m32 xmm k xmm
//	VMULSS.Z xmm xmm k xmm
//
// Construct and append a VMULSS.Z instruction to the active function.
func (c *Context) VMULSS_Z(mx, x, k, x1 operand.Op) {
	c.addinstruction(x86.VMULSS_Z(mx, x, k, x1))
}

// VMULSS_Z: Multiply Scalar Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VMULSS.Z m32 xmm k xmm
//	VMULSS.Z xmm xmm k xmm
//
// Construct and append a VMULSS.Z instruction to the active function.
// Operates on the global context.
func VMULSS_Z(mx, x, k, x1 operand.Op) { ctx.VMULSS_Z(mx, x, k, x1) }
|
|
|
|
// VORPD: Bitwise Logical OR of Double-Precision Floating-Point Values.
//
// Forms:
//
//	VORPD m128 xmm xmm
//	VORPD m256 ymm ymm
//	VORPD xmm xmm xmm
//	VORPD ymm ymm ymm
//	VORPD m128 xmm k xmm
//	VORPD m256 ymm k ymm
//	VORPD xmm xmm k xmm
//	VORPD ymm ymm k ymm
//	VORPD m512 zmm k zmm
//	VORPD m512 zmm zmm
//	VORPD zmm zmm k zmm
//	VORPD zmm zmm zmm
//
// Construct and append a VORPD instruction to the active function.
func (c *Context) VORPD(ops ...operand.Op) {
	c.addinstruction(x86.VORPD(ops...))
}

// VORPD: Bitwise Logical OR of Double-Precision Floating-Point Values.
//
// Forms:
//
//	VORPD m128 xmm xmm
//	VORPD m256 ymm ymm
//	VORPD xmm xmm xmm
//	VORPD ymm ymm ymm
//	VORPD m128 xmm k xmm
//	VORPD m256 ymm k ymm
//	VORPD xmm xmm k xmm
//	VORPD ymm ymm k ymm
//	VORPD m512 zmm k zmm
//	VORPD m512 zmm zmm
//	VORPD zmm zmm k zmm
//	VORPD zmm zmm zmm
//
// Construct and append a VORPD instruction to the active function.
// Operates on the global context.
func VORPD(ops ...operand.Op) { ctx.VORPD(ops...) }

// VORPD_BCST: Bitwise Logical OR of Double-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VORPD.BCST m64 xmm k xmm
//	VORPD.BCST m64 xmm xmm
//	VORPD.BCST m64 ymm k ymm
//	VORPD.BCST m64 ymm ymm
//	VORPD.BCST m64 zmm k zmm
//	VORPD.BCST m64 zmm zmm
//
// Construct and append a VORPD.BCST instruction to the active function.
func (c *Context) VORPD_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VORPD_BCST(ops...))
}

// VORPD_BCST: Bitwise Logical OR of Double-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VORPD.BCST m64 xmm k xmm
//	VORPD.BCST m64 xmm xmm
//	VORPD.BCST m64 ymm k ymm
//	VORPD.BCST m64 ymm ymm
//	VORPD.BCST m64 zmm k zmm
//	VORPD.BCST m64 zmm zmm
//
// Construct and append a VORPD.BCST instruction to the active function.
// Operates on the global context.
func VORPD_BCST(ops ...operand.Op) { ctx.VORPD_BCST(ops...) }

// VORPD_BCST_Z: Bitwise Logical OR of Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VORPD.BCST.Z m64 xmm k xmm
//	VORPD.BCST.Z m64 ymm k ymm
//	VORPD.BCST.Z m64 zmm k zmm
//
// Construct and append a VORPD.BCST.Z instruction to the active function.
func (c *Context) VORPD_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VORPD_BCST_Z(m, xyz, k, xyz1))
}

// VORPD_BCST_Z: Bitwise Logical OR of Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VORPD.BCST.Z m64 xmm k xmm
//	VORPD.BCST.Z m64 ymm k ymm
//	VORPD.BCST.Z m64 zmm k zmm
//
// Construct and append a VORPD.BCST.Z instruction to the active function.
// Operates on the global context.
func VORPD_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VORPD_BCST_Z(m, xyz, k, xyz1) }

// VORPD_Z: Bitwise Logical OR of Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VORPD.Z m128 xmm k xmm
//	VORPD.Z m256 ymm k ymm
//	VORPD.Z xmm xmm k xmm
//	VORPD.Z ymm ymm k ymm
//	VORPD.Z m512 zmm k zmm
//	VORPD.Z zmm zmm k zmm
//
// Construct and append a VORPD.Z instruction to the active function.
func (c *Context) VORPD_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VORPD_Z(mxyz, xyz, k, xyz1))
}

// VORPD_Z: Bitwise Logical OR of Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VORPD.Z m128 xmm k xmm
//	VORPD.Z m256 ymm k ymm
//	VORPD.Z xmm xmm k xmm
//	VORPD.Z ymm ymm k ymm
//	VORPD.Z m512 zmm k zmm
//	VORPD.Z zmm zmm k zmm
//
// Construct and append a VORPD.Z instruction to the active function.
// Operates on the global context.
func VORPD_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VORPD_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VORPS: Bitwise Logical OR of Single-Precision Floating-Point Values.
//
// Forms:
//
//	VORPS m128 xmm xmm
//	VORPS m256 ymm ymm
//	VORPS xmm xmm xmm
//	VORPS ymm ymm ymm
//	VORPS m128 xmm k xmm
//	VORPS m256 ymm k ymm
//	VORPS xmm xmm k xmm
//	VORPS ymm ymm k ymm
//	VORPS m512 zmm k zmm
//	VORPS m512 zmm zmm
//	VORPS zmm zmm k zmm
//	VORPS zmm zmm zmm
//
// Construct and append a VORPS instruction to the active function.
func (c *Context) VORPS(ops ...operand.Op) {
	c.addinstruction(x86.VORPS(ops...))
}

// VORPS: Bitwise Logical OR of Single-Precision Floating-Point Values.
//
// Forms:
//
//	VORPS m128 xmm xmm
//	VORPS m256 ymm ymm
//	VORPS xmm xmm xmm
//	VORPS ymm ymm ymm
//	VORPS m128 xmm k xmm
//	VORPS m256 ymm k ymm
//	VORPS xmm xmm k xmm
//	VORPS ymm ymm k ymm
//	VORPS m512 zmm k zmm
//	VORPS m512 zmm zmm
//	VORPS zmm zmm k zmm
//	VORPS zmm zmm zmm
//
// Construct and append a VORPS instruction to the active function.
// Operates on the global context.
func VORPS(ops ...operand.Op) { ctx.VORPS(ops...) }

// VORPS_BCST: Bitwise Logical OR of Single-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VORPS.BCST m32 xmm k xmm
//	VORPS.BCST m32 xmm xmm
//	VORPS.BCST m32 ymm k ymm
//	VORPS.BCST m32 ymm ymm
//	VORPS.BCST m32 zmm k zmm
//	VORPS.BCST m32 zmm zmm
//
// Construct and append a VORPS.BCST instruction to the active function.
func (c *Context) VORPS_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VORPS_BCST(ops...))
}

// VORPS_BCST: Bitwise Logical OR of Single-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VORPS.BCST m32 xmm k xmm
//	VORPS.BCST m32 xmm xmm
//	VORPS.BCST m32 ymm k ymm
//	VORPS.BCST m32 ymm ymm
//	VORPS.BCST m32 zmm k zmm
//	VORPS.BCST m32 zmm zmm
//
// Construct and append a VORPS.BCST instruction to the active function.
// Operates on the global context.
func VORPS_BCST(ops ...operand.Op) { ctx.VORPS_BCST(ops...) }

// VORPS_BCST_Z: Bitwise Logical OR of Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VORPS.BCST.Z m32 xmm k xmm
//	VORPS.BCST.Z m32 ymm k ymm
//	VORPS.BCST.Z m32 zmm k zmm
//
// Construct and append a VORPS.BCST.Z instruction to the active function.
func (c *Context) VORPS_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VORPS_BCST_Z(m, xyz, k, xyz1))
}

// VORPS_BCST_Z: Bitwise Logical OR of Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VORPS.BCST.Z m32 xmm k xmm
//	VORPS.BCST.Z m32 ymm k ymm
//	VORPS.BCST.Z m32 zmm k zmm
//
// Construct and append a VORPS.BCST.Z instruction to the active function.
// Operates on the global context.
func VORPS_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VORPS_BCST_Z(m, xyz, k, xyz1) }

// VORPS_Z: Bitwise Logical OR of Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VORPS.Z m128 xmm k xmm
//	VORPS.Z m256 ymm k ymm
//	VORPS.Z xmm xmm k xmm
//	VORPS.Z ymm ymm k ymm
//	VORPS.Z m512 zmm k zmm
//	VORPS.Z zmm zmm k zmm
//
// Construct and append a VORPS.Z instruction to the active function.
func (c *Context) VORPS_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VORPS_Z(mxyz, xyz, k, xyz1))
}

// VORPS_Z: Bitwise Logical OR of Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VORPS.Z m128 xmm k xmm
//	VORPS.Z m256 ymm k ymm
//	VORPS.Z xmm xmm k xmm
//	VORPS.Z ymm ymm k ymm
//	VORPS.Z m512 zmm k zmm
//	VORPS.Z zmm zmm k zmm
//
// Construct and append a VORPS.Z instruction to the active function.
// Operates on the global context.
func VORPS_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VORPS_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPABSB: Packed Absolute Value of Byte Integers.
//
// Forms:
//
//	VPABSB m256 ymm
//	VPABSB ymm ymm
//	VPABSB m128 xmm
//	VPABSB xmm xmm
//	VPABSB m128 k xmm
//	VPABSB m256 k ymm
//	VPABSB xmm k xmm
//	VPABSB ymm k ymm
//	VPABSB m512 k zmm
//	VPABSB m512 zmm
//	VPABSB zmm k zmm
//	VPABSB zmm zmm
//
// Construct and append a VPABSB instruction to the active function.
func (c *Context) VPABSB(ops ...operand.Op) {
	c.addinstruction(x86.VPABSB(ops...))
}

// VPABSB: Packed Absolute Value of Byte Integers.
//
// Forms:
//
//	VPABSB m256 ymm
//	VPABSB ymm ymm
//	VPABSB m128 xmm
//	VPABSB xmm xmm
//	VPABSB m128 k xmm
//	VPABSB m256 k ymm
//	VPABSB xmm k xmm
//	VPABSB ymm k ymm
//	VPABSB m512 k zmm
//	VPABSB m512 zmm
//	VPABSB zmm k zmm
//	VPABSB zmm zmm
//
// Construct and append a VPABSB instruction to the active function.
// Operates on the global context.
func VPABSB(ops ...operand.Op) { ctx.VPABSB(ops...) }

// VPABSB_Z: Packed Absolute Value of Byte Integers (Zeroing Masking).
//
// Forms:
//
//	VPABSB.Z m128 k xmm
//	VPABSB.Z m256 k ymm
//	VPABSB.Z xmm k xmm
//	VPABSB.Z ymm k ymm
//	VPABSB.Z m512 k zmm
//	VPABSB.Z zmm k zmm
//
// Construct and append a VPABSB.Z instruction to the active function.
func (c *Context) VPABSB_Z(mxyz, k, xyz operand.Op) {
	c.addinstruction(x86.VPABSB_Z(mxyz, k, xyz))
}

// VPABSB_Z: Packed Absolute Value of Byte Integers (Zeroing Masking).
//
// Forms:
//
//	VPABSB.Z m128 k xmm
//	VPABSB.Z m256 k ymm
//	VPABSB.Z xmm k xmm
//	VPABSB.Z ymm k ymm
//	VPABSB.Z m512 k zmm
//	VPABSB.Z zmm k zmm
//
// Construct and append a VPABSB.Z instruction to the active function.
// Operates on the global context.
func VPABSB_Z(mxyz, k, xyz operand.Op) { ctx.VPABSB_Z(mxyz, k, xyz) }
|
|
|
|
// VPABSD: Packed Absolute Value of Doubleword Integers.
//
// Forms:
//
//	VPABSD m256 ymm
//	VPABSD ymm ymm
//	VPABSD m128 xmm
//	VPABSD xmm xmm
//	VPABSD m128 k xmm
//	VPABSD m256 k ymm
//	VPABSD xmm k xmm
//	VPABSD ymm k ymm
//	VPABSD m512 k zmm
//	VPABSD m512 zmm
//	VPABSD zmm k zmm
//	VPABSD zmm zmm
//
// Construct and append a VPABSD instruction to the active function.
func (c *Context) VPABSD(ops ...operand.Op) {
	c.addinstruction(x86.VPABSD(ops...))
}

// VPABSD: Packed Absolute Value of Doubleword Integers.
//
// Forms:
//
//	VPABSD m256 ymm
//	VPABSD ymm ymm
//	VPABSD m128 xmm
//	VPABSD xmm xmm
//	VPABSD m128 k xmm
//	VPABSD m256 k ymm
//	VPABSD xmm k xmm
//	VPABSD ymm k ymm
//	VPABSD m512 k zmm
//	VPABSD m512 zmm
//	VPABSD zmm k zmm
//	VPABSD zmm zmm
//
// Construct and append a VPABSD instruction to the active function.
// Operates on the global context.
func VPABSD(ops ...operand.Op) { ctx.VPABSD(ops...) }

// VPABSD_BCST: Packed Absolute Value of Doubleword Integers (Broadcast).
//
// Forms:
//
//	VPABSD.BCST m32 k xmm
//	VPABSD.BCST m32 k ymm
//	VPABSD.BCST m32 xmm
//	VPABSD.BCST m32 ymm
//	VPABSD.BCST m32 k zmm
//	VPABSD.BCST m32 zmm
//
// Construct and append a VPABSD.BCST instruction to the active function.
func (c *Context) VPABSD_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VPABSD_BCST(ops...))
}

// VPABSD_BCST: Packed Absolute Value of Doubleword Integers (Broadcast).
//
// Forms:
//
//	VPABSD.BCST m32 k xmm
//	VPABSD.BCST m32 k ymm
//	VPABSD.BCST m32 xmm
//	VPABSD.BCST m32 ymm
//	VPABSD.BCST m32 k zmm
//	VPABSD.BCST m32 zmm
//
// Construct and append a VPABSD.BCST instruction to the active function.
// Operates on the global context.
func VPABSD_BCST(ops ...operand.Op) { ctx.VPABSD_BCST(ops...) }

// VPABSD_BCST_Z: Packed Absolute Value of Doubleword Integers (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VPABSD.BCST.Z m32 k xmm
//	VPABSD.BCST.Z m32 k ymm
//	VPABSD.BCST.Z m32 k zmm
//
// Construct and append a VPABSD.BCST.Z instruction to the active function.
func (c *Context) VPABSD_BCST_Z(m, k, xyz operand.Op) {
	c.addinstruction(x86.VPABSD_BCST_Z(m, k, xyz))
}

// VPABSD_BCST_Z: Packed Absolute Value of Doubleword Integers (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VPABSD.BCST.Z m32 k xmm
//	VPABSD.BCST.Z m32 k ymm
//	VPABSD.BCST.Z m32 k zmm
//
// Construct and append a VPABSD.BCST.Z instruction to the active function.
// Operates on the global context.
func VPABSD_BCST_Z(m, k, xyz operand.Op) { ctx.VPABSD_BCST_Z(m, k, xyz) }

// VPABSD_Z: Packed Absolute Value of Doubleword Integers (Zeroing Masking).
//
// Forms:
//
//	VPABSD.Z m128 k xmm
//	VPABSD.Z m256 k ymm
//	VPABSD.Z xmm k xmm
//	VPABSD.Z ymm k ymm
//	VPABSD.Z m512 k zmm
//	VPABSD.Z zmm k zmm
//
// Construct and append a VPABSD.Z instruction to the active function.
func (c *Context) VPABSD_Z(mxyz, k, xyz operand.Op) {
	c.addinstruction(x86.VPABSD_Z(mxyz, k, xyz))
}

// VPABSD_Z: Packed Absolute Value of Doubleword Integers (Zeroing Masking).
//
// Forms:
//
//	VPABSD.Z m128 k xmm
//	VPABSD.Z m256 k ymm
//	VPABSD.Z xmm k xmm
//	VPABSD.Z ymm k ymm
//	VPABSD.Z m512 k zmm
//	VPABSD.Z zmm k zmm
//
// Construct and append a VPABSD.Z instruction to the active function.
// Operates on the global context.
func VPABSD_Z(mxyz, k, xyz operand.Op) { ctx.VPABSD_Z(mxyz, k, xyz) }
|
|
|
|
// VPABSQ: Packed Absolute Value of Quadword Integers.
//
// Forms:
//
//	VPABSQ m128 k xmm
//	VPABSQ m128 xmm
//	VPABSQ m256 k ymm
//	VPABSQ m256 ymm
//	VPABSQ xmm k xmm
//	VPABSQ xmm xmm
//	VPABSQ ymm k ymm
//	VPABSQ ymm ymm
//	VPABSQ m512 k zmm
//	VPABSQ m512 zmm
//	VPABSQ zmm k zmm
//	VPABSQ zmm zmm
//
// Construct and append a VPABSQ instruction to the active function.
func (c *Context) VPABSQ(ops ...operand.Op) {
	c.addinstruction(x86.VPABSQ(ops...))
}

// VPABSQ: Packed Absolute Value of Quadword Integers.
//
// Forms:
//
//	VPABSQ m128 k xmm
//	VPABSQ m128 xmm
//	VPABSQ m256 k ymm
//	VPABSQ m256 ymm
//	VPABSQ xmm k xmm
//	VPABSQ xmm xmm
//	VPABSQ ymm k ymm
//	VPABSQ ymm ymm
//	VPABSQ m512 k zmm
//	VPABSQ m512 zmm
//	VPABSQ zmm k zmm
//	VPABSQ zmm zmm
//
// Construct and append a VPABSQ instruction to the active function.
// Operates on the global context.
func VPABSQ(ops ...operand.Op) { ctx.VPABSQ(ops...) }

// VPABSQ_BCST: Packed Absolute Value of Quadword Integers (Broadcast).
//
// Forms:
//
//	VPABSQ.BCST m64 k xmm
//	VPABSQ.BCST m64 k ymm
//	VPABSQ.BCST m64 xmm
//	VPABSQ.BCST m64 ymm
//	VPABSQ.BCST m64 k zmm
//	VPABSQ.BCST m64 zmm
//
// Construct and append a VPABSQ.BCST instruction to the active function.
func (c *Context) VPABSQ_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VPABSQ_BCST(ops...))
}

// VPABSQ_BCST: Packed Absolute Value of Quadword Integers (Broadcast).
//
// Forms:
//
//	VPABSQ.BCST m64 k xmm
//	VPABSQ.BCST m64 k ymm
//	VPABSQ.BCST m64 xmm
//	VPABSQ.BCST m64 ymm
//	VPABSQ.BCST m64 k zmm
//	VPABSQ.BCST m64 zmm
//
// Construct and append a VPABSQ.BCST instruction to the active function.
// Operates on the global context.
func VPABSQ_BCST(ops ...operand.Op) { ctx.VPABSQ_BCST(ops...) }

// VPABSQ_BCST_Z: Packed Absolute Value of Quadword Integers (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VPABSQ.BCST.Z m64 k xmm
//	VPABSQ.BCST.Z m64 k ymm
//	VPABSQ.BCST.Z m64 k zmm
//
// Construct and append a VPABSQ.BCST.Z instruction to the active function.
func (c *Context) VPABSQ_BCST_Z(m, k, xyz operand.Op) {
	c.addinstruction(x86.VPABSQ_BCST_Z(m, k, xyz))
}

// VPABSQ_BCST_Z: Packed Absolute Value of Quadword Integers (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VPABSQ.BCST.Z m64 k xmm
//	VPABSQ.BCST.Z m64 k ymm
//	VPABSQ.BCST.Z m64 k zmm
//
// Construct and append a VPABSQ.BCST.Z instruction to the active function.
// Operates on the global context.
func VPABSQ_BCST_Z(m, k, xyz operand.Op) { ctx.VPABSQ_BCST_Z(m, k, xyz) }

// VPABSQ_Z: Packed Absolute Value of Quadword Integers (Zeroing Masking).
//
// Forms:
//
//	VPABSQ.Z m128 k xmm
//	VPABSQ.Z m256 k ymm
//	VPABSQ.Z xmm k xmm
//	VPABSQ.Z ymm k ymm
//	VPABSQ.Z m512 k zmm
//	VPABSQ.Z zmm k zmm
//
// Construct and append a VPABSQ.Z instruction to the active function.
func (c *Context) VPABSQ_Z(mxyz, k, xyz operand.Op) {
	c.addinstruction(x86.VPABSQ_Z(mxyz, k, xyz))
}

// VPABSQ_Z: Packed Absolute Value of Quadword Integers (Zeroing Masking).
//
// Forms:
//
//	VPABSQ.Z m128 k xmm
//	VPABSQ.Z m256 k ymm
//	VPABSQ.Z xmm k xmm
//	VPABSQ.Z ymm k ymm
//	VPABSQ.Z m512 k zmm
//	VPABSQ.Z zmm k zmm
//
// Construct and append a VPABSQ.Z instruction to the active function.
// Operates on the global context.
func VPABSQ_Z(mxyz, k, xyz operand.Op) { ctx.VPABSQ_Z(mxyz, k, xyz) }
|
|
|
|
// VPABSW: Packed Absolute Value of Word Integers.
//
// Forms:
//
//	VPABSW m256 ymm
//	VPABSW ymm ymm
//	VPABSW m128 xmm
//	VPABSW xmm xmm
//	VPABSW m128 k xmm
//	VPABSW m256 k ymm
//	VPABSW xmm k xmm
//	VPABSW ymm k ymm
//	VPABSW m512 k zmm
//	VPABSW m512 zmm
//	VPABSW zmm k zmm
//	VPABSW zmm zmm
//
// Construct and append a VPABSW instruction to the active function.
func (c *Context) VPABSW(ops ...operand.Op) {
	c.addinstruction(x86.VPABSW(ops...))
}

// VPABSW: Packed Absolute Value of Word Integers.
//
// Forms:
//
//	VPABSW m256 ymm
//	VPABSW ymm ymm
//	VPABSW m128 xmm
//	VPABSW xmm xmm
//	VPABSW m128 k xmm
//	VPABSW m256 k ymm
//	VPABSW xmm k xmm
//	VPABSW ymm k ymm
//	VPABSW m512 k zmm
//	VPABSW m512 zmm
//	VPABSW zmm k zmm
//	VPABSW zmm zmm
//
// Construct and append a VPABSW instruction to the active function.
// Operates on the global context.
func VPABSW(ops ...operand.Op) { ctx.VPABSW(ops...) }

// VPABSW_Z: Packed Absolute Value of Word Integers (Zeroing Masking).
//
// Forms:
//
//	VPABSW.Z m128 k xmm
//	VPABSW.Z m256 k ymm
//	VPABSW.Z xmm k xmm
//	VPABSW.Z ymm k ymm
//	VPABSW.Z m512 k zmm
//	VPABSW.Z zmm k zmm
//
// Construct and append a VPABSW.Z instruction to the active function.
func (c *Context) VPABSW_Z(mxyz, k, xyz operand.Op) {
	c.addinstruction(x86.VPABSW_Z(mxyz, k, xyz))
}

// VPABSW_Z: Packed Absolute Value of Word Integers (Zeroing Masking).
//
// Forms:
//
//	VPABSW.Z m128 k xmm
//	VPABSW.Z m256 k ymm
//	VPABSW.Z xmm k xmm
//	VPABSW.Z ymm k ymm
//	VPABSW.Z m512 k zmm
//	VPABSW.Z zmm k zmm
//
// Construct and append a VPABSW.Z instruction to the active function.
// Operates on the global context.
func VPABSW_Z(mxyz, k, xyz operand.Op) { ctx.VPABSW_Z(mxyz, k, xyz) }
|
|
|
|
// VPACKSSDW: Pack Doublewords into Words with Signed Saturation.
//
// Forms:
//
//	VPACKSSDW m256 ymm ymm
//	VPACKSSDW ymm ymm ymm
//	VPACKSSDW m128 xmm xmm
//	VPACKSSDW xmm xmm xmm
//	VPACKSSDW m128 xmm k xmm
//	VPACKSSDW m256 ymm k ymm
//	VPACKSSDW xmm xmm k xmm
//	VPACKSSDW ymm ymm k ymm
//	VPACKSSDW m512 zmm k zmm
//	VPACKSSDW m512 zmm zmm
//	VPACKSSDW zmm zmm k zmm
//	VPACKSSDW zmm zmm zmm
//
// Construct and append a VPACKSSDW instruction to the active function.
func (c *Context) VPACKSSDW(ops ...operand.Op) {
	c.addinstruction(x86.VPACKSSDW(ops...))
}

// VPACKSSDW: Pack Doublewords into Words with Signed Saturation.
//
// Forms:
//
//	VPACKSSDW m256 ymm ymm
//	VPACKSSDW ymm ymm ymm
//	VPACKSSDW m128 xmm xmm
//	VPACKSSDW xmm xmm xmm
//	VPACKSSDW m128 xmm k xmm
//	VPACKSSDW m256 ymm k ymm
//	VPACKSSDW xmm xmm k xmm
//	VPACKSSDW ymm ymm k ymm
//	VPACKSSDW m512 zmm k zmm
//	VPACKSSDW m512 zmm zmm
//	VPACKSSDW zmm zmm k zmm
//	VPACKSSDW zmm zmm zmm
//
// Construct and append a VPACKSSDW instruction to the active function.
// Operates on the global context.
func VPACKSSDW(ops ...operand.Op) { ctx.VPACKSSDW(ops...) }

// VPACKSSDW_BCST: Pack Doublewords into Words with Signed Saturation (Broadcast).
//
// Forms:
//
//	VPACKSSDW.BCST m32 xmm k xmm
//	VPACKSSDW.BCST m32 xmm xmm
//	VPACKSSDW.BCST m32 ymm k ymm
//	VPACKSSDW.BCST m32 ymm ymm
//	VPACKSSDW.BCST m32 zmm k zmm
//	VPACKSSDW.BCST m32 zmm zmm
//
// Construct and append a VPACKSSDW.BCST instruction to the active function.
func (c *Context) VPACKSSDW_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VPACKSSDW_BCST(ops...))
}

// VPACKSSDW_BCST: Pack Doublewords into Words with Signed Saturation (Broadcast).
//
// Forms:
//
//	VPACKSSDW.BCST m32 xmm k xmm
//	VPACKSSDW.BCST m32 xmm xmm
//	VPACKSSDW.BCST m32 ymm k ymm
//	VPACKSSDW.BCST m32 ymm ymm
//	VPACKSSDW.BCST m32 zmm k zmm
//	VPACKSSDW.BCST m32 zmm zmm
//
// Construct and append a VPACKSSDW.BCST instruction to the active function.
// Operates on the global context.
func VPACKSSDW_BCST(ops ...operand.Op) { ctx.VPACKSSDW_BCST(ops...) }

// VPACKSSDW_BCST_Z: Pack Doublewords into Words with Signed Saturation (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VPACKSSDW.BCST.Z m32 xmm k xmm
//	VPACKSSDW.BCST.Z m32 ymm k ymm
//	VPACKSSDW.BCST.Z m32 zmm k zmm
//
// Construct and append a VPACKSSDW.BCST.Z instruction to the active function.
func (c *Context) VPACKSSDW_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VPACKSSDW_BCST_Z(m, xyz, k, xyz1))
}

// VPACKSSDW_BCST_Z: Pack Doublewords into Words with Signed Saturation (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VPACKSSDW.BCST.Z m32 xmm k xmm
//	VPACKSSDW.BCST.Z m32 ymm k ymm
//	VPACKSSDW.BCST.Z m32 zmm k zmm
//
// Construct and append a VPACKSSDW.BCST.Z instruction to the active function.
// Operates on the global context.
func VPACKSSDW_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VPACKSSDW_BCST_Z(m, xyz, k, xyz1) }

// VPACKSSDW_Z: Pack Doublewords into Words with Signed Saturation (Zeroing Masking).
//
// Forms:
//
//	VPACKSSDW.Z m128 xmm k xmm
//	VPACKSSDW.Z m256 ymm k ymm
//	VPACKSSDW.Z xmm xmm k xmm
//	VPACKSSDW.Z ymm ymm k ymm
//	VPACKSSDW.Z m512 zmm k zmm
//	VPACKSSDW.Z zmm zmm k zmm
//
// Construct and append a VPACKSSDW.Z instruction to the active function.
func (c *Context) VPACKSSDW_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VPACKSSDW_Z(mxyz, xyz, k, xyz1))
}

// VPACKSSDW_Z: Pack Doublewords into Words with Signed Saturation (Zeroing Masking).
//
// Forms:
//
//	VPACKSSDW.Z m128 xmm k xmm
//	VPACKSSDW.Z m256 ymm k ymm
//	VPACKSSDW.Z xmm xmm k xmm
//	VPACKSSDW.Z ymm ymm k ymm
//	VPACKSSDW.Z m512 zmm k zmm
//	VPACKSSDW.Z zmm zmm k zmm
//
// Construct and append a VPACKSSDW.Z instruction to the active function.
// Operates on the global context.
func VPACKSSDW_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPACKSSDW_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPACKSSWB: Pack Words into Bytes with Signed Saturation.
//
// Forms:
//
//	VPACKSSWB m256 ymm ymm
//	VPACKSSWB ymm ymm ymm
//	VPACKSSWB m128 xmm xmm
//	VPACKSSWB xmm xmm xmm
//	VPACKSSWB m128 xmm k xmm
//	VPACKSSWB m256 ymm k ymm
//	VPACKSSWB xmm xmm k xmm
//	VPACKSSWB ymm ymm k ymm
//	VPACKSSWB m512 zmm k zmm
//	VPACKSSWB m512 zmm zmm
//	VPACKSSWB zmm zmm k zmm
//	VPACKSSWB zmm zmm zmm
//
// Construct and append a VPACKSSWB instruction to the active function.
// Construction errors are recorded on the Context, not returned.
func (c *Context) VPACKSSWB(ops ...operand.Op) {
	c.addinstruction(x86.VPACKSSWB(ops...))
}

// VPACKSSWB: Pack Words into Bytes with Signed Saturation.
//
// Forms:
//
//	VPACKSSWB m256 ymm ymm
//	VPACKSSWB ymm ymm ymm
//	VPACKSSWB m128 xmm xmm
//	VPACKSSWB xmm xmm xmm
//	VPACKSSWB m128 xmm k xmm
//	VPACKSSWB m256 ymm k ymm
//	VPACKSSWB xmm xmm k xmm
//	VPACKSSWB ymm ymm k ymm
//	VPACKSSWB m512 zmm k zmm
//	VPACKSSWB m512 zmm zmm
//	VPACKSSWB zmm zmm k zmm
//	VPACKSSWB zmm zmm zmm
//
// Construct and append a VPACKSSWB instruction to the active function.
// Operates on the global context.
// Construction errors are recorded on the global context, not returned.
func VPACKSSWB(ops ...operand.Op) { ctx.VPACKSSWB(ops...) }
|
|
|
|
// VPACKSSWB_Z: Pack Words into Bytes with Signed Saturation (Zeroing Masking).
//
// Forms:
//
//	VPACKSSWB.Z m128 xmm k xmm
//	VPACKSSWB.Z m256 ymm k ymm
//	VPACKSSWB.Z xmm xmm k xmm
//	VPACKSSWB.Z ymm ymm k ymm
//	VPACKSSWB.Z m512 zmm k zmm
//	VPACKSSWB.Z zmm zmm k zmm
//
// Construct and append a VPACKSSWB.Z instruction to the active function.
// Construction errors are recorded on the Context, not returned.
func (c *Context) VPACKSSWB_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VPACKSSWB_Z(mxyz, xyz, k, xyz1))
}

// VPACKSSWB_Z: Pack Words into Bytes with Signed Saturation (Zeroing Masking).
//
// Forms:
//
//	VPACKSSWB.Z m128 xmm k xmm
//	VPACKSSWB.Z m256 ymm k ymm
//	VPACKSSWB.Z xmm xmm k xmm
//	VPACKSSWB.Z ymm ymm k ymm
//	VPACKSSWB.Z m512 zmm k zmm
//	VPACKSSWB.Z zmm zmm k zmm
//
// Construct and append a VPACKSSWB.Z instruction to the active function.
// Operates on the global context.
// Construction errors are recorded on the global context, not returned.
func VPACKSSWB_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPACKSSWB_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPACKUSDW: Pack Doublewords into Words with Unsigned Saturation.
//
// Forms:
//
//	VPACKUSDW m256 ymm ymm
//	VPACKUSDW ymm ymm ymm
//	VPACKUSDW m128 xmm xmm
//	VPACKUSDW xmm xmm xmm
//	VPACKUSDW m128 xmm k xmm
//	VPACKUSDW m256 ymm k ymm
//	VPACKUSDW xmm xmm k xmm
//	VPACKUSDW ymm ymm k ymm
//	VPACKUSDW m512 zmm k zmm
//	VPACKUSDW m512 zmm zmm
//	VPACKUSDW zmm zmm k zmm
//	VPACKUSDW zmm zmm zmm
//
// Construct and append a VPACKUSDW instruction to the active function.
// Construction errors are recorded on the Context, not returned.
func (c *Context) VPACKUSDW(ops ...operand.Op) {
	c.addinstruction(x86.VPACKUSDW(ops...))
}

// VPACKUSDW: Pack Doublewords into Words with Unsigned Saturation.
//
// Forms:
//
//	VPACKUSDW m256 ymm ymm
//	VPACKUSDW ymm ymm ymm
//	VPACKUSDW m128 xmm xmm
//	VPACKUSDW xmm xmm xmm
//	VPACKUSDW m128 xmm k xmm
//	VPACKUSDW m256 ymm k ymm
//	VPACKUSDW xmm xmm k xmm
//	VPACKUSDW ymm ymm k ymm
//	VPACKUSDW m512 zmm k zmm
//	VPACKUSDW m512 zmm zmm
//	VPACKUSDW zmm zmm k zmm
//	VPACKUSDW zmm zmm zmm
//
// Construct and append a VPACKUSDW instruction to the active function.
// Operates on the global context.
// Construction errors are recorded on the global context, not returned.
func VPACKUSDW(ops ...operand.Op) { ctx.VPACKUSDW(ops...) }
|
|
|
|
// VPACKUSDW_BCST: Pack Doublewords into Words with Unsigned Saturation (Broadcast).
//
// Forms:
//
//	VPACKUSDW.BCST m32 xmm k xmm
//	VPACKUSDW.BCST m32 xmm xmm
//	VPACKUSDW.BCST m32 ymm k ymm
//	VPACKUSDW.BCST m32 ymm ymm
//	VPACKUSDW.BCST m32 zmm k zmm
//	VPACKUSDW.BCST m32 zmm zmm
//
// Construct and append a VPACKUSDW.BCST instruction to the active function.
// Construction errors are recorded on the Context, not returned.
func (c *Context) VPACKUSDW_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VPACKUSDW_BCST(ops...))
}

// VPACKUSDW_BCST: Pack Doublewords into Words with Unsigned Saturation (Broadcast).
//
// Forms:
//
//	VPACKUSDW.BCST m32 xmm k xmm
//	VPACKUSDW.BCST m32 xmm xmm
//	VPACKUSDW.BCST m32 ymm k ymm
//	VPACKUSDW.BCST m32 ymm ymm
//	VPACKUSDW.BCST m32 zmm k zmm
//	VPACKUSDW.BCST m32 zmm zmm
//
// Construct and append a VPACKUSDW.BCST instruction to the active function.
// Operates on the global context.
// Construction errors are recorded on the global context, not returned.
func VPACKUSDW_BCST(ops ...operand.Op) { ctx.VPACKUSDW_BCST(ops...) }
|
|
|
|
// VPACKUSDW_BCST_Z: Pack Doublewords into Words with Unsigned Saturation (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VPACKUSDW.BCST.Z m32 xmm k xmm
//	VPACKUSDW.BCST.Z m32 ymm k ymm
//	VPACKUSDW.BCST.Z m32 zmm k zmm
//
// Construct and append a VPACKUSDW.BCST.Z instruction to the active function.
// Construction errors are recorded on the Context, not returned.
func (c *Context) VPACKUSDW_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VPACKUSDW_BCST_Z(m, xyz, k, xyz1))
}

// VPACKUSDW_BCST_Z: Pack Doublewords into Words with Unsigned Saturation (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VPACKUSDW.BCST.Z m32 xmm k xmm
//	VPACKUSDW.BCST.Z m32 ymm k ymm
//	VPACKUSDW.BCST.Z m32 zmm k zmm
//
// Construct and append a VPACKUSDW.BCST.Z instruction to the active function.
// Operates on the global context.
// Construction errors are recorded on the global context, not returned.
func VPACKUSDW_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VPACKUSDW_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VPACKUSDW_Z: Pack Doublewords into Words with Unsigned Saturation (Zeroing Masking).
//
// Forms:
//
//	VPACKUSDW.Z m128 xmm k xmm
//	VPACKUSDW.Z m256 ymm k ymm
//	VPACKUSDW.Z xmm xmm k xmm
//	VPACKUSDW.Z ymm ymm k ymm
//	VPACKUSDW.Z m512 zmm k zmm
//	VPACKUSDW.Z zmm zmm k zmm
//
// Construct and append a VPACKUSDW.Z instruction to the active function.
// Construction errors are recorded on the Context, not returned.
func (c *Context) VPACKUSDW_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VPACKUSDW_Z(mxyz, xyz, k, xyz1))
}

// VPACKUSDW_Z: Pack Doublewords into Words with Unsigned Saturation (Zeroing Masking).
//
// Forms:
//
//	VPACKUSDW.Z m128 xmm k xmm
//	VPACKUSDW.Z m256 ymm k ymm
//	VPACKUSDW.Z xmm xmm k xmm
//	VPACKUSDW.Z ymm ymm k ymm
//	VPACKUSDW.Z m512 zmm k zmm
//	VPACKUSDW.Z zmm zmm k zmm
//
// Construct and append a VPACKUSDW.Z instruction to the active function.
// Operates on the global context.
// Construction errors are recorded on the global context, not returned.
func VPACKUSDW_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPACKUSDW_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPACKUSWB: Pack Words into Bytes with Unsigned Saturation.
//
// Forms:
//
//	VPACKUSWB m256 ymm ymm
//	VPACKUSWB ymm ymm ymm
//	VPACKUSWB m128 xmm xmm
//	VPACKUSWB xmm xmm xmm
//	VPACKUSWB m128 xmm k xmm
//	VPACKUSWB m256 ymm k ymm
//	VPACKUSWB xmm xmm k xmm
//	VPACKUSWB ymm ymm k ymm
//	VPACKUSWB m512 zmm k zmm
//	VPACKUSWB m512 zmm zmm
//	VPACKUSWB zmm zmm k zmm
//	VPACKUSWB zmm zmm zmm
//
// Construct and append a VPACKUSWB instruction to the active function.
// Construction errors are recorded on the Context, not returned.
func (c *Context) VPACKUSWB(ops ...operand.Op) {
	c.addinstruction(x86.VPACKUSWB(ops...))
}

// VPACKUSWB: Pack Words into Bytes with Unsigned Saturation.
//
// Forms:
//
//	VPACKUSWB m256 ymm ymm
//	VPACKUSWB ymm ymm ymm
//	VPACKUSWB m128 xmm xmm
//	VPACKUSWB xmm xmm xmm
//	VPACKUSWB m128 xmm k xmm
//	VPACKUSWB m256 ymm k ymm
//	VPACKUSWB xmm xmm k xmm
//	VPACKUSWB ymm ymm k ymm
//	VPACKUSWB m512 zmm k zmm
//	VPACKUSWB m512 zmm zmm
//	VPACKUSWB zmm zmm k zmm
//	VPACKUSWB zmm zmm zmm
//
// Construct and append a VPACKUSWB instruction to the active function.
// Operates on the global context.
// Construction errors are recorded on the global context, not returned.
func VPACKUSWB(ops ...operand.Op) { ctx.VPACKUSWB(ops...) }
|
|
|
|
// VPACKUSWB_Z: Pack Words into Bytes with Unsigned Saturation (Zeroing Masking).
//
// Forms:
//
//	VPACKUSWB.Z m128 xmm k xmm
//	VPACKUSWB.Z m256 ymm k ymm
//	VPACKUSWB.Z xmm xmm k xmm
//	VPACKUSWB.Z ymm ymm k ymm
//	VPACKUSWB.Z m512 zmm k zmm
//	VPACKUSWB.Z zmm zmm k zmm
//
// Construct and append a VPACKUSWB.Z instruction to the active function.
// Construction errors are recorded on the Context, not returned.
func (c *Context) VPACKUSWB_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VPACKUSWB_Z(mxyz, xyz, k, xyz1))
}

// VPACKUSWB_Z: Pack Words into Bytes with Unsigned Saturation (Zeroing Masking).
//
// Forms:
//
//	VPACKUSWB.Z m128 xmm k xmm
//	VPACKUSWB.Z m256 ymm k ymm
//	VPACKUSWB.Z xmm xmm k xmm
//	VPACKUSWB.Z ymm ymm k ymm
//	VPACKUSWB.Z m512 zmm k zmm
//	VPACKUSWB.Z zmm zmm k zmm
//
// Construct and append a VPACKUSWB.Z instruction to the active function.
// Operates on the global context.
// Construction errors are recorded on the global context, not returned.
func VPACKUSWB_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPACKUSWB_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPADDB: Add Packed Byte Integers.
//
// Forms:
//
//	VPADDB m256 ymm ymm
//	VPADDB ymm ymm ymm
//	VPADDB m128 xmm xmm
//	VPADDB xmm xmm xmm
//	VPADDB m128 xmm k xmm
//	VPADDB m256 ymm k ymm
//	VPADDB xmm xmm k xmm
//	VPADDB ymm ymm k ymm
//	VPADDB m512 zmm k zmm
//	VPADDB m512 zmm zmm
//	VPADDB zmm zmm k zmm
//	VPADDB zmm zmm zmm
//
// Construct and append a VPADDB instruction to the active function.
// Construction errors are recorded on the Context, not returned.
func (c *Context) VPADDB(ops ...operand.Op) {
	c.addinstruction(x86.VPADDB(ops...))
}

// VPADDB: Add Packed Byte Integers.
//
// Forms:
//
//	VPADDB m256 ymm ymm
//	VPADDB ymm ymm ymm
//	VPADDB m128 xmm xmm
//	VPADDB xmm xmm xmm
//	VPADDB m128 xmm k xmm
//	VPADDB m256 ymm k ymm
//	VPADDB xmm xmm k xmm
//	VPADDB ymm ymm k ymm
//	VPADDB m512 zmm k zmm
//	VPADDB m512 zmm zmm
//	VPADDB zmm zmm k zmm
//	VPADDB zmm zmm zmm
//
// Construct and append a VPADDB instruction to the active function.
// Operates on the global context.
// Construction errors are recorded on the global context, not returned.
func VPADDB(ops ...operand.Op) { ctx.VPADDB(ops...) }
|
|
|
|
// VPADDB_Z: Add Packed Byte Integers (Zeroing Masking).
//
// Forms:
//
//	VPADDB.Z m128 xmm k xmm
//	VPADDB.Z m256 ymm k ymm
//	VPADDB.Z xmm xmm k xmm
//	VPADDB.Z ymm ymm k ymm
//	VPADDB.Z m512 zmm k zmm
//	VPADDB.Z zmm zmm k zmm
//
// Construct and append a VPADDB.Z instruction to the active function.
// Construction errors are recorded on the Context, not returned.
func (c *Context) VPADDB_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VPADDB_Z(mxyz, xyz, k, xyz1))
}

// VPADDB_Z: Add Packed Byte Integers (Zeroing Masking).
//
// Forms:
//
//	VPADDB.Z m128 xmm k xmm
//	VPADDB.Z m256 ymm k ymm
//	VPADDB.Z xmm xmm k xmm
//	VPADDB.Z ymm ymm k ymm
//	VPADDB.Z m512 zmm k zmm
//	VPADDB.Z zmm zmm k zmm
//
// Construct and append a VPADDB.Z instruction to the active function.
// Operates on the global context.
// Construction errors are recorded on the global context, not returned.
func VPADDB_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPADDB_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPADDD: Add Packed Doubleword Integers.
//
// Forms:
//
//	VPADDD m256 ymm ymm
//	VPADDD ymm ymm ymm
//	VPADDD m128 xmm xmm
//	VPADDD xmm xmm xmm
//	VPADDD m128 xmm k xmm
//	VPADDD m256 ymm k ymm
//	VPADDD xmm xmm k xmm
//	VPADDD ymm ymm k ymm
//	VPADDD m512 zmm k zmm
//	VPADDD m512 zmm zmm
//	VPADDD zmm zmm k zmm
//	VPADDD zmm zmm zmm
//
// Construct and append a VPADDD instruction to the active function.
// Construction errors are recorded on the Context, not returned.
func (c *Context) VPADDD(ops ...operand.Op) {
	c.addinstruction(x86.VPADDD(ops...))
}

// VPADDD: Add Packed Doubleword Integers.
//
// Forms:
//
//	VPADDD m256 ymm ymm
//	VPADDD ymm ymm ymm
//	VPADDD m128 xmm xmm
//	VPADDD xmm xmm xmm
//	VPADDD m128 xmm k xmm
//	VPADDD m256 ymm k ymm
//	VPADDD xmm xmm k xmm
//	VPADDD ymm ymm k ymm
//	VPADDD m512 zmm k zmm
//	VPADDD m512 zmm zmm
//	VPADDD zmm zmm k zmm
//	VPADDD zmm zmm zmm
//
// Construct and append a VPADDD instruction to the active function.
// Operates on the global context.
// Construction errors are recorded on the global context, not returned.
func VPADDD(ops ...operand.Op) { ctx.VPADDD(ops...) }
|
|
|
|
// VPADDD_BCST: Add Packed Doubleword Integers (Broadcast).
//
// Forms:
//
//	VPADDD.BCST m32 xmm k xmm
//	VPADDD.BCST m32 xmm xmm
//	VPADDD.BCST m32 ymm k ymm
//	VPADDD.BCST m32 ymm ymm
//	VPADDD.BCST m32 zmm k zmm
//	VPADDD.BCST m32 zmm zmm
//
// Construct and append a VPADDD.BCST instruction to the active function.
// Construction errors are recorded on the Context, not returned.
func (c *Context) VPADDD_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VPADDD_BCST(ops...))
}

// VPADDD_BCST: Add Packed Doubleword Integers (Broadcast).
//
// Forms:
//
//	VPADDD.BCST m32 xmm k xmm
//	VPADDD.BCST m32 xmm xmm
//	VPADDD.BCST m32 ymm k ymm
//	VPADDD.BCST m32 ymm ymm
//	VPADDD.BCST m32 zmm k zmm
//	VPADDD.BCST m32 zmm zmm
//
// Construct and append a VPADDD.BCST instruction to the active function.
// Operates on the global context.
// Construction errors are recorded on the global context, not returned.
func VPADDD_BCST(ops ...operand.Op) { ctx.VPADDD_BCST(ops...) }
|
|
|
|
// VPADDD_BCST_Z: Add Packed Doubleword Integers (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VPADDD.BCST.Z m32 xmm k xmm
//	VPADDD.BCST.Z m32 ymm k ymm
//	VPADDD.BCST.Z m32 zmm k zmm
//
// Construct and append a VPADDD.BCST.Z instruction to the active function.
// Construction errors are recorded on the Context, not returned.
func (c *Context) VPADDD_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VPADDD_BCST_Z(m, xyz, k, xyz1))
}

// VPADDD_BCST_Z: Add Packed Doubleword Integers (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VPADDD.BCST.Z m32 xmm k xmm
//	VPADDD.BCST.Z m32 ymm k ymm
//	VPADDD.BCST.Z m32 zmm k zmm
//
// Construct and append a VPADDD.BCST.Z instruction to the active function.
// Operates on the global context.
// Construction errors are recorded on the global context, not returned.
func VPADDD_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VPADDD_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VPADDD_Z: Add Packed Doubleword Integers (Zeroing Masking).
//
// Forms:
//
//	VPADDD.Z m128 xmm k xmm
//	VPADDD.Z m256 ymm k ymm
//	VPADDD.Z xmm xmm k xmm
//	VPADDD.Z ymm ymm k ymm
//	VPADDD.Z m512 zmm k zmm
//	VPADDD.Z zmm zmm k zmm
//
// Construct and append a VPADDD.Z instruction to the active function.
// Construction errors are recorded on the Context, not returned.
func (c *Context) VPADDD_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VPADDD_Z(mxyz, xyz, k, xyz1))
}

// VPADDD_Z: Add Packed Doubleword Integers (Zeroing Masking).
//
// Forms:
//
//	VPADDD.Z m128 xmm k xmm
//	VPADDD.Z m256 ymm k ymm
//	VPADDD.Z xmm xmm k xmm
//	VPADDD.Z ymm ymm k ymm
//	VPADDD.Z m512 zmm k zmm
//	VPADDD.Z zmm zmm k zmm
//
// Construct and append a VPADDD.Z instruction to the active function.
// Operates on the global context.
// Construction errors are recorded on the global context, not returned.
func VPADDD_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPADDD_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPADDQ: Add Packed Quadword Integers.
//
// Forms:
//
//	VPADDQ m256 ymm ymm
//	VPADDQ ymm ymm ymm
//	VPADDQ m128 xmm xmm
//	VPADDQ xmm xmm xmm
//	VPADDQ m128 xmm k xmm
//	VPADDQ m256 ymm k ymm
//	VPADDQ xmm xmm k xmm
//	VPADDQ ymm ymm k ymm
//	VPADDQ m512 zmm k zmm
//	VPADDQ m512 zmm zmm
//	VPADDQ zmm zmm k zmm
//	VPADDQ zmm zmm zmm
//
// Construct and append a VPADDQ instruction to the active function.
// Construction errors are recorded on the Context, not returned.
func (c *Context) VPADDQ(ops ...operand.Op) {
	c.addinstruction(x86.VPADDQ(ops...))
}

// VPADDQ: Add Packed Quadword Integers.
//
// Forms:
//
//	VPADDQ m256 ymm ymm
//	VPADDQ ymm ymm ymm
//	VPADDQ m128 xmm xmm
//	VPADDQ xmm xmm xmm
//	VPADDQ m128 xmm k xmm
//	VPADDQ m256 ymm k ymm
//	VPADDQ xmm xmm k xmm
//	VPADDQ ymm ymm k ymm
//	VPADDQ m512 zmm k zmm
//	VPADDQ m512 zmm zmm
//	VPADDQ zmm zmm k zmm
//	VPADDQ zmm zmm zmm
//
// Construct and append a VPADDQ instruction to the active function.
// Operates on the global context.
// Construction errors are recorded on the global context, not returned.
func VPADDQ(ops ...operand.Op) { ctx.VPADDQ(ops...) }
|
|
|
|
// VPADDQ_BCST: Add Packed Quadword Integers (Broadcast).
//
// Forms:
//
//	VPADDQ.BCST m64 xmm k xmm
//	VPADDQ.BCST m64 xmm xmm
//	VPADDQ.BCST m64 ymm k ymm
//	VPADDQ.BCST m64 ymm ymm
//	VPADDQ.BCST m64 zmm k zmm
//	VPADDQ.BCST m64 zmm zmm
//
// Construct and append a VPADDQ.BCST instruction to the active function.
// Construction errors are recorded on the Context, not returned.
func (c *Context) VPADDQ_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VPADDQ_BCST(ops...))
}

// VPADDQ_BCST: Add Packed Quadword Integers (Broadcast).
//
// Forms:
//
//	VPADDQ.BCST m64 xmm k xmm
//	VPADDQ.BCST m64 xmm xmm
//	VPADDQ.BCST m64 ymm k ymm
//	VPADDQ.BCST m64 ymm ymm
//	VPADDQ.BCST m64 zmm k zmm
//	VPADDQ.BCST m64 zmm zmm
//
// Construct and append a VPADDQ.BCST instruction to the active function.
// Operates on the global context.
// Construction errors are recorded on the global context, not returned.
func VPADDQ_BCST(ops ...operand.Op) { ctx.VPADDQ_BCST(ops...) }
|
|
|
|
// VPADDQ_BCST_Z: Add Packed Quadword Integers (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VPADDQ.BCST.Z m64 xmm k xmm
//	VPADDQ.BCST.Z m64 ymm k ymm
//	VPADDQ.BCST.Z m64 zmm k zmm
//
// Construct and append a VPADDQ.BCST.Z instruction to the active function.
// Construction errors are recorded on the Context, not returned.
func (c *Context) VPADDQ_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VPADDQ_BCST_Z(m, xyz, k, xyz1))
}

// VPADDQ_BCST_Z: Add Packed Quadword Integers (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VPADDQ.BCST.Z m64 xmm k xmm
//	VPADDQ.BCST.Z m64 ymm k ymm
//	VPADDQ.BCST.Z m64 zmm k zmm
//
// Construct and append a VPADDQ.BCST.Z instruction to the active function.
// Operates on the global context.
// Construction errors are recorded on the global context, not returned.
func VPADDQ_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VPADDQ_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VPADDQ_Z: Add Packed Quadword Integers (Zeroing Masking).
//
// Forms:
//
//	VPADDQ.Z m128 xmm k xmm
//	VPADDQ.Z m256 ymm k ymm
//	VPADDQ.Z xmm xmm k xmm
//	VPADDQ.Z ymm ymm k ymm
//	VPADDQ.Z m512 zmm k zmm
//	VPADDQ.Z zmm zmm k zmm
//
// Construct and append a VPADDQ.Z instruction to the active function.
// Construction errors are recorded on the Context, not returned.
func (c *Context) VPADDQ_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VPADDQ_Z(mxyz, xyz, k, xyz1))
}

// VPADDQ_Z: Add Packed Quadword Integers (Zeroing Masking).
//
// Forms:
//
//	VPADDQ.Z m128 xmm k xmm
//	VPADDQ.Z m256 ymm k ymm
//	VPADDQ.Z xmm xmm k xmm
//	VPADDQ.Z ymm ymm k ymm
//	VPADDQ.Z m512 zmm k zmm
//	VPADDQ.Z zmm zmm k zmm
//
// Construct and append a VPADDQ.Z instruction to the active function.
// Operates on the global context.
// Construction errors are recorded on the global context, not returned.
func VPADDQ_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPADDQ_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPADDSB: Add Packed Signed Byte Integers with Signed Saturation.
//
// Forms:
//
//	VPADDSB m256 ymm ymm
//	VPADDSB ymm ymm ymm
//	VPADDSB m128 xmm xmm
//	VPADDSB xmm xmm xmm
//	VPADDSB m128 xmm k xmm
//	VPADDSB m256 ymm k ymm
//	VPADDSB xmm xmm k xmm
//	VPADDSB ymm ymm k ymm
//	VPADDSB m512 zmm k zmm
//	VPADDSB m512 zmm zmm
//	VPADDSB zmm zmm k zmm
//	VPADDSB zmm zmm zmm
//
// Construct and append a VPADDSB instruction to the active function.
// Construction errors are recorded on the Context, not returned.
func (c *Context) VPADDSB(ops ...operand.Op) {
	c.addinstruction(x86.VPADDSB(ops...))
}

// VPADDSB: Add Packed Signed Byte Integers with Signed Saturation.
//
// Forms:
//
//	VPADDSB m256 ymm ymm
//	VPADDSB ymm ymm ymm
//	VPADDSB m128 xmm xmm
//	VPADDSB xmm xmm xmm
//	VPADDSB m128 xmm k xmm
//	VPADDSB m256 ymm k ymm
//	VPADDSB xmm xmm k xmm
//	VPADDSB ymm ymm k ymm
//	VPADDSB m512 zmm k zmm
//	VPADDSB m512 zmm zmm
//	VPADDSB zmm zmm k zmm
//	VPADDSB zmm zmm zmm
//
// Construct and append a VPADDSB instruction to the active function.
// Operates on the global context.
// Construction errors are recorded on the global context, not returned.
func VPADDSB(ops ...operand.Op) { ctx.VPADDSB(ops...) }
|
|
|
|
// VPADDSB_Z: Add Packed Signed Byte Integers with Signed Saturation (Zeroing Masking).
//
// Forms:
//
//	VPADDSB.Z m128 xmm k xmm
//	VPADDSB.Z m256 ymm k ymm
//	VPADDSB.Z xmm xmm k xmm
//	VPADDSB.Z ymm ymm k ymm
//	VPADDSB.Z m512 zmm k zmm
//	VPADDSB.Z zmm zmm k zmm
//
// Construct and append a VPADDSB.Z instruction to the active function.
// Construction errors are recorded on the Context, not returned.
func (c *Context) VPADDSB_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VPADDSB_Z(mxyz, xyz, k, xyz1))
}

// VPADDSB_Z: Add Packed Signed Byte Integers with Signed Saturation (Zeroing Masking).
//
// Forms:
//
//	VPADDSB.Z m128 xmm k xmm
//	VPADDSB.Z m256 ymm k ymm
//	VPADDSB.Z xmm xmm k xmm
//	VPADDSB.Z ymm ymm k ymm
//	VPADDSB.Z m512 zmm k zmm
//	VPADDSB.Z zmm zmm k zmm
//
// Construct and append a VPADDSB.Z instruction to the active function.
// Operates on the global context.
// Construction errors are recorded on the global context, not returned.
func VPADDSB_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPADDSB_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPADDSW: Add Packed Signed Word Integers with Signed Saturation.
//
// Forms:
//
//	VPADDSW m256 ymm ymm
//	VPADDSW ymm ymm ymm
//	VPADDSW m128 xmm xmm
//	VPADDSW xmm xmm xmm
//	VPADDSW m128 xmm k xmm
//	VPADDSW m256 ymm k ymm
//	VPADDSW xmm xmm k xmm
//	VPADDSW ymm ymm k ymm
//	VPADDSW m512 zmm k zmm
//	VPADDSW m512 zmm zmm
//	VPADDSW zmm zmm k zmm
//	VPADDSW zmm zmm zmm
//
// Construct and append a VPADDSW instruction to the active function.
// Construction errors are recorded on the Context, not returned.
func (c *Context) VPADDSW(ops ...operand.Op) {
	c.addinstruction(x86.VPADDSW(ops...))
}

// VPADDSW: Add Packed Signed Word Integers with Signed Saturation.
//
// Forms:
//
//	VPADDSW m256 ymm ymm
//	VPADDSW ymm ymm ymm
//	VPADDSW m128 xmm xmm
//	VPADDSW xmm xmm xmm
//	VPADDSW m128 xmm k xmm
//	VPADDSW m256 ymm k ymm
//	VPADDSW xmm xmm k xmm
//	VPADDSW ymm ymm k ymm
//	VPADDSW m512 zmm k zmm
//	VPADDSW m512 zmm zmm
//	VPADDSW zmm zmm k zmm
//	VPADDSW zmm zmm zmm
//
// Construct and append a VPADDSW instruction to the active function.
// Operates on the global context.
// Construction errors are recorded on the global context, not returned.
func VPADDSW(ops ...operand.Op) { ctx.VPADDSW(ops...) }
|
|
|
|
// VPADDSW_Z: Add Packed Signed Word Integers with Signed Saturation (Zeroing Masking).
//
// Forms:
//
//	VPADDSW.Z m128 xmm k xmm
//	VPADDSW.Z m256 ymm k ymm
//	VPADDSW.Z xmm xmm k xmm
//	VPADDSW.Z ymm ymm k ymm
//	VPADDSW.Z m512 zmm k zmm
//	VPADDSW.Z zmm zmm k zmm
//
// Construct and append a VPADDSW.Z instruction to the active function.
// Construction errors are recorded on the Context, not returned.
func (c *Context) VPADDSW_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VPADDSW_Z(mxyz, xyz, k, xyz1))
}

// VPADDSW_Z: Add Packed Signed Word Integers with Signed Saturation (Zeroing Masking).
//
// Forms:
//
//	VPADDSW.Z m128 xmm k xmm
//	VPADDSW.Z m256 ymm k ymm
//	VPADDSW.Z xmm xmm k xmm
//	VPADDSW.Z ymm ymm k ymm
//	VPADDSW.Z m512 zmm k zmm
//	VPADDSW.Z zmm zmm k zmm
//
// Construct and append a VPADDSW.Z instruction to the active function.
// Operates on the global context.
// Construction errors are recorded on the global context, not returned.
func VPADDSW_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPADDSW_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPADDUSB: Add Packed Unsigned Byte Integers with Unsigned Saturation.
//
// Forms:
//
//	VPADDUSB m256 ymm ymm
//	VPADDUSB ymm ymm ymm
//	VPADDUSB m128 xmm xmm
//	VPADDUSB xmm xmm xmm
//	VPADDUSB m128 xmm k xmm
//	VPADDUSB m256 ymm k ymm
//	VPADDUSB xmm xmm k xmm
//	VPADDUSB ymm ymm k ymm
//	VPADDUSB m512 zmm k zmm
//	VPADDUSB m512 zmm zmm
//	VPADDUSB zmm zmm k zmm
//	VPADDUSB zmm zmm zmm
//
// Construct and append a VPADDUSB instruction to the active function.
// Construction errors are recorded on the Context, not returned.
func (c *Context) VPADDUSB(ops ...operand.Op) {
	c.addinstruction(x86.VPADDUSB(ops...))
}

// VPADDUSB: Add Packed Unsigned Byte Integers with Unsigned Saturation.
//
// Forms:
//
//	VPADDUSB m256 ymm ymm
//	VPADDUSB ymm ymm ymm
//	VPADDUSB m128 xmm xmm
//	VPADDUSB xmm xmm xmm
//	VPADDUSB m128 xmm k xmm
//	VPADDUSB m256 ymm k ymm
//	VPADDUSB xmm xmm k xmm
//	VPADDUSB ymm ymm k ymm
//	VPADDUSB m512 zmm k zmm
//	VPADDUSB m512 zmm zmm
//	VPADDUSB zmm zmm k zmm
//	VPADDUSB zmm zmm zmm
//
// Construct and append a VPADDUSB instruction to the active function.
// Operates on the global context.
// Construction errors are recorded on the global context, not returned.
func VPADDUSB(ops ...operand.Op) { ctx.VPADDUSB(ops...) }
|
|
|
|
// VPADDUSB_Z: Add Packed Unsigned Byte Integers with Unsigned Saturation (Zeroing Masking).
//
// Forms:
//
//	VPADDUSB.Z m128 xmm k xmm
//	VPADDUSB.Z m256 ymm k ymm
//	VPADDUSB.Z xmm xmm k xmm
//	VPADDUSB.Z ymm ymm k ymm
//	VPADDUSB.Z m512 zmm k zmm
//	VPADDUSB.Z zmm zmm k zmm
//
// Construct and append a VPADDUSB.Z instruction to the active function.
// Construction errors are recorded on the Context, not returned.
func (c *Context) VPADDUSB_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VPADDUSB_Z(mxyz, xyz, k, xyz1))
}

// VPADDUSB_Z: Add Packed Unsigned Byte Integers with Unsigned Saturation (Zeroing Masking).
//
// Forms:
//
//	VPADDUSB.Z m128 xmm k xmm
//	VPADDUSB.Z m256 ymm k ymm
//	VPADDUSB.Z xmm xmm k xmm
//	VPADDUSB.Z ymm ymm k ymm
//	VPADDUSB.Z m512 zmm k zmm
//	VPADDUSB.Z zmm zmm k zmm
//
// Construct and append a VPADDUSB.Z instruction to the active function.
// Operates on the global context.
// Construction errors are recorded on the global context, not returned.
func VPADDUSB_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPADDUSB_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPADDUSW: Add Packed Unsigned Word Integers with Unsigned Saturation.
//
// Forms:
//
//	VPADDUSW m256 ymm ymm
//	VPADDUSW ymm ymm ymm
//	VPADDUSW m128 xmm xmm
//	VPADDUSW xmm xmm xmm
//	VPADDUSW m128 xmm k xmm
//	VPADDUSW m256 ymm k ymm
//	VPADDUSW xmm xmm k xmm
//	VPADDUSW ymm ymm k ymm
//	VPADDUSW m512 zmm k zmm
//	VPADDUSW m512 zmm zmm
//	VPADDUSW zmm zmm k zmm
//	VPADDUSW zmm zmm zmm
//
// Construct and append a VPADDUSW instruction to the active function.
// Construction errors are recorded on the Context, not returned.
func (c *Context) VPADDUSW(ops ...operand.Op) {
	c.addinstruction(x86.VPADDUSW(ops...))
}

// VPADDUSW: Add Packed Unsigned Word Integers with Unsigned Saturation.
//
// Forms:
//
//	VPADDUSW m256 ymm ymm
//	VPADDUSW ymm ymm ymm
//	VPADDUSW m128 xmm xmm
//	VPADDUSW xmm xmm xmm
//	VPADDUSW m128 xmm k xmm
//	VPADDUSW m256 ymm k ymm
//	VPADDUSW xmm xmm k xmm
//	VPADDUSW ymm ymm k ymm
//	VPADDUSW m512 zmm k zmm
//	VPADDUSW m512 zmm zmm
//	VPADDUSW zmm zmm k zmm
//	VPADDUSW zmm zmm zmm
//
// Construct and append a VPADDUSW instruction to the active function.
// Operates on the global context.
// Construction errors are recorded on the global context, not returned.
func VPADDUSW(ops ...operand.Op) { ctx.VPADDUSW(ops...) }
|
|
|
|
// VPADDUSW_Z: Add Packed Unsigned Word Integers with Unsigned Saturation (Zeroing Masking).
//
// Forms:
//
//	VPADDUSW.Z m128 xmm k xmm
//	VPADDUSW.Z m256 ymm k ymm
//	VPADDUSW.Z xmm xmm k xmm
//	VPADDUSW.Z ymm ymm k ymm
//	VPADDUSW.Z m512 zmm k zmm
//	VPADDUSW.Z zmm zmm k zmm
//
// Construct and append a VPADDUSW.Z instruction to the active function.
// Construction errors are recorded on the Context, not returned.
func (c *Context) VPADDUSW_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VPADDUSW_Z(mxyz, xyz, k, xyz1))
}

// VPADDUSW_Z: Add Packed Unsigned Word Integers with Unsigned Saturation (Zeroing Masking).
//
// Forms:
//
//	VPADDUSW.Z m128 xmm k xmm
//	VPADDUSW.Z m256 ymm k ymm
//	VPADDUSW.Z xmm xmm k xmm
//	VPADDUSW.Z ymm ymm k ymm
//	VPADDUSW.Z m512 zmm k zmm
//	VPADDUSW.Z zmm zmm k zmm
//
// Construct and append a VPADDUSW.Z instruction to the active function.
// Operates on the global context.
// Construction errors are recorded on the global context, not returned.
func VPADDUSW_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPADDUSW_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPADDW: Add Packed Word Integers.
//
// Forms:
//
//	VPADDW m256 ymm ymm
//	VPADDW ymm ymm ymm
//	VPADDW m128 xmm xmm
//	VPADDW xmm xmm xmm
//	VPADDW m128 xmm k xmm
//	VPADDW m256 ymm k ymm
//	VPADDW xmm xmm k xmm
//	VPADDW ymm ymm k ymm
//	VPADDW m512 zmm k zmm
//	VPADDW m512 zmm zmm
//	VPADDW zmm zmm k zmm
//	VPADDW zmm zmm zmm
//
// Construct and append a VPADDW instruction to the active function.
// Construction errors are recorded on the Context, not returned.
func (c *Context) VPADDW(ops ...operand.Op) {
	c.addinstruction(x86.VPADDW(ops...))
}

// VPADDW: Add Packed Word Integers.
//
// Forms:
//
//	VPADDW m256 ymm ymm
//	VPADDW ymm ymm ymm
//	VPADDW m128 xmm xmm
//	VPADDW xmm xmm xmm
//	VPADDW m128 xmm k xmm
//	VPADDW m256 ymm k ymm
//	VPADDW xmm xmm k xmm
//	VPADDW ymm ymm k ymm
//	VPADDW m512 zmm k zmm
//	VPADDW m512 zmm zmm
//	VPADDW zmm zmm k zmm
//	VPADDW zmm zmm zmm
//
// Construct and append a VPADDW instruction to the active function.
// Operates on the global context.
// Construction errors are recorded on the global context, not returned.
func VPADDW(ops ...operand.Op) { ctx.VPADDW(ops...) }
|
|
|
|
// VPADDW_Z: Add Packed Word Integers (Zeroing Masking).
//
// Forms:
//
//	VPADDW.Z m128 xmm k xmm
//	VPADDW.Z m256 ymm k ymm
//	VPADDW.Z xmm xmm k xmm
//	VPADDW.Z ymm ymm k ymm
//	VPADDW.Z m512 zmm k zmm
//	VPADDW.Z zmm zmm k zmm
//
// Construct and append a VPADDW.Z instruction to the active function.
// Construction errors are recorded on the Context, not returned.
func (c *Context) VPADDW_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VPADDW_Z(mxyz, xyz, k, xyz1))
}

// VPADDW_Z: Add Packed Word Integers (Zeroing Masking).
//
// Forms:
//
//	VPADDW.Z m128 xmm k xmm
//	VPADDW.Z m256 ymm k ymm
//	VPADDW.Z xmm xmm k xmm
//	VPADDW.Z ymm ymm k ymm
//	VPADDW.Z m512 zmm k zmm
//	VPADDW.Z zmm zmm k zmm
//
// Construct and append a VPADDW.Z instruction to the active function.
// Operates on the global context.
// Construction errors are recorded on the global context, not returned.
func VPADDW_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPADDW_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPALIGNR: Packed Align Right.
//
// Forms:
//
//	VPALIGNR imm8 m256 ymm ymm
//	VPALIGNR imm8 ymm ymm ymm
//	VPALIGNR imm8 m128 xmm xmm
//	VPALIGNR imm8 xmm xmm xmm
//	VPALIGNR imm8 m128 xmm k xmm
//	VPALIGNR imm8 m256 ymm k ymm
//	VPALIGNR imm8 xmm xmm k xmm
//	VPALIGNR imm8 ymm ymm k ymm
//	VPALIGNR imm8 m512 zmm k zmm
//	VPALIGNR imm8 m512 zmm zmm
//	VPALIGNR imm8 zmm zmm k zmm
//	VPALIGNR imm8 zmm zmm zmm
//
// Construct and append a VPALIGNR instruction to the active function.
// Construction errors are recorded on the Context, not returned.
func (c *Context) VPALIGNR(ops ...operand.Op) {
	c.addinstruction(x86.VPALIGNR(ops...))
}

// VPALIGNR: Packed Align Right.
//
// Forms:
//
//	VPALIGNR imm8 m256 ymm ymm
//	VPALIGNR imm8 ymm ymm ymm
//	VPALIGNR imm8 m128 xmm xmm
//	VPALIGNR imm8 xmm xmm xmm
//	VPALIGNR imm8 m128 xmm k xmm
//	VPALIGNR imm8 m256 ymm k ymm
//	VPALIGNR imm8 xmm xmm k xmm
//	VPALIGNR imm8 ymm ymm k ymm
//	VPALIGNR imm8 m512 zmm k zmm
//	VPALIGNR imm8 m512 zmm zmm
//	VPALIGNR imm8 zmm zmm k zmm
//	VPALIGNR imm8 zmm zmm zmm
//
// Construct and append a VPALIGNR instruction to the active function.
// Operates on the global context.
// Construction errors are recorded on the global context, not returned.
func VPALIGNR(ops ...operand.Op) { ctx.VPALIGNR(ops...) }
|
|
|
|
// VPALIGNR_Z: Packed Align Right (Zeroing Masking).
//
// Forms:
//
//	VPALIGNR.Z imm8 m128 xmm k xmm
//	VPALIGNR.Z imm8 m256 ymm k ymm
//	VPALIGNR.Z imm8 xmm xmm k xmm
//	VPALIGNR.Z imm8 ymm ymm k ymm
//	VPALIGNR.Z imm8 m512 zmm k zmm
//	VPALIGNR.Z imm8 zmm zmm k zmm
//
// Construct and append a VPALIGNR.Z instruction to the active function.
// Construction errors are recorded on the Context, not returned.
func (c *Context) VPALIGNR_Z(i, mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VPALIGNR_Z(i, mxyz, xyz, k, xyz1))
}

// VPALIGNR_Z: Packed Align Right (Zeroing Masking).
//
// Forms:
//
//	VPALIGNR.Z imm8 m128 xmm k xmm
//	VPALIGNR.Z imm8 m256 ymm k ymm
//	VPALIGNR.Z imm8 xmm xmm k xmm
//	VPALIGNR.Z imm8 ymm ymm k ymm
//	VPALIGNR.Z imm8 m512 zmm k zmm
//	VPALIGNR.Z imm8 zmm zmm k zmm
//
// Construct and append a VPALIGNR.Z instruction to the active function.
// Operates on the global context.
// Construction errors are recorded on the global context, not returned.
func VPALIGNR_Z(i, mxyz, xyz, k, xyz1 operand.Op) { ctx.VPALIGNR_Z(i, mxyz, xyz, k, xyz1) }
|
|
|
|
// VPAND: Packed Bitwise Logical AND.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPAND m256 ymm ymm
|
|
// VPAND ymm ymm ymm
|
|
// VPAND m128 xmm xmm
|
|
// VPAND xmm xmm xmm
|
|
//
|
|
// Construct and append a VPAND instruction to the active function.
|
|
func (c *Context) VPAND(mxy, xy, xy1 operand.Op) {
|
|
c.addinstruction(x86.VPAND(mxy, xy, xy1))
|
|
}
|
|
|
|
// VPAND: Packed Bitwise Logical AND.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPAND m256 ymm ymm
|
|
// VPAND ymm ymm ymm
|
|
// VPAND m128 xmm xmm
|
|
// VPAND xmm xmm xmm
|
|
//
|
|
// Construct and append a VPAND instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPAND(mxy, xy, xy1 operand.Op) { ctx.VPAND(mxy, xy, xy1) }
|
|
|
|
// VPANDD: Bitwise Logical AND of Packed Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPANDD m128 xmm k xmm
|
|
// VPANDD m128 xmm xmm
|
|
// VPANDD m256 ymm k ymm
|
|
// VPANDD m256 ymm ymm
|
|
// VPANDD xmm xmm k xmm
|
|
// VPANDD xmm xmm xmm
|
|
// VPANDD ymm ymm k ymm
|
|
// VPANDD ymm ymm ymm
|
|
// VPANDD m512 zmm k zmm
|
|
// VPANDD m512 zmm zmm
|
|
// VPANDD zmm zmm k zmm
|
|
// VPANDD zmm zmm zmm
|
|
//
|
|
// Construct and append a VPANDD instruction to the active function.
|
|
func (c *Context) VPANDD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPANDD(ops...))
|
|
}
|
|
|
|
// VPANDD: Bitwise Logical AND of Packed Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPANDD m128 xmm k xmm
|
|
// VPANDD m128 xmm xmm
|
|
// VPANDD m256 ymm k ymm
|
|
// VPANDD m256 ymm ymm
|
|
// VPANDD xmm xmm k xmm
|
|
// VPANDD xmm xmm xmm
|
|
// VPANDD ymm ymm k ymm
|
|
// VPANDD ymm ymm ymm
|
|
// VPANDD m512 zmm k zmm
|
|
// VPANDD m512 zmm zmm
|
|
// VPANDD zmm zmm k zmm
|
|
// VPANDD zmm zmm zmm
|
|
//
|
|
// Construct and append a VPANDD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPANDD(ops ...operand.Op) { ctx.VPANDD(ops...) }
|
|
|
|
// VPANDD_BCST: Bitwise Logical AND of Packed Doubleword Integers (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPANDD.BCST m32 xmm k xmm
|
|
// VPANDD.BCST m32 xmm xmm
|
|
// VPANDD.BCST m32 ymm k ymm
|
|
// VPANDD.BCST m32 ymm ymm
|
|
// VPANDD.BCST m32 zmm k zmm
|
|
// VPANDD.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VPANDD.BCST instruction to the active function.
|
|
func (c *Context) VPANDD_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPANDD_BCST(ops...))
|
|
}
|
|
|
|
// VPANDD_BCST: Bitwise Logical AND of Packed Doubleword Integers (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPANDD.BCST m32 xmm k xmm
|
|
// VPANDD.BCST m32 xmm xmm
|
|
// VPANDD.BCST m32 ymm k ymm
|
|
// VPANDD.BCST m32 ymm ymm
|
|
// VPANDD.BCST m32 zmm k zmm
|
|
// VPANDD.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VPANDD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPANDD_BCST(ops ...operand.Op) { ctx.VPANDD_BCST(ops...) }
|
|
|
|
// VPANDD_BCST_Z: Bitwise Logical AND of Packed Doubleword Integers (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPANDD.BCST.Z m32 xmm k xmm
|
|
// VPANDD.BCST.Z m32 ymm k ymm
|
|
// VPANDD.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VPANDD.BCST.Z instruction to the active function.
|
|
func (c *Context) VPANDD_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPANDD_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPANDD_BCST_Z: Bitwise Logical AND of Packed Doubleword Integers (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPANDD.BCST.Z m32 xmm k xmm
|
|
// VPANDD.BCST.Z m32 ymm k ymm
|
|
// VPANDD.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VPANDD.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPANDD_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VPANDD_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VPANDD_Z: Bitwise Logical AND of Packed Doubleword Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPANDD.Z m128 xmm k xmm
|
|
// VPANDD.Z m256 ymm k ymm
|
|
// VPANDD.Z xmm xmm k xmm
|
|
// VPANDD.Z ymm ymm k ymm
|
|
// VPANDD.Z m512 zmm k zmm
|
|
// VPANDD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPANDD.Z instruction to the active function.
|
|
func (c *Context) VPANDD_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPANDD_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPANDD_Z: Bitwise Logical AND of Packed Doubleword Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPANDD.Z m128 xmm k xmm
|
|
// VPANDD.Z m256 ymm k ymm
|
|
// VPANDD.Z xmm xmm k xmm
|
|
// VPANDD.Z ymm ymm k ymm
|
|
// VPANDD.Z m512 zmm k zmm
|
|
// VPANDD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPANDD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPANDD_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPANDD_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPANDN: Packed Bitwise Logical AND NOT.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPANDN m256 ymm ymm
|
|
// VPANDN ymm ymm ymm
|
|
// VPANDN m128 xmm xmm
|
|
// VPANDN xmm xmm xmm
|
|
//
|
|
// Construct and append a VPANDN instruction to the active function.
|
|
func (c *Context) VPANDN(mxy, xy, xy1 operand.Op) {
|
|
c.addinstruction(x86.VPANDN(mxy, xy, xy1))
|
|
}
|
|
|
|
// VPANDN: Packed Bitwise Logical AND NOT.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPANDN m256 ymm ymm
|
|
// VPANDN ymm ymm ymm
|
|
// VPANDN m128 xmm xmm
|
|
// VPANDN xmm xmm xmm
|
|
//
|
|
// Construct and append a VPANDN instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPANDN(mxy, xy, xy1 operand.Op) { ctx.VPANDN(mxy, xy, xy1) }
|
|
|
|
// VPANDND: Bitwise Logical AND NOT of Packed Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPANDND m128 xmm k xmm
|
|
// VPANDND m128 xmm xmm
|
|
// VPANDND m256 ymm k ymm
|
|
// VPANDND m256 ymm ymm
|
|
// VPANDND xmm xmm k xmm
|
|
// VPANDND xmm xmm xmm
|
|
// VPANDND ymm ymm k ymm
|
|
// VPANDND ymm ymm ymm
|
|
// VPANDND m512 zmm k zmm
|
|
// VPANDND m512 zmm zmm
|
|
// VPANDND zmm zmm k zmm
|
|
// VPANDND zmm zmm zmm
|
|
//
|
|
// Construct and append a VPANDND instruction to the active function.
|
|
func (c *Context) VPANDND(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPANDND(ops...))
|
|
}
|
|
|
|
// VPANDND: Bitwise Logical AND NOT of Packed Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPANDND m128 xmm k xmm
|
|
// VPANDND m128 xmm xmm
|
|
// VPANDND m256 ymm k ymm
|
|
// VPANDND m256 ymm ymm
|
|
// VPANDND xmm xmm k xmm
|
|
// VPANDND xmm xmm xmm
|
|
// VPANDND ymm ymm k ymm
|
|
// VPANDND ymm ymm ymm
|
|
// VPANDND m512 zmm k zmm
|
|
// VPANDND m512 zmm zmm
|
|
// VPANDND zmm zmm k zmm
|
|
// VPANDND zmm zmm zmm
|
|
//
|
|
// Construct and append a VPANDND instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPANDND(ops ...operand.Op) { ctx.VPANDND(ops...) }
|
|
|
|
// VPANDND_BCST: Bitwise Logical AND NOT of Packed Doubleword Integers (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPANDND.BCST m32 xmm k xmm
|
|
// VPANDND.BCST m32 xmm xmm
|
|
// VPANDND.BCST m32 ymm k ymm
|
|
// VPANDND.BCST m32 ymm ymm
|
|
// VPANDND.BCST m32 zmm k zmm
|
|
// VPANDND.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VPANDND.BCST instruction to the active function.
|
|
func (c *Context) VPANDND_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPANDND_BCST(ops...))
|
|
}
|
|
|
|
// VPANDND_BCST: Bitwise Logical AND NOT of Packed Doubleword Integers (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPANDND.BCST m32 xmm k xmm
|
|
// VPANDND.BCST m32 xmm xmm
|
|
// VPANDND.BCST m32 ymm k ymm
|
|
// VPANDND.BCST m32 ymm ymm
|
|
// VPANDND.BCST m32 zmm k zmm
|
|
// VPANDND.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VPANDND.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPANDND_BCST(ops ...operand.Op) { ctx.VPANDND_BCST(ops...) }
|
|
|
|
// VPANDND_BCST_Z: Bitwise Logical AND NOT of Packed Doubleword Integers (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPANDND.BCST.Z m32 xmm k xmm
|
|
// VPANDND.BCST.Z m32 ymm k ymm
|
|
// VPANDND.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VPANDND.BCST.Z instruction to the active function.
|
|
func (c *Context) VPANDND_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPANDND_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPANDND_BCST_Z: Bitwise Logical AND NOT of Packed Doubleword Integers (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPANDND.BCST.Z m32 xmm k xmm
|
|
// VPANDND.BCST.Z m32 ymm k ymm
|
|
// VPANDND.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VPANDND.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPANDND_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VPANDND_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VPANDND_Z: Bitwise Logical AND NOT of Packed Doubleword Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPANDND.Z m128 xmm k xmm
|
|
// VPANDND.Z m256 ymm k ymm
|
|
// VPANDND.Z xmm xmm k xmm
|
|
// VPANDND.Z ymm ymm k ymm
|
|
// VPANDND.Z m512 zmm k zmm
|
|
// VPANDND.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPANDND.Z instruction to the active function.
|
|
func (c *Context) VPANDND_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPANDND_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPANDND_Z: Bitwise Logical AND NOT of Packed Doubleword Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPANDND.Z m128 xmm k xmm
|
|
// VPANDND.Z m256 ymm k ymm
|
|
// VPANDND.Z xmm xmm k xmm
|
|
// VPANDND.Z ymm ymm k ymm
|
|
// VPANDND.Z m512 zmm k zmm
|
|
// VPANDND.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPANDND.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPANDND_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPANDND_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPANDNQ: Bitwise Logical AND NOT of Packed Quadword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPANDNQ m128 xmm k xmm
|
|
// VPANDNQ m128 xmm xmm
|
|
// VPANDNQ m256 ymm k ymm
|
|
// VPANDNQ m256 ymm ymm
|
|
// VPANDNQ xmm xmm k xmm
|
|
// VPANDNQ xmm xmm xmm
|
|
// VPANDNQ ymm ymm k ymm
|
|
// VPANDNQ ymm ymm ymm
|
|
// VPANDNQ m512 zmm k zmm
|
|
// VPANDNQ m512 zmm zmm
|
|
// VPANDNQ zmm zmm k zmm
|
|
// VPANDNQ zmm zmm zmm
|
|
//
|
|
// Construct and append a VPANDNQ instruction to the active function.
|
|
func (c *Context) VPANDNQ(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPANDNQ(ops...))
|
|
}
|
|
|
|
// VPANDNQ: Bitwise Logical AND NOT of Packed Quadword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPANDNQ m128 xmm k xmm
|
|
// VPANDNQ m128 xmm xmm
|
|
// VPANDNQ m256 ymm k ymm
|
|
// VPANDNQ m256 ymm ymm
|
|
// VPANDNQ xmm xmm k xmm
|
|
// VPANDNQ xmm xmm xmm
|
|
// VPANDNQ ymm ymm k ymm
|
|
// VPANDNQ ymm ymm ymm
|
|
// VPANDNQ m512 zmm k zmm
|
|
// VPANDNQ m512 zmm zmm
|
|
// VPANDNQ zmm zmm k zmm
|
|
// VPANDNQ zmm zmm zmm
|
|
//
|
|
// Construct and append a VPANDNQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPANDNQ(ops ...operand.Op) { ctx.VPANDNQ(ops...) }
|
|
|
|
// VPANDNQ_BCST: Bitwise Logical AND NOT of Packed Quadword Integers (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPANDNQ.BCST m64 xmm k xmm
|
|
// VPANDNQ.BCST m64 xmm xmm
|
|
// VPANDNQ.BCST m64 ymm k ymm
|
|
// VPANDNQ.BCST m64 ymm ymm
|
|
// VPANDNQ.BCST m64 zmm k zmm
|
|
// VPANDNQ.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VPANDNQ.BCST instruction to the active function.
|
|
func (c *Context) VPANDNQ_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPANDNQ_BCST(ops...))
|
|
}
|
|
|
|
// VPANDNQ_BCST: Bitwise Logical AND NOT of Packed Quadword Integers (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPANDNQ.BCST m64 xmm k xmm
|
|
// VPANDNQ.BCST m64 xmm xmm
|
|
// VPANDNQ.BCST m64 ymm k ymm
|
|
// VPANDNQ.BCST m64 ymm ymm
|
|
// VPANDNQ.BCST m64 zmm k zmm
|
|
// VPANDNQ.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VPANDNQ.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPANDNQ_BCST(ops ...operand.Op) { ctx.VPANDNQ_BCST(ops...) }
|
|
|
|
// VPANDNQ_BCST_Z: Bitwise Logical AND NOT of Packed Quadword Integers (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPANDNQ.BCST.Z m64 xmm k xmm
|
|
// VPANDNQ.BCST.Z m64 ymm k ymm
|
|
// VPANDNQ.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VPANDNQ.BCST.Z instruction to the active function.
|
|
func (c *Context) VPANDNQ_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPANDNQ_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPANDNQ_BCST_Z: Bitwise Logical AND NOT of Packed Quadword Integers (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPANDNQ.BCST.Z m64 xmm k xmm
|
|
// VPANDNQ.BCST.Z m64 ymm k ymm
|
|
// VPANDNQ.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VPANDNQ.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPANDNQ_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VPANDNQ_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VPANDNQ_Z: Bitwise Logical AND NOT of Packed Quadword Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPANDNQ.Z m128 xmm k xmm
|
|
// VPANDNQ.Z m256 ymm k ymm
|
|
// VPANDNQ.Z xmm xmm k xmm
|
|
// VPANDNQ.Z ymm ymm k ymm
|
|
// VPANDNQ.Z m512 zmm k zmm
|
|
// VPANDNQ.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPANDNQ.Z instruction to the active function.
|
|
func (c *Context) VPANDNQ_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPANDNQ_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPANDNQ_Z: Bitwise Logical AND NOT of Packed Quadword Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPANDNQ.Z m128 xmm k xmm
|
|
// VPANDNQ.Z m256 ymm k ymm
|
|
// VPANDNQ.Z xmm xmm k xmm
|
|
// VPANDNQ.Z ymm ymm k ymm
|
|
// VPANDNQ.Z m512 zmm k zmm
|
|
// VPANDNQ.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPANDNQ.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPANDNQ_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPANDNQ_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPANDQ: Bitwise Logical AND of Packed Quadword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPANDQ m128 xmm k xmm
|
|
// VPANDQ m128 xmm xmm
|
|
// VPANDQ m256 ymm k ymm
|
|
// VPANDQ m256 ymm ymm
|
|
// VPANDQ xmm xmm k xmm
|
|
// VPANDQ xmm xmm xmm
|
|
// VPANDQ ymm ymm k ymm
|
|
// VPANDQ ymm ymm ymm
|
|
// VPANDQ m512 zmm k zmm
|
|
// VPANDQ m512 zmm zmm
|
|
// VPANDQ zmm zmm k zmm
|
|
// VPANDQ zmm zmm zmm
|
|
//
|
|
// Construct and append a VPANDQ instruction to the active function.
|
|
func (c *Context) VPANDQ(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPANDQ(ops...))
|
|
}
|
|
|
|
// VPANDQ: Bitwise Logical AND of Packed Quadword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPANDQ m128 xmm k xmm
|
|
// VPANDQ m128 xmm xmm
|
|
// VPANDQ m256 ymm k ymm
|
|
// VPANDQ m256 ymm ymm
|
|
// VPANDQ xmm xmm k xmm
|
|
// VPANDQ xmm xmm xmm
|
|
// VPANDQ ymm ymm k ymm
|
|
// VPANDQ ymm ymm ymm
|
|
// VPANDQ m512 zmm k zmm
|
|
// VPANDQ m512 zmm zmm
|
|
// VPANDQ zmm zmm k zmm
|
|
// VPANDQ zmm zmm zmm
|
|
//
|
|
// Construct and append a VPANDQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPANDQ(ops ...operand.Op) { ctx.VPANDQ(ops...) }
|
|
|
|
// VPANDQ_BCST: Bitwise Logical AND of Packed Quadword Integers (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPANDQ.BCST m64 xmm k xmm
|
|
// VPANDQ.BCST m64 xmm xmm
|
|
// VPANDQ.BCST m64 ymm k ymm
|
|
// VPANDQ.BCST m64 ymm ymm
|
|
// VPANDQ.BCST m64 zmm k zmm
|
|
// VPANDQ.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VPANDQ.BCST instruction to the active function.
|
|
func (c *Context) VPANDQ_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPANDQ_BCST(ops...))
|
|
}
|
|
|
|
// VPANDQ_BCST: Bitwise Logical AND of Packed Quadword Integers (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPANDQ.BCST m64 xmm k xmm
|
|
// VPANDQ.BCST m64 xmm xmm
|
|
// VPANDQ.BCST m64 ymm k ymm
|
|
// VPANDQ.BCST m64 ymm ymm
|
|
// VPANDQ.BCST m64 zmm k zmm
|
|
// VPANDQ.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VPANDQ.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPANDQ_BCST(ops ...operand.Op) { ctx.VPANDQ_BCST(ops...) }
|
|
|
|
// VPANDQ_BCST_Z: Bitwise Logical AND of Packed Quadword Integers (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPANDQ.BCST.Z m64 xmm k xmm
|
|
// VPANDQ.BCST.Z m64 ymm k ymm
|
|
// VPANDQ.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VPANDQ.BCST.Z instruction to the active function.
|
|
func (c *Context) VPANDQ_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPANDQ_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPANDQ_BCST_Z: Bitwise Logical AND of Packed Quadword Integers (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPANDQ.BCST.Z m64 xmm k xmm
|
|
// VPANDQ.BCST.Z m64 ymm k ymm
|
|
// VPANDQ.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VPANDQ.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPANDQ_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VPANDQ_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VPANDQ_Z: Bitwise Logical AND of Packed Quadword Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPANDQ.Z m128 xmm k xmm
|
|
// VPANDQ.Z m256 ymm k ymm
|
|
// VPANDQ.Z xmm xmm k xmm
|
|
// VPANDQ.Z ymm ymm k ymm
|
|
// VPANDQ.Z m512 zmm k zmm
|
|
// VPANDQ.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPANDQ.Z instruction to the active function.
|
|
func (c *Context) VPANDQ_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPANDQ_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPANDQ_Z: Bitwise Logical AND of Packed Quadword Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPANDQ.Z m128 xmm k xmm
|
|
// VPANDQ.Z m256 ymm k ymm
|
|
// VPANDQ.Z xmm xmm k xmm
|
|
// VPANDQ.Z ymm ymm k ymm
|
|
// VPANDQ.Z m512 zmm k zmm
|
|
// VPANDQ.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPANDQ.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPANDQ_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPANDQ_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPAVGB: Average Packed Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPAVGB m256 ymm ymm
|
|
// VPAVGB ymm ymm ymm
|
|
// VPAVGB m128 xmm xmm
|
|
// VPAVGB xmm xmm xmm
|
|
// VPAVGB m128 xmm k xmm
|
|
// VPAVGB m256 ymm k ymm
|
|
// VPAVGB xmm xmm k xmm
|
|
// VPAVGB ymm ymm k ymm
|
|
// VPAVGB m512 zmm k zmm
|
|
// VPAVGB m512 zmm zmm
|
|
// VPAVGB zmm zmm k zmm
|
|
// VPAVGB zmm zmm zmm
|
|
//
|
|
// Construct and append a VPAVGB instruction to the active function.
|
|
func (c *Context) VPAVGB(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPAVGB(ops...))
|
|
}
|
|
|
|
// VPAVGB: Average Packed Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPAVGB m256 ymm ymm
|
|
// VPAVGB ymm ymm ymm
|
|
// VPAVGB m128 xmm xmm
|
|
// VPAVGB xmm xmm xmm
|
|
// VPAVGB m128 xmm k xmm
|
|
// VPAVGB m256 ymm k ymm
|
|
// VPAVGB xmm xmm k xmm
|
|
// VPAVGB ymm ymm k ymm
|
|
// VPAVGB m512 zmm k zmm
|
|
// VPAVGB m512 zmm zmm
|
|
// VPAVGB zmm zmm k zmm
|
|
// VPAVGB zmm zmm zmm
|
|
//
|
|
// Construct and append a VPAVGB instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPAVGB(ops ...operand.Op) { ctx.VPAVGB(ops...) }
|
|
|
|
// VPAVGB_Z: Average Packed Byte Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPAVGB.Z m128 xmm k xmm
|
|
// VPAVGB.Z m256 ymm k ymm
|
|
// VPAVGB.Z xmm xmm k xmm
|
|
// VPAVGB.Z ymm ymm k ymm
|
|
// VPAVGB.Z m512 zmm k zmm
|
|
// VPAVGB.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPAVGB.Z instruction to the active function.
|
|
func (c *Context) VPAVGB_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPAVGB_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPAVGB_Z: Average Packed Byte Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPAVGB.Z m128 xmm k xmm
|
|
// VPAVGB.Z m256 ymm k ymm
|
|
// VPAVGB.Z xmm xmm k xmm
|
|
// VPAVGB.Z ymm ymm k ymm
|
|
// VPAVGB.Z m512 zmm k zmm
|
|
// VPAVGB.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPAVGB.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPAVGB_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPAVGB_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPAVGW: Average Packed Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPAVGW m256 ymm ymm
|
|
// VPAVGW ymm ymm ymm
|
|
// VPAVGW m128 xmm xmm
|
|
// VPAVGW xmm xmm xmm
|
|
// VPAVGW m128 xmm k xmm
|
|
// VPAVGW m256 ymm k ymm
|
|
// VPAVGW xmm xmm k xmm
|
|
// VPAVGW ymm ymm k ymm
|
|
// VPAVGW m512 zmm k zmm
|
|
// VPAVGW m512 zmm zmm
|
|
// VPAVGW zmm zmm k zmm
|
|
// VPAVGW zmm zmm zmm
|
|
//
|
|
// Construct and append a VPAVGW instruction to the active function.
|
|
func (c *Context) VPAVGW(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPAVGW(ops...))
|
|
}
|
|
|
|
// VPAVGW: Average Packed Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPAVGW m256 ymm ymm
|
|
// VPAVGW ymm ymm ymm
|
|
// VPAVGW m128 xmm xmm
|
|
// VPAVGW xmm xmm xmm
|
|
// VPAVGW m128 xmm k xmm
|
|
// VPAVGW m256 ymm k ymm
|
|
// VPAVGW xmm xmm k xmm
|
|
// VPAVGW ymm ymm k ymm
|
|
// VPAVGW m512 zmm k zmm
|
|
// VPAVGW m512 zmm zmm
|
|
// VPAVGW zmm zmm k zmm
|
|
// VPAVGW zmm zmm zmm
|
|
//
|
|
// Construct and append a VPAVGW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPAVGW(ops ...operand.Op) { ctx.VPAVGW(ops...) }
|
|
|
|
// VPAVGW_Z: Average Packed Word Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPAVGW.Z m128 xmm k xmm
|
|
// VPAVGW.Z m256 ymm k ymm
|
|
// VPAVGW.Z xmm xmm k xmm
|
|
// VPAVGW.Z ymm ymm k ymm
|
|
// VPAVGW.Z m512 zmm k zmm
|
|
// VPAVGW.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPAVGW.Z instruction to the active function.
|
|
func (c *Context) VPAVGW_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPAVGW_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPAVGW_Z: Average Packed Word Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPAVGW.Z m128 xmm k xmm
|
|
// VPAVGW.Z m256 ymm k ymm
|
|
// VPAVGW.Z xmm xmm k xmm
|
|
// VPAVGW.Z ymm ymm k ymm
|
|
// VPAVGW.Z m512 zmm k zmm
|
|
// VPAVGW.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPAVGW.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPAVGW_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPAVGW_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPBLENDD: Blend Packed Doublewords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBLENDD imm8 m128 xmm xmm
|
|
// VPBLENDD imm8 m256 ymm ymm
|
|
// VPBLENDD imm8 xmm xmm xmm
|
|
// VPBLENDD imm8 ymm ymm ymm
|
|
//
|
|
// Construct and append a VPBLENDD instruction to the active function.
|
|
func (c *Context) VPBLENDD(i, mxy, xy, xy1 operand.Op) {
|
|
c.addinstruction(x86.VPBLENDD(i, mxy, xy, xy1))
|
|
}
|
|
|
|
// VPBLENDD: Blend Packed Doublewords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBLENDD imm8 m128 xmm xmm
|
|
// VPBLENDD imm8 m256 ymm ymm
|
|
// VPBLENDD imm8 xmm xmm xmm
|
|
// VPBLENDD imm8 ymm ymm ymm
|
|
//
|
|
// Construct and append a VPBLENDD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPBLENDD(i, mxy, xy, xy1 operand.Op) { ctx.VPBLENDD(i, mxy, xy, xy1) }
|
|
|
|
// VPBLENDMB: Blend Byte Vectors Using an OpMask Control.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBLENDMB m128 xmm k xmm
|
|
// VPBLENDMB m128 xmm xmm
|
|
// VPBLENDMB m256 ymm k ymm
|
|
// VPBLENDMB m256 ymm ymm
|
|
// VPBLENDMB xmm xmm k xmm
|
|
// VPBLENDMB xmm xmm xmm
|
|
// VPBLENDMB ymm ymm k ymm
|
|
// VPBLENDMB ymm ymm ymm
|
|
// VPBLENDMB m512 zmm k zmm
|
|
// VPBLENDMB m512 zmm zmm
|
|
// VPBLENDMB zmm zmm k zmm
|
|
// VPBLENDMB zmm zmm zmm
|
|
//
|
|
// Construct and append a VPBLENDMB instruction to the active function.
|
|
func (c *Context) VPBLENDMB(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPBLENDMB(ops...))
|
|
}
|
|
|
|
// VPBLENDMB: Blend Byte Vectors Using an OpMask Control.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBLENDMB m128 xmm k xmm
|
|
// VPBLENDMB m128 xmm xmm
|
|
// VPBLENDMB m256 ymm k ymm
|
|
// VPBLENDMB m256 ymm ymm
|
|
// VPBLENDMB xmm xmm k xmm
|
|
// VPBLENDMB xmm xmm xmm
|
|
// VPBLENDMB ymm ymm k ymm
|
|
// VPBLENDMB ymm ymm ymm
|
|
// VPBLENDMB m512 zmm k zmm
|
|
// VPBLENDMB m512 zmm zmm
|
|
// VPBLENDMB zmm zmm k zmm
|
|
// VPBLENDMB zmm zmm zmm
|
|
//
|
|
// Construct and append a VPBLENDMB instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPBLENDMB(ops ...operand.Op) { ctx.VPBLENDMB(ops...) }
|
|
|
|
// VPBLENDMB_Z: Blend Byte Vectors Using an OpMask Control (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBLENDMB.Z m128 xmm k xmm
|
|
// VPBLENDMB.Z m256 ymm k ymm
|
|
// VPBLENDMB.Z xmm xmm k xmm
|
|
// VPBLENDMB.Z ymm ymm k ymm
|
|
// VPBLENDMB.Z m512 zmm k zmm
|
|
// VPBLENDMB.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPBLENDMB.Z instruction to the active function.
|
|
func (c *Context) VPBLENDMB_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPBLENDMB_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPBLENDMB_Z: Blend Byte Vectors Using an OpMask Control (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBLENDMB.Z m128 xmm k xmm
|
|
// VPBLENDMB.Z m256 ymm k ymm
|
|
// VPBLENDMB.Z xmm xmm k xmm
|
|
// VPBLENDMB.Z ymm ymm k ymm
|
|
// VPBLENDMB.Z m512 zmm k zmm
|
|
// VPBLENDMB.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPBLENDMB.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPBLENDMB_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPBLENDMB_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPBLENDMD: Blend Doubleword Vectors Using an OpMask Control.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBLENDMD m128 xmm k xmm
|
|
// VPBLENDMD m128 xmm xmm
|
|
// VPBLENDMD m256 ymm k ymm
|
|
// VPBLENDMD m256 ymm ymm
|
|
// VPBLENDMD xmm xmm k xmm
|
|
// VPBLENDMD xmm xmm xmm
|
|
// VPBLENDMD ymm ymm k ymm
|
|
// VPBLENDMD ymm ymm ymm
|
|
// VPBLENDMD m512 zmm k zmm
|
|
// VPBLENDMD m512 zmm zmm
|
|
// VPBLENDMD zmm zmm k zmm
|
|
// VPBLENDMD zmm zmm zmm
|
|
//
|
|
// Construct and append a VPBLENDMD instruction to the active function.
|
|
func (c *Context) VPBLENDMD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPBLENDMD(ops...))
|
|
}
|
|
|
|
// VPBLENDMD: Blend Doubleword Vectors Using an OpMask Control.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBLENDMD m128 xmm k xmm
|
|
// VPBLENDMD m128 xmm xmm
|
|
// VPBLENDMD m256 ymm k ymm
|
|
// VPBLENDMD m256 ymm ymm
|
|
// VPBLENDMD xmm xmm k xmm
|
|
// VPBLENDMD xmm xmm xmm
|
|
// VPBLENDMD ymm ymm k ymm
|
|
// VPBLENDMD ymm ymm ymm
|
|
// VPBLENDMD m512 zmm k zmm
|
|
// VPBLENDMD m512 zmm zmm
|
|
// VPBLENDMD zmm zmm k zmm
|
|
// VPBLENDMD zmm zmm zmm
|
|
//
|
|
// Construct and append a VPBLENDMD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPBLENDMD(ops ...operand.Op) { ctx.VPBLENDMD(ops...) }
|
|
|
|
// VPBLENDMD_BCST: Blend Doubleword Vectors Using an OpMask Control (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBLENDMD.BCST m32 xmm k xmm
|
|
// VPBLENDMD.BCST m32 xmm xmm
|
|
// VPBLENDMD.BCST m32 ymm k ymm
|
|
// VPBLENDMD.BCST m32 ymm ymm
|
|
// VPBLENDMD.BCST m32 zmm k zmm
|
|
// VPBLENDMD.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VPBLENDMD.BCST instruction to the active function.
|
|
func (c *Context) VPBLENDMD_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPBLENDMD_BCST(ops...))
|
|
}
|
|
|
|
// VPBLENDMD_BCST: Blend Doubleword Vectors Using an OpMask Control (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBLENDMD.BCST m32 xmm k xmm
|
|
// VPBLENDMD.BCST m32 xmm xmm
|
|
// VPBLENDMD.BCST m32 ymm k ymm
|
|
// VPBLENDMD.BCST m32 ymm ymm
|
|
// VPBLENDMD.BCST m32 zmm k zmm
|
|
// VPBLENDMD.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VPBLENDMD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPBLENDMD_BCST(ops ...operand.Op) { ctx.VPBLENDMD_BCST(ops...) }
|
|
|
|
// VPBLENDMD_BCST_Z: Blend Doubleword Vectors Using an OpMask Control (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBLENDMD.BCST.Z m32 xmm k xmm
|
|
// VPBLENDMD.BCST.Z m32 ymm k ymm
|
|
// VPBLENDMD.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VPBLENDMD.BCST.Z instruction to the active function.
|
|
func (c *Context) VPBLENDMD_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPBLENDMD_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPBLENDMD_BCST_Z: Blend Doubleword Vectors Using an OpMask Control (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBLENDMD.BCST.Z m32 xmm k xmm
|
|
// VPBLENDMD.BCST.Z m32 ymm k ymm
|
|
// VPBLENDMD.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VPBLENDMD.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPBLENDMD_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VPBLENDMD_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VPBLENDMD_Z: Blend Doubleword Vectors Using an OpMask Control (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBLENDMD.Z m128 xmm k xmm
|
|
// VPBLENDMD.Z m256 ymm k ymm
|
|
// VPBLENDMD.Z xmm xmm k xmm
|
|
// VPBLENDMD.Z ymm ymm k ymm
|
|
// VPBLENDMD.Z m512 zmm k zmm
|
|
// VPBLENDMD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPBLENDMD.Z instruction to the active function.
|
|
func (c *Context) VPBLENDMD_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPBLENDMD_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPBLENDMD_Z: Blend Doubleword Vectors Using an OpMask Control (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBLENDMD.Z m128 xmm k xmm
|
|
// VPBLENDMD.Z m256 ymm k ymm
|
|
// VPBLENDMD.Z xmm xmm k xmm
|
|
// VPBLENDMD.Z ymm ymm k ymm
|
|
// VPBLENDMD.Z m512 zmm k zmm
|
|
// VPBLENDMD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPBLENDMD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPBLENDMD_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPBLENDMD_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPBLENDMQ: Blend Quadword Vectors Using an OpMask Control.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBLENDMQ m128 xmm k xmm
|
|
// VPBLENDMQ m128 xmm xmm
|
|
// VPBLENDMQ m256 ymm k ymm
|
|
// VPBLENDMQ m256 ymm ymm
|
|
// VPBLENDMQ xmm xmm k xmm
|
|
// VPBLENDMQ xmm xmm xmm
|
|
// VPBLENDMQ ymm ymm k ymm
|
|
// VPBLENDMQ ymm ymm ymm
|
|
// VPBLENDMQ m512 zmm k zmm
|
|
// VPBLENDMQ m512 zmm zmm
|
|
// VPBLENDMQ zmm zmm k zmm
|
|
// VPBLENDMQ zmm zmm zmm
|
|
//
|
|
// Construct and append a VPBLENDMQ instruction to the active function.
|
|
func (c *Context) VPBLENDMQ(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPBLENDMQ(ops...))
|
|
}
|
|
|
|
// VPBLENDMQ: Blend Quadword Vectors Using an OpMask Control.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBLENDMQ m128 xmm k xmm
|
|
// VPBLENDMQ m128 xmm xmm
|
|
// VPBLENDMQ m256 ymm k ymm
|
|
// VPBLENDMQ m256 ymm ymm
|
|
// VPBLENDMQ xmm xmm k xmm
|
|
// VPBLENDMQ xmm xmm xmm
|
|
// VPBLENDMQ ymm ymm k ymm
|
|
// VPBLENDMQ ymm ymm ymm
|
|
// VPBLENDMQ m512 zmm k zmm
|
|
// VPBLENDMQ m512 zmm zmm
|
|
// VPBLENDMQ zmm zmm k zmm
|
|
// VPBLENDMQ zmm zmm zmm
|
|
//
|
|
// Construct and append a VPBLENDMQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPBLENDMQ(ops ...operand.Op) { ctx.VPBLENDMQ(ops...) }
|
|
|
|
// VPBLENDMQ_BCST: Blend Quadword Vectors Using an OpMask Control (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBLENDMQ.BCST m64 xmm k xmm
|
|
// VPBLENDMQ.BCST m64 xmm xmm
|
|
// VPBLENDMQ.BCST m64 ymm k ymm
|
|
// VPBLENDMQ.BCST m64 ymm ymm
|
|
// VPBLENDMQ.BCST m64 zmm k zmm
|
|
// VPBLENDMQ.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VPBLENDMQ.BCST instruction to the active function.
|
|
func (c *Context) VPBLENDMQ_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPBLENDMQ_BCST(ops...))
|
|
}
|
|
|
|
// VPBLENDMQ_BCST: Blend Quadword Vectors Using an OpMask Control (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBLENDMQ.BCST m64 xmm k xmm
|
|
// VPBLENDMQ.BCST m64 xmm xmm
|
|
// VPBLENDMQ.BCST m64 ymm k ymm
|
|
// VPBLENDMQ.BCST m64 ymm ymm
|
|
// VPBLENDMQ.BCST m64 zmm k zmm
|
|
// VPBLENDMQ.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VPBLENDMQ.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPBLENDMQ_BCST(ops ...operand.Op) { ctx.VPBLENDMQ_BCST(ops...) }
|
|
|
|
// VPBLENDMQ_BCST_Z: Blend Quadword Vectors Using an OpMask Control (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBLENDMQ.BCST.Z m64 xmm k xmm
|
|
// VPBLENDMQ.BCST.Z m64 ymm k ymm
|
|
// VPBLENDMQ.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VPBLENDMQ.BCST.Z instruction to the active function.
|
|
func (c *Context) VPBLENDMQ_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPBLENDMQ_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPBLENDMQ_BCST_Z: Blend Quadword Vectors Using an OpMask Control (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBLENDMQ.BCST.Z m64 xmm k xmm
|
|
// VPBLENDMQ.BCST.Z m64 ymm k ymm
|
|
// VPBLENDMQ.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VPBLENDMQ.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPBLENDMQ_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VPBLENDMQ_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VPBLENDMQ_Z: Blend Quadword Vectors Using an OpMask Control (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBLENDMQ.Z m128 xmm k xmm
|
|
// VPBLENDMQ.Z m256 ymm k ymm
|
|
// VPBLENDMQ.Z xmm xmm k xmm
|
|
// VPBLENDMQ.Z ymm ymm k ymm
|
|
// VPBLENDMQ.Z m512 zmm k zmm
|
|
// VPBLENDMQ.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPBLENDMQ.Z instruction to the active function.
|
|
func (c *Context) VPBLENDMQ_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPBLENDMQ_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPBLENDMQ_Z: Blend Quadword Vectors Using an OpMask Control (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBLENDMQ.Z m128 xmm k xmm
|
|
// VPBLENDMQ.Z m256 ymm k ymm
|
|
// VPBLENDMQ.Z xmm xmm k xmm
|
|
// VPBLENDMQ.Z ymm ymm k ymm
|
|
// VPBLENDMQ.Z m512 zmm k zmm
|
|
// VPBLENDMQ.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPBLENDMQ.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPBLENDMQ_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPBLENDMQ_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPBLENDMW: Blend Word Vectors Using an OpMask Control.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBLENDMW m128 xmm k xmm
|
|
// VPBLENDMW m128 xmm xmm
|
|
// VPBLENDMW m256 ymm k ymm
|
|
// VPBLENDMW m256 ymm ymm
|
|
// VPBLENDMW xmm xmm k xmm
|
|
// VPBLENDMW xmm xmm xmm
|
|
// VPBLENDMW ymm ymm k ymm
|
|
// VPBLENDMW ymm ymm ymm
|
|
// VPBLENDMW m512 zmm k zmm
|
|
// VPBLENDMW m512 zmm zmm
|
|
// VPBLENDMW zmm zmm k zmm
|
|
// VPBLENDMW zmm zmm zmm
|
|
//
|
|
// Construct and append a VPBLENDMW instruction to the active function.
|
|
func (c *Context) VPBLENDMW(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPBLENDMW(ops...))
|
|
}
|
|
|
|
// VPBLENDMW: Blend Word Vectors Using an OpMask Control.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBLENDMW m128 xmm k xmm
|
|
// VPBLENDMW m128 xmm xmm
|
|
// VPBLENDMW m256 ymm k ymm
|
|
// VPBLENDMW m256 ymm ymm
|
|
// VPBLENDMW xmm xmm k xmm
|
|
// VPBLENDMW xmm xmm xmm
|
|
// VPBLENDMW ymm ymm k ymm
|
|
// VPBLENDMW ymm ymm ymm
|
|
// VPBLENDMW m512 zmm k zmm
|
|
// VPBLENDMW m512 zmm zmm
|
|
// VPBLENDMW zmm zmm k zmm
|
|
// VPBLENDMW zmm zmm zmm
|
|
//
|
|
// Construct and append a VPBLENDMW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPBLENDMW(ops ...operand.Op) { ctx.VPBLENDMW(ops...) }
|
|
|
|
// VPBLENDMW_Z: Blend Word Vectors Using an OpMask Control (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBLENDMW.Z m128 xmm k xmm
|
|
// VPBLENDMW.Z m256 ymm k ymm
|
|
// VPBLENDMW.Z xmm xmm k xmm
|
|
// VPBLENDMW.Z ymm ymm k ymm
|
|
// VPBLENDMW.Z m512 zmm k zmm
|
|
// VPBLENDMW.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPBLENDMW.Z instruction to the active function.
|
|
func (c *Context) VPBLENDMW_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPBLENDMW_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPBLENDMW_Z: Blend Word Vectors Using an OpMask Control (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBLENDMW.Z m128 xmm k xmm
|
|
// VPBLENDMW.Z m256 ymm k ymm
|
|
// VPBLENDMW.Z xmm xmm k xmm
|
|
// VPBLENDMW.Z ymm ymm k ymm
|
|
// VPBLENDMW.Z m512 zmm k zmm
|
|
// VPBLENDMW.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPBLENDMW.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPBLENDMW_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPBLENDMW_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPBLENDVB: Variable Blend Packed Bytes.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBLENDVB ymm m256 ymm ymm
|
|
// VPBLENDVB ymm ymm ymm ymm
|
|
// VPBLENDVB xmm m128 xmm xmm
|
|
// VPBLENDVB xmm xmm xmm xmm
|
|
//
|
|
// Construct and append a VPBLENDVB instruction to the active function.
|
|
func (c *Context) VPBLENDVB(xy, mxy, xy1, xy2 operand.Op) {
|
|
c.addinstruction(x86.VPBLENDVB(xy, mxy, xy1, xy2))
|
|
}
|
|
|
|
// VPBLENDVB: Variable Blend Packed Bytes.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBLENDVB ymm m256 ymm ymm
|
|
// VPBLENDVB ymm ymm ymm ymm
|
|
// VPBLENDVB xmm m128 xmm xmm
|
|
// VPBLENDVB xmm xmm xmm xmm
|
|
//
|
|
// Construct and append a VPBLENDVB instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPBLENDVB(xy, mxy, xy1, xy2 operand.Op) { ctx.VPBLENDVB(xy, mxy, xy1, xy2) }
|
|
|
|
// VPBLENDW: Blend Packed Words.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBLENDW imm8 m256 ymm ymm
|
|
// VPBLENDW imm8 ymm ymm ymm
|
|
// VPBLENDW imm8 m128 xmm xmm
|
|
// VPBLENDW imm8 xmm xmm xmm
|
|
//
|
|
// Construct and append a VPBLENDW instruction to the active function.
|
|
func (c *Context) VPBLENDW(i, mxy, xy, xy1 operand.Op) {
|
|
c.addinstruction(x86.VPBLENDW(i, mxy, xy, xy1))
|
|
}
|
|
|
|
// VPBLENDW: Blend Packed Words.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBLENDW imm8 m256 ymm ymm
|
|
// VPBLENDW imm8 ymm ymm ymm
|
|
// VPBLENDW imm8 m128 xmm xmm
|
|
// VPBLENDW imm8 xmm xmm xmm
|
|
//
|
|
// Construct and append a VPBLENDW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPBLENDW(i, mxy, xy, xy1 operand.Op) { ctx.VPBLENDW(i, mxy, xy, xy1) }
|
|
|
|
// VPBROADCASTB: Broadcast Byte Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBROADCASTB m8 xmm
|
|
// VPBROADCASTB m8 ymm
|
|
// VPBROADCASTB xmm xmm
|
|
// VPBROADCASTB xmm ymm
|
|
// VPBROADCASTB m8 k xmm
|
|
// VPBROADCASTB m8 k ymm
|
|
// VPBROADCASTB r32 k xmm
|
|
// VPBROADCASTB r32 k ymm
|
|
// VPBROADCASTB r32 xmm
|
|
// VPBROADCASTB r32 ymm
|
|
// VPBROADCASTB xmm k xmm
|
|
// VPBROADCASTB xmm k ymm
|
|
// VPBROADCASTB m8 k zmm
|
|
// VPBROADCASTB m8 zmm
|
|
// VPBROADCASTB r32 k zmm
|
|
// VPBROADCASTB r32 zmm
|
|
// VPBROADCASTB xmm k zmm
|
|
// VPBROADCASTB xmm zmm
|
|
//
|
|
// Construct and append a VPBROADCASTB instruction to the active function.
|
|
func (c *Context) VPBROADCASTB(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPBROADCASTB(ops...))
|
|
}
|
|
|
|
// VPBROADCASTB: Broadcast Byte Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBROADCASTB m8 xmm
|
|
// VPBROADCASTB m8 ymm
|
|
// VPBROADCASTB xmm xmm
|
|
// VPBROADCASTB xmm ymm
|
|
// VPBROADCASTB m8 k xmm
|
|
// VPBROADCASTB m8 k ymm
|
|
// VPBROADCASTB r32 k xmm
|
|
// VPBROADCASTB r32 k ymm
|
|
// VPBROADCASTB r32 xmm
|
|
// VPBROADCASTB r32 ymm
|
|
// VPBROADCASTB xmm k xmm
|
|
// VPBROADCASTB xmm k ymm
|
|
// VPBROADCASTB m8 k zmm
|
|
// VPBROADCASTB m8 zmm
|
|
// VPBROADCASTB r32 k zmm
|
|
// VPBROADCASTB r32 zmm
|
|
// VPBROADCASTB xmm k zmm
|
|
// VPBROADCASTB xmm zmm
|
|
//
|
|
// Construct and append a VPBROADCASTB instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPBROADCASTB(ops ...operand.Op) { ctx.VPBROADCASTB(ops...) }
|
|
|
|
// VPBROADCASTB_Z: Broadcast Byte Integer (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBROADCASTB.Z m8 k xmm
|
|
// VPBROADCASTB.Z m8 k ymm
|
|
// VPBROADCASTB.Z r32 k xmm
|
|
// VPBROADCASTB.Z r32 k ymm
|
|
// VPBROADCASTB.Z xmm k xmm
|
|
// VPBROADCASTB.Z xmm k ymm
|
|
// VPBROADCASTB.Z m8 k zmm
|
|
// VPBROADCASTB.Z r32 k zmm
|
|
// VPBROADCASTB.Z xmm k zmm
|
|
//
|
|
// Construct and append a VPBROADCASTB.Z instruction to the active function.
|
|
func (c *Context) VPBROADCASTB_Z(mrx, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VPBROADCASTB_Z(mrx, k, xyz))
|
|
}
|
|
|
|
// VPBROADCASTB_Z: Broadcast Byte Integer (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBROADCASTB.Z m8 k xmm
|
|
// VPBROADCASTB.Z m8 k ymm
|
|
// VPBROADCASTB.Z r32 k xmm
|
|
// VPBROADCASTB.Z r32 k ymm
|
|
// VPBROADCASTB.Z xmm k xmm
|
|
// VPBROADCASTB.Z xmm k ymm
|
|
// VPBROADCASTB.Z m8 k zmm
|
|
// VPBROADCASTB.Z r32 k zmm
|
|
// VPBROADCASTB.Z xmm k zmm
|
|
//
|
|
// Construct and append a VPBROADCASTB.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPBROADCASTB_Z(mrx, k, xyz operand.Op) { ctx.VPBROADCASTB_Z(mrx, k, xyz) }
|
|
|
|
// VPBROADCASTD: Broadcast Doubleword Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBROADCASTD m32 xmm
|
|
// VPBROADCASTD m32 ymm
|
|
// VPBROADCASTD xmm xmm
|
|
// VPBROADCASTD xmm ymm
|
|
// VPBROADCASTD m32 k xmm
|
|
// VPBROADCASTD m32 k ymm
|
|
// VPBROADCASTD r32 k xmm
|
|
// VPBROADCASTD r32 k ymm
|
|
// VPBROADCASTD r32 xmm
|
|
// VPBROADCASTD r32 ymm
|
|
// VPBROADCASTD xmm k xmm
|
|
// VPBROADCASTD xmm k ymm
|
|
// VPBROADCASTD m32 k zmm
|
|
// VPBROADCASTD m32 zmm
|
|
// VPBROADCASTD r32 k zmm
|
|
// VPBROADCASTD r32 zmm
|
|
// VPBROADCASTD xmm k zmm
|
|
// VPBROADCASTD xmm zmm
|
|
//
|
|
// Construct and append a VPBROADCASTD instruction to the active function.
|
|
func (c *Context) VPBROADCASTD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPBROADCASTD(ops...))
|
|
}
|
|
|
|
// VPBROADCASTD: Broadcast Doubleword Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBROADCASTD m32 xmm
|
|
// VPBROADCASTD m32 ymm
|
|
// VPBROADCASTD xmm xmm
|
|
// VPBROADCASTD xmm ymm
|
|
// VPBROADCASTD m32 k xmm
|
|
// VPBROADCASTD m32 k ymm
|
|
// VPBROADCASTD r32 k xmm
|
|
// VPBROADCASTD r32 k ymm
|
|
// VPBROADCASTD r32 xmm
|
|
// VPBROADCASTD r32 ymm
|
|
// VPBROADCASTD xmm k xmm
|
|
// VPBROADCASTD xmm k ymm
|
|
// VPBROADCASTD m32 k zmm
|
|
// VPBROADCASTD m32 zmm
|
|
// VPBROADCASTD r32 k zmm
|
|
// VPBROADCASTD r32 zmm
|
|
// VPBROADCASTD xmm k zmm
|
|
// VPBROADCASTD xmm zmm
|
|
//
|
|
// Construct and append a VPBROADCASTD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPBROADCASTD(ops ...operand.Op) { ctx.VPBROADCASTD(ops...) }
|
|
|
|
// VPBROADCASTD_Z: Broadcast Doubleword Integer (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBROADCASTD.Z m32 k xmm
|
|
// VPBROADCASTD.Z m32 k ymm
|
|
// VPBROADCASTD.Z r32 k xmm
|
|
// VPBROADCASTD.Z r32 k ymm
|
|
// VPBROADCASTD.Z xmm k xmm
|
|
// VPBROADCASTD.Z xmm k ymm
|
|
// VPBROADCASTD.Z m32 k zmm
|
|
// VPBROADCASTD.Z r32 k zmm
|
|
// VPBROADCASTD.Z xmm k zmm
|
|
//
|
|
// Construct and append a VPBROADCASTD.Z instruction to the active function.
|
|
func (c *Context) VPBROADCASTD_Z(mrx, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VPBROADCASTD_Z(mrx, k, xyz))
|
|
}
|
|
|
|
// VPBROADCASTD_Z: Broadcast Doubleword Integer (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBROADCASTD.Z m32 k xmm
|
|
// VPBROADCASTD.Z m32 k ymm
|
|
// VPBROADCASTD.Z r32 k xmm
|
|
// VPBROADCASTD.Z r32 k ymm
|
|
// VPBROADCASTD.Z xmm k xmm
|
|
// VPBROADCASTD.Z xmm k ymm
|
|
// VPBROADCASTD.Z m32 k zmm
|
|
// VPBROADCASTD.Z r32 k zmm
|
|
// VPBROADCASTD.Z xmm k zmm
|
|
//
|
|
// Construct and append a VPBROADCASTD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPBROADCASTD_Z(mrx, k, xyz operand.Op) { ctx.VPBROADCASTD_Z(mrx, k, xyz) }
|
|
|
|
// VPBROADCASTMB2Q: Broadcast Low Byte of Mask Register to Packed Quadword Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBROADCASTMB2Q k xmm
|
|
// VPBROADCASTMB2Q k ymm
|
|
// VPBROADCASTMB2Q k zmm
|
|
//
|
|
// Construct and append a VPBROADCASTMB2Q instruction to the active function.
|
|
func (c *Context) VPBROADCASTMB2Q(k, xyz operand.Op) {
|
|
c.addinstruction(x86.VPBROADCASTMB2Q(k, xyz))
|
|
}
|
|
|
|
// VPBROADCASTMB2Q: Broadcast Low Byte of Mask Register to Packed Quadword Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBROADCASTMB2Q k xmm
|
|
// VPBROADCASTMB2Q k ymm
|
|
// VPBROADCASTMB2Q k zmm
|
|
//
|
|
// Construct and append a VPBROADCASTMB2Q instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPBROADCASTMB2Q(k, xyz operand.Op) { ctx.VPBROADCASTMB2Q(k, xyz) }
|
|
|
|
// VPBROADCASTMW2D: Broadcast Low Word of Mask Register to Packed Doubleword Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBROADCASTMW2D k xmm
|
|
// VPBROADCASTMW2D k ymm
|
|
// VPBROADCASTMW2D k zmm
|
|
//
|
|
// Construct and append a VPBROADCASTMW2D instruction to the active function.
|
|
func (c *Context) VPBROADCASTMW2D(k, xyz operand.Op) {
|
|
c.addinstruction(x86.VPBROADCASTMW2D(k, xyz))
|
|
}
|
|
|
|
// VPBROADCASTMW2D: Broadcast Low Word of Mask Register to Packed Doubleword Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBROADCASTMW2D k xmm
|
|
// VPBROADCASTMW2D k ymm
|
|
// VPBROADCASTMW2D k zmm
|
|
//
|
|
// Construct and append a VPBROADCASTMW2D instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPBROADCASTMW2D(k, xyz operand.Op) { ctx.VPBROADCASTMW2D(k, xyz) }
|
|
|
|
// VPBROADCASTQ: Broadcast Quadword Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBROADCASTQ m64 xmm
|
|
// VPBROADCASTQ m64 ymm
|
|
// VPBROADCASTQ xmm xmm
|
|
// VPBROADCASTQ xmm ymm
|
|
// VPBROADCASTQ m64 k xmm
|
|
// VPBROADCASTQ m64 k ymm
|
|
// VPBROADCASTQ r64 k xmm
|
|
// VPBROADCASTQ r64 k ymm
|
|
// VPBROADCASTQ r64 xmm
|
|
// VPBROADCASTQ r64 ymm
|
|
// VPBROADCASTQ xmm k xmm
|
|
// VPBROADCASTQ xmm k ymm
|
|
// VPBROADCASTQ m64 k zmm
|
|
// VPBROADCASTQ m64 zmm
|
|
// VPBROADCASTQ r64 k zmm
|
|
// VPBROADCASTQ r64 zmm
|
|
// VPBROADCASTQ xmm k zmm
|
|
// VPBROADCASTQ xmm zmm
|
|
//
|
|
// Construct and append a VPBROADCASTQ instruction to the active function.
|
|
func (c *Context) VPBROADCASTQ(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPBROADCASTQ(ops...))
|
|
}
|
|
|
|
// VPBROADCASTQ: Broadcast Quadword Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBROADCASTQ m64 xmm
|
|
// VPBROADCASTQ m64 ymm
|
|
// VPBROADCASTQ xmm xmm
|
|
// VPBROADCASTQ xmm ymm
|
|
// VPBROADCASTQ m64 k xmm
|
|
// VPBROADCASTQ m64 k ymm
|
|
// VPBROADCASTQ r64 k xmm
|
|
// VPBROADCASTQ r64 k ymm
|
|
// VPBROADCASTQ r64 xmm
|
|
// VPBROADCASTQ r64 ymm
|
|
// VPBROADCASTQ xmm k xmm
|
|
// VPBROADCASTQ xmm k ymm
|
|
// VPBROADCASTQ m64 k zmm
|
|
// VPBROADCASTQ m64 zmm
|
|
// VPBROADCASTQ r64 k zmm
|
|
// VPBROADCASTQ r64 zmm
|
|
// VPBROADCASTQ xmm k zmm
|
|
// VPBROADCASTQ xmm zmm
|
|
//
|
|
// Construct and append a VPBROADCASTQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPBROADCASTQ(ops ...operand.Op) { ctx.VPBROADCASTQ(ops...) }
|
|
|
|
// VPBROADCASTQ_Z: Broadcast Quadword Integer (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBROADCASTQ.Z m64 k xmm
|
|
// VPBROADCASTQ.Z m64 k ymm
|
|
// VPBROADCASTQ.Z r64 k xmm
|
|
// VPBROADCASTQ.Z r64 k ymm
|
|
// VPBROADCASTQ.Z xmm k xmm
|
|
// VPBROADCASTQ.Z xmm k ymm
|
|
// VPBROADCASTQ.Z m64 k zmm
|
|
// VPBROADCASTQ.Z r64 k zmm
|
|
// VPBROADCASTQ.Z xmm k zmm
|
|
//
|
|
// Construct and append a VPBROADCASTQ.Z instruction to the active function.
|
|
func (c *Context) VPBROADCASTQ_Z(mrx, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VPBROADCASTQ_Z(mrx, k, xyz))
|
|
}
|
|
|
|
// VPBROADCASTQ_Z: Broadcast Quadword Integer (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBROADCASTQ.Z m64 k xmm
|
|
// VPBROADCASTQ.Z m64 k ymm
|
|
// VPBROADCASTQ.Z r64 k xmm
|
|
// VPBROADCASTQ.Z r64 k ymm
|
|
// VPBROADCASTQ.Z xmm k xmm
|
|
// VPBROADCASTQ.Z xmm k ymm
|
|
// VPBROADCASTQ.Z m64 k zmm
|
|
// VPBROADCASTQ.Z r64 k zmm
|
|
// VPBROADCASTQ.Z xmm k zmm
|
|
//
|
|
// Construct and append a VPBROADCASTQ.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPBROADCASTQ_Z(mrx, k, xyz operand.Op) { ctx.VPBROADCASTQ_Z(mrx, k, xyz) }
|
|
|
|
// VPBROADCASTW: Broadcast Word Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBROADCASTW m16 xmm
|
|
// VPBROADCASTW m16 ymm
|
|
// VPBROADCASTW xmm xmm
|
|
// VPBROADCASTW xmm ymm
|
|
// VPBROADCASTW m16 k xmm
|
|
// VPBROADCASTW m16 k ymm
|
|
// VPBROADCASTW r32 k xmm
|
|
// VPBROADCASTW r32 k ymm
|
|
// VPBROADCASTW r32 xmm
|
|
// VPBROADCASTW r32 ymm
|
|
// VPBROADCASTW xmm k xmm
|
|
// VPBROADCASTW xmm k ymm
|
|
// VPBROADCASTW m16 k zmm
|
|
// VPBROADCASTW m16 zmm
|
|
// VPBROADCASTW r32 k zmm
|
|
// VPBROADCASTW r32 zmm
|
|
// VPBROADCASTW xmm k zmm
|
|
// VPBROADCASTW xmm zmm
|
|
//
|
|
// Construct and append a VPBROADCASTW instruction to the active function.
|
|
func (c *Context) VPBROADCASTW(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPBROADCASTW(ops...))
|
|
}
|
|
|
|
// VPBROADCASTW: Broadcast Word Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBROADCASTW m16 xmm
|
|
// VPBROADCASTW m16 ymm
|
|
// VPBROADCASTW xmm xmm
|
|
// VPBROADCASTW xmm ymm
|
|
// VPBROADCASTW m16 k xmm
|
|
// VPBROADCASTW m16 k ymm
|
|
// VPBROADCASTW r32 k xmm
|
|
// VPBROADCASTW r32 k ymm
|
|
// VPBROADCASTW r32 xmm
|
|
// VPBROADCASTW r32 ymm
|
|
// VPBROADCASTW xmm k xmm
|
|
// VPBROADCASTW xmm k ymm
|
|
// VPBROADCASTW m16 k zmm
|
|
// VPBROADCASTW m16 zmm
|
|
// VPBROADCASTW r32 k zmm
|
|
// VPBROADCASTW r32 zmm
|
|
// VPBROADCASTW xmm k zmm
|
|
// VPBROADCASTW xmm zmm
|
|
//
|
|
// Construct and append a VPBROADCASTW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPBROADCASTW(ops ...operand.Op) { ctx.VPBROADCASTW(ops...) }
|
|
|
|
// VPBROADCASTW_Z: Broadcast Word Integer (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBROADCASTW.Z m16 k xmm
|
|
// VPBROADCASTW.Z m16 k ymm
|
|
// VPBROADCASTW.Z r32 k xmm
|
|
// VPBROADCASTW.Z r32 k ymm
|
|
// VPBROADCASTW.Z xmm k xmm
|
|
// VPBROADCASTW.Z xmm k ymm
|
|
// VPBROADCASTW.Z m16 k zmm
|
|
// VPBROADCASTW.Z r32 k zmm
|
|
// VPBROADCASTW.Z xmm k zmm
|
|
//
|
|
// Construct and append a VPBROADCASTW.Z instruction to the active function.
|
|
func (c *Context) VPBROADCASTW_Z(mrx, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VPBROADCASTW_Z(mrx, k, xyz))
|
|
}
|
|
|
|
// VPBROADCASTW_Z: Broadcast Word Integer (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPBROADCASTW.Z m16 k xmm
|
|
// VPBROADCASTW.Z m16 k ymm
|
|
// VPBROADCASTW.Z r32 k xmm
|
|
// VPBROADCASTW.Z r32 k ymm
|
|
// VPBROADCASTW.Z xmm k xmm
|
|
// VPBROADCASTW.Z xmm k ymm
|
|
// VPBROADCASTW.Z m16 k zmm
|
|
// VPBROADCASTW.Z r32 k zmm
|
|
// VPBROADCASTW.Z xmm k zmm
|
|
//
|
|
// Construct and append a VPBROADCASTW.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPBROADCASTW_Z(mrx, k, xyz operand.Op) { ctx.VPBROADCASTW_Z(mrx, k, xyz) }
|
|
|
|
// VPCLMULQDQ: Carry-Less Quadword Multiplication.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCLMULQDQ imm8 m128 xmm xmm
|
|
// VPCLMULQDQ imm8 xmm xmm xmm
|
|
//
|
|
// Construct and append a VPCLMULQDQ instruction to the active function.
|
|
func (c *Context) VPCLMULQDQ(i, mx, x, x1 operand.Op) {
|
|
c.addinstruction(x86.VPCLMULQDQ(i, mx, x, x1))
|
|
}
|
|
|
|
// VPCLMULQDQ: Carry-Less Quadword Multiplication.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCLMULQDQ imm8 m128 xmm xmm
|
|
// VPCLMULQDQ imm8 xmm xmm xmm
|
|
//
|
|
// Construct and append a VPCLMULQDQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPCLMULQDQ(i, mx, x, x1 operand.Op) { ctx.VPCLMULQDQ(i, mx, x, x1) }
|
|
|
|
// VPCMPB: Compare Packed Signed Byte Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPB imm8 m128 xmm k k
|
|
// VPCMPB imm8 m128 xmm k
|
|
// VPCMPB imm8 m256 ymm k k
|
|
// VPCMPB imm8 m256 ymm k
|
|
// VPCMPB imm8 xmm xmm k k
|
|
// VPCMPB imm8 xmm xmm k
|
|
// VPCMPB imm8 ymm ymm k k
|
|
// VPCMPB imm8 ymm ymm k
|
|
// VPCMPB imm8 m512 zmm k k
|
|
// VPCMPB imm8 m512 zmm k
|
|
// VPCMPB imm8 zmm zmm k k
|
|
// VPCMPB imm8 zmm zmm k
|
|
//
|
|
// Construct and append a VPCMPB instruction to the active function.
|
|
func (c *Context) VPCMPB(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPCMPB(ops...))
|
|
}
|
|
|
|
// VPCMPB: Compare Packed Signed Byte Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPB imm8 m128 xmm k k
|
|
// VPCMPB imm8 m128 xmm k
|
|
// VPCMPB imm8 m256 ymm k k
|
|
// VPCMPB imm8 m256 ymm k
|
|
// VPCMPB imm8 xmm xmm k k
|
|
// VPCMPB imm8 xmm xmm k
|
|
// VPCMPB imm8 ymm ymm k k
|
|
// VPCMPB imm8 ymm ymm k
|
|
// VPCMPB imm8 m512 zmm k k
|
|
// VPCMPB imm8 m512 zmm k
|
|
// VPCMPB imm8 zmm zmm k k
|
|
// VPCMPB imm8 zmm zmm k
|
|
//
|
|
// Construct and append a VPCMPB instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPCMPB(ops ...operand.Op) { ctx.VPCMPB(ops...) }
|
|
|
|
// VPCMPD: Compare Packed Signed Doubleword Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPD imm8 m128 xmm k k
|
|
// VPCMPD imm8 m128 xmm k
|
|
// VPCMPD imm8 m256 ymm k k
|
|
// VPCMPD imm8 m256 ymm k
|
|
// VPCMPD imm8 xmm xmm k k
|
|
// VPCMPD imm8 xmm xmm k
|
|
// VPCMPD imm8 ymm ymm k k
|
|
// VPCMPD imm8 ymm ymm k
|
|
// VPCMPD imm8 m512 zmm k k
|
|
// VPCMPD imm8 m512 zmm k
|
|
// VPCMPD imm8 zmm zmm k k
|
|
// VPCMPD imm8 zmm zmm k
|
|
//
|
|
// Construct and append a VPCMPD instruction to the active function.
|
|
func (c *Context) VPCMPD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPCMPD(ops...))
|
|
}
|
|
|
|
// VPCMPD: Compare Packed Signed Doubleword Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPD imm8 m128 xmm k k
|
|
// VPCMPD imm8 m128 xmm k
|
|
// VPCMPD imm8 m256 ymm k k
|
|
// VPCMPD imm8 m256 ymm k
|
|
// VPCMPD imm8 xmm xmm k k
|
|
// VPCMPD imm8 xmm xmm k
|
|
// VPCMPD imm8 ymm ymm k k
|
|
// VPCMPD imm8 ymm ymm k
|
|
// VPCMPD imm8 m512 zmm k k
|
|
// VPCMPD imm8 m512 zmm k
|
|
// VPCMPD imm8 zmm zmm k k
|
|
// VPCMPD imm8 zmm zmm k
|
|
//
|
|
// Construct and append a VPCMPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPCMPD(ops ...operand.Op) { ctx.VPCMPD(ops...) }
|
|
|
|
// VPCMPD_BCST: Compare Packed Signed Doubleword Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPD.BCST imm8 m32 xmm k k
|
|
// VPCMPD.BCST imm8 m32 xmm k
|
|
// VPCMPD.BCST imm8 m32 ymm k k
|
|
// VPCMPD.BCST imm8 m32 ymm k
|
|
// VPCMPD.BCST imm8 m32 zmm k k
|
|
// VPCMPD.BCST imm8 m32 zmm k
|
|
//
|
|
// Construct and append a VPCMPD.BCST instruction to the active function.
|
|
func (c *Context) VPCMPD_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPCMPD_BCST(ops...))
|
|
}
|
|
|
|
// VPCMPD_BCST: Compare Packed Signed Doubleword Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPD.BCST imm8 m32 xmm k k
|
|
// VPCMPD.BCST imm8 m32 xmm k
|
|
// VPCMPD.BCST imm8 m32 ymm k k
|
|
// VPCMPD.BCST imm8 m32 ymm k
|
|
// VPCMPD.BCST imm8 m32 zmm k k
|
|
// VPCMPD.BCST imm8 m32 zmm k
|
|
//
|
|
// Construct and append a VPCMPD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPCMPD_BCST(ops ...operand.Op) { ctx.VPCMPD_BCST(ops...) }
|
|
|
|
// VPCMPEQB: Compare Packed Byte Data for Equality.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPEQB m256 ymm ymm
|
|
// VPCMPEQB ymm ymm ymm
|
|
// VPCMPEQB m128 xmm xmm
|
|
// VPCMPEQB xmm xmm xmm
|
|
// VPCMPEQB m128 xmm k k
|
|
// VPCMPEQB m128 xmm k
|
|
// VPCMPEQB m256 ymm k k
|
|
// VPCMPEQB m256 ymm k
|
|
// VPCMPEQB xmm xmm k k
|
|
// VPCMPEQB xmm xmm k
|
|
// VPCMPEQB ymm ymm k k
|
|
// VPCMPEQB ymm ymm k
|
|
// VPCMPEQB m512 zmm k k
|
|
// VPCMPEQB m512 zmm k
|
|
// VPCMPEQB zmm zmm k k
|
|
// VPCMPEQB zmm zmm k
|
|
//
|
|
// Construct and append a VPCMPEQB instruction to the active function.
|
|
func (c *Context) VPCMPEQB(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPCMPEQB(ops...))
|
|
}
|
|
|
|
// VPCMPEQB: Compare Packed Byte Data for Equality.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPEQB m256 ymm ymm
|
|
// VPCMPEQB ymm ymm ymm
|
|
// VPCMPEQB m128 xmm xmm
|
|
// VPCMPEQB xmm xmm xmm
|
|
// VPCMPEQB m128 xmm k k
|
|
// VPCMPEQB m128 xmm k
|
|
// VPCMPEQB m256 ymm k k
|
|
// VPCMPEQB m256 ymm k
|
|
// VPCMPEQB xmm xmm k k
|
|
// VPCMPEQB xmm xmm k
|
|
// VPCMPEQB ymm ymm k k
|
|
// VPCMPEQB ymm ymm k
|
|
// VPCMPEQB m512 zmm k k
|
|
// VPCMPEQB m512 zmm k
|
|
// VPCMPEQB zmm zmm k k
|
|
// VPCMPEQB zmm zmm k
|
|
//
|
|
// Construct and append a VPCMPEQB instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPCMPEQB(ops ...operand.Op) { ctx.VPCMPEQB(ops...) }
|
|
|
|
// VPCMPEQD: Compare Packed Doubleword Data for Equality.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPEQD m256 ymm ymm
|
|
// VPCMPEQD ymm ymm ymm
|
|
// VPCMPEQD m128 xmm xmm
|
|
// VPCMPEQD xmm xmm xmm
|
|
// VPCMPEQD m128 xmm k k
|
|
// VPCMPEQD m128 xmm k
|
|
// VPCMPEQD m256 ymm k k
|
|
// VPCMPEQD m256 ymm k
|
|
// VPCMPEQD xmm xmm k k
|
|
// VPCMPEQD xmm xmm k
|
|
// VPCMPEQD ymm ymm k k
|
|
// VPCMPEQD ymm ymm k
|
|
// VPCMPEQD m512 zmm k k
|
|
// VPCMPEQD m512 zmm k
|
|
// VPCMPEQD zmm zmm k k
|
|
// VPCMPEQD zmm zmm k
|
|
//
|
|
// Construct and append a VPCMPEQD instruction to the active function.
|
|
func (c *Context) VPCMPEQD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPCMPEQD(ops...))
|
|
}
|
|
|
|
// VPCMPEQD: Compare Packed Doubleword Data for Equality.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPEQD m256 ymm ymm
|
|
// VPCMPEQD ymm ymm ymm
|
|
// VPCMPEQD m128 xmm xmm
|
|
// VPCMPEQD xmm xmm xmm
|
|
// VPCMPEQD m128 xmm k k
|
|
// VPCMPEQD m128 xmm k
|
|
// VPCMPEQD m256 ymm k k
|
|
// VPCMPEQD m256 ymm k
|
|
// VPCMPEQD xmm xmm k k
|
|
// VPCMPEQD xmm xmm k
|
|
// VPCMPEQD ymm ymm k k
|
|
// VPCMPEQD ymm ymm k
|
|
// VPCMPEQD m512 zmm k k
|
|
// VPCMPEQD m512 zmm k
|
|
// VPCMPEQD zmm zmm k k
|
|
// VPCMPEQD zmm zmm k
|
|
//
|
|
// Construct and append a VPCMPEQD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPCMPEQD(ops ...operand.Op) { ctx.VPCMPEQD(ops...) }
|
|
|
|
// VPCMPEQD_BCST: Compare Packed Doubleword Data for Equality (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPEQD.BCST m32 xmm k k
|
|
// VPCMPEQD.BCST m32 xmm k
|
|
// VPCMPEQD.BCST m32 ymm k k
|
|
// VPCMPEQD.BCST m32 ymm k
|
|
// VPCMPEQD.BCST m32 zmm k k
|
|
// VPCMPEQD.BCST m32 zmm k
|
|
//
|
|
// Construct and append a VPCMPEQD.BCST instruction to the active function.
|
|
func (c *Context) VPCMPEQD_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPCMPEQD_BCST(ops...))
|
|
}
|
|
|
|
// VPCMPEQD_BCST: Compare Packed Doubleword Data for Equality (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPEQD.BCST m32 xmm k k
|
|
// VPCMPEQD.BCST m32 xmm k
|
|
// VPCMPEQD.BCST m32 ymm k k
|
|
// VPCMPEQD.BCST m32 ymm k
|
|
// VPCMPEQD.BCST m32 zmm k k
|
|
// VPCMPEQD.BCST m32 zmm k
|
|
//
|
|
// Construct and append a VPCMPEQD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPCMPEQD_BCST(ops ...operand.Op) { ctx.VPCMPEQD_BCST(ops...) }
|
|
|
|
// VPCMPEQQ: Compare Packed Quadword Data for Equality.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPEQQ m256 ymm ymm
|
|
// VPCMPEQQ ymm ymm ymm
|
|
// VPCMPEQQ m128 xmm xmm
|
|
// VPCMPEQQ xmm xmm xmm
|
|
// VPCMPEQQ m128 xmm k k
|
|
// VPCMPEQQ m128 xmm k
|
|
// VPCMPEQQ m256 ymm k k
|
|
// VPCMPEQQ m256 ymm k
|
|
// VPCMPEQQ xmm xmm k k
|
|
// VPCMPEQQ xmm xmm k
|
|
// VPCMPEQQ ymm ymm k k
|
|
// VPCMPEQQ ymm ymm k
|
|
// VPCMPEQQ m512 zmm k k
|
|
// VPCMPEQQ m512 zmm k
|
|
// VPCMPEQQ zmm zmm k k
|
|
// VPCMPEQQ zmm zmm k
|
|
//
|
|
// Construct and append a VPCMPEQQ instruction to the active function.
|
|
func (c *Context) VPCMPEQQ(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPCMPEQQ(ops...))
|
|
}
|
|
|
|
// VPCMPEQQ: Compare Packed Quadword Data for Equality.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPEQQ m256 ymm ymm
|
|
// VPCMPEQQ ymm ymm ymm
|
|
// VPCMPEQQ m128 xmm xmm
|
|
// VPCMPEQQ xmm xmm xmm
|
|
// VPCMPEQQ m128 xmm k k
|
|
// VPCMPEQQ m128 xmm k
|
|
// VPCMPEQQ m256 ymm k k
|
|
// VPCMPEQQ m256 ymm k
|
|
// VPCMPEQQ xmm xmm k k
|
|
// VPCMPEQQ xmm xmm k
|
|
// VPCMPEQQ ymm ymm k k
|
|
// VPCMPEQQ ymm ymm k
|
|
// VPCMPEQQ m512 zmm k k
|
|
// VPCMPEQQ m512 zmm k
|
|
// VPCMPEQQ zmm zmm k k
|
|
// VPCMPEQQ zmm zmm k
|
|
//
|
|
// Construct and append a VPCMPEQQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPCMPEQQ(ops ...operand.Op) { ctx.VPCMPEQQ(ops...) }
|
|
|
|
// VPCMPEQQ_BCST: Compare Packed Quadword Data for Equality (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPEQQ.BCST m64 xmm k k
|
|
// VPCMPEQQ.BCST m64 xmm k
|
|
// VPCMPEQQ.BCST m64 ymm k k
|
|
// VPCMPEQQ.BCST m64 ymm k
|
|
// VPCMPEQQ.BCST m64 zmm k k
|
|
// VPCMPEQQ.BCST m64 zmm k
|
|
//
|
|
// Construct and append a VPCMPEQQ.BCST instruction to the active function.
|
|
func (c *Context) VPCMPEQQ_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPCMPEQQ_BCST(ops...))
|
|
}
|
|
|
|
// VPCMPEQQ_BCST: Compare Packed Quadword Data for Equality (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPEQQ.BCST m64 xmm k k
|
|
// VPCMPEQQ.BCST m64 xmm k
|
|
// VPCMPEQQ.BCST m64 ymm k k
|
|
// VPCMPEQQ.BCST m64 ymm k
|
|
// VPCMPEQQ.BCST m64 zmm k k
|
|
// VPCMPEQQ.BCST m64 zmm k
|
|
//
|
|
// Construct and append a VPCMPEQQ.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPCMPEQQ_BCST(ops ...operand.Op) { ctx.VPCMPEQQ_BCST(ops...) }
|
|
|
|
// VPCMPEQW: Compare Packed Word Data for Equality.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPEQW m256 ymm ymm
|
|
// VPCMPEQW ymm ymm ymm
|
|
// VPCMPEQW m128 xmm xmm
|
|
// VPCMPEQW xmm xmm xmm
|
|
// VPCMPEQW m128 xmm k k
|
|
// VPCMPEQW m128 xmm k
|
|
// VPCMPEQW m256 ymm k k
|
|
// VPCMPEQW m256 ymm k
|
|
// VPCMPEQW xmm xmm k k
|
|
// VPCMPEQW xmm xmm k
|
|
// VPCMPEQW ymm ymm k k
|
|
// VPCMPEQW ymm ymm k
|
|
// VPCMPEQW m512 zmm k k
|
|
// VPCMPEQW m512 zmm k
|
|
// VPCMPEQW zmm zmm k k
|
|
// VPCMPEQW zmm zmm k
|
|
//
|
|
// Construct and append a VPCMPEQW instruction to the active function.
|
|
func (c *Context) VPCMPEQW(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPCMPEQW(ops...))
|
|
}
|
|
|
|
// VPCMPEQW: Compare Packed Word Data for Equality.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPEQW m256 ymm ymm
|
|
// VPCMPEQW ymm ymm ymm
|
|
// VPCMPEQW m128 xmm xmm
|
|
// VPCMPEQW xmm xmm xmm
|
|
// VPCMPEQW m128 xmm k k
|
|
// VPCMPEQW m128 xmm k
|
|
// VPCMPEQW m256 ymm k k
|
|
// VPCMPEQW m256 ymm k
|
|
// VPCMPEQW xmm xmm k k
|
|
// VPCMPEQW xmm xmm k
|
|
// VPCMPEQW ymm ymm k k
|
|
// VPCMPEQW ymm ymm k
|
|
// VPCMPEQW m512 zmm k k
|
|
// VPCMPEQW m512 zmm k
|
|
// VPCMPEQW zmm zmm k k
|
|
// VPCMPEQW zmm zmm k
|
|
//
|
|
// Construct and append a VPCMPEQW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPCMPEQW(ops ...operand.Op) { ctx.VPCMPEQW(ops...) }
|
|
|
|
// VPCMPESTRI: Packed Compare Explicit Length Strings, Return Index.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPESTRI imm8 m128 xmm
|
|
// VPCMPESTRI imm8 xmm xmm
|
|
//
|
|
// Construct and append a VPCMPESTRI instruction to the active function.
|
|
func (c *Context) VPCMPESTRI(i, mx, x operand.Op) {
|
|
c.addinstruction(x86.VPCMPESTRI(i, mx, x))
|
|
}
|
|
|
|
// VPCMPESTRI: Packed Compare Explicit Length Strings, Return Index.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPESTRI imm8 m128 xmm
|
|
// VPCMPESTRI imm8 xmm xmm
|
|
//
|
|
// Construct and append a VPCMPESTRI instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPCMPESTRI(i, mx, x operand.Op) { ctx.VPCMPESTRI(i, mx, x) }
|
|
|
|
// VPCMPESTRM: Packed Compare Explicit Length Strings, Return Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPESTRM imm8 m128 xmm
|
|
// VPCMPESTRM imm8 xmm xmm
|
|
//
|
|
// Construct and append a VPCMPESTRM instruction to the active function.
|
|
func (c *Context) VPCMPESTRM(i, mx, x operand.Op) {
|
|
c.addinstruction(x86.VPCMPESTRM(i, mx, x))
|
|
}
|
|
|
|
// VPCMPESTRM: Packed Compare Explicit Length Strings, Return Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPESTRM imm8 m128 xmm
|
|
// VPCMPESTRM imm8 xmm xmm
|
|
//
|
|
// Construct and append a VPCMPESTRM instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPCMPESTRM(i, mx, x operand.Op) { ctx.VPCMPESTRM(i, mx, x) }
|
|
|
|
// VPCMPGTB: Compare Packed Signed Byte Integers for Greater Than.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPGTB m256 ymm ymm
|
|
// VPCMPGTB ymm ymm ymm
|
|
// VPCMPGTB m128 xmm xmm
|
|
// VPCMPGTB xmm xmm xmm
|
|
// VPCMPGTB m128 xmm k k
|
|
// VPCMPGTB m128 xmm k
|
|
// VPCMPGTB m256 ymm k k
|
|
// VPCMPGTB m256 ymm k
|
|
// VPCMPGTB xmm xmm k k
|
|
// VPCMPGTB xmm xmm k
|
|
// VPCMPGTB ymm ymm k k
|
|
// VPCMPGTB ymm ymm k
|
|
// VPCMPGTB m512 zmm k k
|
|
// VPCMPGTB m512 zmm k
|
|
// VPCMPGTB zmm zmm k k
|
|
// VPCMPGTB zmm zmm k
|
|
//
|
|
// Construct and append a VPCMPGTB instruction to the active function.
|
|
func (c *Context) VPCMPGTB(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPCMPGTB(ops...))
|
|
}
|
|
|
|
// VPCMPGTB: Compare Packed Signed Byte Integers for Greater Than.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPGTB m256 ymm ymm
|
|
// VPCMPGTB ymm ymm ymm
|
|
// VPCMPGTB m128 xmm xmm
|
|
// VPCMPGTB xmm xmm xmm
|
|
// VPCMPGTB m128 xmm k k
|
|
// VPCMPGTB m128 xmm k
|
|
// VPCMPGTB m256 ymm k k
|
|
// VPCMPGTB m256 ymm k
|
|
// VPCMPGTB xmm xmm k k
|
|
// VPCMPGTB xmm xmm k
|
|
// VPCMPGTB ymm ymm k k
|
|
// VPCMPGTB ymm ymm k
|
|
// VPCMPGTB m512 zmm k k
|
|
// VPCMPGTB m512 zmm k
|
|
// VPCMPGTB zmm zmm k k
|
|
// VPCMPGTB zmm zmm k
|
|
//
|
|
// Construct and append a VPCMPGTB instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPCMPGTB(ops ...operand.Op) { ctx.VPCMPGTB(ops...) }
|
|
|
|
// VPCMPGTD: Compare Packed Signed Doubleword Integers for Greater Than.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPGTD m256 ymm ymm
|
|
// VPCMPGTD ymm ymm ymm
|
|
// VPCMPGTD m128 xmm xmm
|
|
// VPCMPGTD xmm xmm xmm
|
|
// VPCMPGTD m128 xmm k k
|
|
// VPCMPGTD m128 xmm k
|
|
// VPCMPGTD m256 ymm k k
|
|
// VPCMPGTD m256 ymm k
|
|
// VPCMPGTD xmm xmm k k
|
|
// VPCMPGTD xmm xmm k
|
|
// VPCMPGTD ymm ymm k k
|
|
// VPCMPGTD ymm ymm k
|
|
// VPCMPGTD m512 zmm k k
|
|
// VPCMPGTD m512 zmm k
|
|
// VPCMPGTD zmm zmm k k
|
|
// VPCMPGTD zmm zmm k
|
|
//
|
|
// Construct and append a VPCMPGTD instruction to the active function.
|
|
func (c *Context) VPCMPGTD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPCMPGTD(ops...))
|
|
}
|
|
|
|
// VPCMPGTD: Compare Packed Signed Doubleword Integers for Greater Than.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPGTD m256 ymm ymm
|
|
// VPCMPGTD ymm ymm ymm
|
|
// VPCMPGTD m128 xmm xmm
|
|
// VPCMPGTD xmm xmm xmm
|
|
// VPCMPGTD m128 xmm k k
|
|
// VPCMPGTD m128 xmm k
|
|
// VPCMPGTD m256 ymm k k
|
|
// VPCMPGTD m256 ymm k
|
|
// VPCMPGTD xmm xmm k k
|
|
// VPCMPGTD xmm xmm k
|
|
// VPCMPGTD ymm ymm k k
|
|
// VPCMPGTD ymm ymm k
|
|
// VPCMPGTD m512 zmm k k
|
|
// VPCMPGTD m512 zmm k
|
|
// VPCMPGTD zmm zmm k k
|
|
// VPCMPGTD zmm zmm k
|
|
//
|
|
// Construct and append a VPCMPGTD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPCMPGTD(ops ...operand.Op) { ctx.VPCMPGTD(ops...) }
|
|
|
|
// VPCMPGTD_BCST: Compare Packed Signed Doubleword Integers for Greater Than (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPGTD.BCST m32 xmm k k
|
|
// VPCMPGTD.BCST m32 xmm k
|
|
// VPCMPGTD.BCST m32 ymm k k
|
|
// VPCMPGTD.BCST m32 ymm k
|
|
// VPCMPGTD.BCST m32 zmm k k
|
|
// VPCMPGTD.BCST m32 zmm k
|
|
//
|
|
// Construct and append a VPCMPGTD.BCST instruction to the active function.
|
|
func (c *Context) VPCMPGTD_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPCMPGTD_BCST(ops...))
|
|
}
|
|
|
|
// VPCMPGTD_BCST: Compare Packed Signed Doubleword Integers for Greater Than (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPGTD.BCST m32 xmm k k
|
|
// VPCMPGTD.BCST m32 xmm k
|
|
// VPCMPGTD.BCST m32 ymm k k
|
|
// VPCMPGTD.BCST m32 ymm k
|
|
// VPCMPGTD.BCST m32 zmm k k
|
|
// VPCMPGTD.BCST m32 zmm k
|
|
//
|
|
// Construct and append a VPCMPGTD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPCMPGTD_BCST(ops ...operand.Op) { ctx.VPCMPGTD_BCST(ops...) }
|
|
|
|
// VPCMPGTQ: Compare Packed Data for Greater Than.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPGTQ m256 ymm ymm
|
|
// VPCMPGTQ ymm ymm ymm
|
|
// VPCMPGTQ m128 xmm xmm
|
|
// VPCMPGTQ xmm xmm xmm
|
|
// VPCMPGTQ m128 xmm k k
|
|
// VPCMPGTQ m128 xmm k
|
|
// VPCMPGTQ m256 ymm k k
|
|
// VPCMPGTQ m256 ymm k
|
|
// VPCMPGTQ xmm xmm k k
|
|
// VPCMPGTQ xmm xmm k
|
|
// VPCMPGTQ ymm ymm k k
|
|
// VPCMPGTQ ymm ymm k
|
|
// VPCMPGTQ m512 zmm k k
|
|
// VPCMPGTQ m512 zmm k
|
|
// VPCMPGTQ zmm zmm k k
|
|
// VPCMPGTQ zmm zmm k
|
|
//
|
|
// Construct and append a VPCMPGTQ instruction to the active function.
|
|
func (c *Context) VPCMPGTQ(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPCMPGTQ(ops...))
|
|
}
|
|
|
|
// VPCMPGTQ: Compare Packed Data for Greater Than.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPGTQ m256 ymm ymm
|
|
// VPCMPGTQ ymm ymm ymm
|
|
// VPCMPGTQ m128 xmm xmm
|
|
// VPCMPGTQ xmm xmm xmm
|
|
// VPCMPGTQ m128 xmm k k
|
|
// VPCMPGTQ m128 xmm k
|
|
// VPCMPGTQ m256 ymm k k
|
|
// VPCMPGTQ m256 ymm k
|
|
// VPCMPGTQ xmm xmm k k
|
|
// VPCMPGTQ xmm xmm k
|
|
// VPCMPGTQ ymm ymm k k
|
|
// VPCMPGTQ ymm ymm k
|
|
// VPCMPGTQ m512 zmm k k
|
|
// VPCMPGTQ m512 zmm k
|
|
// VPCMPGTQ zmm zmm k k
|
|
// VPCMPGTQ zmm zmm k
|
|
//
|
|
// Construct and append a VPCMPGTQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPCMPGTQ(ops ...operand.Op) { ctx.VPCMPGTQ(ops...) }
|
|
|
|
// VPCMPGTQ_BCST: Compare Packed Data for Greater Than (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPGTQ.BCST m64 xmm k k
|
|
// VPCMPGTQ.BCST m64 xmm k
|
|
// VPCMPGTQ.BCST m64 ymm k k
|
|
// VPCMPGTQ.BCST m64 ymm k
|
|
// VPCMPGTQ.BCST m64 zmm k k
|
|
// VPCMPGTQ.BCST m64 zmm k
|
|
//
|
|
// Construct and append a VPCMPGTQ.BCST instruction to the active function.
|
|
func (c *Context) VPCMPGTQ_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPCMPGTQ_BCST(ops...))
|
|
}
|
|
|
|
// VPCMPGTQ_BCST: Compare Packed Data for Greater Than (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPGTQ.BCST m64 xmm k k
|
|
// VPCMPGTQ.BCST m64 xmm k
|
|
// VPCMPGTQ.BCST m64 ymm k k
|
|
// VPCMPGTQ.BCST m64 ymm k
|
|
// VPCMPGTQ.BCST m64 zmm k k
|
|
// VPCMPGTQ.BCST m64 zmm k
|
|
//
|
|
// Construct and append a VPCMPGTQ.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPCMPGTQ_BCST(ops ...operand.Op) { ctx.VPCMPGTQ_BCST(ops...) }
|
|
|
|
// VPCMPGTW: Compare Packed Signed Word Integers for Greater Than.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPGTW m256 ymm ymm
|
|
// VPCMPGTW ymm ymm ymm
|
|
// VPCMPGTW m128 xmm xmm
|
|
// VPCMPGTW xmm xmm xmm
|
|
// VPCMPGTW m128 xmm k k
|
|
// VPCMPGTW m128 xmm k
|
|
// VPCMPGTW m256 ymm k k
|
|
// VPCMPGTW m256 ymm k
|
|
// VPCMPGTW xmm xmm k k
|
|
// VPCMPGTW xmm xmm k
|
|
// VPCMPGTW ymm ymm k k
|
|
// VPCMPGTW ymm ymm k
|
|
// VPCMPGTW m512 zmm k k
|
|
// VPCMPGTW m512 zmm k
|
|
// VPCMPGTW zmm zmm k k
|
|
// VPCMPGTW zmm zmm k
|
|
//
|
|
// Construct and append a VPCMPGTW instruction to the active function.
|
|
func (c *Context) VPCMPGTW(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPCMPGTW(ops...))
|
|
}
|
|
|
|
// VPCMPGTW: Compare Packed Signed Word Integers for Greater Than.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPGTW m256 ymm ymm
|
|
// VPCMPGTW ymm ymm ymm
|
|
// VPCMPGTW m128 xmm xmm
|
|
// VPCMPGTW xmm xmm xmm
|
|
// VPCMPGTW m128 xmm k k
|
|
// VPCMPGTW m128 xmm k
|
|
// VPCMPGTW m256 ymm k k
|
|
// VPCMPGTW m256 ymm k
|
|
// VPCMPGTW xmm xmm k k
|
|
// VPCMPGTW xmm xmm k
|
|
// VPCMPGTW ymm ymm k k
|
|
// VPCMPGTW ymm ymm k
|
|
// VPCMPGTW m512 zmm k k
|
|
// VPCMPGTW m512 zmm k
|
|
// VPCMPGTW zmm zmm k k
|
|
// VPCMPGTW zmm zmm k
|
|
//
|
|
// Construct and append a VPCMPGTW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPCMPGTW(ops ...operand.Op) { ctx.VPCMPGTW(ops...) }
|
|
|
|
// VPCMPISTRI: Packed Compare Implicit Length Strings, Return Index.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPISTRI imm8 m128 xmm
|
|
// VPCMPISTRI imm8 xmm xmm
|
|
//
|
|
// Construct and append a VPCMPISTRI instruction to the active function.
|
|
func (c *Context) VPCMPISTRI(i, mx, x operand.Op) {
|
|
c.addinstruction(x86.VPCMPISTRI(i, mx, x))
|
|
}
|
|
|
|
// VPCMPISTRI: Packed Compare Implicit Length Strings, Return Index.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPISTRI imm8 m128 xmm
|
|
// VPCMPISTRI imm8 xmm xmm
|
|
//
|
|
// Construct and append a VPCMPISTRI instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPCMPISTRI(i, mx, x operand.Op) { ctx.VPCMPISTRI(i, mx, x) }
|
|
|
|
// VPCMPISTRM: Packed Compare Implicit Length Strings, Return Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPISTRM imm8 m128 xmm
|
|
// VPCMPISTRM imm8 xmm xmm
|
|
//
|
|
// Construct and append a VPCMPISTRM instruction to the active function.
|
|
func (c *Context) VPCMPISTRM(i, mx, x operand.Op) {
|
|
c.addinstruction(x86.VPCMPISTRM(i, mx, x))
|
|
}
|
|
|
|
// VPCMPISTRM: Packed Compare Implicit Length Strings, Return Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPISTRM imm8 m128 xmm
|
|
// VPCMPISTRM imm8 xmm xmm
|
|
//
|
|
// Construct and append a VPCMPISTRM instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPCMPISTRM(i, mx, x operand.Op) { ctx.VPCMPISTRM(i, mx, x) }
|
|
|
|
// VPCMPQ: Compare Packed Signed Quadword Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPQ imm8 m128 xmm k k
|
|
// VPCMPQ imm8 m128 xmm k
|
|
// VPCMPQ imm8 m256 ymm k k
|
|
// VPCMPQ imm8 m256 ymm k
|
|
// VPCMPQ imm8 xmm xmm k k
|
|
// VPCMPQ imm8 xmm xmm k
|
|
// VPCMPQ imm8 ymm ymm k k
|
|
// VPCMPQ imm8 ymm ymm k
|
|
// VPCMPQ imm8 m512 zmm k k
|
|
// VPCMPQ imm8 m512 zmm k
|
|
// VPCMPQ imm8 zmm zmm k k
|
|
// VPCMPQ imm8 zmm zmm k
|
|
//
|
|
// Construct and append a VPCMPQ instruction to the active function.
|
|
func (c *Context) VPCMPQ(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPCMPQ(ops...))
|
|
}
|
|
|
|
// VPCMPQ: Compare Packed Signed Quadword Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPQ imm8 m128 xmm k k
|
|
// VPCMPQ imm8 m128 xmm k
|
|
// VPCMPQ imm8 m256 ymm k k
|
|
// VPCMPQ imm8 m256 ymm k
|
|
// VPCMPQ imm8 xmm xmm k k
|
|
// VPCMPQ imm8 xmm xmm k
|
|
// VPCMPQ imm8 ymm ymm k k
|
|
// VPCMPQ imm8 ymm ymm k
|
|
// VPCMPQ imm8 m512 zmm k k
|
|
// VPCMPQ imm8 m512 zmm k
|
|
// VPCMPQ imm8 zmm zmm k k
|
|
// VPCMPQ imm8 zmm zmm k
|
|
//
|
|
// Construct and append a VPCMPQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPCMPQ(ops ...operand.Op) { ctx.VPCMPQ(ops...) }
|
|
|
|
// VPCMPQ_BCST: Compare Packed Signed Quadword Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPQ.BCST imm8 m64 xmm k k
|
|
// VPCMPQ.BCST imm8 m64 xmm k
|
|
// VPCMPQ.BCST imm8 m64 ymm k k
|
|
// VPCMPQ.BCST imm8 m64 ymm k
|
|
// VPCMPQ.BCST imm8 m64 zmm k k
|
|
// VPCMPQ.BCST imm8 m64 zmm k
|
|
//
|
|
// Construct and append a VPCMPQ.BCST instruction to the active function.
|
|
func (c *Context) VPCMPQ_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPCMPQ_BCST(ops...))
|
|
}
|
|
|
|
// VPCMPQ_BCST: Compare Packed Signed Quadword Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPQ.BCST imm8 m64 xmm k k
|
|
// VPCMPQ.BCST imm8 m64 xmm k
|
|
// VPCMPQ.BCST imm8 m64 ymm k k
|
|
// VPCMPQ.BCST imm8 m64 ymm k
|
|
// VPCMPQ.BCST imm8 m64 zmm k k
|
|
// VPCMPQ.BCST imm8 m64 zmm k
|
|
//
|
|
// Construct and append a VPCMPQ.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPCMPQ_BCST(ops ...operand.Op) { ctx.VPCMPQ_BCST(ops...) }
|
|
|
|
// VPCMPUB: Compare Packed Unsigned Byte Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPUB imm8 m128 xmm k k
|
|
// VPCMPUB imm8 m128 xmm k
|
|
// VPCMPUB imm8 m256 ymm k k
|
|
// VPCMPUB imm8 m256 ymm k
|
|
// VPCMPUB imm8 xmm xmm k k
|
|
// VPCMPUB imm8 xmm xmm k
|
|
// VPCMPUB imm8 ymm ymm k k
|
|
// VPCMPUB imm8 ymm ymm k
|
|
// VPCMPUB imm8 m512 zmm k k
|
|
// VPCMPUB imm8 m512 zmm k
|
|
// VPCMPUB imm8 zmm zmm k k
|
|
// VPCMPUB imm8 zmm zmm k
|
|
//
|
|
// Construct and append a VPCMPUB instruction to the active function.
|
|
func (c *Context) VPCMPUB(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPCMPUB(ops...))
|
|
}
|
|
|
|
// VPCMPUB: Compare Packed Unsigned Byte Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPUB imm8 m128 xmm k k
|
|
// VPCMPUB imm8 m128 xmm k
|
|
// VPCMPUB imm8 m256 ymm k k
|
|
// VPCMPUB imm8 m256 ymm k
|
|
// VPCMPUB imm8 xmm xmm k k
|
|
// VPCMPUB imm8 xmm xmm k
|
|
// VPCMPUB imm8 ymm ymm k k
|
|
// VPCMPUB imm8 ymm ymm k
|
|
// VPCMPUB imm8 m512 zmm k k
|
|
// VPCMPUB imm8 m512 zmm k
|
|
// VPCMPUB imm8 zmm zmm k k
|
|
// VPCMPUB imm8 zmm zmm k
|
|
//
|
|
// Construct and append a VPCMPUB instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPCMPUB(ops ...operand.Op) { ctx.VPCMPUB(ops...) }
|
|
|
|
// VPCMPUD: Compare Packed Unsigned Doubleword Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPUD imm8 m128 xmm k k
|
|
// VPCMPUD imm8 m128 xmm k
|
|
// VPCMPUD imm8 m256 ymm k k
|
|
// VPCMPUD imm8 m256 ymm k
|
|
// VPCMPUD imm8 xmm xmm k k
|
|
// VPCMPUD imm8 xmm xmm k
|
|
// VPCMPUD imm8 ymm ymm k k
|
|
// VPCMPUD imm8 ymm ymm k
|
|
// VPCMPUD imm8 m512 zmm k k
|
|
// VPCMPUD imm8 m512 zmm k
|
|
// VPCMPUD imm8 zmm zmm k k
|
|
// VPCMPUD imm8 zmm zmm k
|
|
//
|
|
// Construct and append a VPCMPUD instruction to the active function.
|
|
func (c *Context) VPCMPUD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPCMPUD(ops...))
|
|
}
|
|
|
|
// VPCMPUD: Compare Packed Unsigned Doubleword Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPUD imm8 m128 xmm k k
|
|
// VPCMPUD imm8 m128 xmm k
|
|
// VPCMPUD imm8 m256 ymm k k
|
|
// VPCMPUD imm8 m256 ymm k
|
|
// VPCMPUD imm8 xmm xmm k k
|
|
// VPCMPUD imm8 xmm xmm k
|
|
// VPCMPUD imm8 ymm ymm k k
|
|
// VPCMPUD imm8 ymm ymm k
|
|
// VPCMPUD imm8 m512 zmm k k
|
|
// VPCMPUD imm8 m512 zmm k
|
|
// VPCMPUD imm8 zmm zmm k k
|
|
// VPCMPUD imm8 zmm zmm k
|
|
//
|
|
// Construct and append a VPCMPUD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPCMPUD(ops ...operand.Op) { ctx.VPCMPUD(ops...) }
|
|
|
|
// VPCMPUD_BCST: Compare Packed Unsigned Doubleword Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPUD.BCST imm8 m32 xmm k k
|
|
// VPCMPUD.BCST imm8 m32 xmm k
|
|
// VPCMPUD.BCST imm8 m32 ymm k k
|
|
// VPCMPUD.BCST imm8 m32 ymm k
|
|
// VPCMPUD.BCST imm8 m32 zmm k k
|
|
// VPCMPUD.BCST imm8 m32 zmm k
|
|
//
|
|
// Construct and append a VPCMPUD.BCST instruction to the active function.
|
|
func (c *Context) VPCMPUD_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPCMPUD_BCST(ops...))
|
|
}
|
|
|
|
// VPCMPUD_BCST: Compare Packed Unsigned Doubleword Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPUD.BCST imm8 m32 xmm k k
|
|
// VPCMPUD.BCST imm8 m32 xmm k
|
|
// VPCMPUD.BCST imm8 m32 ymm k k
|
|
// VPCMPUD.BCST imm8 m32 ymm k
|
|
// VPCMPUD.BCST imm8 m32 zmm k k
|
|
// VPCMPUD.BCST imm8 m32 zmm k
|
|
//
|
|
// Construct and append a VPCMPUD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPCMPUD_BCST(ops ...operand.Op) { ctx.VPCMPUD_BCST(ops...) }
|
|
|
|
// VPCMPUQ: Compare Packed Unsigned Quadword Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPUQ imm8 m128 xmm k k
|
|
// VPCMPUQ imm8 m128 xmm k
|
|
// VPCMPUQ imm8 m256 ymm k k
|
|
// VPCMPUQ imm8 m256 ymm k
|
|
// VPCMPUQ imm8 xmm xmm k k
|
|
// VPCMPUQ imm8 xmm xmm k
|
|
// VPCMPUQ imm8 ymm ymm k k
|
|
// VPCMPUQ imm8 ymm ymm k
|
|
// VPCMPUQ imm8 m512 zmm k k
|
|
// VPCMPUQ imm8 m512 zmm k
|
|
// VPCMPUQ imm8 zmm zmm k k
|
|
// VPCMPUQ imm8 zmm zmm k
|
|
//
|
|
// Construct and append a VPCMPUQ instruction to the active function.
|
|
func (c *Context) VPCMPUQ(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPCMPUQ(ops...))
|
|
}
|
|
|
|
// VPCMPUQ: Compare Packed Unsigned Quadword Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPUQ imm8 m128 xmm k k
|
|
// VPCMPUQ imm8 m128 xmm k
|
|
// VPCMPUQ imm8 m256 ymm k k
|
|
// VPCMPUQ imm8 m256 ymm k
|
|
// VPCMPUQ imm8 xmm xmm k k
|
|
// VPCMPUQ imm8 xmm xmm k
|
|
// VPCMPUQ imm8 ymm ymm k k
|
|
// VPCMPUQ imm8 ymm ymm k
|
|
// VPCMPUQ imm8 m512 zmm k k
|
|
// VPCMPUQ imm8 m512 zmm k
|
|
// VPCMPUQ imm8 zmm zmm k k
|
|
// VPCMPUQ imm8 zmm zmm k
|
|
//
|
|
// Construct and append a VPCMPUQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPCMPUQ(ops ...operand.Op) { ctx.VPCMPUQ(ops...) }
|
|
|
|
// VPCMPUQ_BCST: Compare Packed Unsigned Quadword Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPUQ.BCST imm8 m64 xmm k k
|
|
// VPCMPUQ.BCST imm8 m64 xmm k
|
|
// VPCMPUQ.BCST imm8 m64 ymm k k
|
|
// VPCMPUQ.BCST imm8 m64 ymm k
|
|
// VPCMPUQ.BCST imm8 m64 zmm k k
|
|
// VPCMPUQ.BCST imm8 m64 zmm k
|
|
//
|
|
// Construct and append a VPCMPUQ.BCST instruction to the active function.
|
|
func (c *Context) VPCMPUQ_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPCMPUQ_BCST(ops...))
|
|
}
|
|
|
|
// VPCMPUQ_BCST: Compare Packed Unsigned Quadword Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPUQ.BCST imm8 m64 xmm k k
|
|
// VPCMPUQ.BCST imm8 m64 xmm k
|
|
// VPCMPUQ.BCST imm8 m64 ymm k k
|
|
// VPCMPUQ.BCST imm8 m64 ymm k
|
|
// VPCMPUQ.BCST imm8 m64 zmm k k
|
|
// VPCMPUQ.BCST imm8 m64 zmm k
|
|
//
|
|
// Construct and append a VPCMPUQ.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPCMPUQ_BCST(ops ...operand.Op) { ctx.VPCMPUQ_BCST(ops...) }
|
|
|
|
// VPCMPUW: Compare Packed Unsigned Word Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPUW imm8 m128 xmm k k
|
|
// VPCMPUW imm8 m128 xmm k
|
|
// VPCMPUW imm8 m256 ymm k k
|
|
// VPCMPUW imm8 m256 ymm k
|
|
// VPCMPUW imm8 xmm xmm k k
|
|
// VPCMPUW imm8 xmm xmm k
|
|
// VPCMPUW imm8 ymm ymm k k
|
|
// VPCMPUW imm8 ymm ymm k
|
|
// VPCMPUW imm8 m512 zmm k k
|
|
// VPCMPUW imm8 m512 zmm k
|
|
// VPCMPUW imm8 zmm zmm k k
|
|
// VPCMPUW imm8 zmm zmm k
|
|
//
|
|
// Construct and append a VPCMPUW instruction to the active function.
|
|
func (c *Context) VPCMPUW(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPCMPUW(ops...))
|
|
}
|
|
|
|
// VPCMPUW: Compare Packed Unsigned Word Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPUW imm8 m128 xmm k k
|
|
// VPCMPUW imm8 m128 xmm k
|
|
// VPCMPUW imm8 m256 ymm k k
|
|
// VPCMPUW imm8 m256 ymm k
|
|
// VPCMPUW imm8 xmm xmm k k
|
|
// VPCMPUW imm8 xmm xmm k
|
|
// VPCMPUW imm8 ymm ymm k k
|
|
// VPCMPUW imm8 ymm ymm k
|
|
// VPCMPUW imm8 m512 zmm k k
|
|
// VPCMPUW imm8 m512 zmm k
|
|
// VPCMPUW imm8 zmm zmm k k
|
|
// VPCMPUW imm8 zmm zmm k
|
|
//
|
|
// Construct and append a VPCMPUW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPCMPUW(ops ...operand.Op) { ctx.VPCMPUW(ops...) }
|
|
|
|
// VPCMPW: Compare Packed Signed Word Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPW imm8 m128 xmm k k
|
|
// VPCMPW imm8 m128 xmm k
|
|
// VPCMPW imm8 m256 ymm k k
|
|
// VPCMPW imm8 m256 ymm k
|
|
// VPCMPW imm8 xmm xmm k k
|
|
// VPCMPW imm8 xmm xmm k
|
|
// VPCMPW imm8 ymm ymm k k
|
|
// VPCMPW imm8 ymm ymm k
|
|
// VPCMPW imm8 m512 zmm k k
|
|
// VPCMPW imm8 m512 zmm k
|
|
// VPCMPW imm8 zmm zmm k k
|
|
// VPCMPW imm8 zmm zmm k
|
|
//
|
|
// Construct and append a VPCMPW instruction to the active function.
|
|
func (c *Context) VPCMPW(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPCMPW(ops...))
|
|
}
|
|
|
|
// VPCMPW: Compare Packed Signed Word Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCMPW imm8 m128 xmm k k
|
|
// VPCMPW imm8 m128 xmm k
|
|
// VPCMPW imm8 m256 ymm k k
|
|
// VPCMPW imm8 m256 ymm k
|
|
// VPCMPW imm8 xmm xmm k k
|
|
// VPCMPW imm8 xmm xmm k
|
|
// VPCMPW imm8 ymm ymm k k
|
|
// VPCMPW imm8 ymm ymm k
|
|
// VPCMPW imm8 m512 zmm k k
|
|
// VPCMPW imm8 m512 zmm k
|
|
// VPCMPW imm8 zmm zmm k k
|
|
// VPCMPW imm8 zmm zmm k
|
|
//
|
|
// Construct and append a VPCMPW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPCMPW(ops ...operand.Op) { ctx.VPCMPW(ops...) }
|
|
|
|
// VPCOMPRESSD: Store Sparse Packed Doubleword Integer Values into Dense Memory/Register.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCOMPRESSD xmm k m128
|
|
// VPCOMPRESSD xmm k xmm
|
|
// VPCOMPRESSD xmm m128
|
|
// VPCOMPRESSD xmm xmm
|
|
// VPCOMPRESSD ymm k m256
|
|
// VPCOMPRESSD ymm k ymm
|
|
// VPCOMPRESSD ymm m256
|
|
// VPCOMPRESSD ymm ymm
|
|
// VPCOMPRESSD zmm k m512
|
|
// VPCOMPRESSD zmm k zmm
|
|
// VPCOMPRESSD zmm m512
|
|
// VPCOMPRESSD zmm zmm
|
|
//
|
|
// Construct and append a VPCOMPRESSD instruction to the active function.
|
|
func (c *Context) VPCOMPRESSD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPCOMPRESSD(ops...))
|
|
}
|
|
|
|
// VPCOMPRESSD: Store Sparse Packed Doubleword Integer Values into Dense Memory/Register.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCOMPRESSD xmm k m128
|
|
// VPCOMPRESSD xmm k xmm
|
|
// VPCOMPRESSD xmm m128
|
|
// VPCOMPRESSD xmm xmm
|
|
// VPCOMPRESSD ymm k m256
|
|
// VPCOMPRESSD ymm k ymm
|
|
// VPCOMPRESSD ymm m256
|
|
// VPCOMPRESSD ymm ymm
|
|
// VPCOMPRESSD zmm k m512
|
|
// VPCOMPRESSD zmm k zmm
|
|
// VPCOMPRESSD zmm m512
|
|
// VPCOMPRESSD zmm zmm
|
|
//
|
|
// Construct and append a VPCOMPRESSD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPCOMPRESSD(ops ...operand.Op) { ctx.VPCOMPRESSD(ops...) }
|
|
|
|
// VPCOMPRESSD_Z: Store Sparse Packed Doubleword Integer Values into Dense Memory/Register (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCOMPRESSD.Z xmm k m128
|
|
// VPCOMPRESSD.Z xmm k xmm
|
|
// VPCOMPRESSD.Z ymm k m256
|
|
// VPCOMPRESSD.Z ymm k ymm
|
|
// VPCOMPRESSD.Z zmm k m512
|
|
// VPCOMPRESSD.Z zmm k zmm
|
|
//
|
|
// Construct and append a VPCOMPRESSD.Z instruction to the active function.
|
|
func (c *Context) VPCOMPRESSD_Z(xyz, k, mxyz operand.Op) {
|
|
c.addinstruction(x86.VPCOMPRESSD_Z(xyz, k, mxyz))
|
|
}
|
|
|
|
// VPCOMPRESSD_Z: Store Sparse Packed Doubleword Integer Values into Dense Memory/Register (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCOMPRESSD.Z xmm k m128
|
|
// VPCOMPRESSD.Z xmm k xmm
|
|
// VPCOMPRESSD.Z ymm k m256
|
|
// VPCOMPRESSD.Z ymm k ymm
|
|
// VPCOMPRESSD.Z zmm k m512
|
|
// VPCOMPRESSD.Z zmm k zmm
|
|
//
|
|
// Construct and append a VPCOMPRESSD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPCOMPRESSD_Z(xyz, k, mxyz operand.Op) { ctx.VPCOMPRESSD_Z(xyz, k, mxyz) }
|
|
|
|
// VPCOMPRESSQ: Store Sparse Packed Quadword Integer Values into Dense Memory/Register.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCOMPRESSQ xmm k m128
|
|
// VPCOMPRESSQ xmm k xmm
|
|
// VPCOMPRESSQ xmm m128
|
|
// VPCOMPRESSQ xmm xmm
|
|
// VPCOMPRESSQ ymm k m256
|
|
// VPCOMPRESSQ ymm k ymm
|
|
// VPCOMPRESSQ ymm m256
|
|
// VPCOMPRESSQ ymm ymm
|
|
// VPCOMPRESSQ zmm k m512
|
|
// VPCOMPRESSQ zmm k zmm
|
|
// VPCOMPRESSQ zmm m512
|
|
// VPCOMPRESSQ zmm zmm
|
|
//
|
|
// Construct and append a VPCOMPRESSQ instruction to the active function.
|
|
func (c *Context) VPCOMPRESSQ(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPCOMPRESSQ(ops...))
|
|
}
|
|
|
|
// VPCOMPRESSQ: Store Sparse Packed Quadword Integer Values into Dense Memory/Register.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCOMPRESSQ xmm k m128
|
|
// VPCOMPRESSQ xmm k xmm
|
|
// VPCOMPRESSQ xmm m128
|
|
// VPCOMPRESSQ xmm xmm
|
|
// VPCOMPRESSQ ymm k m256
|
|
// VPCOMPRESSQ ymm k ymm
|
|
// VPCOMPRESSQ ymm m256
|
|
// VPCOMPRESSQ ymm ymm
|
|
// VPCOMPRESSQ zmm k m512
|
|
// VPCOMPRESSQ zmm k zmm
|
|
// VPCOMPRESSQ zmm m512
|
|
// VPCOMPRESSQ zmm zmm
|
|
//
|
|
// Construct and append a VPCOMPRESSQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPCOMPRESSQ(ops ...operand.Op) { ctx.VPCOMPRESSQ(ops...) }
|
|
|
|
// VPCOMPRESSQ_Z: Store Sparse Packed Quadword Integer Values into Dense Memory/Register (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCOMPRESSQ.Z xmm k m128
|
|
// VPCOMPRESSQ.Z xmm k xmm
|
|
// VPCOMPRESSQ.Z ymm k m256
|
|
// VPCOMPRESSQ.Z ymm k ymm
|
|
// VPCOMPRESSQ.Z zmm k m512
|
|
// VPCOMPRESSQ.Z zmm k zmm
|
|
//
|
|
// Construct and append a VPCOMPRESSQ.Z instruction to the active function.
|
|
func (c *Context) VPCOMPRESSQ_Z(xyz, k, mxyz operand.Op) {
|
|
c.addinstruction(x86.VPCOMPRESSQ_Z(xyz, k, mxyz))
|
|
}
|
|
|
|
// VPCOMPRESSQ_Z: Store Sparse Packed Quadword Integer Values into Dense Memory/Register (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCOMPRESSQ.Z xmm k m128
|
|
// VPCOMPRESSQ.Z xmm k xmm
|
|
// VPCOMPRESSQ.Z ymm k m256
|
|
// VPCOMPRESSQ.Z ymm k ymm
|
|
// VPCOMPRESSQ.Z zmm k m512
|
|
// VPCOMPRESSQ.Z zmm k zmm
|
|
//
|
|
// Construct and append a VPCOMPRESSQ.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPCOMPRESSQ_Z(xyz, k, mxyz operand.Op) { ctx.VPCOMPRESSQ_Z(xyz, k, mxyz) }
|
|
|
|
// VPCONFLICTD: Detect Conflicts Within a Vector of Packed Doubleword Values into Dense Memory/Register.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCONFLICTD m128 k xmm
|
|
// VPCONFLICTD m128 xmm
|
|
// VPCONFLICTD m256 k ymm
|
|
// VPCONFLICTD m256 ymm
|
|
// VPCONFLICTD xmm k xmm
|
|
// VPCONFLICTD xmm xmm
|
|
// VPCONFLICTD ymm k ymm
|
|
// VPCONFLICTD ymm ymm
|
|
// VPCONFLICTD m512 k zmm
|
|
// VPCONFLICTD m512 zmm
|
|
// VPCONFLICTD zmm k zmm
|
|
// VPCONFLICTD zmm zmm
|
|
//
|
|
// Construct and append a VPCONFLICTD instruction to the active function.
|
|
func (c *Context) VPCONFLICTD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPCONFLICTD(ops...))
|
|
}
|
|
|
|
// VPCONFLICTD: Detect Conflicts Within a Vector of Packed Doubleword Values into Dense Memory/Register.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCONFLICTD m128 k xmm
|
|
// VPCONFLICTD m128 xmm
|
|
// VPCONFLICTD m256 k ymm
|
|
// VPCONFLICTD m256 ymm
|
|
// VPCONFLICTD xmm k xmm
|
|
// VPCONFLICTD xmm xmm
|
|
// VPCONFLICTD ymm k ymm
|
|
// VPCONFLICTD ymm ymm
|
|
// VPCONFLICTD m512 k zmm
|
|
// VPCONFLICTD m512 zmm
|
|
// VPCONFLICTD zmm k zmm
|
|
// VPCONFLICTD zmm zmm
|
|
//
|
|
// Construct and append a VPCONFLICTD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPCONFLICTD(ops ...operand.Op) { ctx.VPCONFLICTD(ops...) }
|
|
|
|
// VPCONFLICTD_BCST: Detect Conflicts Within a Vector of Packed Doubleword Values into Dense Memory/Register (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCONFLICTD.BCST m32 k xmm
|
|
// VPCONFLICTD.BCST m32 k ymm
|
|
// VPCONFLICTD.BCST m32 xmm
|
|
// VPCONFLICTD.BCST m32 ymm
|
|
// VPCONFLICTD.BCST m32 k zmm
|
|
// VPCONFLICTD.BCST m32 zmm
|
|
//
|
|
// Construct and append a VPCONFLICTD.BCST instruction to the active function.
|
|
func (c *Context) VPCONFLICTD_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPCONFLICTD_BCST(ops...))
|
|
}
|
|
|
|
// VPCONFLICTD_BCST: Detect Conflicts Within a Vector of Packed Doubleword Values into Dense Memory/Register (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCONFLICTD.BCST m32 k xmm
|
|
// VPCONFLICTD.BCST m32 k ymm
|
|
// VPCONFLICTD.BCST m32 xmm
|
|
// VPCONFLICTD.BCST m32 ymm
|
|
// VPCONFLICTD.BCST m32 k zmm
|
|
// VPCONFLICTD.BCST m32 zmm
|
|
//
|
|
// Construct and append a VPCONFLICTD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPCONFLICTD_BCST(ops ...operand.Op) { ctx.VPCONFLICTD_BCST(ops...) }
|
|
|
|
// VPCONFLICTD_BCST_Z: Detect Conflicts Within a Vector of Packed Doubleword Values into Dense Memory/Register (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCONFLICTD.BCST.Z m32 k xmm
|
|
// VPCONFLICTD.BCST.Z m32 k ymm
|
|
// VPCONFLICTD.BCST.Z m32 k zmm
|
|
//
|
|
// Construct and append a VPCONFLICTD.BCST.Z instruction to the active function.
|
|
func (c *Context) VPCONFLICTD_BCST_Z(m, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VPCONFLICTD_BCST_Z(m, k, xyz))
|
|
}
|
|
|
|
// VPCONFLICTD_BCST_Z: Detect Conflicts Within a Vector of Packed Doubleword Values into Dense Memory/Register (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCONFLICTD.BCST.Z m32 k xmm
|
|
// VPCONFLICTD.BCST.Z m32 k ymm
|
|
// VPCONFLICTD.BCST.Z m32 k zmm
|
|
//
|
|
// Construct and append a VPCONFLICTD.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPCONFLICTD_BCST_Z(m, k, xyz operand.Op) { ctx.VPCONFLICTD_BCST_Z(m, k, xyz) }
|
|
|
|
// VPCONFLICTD_Z: Detect Conflicts Within a Vector of Packed Doubleword Values into Dense Memory/Register (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCONFLICTD.Z m128 k xmm
|
|
// VPCONFLICTD.Z m256 k ymm
|
|
// VPCONFLICTD.Z xmm k xmm
|
|
// VPCONFLICTD.Z ymm k ymm
|
|
// VPCONFLICTD.Z m512 k zmm
|
|
// VPCONFLICTD.Z zmm k zmm
|
|
//
|
|
// Construct and append a VPCONFLICTD.Z instruction to the active function.
|
|
func (c *Context) VPCONFLICTD_Z(mxyz, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VPCONFLICTD_Z(mxyz, k, xyz))
|
|
}
|
|
|
|
// VPCONFLICTD_Z: Detect Conflicts Within a Vector of Packed Doubleword Values into Dense Memory/Register (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCONFLICTD.Z m128 k xmm
|
|
// VPCONFLICTD.Z m256 k ymm
|
|
// VPCONFLICTD.Z xmm k xmm
|
|
// VPCONFLICTD.Z ymm k ymm
|
|
// VPCONFLICTD.Z m512 k zmm
|
|
// VPCONFLICTD.Z zmm k zmm
|
|
//
|
|
// Construct and append a VPCONFLICTD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPCONFLICTD_Z(mxyz, k, xyz operand.Op) { ctx.VPCONFLICTD_Z(mxyz, k, xyz) }
|
|
|
|
// VPCONFLICTQ: Detect Conflicts Within a Vector of Packed Quadword Values into Dense Memory/Register.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCONFLICTQ m128 k xmm
|
|
// VPCONFLICTQ m128 xmm
|
|
// VPCONFLICTQ m256 k ymm
|
|
// VPCONFLICTQ m256 ymm
|
|
// VPCONFLICTQ xmm k xmm
|
|
// VPCONFLICTQ xmm xmm
|
|
// VPCONFLICTQ ymm k ymm
|
|
// VPCONFLICTQ ymm ymm
|
|
// VPCONFLICTQ m512 k zmm
|
|
// VPCONFLICTQ m512 zmm
|
|
// VPCONFLICTQ zmm k zmm
|
|
// VPCONFLICTQ zmm zmm
|
|
//
|
|
// Construct and append a VPCONFLICTQ instruction to the active function.
|
|
func (c *Context) VPCONFLICTQ(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPCONFLICTQ(ops...))
|
|
}
|
|
|
|
// VPCONFLICTQ: Detect Conflicts Within a Vector of Packed Quadword Values into Dense Memory/Register.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCONFLICTQ m128 k xmm
|
|
// VPCONFLICTQ m128 xmm
|
|
// VPCONFLICTQ m256 k ymm
|
|
// VPCONFLICTQ m256 ymm
|
|
// VPCONFLICTQ xmm k xmm
|
|
// VPCONFLICTQ xmm xmm
|
|
// VPCONFLICTQ ymm k ymm
|
|
// VPCONFLICTQ ymm ymm
|
|
// VPCONFLICTQ m512 k zmm
|
|
// VPCONFLICTQ m512 zmm
|
|
// VPCONFLICTQ zmm k zmm
|
|
// VPCONFLICTQ zmm zmm
|
|
//
|
|
// Construct and append a VPCONFLICTQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPCONFLICTQ(ops ...operand.Op) { ctx.VPCONFLICTQ(ops...) }
|
|
|
|
// VPCONFLICTQ_BCST: Detect Conflicts Within a Vector of Packed Quadword Values into Dense Memory/Register (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCONFLICTQ.BCST m64 k xmm
|
|
// VPCONFLICTQ.BCST m64 k ymm
|
|
// VPCONFLICTQ.BCST m64 xmm
|
|
// VPCONFLICTQ.BCST m64 ymm
|
|
// VPCONFLICTQ.BCST m64 k zmm
|
|
// VPCONFLICTQ.BCST m64 zmm
|
|
//
|
|
// Construct and append a VPCONFLICTQ.BCST instruction to the active function.
|
|
func (c *Context) VPCONFLICTQ_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPCONFLICTQ_BCST(ops...))
|
|
}
|
|
|
|
// VPCONFLICTQ_BCST: Detect Conflicts Within a Vector of Packed Quadword Values into Dense Memory/Register (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCONFLICTQ.BCST m64 k xmm
|
|
// VPCONFLICTQ.BCST m64 k ymm
|
|
// VPCONFLICTQ.BCST m64 xmm
|
|
// VPCONFLICTQ.BCST m64 ymm
|
|
// VPCONFLICTQ.BCST m64 k zmm
|
|
// VPCONFLICTQ.BCST m64 zmm
|
|
//
|
|
// Construct and append a VPCONFLICTQ.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPCONFLICTQ_BCST(ops ...operand.Op) {
	ctx.VPCONFLICTQ_BCST(ops...)
}
|
|
|
|
// VPCONFLICTQ_BCST_Z: Detect Conflicts Within a Vector of Packed Quadword Values into Dense Memory/Register (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCONFLICTQ.BCST.Z m64 k xmm
|
|
// VPCONFLICTQ.BCST.Z m64 k ymm
|
|
// VPCONFLICTQ.BCST.Z m64 k zmm
|
|
//
|
|
// Construct and append a VPCONFLICTQ.BCST.Z instruction to the active function.
|
|
func (c *Context) VPCONFLICTQ_BCST_Z(m, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VPCONFLICTQ_BCST_Z(m, k, xyz))
|
|
}
|
|
|
|
// VPCONFLICTQ_BCST_Z: Detect Conflicts Within a Vector of Packed Quadword Values into Dense Memory/Register (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCONFLICTQ.BCST.Z m64 k xmm
|
|
// VPCONFLICTQ.BCST.Z m64 k ymm
|
|
// VPCONFLICTQ.BCST.Z m64 k zmm
|
|
//
|
|
// Construct and append a VPCONFLICTQ.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPCONFLICTQ_BCST_Z(m, k, xyz operand.Op) {
	ctx.VPCONFLICTQ_BCST_Z(m, k, xyz)
}
|
|
|
|
// VPCONFLICTQ_Z: Detect Conflicts Within a Vector of Packed Quadword Values into Dense Memory/Register (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCONFLICTQ.Z m128 k xmm
|
|
// VPCONFLICTQ.Z m256 k ymm
|
|
// VPCONFLICTQ.Z xmm k xmm
|
|
// VPCONFLICTQ.Z ymm k ymm
|
|
// VPCONFLICTQ.Z m512 k zmm
|
|
// VPCONFLICTQ.Z zmm k zmm
|
|
//
|
|
// Construct and append a VPCONFLICTQ.Z instruction to the active function.
|
|
func (c *Context) VPCONFLICTQ_Z(mxyz, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VPCONFLICTQ_Z(mxyz, k, xyz))
|
|
}
|
|
|
|
// VPCONFLICTQ_Z: Detect Conflicts Within a Vector of Packed Quadword Values into Dense Memory/Register (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPCONFLICTQ.Z m128 k xmm
|
|
// VPCONFLICTQ.Z m256 k ymm
|
|
// VPCONFLICTQ.Z xmm k xmm
|
|
// VPCONFLICTQ.Z ymm k ymm
|
|
// VPCONFLICTQ.Z m512 k zmm
|
|
// VPCONFLICTQ.Z zmm k zmm
|
|
//
|
|
// Construct and append a VPCONFLICTQ.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPCONFLICTQ_Z(mxyz, k, xyz operand.Op) {
	ctx.VPCONFLICTQ_Z(mxyz, k, xyz)
}
|
|
|
|
// VPERM2F128: Permute Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERM2F128 imm8 m256 ymm ymm
|
|
// VPERM2F128 imm8 ymm ymm ymm
|
|
//
|
|
// Construct and append a VPERM2F128 instruction to the active function.
|
|
func (c *Context) VPERM2F128(i, my, y, y1 operand.Op) {
|
|
c.addinstruction(x86.VPERM2F128(i, my, y, y1))
|
|
}
|
|
|
|
// VPERM2F128: Permute Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERM2F128 imm8 m256 ymm ymm
|
|
// VPERM2F128 imm8 ymm ymm ymm
|
|
//
|
|
// Construct and append a VPERM2F128 instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERM2F128(i, my, y, y1 operand.Op) {
	ctx.VPERM2F128(i, my, y, y1)
}
|
|
|
|
// VPERM2I128: Permute 128-Bit Integer Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERM2I128 imm8 m256 ymm ymm
|
|
// VPERM2I128 imm8 ymm ymm ymm
|
|
//
|
|
// Construct and append a VPERM2I128 instruction to the active function.
|
|
func (c *Context) VPERM2I128(i, my, y, y1 operand.Op) {
|
|
c.addinstruction(x86.VPERM2I128(i, my, y, y1))
|
|
}
|
|
|
|
// VPERM2I128: Permute 128-Bit Integer Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERM2I128 imm8 m256 ymm ymm
|
|
// VPERM2I128 imm8 ymm ymm ymm
|
|
//
|
|
// Construct and append a VPERM2I128 instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERM2I128(i, my, y, y1 operand.Op) {
	ctx.VPERM2I128(i, my, y, y1)
}
|
|
|
|
// VPERMB: Permute Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMB m128 xmm k xmm
|
|
// VPERMB m128 xmm xmm
|
|
// VPERMB m256 ymm k ymm
|
|
// VPERMB m256 ymm ymm
|
|
// VPERMB xmm xmm k xmm
|
|
// VPERMB xmm xmm xmm
|
|
// VPERMB ymm ymm k ymm
|
|
// VPERMB ymm ymm ymm
|
|
// VPERMB m512 zmm k zmm
|
|
// VPERMB m512 zmm zmm
|
|
// VPERMB zmm zmm k zmm
|
|
// VPERMB zmm zmm zmm
|
|
//
|
|
// Construct and append a VPERMB instruction to the active function.
|
|
func (c *Context) VPERMB(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPERMB(ops...))
|
|
}
|
|
|
|
// VPERMB: Permute Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMB m128 xmm k xmm
|
|
// VPERMB m128 xmm xmm
|
|
// VPERMB m256 ymm k ymm
|
|
// VPERMB m256 ymm ymm
|
|
// VPERMB xmm xmm k xmm
|
|
// VPERMB xmm xmm xmm
|
|
// VPERMB ymm ymm k ymm
|
|
// VPERMB ymm ymm ymm
|
|
// VPERMB m512 zmm k zmm
|
|
// VPERMB m512 zmm zmm
|
|
// VPERMB zmm zmm k zmm
|
|
// VPERMB zmm zmm zmm
|
|
//
|
|
// Construct and append a VPERMB instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERMB(ops ...operand.Op) {
	ctx.VPERMB(ops...)
}
|
|
|
|
// VPERMB_Z: Permute Byte Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMB.Z m128 xmm k xmm
|
|
// VPERMB.Z m256 ymm k ymm
|
|
// VPERMB.Z xmm xmm k xmm
|
|
// VPERMB.Z ymm ymm k ymm
|
|
// VPERMB.Z m512 zmm k zmm
|
|
// VPERMB.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPERMB.Z instruction to the active function.
|
|
func (c *Context) VPERMB_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPERMB_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPERMB_Z: Permute Byte Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMB.Z m128 xmm k xmm
|
|
// VPERMB.Z m256 ymm k ymm
|
|
// VPERMB.Z xmm xmm k xmm
|
|
// VPERMB.Z ymm ymm k ymm
|
|
// VPERMB.Z m512 zmm k zmm
|
|
// VPERMB.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPERMB.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERMB_Z(mxyz, xyz, k, xyz1 operand.Op) {
	ctx.VPERMB_Z(mxyz, xyz, k, xyz1)
}
|
|
|
|
// VPERMD: Permute Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMD m256 ymm ymm
|
|
// VPERMD ymm ymm ymm
|
|
// VPERMD m256 ymm k ymm
|
|
// VPERMD ymm ymm k ymm
|
|
// VPERMD m512 zmm k zmm
|
|
// VPERMD m512 zmm zmm
|
|
// VPERMD zmm zmm k zmm
|
|
// VPERMD zmm zmm zmm
|
|
//
|
|
// Construct and append a VPERMD instruction to the active function.
|
|
func (c *Context) VPERMD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPERMD(ops...))
|
|
}
|
|
|
|
// VPERMD: Permute Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMD m256 ymm ymm
|
|
// VPERMD ymm ymm ymm
|
|
// VPERMD m256 ymm k ymm
|
|
// VPERMD ymm ymm k ymm
|
|
// VPERMD m512 zmm k zmm
|
|
// VPERMD m512 zmm zmm
|
|
// VPERMD zmm zmm k zmm
|
|
// VPERMD zmm zmm zmm
|
|
//
|
|
// Construct and append a VPERMD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERMD(ops ...operand.Op) {
	ctx.VPERMD(ops...)
}
|
|
|
|
// VPERMD_BCST: Permute Doubleword Integers (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMD.BCST m32 ymm k ymm
|
|
// VPERMD.BCST m32 ymm ymm
|
|
// VPERMD.BCST m32 zmm k zmm
|
|
// VPERMD.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VPERMD.BCST instruction to the active function.
|
|
func (c *Context) VPERMD_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPERMD_BCST(ops...))
|
|
}
|
|
|
|
// VPERMD_BCST: Permute Doubleword Integers (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMD.BCST m32 ymm k ymm
|
|
// VPERMD.BCST m32 ymm ymm
|
|
// VPERMD.BCST m32 zmm k zmm
|
|
// VPERMD.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VPERMD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERMD_BCST(ops ...operand.Op) {
	ctx.VPERMD_BCST(ops...)
}
|
|
|
|
// VPERMD_BCST_Z: Permute Doubleword Integers (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMD.BCST.Z m32 ymm k ymm
|
|
// VPERMD.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VPERMD.BCST.Z instruction to the active function.
|
|
func (c *Context) VPERMD_BCST_Z(m, yz, k, yz1 operand.Op) {
|
|
c.addinstruction(x86.VPERMD_BCST_Z(m, yz, k, yz1))
|
|
}
|
|
|
|
// VPERMD_BCST_Z: Permute Doubleword Integers (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMD.BCST.Z m32 ymm k ymm
|
|
// VPERMD.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VPERMD.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERMD_BCST_Z(m, yz, k, yz1 operand.Op) {
	ctx.VPERMD_BCST_Z(m, yz, k, yz1)
}
|
|
|
|
// VPERMD_Z: Permute Doubleword Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMD.Z m256 ymm k ymm
|
|
// VPERMD.Z ymm ymm k ymm
|
|
// VPERMD.Z m512 zmm k zmm
|
|
// VPERMD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPERMD.Z instruction to the active function.
|
|
func (c *Context) VPERMD_Z(myz, yz, k, yz1 operand.Op) {
|
|
c.addinstruction(x86.VPERMD_Z(myz, yz, k, yz1))
|
|
}
|
|
|
|
// VPERMD_Z: Permute Doubleword Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMD.Z m256 ymm k ymm
|
|
// VPERMD.Z ymm ymm k ymm
|
|
// VPERMD.Z m512 zmm k zmm
|
|
// VPERMD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPERMD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERMD_Z(myz, yz, k, yz1 operand.Op) {
	ctx.VPERMD_Z(myz, yz, k, yz1)
}
|
|
|
|
// VPERMI2B: Full Permute of Bytes From Two Tables Overwriting the Index.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMI2B m128 xmm k xmm
|
|
// VPERMI2B m128 xmm xmm
|
|
// VPERMI2B m256 ymm k ymm
|
|
// VPERMI2B m256 ymm ymm
|
|
// VPERMI2B xmm xmm k xmm
|
|
// VPERMI2B xmm xmm xmm
|
|
// VPERMI2B ymm ymm k ymm
|
|
// VPERMI2B ymm ymm ymm
|
|
// VPERMI2B m512 zmm k zmm
|
|
// VPERMI2B m512 zmm zmm
|
|
// VPERMI2B zmm zmm k zmm
|
|
// VPERMI2B zmm zmm zmm
|
|
//
|
|
// Construct and append a VPERMI2B instruction to the active function.
|
|
func (c *Context) VPERMI2B(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPERMI2B(ops...))
|
|
}
|
|
|
|
// VPERMI2B: Full Permute of Bytes From Two Tables Overwriting the Index.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMI2B m128 xmm k xmm
|
|
// VPERMI2B m128 xmm xmm
|
|
// VPERMI2B m256 ymm k ymm
|
|
// VPERMI2B m256 ymm ymm
|
|
// VPERMI2B xmm xmm k xmm
|
|
// VPERMI2B xmm xmm xmm
|
|
// VPERMI2B ymm ymm k ymm
|
|
// VPERMI2B ymm ymm ymm
|
|
// VPERMI2B m512 zmm k zmm
|
|
// VPERMI2B m512 zmm zmm
|
|
// VPERMI2B zmm zmm k zmm
|
|
// VPERMI2B zmm zmm zmm
|
|
//
|
|
// Construct and append a VPERMI2B instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERMI2B(ops ...operand.Op) {
	ctx.VPERMI2B(ops...)
}
|
|
|
|
// VPERMI2B_Z: Full Permute of Bytes From Two Tables Overwriting the Index (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMI2B.Z m128 xmm k xmm
|
|
// VPERMI2B.Z m256 ymm k ymm
|
|
// VPERMI2B.Z xmm xmm k xmm
|
|
// VPERMI2B.Z ymm ymm k ymm
|
|
// VPERMI2B.Z m512 zmm k zmm
|
|
// VPERMI2B.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPERMI2B.Z instruction to the active function.
|
|
func (c *Context) VPERMI2B_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPERMI2B_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPERMI2B_Z: Full Permute of Bytes From Two Tables Overwriting the Index (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMI2B.Z m128 xmm k xmm
|
|
// VPERMI2B.Z m256 ymm k ymm
|
|
// VPERMI2B.Z xmm xmm k xmm
|
|
// VPERMI2B.Z ymm ymm k ymm
|
|
// VPERMI2B.Z m512 zmm k zmm
|
|
// VPERMI2B.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPERMI2B.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERMI2B_Z(mxyz, xyz, k, xyz1 operand.Op) {
	ctx.VPERMI2B_Z(mxyz, xyz, k, xyz1)
}
|
|
|
|
// VPERMI2D: Full Permute of Doublewords From Two Tables Overwriting the Index.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMI2D m128 xmm k xmm
|
|
// VPERMI2D m128 xmm xmm
|
|
// VPERMI2D m256 ymm k ymm
|
|
// VPERMI2D m256 ymm ymm
|
|
// VPERMI2D xmm xmm k xmm
|
|
// VPERMI2D xmm xmm xmm
|
|
// VPERMI2D ymm ymm k ymm
|
|
// VPERMI2D ymm ymm ymm
|
|
// VPERMI2D m512 zmm k zmm
|
|
// VPERMI2D m512 zmm zmm
|
|
// VPERMI2D zmm zmm k zmm
|
|
// VPERMI2D zmm zmm zmm
|
|
//
|
|
// Construct and append a VPERMI2D instruction to the active function.
|
|
func (c *Context) VPERMI2D(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPERMI2D(ops...))
|
|
}
|
|
|
|
// VPERMI2D: Full Permute of Doublewords From Two Tables Overwriting the Index.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMI2D m128 xmm k xmm
|
|
// VPERMI2D m128 xmm xmm
|
|
// VPERMI2D m256 ymm k ymm
|
|
// VPERMI2D m256 ymm ymm
|
|
// VPERMI2D xmm xmm k xmm
|
|
// VPERMI2D xmm xmm xmm
|
|
// VPERMI2D ymm ymm k ymm
|
|
// VPERMI2D ymm ymm ymm
|
|
// VPERMI2D m512 zmm k zmm
|
|
// VPERMI2D m512 zmm zmm
|
|
// VPERMI2D zmm zmm k zmm
|
|
// VPERMI2D zmm zmm zmm
|
|
//
|
|
// Construct and append a VPERMI2D instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERMI2D(ops ...operand.Op) {
	ctx.VPERMI2D(ops...)
}
|
|
|
|
// VPERMI2D_BCST: Full Permute of Doublewords From Two Tables Overwriting the Index (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMI2D.BCST m32 xmm k xmm
|
|
// VPERMI2D.BCST m32 xmm xmm
|
|
// VPERMI2D.BCST m32 ymm k ymm
|
|
// VPERMI2D.BCST m32 ymm ymm
|
|
// VPERMI2D.BCST m32 zmm k zmm
|
|
// VPERMI2D.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VPERMI2D.BCST instruction to the active function.
|
|
func (c *Context) VPERMI2D_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPERMI2D_BCST(ops...))
|
|
}
|
|
|
|
// VPERMI2D_BCST: Full Permute of Doublewords From Two Tables Overwriting the Index (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMI2D.BCST m32 xmm k xmm
|
|
// VPERMI2D.BCST m32 xmm xmm
|
|
// VPERMI2D.BCST m32 ymm k ymm
|
|
// VPERMI2D.BCST m32 ymm ymm
|
|
// VPERMI2D.BCST m32 zmm k zmm
|
|
// VPERMI2D.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VPERMI2D.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERMI2D_BCST(ops ...operand.Op) {
	ctx.VPERMI2D_BCST(ops...)
}
|
|
|
|
// VPERMI2D_BCST_Z: Full Permute of Doublewords From Two Tables Overwriting the Index (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMI2D.BCST.Z m32 xmm k xmm
|
|
// VPERMI2D.BCST.Z m32 ymm k ymm
|
|
// VPERMI2D.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VPERMI2D.BCST.Z instruction to the active function.
|
|
func (c *Context) VPERMI2D_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPERMI2D_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPERMI2D_BCST_Z: Full Permute of Doublewords From Two Tables Overwriting the Index (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMI2D.BCST.Z m32 xmm k xmm
|
|
// VPERMI2D.BCST.Z m32 ymm k ymm
|
|
// VPERMI2D.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VPERMI2D.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERMI2D_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	ctx.VPERMI2D_BCST_Z(m, xyz, k, xyz1)
}
|
|
|
|
// VPERMI2D_Z: Full Permute of Doublewords From Two Tables Overwriting the Index (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMI2D.Z m128 xmm k xmm
|
|
// VPERMI2D.Z m256 ymm k ymm
|
|
// VPERMI2D.Z xmm xmm k xmm
|
|
// VPERMI2D.Z ymm ymm k ymm
|
|
// VPERMI2D.Z m512 zmm k zmm
|
|
// VPERMI2D.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPERMI2D.Z instruction to the active function.
|
|
func (c *Context) VPERMI2D_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPERMI2D_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPERMI2D_Z: Full Permute of Doublewords From Two Tables Overwriting the Index (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMI2D.Z m128 xmm k xmm
|
|
// VPERMI2D.Z m256 ymm k ymm
|
|
// VPERMI2D.Z xmm xmm k xmm
|
|
// VPERMI2D.Z ymm ymm k ymm
|
|
// VPERMI2D.Z m512 zmm k zmm
|
|
// VPERMI2D.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPERMI2D.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERMI2D_Z(mxyz, xyz, k, xyz1 operand.Op) {
	ctx.VPERMI2D_Z(mxyz, xyz, k, xyz1)
}
|
|
|
|
// VPERMI2PD: Full Permute of Double-Precision Floating-Point Values From Two Tables Overwriting the Index.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMI2PD m128 xmm k xmm
|
|
// VPERMI2PD m128 xmm xmm
|
|
// VPERMI2PD m256 ymm k ymm
|
|
// VPERMI2PD m256 ymm ymm
|
|
// VPERMI2PD xmm xmm k xmm
|
|
// VPERMI2PD xmm xmm xmm
|
|
// VPERMI2PD ymm ymm k ymm
|
|
// VPERMI2PD ymm ymm ymm
|
|
// VPERMI2PD m512 zmm k zmm
|
|
// VPERMI2PD m512 zmm zmm
|
|
// VPERMI2PD zmm zmm k zmm
|
|
// VPERMI2PD zmm zmm zmm
|
|
//
|
|
// Construct and append a VPERMI2PD instruction to the active function.
|
|
func (c *Context) VPERMI2PD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPERMI2PD(ops...))
|
|
}
|
|
|
|
// VPERMI2PD: Full Permute of Double-Precision Floating-Point Values From Two Tables Overwriting the Index.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMI2PD m128 xmm k xmm
|
|
// VPERMI2PD m128 xmm xmm
|
|
// VPERMI2PD m256 ymm k ymm
|
|
// VPERMI2PD m256 ymm ymm
|
|
// VPERMI2PD xmm xmm k xmm
|
|
// VPERMI2PD xmm xmm xmm
|
|
// VPERMI2PD ymm ymm k ymm
|
|
// VPERMI2PD ymm ymm ymm
|
|
// VPERMI2PD m512 zmm k zmm
|
|
// VPERMI2PD m512 zmm zmm
|
|
// VPERMI2PD zmm zmm k zmm
|
|
// VPERMI2PD zmm zmm zmm
|
|
//
|
|
// Construct and append a VPERMI2PD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERMI2PD(ops ...operand.Op) {
	ctx.VPERMI2PD(ops...)
}
|
|
|
|
// VPERMI2PD_BCST: Full Permute of Double-Precision Floating-Point Values From Two Tables Overwriting the Index (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMI2PD.BCST m64 xmm k xmm
|
|
// VPERMI2PD.BCST m64 xmm xmm
|
|
// VPERMI2PD.BCST m64 ymm k ymm
|
|
// VPERMI2PD.BCST m64 ymm ymm
|
|
// VPERMI2PD.BCST m64 zmm k zmm
|
|
// VPERMI2PD.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VPERMI2PD.BCST instruction to the active function.
|
|
func (c *Context) VPERMI2PD_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPERMI2PD_BCST(ops...))
|
|
}
|
|
|
|
// VPERMI2PD_BCST: Full Permute of Double-Precision Floating-Point Values From Two Tables Overwriting the Index (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMI2PD.BCST m64 xmm k xmm
|
|
// VPERMI2PD.BCST m64 xmm xmm
|
|
// VPERMI2PD.BCST m64 ymm k ymm
|
|
// VPERMI2PD.BCST m64 ymm ymm
|
|
// VPERMI2PD.BCST m64 zmm k zmm
|
|
// VPERMI2PD.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VPERMI2PD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERMI2PD_BCST(ops ...operand.Op) {
	ctx.VPERMI2PD_BCST(ops...)
}
|
|
|
|
// VPERMI2PD_BCST_Z: Full Permute of Double-Precision Floating-Point Values From Two Tables Overwriting the Index (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMI2PD.BCST.Z m64 xmm k xmm
|
|
// VPERMI2PD.BCST.Z m64 ymm k ymm
|
|
// VPERMI2PD.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VPERMI2PD.BCST.Z instruction to the active function.
|
|
func (c *Context) VPERMI2PD_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPERMI2PD_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPERMI2PD_BCST_Z: Full Permute of Double-Precision Floating-Point Values From Two Tables Overwriting the Index (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMI2PD.BCST.Z m64 xmm k xmm
|
|
// VPERMI2PD.BCST.Z m64 ymm k ymm
|
|
// VPERMI2PD.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VPERMI2PD.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERMI2PD_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	ctx.VPERMI2PD_BCST_Z(m, xyz, k, xyz1)
}
|
|
|
|
// VPERMI2PD_Z: Full Permute of Double-Precision Floating-Point Values From Two Tables Overwriting the Index (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMI2PD.Z m128 xmm k xmm
|
|
// VPERMI2PD.Z m256 ymm k ymm
|
|
// VPERMI2PD.Z xmm xmm k xmm
|
|
// VPERMI2PD.Z ymm ymm k ymm
|
|
// VPERMI2PD.Z m512 zmm k zmm
|
|
// VPERMI2PD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPERMI2PD.Z instruction to the active function.
|
|
func (c *Context) VPERMI2PD_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPERMI2PD_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPERMI2PD_Z: Full Permute of Double-Precision Floating-Point Values From Two Tables Overwriting the Index (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMI2PD.Z m128 xmm k xmm
|
|
// VPERMI2PD.Z m256 ymm k ymm
|
|
// VPERMI2PD.Z xmm xmm k xmm
|
|
// VPERMI2PD.Z ymm ymm k ymm
|
|
// VPERMI2PD.Z m512 zmm k zmm
|
|
// VPERMI2PD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPERMI2PD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERMI2PD_Z(mxyz, xyz, k, xyz1 operand.Op) {
	ctx.VPERMI2PD_Z(mxyz, xyz, k, xyz1)
}
|
|
|
|
// VPERMI2PS: Full Permute of Single-Precision Floating-Point Values From Two Tables Overwriting the Index.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMI2PS m128 xmm k xmm
|
|
// VPERMI2PS m128 xmm xmm
|
|
// VPERMI2PS m256 ymm k ymm
|
|
// VPERMI2PS m256 ymm ymm
|
|
// VPERMI2PS xmm xmm k xmm
|
|
// VPERMI2PS xmm xmm xmm
|
|
// VPERMI2PS ymm ymm k ymm
|
|
// VPERMI2PS ymm ymm ymm
|
|
// VPERMI2PS m512 zmm k zmm
|
|
// VPERMI2PS m512 zmm zmm
|
|
// VPERMI2PS zmm zmm k zmm
|
|
// VPERMI2PS zmm zmm zmm
|
|
//
|
|
// Construct and append a VPERMI2PS instruction to the active function.
|
|
func (c *Context) VPERMI2PS(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPERMI2PS(ops...))
|
|
}
|
|
|
|
// VPERMI2PS: Full Permute of Single-Precision Floating-Point Values From Two Tables Overwriting the Index.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMI2PS m128 xmm k xmm
|
|
// VPERMI2PS m128 xmm xmm
|
|
// VPERMI2PS m256 ymm k ymm
|
|
// VPERMI2PS m256 ymm ymm
|
|
// VPERMI2PS xmm xmm k xmm
|
|
// VPERMI2PS xmm xmm xmm
|
|
// VPERMI2PS ymm ymm k ymm
|
|
// VPERMI2PS ymm ymm ymm
|
|
// VPERMI2PS m512 zmm k zmm
|
|
// VPERMI2PS m512 zmm zmm
|
|
// VPERMI2PS zmm zmm k zmm
|
|
// VPERMI2PS zmm zmm zmm
|
|
//
|
|
// Construct and append a VPERMI2PS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERMI2PS(ops ...operand.Op) {
	ctx.VPERMI2PS(ops...)
}
|
|
|
|
// VPERMI2PS_BCST: Full Permute of Single-Precision Floating-Point Values From Two Tables Overwriting the Index (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMI2PS.BCST m32 xmm k xmm
|
|
// VPERMI2PS.BCST m32 xmm xmm
|
|
// VPERMI2PS.BCST m32 ymm k ymm
|
|
// VPERMI2PS.BCST m32 ymm ymm
|
|
// VPERMI2PS.BCST m32 zmm k zmm
|
|
// VPERMI2PS.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VPERMI2PS.BCST instruction to the active function.
|
|
func (c *Context) VPERMI2PS_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPERMI2PS_BCST(ops...))
|
|
}
|
|
|
|
// VPERMI2PS_BCST: Full Permute of Single-Precision Floating-Point Values From Two Tables Overwriting the Index (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMI2PS.BCST m32 xmm k xmm
|
|
// VPERMI2PS.BCST m32 xmm xmm
|
|
// VPERMI2PS.BCST m32 ymm k ymm
|
|
// VPERMI2PS.BCST m32 ymm ymm
|
|
// VPERMI2PS.BCST m32 zmm k zmm
|
|
// VPERMI2PS.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VPERMI2PS.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERMI2PS_BCST(ops ...operand.Op) {
	ctx.VPERMI2PS_BCST(ops...)
}
|
|
|
|
// VPERMI2PS_BCST_Z: Full Permute of Single-Precision Floating-Point Values From Two Tables Overwriting the Index (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMI2PS.BCST.Z m32 xmm k xmm
|
|
// VPERMI2PS.BCST.Z m32 ymm k ymm
|
|
// VPERMI2PS.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VPERMI2PS.BCST.Z instruction to the active function.
|
|
func (c *Context) VPERMI2PS_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPERMI2PS_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPERMI2PS_BCST_Z: Full Permute of Single-Precision Floating-Point Values From Two Tables Overwriting the Index (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMI2PS.BCST.Z m32 xmm k xmm
|
|
// VPERMI2PS.BCST.Z m32 ymm k ymm
|
|
// VPERMI2PS.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VPERMI2PS.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERMI2PS_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	ctx.VPERMI2PS_BCST_Z(m, xyz, k, xyz1)
}
|
|
|
|
// VPERMI2PS_Z: Full Permute of Single-Precision Floating-Point Values From Two Tables Overwriting the Index (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMI2PS.Z m128 xmm k xmm
|
|
// VPERMI2PS.Z m256 ymm k ymm
|
|
// VPERMI2PS.Z xmm xmm k xmm
|
|
// VPERMI2PS.Z ymm ymm k ymm
|
|
// VPERMI2PS.Z m512 zmm k zmm
|
|
// VPERMI2PS.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPERMI2PS.Z instruction to the active function.
|
|
func (c *Context) VPERMI2PS_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPERMI2PS_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPERMI2PS_Z: Full Permute of Single-Precision Floating-Point Values From Two Tables Overwriting the Index (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMI2PS.Z m128 xmm k xmm
|
|
// VPERMI2PS.Z m256 ymm k ymm
|
|
// VPERMI2PS.Z xmm xmm k xmm
|
|
// VPERMI2PS.Z ymm ymm k ymm
|
|
// VPERMI2PS.Z m512 zmm k zmm
|
|
// VPERMI2PS.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPERMI2PS.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERMI2PS_Z(mxyz, xyz, k, xyz1 operand.Op) {
	ctx.VPERMI2PS_Z(mxyz, xyz, k, xyz1)
}
|
|
|
|
// VPERMI2Q: Full Permute of Quadwords From Two Tables Overwriting the Index.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMI2Q m128 xmm k xmm
|
|
// VPERMI2Q m128 xmm xmm
|
|
// VPERMI2Q m256 ymm k ymm
|
|
// VPERMI2Q m256 ymm ymm
|
|
// VPERMI2Q xmm xmm k xmm
|
|
// VPERMI2Q xmm xmm xmm
|
|
// VPERMI2Q ymm ymm k ymm
|
|
// VPERMI2Q ymm ymm ymm
|
|
// VPERMI2Q m512 zmm k zmm
|
|
// VPERMI2Q m512 zmm zmm
|
|
// VPERMI2Q zmm zmm k zmm
|
|
// VPERMI2Q zmm zmm zmm
|
|
//
|
|
// Construct and append a VPERMI2Q instruction to the active function.
|
|
func (c *Context) VPERMI2Q(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPERMI2Q(ops...))
|
|
}
|
|
|
|
// VPERMI2Q: Full Permute of Quadwords From Two Tables Overwriting the Index.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMI2Q m128 xmm k xmm
|
|
// VPERMI2Q m128 xmm xmm
|
|
// VPERMI2Q m256 ymm k ymm
|
|
// VPERMI2Q m256 ymm ymm
|
|
// VPERMI2Q xmm xmm k xmm
|
|
// VPERMI2Q xmm xmm xmm
|
|
// VPERMI2Q ymm ymm k ymm
|
|
// VPERMI2Q ymm ymm ymm
|
|
// VPERMI2Q m512 zmm k zmm
|
|
// VPERMI2Q m512 zmm zmm
|
|
// VPERMI2Q zmm zmm k zmm
|
|
// VPERMI2Q zmm zmm zmm
|
|
//
|
|
// Construct and append a VPERMI2Q instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERMI2Q(ops ...operand.Op) {
	ctx.VPERMI2Q(ops...)
}
|
|
|
|
// VPERMI2Q_BCST: Full Permute of Quadwords From Two Tables Overwriting the Index (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMI2Q.BCST m64 xmm k xmm
|
|
// VPERMI2Q.BCST m64 xmm xmm
|
|
// VPERMI2Q.BCST m64 ymm k ymm
|
|
// VPERMI2Q.BCST m64 ymm ymm
|
|
// VPERMI2Q.BCST m64 zmm k zmm
|
|
// VPERMI2Q.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VPERMI2Q.BCST instruction to the active function.
|
|
func (c *Context) VPERMI2Q_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPERMI2Q_BCST(ops...))
|
|
}
|
|
|
|
// VPERMI2Q_BCST: Full Permute of Quadwords From Two Tables Overwriting the Index (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMI2Q.BCST m64 xmm k xmm
|
|
// VPERMI2Q.BCST m64 xmm xmm
|
|
// VPERMI2Q.BCST m64 ymm k ymm
|
|
// VPERMI2Q.BCST m64 ymm ymm
|
|
// VPERMI2Q.BCST m64 zmm k zmm
|
|
// VPERMI2Q.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VPERMI2Q.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERMI2Q_BCST(ops ...operand.Op) {
	ctx.VPERMI2Q_BCST(ops...)
}
|
|
|
|
// VPERMI2Q_BCST_Z: Full Permute of Quadwords From Two Tables Overwriting the Index (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMI2Q.BCST.Z m64 xmm k xmm
|
|
// VPERMI2Q.BCST.Z m64 ymm k ymm
|
|
// VPERMI2Q.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VPERMI2Q.BCST.Z instruction to the active function.
|
|
func (c *Context) VPERMI2Q_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPERMI2Q_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPERMI2Q_BCST_Z: Full Permute of Quadwords From Two Tables Overwriting the Index (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMI2Q.BCST.Z m64 xmm k xmm
|
|
// VPERMI2Q.BCST.Z m64 ymm k ymm
|
|
// VPERMI2Q.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VPERMI2Q.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERMI2Q_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	ctx.VPERMI2Q_BCST_Z(m, xyz, k, xyz1)
}
|
|
|
|
// VPERMI2Q_Z: Full Permute of Quadwords From Two Tables Overwriting the Index (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMI2Q.Z m128 xmm k xmm
|
|
// VPERMI2Q.Z m256 ymm k ymm
|
|
// VPERMI2Q.Z xmm xmm k xmm
|
|
// VPERMI2Q.Z ymm ymm k ymm
|
|
// VPERMI2Q.Z m512 zmm k zmm
|
|
// VPERMI2Q.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPERMI2Q.Z instruction to the active function.
|
|
func (c *Context) VPERMI2Q_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPERMI2Q_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPERMI2Q_Z: Full Permute of Quadwords From Two Tables Overwriting the Index (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMI2Q.Z m128 xmm k xmm
|
|
// VPERMI2Q.Z m256 ymm k ymm
|
|
// VPERMI2Q.Z xmm xmm k xmm
|
|
// VPERMI2Q.Z ymm ymm k ymm
|
|
// VPERMI2Q.Z m512 zmm k zmm
|
|
// VPERMI2Q.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPERMI2Q.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERMI2Q_Z(mxyz, xyz, k, xyz1 operand.Op) {
	ctx.VPERMI2Q_Z(mxyz, xyz, k, xyz1)
}
|
|
|
|
// VPERMI2W: Full Permute of Words From Two Tables Overwriting the Index.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMI2W m128 xmm k xmm
|
|
// VPERMI2W m128 xmm xmm
|
|
// VPERMI2W m256 ymm k ymm
|
|
// VPERMI2W m256 ymm ymm
|
|
// VPERMI2W xmm xmm k xmm
|
|
// VPERMI2W xmm xmm xmm
|
|
// VPERMI2W ymm ymm k ymm
|
|
// VPERMI2W ymm ymm ymm
|
|
// VPERMI2W m512 zmm k zmm
|
|
// VPERMI2W m512 zmm zmm
|
|
// VPERMI2W zmm zmm k zmm
|
|
// VPERMI2W zmm zmm zmm
|
|
//
|
|
// Construct and append a VPERMI2W instruction to the active function.
|
|
func (c *Context) VPERMI2W(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPERMI2W(ops...))
|
|
}
|
|
|
|
// VPERMI2W: Full Permute of Words From Two Tables Overwriting the Index.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMI2W m128 xmm k xmm
|
|
// VPERMI2W m128 xmm xmm
|
|
// VPERMI2W m256 ymm k ymm
|
|
// VPERMI2W m256 ymm ymm
|
|
// VPERMI2W xmm xmm k xmm
|
|
// VPERMI2W xmm xmm xmm
|
|
// VPERMI2W ymm ymm k ymm
|
|
// VPERMI2W ymm ymm ymm
|
|
// VPERMI2W m512 zmm k zmm
|
|
// VPERMI2W m512 zmm zmm
|
|
// VPERMI2W zmm zmm k zmm
|
|
// VPERMI2W zmm zmm zmm
|
|
//
|
|
// Construct and append a VPERMI2W instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERMI2W(ops ...operand.Op) { ctx.VPERMI2W(ops...) }
|
|
|
|
// VPERMI2W_Z: Full Permute of Words From Two Tables Overwriting the Index (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMI2W.Z m128 xmm k xmm
|
|
// VPERMI2W.Z m256 ymm k ymm
|
|
// VPERMI2W.Z xmm xmm k xmm
|
|
// VPERMI2W.Z ymm ymm k ymm
|
|
// VPERMI2W.Z m512 zmm k zmm
|
|
// VPERMI2W.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPERMI2W.Z instruction to the active function.
|
|
func (c *Context) VPERMI2W_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPERMI2W_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPERMI2W_Z: Full Permute of Words From Two Tables Overwriting the Index (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMI2W.Z m128 xmm k xmm
|
|
// VPERMI2W.Z m256 ymm k ymm
|
|
// VPERMI2W.Z xmm xmm k xmm
|
|
// VPERMI2W.Z ymm ymm k ymm
|
|
// VPERMI2W.Z m512 zmm k zmm
|
|
// VPERMI2W.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPERMI2W.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERMI2W_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPERMI2W_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPERMILPD: Permute Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMILPD imm8 m128 xmm
|
|
// VPERMILPD imm8 m256 ymm
|
|
// VPERMILPD imm8 xmm xmm
|
|
// VPERMILPD imm8 ymm ymm
|
|
// VPERMILPD m128 xmm xmm
|
|
// VPERMILPD m256 ymm ymm
|
|
// VPERMILPD xmm xmm xmm
|
|
// VPERMILPD ymm ymm ymm
|
|
// VPERMILPD imm8 m128 k xmm
|
|
// VPERMILPD imm8 m256 k ymm
|
|
// VPERMILPD imm8 xmm k xmm
|
|
// VPERMILPD imm8 ymm k ymm
|
|
// VPERMILPD m128 xmm k xmm
|
|
// VPERMILPD m256 ymm k ymm
|
|
// VPERMILPD xmm xmm k xmm
|
|
// VPERMILPD ymm ymm k ymm
|
|
// VPERMILPD imm8 m512 k zmm
|
|
// VPERMILPD imm8 m512 zmm
|
|
// VPERMILPD imm8 zmm k zmm
|
|
// VPERMILPD imm8 zmm zmm
|
|
// VPERMILPD m512 zmm k zmm
|
|
// VPERMILPD m512 zmm zmm
|
|
// VPERMILPD zmm zmm k zmm
|
|
// VPERMILPD zmm zmm zmm
|
|
//
|
|
// Construct and append a VPERMILPD instruction to the active function.
|
|
func (c *Context) VPERMILPD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPERMILPD(ops...))
|
|
}
|
|
|
|
// VPERMILPD: Permute Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMILPD imm8 m128 xmm
|
|
// VPERMILPD imm8 m256 ymm
|
|
// VPERMILPD imm8 xmm xmm
|
|
// VPERMILPD imm8 ymm ymm
|
|
// VPERMILPD m128 xmm xmm
|
|
// VPERMILPD m256 ymm ymm
|
|
// VPERMILPD xmm xmm xmm
|
|
// VPERMILPD ymm ymm ymm
|
|
// VPERMILPD imm8 m128 k xmm
|
|
// VPERMILPD imm8 m256 k ymm
|
|
// VPERMILPD imm8 xmm k xmm
|
|
// VPERMILPD imm8 ymm k ymm
|
|
// VPERMILPD m128 xmm k xmm
|
|
// VPERMILPD m256 ymm k ymm
|
|
// VPERMILPD xmm xmm k xmm
|
|
// VPERMILPD ymm ymm k ymm
|
|
// VPERMILPD imm8 m512 k zmm
|
|
// VPERMILPD imm8 m512 zmm
|
|
// VPERMILPD imm8 zmm k zmm
|
|
// VPERMILPD imm8 zmm zmm
|
|
// VPERMILPD m512 zmm k zmm
|
|
// VPERMILPD m512 zmm zmm
|
|
// VPERMILPD zmm zmm k zmm
|
|
// VPERMILPD zmm zmm zmm
|
|
//
|
|
// Construct and append a VPERMILPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERMILPD(ops ...operand.Op) { ctx.VPERMILPD(ops...) }
|
|
|
|
// VPERMILPD_BCST: Permute Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMILPD.BCST imm8 m64 k xmm
|
|
// VPERMILPD.BCST imm8 m64 k ymm
|
|
// VPERMILPD.BCST imm8 m64 xmm
|
|
// VPERMILPD.BCST imm8 m64 ymm
|
|
// VPERMILPD.BCST m64 xmm k xmm
|
|
// VPERMILPD.BCST m64 xmm xmm
|
|
// VPERMILPD.BCST m64 ymm k ymm
|
|
// VPERMILPD.BCST m64 ymm ymm
|
|
// VPERMILPD.BCST imm8 m64 k zmm
|
|
// VPERMILPD.BCST imm8 m64 zmm
|
|
// VPERMILPD.BCST m64 zmm k zmm
|
|
// VPERMILPD.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VPERMILPD.BCST instruction to the active function.
|
|
func (c *Context) VPERMILPD_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPERMILPD_BCST(ops...))
|
|
}
|
|
|
|
// VPERMILPD_BCST: Permute Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMILPD.BCST imm8 m64 k xmm
|
|
// VPERMILPD.BCST imm8 m64 k ymm
|
|
// VPERMILPD.BCST imm8 m64 xmm
|
|
// VPERMILPD.BCST imm8 m64 ymm
|
|
// VPERMILPD.BCST m64 xmm k xmm
|
|
// VPERMILPD.BCST m64 xmm xmm
|
|
// VPERMILPD.BCST m64 ymm k ymm
|
|
// VPERMILPD.BCST m64 ymm ymm
|
|
// VPERMILPD.BCST imm8 m64 k zmm
|
|
// VPERMILPD.BCST imm8 m64 zmm
|
|
// VPERMILPD.BCST m64 zmm k zmm
|
|
// VPERMILPD.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VPERMILPD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERMILPD_BCST(ops ...operand.Op) { ctx.VPERMILPD_BCST(ops...) }
|
|
|
|
// VPERMILPD_BCST_Z: Permute Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMILPD.BCST.Z imm8 m64 k xmm
|
|
// VPERMILPD.BCST.Z imm8 m64 k ymm
|
|
// VPERMILPD.BCST.Z m64 xmm k xmm
|
|
// VPERMILPD.BCST.Z m64 ymm k ymm
|
|
// VPERMILPD.BCST.Z imm8 m64 k zmm
|
|
// VPERMILPD.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VPERMILPD.BCST.Z instruction to the active function.
|
|
func (c *Context) VPERMILPD_BCST_Z(im, mxyz, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VPERMILPD_BCST_Z(im, mxyz, k, xyz))
|
|
}
|
|
|
|
// VPERMILPD_BCST_Z: Permute Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMILPD.BCST.Z imm8 m64 k xmm
|
|
// VPERMILPD.BCST.Z imm8 m64 k ymm
|
|
// VPERMILPD.BCST.Z m64 xmm k xmm
|
|
// VPERMILPD.BCST.Z m64 ymm k ymm
|
|
// VPERMILPD.BCST.Z imm8 m64 k zmm
|
|
// VPERMILPD.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VPERMILPD.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERMILPD_BCST_Z(im, mxyz, k, xyz operand.Op) { ctx.VPERMILPD_BCST_Z(im, mxyz, k, xyz) }
|
|
|
|
// VPERMILPD_Z: Permute Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMILPD.Z imm8 m128 k xmm
|
|
// VPERMILPD.Z imm8 m256 k ymm
|
|
// VPERMILPD.Z imm8 xmm k xmm
|
|
// VPERMILPD.Z imm8 ymm k ymm
|
|
// VPERMILPD.Z m128 xmm k xmm
|
|
// VPERMILPD.Z m256 ymm k ymm
|
|
// VPERMILPD.Z xmm xmm k xmm
|
|
// VPERMILPD.Z ymm ymm k ymm
|
|
// VPERMILPD.Z imm8 m512 k zmm
|
|
// VPERMILPD.Z imm8 zmm k zmm
|
|
// VPERMILPD.Z m512 zmm k zmm
|
|
// VPERMILPD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPERMILPD.Z instruction to the active function.
|
|
func (c *Context) VPERMILPD_Z(imxyz, mxyz, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VPERMILPD_Z(imxyz, mxyz, k, xyz))
|
|
}
|
|
|
|
// VPERMILPD_Z: Permute Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMILPD.Z imm8 m128 k xmm
|
|
// VPERMILPD.Z imm8 m256 k ymm
|
|
// VPERMILPD.Z imm8 xmm k xmm
|
|
// VPERMILPD.Z imm8 ymm k ymm
|
|
// VPERMILPD.Z m128 xmm k xmm
|
|
// VPERMILPD.Z m256 ymm k ymm
|
|
// VPERMILPD.Z xmm xmm k xmm
|
|
// VPERMILPD.Z ymm ymm k ymm
|
|
// VPERMILPD.Z imm8 m512 k zmm
|
|
// VPERMILPD.Z imm8 zmm k zmm
|
|
// VPERMILPD.Z m512 zmm k zmm
|
|
// VPERMILPD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPERMILPD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERMILPD_Z(imxyz, mxyz, k, xyz operand.Op) { ctx.VPERMILPD_Z(imxyz, mxyz, k, xyz) }
|
|
|
|
// VPERMILPS: Permute Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMILPS imm8 m128 xmm
|
|
// VPERMILPS imm8 m256 ymm
|
|
// VPERMILPS imm8 xmm xmm
|
|
// VPERMILPS imm8 ymm ymm
|
|
// VPERMILPS m128 xmm xmm
|
|
// VPERMILPS m256 ymm ymm
|
|
// VPERMILPS xmm xmm xmm
|
|
// VPERMILPS ymm ymm ymm
|
|
// VPERMILPS imm8 m128 k xmm
|
|
// VPERMILPS imm8 m256 k ymm
|
|
// VPERMILPS imm8 xmm k xmm
|
|
// VPERMILPS imm8 ymm k ymm
|
|
// VPERMILPS m128 xmm k xmm
|
|
// VPERMILPS m256 ymm k ymm
|
|
// VPERMILPS xmm xmm k xmm
|
|
// VPERMILPS ymm ymm k ymm
|
|
// VPERMILPS imm8 m512 k zmm
|
|
// VPERMILPS imm8 m512 zmm
|
|
// VPERMILPS imm8 zmm k zmm
|
|
// VPERMILPS imm8 zmm zmm
|
|
// VPERMILPS m512 zmm k zmm
|
|
// VPERMILPS m512 zmm zmm
|
|
// VPERMILPS zmm zmm k zmm
|
|
// VPERMILPS zmm zmm zmm
|
|
//
|
|
// Construct and append a VPERMILPS instruction to the active function.
|
|
func (c *Context) VPERMILPS(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPERMILPS(ops...))
|
|
}
|
|
|
|
// VPERMILPS: Permute Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMILPS imm8 m128 xmm
|
|
// VPERMILPS imm8 m256 ymm
|
|
// VPERMILPS imm8 xmm xmm
|
|
// VPERMILPS imm8 ymm ymm
|
|
// VPERMILPS m128 xmm xmm
|
|
// VPERMILPS m256 ymm ymm
|
|
// VPERMILPS xmm xmm xmm
|
|
// VPERMILPS ymm ymm ymm
|
|
// VPERMILPS imm8 m128 k xmm
|
|
// VPERMILPS imm8 m256 k ymm
|
|
// VPERMILPS imm8 xmm k xmm
|
|
// VPERMILPS imm8 ymm k ymm
|
|
// VPERMILPS m128 xmm k xmm
|
|
// VPERMILPS m256 ymm k ymm
|
|
// VPERMILPS xmm xmm k xmm
|
|
// VPERMILPS ymm ymm k ymm
|
|
// VPERMILPS imm8 m512 k zmm
|
|
// VPERMILPS imm8 m512 zmm
|
|
// VPERMILPS imm8 zmm k zmm
|
|
// VPERMILPS imm8 zmm zmm
|
|
// VPERMILPS m512 zmm k zmm
|
|
// VPERMILPS m512 zmm zmm
|
|
// VPERMILPS zmm zmm k zmm
|
|
// VPERMILPS zmm zmm zmm
|
|
//
|
|
// Construct and append a VPERMILPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERMILPS(ops ...operand.Op) { ctx.VPERMILPS(ops...) }
|
|
|
|
// VPERMILPS_BCST: Permute Single-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMILPS.BCST imm8 m32 k xmm
|
|
// VPERMILPS.BCST imm8 m32 k ymm
|
|
// VPERMILPS.BCST imm8 m32 xmm
|
|
// VPERMILPS.BCST imm8 m32 ymm
|
|
// VPERMILPS.BCST m32 xmm k xmm
|
|
// VPERMILPS.BCST m32 xmm xmm
|
|
// VPERMILPS.BCST m32 ymm k ymm
|
|
// VPERMILPS.BCST m32 ymm ymm
|
|
// VPERMILPS.BCST imm8 m32 k zmm
|
|
// VPERMILPS.BCST imm8 m32 zmm
|
|
// VPERMILPS.BCST m32 zmm k zmm
|
|
// VPERMILPS.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VPERMILPS.BCST instruction to the active function.
|
|
func (c *Context) VPERMILPS_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPERMILPS_BCST(ops...))
|
|
}
|
|
|
|
// VPERMILPS_BCST: Permute Single-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMILPS.BCST imm8 m32 k xmm
|
|
// VPERMILPS.BCST imm8 m32 k ymm
|
|
// VPERMILPS.BCST imm8 m32 xmm
|
|
// VPERMILPS.BCST imm8 m32 ymm
|
|
// VPERMILPS.BCST m32 xmm k xmm
|
|
// VPERMILPS.BCST m32 xmm xmm
|
|
// VPERMILPS.BCST m32 ymm k ymm
|
|
// VPERMILPS.BCST m32 ymm ymm
|
|
// VPERMILPS.BCST imm8 m32 k zmm
|
|
// VPERMILPS.BCST imm8 m32 zmm
|
|
// VPERMILPS.BCST m32 zmm k zmm
|
|
// VPERMILPS.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VPERMILPS.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERMILPS_BCST(ops ...operand.Op) { ctx.VPERMILPS_BCST(ops...) }
|
|
|
|
// VPERMILPS_BCST_Z: Permute Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMILPS.BCST.Z imm8 m32 k xmm
|
|
// VPERMILPS.BCST.Z imm8 m32 k ymm
|
|
// VPERMILPS.BCST.Z m32 xmm k xmm
|
|
// VPERMILPS.BCST.Z m32 ymm k ymm
|
|
// VPERMILPS.BCST.Z imm8 m32 k zmm
|
|
// VPERMILPS.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VPERMILPS.BCST.Z instruction to the active function.
|
|
func (c *Context) VPERMILPS_BCST_Z(im, mxyz, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VPERMILPS_BCST_Z(im, mxyz, k, xyz))
|
|
}
|
|
|
|
// VPERMILPS_BCST_Z: Permute Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMILPS.BCST.Z imm8 m32 k xmm
|
|
// VPERMILPS.BCST.Z imm8 m32 k ymm
|
|
// VPERMILPS.BCST.Z m32 xmm k xmm
|
|
// VPERMILPS.BCST.Z m32 ymm k ymm
|
|
// VPERMILPS.BCST.Z imm8 m32 k zmm
|
|
// VPERMILPS.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VPERMILPS.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERMILPS_BCST_Z(im, mxyz, k, xyz operand.Op) { ctx.VPERMILPS_BCST_Z(im, mxyz, k, xyz) }
|
|
|
|
// VPERMILPS_Z: Permute Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMILPS.Z imm8 m128 k xmm
|
|
// VPERMILPS.Z imm8 m256 k ymm
|
|
// VPERMILPS.Z imm8 xmm k xmm
|
|
// VPERMILPS.Z imm8 ymm k ymm
|
|
// VPERMILPS.Z m128 xmm k xmm
|
|
// VPERMILPS.Z m256 ymm k ymm
|
|
// VPERMILPS.Z xmm xmm k xmm
|
|
// VPERMILPS.Z ymm ymm k ymm
|
|
// VPERMILPS.Z imm8 m512 k zmm
|
|
// VPERMILPS.Z imm8 zmm k zmm
|
|
// VPERMILPS.Z m512 zmm k zmm
|
|
// VPERMILPS.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPERMILPS.Z instruction to the active function.
|
|
func (c *Context) VPERMILPS_Z(imxyz, mxyz, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VPERMILPS_Z(imxyz, mxyz, k, xyz))
|
|
}
|
|
|
|
// VPERMILPS_Z: Permute Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMILPS.Z imm8 m128 k xmm
|
|
// VPERMILPS.Z imm8 m256 k ymm
|
|
// VPERMILPS.Z imm8 xmm k xmm
|
|
// VPERMILPS.Z imm8 ymm k ymm
|
|
// VPERMILPS.Z m128 xmm k xmm
|
|
// VPERMILPS.Z m256 ymm k ymm
|
|
// VPERMILPS.Z xmm xmm k xmm
|
|
// VPERMILPS.Z ymm ymm k ymm
|
|
// VPERMILPS.Z imm8 m512 k zmm
|
|
// VPERMILPS.Z imm8 zmm k zmm
|
|
// VPERMILPS.Z m512 zmm k zmm
|
|
// VPERMILPS.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPERMILPS.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERMILPS_Z(imxyz, mxyz, k, xyz operand.Op) { ctx.VPERMILPS_Z(imxyz, mxyz, k, xyz) }
|
|
|
|
// VPERMPD: Permute Double-Precision Floating-Point Elements.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMPD imm8 m256 ymm
|
|
// VPERMPD imm8 ymm ymm
|
|
// VPERMPD imm8 m256 k ymm
|
|
// VPERMPD imm8 ymm k ymm
|
|
// VPERMPD m256 ymm k ymm
|
|
// VPERMPD m256 ymm ymm
|
|
// VPERMPD ymm ymm k ymm
|
|
// VPERMPD ymm ymm ymm
|
|
// VPERMPD imm8 m512 k zmm
|
|
// VPERMPD imm8 m512 zmm
|
|
// VPERMPD imm8 zmm k zmm
|
|
// VPERMPD imm8 zmm zmm
|
|
// VPERMPD m512 zmm k zmm
|
|
// VPERMPD m512 zmm zmm
|
|
// VPERMPD zmm zmm k zmm
|
|
// VPERMPD zmm zmm zmm
|
|
//
|
|
// Construct and append a VPERMPD instruction to the active function.
|
|
func (c *Context) VPERMPD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPERMPD(ops...))
|
|
}
|
|
|
|
// VPERMPD: Permute Double-Precision Floating-Point Elements.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMPD imm8 m256 ymm
|
|
// VPERMPD imm8 ymm ymm
|
|
// VPERMPD imm8 m256 k ymm
|
|
// VPERMPD imm8 ymm k ymm
|
|
// VPERMPD m256 ymm k ymm
|
|
// VPERMPD m256 ymm ymm
|
|
// VPERMPD ymm ymm k ymm
|
|
// VPERMPD ymm ymm ymm
|
|
// VPERMPD imm8 m512 k zmm
|
|
// VPERMPD imm8 m512 zmm
|
|
// VPERMPD imm8 zmm k zmm
|
|
// VPERMPD imm8 zmm zmm
|
|
// VPERMPD m512 zmm k zmm
|
|
// VPERMPD m512 zmm zmm
|
|
// VPERMPD zmm zmm k zmm
|
|
// VPERMPD zmm zmm zmm
|
|
//
|
|
// Construct and append a VPERMPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERMPD(ops ...operand.Op) { ctx.VPERMPD(ops...) }
|
|
|
|
// VPERMPD_BCST: Permute Double-Precision Floating-Point Elements (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMPD.BCST imm8 m64 k ymm
|
|
// VPERMPD.BCST imm8 m64 ymm
|
|
// VPERMPD.BCST m64 ymm k ymm
|
|
// VPERMPD.BCST m64 ymm ymm
|
|
// VPERMPD.BCST imm8 m64 k zmm
|
|
// VPERMPD.BCST imm8 m64 zmm
|
|
// VPERMPD.BCST m64 zmm k zmm
|
|
// VPERMPD.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VPERMPD.BCST instruction to the active function.
|
|
func (c *Context) VPERMPD_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPERMPD_BCST(ops...))
|
|
}
|
|
|
|
// VPERMPD_BCST: Permute Double-Precision Floating-Point Elements (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMPD.BCST imm8 m64 k ymm
|
|
// VPERMPD.BCST imm8 m64 ymm
|
|
// VPERMPD.BCST m64 ymm k ymm
|
|
// VPERMPD.BCST m64 ymm ymm
|
|
// VPERMPD.BCST imm8 m64 k zmm
|
|
// VPERMPD.BCST imm8 m64 zmm
|
|
// VPERMPD.BCST m64 zmm k zmm
|
|
// VPERMPD.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VPERMPD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERMPD_BCST(ops ...operand.Op) { ctx.VPERMPD_BCST(ops...) }
|
|
|
|
// VPERMPD_BCST_Z: Permute Double-Precision Floating-Point Elements (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMPD.BCST.Z imm8 m64 k ymm
|
|
// VPERMPD.BCST.Z m64 ymm k ymm
|
|
// VPERMPD.BCST.Z imm8 m64 k zmm
|
|
// VPERMPD.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VPERMPD.BCST.Z instruction to the active function.
|
|
func (c *Context) VPERMPD_BCST_Z(im, myz, k, yz operand.Op) {
|
|
c.addinstruction(x86.VPERMPD_BCST_Z(im, myz, k, yz))
|
|
}
|
|
|
|
// VPERMPD_BCST_Z: Permute Double-Precision Floating-Point Elements (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMPD.BCST.Z imm8 m64 k ymm
|
|
// VPERMPD.BCST.Z m64 ymm k ymm
|
|
// VPERMPD.BCST.Z imm8 m64 k zmm
|
|
// VPERMPD.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VPERMPD.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERMPD_BCST_Z(im, myz, k, yz operand.Op) { ctx.VPERMPD_BCST_Z(im, myz, k, yz) }
|
|
|
|
// VPERMPD_Z: Permute Double-Precision Floating-Point Elements (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMPD.Z imm8 m256 k ymm
|
|
// VPERMPD.Z imm8 ymm k ymm
|
|
// VPERMPD.Z m256 ymm k ymm
|
|
// VPERMPD.Z ymm ymm k ymm
|
|
// VPERMPD.Z imm8 m512 k zmm
|
|
// VPERMPD.Z imm8 zmm k zmm
|
|
// VPERMPD.Z m512 zmm k zmm
|
|
// VPERMPD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPERMPD.Z instruction to the active function.
|
|
func (c *Context) VPERMPD_Z(imyz, myz, k, yz operand.Op) {
|
|
c.addinstruction(x86.VPERMPD_Z(imyz, myz, k, yz))
|
|
}
|
|
|
|
// VPERMPD_Z: Permute Double-Precision Floating-Point Elements (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMPD.Z imm8 m256 k ymm
|
|
// VPERMPD.Z imm8 ymm k ymm
|
|
// VPERMPD.Z m256 ymm k ymm
|
|
// VPERMPD.Z ymm ymm k ymm
|
|
// VPERMPD.Z imm8 m512 k zmm
|
|
// VPERMPD.Z imm8 zmm k zmm
|
|
// VPERMPD.Z m512 zmm k zmm
|
|
// VPERMPD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPERMPD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERMPD_Z(imyz, myz, k, yz operand.Op) { ctx.VPERMPD_Z(imyz, myz, k, yz) }
|
|
|
|
// VPERMPS: Permute Single-Precision Floating-Point Elements.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMPS m256 ymm ymm
|
|
// VPERMPS ymm ymm ymm
|
|
// VPERMPS m256 ymm k ymm
|
|
// VPERMPS ymm ymm k ymm
|
|
// VPERMPS m512 zmm k zmm
|
|
// VPERMPS m512 zmm zmm
|
|
// VPERMPS zmm zmm k zmm
|
|
// VPERMPS zmm zmm zmm
|
|
//
|
|
// Construct and append a VPERMPS instruction to the active function.
|
|
func (c *Context) VPERMPS(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPERMPS(ops...))
|
|
}
|
|
|
|
// VPERMPS: Permute Single-Precision Floating-Point Elements.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMPS m256 ymm ymm
|
|
// VPERMPS ymm ymm ymm
|
|
// VPERMPS m256 ymm k ymm
|
|
// VPERMPS ymm ymm k ymm
|
|
// VPERMPS m512 zmm k zmm
|
|
// VPERMPS m512 zmm zmm
|
|
// VPERMPS zmm zmm k zmm
|
|
// VPERMPS zmm zmm zmm
|
|
//
|
|
// Construct and append a VPERMPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERMPS(ops ...operand.Op) { ctx.VPERMPS(ops...) }
|
|
|
|
// VPERMPS_BCST: Permute Single-Precision Floating-Point Elements (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMPS.BCST m32 ymm k ymm
|
|
// VPERMPS.BCST m32 ymm ymm
|
|
// VPERMPS.BCST m32 zmm k zmm
|
|
// VPERMPS.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VPERMPS.BCST instruction to the active function.
|
|
func (c *Context) VPERMPS_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPERMPS_BCST(ops...))
|
|
}
|
|
|
|
// VPERMPS_BCST: Permute Single-Precision Floating-Point Elements (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMPS.BCST m32 ymm k ymm
|
|
// VPERMPS.BCST m32 ymm ymm
|
|
// VPERMPS.BCST m32 zmm k zmm
|
|
// VPERMPS.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VPERMPS.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERMPS_BCST(ops ...operand.Op) { ctx.VPERMPS_BCST(ops...) }
|
|
|
|
// VPERMPS_BCST_Z: Permute Single-Precision Floating-Point Elements (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMPS.BCST.Z m32 ymm k ymm
|
|
// VPERMPS.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VPERMPS.BCST.Z instruction to the active function.
|
|
func (c *Context) VPERMPS_BCST_Z(m, yz, k, yz1 operand.Op) {
|
|
c.addinstruction(x86.VPERMPS_BCST_Z(m, yz, k, yz1))
|
|
}
|
|
|
|
// VPERMPS_BCST_Z: Permute Single-Precision Floating-Point Elements (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMPS.BCST.Z m32 ymm k ymm
|
|
// VPERMPS.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VPERMPS.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERMPS_BCST_Z(m, yz, k, yz1 operand.Op) { ctx.VPERMPS_BCST_Z(m, yz, k, yz1) }
|
|
|
|
// VPERMPS_Z: Permute Single-Precision Floating-Point Elements (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMPS.Z m256 ymm k ymm
|
|
// VPERMPS.Z ymm ymm k ymm
|
|
// VPERMPS.Z m512 zmm k zmm
|
|
// VPERMPS.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPERMPS.Z instruction to the active function.
|
|
func (c *Context) VPERMPS_Z(myz, yz, k, yz1 operand.Op) {
|
|
c.addinstruction(x86.VPERMPS_Z(myz, yz, k, yz1))
|
|
}
|
|
|
|
// VPERMPS_Z: Permute Single-Precision Floating-Point Elements (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMPS.Z m256 ymm k ymm
|
|
// VPERMPS.Z ymm ymm k ymm
|
|
// VPERMPS.Z m512 zmm k zmm
|
|
// VPERMPS.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPERMPS.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERMPS_Z(myz, yz, k, yz1 operand.Op) { ctx.VPERMPS_Z(myz, yz, k, yz1) }
|
|
|
|
// VPERMQ: Permute Quadword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMQ imm8 m256 ymm
|
|
// VPERMQ imm8 ymm ymm
|
|
// VPERMQ imm8 m256 k ymm
|
|
// VPERMQ imm8 ymm k ymm
|
|
// VPERMQ m256 ymm k ymm
|
|
// VPERMQ m256 ymm ymm
|
|
// VPERMQ ymm ymm k ymm
|
|
// VPERMQ ymm ymm ymm
|
|
// VPERMQ imm8 m512 k zmm
|
|
// VPERMQ imm8 m512 zmm
|
|
// VPERMQ imm8 zmm k zmm
|
|
// VPERMQ imm8 zmm zmm
|
|
// VPERMQ m512 zmm k zmm
|
|
// VPERMQ m512 zmm zmm
|
|
// VPERMQ zmm zmm k zmm
|
|
// VPERMQ zmm zmm zmm
|
|
//
|
|
// Construct and append a VPERMQ instruction to the active function.
|
|
func (c *Context) VPERMQ(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPERMQ(ops...))
|
|
}
|
|
|
|
// VPERMQ: Permute Quadword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMQ imm8 m256 ymm
|
|
// VPERMQ imm8 ymm ymm
|
|
// VPERMQ imm8 m256 k ymm
|
|
// VPERMQ imm8 ymm k ymm
|
|
// VPERMQ m256 ymm k ymm
|
|
// VPERMQ m256 ymm ymm
|
|
// VPERMQ ymm ymm k ymm
|
|
// VPERMQ ymm ymm ymm
|
|
// VPERMQ imm8 m512 k zmm
|
|
// VPERMQ imm8 m512 zmm
|
|
// VPERMQ imm8 zmm k zmm
|
|
// VPERMQ imm8 zmm zmm
|
|
// VPERMQ m512 zmm k zmm
|
|
// VPERMQ m512 zmm zmm
|
|
// VPERMQ zmm zmm k zmm
|
|
// VPERMQ zmm zmm zmm
|
|
//
|
|
// Construct and append a VPERMQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERMQ(ops ...operand.Op) { ctx.VPERMQ(ops...) }
|
|
|
|
// VPERMQ_BCST: Permute Quadword Integers (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMQ.BCST imm8 m64 k ymm
|
|
// VPERMQ.BCST imm8 m64 ymm
|
|
// VPERMQ.BCST m64 ymm k ymm
|
|
// VPERMQ.BCST m64 ymm ymm
|
|
// VPERMQ.BCST imm8 m64 k zmm
|
|
// VPERMQ.BCST imm8 m64 zmm
|
|
// VPERMQ.BCST m64 zmm k zmm
|
|
// VPERMQ.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VPERMQ.BCST instruction to the active function.
|
|
func (c *Context) VPERMQ_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPERMQ_BCST(ops...))
|
|
}
|
|
|
|
// VPERMQ_BCST: Permute Quadword Integers (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMQ.BCST imm8 m64 k ymm
|
|
// VPERMQ.BCST imm8 m64 ymm
|
|
// VPERMQ.BCST m64 ymm k ymm
|
|
// VPERMQ.BCST m64 ymm ymm
|
|
// VPERMQ.BCST imm8 m64 k zmm
|
|
// VPERMQ.BCST imm8 m64 zmm
|
|
// VPERMQ.BCST m64 zmm k zmm
|
|
// VPERMQ.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VPERMQ.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERMQ_BCST(ops ...operand.Op) { ctx.VPERMQ_BCST(ops...) }
|
|
|
|
// VPERMQ_BCST_Z: Permute Quadword Integers (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMQ.BCST.Z imm8 m64 k ymm
|
|
// VPERMQ.BCST.Z m64 ymm k ymm
|
|
// VPERMQ.BCST.Z imm8 m64 k zmm
|
|
// VPERMQ.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VPERMQ.BCST.Z instruction to the active function.
|
|
func (c *Context) VPERMQ_BCST_Z(im, myz, k, yz operand.Op) {
|
|
c.addinstruction(x86.VPERMQ_BCST_Z(im, myz, k, yz))
|
|
}
|
|
|
|
// VPERMQ_BCST_Z: Permute Quadword Integers (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMQ.BCST.Z imm8 m64 k ymm
|
|
// VPERMQ.BCST.Z m64 ymm k ymm
|
|
// VPERMQ.BCST.Z imm8 m64 k zmm
|
|
// VPERMQ.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VPERMQ.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERMQ_BCST_Z(im, myz, k, yz operand.Op) { ctx.VPERMQ_BCST_Z(im, myz, k, yz) }
|
|
|
|
// VPERMQ_Z: Permute Quadword Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMQ.Z imm8 m256 k ymm
|
|
// VPERMQ.Z imm8 ymm k ymm
|
|
// VPERMQ.Z m256 ymm k ymm
|
|
// VPERMQ.Z ymm ymm k ymm
|
|
// VPERMQ.Z imm8 m512 k zmm
|
|
// VPERMQ.Z imm8 zmm k zmm
|
|
// VPERMQ.Z m512 zmm k zmm
|
|
// VPERMQ.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPERMQ.Z instruction to the active function.
|
|
func (c *Context) VPERMQ_Z(imyz, myz, k, yz operand.Op) {
|
|
c.addinstruction(x86.VPERMQ_Z(imyz, myz, k, yz))
|
|
}
|
|
|
|
// VPERMQ_Z: Permute Quadword Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMQ.Z imm8 m256 k ymm
|
|
// VPERMQ.Z imm8 ymm k ymm
|
|
// VPERMQ.Z m256 ymm k ymm
|
|
// VPERMQ.Z ymm ymm k ymm
|
|
// VPERMQ.Z imm8 m512 k zmm
|
|
// VPERMQ.Z imm8 zmm k zmm
|
|
// VPERMQ.Z m512 zmm k zmm
|
|
// VPERMQ.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPERMQ.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERMQ_Z(imyz, myz, k, yz operand.Op) { ctx.VPERMQ_Z(imyz, myz, k, yz) }
|
|
|
|
// VPERMT2B: Full Permute of Bytes From Two Tables Overwriting a Table.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMT2B m128 xmm k xmm
|
|
// VPERMT2B m128 xmm xmm
|
|
// VPERMT2B m256 ymm k ymm
|
|
// VPERMT2B m256 ymm ymm
|
|
// VPERMT2B xmm xmm k xmm
|
|
// VPERMT2B xmm xmm xmm
|
|
// VPERMT2B ymm ymm k ymm
|
|
// VPERMT2B ymm ymm ymm
|
|
// VPERMT2B m512 zmm k zmm
|
|
// VPERMT2B m512 zmm zmm
|
|
// VPERMT2B zmm zmm k zmm
|
|
// VPERMT2B zmm zmm zmm
|
|
//
|
|
// Construct and append a VPERMT2B instruction to the active function.
|
|
func (c *Context) VPERMT2B(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPERMT2B(ops...))
|
|
}
|
|
|
|
// VPERMT2B: Full Permute of Bytes From Two Tables Overwriting a Table.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMT2B m128 xmm k xmm
|
|
// VPERMT2B m128 xmm xmm
|
|
// VPERMT2B m256 ymm k ymm
|
|
// VPERMT2B m256 ymm ymm
|
|
// VPERMT2B xmm xmm k xmm
|
|
// VPERMT2B xmm xmm xmm
|
|
// VPERMT2B ymm ymm k ymm
|
|
// VPERMT2B ymm ymm ymm
|
|
// VPERMT2B m512 zmm k zmm
|
|
// VPERMT2B m512 zmm zmm
|
|
// VPERMT2B zmm zmm k zmm
|
|
// VPERMT2B zmm zmm zmm
|
|
//
|
|
// Construct and append a VPERMT2B instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERMT2B(ops ...operand.Op) { ctx.VPERMT2B(ops...) }
|
|
|
|
// VPERMT2B_Z: Full Permute of Bytes From Two Tables Overwriting a Table (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMT2B.Z m128 xmm k xmm
|
|
// VPERMT2B.Z m256 ymm k ymm
|
|
// VPERMT2B.Z xmm xmm k xmm
|
|
// VPERMT2B.Z ymm ymm k ymm
|
|
// VPERMT2B.Z m512 zmm k zmm
|
|
// VPERMT2B.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPERMT2B.Z instruction to the active function.
|
|
func (c *Context) VPERMT2B_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPERMT2B_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPERMT2B_Z: Full Permute of Bytes From Two Tables Overwriting a Table (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMT2B.Z m128 xmm k xmm
|
|
// VPERMT2B.Z m256 ymm k ymm
|
|
// VPERMT2B.Z xmm xmm k xmm
|
|
// VPERMT2B.Z ymm ymm k ymm
|
|
// VPERMT2B.Z m512 zmm k zmm
|
|
// VPERMT2B.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPERMT2B.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERMT2B_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPERMT2B_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPERMT2D: Full Permute of Doublewords From Two Tables Overwriting a Table.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMT2D m128 xmm k xmm
|
|
// VPERMT2D m128 xmm xmm
|
|
// VPERMT2D m256 ymm k ymm
|
|
// VPERMT2D m256 ymm ymm
|
|
// VPERMT2D xmm xmm k xmm
|
|
// VPERMT2D xmm xmm xmm
|
|
// VPERMT2D ymm ymm k ymm
|
|
// VPERMT2D ymm ymm ymm
|
|
// VPERMT2D m512 zmm k zmm
|
|
// VPERMT2D m512 zmm zmm
|
|
// VPERMT2D zmm zmm k zmm
|
|
// VPERMT2D zmm zmm zmm
|
|
//
|
|
// Construct and append a VPERMT2D instruction to the active function.
|
|
func (c *Context) VPERMT2D(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPERMT2D(ops...))
|
|
}
|
|
|
|
// VPERMT2D: Full Permute of Doublewords From Two Tables Overwriting a Table.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPERMT2D m128 xmm k xmm
|
|
// VPERMT2D m128 xmm xmm
|
|
// VPERMT2D m256 ymm k ymm
|
|
// VPERMT2D m256 ymm ymm
|
|
// VPERMT2D xmm xmm k xmm
|
|
// VPERMT2D xmm xmm xmm
|
|
// VPERMT2D ymm ymm k ymm
|
|
// VPERMT2D ymm ymm ymm
|
|
// VPERMT2D m512 zmm k zmm
|
|
// VPERMT2D m512 zmm zmm
|
|
// VPERMT2D zmm zmm k zmm
|
|
// VPERMT2D zmm zmm zmm
|
|
//
|
|
// Construct and append a VPERMT2D instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPERMT2D(ops ...operand.Op) { ctx.VPERMT2D(ops...) }
|
|
|
|
// VPERMT2D_BCST: Full Permute of Doublewords From Two Tables Overwriting a Table (Broadcast).
//
// Forms:
//
//	VPERMT2D.BCST m32 xmm k xmm
//	VPERMT2D.BCST m32 xmm xmm
//	VPERMT2D.BCST m32 ymm k ymm
//	VPERMT2D.BCST m32 ymm ymm
//	VPERMT2D.BCST m32 zmm k zmm
//	VPERMT2D.BCST m32 zmm zmm
//
// Construct and append a VPERMT2D.BCST instruction to the active function.
// Construction errors are recorded on the Context (via addinstruction) rather than returned.
func (c *Context) VPERMT2D_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VPERMT2D_BCST(ops...))
}

// VPERMT2D_BCST: Full Permute of Doublewords From Two Tables Overwriting a Table (Broadcast).
//
// Forms:
//
//	VPERMT2D.BCST m32 xmm k xmm
//	VPERMT2D.BCST m32 xmm xmm
//	VPERMT2D.BCST m32 ymm k ymm
//	VPERMT2D.BCST m32 ymm ymm
//	VPERMT2D.BCST m32 zmm k zmm
//	VPERMT2D.BCST m32 zmm zmm
//
// Construct and append a VPERMT2D.BCST instruction to the active function.
// Operates on the global context.
func VPERMT2D_BCST(ops ...operand.Op) { ctx.VPERMT2D_BCST(ops...) }

// VPERMT2D_BCST_Z: Full Permute of Doublewords From Two Tables Overwriting a Table (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VPERMT2D.BCST.Z m32 xmm k xmm
//	VPERMT2D.BCST.Z m32 ymm k ymm
//	VPERMT2D.BCST.Z m32 zmm k zmm
//
// Construct and append a VPERMT2D.BCST.Z instruction to the active function.
// Construction errors are recorded on the Context (via addinstruction) rather than returned.
func (c *Context) VPERMT2D_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VPERMT2D_BCST_Z(m, xyz, k, xyz1))
}

// VPERMT2D_BCST_Z: Full Permute of Doublewords From Two Tables Overwriting a Table (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VPERMT2D.BCST.Z m32 xmm k xmm
//	VPERMT2D.BCST.Z m32 ymm k ymm
//	VPERMT2D.BCST.Z m32 zmm k zmm
//
// Construct and append a VPERMT2D.BCST.Z instruction to the active function.
// Operates on the global context.
func VPERMT2D_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VPERMT2D_BCST_Z(m, xyz, k, xyz1) }

// VPERMT2D_Z: Full Permute of Doublewords From Two Tables Overwriting a Table (Zeroing Masking).
//
// Forms:
//
//	VPERMT2D.Z m128 xmm k xmm
//	VPERMT2D.Z m256 ymm k ymm
//	VPERMT2D.Z xmm xmm k xmm
//	VPERMT2D.Z ymm ymm k ymm
//	VPERMT2D.Z m512 zmm k zmm
//	VPERMT2D.Z zmm zmm k zmm
//
// Construct and append a VPERMT2D.Z instruction to the active function.
// Construction errors are recorded on the Context (via addinstruction) rather than returned.
func (c *Context) VPERMT2D_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VPERMT2D_Z(mxyz, xyz, k, xyz1))
}

// VPERMT2D_Z: Full Permute of Doublewords From Two Tables Overwriting a Table (Zeroing Masking).
//
// Forms:
//
//	VPERMT2D.Z m128 xmm k xmm
//	VPERMT2D.Z m256 ymm k ymm
//	VPERMT2D.Z xmm xmm k xmm
//	VPERMT2D.Z ymm ymm k ymm
//	VPERMT2D.Z m512 zmm k zmm
//	VPERMT2D.Z zmm zmm k zmm
//
// Construct and append a VPERMT2D.Z instruction to the active function.
// Operates on the global context.
func VPERMT2D_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPERMT2D_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPERMT2PD: Full Permute of Double-Precision Floating-Point Values From Two Tables Overwriting a Table.
//
// Forms:
//
//	VPERMT2PD m128 xmm k xmm
//	VPERMT2PD m128 xmm xmm
//	VPERMT2PD m256 ymm k ymm
//	VPERMT2PD m256 ymm ymm
//	VPERMT2PD xmm xmm k xmm
//	VPERMT2PD xmm xmm xmm
//	VPERMT2PD ymm ymm k ymm
//	VPERMT2PD ymm ymm ymm
//	VPERMT2PD m512 zmm k zmm
//	VPERMT2PD m512 zmm zmm
//	VPERMT2PD zmm zmm k zmm
//	VPERMT2PD zmm zmm zmm
//
// Construct and append a VPERMT2PD instruction to the active function.
// Construction errors are recorded on the Context (via addinstruction) rather than returned.
func (c *Context) VPERMT2PD(ops ...operand.Op) {
	c.addinstruction(x86.VPERMT2PD(ops...))
}

// VPERMT2PD: Full Permute of Double-Precision Floating-Point Values From Two Tables Overwriting a Table.
//
// Forms:
//
//	VPERMT2PD m128 xmm k xmm
//	VPERMT2PD m128 xmm xmm
//	VPERMT2PD m256 ymm k ymm
//	VPERMT2PD m256 ymm ymm
//	VPERMT2PD xmm xmm k xmm
//	VPERMT2PD xmm xmm xmm
//	VPERMT2PD ymm ymm k ymm
//	VPERMT2PD ymm ymm ymm
//	VPERMT2PD m512 zmm k zmm
//	VPERMT2PD m512 zmm zmm
//	VPERMT2PD zmm zmm k zmm
//	VPERMT2PD zmm zmm zmm
//
// Construct and append a VPERMT2PD instruction to the active function.
// Operates on the global context.
func VPERMT2PD(ops ...operand.Op) { ctx.VPERMT2PD(ops...) }

// VPERMT2PD_BCST: Full Permute of Double-Precision Floating-Point Values From Two Tables Overwriting a Table (Broadcast).
//
// Forms:
//
//	VPERMT2PD.BCST m64 xmm k xmm
//	VPERMT2PD.BCST m64 xmm xmm
//	VPERMT2PD.BCST m64 ymm k ymm
//	VPERMT2PD.BCST m64 ymm ymm
//	VPERMT2PD.BCST m64 zmm k zmm
//	VPERMT2PD.BCST m64 zmm zmm
//
// Construct and append a VPERMT2PD.BCST instruction to the active function.
// Construction errors are recorded on the Context (via addinstruction) rather than returned.
func (c *Context) VPERMT2PD_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VPERMT2PD_BCST(ops...))
}

// VPERMT2PD_BCST: Full Permute of Double-Precision Floating-Point Values From Two Tables Overwriting a Table (Broadcast).
//
// Forms:
//
//	VPERMT2PD.BCST m64 xmm k xmm
//	VPERMT2PD.BCST m64 xmm xmm
//	VPERMT2PD.BCST m64 ymm k ymm
//	VPERMT2PD.BCST m64 ymm ymm
//	VPERMT2PD.BCST m64 zmm k zmm
//	VPERMT2PD.BCST m64 zmm zmm
//
// Construct and append a VPERMT2PD.BCST instruction to the active function.
// Operates on the global context.
func VPERMT2PD_BCST(ops ...operand.Op) { ctx.VPERMT2PD_BCST(ops...) }

// VPERMT2PD_BCST_Z: Full Permute of Double-Precision Floating-Point Values From Two Tables Overwriting a Table (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VPERMT2PD.BCST.Z m64 xmm k xmm
//	VPERMT2PD.BCST.Z m64 ymm k ymm
//	VPERMT2PD.BCST.Z m64 zmm k zmm
//
// Construct and append a VPERMT2PD.BCST.Z instruction to the active function.
// Construction errors are recorded on the Context (via addinstruction) rather than returned.
func (c *Context) VPERMT2PD_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VPERMT2PD_BCST_Z(m, xyz, k, xyz1))
}

// VPERMT2PD_BCST_Z: Full Permute of Double-Precision Floating-Point Values From Two Tables Overwriting a Table (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VPERMT2PD.BCST.Z m64 xmm k xmm
//	VPERMT2PD.BCST.Z m64 ymm k ymm
//	VPERMT2PD.BCST.Z m64 zmm k zmm
//
// Construct and append a VPERMT2PD.BCST.Z instruction to the active function.
// Operates on the global context.
func VPERMT2PD_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VPERMT2PD_BCST_Z(m, xyz, k, xyz1) }

// VPERMT2PD_Z: Full Permute of Double-Precision Floating-Point Values From Two Tables Overwriting a Table (Zeroing Masking).
//
// Forms:
//
//	VPERMT2PD.Z m128 xmm k xmm
//	VPERMT2PD.Z m256 ymm k ymm
//	VPERMT2PD.Z xmm xmm k xmm
//	VPERMT2PD.Z ymm ymm k ymm
//	VPERMT2PD.Z m512 zmm k zmm
//	VPERMT2PD.Z zmm zmm k zmm
//
// Construct and append a VPERMT2PD.Z instruction to the active function.
// Construction errors are recorded on the Context (via addinstruction) rather than returned.
func (c *Context) VPERMT2PD_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VPERMT2PD_Z(mxyz, xyz, k, xyz1))
}

// VPERMT2PD_Z: Full Permute of Double-Precision Floating-Point Values From Two Tables Overwriting a Table (Zeroing Masking).
//
// Forms:
//
//	VPERMT2PD.Z m128 xmm k xmm
//	VPERMT2PD.Z m256 ymm k ymm
//	VPERMT2PD.Z xmm xmm k xmm
//	VPERMT2PD.Z ymm ymm k ymm
//	VPERMT2PD.Z m512 zmm k zmm
//	VPERMT2PD.Z zmm zmm k zmm
//
// Construct and append a VPERMT2PD.Z instruction to the active function.
// Operates on the global context.
func VPERMT2PD_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPERMT2PD_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPERMT2PS: Full Permute of Single-Precision Floating-Point Values From Two Tables Overwriting a Table.
//
// Forms:
//
//	VPERMT2PS m128 xmm k xmm
//	VPERMT2PS m128 xmm xmm
//	VPERMT2PS m256 ymm k ymm
//	VPERMT2PS m256 ymm ymm
//	VPERMT2PS xmm xmm k xmm
//	VPERMT2PS xmm xmm xmm
//	VPERMT2PS ymm ymm k ymm
//	VPERMT2PS ymm ymm ymm
//	VPERMT2PS m512 zmm k zmm
//	VPERMT2PS m512 zmm zmm
//	VPERMT2PS zmm zmm k zmm
//	VPERMT2PS zmm zmm zmm
//
// Construct and append a VPERMT2PS instruction to the active function.
// Construction errors are recorded on the Context (via addinstruction) rather than returned.
func (c *Context) VPERMT2PS(ops ...operand.Op) {
	c.addinstruction(x86.VPERMT2PS(ops...))
}

// VPERMT2PS: Full Permute of Single-Precision Floating-Point Values From Two Tables Overwriting a Table.
//
// Forms:
//
//	VPERMT2PS m128 xmm k xmm
//	VPERMT2PS m128 xmm xmm
//	VPERMT2PS m256 ymm k ymm
//	VPERMT2PS m256 ymm ymm
//	VPERMT2PS xmm xmm k xmm
//	VPERMT2PS xmm xmm xmm
//	VPERMT2PS ymm ymm k ymm
//	VPERMT2PS ymm ymm ymm
//	VPERMT2PS m512 zmm k zmm
//	VPERMT2PS m512 zmm zmm
//	VPERMT2PS zmm zmm k zmm
//	VPERMT2PS zmm zmm zmm
//
// Construct and append a VPERMT2PS instruction to the active function.
// Operates on the global context.
func VPERMT2PS(ops ...operand.Op) { ctx.VPERMT2PS(ops...) }

// VPERMT2PS_BCST: Full Permute of Single-Precision Floating-Point Values From Two Tables Overwriting a Table (Broadcast).
//
// Forms:
//
//	VPERMT2PS.BCST m32 xmm k xmm
//	VPERMT2PS.BCST m32 xmm xmm
//	VPERMT2PS.BCST m32 ymm k ymm
//	VPERMT2PS.BCST m32 ymm ymm
//	VPERMT2PS.BCST m32 zmm k zmm
//	VPERMT2PS.BCST m32 zmm zmm
//
// Construct and append a VPERMT2PS.BCST instruction to the active function.
// Construction errors are recorded on the Context (via addinstruction) rather than returned.
func (c *Context) VPERMT2PS_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VPERMT2PS_BCST(ops...))
}

// VPERMT2PS_BCST: Full Permute of Single-Precision Floating-Point Values From Two Tables Overwriting a Table (Broadcast).
//
// Forms:
//
//	VPERMT2PS.BCST m32 xmm k xmm
//	VPERMT2PS.BCST m32 xmm xmm
//	VPERMT2PS.BCST m32 ymm k ymm
//	VPERMT2PS.BCST m32 ymm ymm
//	VPERMT2PS.BCST m32 zmm k zmm
//	VPERMT2PS.BCST m32 zmm zmm
//
// Construct and append a VPERMT2PS.BCST instruction to the active function.
// Operates on the global context.
func VPERMT2PS_BCST(ops ...operand.Op) { ctx.VPERMT2PS_BCST(ops...) }

// VPERMT2PS_BCST_Z: Full Permute of Single-Precision Floating-Point Values From Two Tables Overwriting a Table (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VPERMT2PS.BCST.Z m32 xmm k xmm
//	VPERMT2PS.BCST.Z m32 ymm k ymm
//	VPERMT2PS.BCST.Z m32 zmm k zmm
//
// Construct and append a VPERMT2PS.BCST.Z instruction to the active function.
// Construction errors are recorded on the Context (via addinstruction) rather than returned.
func (c *Context) VPERMT2PS_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VPERMT2PS_BCST_Z(m, xyz, k, xyz1))
}

// VPERMT2PS_BCST_Z: Full Permute of Single-Precision Floating-Point Values From Two Tables Overwriting a Table (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VPERMT2PS.BCST.Z m32 xmm k xmm
//	VPERMT2PS.BCST.Z m32 ymm k ymm
//	VPERMT2PS.BCST.Z m32 zmm k zmm
//
// Construct and append a VPERMT2PS.BCST.Z instruction to the active function.
// Operates on the global context.
func VPERMT2PS_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VPERMT2PS_BCST_Z(m, xyz, k, xyz1) }

// VPERMT2PS_Z: Full Permute of Single-Precision Floating-Point Values From Two Tables Overwriting a Table (Zeroing Masking).
//
// Forms:
//
//	VPERMT2PS.Z m128 xmm k xmm
//	VPERMT2PS.Z m256 ymm k ymm
//	VPERMT2PS.Z xmm xmm k xmm
//	VPERMT2PS.Z ymm ymm k ymm
//	VPERMT2PS.Z m512 zmm k zmm
//	VPERMT2PS.Z zmm zmm k zmm
//
// Construct and append a VPERMT2PS.Z instruction to the active function.
// Construction errors are recorded on the Context (via addinstruction) rather than returned.
func (c *Context) VPERMT2PS_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VPERMT2PS_Z(mxyz, xyz, k, xyz1))
}

// VPERMT2PS_Z: Full Permute of Single-Precision Floating-Point Values From Two Tables Overwriting a Table (Zeroing Masking).
//
// Forms:
//
//	VPERMT2PS.Z m128 xmm k xmm
//	VPERMT2PS.Z m256 ymm k ymm
//	VPERMT2PS.Z xmm xmm k xmm
//	VPERMT2PS.Z ymm ymm k ymm
//	VPERMT2PS.Z m512 zmm k zmm
//	VPERMT2PS.Z zmm zmm k zmm
//
// Construct and append a VPERMT2PS.Z instruction to the active function.
// Operates on the global context.
func VPERMT2PS_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPERMT2PS_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPERMT2Q: Full Permute of Quadwords From Two Tables Overwriting a Table.
//
// Forms:
//
//	VPERMT2Q m128 xmm k xmm
//	VPERMT2Q m128 xmm xmm
//	VPERMT2Q m256 ymm k ymm
//	VPERMT2Q m256 ymm ymm
//	VPERMT2Q xmm xmm k xmm
//	VPERMT2Q xmm xmm xmm
//	VPERMT2Q ymm ymm k ymm
//	VPERMT2Q ymm ymm ymm
//	VPERMT2Q m512 zmm k zmm
//	VPERMT2Q m512 zmm zmm
//	VPERMT2Q zmm zmm k zmm
//	VPERMT2Q zmm zmm zmm
//
// Construct and append a VPERMT2Q instruction to the active function.
// Construction errors are recorded on the Context (via addinstruction) rather than returned.
func (c *Context) VPERMT2Q(ops ...operand.Op) {
	c.addinstruction(x86.VPERMT2Q(ops...))
}

// VPERMT2Q: Full Permute of Quadwords From Two Tables Overwriting a Table.
//
// Forms:
//
//	VPERMT2Q m128 xmm k xmm
//	VPERMT2Q m128 xmm xmm
//	VPERMT2Q m256 ymm k ymm
//	VPERMT2Q m256 ymm ymm
//	VPERMT2Q xmm xmm k xmm
//	VPERMT2Q xmm xmm xmm
//	VPERMT2Q ymm ymm k ymm
//	VPERMT2Q ymm ymm ymm
//	VPERMT2Q m512 zmm k zmm
//	VPERMT2Q m512 zmm zmm
//	VPERMT2Q zmm zmm k zmm
//	VPERMT2Q zmm zmm zmm
//
// Construct and append a VPERMT2Q instruction to the active function.
// Operates on the global context.
func VPERMT2Q(ops ...operand.Op) { ctx.VPERMT2Q(ops...) }

// VPERMT2Q_BCST: Full Permute of Quadwords From Two Tables Overwriting a Table (Broadcast).
//
// Forms:
//
//	VPERMT2Q.BCST m64 xmm k xmm
//	VPERMT2Q.BCST m64 xmm xmm
//	VPERMT2Q.BCST m64 ymm k ymm
//	VPERMT2Q.BCST m64 ymm ymm
//	VPERMT2Q.BCST m64 zmm k zmm
//	VPERMT2Q.BCST m64 zmm zmm
//
// Construct and append a VPERMT2Q.BCST instruction to the active function.
// Construction errors are recorded on the Context (via addinstruction) rather than returned.
func (c *Context) VPERMT2Q_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VPERMT2Q_BCST(ops...))
}

// VPERMT2Q_BCST: Full Permute of Quadwords From Two Tables Overwriting a Table (Broadcast).
//
// Forms:
//
//	VPERMT2Q.BCST m64 xmm k xmm
//	VPERMT2Q.BCST m64 xmm xmm
//	VPERMT2Q.BCST m64 ymm k ymm
//	VPERMT2Q.BCST m64 ymm ymm
//	VPERMT2Q.BCST m64 zmm k zmm
//	VPERMT2Q.BCST m64 zmm zmm
//
// Construct and append a VPERMT2Q.BCST instruction to the active function.
// Operates on the global context.
func VPERMT2Q_BCST(ops ...operand.Op) { ctx.VPERMT2Q_BCST(ops...) }

// VPERMT2Q_BCST_Z: Full Permute of Quadwords From Two Tables Overwriting a Table (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VPERMT2Q.BCST.Z m64 xmm k xmm
//	VPERMT2Q.BCST.Z m64 ymm k ymm
//	VPERMT2Q.BCST.Z m64 zmm k zmm
//
// Construct and append a VPERMT2Q.BCST.Z instruction to the active function.
// Construction errors are recorded on the Context (via addinstruction) rather than returned.
func (c *Context) VPERMT2Q_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VPERMT2Q_BCST_Z(m, xyz, k, xyz1))
}

// VPERMT2Q_BCST_Z: Full Permute of Quadwords From Two Tables Overwriting a Table (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VPERMT2Q.BCST.Z m64 xmm k xmm
//	VPERMT2Q.BCST.Z m64 ymm k ymm
//	VPERMT2Q.BCST.Z m64 zmm k zmm
//
// Construct and append a VPERMT2Q.BCST.Z instruction to the active function.
// Operates on the global context.
func VPERMT2Q_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VPERMT2Q_BCST_Z(m, xyz, k, xyz1) }

// VPERMT2Q_Z: Full Permute of Quadwords From Two Tables Overwriting a Table (Zeroing Masking).
//
// Forms:
//
//	VPERMT2Q.Z m128 xmm k xmm
//	VPERMT2Q.Z m256 ymm k ymm
//	VPERMT2Q.Z xmm xmm k xmm
//	VPERMT2Q.Z ymm ymm k ymm
//	VPERMT2Q.Z m512 zmm k zmm
//	VPERMT2Q.Z zmm zmm k zmm
//
// Construct and append a VPERMT2Q.Z instruction to the active function.
// Construction errors are recorded on the Context (via addinstruction) rather than returned.
func (c *Context) VPERMT2Q_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VPERMT2Q_Z(mxyz, xyz, k, xyz1))
}

// VPERMT2Q_Z: Full Permute of Quadwords From Two Tables Overwriting a Table (Zeroing Masking).
//
// Forms:
//
//	VPERMT2Q.Z m128 xmm k xmm
//	VPERMT2Q.Z m256 ymm k ymm
//	VPERMT2Q.Z xmm xmm k xmm
//	VPERMT2Q.Z ymm ymm k ymm
//	VPERMT2Q.Z m512 zmm k zmm
//	VPERMT2Q.Z zmm zmm k zmm
//
// Construct and append a VPERMT2Q.Z instruction to the active function.
// Operates on the global context.
func VPERMT2Q_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPERMT2Q_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPERMT2W: Full Permute of Words From Two Tables Overwriting a Table.
//
// Forms:
//
//	VPERMT2W m128 xmm k xmm
//	VPERMT2W m128 xmm xmm
//	VPERMT2W m256 ymm k ymm
//	VPERMT2W m256 ymm ymm
//	VPERMT2W xmm xmm k xmm
//	VPERMT2W xmm xmm xmm
//	VPERMT2W ymm ymm k ymm
//	VPERMT2W ymm ymm ymm
//	VPERMT2W m512 zmm k zmm
//	VPERMT2W m512 zmm zmm
//	VPERMT2W zmm zmm k zmm
//	VPERMT2W zmm zmm zmm
//
// Construct and append a VPERMT2W instruction to the active function.
// Construction errors are recorded on the Context (via addinstruction) rather than returned.
func (c *Context) VPERMT2W(ops ...operand.Op) {
	c.addinstruction(x86.VPERMT2W(ops...))
}

// VPERMT2W: Full Permute of Words From Two Tables Overwriting a Table.
//
// Forms:
//
//	VPERMT2W m128 xmm k xmm
//	VPERMT2W m128 xmm xmm
//	VPERMT2W m256 ymm k ymm
//	VPERMT2W m256 ymm ymm
//	VPERMT2W xmm xmm k xmm
//	VPERMT2W xmm xmm xmm
//	VPERMT2W ymm ymm k ymm
//	VPERMT2W ymm ymm ymm
//	VPERMT2W m512 zmm k zmm
//	VPERMT2W m512 zmm zmm
//	VPERMT2W zmm zmm k zmm
//	VPERMT2W zmm zmm zmm
//
// Construct and append a VPERMT2W instruction to the active function.
// Operates on the global context.
func VPERMT2W(ops ...operand.Op) { ctx.VPERMT2W(ops...) }

// VPERMT2W_Z: Full Permute of Words From Two Tables Overwriting a Table (Zeroing Masking).
//
// Forms:
//
//	VPERMT2W.Z m128 xmm k xmm
//	VPERMT2W.Z m256 ymm k ymm
//	VPERMT2W.Z xmm xmm k xmm
//	VPERMT2W.Z ymm ymm k ymm
//	VPERMT2W.Z m512 zmm k zmm
//	VPERMT2W.Z zmm zmm k zmm
//
// Construct and append a VPERMT2W.Z instruction to the active function.
// Construction errors are recorded on the Context (via addinstruction) rather than returned.
func (c *Context) VPERMT2W_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VPERMT2W_Z(mxyz, xyz, k, xyz1))
}

// VPERMT2W_Z: Full Permute of Words From Two Tables Overwriting a Table (Zeroing Masking).
//
// Forms:
//
//	VPERMT2W.Z m128 xmm k xmm
//	VPERMT2W.Z m256 ymm k ymm
//	VPERMT2W.Z xmm xmm k xmm
//	VPERMT2W.Z ymm ymm k ymm
//	VPERMT2W.Z m512 zmm k zmm
//	VPERMT2W.Z zmm zmm k zmm
//
// Construct and append a VPERMT2W.Z instruction to the active function.
// Operates on the global context.
func VPERMT2W_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPERMT2W_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPERMW: Permute Word Integers.
//
// Forms:
//
//	VPERMW m128 xmm k xmm
//	VPERMW m128 xmm xmm
//	VPERMW m256 ymm k ymm
//	VPERMW m256 ymm ymm
//	VPERMW xmm xmm k xmm
//	VPERMW xmm xmm xmm
//	VPERMW ymm ymm k ymm
//	VPERMW ymm ymm ymm
//	VPERMW m512 zmm k zmm
//	VPERMW m512 zmm zmm
//	VPERMW zmm zmm k zmm
//	VPERMW zmm zmm zmm
//
// Construct and append a VPERMW instruction to the active function.
// Construction errors are recorded on the Context (via addinstruction) rather than returned.
func (c *Context) VPERMW(ops ...operand.Op) {
	c.addinstruction(x86.VPERMW(ops...))
}

// VPERMW: Permute Word Integers.
//
// Forms:
//
//	VPERMW m128 xmm k xmm
//	VPERMW m128 xmm xmm
//	VPERMW m256 ymm k ymm
//	VPERMW m256 ymm ymm
//	VPERMW xmm xmm k xmm
//	VPERMW xmm xmm xmm
//	VPERMW ymm ymm k ymm
//	VPERMW ymm ymm ymm
//	VPERMW m512 zmm k zmm
//	VPERMW m512 zmm zmm
//	VPERMW zmm zmm k zmm
//	VPERMW zmm zmm zmm
//
// Construct and append a VPERMW instruction to the active function.
// Operates on the global context.
func VPERMW(ops ...operand.Op) { ctx.VPERMW(ops...) }

// VPERMW_Z: Permute Word Integers (Zeroing Masking).
//
// Forms:
//
//	VPERMW.Z m128 xmm k xmm
//	VPERMW.Z m256 ymm k ymm
//	VPERMW.Z xmm xmm k xmm
//	VPERMW.Z ymm ymm k ymm
//	VPERMW.Z m512 zmm k zmm
//	VPERMW.Z zmm zmm k zmm
//
// Construct and append a VPERMW.Z instruction to the active function.
// Construction errors are recorded on the Context (via addinstruction) rather than returned.
func (c *Context) VPERMW_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VPERMW_Z(mxyz, xyz, k, xyz1))
}

// VPERMW_Z: Permute Word Integers (Zeroing Masking).
//
// Forms:
//
//	VPERMW.Z m128 xmm k xmm
//	VPERMW.Z m256 ymm k ymm
//	VPERMW.Z xmm xmm k xmm
//	VPERMW.Z ymm ymm k ymm
//	VPERMW.Z m512 zmm k zmm
//	VPERMW.Z zmm zmm k zmm
//
// Construct and append a VPERMW.Z instruction to the active function.
// Operates on the global context.
func VPERMW_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPERMW_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPEXPANDD: Load Sparse Packed Doubleword Integer Values from Dense Memory/Register.
//
// Forms:
//
//	VPEXPANDD m128 k xmm
//	VPEXPANDD m128 xmm
//	VPEXPANDD m256 k ymm
//	VPEXPANDD m256 ymm
//	VPEXPANDD xmm k xmm
//	VPEXPANDD xmm xmm
//	VPEXPANDD ymm k ymm
//	VPEXPANDD ymm ymm
//	VPEXPANDD m512 k zmm
//	VPEXPANDD m512 zmm
//	VPEXPANDD zmm k zmm
//	VPEXPANDD zmm zmm
//
// Construct and append a VPEXPANDD instruction to the active function.
// Construction errors are recorded on the Context (via addinstruction) rather than returned.
func (c *Context) VPEXPANDD(ops ...operand.Op) {
	c.addinstruction(x86.VPEXPANDD(ops...))
}

// VPEXPANDD: Load Sparse Packed Doubleword Integer Values from Dense Memory/Register.
//
// Forms:
//
//	VPEXPANDD m128 k xmm
//	VPEXPANDD m128 xmm
//	VPEXPANDD m256 k ymm
//	VPEXPANDD m256 ymm
//	VPEXPANDD xmm k xmm
//	VPEXPANDD xmm xmm
//	VPEXPANDD ymm k ymm
//	VPEXPANDD ymm ymm
//	VPEXPANDD m512 k zmm
//	VPEXPANDD m512 zmm
//	VPEXPANDD zmm k zmm
//	VPEXPANDD zmm zmm
//
// Construct and append a VPEXPANDD instruction to the active function.
// Operates on the global context.
func VPEXPANDD(ops ...operand.Op) { ctx.VPEXPANDD(ops...) }

// VPEXPANDD_Z: Load Sparse Packed Doubleword Integer Values from Dense Memory/Register (Zeroing Masking).
//
// Forms:
//
//	VPEXPANDD.Z m128 k xmm
//	VPEXPANDD.Z m256 k ymm
//	VPEXPANDD.Z xmm k xmm
//	VPEXPANDD.Z ymm k ymm
//	VPEXPANDD.Z m512 k zmm
//	VPEXPANDD.Z zmm k zmm
//
// Construct and append a VPEXPANDD.Z instruction to the active function.
// Construction errors are recorded on the Context (via addinstruction) rather than returned.
func (c *Context) VPEXPANDD_Z(mxyz, k, xyz operand.Op) {
	c.addinstruction(x86.VPEXPANDD_Z(mxyz, k, xyz))
}

// VPEXPANDD_Z: Load Sparse Packed Doubleword Integer Values from Dense Memory/Register (Zeroing Masking).
//
// Forms:
//
//	VPEXPANDD.Z m128 k xmm
//	VPEXPANDD.Z m256 k ymm
//	VPEXPANDD.Z xmm k xmm
//	VPEXPANDD.Z ymm k ymm
//	VPEXPANDD.Z m512 k zmm
//	VPEXPANDD.Z zmm k zmm
//
// Construct and append a VPEXPANDD.Z instruction to the active function.
// Operates on the global context.
func VPEXPANDD_Z(mxyz, k, xyz operand.Op) { ctx.VPEXPANDD_Z(mxyz, k, xyz) }
|
|
|
|
// VPEXPANDQ: Load Sparse Packed Quadword Integer Values from Dense Memory/Register.
//
// Forms:
//
//	VPEXPANDQ m128 k xmm
//	VPEXPANDQ m128 xmm
//	VPEXPANDQ m256 k ymm
//	VPEXPANDQ m256 ymm
//	VPEXPANDQ xmm k xmm
//	VPEXPANDQ xmm xmm
//	VPEXPANDQ ymm k ymm
//	VPEXPANDQ ymm ymm
//	VPEXPANDQ m512 k zmm
//	VPEXPANDQ m512 zmm
//	VPEXPANDQ zmm k zmm
//	VPEXPANDQ zmm zmm
//
// Construct and append a VPEXPANDQ instruction to the active function.
// Construction errors are recorded on the Context (via addinstruction) rather than returned.
func (c *Context) VPEXPANDQ(ops ...operand.Op) {
	c.addinstruction(x86.VPEXPANDQ(ops...))
}

// VPEXPANDQ: Load Sparse Packed Quadword Integer Values from Dense Memory/Register.
//
// Forms:
//
//	VPEXPANDQ m128 k xmm
//	VPEXPANDQ m128 xmm
//	VPEXPANDQ m256 k ymm
//	VPEXPANDQ m256 ymm
//	VPEXPANDQ xmm k xmm
//	VPEXPANDQ xmm xmm
//	VPEXPANDQ ymm k ymm
//	VPEXPANDQ ymm ymm
//	VPEXPANDQ m512 k zmm
//	VPEXPANDQ m512 zmm
//	VPEXPANDQ zmm k zmm
//	VPEXPANDQ zmm zmm
//
// Construct and append a VPEXPANDQ instruction to the active function.
// Operates on the global context.
func VPEXPANDQ(ops ...operand.Op) { ctx.VPEXPANDQ(ops...) }

// VPEXPANDQ_Z: Load Sparse Packed Quadword Integer Values from Dense Memory/Register (Zeroing Masking).
//
// Forms:
//
//	VPEXPANDQ.Z m128 k xmm
//	VPEXPANDQ.Z m256 k ymm
//	VPEXPANDQ.Z xmm k xmm
//	VPEXPANDQ.Z ymm k ymm
//	VPEXPANDQ.Z m512 k zmm
//	VPEXPANDQ.Z zmm k zmm
//
// Construct and append a VPEXPANDQ.Z instruction to the active function.
// Construction errors are recorded on the Context (via addinstruction) rather than returned.
func (c *Context) VPEXPANDQ_Z(mxyz, k, xyz operand.Op) {
	c.addinstruction(x86.VPEXPANDQ_Z(mxyz, k, xyz))
}

// VPEXPANDQ_Z: Load Sparse Packed Quadword Integer Values from Dense Memory/Register (Zeroing Masking).
//
// Forms:
//
//	VPEXPANDQ.Z m128 k xmm
//	VPEXPANDQ.Z m256 k ymm
//	VPEXPANDQ.Z xmm k xmm
//	VPEXPANDQ.Z ymm k ymm
//	VPEXPANDQ.Z m512 k zmm
//	VPEXPANDQ.Z zmm k zmm
//
// Construct and append a VPEXPANDQ.Z instruction to the active function.
// Operates on the global context.
func VPEXPANDQ_Z(mxyz, k, xyz operand.Op) { ctx.VPEXPANDQ_Z(mxyz, k, xyz) }
|
|
|
|
// VPEXTRB: Extract Byte.
//
// Forms:
//
//	VPEXTRB imm8 xmm m8
//	VPEXTRB imm8 xmm r32
//
// Construct and append a VPEXTRB instruction to the active function.
// Construction errors are recorded on the Context (via addinstruction) rather than returned.
func (c *Context) VPEXTRB(i, x, mr operand.Op) {
	c.addinstruction(x86.VPEXTRB(i, x, mr))
}

// VPEXTRB: Extract Byte.
//
// Forms:
//
//	VPEXTRB imm8 xmm m8
//	VPEXTRB imm8 xmm r32
//
// Construct and append a VPEXTRB instruction to the active function.
// Operates on the global context.
func VPEXTRB(i, x, mr operand.Op) { ctx.VPEXTRB(i, x, mr) }

// VPEXTRD: Extract Doubleword.
//
// Forms:
//
//	VPEXTRD imm8 xmm m32
//	VPEXTRD imm8 xmm r32
//
// Construct and append a VPEXTRD instruction to the active function.
// Construction errors are recorded on the Context (via addinstruction) rather than returned.
func (c *Context) VPEXTRD(i, x, mr operand.Op) {
	c.addinstruction(x86.VPEXTRD(i, x, mr))
}

// VPEXTRD: Extract Doubleword.
//
// Forms:
//
//	VPEXTRD imm8 xmm m32
//	VPEXTRD imm8 xmm r32
//
// Construct and append a VPEXTRD instruction to the active function.
// Operates on the global context.
func VPEXTRD(i, x, mr operand.Op) { ctx.VPEXTRD(i, x, mr) }

// VPEXTRQ: Extract Quadword.
//
// Forms:
//
//	VPEXTRQ imm8 xmm m64
//	VPEXTRQ imm8 xmm r64
//
// Construct and append a VPEXTRQ instruction to the active function.
// Construction errors are recorded on the Context (via addinstruction) rather than returned.
func (c *Context) VPEXTRQ(i, x, mr operand.Op) {
	c.addinstruction(x86.VPEXTRQ(i, x, mr))
}

// VPEXTRQ: Extract Quadword.
//
// Forms:
//
//	VPEXTRQ imm8 xmm m64
//	VPEXTRQ imm8 xmm r64
//
// Construct and append a VPEXTRQ instruction to the active function.
// Operates on the global context.
func VPEXTRQ(i, x, mr operand.Op) { ctx.VPEXTRQ(i, x, mr) }

// VPEXTRW: Extract Word.
//
// Forms:
//
//	VPEXTRW imm8 xmm m16
//	VPEXTRW imm8 xmm r32
//
// Construct and append a VPEXTRW instruction to the active function.
// Construction errors are recorded on the Context (via addinstruction) rather than returned.
func (c *Context) VPEXTRW(i, x, mr operand.Op) {
	c.addinstruction(x86.VPEXTRW(i, x, mr))
}

// VPEXTRW: Extract Word.
//
// Forms:
//
//	VPEXTRW imm8 xmm m16
//	VPEXTRW imm8 xmm r32
//
// Construct and append a VPEXTRW instruction to the active function.
// Operates on the global context.
func VPEXTRW(i, x, mr operand.Op) { ctx.VPEXTRW(i, x, mr) }
|
|
|
|
// VPGATHERDD: Gather Packed Doubleword Values Using Signed Doubleword Indices.
//
// Forms:
//
//	VPGATHERDD xmm vm32x xmm
//	VPGATHERDD ymm vm32y ymm
//	VPGATHERDD vm32x k xmm
//	VPGATHERDD vm32y k ymm
//	VPGATHERDD vm32z k zmm
//
// Construct and append a VPGATHERDD instruction to the active function.
// Construction errors are recorded on the Context (via addinstruction) rather than returned.
func (c *Context) VPGATHERDD(vxy, kv, xyz operand.Op) {
	c.addinstruction(x86.VPGATHERDD(vxy, kv, xyz))
}

// VPGATHERDD: Gather Packed Doubleword Values Using Signed Doubleword Indices.
//
// Forms:
//
//	VPGATHERDD xmm vm32x xmm
//	VPGATHERDD ymm vm32y ymm
//	VPGATHERDD vm32x k xmm
//	VPGATHERDD vm32y k ymm
//	VPGATHERDD vm32z k zmm
//
// Construct and append a VPGATHERDD instruction to the active function.
// Operates on the global context.
func VPGATHERDD(vxy, kv, xyz operand.Op) { ctx.VPGATHERDD(vxy, kv, xyz) }

// VPGATHERDQ: Gather Packed Quadword Values Using Signed Doubleword Indices.
//
// Forms:
//
//	VPGATHERDQ xmm vm32x xmm
//	VPGATHERDQ ymm vm32x ymm
//	VPGATHERDQ vm32x k xmm
//	VPGATHERDQ vm32x k ymm
//	VPGATHERDQ vm32y k zmm
//
// Construct and append a VPGATHERDQ instruction to the active function.
// Construction errors are recorded on the Context (via addinstruction) rather than returned.
func (c *Context) VPGATHERDQ(vxy, kv, xyz operand.Op) {
	c.addinstruction(x86.VPGATHERDQ(vxy, kv, xyz))
}

// VPGATHERDQ: Gather Packed Quadword Values Using Signed Doubleword Indices.
//
// Forms:
//
//	VPGATHERDQ xmm vm32x xmm
//	VPGATHERDQ ymm vm32x ymm
//	VPGATHERDQ vm32x k xmm
//	VPGATHERDQ vm32x k ymm
//	VPGATHERDQ vm32y k zmm
//
// Construct and append a VPGATHERDQ instruction to the active function.
// Operates on the global context.
func VPGATHERDQ(vxy, kv, xyz operand.Op) { ctx.VPGATHERDQ(vxy, kv, xyz) }

// VPGATHERQD: Gather Packed Doubleword Values Using Signed Quadword Indices.
//
// Forms:
//
//	VPGATHERQD xmm vm64x xmm
//	VPGATHERQD xmm vm64y xmm
//	VPGATHERQD vm64x k xmm
//	VPGATHERQD vm64y k xmm
//	VPGATHERQD vm64z k ymm
//
// Construct and append a VPGATHERQD instruction to the active function.
// Construction errors are recorded on the Context (via addinstruction) rather than returned.
func (c *Context) VPGATHERQD(vx, kv, xy operand.Op) {
	c.addinstruction(x86.VPGATHERQD(vx, kv, xy))
}

// VPGATHERQD: Gather Packed Doubleword Values Using Signed Quadword Indices.
//
// Forms:
//
//	VPGATHERQD xmm vm64x xmm
//	VPGATHERQD xmm vm64y xmm
//	VPGATHERQD vm64x k xmm
//	VPGATHERQD vm64y k xmm
//	VPGATHERQD vm64z k ymm
//
// Construct and append a VPGATHERQD instruction to the active function.
// Operates on the global context.
func VPGATHERQD(vx, kv, xy operand.Op) { ctx.VPGATHERQD(vx, kv, xy) }

// VPGATHERQQ: Gather Packed Quadword Values Using Signed Quadword Indices.
//
// Forms:
//
//	VPGATHERQQ xmm vm64x xmm
//	VPGATHERQQ ymm vm64y ymm
//	VPGATHERQQ vm64x k xmm
//	VPGATHERQQ vm64y k ymm
//	VPGATHERQQ vm64z k zmm
//
// Construct and append a VPGATHERQQ instruction to the active function.
// Construction errors are recorded on the Context (via addinstruction) rather than returned.
func (c *Context) VPGATHERQQ(vxy, kv, xyz operand.Op) {
	c.addinstruction(x86.VPGATHERQQ(vxy, kv, xyz))
}

// VPGATHERQQ: Gather Packed Quadword Values Using Signed Quadword Indices.
//
// Forms:
//
//	VPGATHERQQ xmm vm64x xmm
//	VPGATHERQQ ymm vm64y ymm
//	VPGATHERQQ vm64x k xmm
//	VPGATHERQQ vm64y k ymm
//	VPGATHERQQ vm64z k zmm
//
// Construct and append a VPGATHERQQ instruction to the active function.
// Operates on the global context.
func VPGATHERQQ(vxy, kv, xyz operand.Op) { ctx.VPGATHERQQ(vxy, kv, xyz) }
|
|
|
|
// VPHADDD: Packed Horizontal Add Doubleword Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPHADDD m256 ymm ymm
|
|
// VPHADDD ymm ymm ymm
|
|
// VPHADDD m128 xmm xmm
|
|
// VPHADDD xmm xmm xmm
|
|
//
|
|
// Construct and append a VPHADDD instruction to the active function.
|
|
func (c *Context) VPHADDD(mxy, xy, xy1 operand.Op) {
|
|
c.addinstruction(x86.VPHADDD(mxy, xy, xy1))
|
|
}
|
|
|
|
// VPHADDD: Packed Horizontal Add Doubleword Integer.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPHADDD m256 ymm ymm
|
|
// VPHADDD ymm ymm ymm
|
|
// VPHADDD m128 xmm xmm
|
|
// VPHADDD xmm xmm xmm
|
|
//
|
|
// Construct and append a VPHADDD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPHADDD(mxy, xy, xy1 operand.Op) { ctx.VPHADDD(mxy, xy, xy1) }
|
|
|
|
// VPHADDSW: Packed Horizontal Add Signed Word Integers with Signed Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPHADDSW m256 ymm ymm
|
|
// VPHADDSW ymm ymm ymm
|
|
// VPHADDSW m128 xmm xmm
|
|
// VPHADDSW xmm xmm xmm
|
|
//
|
|
// Construct and append a VPHADDSW instruction to the active function.
|
|
func (c *Context) VPHADDSW(mxy, xy, xy1 operand.Op) {
|
|
c.addinstruction(x86.VPHADDSW(mxy, xy, xy1))
|
|
}
|
|
|
|
// VPHADDSW: Packed Horizontal Add Signed Word Integers with Signed Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPHADDSW m256 ymm ymm
|
|
// VPHADDSW ymm ymm ymm
|
|
// VPHADDSW m128 xmm xmm
|
|
// VPHADDSW xmm xmm xmm
|
|
//
|
|
// Construct and append a VPHADDSW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPHADDSW(mxy, xy, xy1 operand.Op) {
	ctx.VPHADDSW(mxy, xy, xy1)
}
|
|
|
|
// VPHADDW: Packed Horizontal Add Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPHADDW m256 ymm ymm
|
|
// VPHADDW ymm ymm ymm
|
|
// VPHADDW m128 xmm xmm
|
|
// VPHADDW xmm xmm xmm
|
|
//
|
|
// Construct and append a VPHADDW instruction to the active function.
|
|
func (c *Context) VPHADDW(mxy, xy, xy1 operand.Op) {
|
|
c.addinstruction(x86.VPHADDW(mxy, xy, xy1))
|
|
}
|
|
|
|
// VPHADDW: Packed Horizontal Add Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPHADDW m256 ymm ymm
|
|
// VPHADDW ymm ymm ymm
|
|
// VPHADDW m128 xmm xmm
|
|
// VPHADDW xmm xmm xmm
|
|
//
|
|
// Construct and append a VPHADDW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPHADDW(mxy, xy, xy1 operand.Op) {
	ctx.VPHADDW(mxy, xy, xy1)
}
|
|
|
|
// VPHMINPOSUW: Packed Horizontal Minimum of Unsigned Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPHMINPOSUW m128 xmm
|
|
// VPHMINPOSUW xmm xmm
|
|
//
|
|
// Construct and append a VPHMINPOSUW instruction to the active function.
|
|
func (c *Context) VPHMINPOSUW(mx, x operand.Op) {
|
|
c.addinstruction(x86.VPHMINPOSUW(mx, x))
|
|
}
|
|
|
|
// VPHMINPOSUW: Packed Horizontal Minimum of Unsigned Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPHMINPOSUW m128 xmm
|
|
// VPHMINPOSUW xmm xmm
|
|
//
|
|
// Construct and append a VPHMINPOSUW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPHMINPOSUW(mx, x operand.Op) {
	ctx.VPHMINPOSUW(mx, x)
}
|
|
|
|
// VPHSUBD: Packed Horizontal Subtract Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPHSUBD m256 ymm ymm
|
|
// VPHSUBD ymm ymm ymm
|
|
// VPHSUBD m128 xmm xmm
|
|
// VPHSUBD xmm xmm xmm
|
|
//
|
|
// Construct and append a VPHSUBD instruction to the active function.
|
|
func (c *Context) VPHSUBD(mxy, xy, xy1 operand.Op) {
|
|
c.addinstruction(x86.VPHSUBD(mxy, xy, xy1))
|
|
}
|
|
|
|
// VPHSUBD: Packed Horizontal Subtract Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPHSUBD m256 ymm ymm
|
|
// VPHSUBD ymm ymm ymm
|
|
// VPHSUBD m128 xmm xmm
|
|
// VPHSUBD xmm xmm xmm
|
|
//
|
|
// Construct and append a VPHSUBD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPHSUBD(mxy, xy, xy1 operand.Op) {
	ctx.VPHSUBD(mxy, xy, xy1)
}
|
|
|
|
// VPHSUBSW: Packed Horizontal Subtract Signed Word Integers with Signed Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPHSUBSW m256 ymm ymm
|
|
// VPHSUBSW ymm ymm ymm
|
|
// VPHSUBSW m128 xmm xmm
|
|
// VPHSUBSW xmm xmm xmm
|
|
//
|
|
// Construct and append a VPHSUBSW instruction to the active function.
|
|
func (c *Context) VPHSUBSW(mxy, xy, xy1 operand.Op) {
|
|
c.addinstruction(x86.VPHSUBSW(mxy, xy, xy1))
|
|
}
|
|
|
|
// VPHSUBSW: Packed Horizontal Subtract Signed Word Integers with Signed Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPHSUBSW m256 ymm ymm
|
|
// VPHSUBSW ymm ymm ymm
|
|
// VPHSUBSW m128 xmm xmm
|
|
// VPHSUBSW xmm xmm xmm
|
|
//
|
|
// Construct and append a VPHSUBSW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPHSUBSW(mxy, xy, xy1 operand.Op) {
	ctx.VPHSUBSW(mxy, xy, xy1)
}
|
|
|
|
// VPHSUBW: Packed Horizontal Subtract Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPHSUBW m256 ymm ymm
|
|
// VPHSUBW ymm ymm ymm
|
|
// VPHSUBW m128 xmm xmm
|
|
// VPHSUBW xmm xmm xmm
|
|
//
|
|
// Construct and append a VPHSUBW instruction to the active function.
|
|
func (c *Context) VPHSUBW(mxy, xy, xy1 operand.Op) {
|
|
c.addinstruction(x86.VPHSUBW(mxy, xy, xy1))
|
|
}
|
|
|
|
// VPHSUBW: Packed Horizontal Subtract Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPHSUBW m256 ymm ymm
|
|
// VPHSUBW ymm ymm ymm
|
|
// VPHSUBW m128 xmm xmm
|
|
// VPHSUBW xmm xmm xmm
|
|
//
|
|
// Construct and append a VPHSUBW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPHSUBW(mxy, xy, xy1 operand.Op) {
	ctx.VPHSUBW(mxy, xy, xy1)
}
|
|
|
|
// VPINSRB: Insert Byte.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPINSRB imm8 m8 xmm xmm
|
|
// VPINSRB imm8 r32 xmm xmm
|
|
//
|
|
// Construct and append a VPINSRB instruction to the active function.
|
|
func (c *Context) VPINSRB(i, mr, x, x1 operand.Op) {
|
|
c.addinstruction(x86.VPINSRB(i, mr, x, x1))
|
|
}
|
|
|
|
// VPINSRB: Insert Byte.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPINSRB imm8 m8 xmm xmm
|
|
// VPINSRB imm8 r32 xmm xmm
|
|
//
|
|
// Construct and append a VPINSRB instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPINSRB(i, mr, x, x1 operand.Op) {
	ctx.VPINSRB(i, mr, x, x1)
}
|
|
|
|
// VPINSRD: Insert Doubleword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPINSRD imm8 m32 xmm xmm
|
|
// VPINSRD imm8 r32 xmm xmm
|
|
//
|
|
// Construct and append a VPINSRD instruction to the active function.
|
|
func (c *Context) VPINSRD(i, mr, x, x1 operand.Op) {
|
|
c.addinstruction(x86.VPINSRD(i, mr, x, x1))
|
|
}
|
|
|
|
// VPINSRD: Insert Doubleword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPINSRD imm8 m32 xmm xmm
|
|
// VPINSRD imm8 r32 xmm xmm
|
|
//
|
|
// Construct and append a VPINSRD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPINSRD(i, mr, x, x1 operand.Op) {
	ctx.VPINSRD(i, mr, x, x1)
}
|
|
|
|
// VPINSRQ: Insert Quadword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPINSRQ imm8 m64 xmm xmm
|
|
// VPINSRQ imm8 r64 xmm xmm
|
|
//
|
|
// Construct and append a VPINSRQ instruction to the active function.
|
|
func (c *Context) VPINSRQ(i, mr, x, x1 operand.Op) {
|
|
c.addinstruction(x86.VPINSRQ(i, mr, x, x1))
|
|
}
|
|
|
|
// VPINSRQ: Insert Quadword.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPINSRQ imm8 m64 xmm xmm
|
|
// VPINSRQ imm8 r64 xmm xmm
|
|
//
|
|
// Construct and append a VPINSRQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPINSRQ(i, mr, x, x1 operand.Op) {
	ctx.VPINSRQ(i, mr, x, x1)
}
|
|
|
|
// VPINSRW: Insert Word.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPINSRW imm8 m16 xmm xmm
|
|
// VPINSRW imm8 r32 xmm xmm
|
|
//
|
|
// Construct and append a VPINSRW instruction to the active function.
|
|
func (c *Context) VPINSRW(i, mr, x, x1 operand.Op) {
|
|
c.addinstruction(x86.VPINSRW(i, mr, x, x1))
|
|
}
|
|
|
|
// VPINSRW: Insert Word.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPINSRW imm8 m16 xmm xmm
|
|
// VPINSRW imm8 r32 xmm xmm
|
|
//
|
|
// Construct and append a VPINSRW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPINSRW(i, mr, x, x1 operand.Op) {
	ctx.VPINSRW(i, mr, x, x1)
}
|
|
|
|
// VPLZCNTD: Count the Number of Leading Zero Bits for Packed Doubleword Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPLZCNTD m128 k xmm
|
|
// VPLZCNTD m128 xmm
|
|
// VPLZCNTD m256 k ymm
|
|
// VPLZCNTD m256 ymm
|
|
// VPLZCNTD xmm k xmm
|
|
// VPLZCNTD xmm xmm
|
|
// VPLZCNTD ymm k ymm
|
|
// VPLZCNTD ymm ymm
|
|
// VPLZCNTD m512 k zmm
|
|
// VPLZCNTD m512 zmm
|
|
// VPLZCNTD zmm k zmm
|
|
// VPLZCNTD zmm zmm
|
|
//
|
|
// Construct and append a VPLZCNTD instruction to the active function.
|
|
func (c *Context) VPLZCNTD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPLZCNTD(ops...))
|
|
}
|
|
|
|
// VPLZCNTD: Count the Number of Leading Zero Bits for Packed Doubleword Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPLZCNTD m128 k xmm
|
|
// VPLZCNTD m128 xmm
|
|
// VPLZCNTD m256 k ymm
|
|
// VPLZCNTD m256 ymm
|
|
// VPLZCNTD xmm k xmm
|
|
// VPLZCNTD xmm xmm
|
|
// VPLZCNTD ymm k ymm
|
|
// VPLZCNTD ymm ymm
|
|
// VPLZCNTD m512 k zmm
|
|
// VPLZCNTD m512 zmm
|
|
// VPLZCNTD zmm k zmm
|
|
// VPLZCNTD zmm zmm
|
|
//
|
|
// Construct and append a VPLZCNTD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPLZCNTD(ops ...operand.Op) {
	ctx.VPLZCNTD(ops...)
}
|
|
|
|
// VPLZCNTD_BCST: Count the Number of Leading Zero Bits for Packed Doubleword Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPLZCNTD.BCST m32 k xmm
|
|
// VPLZCNTD.BCST m32 k ymm
|
|
// VPLZCNTD.BCST m32 xmm
|
|
// VPLZCNTD.BCST m32 ymm
|
|
// VPLZCNTD.BCST m32 k zmm
|
|
// VPLZCNTD.BCST m32 zmm
|
|
//
|
|
// Construct and append a VPLZCNTD.BCST instruction to the active function.
|
|
func (c *Context) VPLZCNTD_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPLZCNTD_BCST(ops...))
|
|
}
|
|
|
|
// VPLZCNTD_BCST: Count the Number of Leading Zero Bits for Packed Doubleword Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPLZCNTD.BCST m32 k xmm
|
|
// VPLZCNTD.BCST m32 k ymm
|
|
// VPLZCNTD.BCST m32 xmm
|
|
// VPLZCNTD.BCST m32 ymm
|
|
// VPLZCNTD.BCST m32 k zmm
|
|
// VPLZCNTD.BCST m32 zmm
|
|
//
|
|
// Construct and append a VPLZCNTD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPLZCNTD_BCST(ops ...operand.Op) {
	ctx.VPLZCNTD_BCST(ops...)
}
|
|
|
|
// VPLZCNTD_BCST_Z: Count the Number of Leading Zero Bits for Packed Doubleword Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPLZCNTD.BCST.Z m32 k xmm
|
|
// VPLZCNTD.BCST.Z m32 k ymm
|
|
// VPLZCNTD.BCST.Z m32 k zmm
|
|
//
|
|
// Construct and append a VPLZCNTD.BCST.Z instruction to the active function.
|
|
func (c *Context) VPLZCNTD_BCST_Z(m, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VPLZCNTD_BCST_Z(m, k, xyz))
|
|
}
|
|
|
|
// VPLZCNTD_BCST_Z: Count the Number of Leading Zero Bits for Packed Doubleword Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPLZCNTD.BCST.Z m32 k xmm
|
|
// VPLZCNTD.BCST.Z m32 k ymm
|
|
// VPLZCNTD.BCST.Z m32 k zmm
|
|
//
|
|
// Construct and append a VPLZCNTD.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPLZCNTD_BCST_Z(m, k, xyz operand.Op) {
	ctx.VPLZCNTD_BCST_Z(m, k, xyz)
}
|
|
|
|
// VPLZCNTD_Z: Count the Number of Leading Zero Bits for Packed Doubleword Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPLZCNTD.Z m128 k xmm
|
|
// VPLZCNTD.Z m256 k ymm
|
|
// VPLZCNTD.Z xmm k xmm
|
|
// VPLZCNTD.Z ymm k ymm
|
|
// VPLZCNTD.Z m512 k zmm
|
|
// VPLZCNTD.Z zmm k zmm
|
|
//
|
|
// Construct and append a VPLZCNTD.Z instruction to the active function.
|
|
func (c *Context) VPLZCNTD_Z(mxyz, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VPLZCNTD_Z(mxyz, k, xyz))
|
|
}
|
|
|
|
// VPLZCNTD_Z: Count the Number of Leading Zero Bits for Packed Doubleword Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPLZCNTD.Z m128 k xmm
|
|
// VPLZCNTD.Z m256 k ymm
|
|
// VPLZCNTD.Z xmm k xmm
|
|
// VPLZCNTD.Z ymm k ymm
|
|
// VPLZCNTD.Z m512 k zmm
|
|
// VPLZCNTD.Z zmm k zmm
|
|
//
|
|
// Construct and append a VPLZCNTD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPLZCNTD_Z(mxyz, k, xyz operand.Op) {
	ctx.VPLZCNTD_Z(mxyz, k, xyz)
}
|
|
|
|
// VPLZCNTQ: Count the Number of Leading Zero Bits for Packed Quadword Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPLZCNTQ m128 k xmm
|
|
// VPLZCNTQ m128 xmm
|
|
// VPLZCNTQ m256 k ymm
|
|
// VPLZCNTQ m256 ymm
|
|
// VPLZCNTQ xmm k xmm
|
|
// VPLZCNTQ xmm xmm
|
|
// VPLZCNTQ ymm k ymm
|
|
// VPLZCNTQ ymm ymm
|
|
// VPLZCNTQ m512 k zmm
|
|
// VPLZCNTQ m512 zmm
|
|
// VPLZCNTQ zmm k zmm
|
|
// VPLZCNTQ zmm zmm
|
|
//
|
|
// Construct and append a VPLZCNTQ instruction to the active function.
|
|
func (c *Context) VPLZCNTQ(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPLZCNTQ(ops...))
|
|
}
|
|
|
|
// VPLZCNTQ: Count the Number of Leading Zero Bits for Packed Quadword Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPLZCNTQ m128 k xmm
|
|
// VPLZCNTQ m128 xmm
|
|
// VPLZCNTQ m256 k ymm
|
|
// VPLZCNTQ m256 ymm
|
|
// VPLZCNTQ xmm k xmm
|
|
// VPLZCNTQ xmm xmm
|
|
// VPLZCNTQ ymm k ymm
|
|
// VPLZCNTQ ymm ymm
|
|
// VPLZCNTQ m512 k zmm
|
|
// VPLZCNTQ m512 zmm
|
|
// VPLZCNTQ zmm k zmm
|
|
// VPLZCNTQ zmm zmm
|
|
//
|
|
// Construct and append a VPLZCNTQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPLZCNTQ(ops ...operand.Op) {
	ctx.VPLZCNTQ(ops...)
}
|
|
|
|
// VPLZCNTQ_BCST: Count the Number of Leading Zero Bits for Packed Quadword Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPLZCNTQ.BCST m64 k xmm
|
|
// VPLZCNTQ.BCST m64 k ymm
|
|
// VPLZCNTQ.BCST m64 xmm
|
|
// VPLZCNTQ.BCST m64 ymm
|
|
// VPLZCNTQ.BCST m64 k zmm
|
|
// VPLZCNTQ.BCST m64 zmm
|
|
//
|
|
// Construct and append a VPLZCNTQ.BCST instruction to the active function.
|
|
func (c *Context) VPLZCNTQ_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPLZCNTQ_BCST(ops...))
|
|
}
|
|
|
|
// VPLZCNTQ_BCST: Count the Number of Leading Zero Bits for Packed Quadword Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPLZCNTQ.BCST m64 k xmm
|
|
// VPLZCNTQ.BCST m64 k ymm
|
|
// VPLZCNTQ.BCST m64 xmm
|
|
// VPLZCNTQ.BCST m64 ymm
|
|
// VPLZCNTQ.BCST m64 k zmm
|
|
// VPLZCNTQ.BCST m64 zmm
|
|
//
|
|
// Construct and append a VPLZCNTQ.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPLZCNTQ_BCST(ops ...operand.Op) {
	ctx.VPLZCNTQ_BCST(ops...)
}
|
|
|
|
// VPLZCNTQ_BCST_Z: Count the Number of Leading Zero Bits for Packed Quadword Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPLZCNTQ.BCST.Z m64 k xmm
|
|
// VPLZCNTQ.BCST.Z m64 k ymm
|
|
// VPLZCNTQ.BCST.Z m64 k zmm
|
|
//
|
|
// Construct and append a VPLZCNTQ.BCST.Z instruction to the active function.
|
|
func (c *Context) VPLZCNTQ_BCST_Z(m, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VPLZCNTQ_BCST_Z(m, k, xyz))
|
|
}
|
|
|
|
// VPLZCNTQ_BCST_Z: Count the Number of Leading Zero Bits for Packed Quadword Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPLZCNTQ.BCST.Z m64 k xmm
|
|
// VPLZCNTQ.BCST.Z m64 k ymm
|
|
// VPLZCNTQ.BCST.Z m64 k zmm
|
|
//
|
|
// Construct and append a VPLZCNTQ.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPLZCNTQ_BCST_Z(m, k, xyz operand.Op) {
	ctx.VPLZCNTQ_BCST_Z(m, k, xyz)
}
|
|
|
|
// VPLZCNTQ_Z: Count the Number of Leading Zero Bits for Packed Quadword Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPLZCNTQ.Z m128 k xmm
|
|
// VPLZCNTQ.Z m256 k ymm
|
|
// VPLZCNTQ.Z xmm k xmm
|
|
// VPLZCNTQ.Z ymm k ymm
|
|
// VPLZCNTQ.Z m512 k zmm
|
|
// VPLZCNTQ.Z zmm k zmm
|
|
//
|
|
// Construct and append a VPLZCNTQ.Z instruction to the active function.
|
|
func (c *Context) VPLZCNTQ_Z(mxyz, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VPLZCNTQ_Z(mxyz, k, xyz))
|
|
}
|
|
|
|
// VPLZCNTQ_Z: Count the Number of Leading Zero Bits for Packed Quadword Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPLZCNTQ.Z m128 k xmm
|
|
// VPLZCNTQ.Z m256 k ymm
|
|
// VPLZCNTQ.Z xmm k xmm
|
|
// VPLZCNTQ.Z ymm k ymm
|
|
// VPLZCNTQ.Z m512 k zmm
|
|
// VPLZCNTQ.Z zmm k zmm
|
|
//
|
|
// Construct and append a VPLZCNTQ.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPLZCNTQ_Z(mxyz, k, xyz operand.Op) {
	ctx.VPLZCNTQ_Z(mxyz, k, xyz)
}
|
|
|
|
// VPMADD52HUQ: Packed Multiply of Unsigned 52-bit Unsigned Integers and Add High 52-bit Products to Quadword Accumulators.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMADD52HUQ m128 xmm k xmm
|
|
// VPMADD52HUQ m128 xmm xmm
|
|
// VPMADD52HUQ m256 ymm k ymm
|
|
// VPMADD52HUQ m256 ymm ymm
|
|
// VPMADD52HUQ xmm xmm k xmm
|
|
// VPMADD52HUQ xmm xmm xmm
|
|
// VPMADD52HUQ ymm ymm k ymm
|
|
// VPMADD52HUQ ymm ymm ymm
|
|
// VPMADD52HUQ m512 zmm k zmm
|
|
// VPMADD52HUQ m512 zmm zmm
|
|
// VPMADD52HUQ zmm zmm k zmm
|
|
// VPMADD52HUQ zmm zmm zmm
|
|
//
|
|
// Construct and append a VPMADD52HUQ instruction to the active function.
|
|
func (c *Context) VPMADD52HUQ(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPMADD52HUQ(ops...))
|
|
}
|
|
|
|
// VPMADD52HUQ: Packed Multiply of Unsigned 52-bit Unsigned Integers and Add High 52-bit Products to Quadword Accumulators.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMADD52HUQ m128 xmm k xmm
|
|
// VPMADD52HUQ m128 xmm xmm
|
|
// VPMADD52HUQ m256 ymm k ymm
|
|
// VPMADD52HUQ m256 ymm ymm
|
|
// VPMADD52HUQ xmm xmm k xmm
|
|
// VPMADD52HUQ xmm xmm xmm
|
|
// VPMADD52HUQ ymm ymm k ymm
|
|
// VPMADD52HUQ ymm ymm ymm
|
|
// VPMADD52HUQ m512 zmm k zmm
|
|
// VPMADD52HUQ m512 zmm zmm
|
|
// VPMADD52HUQ zmm zmm k zmm
|
|
// VPMADD52HUQ zmm zmm zmm
|
|
//
|
|
// Construct and append a VPMADD52HUQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMADD52HUQ(ops ...operand.Op) {
	ctx.VPMADD52HUQ(ops...)
}
|
|
|
|
// VPMADD52HUQ_BCST: Packed Multiply of Unsigned 52-bit Unsigned Integers and Add High 52-bit Products to Quadword Accumulators (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMADD52HUQ.BCST m64 xmm k xmm
|
|
// VPMADD52HUQ.BCST m64 xmm xmm
|
|
// VPMADD52HUQ.BCST m64 ymm k ymm
|
|
// VPMADD52HUQ.BCST m64 ymm ymm
|
|
// VPMADD52HUQ.BCST m64 zmm k zmm
|
|
// VPMADD52HUQ.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VPMADD52HUQ.BCST instruction to the active function.
|
|
func (c *Context) VPMADD52HUQ_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPMADD52HUQ_BCST(ops...))
|
|
}
|
|
|
|
// VPMADD52HUQ_BCST: Packed Multiply of Unsigned 52-bit Unsigned Integers and Add High 52-bit Products to Quadword Accumulators (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMADD52HUQ.BCST m64 xmm k xmm
|
|
// VPMADD52HUQ.BCST m64 xmm xmm
|
|
// VPMADD52HUQ.BCST m64 ymm k ymm
|
|
// VPMADD52HUQ.BCST m64 ymm ymm
|
|
// VPMADD52HUQ.BCST m64 zmm k zmm
|
|
// VPMADD52HUQ.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VPMADD52HUQ.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMADD52HUQ_BCST(ops ...operand.Op) {
	ctx.VPMADD52HUQ_BCST(ops...)
}
|
|
|
|
// VPMADD52HUQ_BCST_Z: Packed Multiply of Unsigned 52-bit Unsigned Integers and Add High 52-bit Products to Quadword Accumulators (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMADD52HUQ.BCST.Z m64 xmm k xmm
|
|
// VPMADD52HUQ.BCST.Z m64 ymm k ymm
|
|
// VPMADD52HUQ.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VPMADD52HUQ.BCST.Z instruction to the active function.
|
|
func (c *Context) VPMADD52HUQ_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPMADD52HUQ_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPMADD52HUQ_BCST_Z: Packed Multiply of Unsigned 52-bit Unsigned Integers and Add High 52-bit Products to Quadword Accumulators (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMADD52HUQ.BCST.Z m64 xmm k xmm
|
|
// VPMADD52HUQ.BCST.Z m64 ymm k ymm
|
|
// VPMADD52HUQ.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VPMADD52HUQ.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMADD52HUQ_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	ctx.VPMADD52HUQ_BCST_Z(m, xyz, k, xyz1)
}
|
|
|
|
// VPMADD52HUQ_Z: Packed Multiply of Unsigned 52-bit Unsigned Integers and Add High 52-bit Products to Quadword Accumulators (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMADD52HUQ.Z m128 xmm k xmm
|
|
// VPMADD52HUQ.Z m256 ymm k ymm
|
|
// VPMADD52HUQ.Z xmm xmm k xmm
|
|
// VPMADD52HUQ.Z ymm ymm k ymm
|
|
// VPMADD52HUQ.Z m512 zmm k zmm
|
|
// VPMADD52HUQ.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPMADD52HUQ.Z instruction to the active function.
|
|
func (c *Context) VPMADD52HUQ_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPMADD52HUQ_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPMADD52HUQ_Z: Packed Multiply of Unsigned 52-bit Unsigned Integers and Add High 52-bit Products to Quadword Accumulators (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMADD52HUQ.Z m128 xmm k xmm
|
|
// VPMADD52HUQ.Z m256 ymm k ymm
|
|
// VPMADD52HUQ.Z xmm xmm k xmm
|
|
// VPMADD52HUQ.Z ymm ymm k ymm
|
|
// VPMADD52HUQ.Z m512 zmm k zmm
|
|
// VPMADD52HUQ.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPMADD52HUQ.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMADD52HUQ_Z(mxyz, xyz, k, xyz1 operand.Op) {
	ctx.VPMADD52HUQ_Z(mxyz, xyz, k, xyz1)
}
|
|
|
|
// VPMADD52LUQ: Packed Multiply of Unsigned 52-bit Integers and Add the Low 52-bit Products to Quadword Accumulators.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMADD52LUQ m128 xmm k xmm
|
|
// VPMADD52LUQ m128 xmm xmm
|
|
// VPMADD52LUQ m256 ymm k ymm
|
|
// VPMADD52LUQ m256 ymm ymm
|
|
// VPMADD52LUQ xmm xmm k xmm
|
|
// VPMADD52LUQ xmm xmm xmm
|
|
// VPMADD52LUQ ymm ymm k ymm
|
|
// VPMADD52LUQ ymm ymm ymm
|
|
// VPMADD52LUQ m512 zmm k zmm
|
|
// VPMADD52LUQ m512 zmm zmm
|
|
// VPMADD52LUQ zmm zmm k zmm
|
|
// VPMADD52LUQ zmm zmm zmm
|
|
//
|
|
// Construct and append a VPMADD52LUQ instruction to the active function.
|
|
func (c *Context) VPMADD52LUQ(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPMADD52LUQ(ops...))
|
|
}
|
|
|
|
// VPMADD52LUQ: Packed Multiply of Unsigned 52-bit Integers and Add the Low 52-bit Products to Quadword Accumulators.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMADD52LUQ m128 xmm k xmm
|
|
// VPMADD52LUQ m128 xmm xmm
|
|
// VPMADD52LUQ m256 ymm k ymm
|
|
// VPMADD52LUQ m256 ymm ymm
|
|
// VPMADD52LUQ xmm xmm k xmm
|
|
// VPMADD52LUQ xmm xmm xmm
|
|
// VPMADD52LUQ ymm ymm k ymm
|
|
// VPMADD52LUQ ymm ymm ymm
|
|
// VPMADD52LUQ m512 zmm k zmm
|
|
// VPMADD52LUQ m512 zmm zmm
|
|
// VPMADD52LUQ zmm zmm k zmm
|
|
// VPMADD52LUQ zmm zmm zmm
|
|
//
|
|
// Construct and append a VPMADD52LUQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMADD52LUQ(ops ...operand.Op) {
	ctx.VPMADD52LUQ(ops...)
}
|
|
|
|
// VPMADD52LUQ_BCST: Packed Multiply of Unsigned 52-bit Integers and Add the Low 52-bit Products to Quadword Accumulators (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMADD52LUQ.BCST m64 xmm k xmm
|
|
// VPMADD52LUQ.BCST m64 xmm xmm
|
|
// VPMADD52LUQ.BCST m64 ymm k ymm
|
|
// VPMADD52LUQ.BCST m64 ymm ymm
|
|
// VPMADD52LUQ.BCST m64 zmm k zmm
|
|
// VPMADD52LUQ.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VPMADD52LUQ.BCST instruction to the active function.
|
|
func (c *Context) VPMADD52LUQ_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPMADD52LUQ_BCST(ops...))
|
|
}
|
|
|
|
// VPMADD52LUQ_BCST: Packed Multiply of Unsigned 52-bit Integers and Add the Low 52-bit Products to Quadword Accumulators (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMADD52LUQ.BCST m64 xmm k xmm
|
|
// VPMADD52LUQ.BCST m64 xmm xmm
|
|
// VPMADD52LUQ.BCST m64 ymm k ymm
|
|
// VPMADD52LUQ.BCST m64 ymm ymm
|
|
// VPMADD52LUQ.BCST m64 zmm k zmm
|
|
// VPMADD52LUQ.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VPMADD52LUQ.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMADD52LUQ_BCST(ops ...operand.Op) {
	ctx.VPMADD52LUQ_BCST(ops...)
}
|
|
|
|
// VPMADD52LUQ_BCST_Z: Packed Multiply of Unsigned 52-bit Integers and Add the Low 52-bit Products to Quadword Accumulators (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMADD52LUQ.BCST.Z m64 xmm k xmm
|
|
// VPMADD52LUQ.BCST.Z m64 ymm k ymm
|
|
// VPMADD52LUQ.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VPMADD52LUQ.BCST.Z instruction to the active function.
|
|
func (c *Context) VPMADD52LUQ_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPMADD52LUQ_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPMADD52LUQ_BCST_Z: Packed Multiply of Unsigned 52-bit Integers and Add the Low 52-bit Products to Quadword Accumulators (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMADD52LUQ.BCST.Z m64 xmm k xmm
|
|
// VPMADD52LUQ.BCST.Z m64 ymm k ymm
|
|
// VPMADD52LUQ.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VPMADD52LUQ.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMADD52LUQ_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	ctx.VPMADD52LUQ_BCST_Z(m, xyz, k, xyz1)
}
|
|
|
|
// VPMADD52LUQ_Z: Packed Multiply of Unsigned 52-bit Integers and Add the Low 52-bit Products to Quadword Accumulators (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMADD52LUQ.Z m128 xmm k xmm
|
|
// VPMADD52LUQ.Z m256 ymm k ymm
|
|
// VPMADD52LUQ.Z xmm xmm k xmm
|
|
// VPMADD52LUQ.Z ymm ymm k ymm
|
|
// VPMADD52LUQ.Z m512 zmm k zmm
|
|
// VPMADD52LUQ.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPMADD52LUQ.Z instruction to the active function.
|
|
func (c *Context) VPMADD52LUQ_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPMADD52LUQ_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPMADD52LUQ_Z: Packed Multiply of Unsigned 52-bit Integers and Add the Low 52-bit Products to Quadword Accumulators (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMADD52LUQ.Z m128 xmm k xmm
|
|
// VPMADD52LUQ.Z m256 ymm k ymm
|
|
// VPMADD52LUQ.Z xmm xmm k xmm
|
|
// VPMADD52LUQ.Z ymm ymm k ymm
|
|
// VPMADD52LUQ.Z m512 zmm k zmm
|
|
// VPMADD52LUQ.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPMADD52LUQ.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMADD52LUQ_Z(mxyz, xyz, k, xyz1 operand.Op) {
	ctx.VPMADD52LUQ_Z(mxyz, xyz, k, xyz1)
}
|
|
|
|
// VPMADDUBSW: Multiply and Add Packed Signed and Unsigned Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMADDUBSW m256 ymm ymm
|
|
// VPMADDUBSW ymm ymm ymm
|
|
// VPMADDUBSW m128 xmm xmm
|
|
// VPMADDUBSW xmm xmm xmm
|
|
// VPMADDUBSW m128 xmm k xmm
|
|
// VPMADDUBSW m256 ymm k ymm
|
|
// VPMADDUBSW xmm xmm k xmm
|
|
// VPMADDUBSW ymm ymm k ymm
|
|
// VPMADDUBSW m512 zmm k zmm
|
|
// VPMADDUBSW m512 zmm zmm
|
|
// VPMADDUBSW zmm zmm k zmm
|
|
// VPMADDUBSW zmm zmm zmm
|
|
//
|
|
// Construct and append a VPMADDUBSW instruction to the active function.
|
|
func (c *Context) VPMADDUBSW(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPMADDUBSW(ops...))
|
|
}
|
|
|
|
// VPMADDUBSW: Multiply and Add Packed Signed and Unsigned Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMADDUBSW m256 ymm ymm
|
|
// VPMADDUBSW ymm ymm ymm
|
|
// VPMADDUBSW m128 xmm xmm
|
|
// VPMADDUBSW xmm xmm xmm
|
|
// VPMADDUBSW m128 xmm k xmm
|
|
// VPMADDUBSW m256 ymm k ymm
|
|
// VPMADDUBSW xmm xmm k xmm
|
|
// VPMADDUBSW ymm ymm k ymm
|
|
// VPMADDUBSW m512 zmm k zmm
|
|
// VPMADDUBSW m512 zmm zmm
|
|
// VPMADDUBSW zmm zmm k zmm
|
|
// VPMADDUBSW zmm zmm zmm
|
|
//
|
|
// Construct and append a VPMADDUBSW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMADDUBSW(ops ...operand.Op) {
	ctx.VPMADDUBSW(ops...)
}
|
|
|
|
// VPMADDUBSW_Z: Multiply and Add Packed Signed and Unsigned Byte Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMADDUBSW.Z m128 xmm k xmm
|
|
// VPMADDUBSW.Z m256 ymm k ymm
|
|
// VPMADDUBSW.Z xmm xmm k xmm
|
|
// VPMADDUBSW.Z ymm ymm k ymm
|
|
// VPMADDUBSW.Z m512 zmm k zmm
|
|
// VPMADDUBSW.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPMADDUBSW.Z instruction to the active function.
|
|
func (c *Context) VPMADDUBSW_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPMADDUBSW_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPMADDUBSW_Z: Multiply and Add Packed Signed and Unsigned Byte Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMADDUBSW.Z m128 xmm k xmm
|
|
// VPMADDUBSW.Z m256 ymm k ymm
|
|
// VPMADDUBSW.Z xmm xmm k xmm
|
|
// VPMADDUBSW.Z ymm ymm k ymm
|
|
// VPMADDUBSW.Z m512 zmm k zmm
|
|
// VPMADDUBSW.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPMADDUBSW.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMADDUBSW_Z(mxyz, xyz, k, xyz1 operand.Op) {
	ctx.VPMADDUBSW_Z(mxyz, xyz, k, xyz1)
}
|
|
|
|
// VPMADDWD: Multiply and Add Packed Signed Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMADDWD m256 ymm ymm
|
|
// VPMADDWD ymm ymm ymm
|
|
// VPMADDWD m128 xmm xmm
|
|
// VPMADDWD xmm xmm xmm
|
|
// VPMADDWD m128 xmm k xmm
|
|
// VPMADDWD m256 ymm k ymm
|
|
// VPMADDWD xmm xmm k xmm
|
|
// VPMADDWD ymm ymm k ymm
|
|
// VPMADDWD m512 zmm k zmm
|
|
// VPMADDWD m512 zmm zmm
|
|
// VPMADDWD zmm zmm k zmm
|
|
// VPMADDWD zmm zmm zmm
|
|
//
|
|
// Construct and append a VPMADDWD instruction to the active function.
|
|
func (c *Context) VPMADDWD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPMADDWD(ops...))
|
|
}
|
|
|
|
// VPMADDWD: Multiply and Add Packed Signed Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMADDWD m256 ymm ymm
|
|
// VPMADDWD ymm ymm ymm
|
|
// VPMADDWD m128 xmm xmm
|
|
// VPMADDWD xmm xmm xmm
|
|
// VPMADDWD m128 xmm k xmm
|
|
// VPMADDWD m256 ymm k ymm
|
|
// VPMADDWD xmm xmm k xmm
|
|
// VPMADDWD ymm ymm k ymm
|
|
// VPMADDWD m512 zmm k zmm
|
|
// VPMADDWD m512 zmm zmm
|
|
// VPMADDWD zmm zmm k zmm
|
|
// VPMADDWD zmm zmm zmm
|
|
//
|
|
// Construct and append a VPMADDWD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMADDWD(ops ...operand.Op) {
	ctx.VPMADDWD(ops...)
}
|
|
|
|
// VPMADDWD_Z: Multiply and Add Packed Signed Word Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMADDWD.Z m128 xmm k xmm
|
|
// VPMADDWD.Z m256 ymm k ymm
|
|
// VPMADDWD.Z xmm xmm k xmm
|
|
// VPMADDWD.Z ymm ymm k ymm
|
|
// VPMADDWD.Z m512 zmm k zmm
|
|
// VPMADDWD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPMADDWD.Z instruction to the active function.
|
|
func (c *Context) VPMADDWD_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPMADDWD_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPMADDWD_Z: Multiply and Add Packed Signed Word Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMADDWD.Z m128 xmm k xmm
|
|
// VPMADDWD.Z m256 ymm k ymm
|
|
// VPMADDWD.Z xmm xmm k xmm
|
|
// VPMADDWD.Z ymm ymm k ymm
|
|
// VPMADDWD.Z m512 zmm k zmm
|
|
// VPMADDWD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPMADDWD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMADDWD_Z(mxyz, xyz, k, xyz1 operand.Op) {
	ctx.VPMADDWD_Z(mxyz, xyz, k, xyz1)
}
|
|
|
|
// VPMASKMOVD: Conditional Move Packed Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMASKMOVD m128 xmm xmm
|
|
// VPMASKMOVD m256 ymm ymm
|
|
// VPMASKMOVD xmm xmm m128
|
|
// VPMASKMOVD ymm ymm m256
|
|
//
|
|
// Construct and append a VPMASKMOVD instruction to the active function.
|
|
func (c *Context) VPMASKMOVD(mxy, xy, mxy1 operand.Op) {
|
|
c.addinstruction(x86.VPMASKMOVD(mxy, xy, mxy1))
|
|
}
|
|
|
|
// VPMASKMOVD: Conditional Move Packed Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMASKMOVD m128 xmm xmm
|
|
// VPMASKMOVD m256 ymm ymm
|
|
// VPMASKMOVD xmm xmm m128
|
|
// VPMASKMOVD ymm ymm m256
|
|
//
|
|
// Construct and append a VPMASKMOVD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMASKMOVD(mxy, xy, mxy1 operand.Op) {
	ctx.VPMASKMOVD(mxy, xy, mxy1)
}
|
|
|
|
// VPMASKMOVQ: Conditional Move Packed Quadword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMASKMOVQ m128 xmm xmm
|
|
// VPMASKMOVQ m256 ymm ymm
|
|
// VPMASKMOVQ xmm xmm m128
|
|
// VPMASKMOVQ ymm ymm m256
|
|
//
|
|
// Construct and append a VPMASKMOVQ instruction to the active function.
|
|
func (c *Context) VPMASKMOVQ(mxy, xy, mxy1 operand.Op) {
|
|
c.addinstruction(x86.VPMASKMOVQ(mxy, xy, mxy1))
|
|
}
|
|
|
|
// VPMASKMOVQ: Conditional Move Packed Quadword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMASKMOVQ m128 xmm xmm
|
|
// VPMASKMOVQ m256 ymm ymm
|
|
// VPMASKMOVQ xmm xmm m128
|
|
// VPMASKMOVQ ymm ymm m256
|
|
//
|
|
// Construct and append a VPMASKMOVQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMASKMOVQ(mxy, xy, mxy1 operand.Op) {
	ctx.VPMASKMOVQ(mxy, xy, mxy1)
}
|
|
|
|
// VPMAXSB: Maximum of Packed Signed Byte Integers.
//
// Forms:
//
//	VPMAXSB m256 ymm ymm
//	VPMAXSB ymm ymm ymm
//	VPMAXSB m128 xmm xmm
//	VPMAXSB xmm xmm xmm
//	VPMAXSB m128 xmm k xmm
//	VPMAXSB m256 ymm k ymm
//	VPMAXSB xmm xmm k xmm
//	VPMAXSB ymm ymm k ymm
//	VPMAXSB m512 zmm k zmm
//	VPMAXSB m512 zmm zmm
//	VPMAXSB zmm zmm k zmm
//	VPMAXSB zmm zmm zmm
//
// Construct and append a VPMAXSB instruction to the active function.
func (c *Context) VPMAXSB(ops ...operand.Op) {
	c.addinstruction(x86.VPMAXSB(ops...))
}

// VPMAXSB: Maximum of Packed Signed Byte Integers.
//
// Forms:
//
//	VPMAXSB m256 ymm ymm
//	VPMAXSB ymm ymm ymm
//	VPMAXSB m128 xmm xmm
//	VPMAXSB xmm xmm xmm
//	VPMAXSB m128 xmm k xmm
//	VPMAXSB m256 ymm k ymm
//	VPMAXSB xmm xmm k xmm
//	VPMAXSB ymm ymm k ymm
//	VPMAXSB m512 zmm k zmm
//	VPMAXSB m512 zmm zmm
//	VPMAXSB zmm zmm k zmm
//	VPMAXSB zmm zmm zmm
//
// Construct and append a VPMAXSB instruction to the active function.
// Operates on the global context.
func VPMAXSB(ops ...operand.Op) { ctx.VPMAXSB(ops...) }

// VPMAXSB_Z: Maximum of Packed Signed Byte Integers (Zeroing Masking).
//
// Forms:
//
//	VPMAXSB.Z m128 xmm k xmm
//	VPMAXSB.Z m256 ymm k ymm
//	VPMAXSB.Z xmm xmm k xmm
//	VPMAXSB.Z ymm ymm k ymm
//	VPMAXSB.Z m512 zmm k zmm
//	VPMAXSB.Z zmm zmm k zmm
//
// Construct and append a VPMAXSB.Z instruction to the active function.
func (c *Context) VPMAXSB_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VPMAXSB_Z(mxyz, xyz, k, xyz1))
}

// VPMAXSB_Z: Maximum of Packed Signed Byte Integers (Zeroing Masking).
//
// Forms:
//
//	VPMAXSB.Z m128 xmm k xmm
//	VPMAXSB.Z m256 ymm k ymm
//	VPMAXSB.Z xmm xmm k xmm
//	VPMAXSB.Z ymm ymm k ymm
//	VPMAXSB.Z m512 zmm k zmm
//	VPMAXSB.Z zmm zmm k zmm
//
// Construct and append a VPMAXSB.Z instruction to the active function.
// Operates on the global context.
func VPMAXSB_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPMAXSB_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPMAXSD: Maximum of Packed Signed Doubleword Integers.
//
// Forms:
//
//	VPMAXSD m256 ymm ymm
//	VPMAXSD ymm ymm ymm
//	VPMAXSD m128 xmm xmm
//	VPMAXSD xmm xmm xmm
//	VPMAXSD m128 xmm k xmm
//	VPMAXSD m256 ymm k ymm
//	VPMAXSD xmm xmm k xmm
//	VPMAXSD ymm ymm k ymm
//	VPMAXSD m512 zmm k zmm
//	VPMAXSD m512 zmm zmm
//	VPMAXSD zmm zmm k zmm
//	VPMAXSD zmm zmm zmm
//
// Construct and append a VPMAXSD instruction to the active function.
func (c *Context) VPMAXSD(ops ...operand.Op) {
	c.addinstruction(x86.VPMAXSD(ops...))
}

// VPMAXSD: Maximum of Packed Signed Doubleword Integers.
//
// Forms:
//
//	VPMAXSD m256 ymm ymm
//	VPMAXSD ymm ymm ymm
//	VPMAXSD m128 xmm xmm
//	VPMAXSD xmm xmm xmm
//	VPMAXSD m128 xmm k xmm
//	VPMAXSD m256 ymm k ymm
//	VPMAXSD xmm xmm k xmm
//	VPMAXSD ymm ymm k ymm
//	VPMAXSD m512 zmm k zmm
//	VPMAXSD m512 zmm zmm
//	VPMAXSD zmm zmm k zmm
//	VPMAXSD zmm zmm zmm
//
// Construct and append a VPMAXSD instruction to the active function.
// Operates on the global context.
func VPMAXSD(ops ...operand.Op) { ctx.VPMAXSD(ops...) }

// VPMAXSD_BCST: Maximum of Packed Signed Doubleword Integers (Broadcast).
//
// Forms:
//
//	VPMAXSD.BCST m32 xmm k xmm
//	VPMAXSD.BCST m32 xmm xmm
//	VPMAXSD.BCST m32 ymm k ymm
//	VPMAXSD.BCST m32 ymm ymm
//	VPMAXSD.BCST m32 zmm k zmm
//	VPMAXSD.BCST m32 zmm zmm
//
// Construct and append a VPMAXSD.BCST instruction to the active function.
func (c *Context) VPMAXSD_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VPMAXSD_BCST(ops...))
}

// VPMAXSD_BCST: Maximum of Packed Signed Doubleword Integers (Broadcast).
//
// Forms:
//
//	VPMAXSD.BCST m32 xmm k xmm
//	VPMAXSD.BCST m32 xmm xmm
//	VPMAXSD.BCST m32 ymm k ymm
//	VPMAXSD.BCST m32 ymm ymm
//	VPMAXSD.BCST m32 zmm k zmm
//	VPMAXSD.BCST m32 zmm zmm
//
// Construct and append a VPMAXSD.BCST instruction to the active function.
// Operates on the global context.
func VPMAXSD_BCST(ops ...operand.Op) { ctx.VPMAXSD_BCST(ops...) }

// VPMAXSD_BCST_Z: Maximum of Packed Signed Doubleword Integers (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VPMAXSD.BCST.Z m32 xmm k xmm
//	VPMAXSD.BCST.Z m32 ymm k ymm
//	VPMAXSD.BCST.Z m32 zmm k zmm
//
// Construct and append a VPMAXSD.BCST.Z instruction to the active function.
func (c *Context) VPMAXSD_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VPMAXSD_BCST_Z(m, xyz, k, xyz1))
}

// VPMAXSD_BCST_Z: Maximum of Packed Signed Doubleword Integers (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VPMAXSD.BCST.Z m32 xmm k xmm
//	VPMAXSD.BCST.Z m32 ymm k ymm
//	VPMAXSD.BCST.Z m32 zmm k zmm
//
// Construct and append a VPMAXSD.BCST.Z instruction to the active function.
// Operates on the global context.
func VPMAXSD_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VPMAXSD_BCST_Z(m, xyz, k, xyz1) }

// VPMAXSD_Z: Maximum of Packed Signed Doubleword Integers (Zeroing Masking).
//
// Forms:
//
//	VPMAXSD.Z m128 xmm k xmm
//	VPMAXSD.Z m256 ymm k ymm
//	VPMAXSD.Z xmm xmm k xmm
//	VPMAXSD.Z ymm ymm k ymm
//	VPMAXSD.Z m512 zmm k zmm
//	VPMAXSD.Z zmm zmm k zmm
//
// Construct and append a VPMAXSD.Z instruction to the active function.
func (c *Context) VPMAXSD_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VPMAXSD_Z(mxyz, xyz, k, xyz1))
}

// VPMAXSD_Z: Maximum of Packed Signed Doubleword Integers (Zeroing Masking).
//
// Forms:
//
//	VPMAXSD.Z m128 xmm k xmm
//	VPMAXSD.Z m256 ymm k ymm
//	VPMAXSD.Z xmm xmm k xmm
//	VPMAXSD.Z ymm ymm k ymm
//	VPMAXSD.Z m512 zmm k zmm
//	VPMAXSD.Z zmm zmm k zmm
//
// Construct and append a VPMAXSD.Z instruction to the active function.
// Operates on the global context.
func VPMAXSD_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPMAXSD_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPMAXSQ: Maximum of Packed Signed Quadword Integers.
//
// Forms:
//
//	VPMAXSQ m128 xmm k xmm
//	VPMAXSQ m128 xmm xmm
//	VPMAXSQ m256 ymm k ymm
//	VPMAXSQ m256 ymm ymm
//	VPMAXSQ xmm xmm k xmm
//	VPMAXSQ xmm xmm xmm
//	VPMAXSQ ymm ymm k ymm
//	VPMAXSQ ymm ymm ymm
//	VPMAXSQ m512 zmm k zmm
//	VPMAXSQ m512 zmm zmm
//	VPMAXSQ zmm zmm k zmm
//	VPMAXSQ zmm zmm zmm
//
// Construct and append a VPMAXSQ instruction to the active function.
func (c *Context) VPMAXSQ(ops ...operand.Op) {
	c.addinstruction(x86.VPMAXSQ(ops...))
}

// VPMAXSQ: Maximum of Packed Signed Quadword Integers.
//
// Forms:
//
//	VPMAXSQ m128 xmm k xmm
//	VPMAXSQ m128 xmm xmm
//	VPMAXSQ m256 ymm k ymm
//	VPMAXSQ m256 ymm ymm
//	VPMAXSQ xmm xmm k xmm
//	VPMAXSQ xmm xmm xmm
//	VPMAXSQ ymm ymm k ymm
//	VPMAXSQ ymm ymm ymm
//	VPMAXSQ m512 zmm k zmm
//	VPMAXSQ m512 zmm zmm
//	VPMAXSQ zmm zmm k zmm
//	VPMAXSQ zmm zmm zmm
//
// Construct and append a VPMAXSQ instruction to the active function.
// Operates on the global context.
func VPMAXSQ(ops ...operand.Op) { ctx.VPMAXSQ(ops...) }

// VPMAXSQ_BCST: Maximum of Packed Signed Quadword Integers (Broadcast).
//
// Forms:
//
//	VPMAXSQ.BCST m64 xmm k xmm
//	VPMAXSQ.BCST m64 xmm xmm
//	VPMAXSQ.BCST m64 ymm k ymm
//	VPMAXSQ.BCST m64 ymm ymm
//	VPMAXSQ.BCST m64 zmm k zmm
//	VPMAXSQ.BCST m64 zmm zmm
//
// Construct and append a VPMAXSQ.BCST instruction to the active function.
func (c *Context) VPMAXSQ_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VPMAXSQ_BCST(ops...))
}

// VPMAXSQ_BCST: Maximum of Packed Signed Quadword Integers (Broadcast).
//
// Forms:
//
//	VPMAXSQ.BCST m64 xmm k xmm
//	VPMAXSQ.BCST m64 xmm xmm
//	VPMAXSQ.BCST m64 ymm k ymm
//	VPMAXSQ.BCST m64 ymm ymm
//	VPMAXSQ.BCST m64 zmm k zmm
//	VPMAXSQ.BCST m64 zmm zmm
//
// Construct and append a VPMAXSQ.BCST instruction to the active function.
// Operates on the global context.
func VPMAXSQ_BCST(ops ...operand.Op) { ctx.VPMAXSQ_BCST(ops...) }

// VPMAXSQ_BCST_Z: Maximum of Packed Signed Quadword Integers (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VPMAXSQ.BCST.Z m64 xmm k xmm
//	VPMAXSQ.BCST.Z m64 ymm k ymm
//	VPMAXSQ.BCST.Z m64 zmm k zmm
//
// Construct and append a VPMAXSQ.BCST.Z instruction to the active function.
func (c *Context) VPMAXSQ_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VPMAXSQ_BCST_Z(m, xyz, k, xyz1))
}

// VPMAXSQ_BCST_Z: Maximum of Packed Signed Quadword Integers (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VPMAXSQ.BCST.Z m64 xmm k xmm
//	VPMAXSQ.BCST.Z m64 ymm k ymm
//	VPMAXSQ.BCST.Z m64 zmm k zmm
//
// Construct and append a VPMAXSQ.BCST.Z instruction to the active function.
// Operates on the global context.
func VPMAXSQ_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VPMAXSQ_BCST_Z(m, xyz, k, xyz1) }

// VPMAXSQ_Z: Maximum of Packed Signed Quadword Integers (Zeroing Masking).
//
// Forms:
//
//	VPMAXSQ.Z m128 xmm k xmm
//	VPMAXSQ.Z m256 ymm k ymm
//	VPMAXSQ.Z xmm xmm k xmm
//	VPMAXSQ.Z ymm ymm k ymm
//	VPMAXSQ.Z m512 zmm k zmm
//	VPMAXSQ.Z zmm zmm k zmm
//
// Construct and append a VPMAXSQ.Z instruction to the active function.
func (c *Context) VPMAXSQ_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VPMAXSQ_Z(mxyz, xyz, k, xyz1))
}

// VPMAXSQ_Z: Maximum of Packed Signed Quadword Integers (Zeroing Masking).
//
// Forms:
//
//	VPMAXSQ.Z m128 xmm k xmm
//	VPMAXSQ.Z m256 ymm k ymm
//	VPMAXSQ.Z xmm xmm k xmm
//	VPMAXSQ.Z ymm ymm k ymm
//	VPMAXSQ.Z m512 zmm k zmm
//	VPMAXSQ.Z zmm zmm k zmm
//
// Construct and append a VPMAXSQ.Z instruction to the active function.
// Operates on the global context.
func VPMAXSQ_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPMAXSQ_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPMAXSW: Maximum of Packed Signed Word Integers.
//
// Forms:
//
//	VPMAXSW m256 ymm ymm
//	VPMAXSW ymm ymm ymm
//	VPMAXSW m128 xmm xmm
//	VPMAXSW xmm xmm xmm
//	VPMAXSW m128 xmm k xmm
//	VPMAXSW m256 ymm k ymm
//	VPMAXSW xmm xmm k xmm
//	VPMAXSW ymm ymm k ymm
//	VPMAXSW m512 zmm k zmm
//	VPMAXSW m512 zmm zmm
//	VPMAXSW zmm zmm k zmm
//	VPMAXSW zmm zmm zmm
//
// Construct and append a VPMAXSW instruction to the active function.
func (c *Context) VPMAXSW(ops ...operand.Op) {
	c.addinstruction(x86.VPMAXSW(ops...))
}

// VPMAXSW: Maximum of Packed Signed Word Integers.
//
// Forms:
//
//	VPMAXSW m256 ymm ymm
//	VPMAXSW ymm ymm ymm
//	VPMAXSW m128 xmm xmm
//	VPMAXSW xmm xmm xmm
//	VPMAXSW m128 xmm k xmm
//	VPMAXSW m256 ymm k ymm
//	VPMAXSW xmm xmm k xmm
//	VPMAXSW ymm ymm k ymm
//	VPMAXSW m512 zmm k zmm
//	VPMAXSW m512 zmm zmm
//	VPMAXSW zmm zmm k zmm
//	VPMAXSW zmm zmm zmm
//
// Construct and append a VPMAXSW instruction to the active function.
// Operates on the global context.
func VPMAXSW(ops ...operand.Op) { ctx.VPMAXSW(ops...) }

// VPMAXSW_Z: Maximum of Packed Signed Word Integers (Zeroing Masking).
//
// Forms:
//
//	VPMAXSW.Z m128 xmm k xmm
//	VPMAXSW.Z m256 ymm k ymm
//	VPMAXSW.Z xmm xmm k xmm
//	VPMAXSW.Z ymm ymm k ymm
//	VPMAXSW.Z m512 zmm k zmm
//	VPMAXSW.Z zmm zmm k zmm
//
// Construct and append a VPMAXSW.Z instruction to the active function.
func (c *Context) VPMAXSW_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VPMAXSW_Z(mxyz, xyz, k, xyz1))
}

// VPMAXSW_Z: Maximum of Packed Signed Word Integers (Zeroing Masking).
//
// Forms:
//
//	VPMAXSW.Z m128 xmm k xmm
//	VPMAXSW.Z m256 ymm k ymm
//	VPMAXSW.Z xmm xmm k xmm
//	VPMAXSW.Z ymm ymm k ymm
//	VPMAXSW.Z m512 zmm k zmm
//	VPMAXSW.Z zmm zmm k zmm
//
// Construct and append a VPMAXSW.Z instruction to the active function.
// Operates on the global context.
func VPMAXSW_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPMAXSW_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPMAXUB: Maximum of Packed Unsigned Byte Integers.
//
// Forms:
//
//	VPMAXUB m256 ymm ymm
//	VPMAXUB ymm ymm ymm
//	VPMAXUB m128 xmm xmm
//	VPMAXUB xmm xmm xmm
//	VPMAXUB m128 xmm k xmm
//	VPMAXUB m256 ymm k ymm
//	VPMAXUB xmm xmm k xmm
//	VPMAXUB ymm ymm k ymm
//	VPMAXUB m512 zmm k zmm
//	VPMAXUB m512 zmm zmm
//	VPMAXUB zmm zmm k zmm
//	VPMAXUB zmm zmm zmm
//
// Construct and append a VPMAXUB instruction to the active function.
func (c *Context) VPMAXUB(ops ...operand.Op) {
	c.addinstruction(x86.VPMAXUB(ops...))
}

// VPMAXUB: Maximum of Packed Unsigned Byte Integers.
//
// Forms:
//
//	VPMAXUB m256 ymm ymm
//	VPMAXUB ymm ymm ymm
//	VPMAXUB m128 xmm xmm
//	VPMAXUB xmm xmm xmm
//	VPMAXUB m128 xmm k xmm
//	VPMAXUB m256 ymm k ymm
//	VPMAXUB xmm xmm k xmm
//	VPMAXUB ymm ymm k ymm
//	VPMAXUB m512 zmm k zmm
//	VPMAXUB m512 zmm zmm
//	VPMAXUB zmm zmm k zmm
//	VPMAXUB zmm zmm zmm
//
// Construct and append a VPMAXUB instruction to the active function.
// Operates on the global context.
func VPMAXUB(ops ...operand.Op) { ctx.VPMAXUB(ops...) }

// VPMAXUB_Z: Maximum of Packed Unsigned Byte Integers (Zeroing Masking).
//
// Forms:
//
//	VPMAXUB.Z m128 xmm k xmm
//	VPMAXUB.Z m256 ymm k ymm
//	VPMAXUB.Z xmm xmm k xmm
//	VPMAXUB.Z ymm ymm k ymm
//	VPMAXUB.Z m512 zmm k zmm
//	VPMAXUB.Z zmm zmm k zmm
//
// Construct and append a VPMAXUB.Z instruction to the active function.
func (c *Context) VPMAXUB_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VPMAXUB_Z(mxyz, xyz, k, xyz1))
}

// VPMAXUB_Z: Maximum of Packed Unsigned Byte Integers (Zeroing Masking).
//
// Forms:
//
//	VPMAXUB.Z m128 xmm k xmm
//	VPMAXUB.Z m256 ymm k ymm
//	VPMAXUB.Z xmm xmm k xmm
//	VPMAXUB.Z ymm ymm k ymm
//	VPMAXUB.Z m512 zmm k zmm
//	VPMAXUB.Z zmm zmm k zmm
//
// Construct and append a VPMAXUB.Z instruction to the active function.
// Operates on the global context.
func VPMAXUB_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPMAXUB_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPMAXUD: Maximum of Packed Unsigned Doubleword Integers.
//
// Forms:
//
//	VPMAXUD m256 ymm ymm
//	VPMAXUD ymm ymm ymm
//	VPMAXUD m128 xmm xmm
//	VPMAXUD xmm xmm xmm
//	VPMAXUD m128 xmm k xmm
//	VPMAXUD m256 ymm k ymm
//	VPMAXUD xmm xmm k xmm
//	VPMAXUD ymm ymm k ymm
//	VPMAXUD m512 zmm k zmm
//	VPMAXUD m512 zmm zmm
//	VPMAXUD zmm zmm k zmm
//	VPMAXUD zmm zmm zmm
//
// Construct and append a VPMAXUD instruction to the active function.
func (c *Context) VPMAXUD(ops ...operand.Op) {
	c.addinstruction(x86.VPMAXUD(ops...))
}

// VPMAXUD: Maximum of Packed Unsigned Doubleword Integers.
//
// Forms:
//
//	VPMAXUD m256 ymm ymm
//	VPMAXUD ymm ymm ymm
//	VPMAXUD m128 xmm xmm
//	VPMAXUD xmm xmm xmm
//	VPMAXUD m128 xmm k xmm
//	VPMAXUD m256 ymm k ymm
//	VPMAXUD xmm xmm k xmm
//	VPMAXUD ymm ymm k ymm
//	VPMAXUD m512 zmm k zmm
//	VPMAXUD m512 zmm zmm
//	VPMAXUD zmm zmm k zmm
//	VPMAXUD zmm zmm zmm
//
// Construct and append a VPMAXUD instruction to the active function.
// Operates on the global context.
func VPMAXUD(ops ...operand.Op) { ctx.VPMAXUD(ops...) }

// VPMAXUD_BCST: Maximum of Packed Unsigned Doubleword Integers (Broadcast).
//
// Forms:
//
//	VPMAXUD.BCST m32 xmm k xmm
//	VPMAXUD.BCST m32 xmm xmm
//	VPMAXUD.BCST m32 ymm k ymm
//	VPMAXUD.BCST m32 ymm ymm
//	VPMAXUD.BCST m32 zmm k zmm
//	VPMAXUD.BCST m32 zmm zmm
//
// Construct and append a VPMAXUD.BCST instruction to the active function.
func (c *Context) VPMAXUD_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VPMAXUD_BCST(ops...))
}

// VPMAXUD_BCST: Maximum of Packed Unsigned Doubleword Integers (Broadcast).
//
// Forms:
//
//	VPMAXUD.BCST m32 xmm k xmm
//	VPMAXUD.BCST m32 xmm xmm
//	VPMAXUD.BCST m32 ymm k ymm
//	VPMAXUD.BCST m32 ymm ymm
//	VPMAXUD.BCST m32 zmm k zmm
//	VPMAXUD.BCST m32 zmm zmm
//
// Construct and append a VPMAXUD.BCST instruction to the active function.
// Operates on the global context.
func VPMAXUD_BCST(ops ...operand.Op) { ctx.VPMAXUD_BCST(ops...) }

// VPMAXUD_BCST_Z: Maximum of Packed Unsigned Doubleword Integers (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VPMAXUD.BCST.Z m32 xmm k xmm
//	VPMAXUD.BCST.Z m32 ymm k ymm
//	VPMAXUD.BCST.Z m32 zmm k zmm
//
// Construct and append a VPMAXUD.BCST.Z instruction to the active function.
func (c *Context) VPMAXUD_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VPMAXUD_BCST_Z(m, xyz, k, xyz1))
}

// VPMAXUD_BCST_Z: Maximum of Packed Unsigned Doubleword Integers (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VPMAXUD.BCST.Z m32 xmm k xmm
//	VPMAXUD.BCST.Z m32 ymm k ymm
//	VPMAXUD.BCST.Z m32 zmm k zmm
//
// Construct and append a VPMAXUD.BCST.Z instruction to the active function.
// Operates on the global context.
func VPMAXUD_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VPMAXUD_BCST_Z(m, xyz, k, xyz1) }

// VPMAXUD_Z: Maximum of Packed Unsigned Doubleword Integers (Zeroing Masking).
//
// Forms:
//
//	VPMAXUD.Z m128 xmm k xmm
//	VPMAXUD.Z m256 ymm k ymm
//	VPMAXUD.Z xmm xmm k xmm
//	VPMAXUD.Z ymm ymm k ymm
//	VPMAXUD.Z m512 zmm k zmm
//	VPMAXUD.Z zmm zmm k zmm
//
// Construct and append a VPMAXUD.Z instruction to the active function.
func (c *Context) VPMAXUD_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VPMAXUD_Z(mxyz, xyz, k, xyz1))
}

// VPMAXUD_Z: Maximum of Packed Unsigned Doubleword Integers (Zeroing Masking).
//
// Forms:
//
//	VPMAXUD.Z m128 xmm k xmm
//	VPMAXUD.Z m256 ymm k ymm
//	VPMAXUD.Z xmm xmm k xmm
//	VPMAXUD.Z ymm ymm k ymm
//	VPMAXUD.Z m512 zmm k zmm
//	VPMAXUD.Z zmm zmm k zmm
//
// Construct and append a VPMAXUD.Z instruction to the active function.
// Operates on the global context.
func VPMAXUD_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPMAXUD_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPMAXUQ: Maximum of Packed Unsigned Quadword Integers.
//
// Forms:
//
//	VPMAXUQ m128 xmm k xmm
//	VPMAXUQ m128 xmm xmm
//	VPMAXUQ m256 ymm k ymm
//	VPMAXUQ m256 ymm ymm
//	VPMAXUQ xmm xmm k xmm
//	VPMAXUQ xmm xmm xmm
//	VPMAXUQ ymm ymm k ymm
//	VPMAXUQ ymm ymm ymm
//	VPMAXUQ m512 zmm k zmm
//	VPMAXUQ m512 zmm zmm
//	VPMAXUQ zmm zmm k zmm
//	VPMAXUQ zmm zmm zmm
//
// Construct and append a VPMAXUQ instruction to the active function.
func (c *Context) VPMAXUQ(ops ...operand.Op) {
	c.addinstruction(x86.VPMAXUQ(ops...))
}

// VPMAXUQ: Maximum of Packed Unsigned Quadword Integers.
//
// Forms:
//
//	VPMAXUQ m128 xmm k xmm
//	VPMAXUQ m128 xmm xmm
//	VPMAXUQ m256 ymm k ymm
//	VPMAXUQ m256 ymm ymm
//	VPMAXUQ xmm xmm k xmm
//	VPMAXUQ xmm xmm xmm
//	VPMAXUQ ymm ymm k ymm
//	VPMAXUQ ymm ymm ymm
//	VPMAXUQ m512 zmm k zmm
//	VPMAXUQ m512 zmm zmm
//	VPMAXUQ zmm zmm k zmm
//	VPMAXUQ zmm zmm zmm
//
// Construct and append a VPMAXUQ instruction to the active function.
// Operates on the global context.
func VPMAXUQ(ops ...operand.Op) { ctx.VPMAXUQ(ops...) }

// VPMAXUQ_BCST: Maximum of Packed Unsigned Quadword Integers (Broadcast).
//
// Forms:
//
//	VPMAXUQ.BCST m64 xmm k xmm
//	VPMAXUQ.BCST m64 xmm xmm
//	VPMAXUQ.BCST m64 ymm k ymm
//	VPMAXUQ.BCST m64 ymm ymm
//	VPMAXUQ.BCST m64 zmm k zmm
//	VPMAXUQ.BCST m64 zmm zmm
//
// Construct and append a VPMAXUQ.BCST instruction to the active function.
func (c *Context) VPMAXUQ_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VPMAXUQ_BCST(ops...))
}

// VPMAXUQ_BCST: Maximum of Packed Unsigned Quadword Integers (Broadcast).
//
// Forms:
//
//	VPMAXUQ.BCST m64 xmm k xmm
//	VPMAXUQ.BCST m64 xmm xmm
//	VPMAXUQ.BCST m64 ymm k ymm
//	VPMAXUQ.BCST m64 ymm ymm
//	VPMAXUQ.BCST m64 zmm k zmm
//	VPMAXUQ.BCST m64 zmm zmm
//
// Construct and append a VPMAXUQ.BCST instruction to the active function.
// Operates on the global context.
func VPMAXUQ_BCST(ops ...operand.Op) { ctx.VPMAXUQ_BCST(ops...) }

// VPMAXUQ_BCST_Z: Maximum of Packed Unsigned Quadword Integers (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VPMAXUQ.BCST.Z m64 xmm k xmm
//	VPMAXUQ.BCST.Z m64 ymm k ymm
//	VPMAXUQ.BCST.Z m64 zmm k zmm
//
// Construct and append a VPMAXUQ.BCST.Z instruction to the active function.
func (c *Context) VPMAXUQ_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VPMAXUQ_BCST_Z(m, xyz, k, xyz1))
}

// VPMAXUQ_BCST_Z: Maximum of Packed Unsigned Quadword Integers (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VPMAXUQ.BCST.Z m64 xmm k xmm
//	VPMAXUQ.BCST.Z m64 ymm k ymm
//	VPMAXUQ.BCST.Z m64 zmm k zmm
//
// Construct and append a VPMAXUQ.BCST.Z instruction to the active function.
// Operates on the global context.
func VPMAXUQ_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VPMAXUQ_BCST_Z(m, xyz, k, xyz1) }

// VPMAXUQ_Z: Maximum of Packed Unsigned Quadword Integers (Zeroing Masking).
//
// Forms:
//
//	VPMAXUQ.Z m128 xmm k xmm
//	VPMAXUQ.Z m256 ymm k ymm
//	VPMAXUQ.Z xmm xmm k xmm
//	VPMAXUQ.Z ymm ymm k ymm
//	VPMAXUQ.Z m512 zmm k zmm
//	VPMAXUQ.Z zmm zmm k zmm
//
// Construct and append a VPMAXUQ.Z instruction to the active function.
func (c *Context) VPMAXUQ_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VPMAXUQ_Z(mxyz, xyz, k, xyz1))
}

// VPMAXUQ_Z: Maximum of Packed Unsigned Quadword Integers (Zeroing Masking).
//
// Forms:
//
//	VPMAXUQ.Z m128 xmm k xmm
//	VPMAXUQ.Z m256 ymm k ymm
//	VPMAXUQ.Z xmm xmm k xmm
//	VPMAXUQ.Z ymm ymm k ymm
//	VPMAXUQ.Z m512 zmm k zmm
//	VPMAXUQ.Z zmm zmm k zmm
//
// Construct and append a VPMAXUQ.Z instruction to the active function.
// Operates on the global context.
func VPMAXUQ_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPMAXUQ_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPMAXUW: Maximum of Packed Unsigned Word Integers.
//
// Forms:
//
//	VPMAXUW m256 ymm ymm
//	VPMAXUW ymm ymm ymm
//	VPMAXUW m128 xmm xmm
//	VPMAXUW xmm xmm xmm
//	VPMAXUW m128 xmm k xmm
//	VPMAXUW m256 ymm k ymm
//	VPMAXUW xmm xmm k xmm
//	VPMAXUW ymm ymm k ymm
//	VPMAXUW m512 zmm k zmm
//	VPMAXUW m512 zmm zmm
//	VPMAXUW zmm zmm k zmm
//	VPMAXUW zmm zmm zmm
//
// Construct and append a VPMAXUW instruction to the active function.
func (c *Context) VPMAXUW(ops ...operand.Op) {
	c.addinstruction(x86.VPMAXUW(ops...))
}

// VPMAXUW: Maximum of Packed Unsigned Word Integers.
//
// Forms:
//
//	VPMAXUW m256 ymm ymm
//	VPMAXUW ymm ymm ymm
//	VPMAXUW m128 xmm xmm
//	VPMAXUW xmm xmm xmm
//	VPMAXUW m128 xmm k xmm
//	VPMAXUW m256 ymm k ymm
//	VPMAXUW xmm xmm k xmm
//	VPMAXUW ymm ymm k ymm
//	VPMAXUW m512 zmm k zmm
//	VPMAXUW m512 zmm zmm
//	VPMAXUW zmm zmm k zmm
//	VPMAXUW zmm zmm zmm
//
// Construct and append a VPMAXUW instruction to the active function.
// Operates on the global context.
func VPMAXUW(ops ...operand.Op) { ctx.VPMAXUW(ops...) }

// VPMAXUW_Z: Maximum of Packed Unsigned Word Integers (Zeroing Masking).
//
// Forms:
//
//	VPMAXUW.Z m128 xmm k xmm
//	VPMAXUW.Z m256 ymm k ymm
//	VPMAXUW.Z xmm xmm k xmm
//	VPMAXUW.Z ymm ymm k ymm
//	VPMAXUW.Z m512 zmm k zmm
//	VPMAXUW.Z zmm zmm k zmm
//
// Construct and append a VPMAXUW.Z instruction to the active function.
func (c *Context) VPMAXUW_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VPMAXUW_Z(mxyz, xyz, k, xyz1))
}

// VPMAXUW_Z: Maximum of Packed Unsigned Word Integers (Zeroing Masking).
//
// Forms:
//
//	VPMAXUW.Z m128 xmm k xmm
//	VPMAXUW.Z m256 ymm k ymm
//	VPMAXUW.Z xmm xmm k xmm
//	VPMAXUW.Z ymm ymm k ymm
//	VPMAXUW.Z m512 zmm k zmm
//	VPMAXUW.Z zmm zmm k zmm
//
// Construct and append a VPMAXUW.Z instruction to the active function.
// Operates on the global context.
func VPMAXUW_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPMAXUW_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPMINSB: Minimum of Packed Signed Byte Integers.
//
// Forms:
//
//	VPMINSB m256 ymm ymm
//	VPMINSB ymm ymm ymm
//	VPMINSB m128 xmm xmm
//	VPMINSB xmm xmm xmm
//	VPMINSB m128 xmm k xmm
//	VPMINSB m256 ymm k ymm
//	VPMINSB xmm xmm k xmm
//	VPMINSB ymm ymm k ymm
//	VPMINSB m512 zmm k zmm
//	VPMINSB m512 zmm zmm
//	VPMINSB zmm zmm k zmm
//	VPMINSB zmm zmm zmm
//
// Construct and append a VPMINSB instruction to the active function.
func (c *Context) VPMINSB(ops ...operand.Op) {
	c.addinstruction(x86.VPMINSB(ops...))
}

// VPMINSB: Minimum of Packed Signed Byte Integers.
//
// Forms:
//
//	VPMINSB m256 ymm ymm
//	VPMINSB ymm ymm ymm
//	VPMINSB m128 xmm xmm
//	VPMINSB xmm xmm xmm
//	VPMINSB m128 xmm k xmm
//	VPMINSB m256 ymm k ymm
//	VPMINSB xmm xmm k xmm
//	VPMINSB ymm ymm k ymm
//	VPMINSB m512 zmm k zmm
//	VPMINSB m512 zmm zmm
//	VPMINSB zmm zmm k zmm
//	VPMINSB zmm zmm zmm
//
// Construct and append a VPMINSB instruction to the active function.
// Operates on the global context.
func VPMINSB(ops ...operand.Op) { ctx.VPMINSB(ops...) }

// VPMINSB_Z: Minimum of Packed Signed Byte Integers (Zeroing Masking).
//
// Forms:
//
//	VPMINSB.Z m128 xmm k xmm
//	VPMINSB.Z m256 ymm k ymm
//	VPMINSB.Z xmm xmm k xmm
//	VPMINSB.Z ymm ymm k ymm
//	VPMINSB.Z m512 zmm k zmm
//	VPMINSB.Z zmm zmm k zmm
//
// Construct and append a VPMINSB.Z instruction to the active function.
func (c *Context) VPMINSB_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VPMINSB_Z(mxyz, xyz, k, xyz1))
}

// VPMINSB_Z: Minimum of Packed Signed Byte Integers (Zeroing Masking).
//
// Forms:
//
//	VPMINSB.Z m128 xmm k xmm
//	VPMINSB.Z m256 ymm k ymm
//	VPMINSB.Z xmm xmm k xmm
//	VPMINSB.Z ymm ymm k ymm
//	VPMINSB.Z m512 zmm k zmm
//	VPMINSB.Z zmm zmm k zmm
//
// Construct and append a VPMINSB.Z instruction to the active function.
// Operates on the global context.
func VPMINSB_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPMINSB_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPMINSD: Minimum of Packed Signed Doubleword Integers.
//
// Forms:
//
//	VPMINSD m256 ymm ymm
//	VPMINSD ymm ymm ymm
//	VPMINSD m128 xmm xmm
//	VPMINSD xmm xmm xmm
//	VPMINSD m128 xmm k xmm
//	VPMINSD m256 ymm k ymm
//	VPMINSD xmm xmm k xmm
//	VPMINSD ymm ymm k ymm
//	VPMINSD m512 zmm k zmm
//	VPMINSD m512 zmm zmm
//	VPMINSD zmm zmm k zmm
//	VPMINSD zmm zmm zmm
//
// Construct and append a VPMINSD instruction to the active function.
func (c *Context) VPMINSD(ops ...operand.Op) {
	c.addinstruction(x86.VPMINSD(ops...))
}

// VPMINSD: Minimum of Packed Signed Doubleword Integers.
//
// Forms:
//
//	VPMINSD m256 ymm ymm
//	VPMINSD ymm ymm ymm
//	VPMINSD m128 xmm xmm
//	VPMINSD xmm xmm xmm
//	VPMINSD m128 xmm k xmm
//	VPMINSD m256 ymm k ymm
//	VPMINSD xmm xmm k xmm
//	VPMINSD ymm ymm k ymm
//	VPMINSD m512 zmm k zmm
//	VPMINSD m512 zmm zmm
//	VPMINSD zmm zmm k zmm
//	VPMINSD zmm zmm zmm
//
// Construct and append a VPMINSD instruction to the active function.
// Operates on the global context.
func VPMINSD(ops ...operand.Op) { ctx.VPMINSD(ops...) }

// VPMINSD_BCST: Minimum of Packed Signed Doubleword Integers (Broadcast).
//
// Forms:
//
//	VPMINSD.BCST m32 xmm k xmm
//	VPMINSD.BCST m32 xmm xmm
//	VPMINSD.BCST m32 ymm k ymm
//	VPMINSD.BCST m32 ymm ymm
//	VPMINSD.BCST m32 zmm k zmm
//	VPMINSD.BCST m32 zmm zmm
//
// Construct and append a VPMINSD.BCST instruction to the active function.
func (c *Context) VPMINSD_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VPMINSD_BCST(ops...))
}

// VPMINSD_BCST: Minimum of Packed Signed Doubleword Integers (Broadcast).
//
// Forms:
//
//	VPMINSD.BCST m32 xmm k xmm
//	VPMINSD.BCST m32 xmm xmm
//	VPMINSD.BCST m32 ymm k ymm
//	VPMINSD.BCST m32 ymm ymm
//	VPMINSD.BCST m32 zmm k zmm
//	VPMINSD.BCST m32 zmm zmm
//
// Construct and append a VPMINSD.BCST instruction to the active function.
// Operates on the global context.
func VPMINSD_BCST(ops ...operand.Op) { ctx.VPMINSD_BCST(ops...) }

// VPMINSD_BCST_Z: Minimum of Packed Signed Doubleword Integers (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VPMINSD.BCST.Z m32 xmm k xmm
//	VPMINSD.BCST.Z m32 ymm k ymm
//	VPMINSD.BCST.Z m32 zmm k zmm
//
// Construct and append a VPMINSD.BCST.Z instruction to the active function.
func (c *Context) VPMINSD_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VPMINSD_BCST_Z(m, xyz, k, xyz1))
}

// VPMINSD_BCST_Z: Minimum of Packed Signed Doubleword Integers (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VPMINSD.BCST.Z m32 xmm k xmm
//	VPMINSD.BCST.Z m32 ymm k ymm
//	VPMINSD.BCST.Z m32 zmm k zmm
//
// Construct and append a VPMINSD.BCST.Z instruction to the active function.
// Operates on the global context.
func VPMINSD_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VPMINSD_BCST_Z(m, xyz, k, xyz1) }

// VPMINSD_Z: Minimum of Packed Signed Doubleword Integers (Zeroing Masking).
//
// Forms:
//
//	VPMINSD.Z m128 xmm k xmm
//	VPMINSD.Z m256 ymm k ymm
//	VPMINSD.Z xmm xmm k xmm
//	VPMINSD.Z ymm ymm k ymm
//	VPMINSD.Z m512 zmm k zmm
//	VPMINSD.Z zmm zmm k zmm
//
// Construct and append a VPMINSD.Z instruction to the active function.
func (c *Context) VPMINSD_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VPMINSD_Z(mxyz, xyz, k, xyz1))
}

// VPMINSD_Z: Minimum of Packed Signed Doubleword Integers (Zeroing Masking).
//
// Forms:
//
//	VPMINSD.Z m128 xmm k xmm
//	VPMINSD.Z m256 ymm k ymm
//	VPMINSD.Z xmm xmm k xmm
//	VPMINSD.Z ymm ymm k ymm
//	VPMINSD.Z m512 zmm k zmm
//	VPMINSD.Z zmm zmm k zmm
//
// Construct and append a VPMINSD.Z instruction to the active function.
// Operates on the global context.
func VPMINSD_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPMINSD_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPMINSQ: Minimum of Packed Signed Quadword Integers.
//
// Forms:
//
//	VPMINSQ m128 xmm k xmm
//	VPMINSQ m128 xmm xmm
//	VPMINSQ m256 ymm k ymm
//	VPMINSQ m256 ymm ymm
//	VPMINSQ xmm xmm k xmm
//	VPMINSQ xmm xmm xmm
//	VPMINSQ ymm ymm k ymm
//	VPMINSQ ymm ymm ymm
//	VPMINSQ m512 zmm k zmm
//	VPMINSQ m512 zmm zmm
//	VPMINSQ zmm zmm k zmm
//	VPMINSQ zmm zmm zmm
//
// Construct and append a VPMINSQ instruction to the active function.
func (c *Context) VPMINSQ(ops ...operand.Op) {
	c.addinstruction(x86.VPMINSQ(ops...))
}

// VPMINSQ: Minimum of Packed Signed Quadword Integers.
//
// Forms:
//
//	VPMINSQ m128 xmm k xmm
//	VPMINSQ m128 xmm xmm
//	VPMINSQ m256 ymm k ymm
//	VPMINSQ m256 ymm ymm
//	VPMINSQ xmm xmm k xmm
//	VPMINSQ xmm xmm xmm
//	VPMINSQ ymm ymm k ymm
//	VPMINSQ ymm ymm ymm
//	VPMINSQ m512 zmm k zmm
//	VPMINSQ m512 zmm zmm
//	VPMINSQ zmm zmm k zmm
//	VPMINSQ zmm zmm zmm
//
// Construct and append a VPMINSQ instruction to the active function.
// Operates on the global context.
func VPMINSQ(ops ...operand.Op) { ctx.VPMINSQ(ops...) }

// VPMINSQ_BCST: Minimum of Packed Signed Quadword Integers (Broadcast).
//
// Forms:
//
//	VPMINSQ.BCST m64 xmm k xmm
//	VPMINSQ.BCST m64 xmm xmm
//	VPMINSQ.BCST m64 ymm k ymm
//	VPMINSQ.BCST m64 ymm ymm
//	VPMINSQ.BCST m64 zmm k zmm
//	VPMINSQ.BCST m64 zmm zmm
//
// Construct and append a VPMINSQ.BCST instruction to the active function.
func (c *Context) VPMINSQ_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VPMINSQ_BCST(ops...))
}

// VPMINSQ_BCST: Minimum of Packed Signed Quadword Integers (Broadcast).
//
// Forms:
//
//	VPMINSQ.BCST m64 xmm k xmm
//	VPMINSQ.BCST m64 xmm xmm
//	VPMINSQ.BCST m64 ymm k ymm
//	VPMINSQ.BCST m64 ymm ymm
//	VPMINSQ.BCST m64 zmm k zmm
//	VPMINSQ.BCST m64 zmm zmm
//
// Construct and append a VPMINSQ.BCST instruction to the active function.
// Operates on the global context.
func VPMINSQ_BCST(ops ...operand.Op) { ctx.VPMINSQ_BCST(ops...) }
|
|
|
|
// VPMINSQ_BCST_Z: Minimum of Packed Signed Quadword Integers (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMINSQ.BCST.Z m64 xmm k xmm
|
|
// VPMINSQ.BCST.Z m64 ymm k ymm
|
|
// VPMINSQ.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VPMINSQ.BCST.Z instruction to the active function.
|
|
func (c *Context) VPMINSQ_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPMINSQ_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPMINSQ_BCST_Z: Minimum of Packed Signed Quadword Integers (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMINSQ.BCST.Z m64 xmm k xmm
|
|
// VPMINSQ.BCST.Z m64 ymm k ymm
|
|
// VPMINSQ.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VPMINSQ.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMINSQ_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VPMINSQ_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VPMINSQ_Z: Minimum of Packed Signed Quadword Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMINSQ.Z m128 xmm k xmm
|
|
// VPMINSQ.Z m256 ymm k ymm
|
|
// VPMINSQ.Z xmm xmm k xmm
|
|
// VPMINSQ.Z ymm ymm k ymm
|
|
// VPMINSQ.Z m512 zmm k zmm
|
|
// VPMINSQ.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPMINSQ.Z instruction to the active function.
|
|
func (c *Context) VPMINSQ_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPMINSQ_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPMINSQ_Z: Minimum of Packed Signed Quadword Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMINSQ.Z m128 xmm k xmm
|
|
// VPMINSQ.Z m256 ymm k ymm
|
|
// VPMINSQ.Z xmm xmm k xmm
|
|
// VPMINSQ.Z ymm ymm k ymm
|
|
// VPMINSQ.Z m512 zmm k zmm
|
|
// VPMINSQ.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPMINSQ.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMINSQ_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPMINSQ_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPMINSW: Minimum of Packed Signed Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMINSW m256 ymm ymm
|
|
// VPMINSW ymm ymm ymm
|
|
// VPMINSW m128 xmm xmm
|
|
// VPMINSW xmm xmm xmm
|
|
// VPMINSW m128 xmm k xmm
|
|
// VPMINSW m256 ymm k ymm
|
|
// VPMINSW xmm xmm k xmm
|
|
// VPMINSW ymm ymm k ymm
|
|
// VPMINSW m512 zmm k zmm
|
|
// VPMINSW m512 zmm zmm
|
|
// VPMINSW zmm zmm k zmm
|
|
// VPMINSW zmm zmm zmm
|
|
//
|
|
// Construct and append a VPMINSW instruction to the active function.
|
|
func (c *Context) VPMINSW(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPMINSW(ops...))
|
|
}
|
|
|
|
// VPMINSW: Minimum of Packed Signed Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMINSW m256 ymm ymm
|
|
// VPMINSW ymm ymm ymm
|
|
// VPMINSW m128 xmm xmm
|
|
// VPMINSW xmm xmm xmm
|
|
// VPMINSW m128 xmm k xmm
|
|
// VPMINSW m256 ymm k ymm
|
|
// VPMINSW xmm xmm k xmm
|
|
// VPMINSW ymm ymm k ymm
|
|
// VPMINSW m512 zmm k zmm
|
|
// VPMINSW m512 zmm zmm
|
|
// VPMINSW zmm zmm k zmm
|
|
// VPMINSW zmm zmm zmm
|
|
//
|
|
// Construct and append a VPMINSW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMINSW(ops ...operand.Op) { ctx.VPMINSW(ops...) }
|
|
|
|
// VPMINSW_Z: Minimum of Packed Signed Word Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMINSW.Z m128 xmm k xmm
|
|
// VPMINSW.Z m256 ymm k ymm
|
|
// VPMINSW.Z xmm xmm k xmm
|
|
// VPMINSW.Z ymm ymm k ymm
|
|
// VPMINSW.Z m512 zmm k zmm
|
|
// VPMINSW.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPMINSW.Z instruction to the active function.
|
|
func (c *Context) VPMINSW_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPMINSW_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPMINSW_Z: Minimum of Packed Signed Word Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMINSW.Z m128 xmm k xmm
|
|
// VPMINSW.Z m256 ymm k ymm
|
|
// VPMINSW.Z xmm xmm k xmm
|
|
// VPMINSW.Z ymm ymm k ymm
|
|
// VPMINSW.Z m512 zmm k zmm
|
|
// VPMINSW.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPMINSW.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMINSW_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPMINSW_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPMINUB: Minimum of Packed Unsigned Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMINUB m256 ymm ymm
|
|
// VPMINUB ymm ymm ymm
|
|
// VPMINUB m128 xmm xmm
|
|
// VPMINUB xmm xmm xmm
|
|
// VPMINUB m128 xmm k xmm
|
|
// VPMINUB m256 ymm k ymm
|
|
// VPMINUB xmm xmm k xmm
|
|
// VPMINUB ymm ymm k ymm
|
|
// VPMINUB m512 zmm k zmm
|
|
// VPMINUB m512 zmm zmm
|
|
// VPMINUB zmm zmm k zmm
|
|
// VPMINUB zmm zmm zmm
|
|
//
|
|
// Construct and append a VPMINUB instruction to the active function.
|
|
func (c *Context) VPMINUB(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPMINUB(ops...))
|
|
}
|
|
|
|
// VPMINUB: Minimum of Packed Unsigned Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMINUB m256 ymm ymm
|
|
// VPMINUB ymm ymm ymm
|
|
// VPMINUB m128 xmm xmm
|
|
// VPMINUB xmm xmm xmm
|
|
// VPMINUB m128 xmm k xmm
|
|
// VPMINUB m256 ymm k ymm
|
|
// VPMINUB xmm xmm k xmm
|
|
// VPMINUB ymm ymm k ymm
|
|
// VPMINUB m512 zmm k zmm
|
|
// VPMINUB m512 zmm zmm
|
|
// VPMINUB zmm zmm k zmm
|
|
// VPMINUB zmm zmm zmm
|
|
//
|
|
// Construct and append a VPMINUB instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMINUB(ops ...operand.Op) { ctx.VPMINUB(ops...) }
|
|
|
|
// VPMINUB_Z: Minimum of Packed Unsigned Byte Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMINUB.Z m128 xmm k xmm
|
|
// VPMINUB.Z m256 ymm k ymm
|
|
// VPMINUB.Z xmm xmm k xmm
|
|
// VPMINUB.Z ymm ymm k ymm
|
|
// VPMINUB.Z m512 zmm k zmm
|
|
// VPMINUB.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPMINUB.Z instruction to the active function.
|
|
func (c *Context) VPMINUB_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPMINUB_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPMINUB_Z: Minimum of Packed Unsigned Byte Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMINUB.Z m128 xmm k xmm
|
|
// VPMINUB.Z m256 ymm k ymm
|
|
// VPMINUB.Z xmm xmm k xmm
|
|
// VPMINUB.Z ymm ymm k ymm
|
|
// VPMINUB.Z m512 zmm k zmm
|
|
// VPMINUB.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPMINUB.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMINUB_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPMINUB_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPMINUD: Minimum of Packed Unsigned Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMINUD m256 ymm ymm
|
|
// VPMINUD ymm ymm ymm
|
|
// VPMINUD m128 xmm xmm
|
|
// VPMINUD xmm xmm xmm
|
|
// VPMINUD m128 xmm k xmm
|
|
// VPMINUD m256 ymm k ymm
|
|
// VPMINUD xmm xmm k xmm
|
|
// VPMINUD ymm ymm k ymm
|
|
// VPMINUD m512 zmm k zmm
|
|
// VPMINUD m512 zmm zmm
|
|
// VPMINUD zmm zmm k zmm
|
|
// VPMINUD zmm zmm zmm
|
|
//
|
|
// Construct and append a VPMINUD instruction to the active function.
|
|
func (c *Context) VPMINUD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPMINUD(ops...))
|
|
}
|
|
|
|
// VPMINUD: Minimum of Packed Unsigned Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMINUD m256 ymm ymm
|
|
// VPMINUD ymm ymm ymm
|
|
// VPMINUD m128 xmm xmm
|
|
// VPMINUD xmm xmm xmm
|
|
// VPMINUD m128 xmm k xmm
|
|
// VPMINUD m256 ymm k ymm
|
|
// VPMINUD xmm xmm k xmm
|
|
// VPMINUD ymm ymm k ymm
|
|
// VPMINUD m512 zmm k zmm
|
|
// VPMINUD m512 zmm zmm
|
|
// VPMINUD zmm zmm k zmm
|
|
// VPMINUD zmm zmm zmm
|
|
//
|
|
// Construct and append a VPMINUD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMINUD(ops ...operand.Op) { ctx.VPMINUD(ops...) }
|
|
|
|
// VPMINUD_BCST: Minimum of Packed Unsigned Doubleword Integers (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMINUD.BCST m32 xmm k xmm
|
|
// VPMINUD.BCST m32 xmm xmm
|
|
// VPMINUD.BCST m32 ymm k ymm
|
|
// VPMINUD.BCST m32 ymm ymm
|
|
// VPMINUD.BCST m32 zmm k zmm
|
|
// VPMINUD.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VPMINUD.BCST instruction to the active function.
|
|
func (c *Context) VPMINUD_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPMINUD_BCST(ops...))
|
|
}
|
|
|
|
// VPMINUD_BCST: Minimum of Packed Unsigned Doubleword Integers (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMINUD.BCST m32 xmm k xmm
|
|
// VPMINUD.BCST m32 xmm xmm
|
|
// VPMINUD.BCST m32 ymm k ymm
|
|
// VPMINUD.BCST m32 ymm ymm
|
|
// VPMINUD.BCST m32 zmm k zmm
|
|
// VPMINUD.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VPMINUD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMINUD_BCST(ops ...operand.Op) { ctx.VPMINUD_BCST(ops...) }
|
|
|
|
// VPMINUD_BCST_Z: Minimum of Packed Unsigned Doubleword Integers (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMINUD.BCST.Z m32 xmm k xmm
|
|
// VPMINUD.BCST.Z m32 ymm k ymm
|
|
// VPMINUD.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VPMINUD.BCST.Z instruction to the active function.
|
|
func (c *Context) VPMINUD_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPMINUD_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPMINUD_BCST_Z: Minimum of Packed Unsigned Doubleword Integers (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMINUD.BCST.Z m32 xmm k xmm
|
|
// VPMINUD.BCST.Z m32 ymm k ymm
|
|
// VPMINUD.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VPMINUD.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMINUD_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VPMINUD_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VPMINUD_Z: Minimum of Packed Unsigned Doubleword Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMINUD.Z m128 xmm k xmm
|
|
// VPMINUD.Z m256 ymm k ymm
|
|
// VPMINUD.Z xmm xmm k xmm
|
|
// VPMINUD.Z ymm ymm k ymm
|
|
// VPMINUD.Z m512 zmm k zmm
|
|
// VPMINUD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPMINUD.Z instruction to the active function.
|
|
func (c *Context) VPMINUD_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPMINUD_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPMINUD_Z: Minimum of Packed Unsigned Doubleword Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMINUD.Z m128 xmm k xmm
|
|
// VPMINUD.Z m256 ymm k ymm
|
|
// VPMINUD.Z xmm xmm k xmm
|
|
// VPMINUD.Z ymm ymm k ymm
|
|
// VPMINUD.Z m512 zmm k zmm
|
|
// VPMINUD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPMINUD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMINUD_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPMINUD_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPMINUQ: Minimum of Packed Unsigned Quadword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMINUQ m128 xmm k xmm
|
|
// VPMINUQ m128 xmm xmm
|
|
// VPMINUQ m256 ymm k ymm
|
|
// VPMINUQ m256 ymm ymm
|
|
// VPMINUQ xmm xmm k xmm
|
|
// VPMINUQ xmm xmm xmm
|
|
// VPMINUQ ymm ymm k ymm
|
|
// VPMINUQ ymm ymm ymm
|
|
// VPMINUQ m512 zmm k zmm
|
|
// VPMINUQ m512 zmm zmm
|
|
// VPMINUQ zmm zmm k zmm
|
|
// VPMINUQ zmm zmm zmm
|
|
//
|
|
// Construct and append a VPMINUQ instruction to the active function.
|
|
func (c *Context) VPMINUQ(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPMINUQ(ops...))
|
|
}
|
|
|
|
// VPMINUQ: Minimum of Packed Unsigned Quadword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMINUQ m128 xmm k xmm
|
|
// VPMINUQ m128 xmm xmm
|
|
// VPMINUQ m256 ymm k ymm
|
|
// VPMINUQ m256 ymm ymm
|
|
// VPMINUQ xmm xmm k xmm
|
|
// VPMINUQ xmm xmm xmm
|
|
// VPMINUQ ymm ymm k ymm
|
|
// VPMINUQ ymm ymm ymm
|
|
// VPMINUQ m512 zmm k zmm
|
|
// VPMINUQ m512 zmm zmm
|
|
// VPMINUQ zmm zmm k zmm
|
|
// VPMINUQ zmm zmm zmm
|
|
//
|
|
// Construct and append a VPMINUQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMINUQ(ops ...operand.Op) { ctx.VPMINUQ(ops...) }
|
|
|
|
// VPMINUQ_BCST: Minimum of Packed Unsigned Quadword Integers (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMINUQ.BCST m64 xmm k xmm
|
|
// VPMINUQ.BCST m64 xmm xmm
|
|
// VPMINUQ.BCST m64 ymm k ymm
|
|
// VPMINUQ.BCST m64 ymm ymm
|
|
// VPMINUQ.BCST m64 zmm k zmm
|
|
// VPMINUQ.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VPMINUQ.BCST instruction to the active function.
|
|
func (c *Context) VPMINUQ_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPMINUQ_BCST(ops...))
|
|
}
|
|
|
|
// VPMINUQ_BCST: Minimum of Packed Unsigned Quadword Integers (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMINUQ.BCST m64 xmm k xmm
|
|
// VPMINUQ.BCST m64 xmm xmm
|
|
// VPMINUQ.BCST m64 ymm k ymm
|
|
// VPMINUQ.BCST m64 ymm ymm
|
|
// VPMINUQ.BCST m64 zmm k zmm
|
|
// VPMINUQ.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VPMINUQ.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMINUQ_BCST(ops ...operand.Op) { ctx.VPMINUQ_BCST(ops...) }
|
|
|
|
// VPMINUQ_BCST_Z: Minimum of Packed Unsigned Quadword Integers (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMINUQ.BCST.Z m64 xmm k xmm
|
|
// VPMINUQ.BCST.Z m64 ymm k ymm
|
|
// VPMINUQ.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VPMINUQ.BCST.Z instruction to the active function.
|
|
func (c *Context) VPMINUQ_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPMINUQ_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPMINUQ_BCST_Z: Minimum of Packed Unsigned Quadword Integers (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMINUQ.BCST.Z m64 xmm k xmm
|
|
// VPMINUQ.BCST.Z m64 ymm k ymm
|
|
// VPMINUQ.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VPMINUQ.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMINUQ_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VPMINUQ_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VPMINUQ_Z: Minimum of Packed Unsigned Quadword Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMINUQ.Z m128 xmm k xmm
|
|
// VPMINUQ.Z m256 ymm k ymm
|
|
// VPMINUQ.Z xmm xmm k xmm
|
|
// VPMINUQ.Z ymm ymm k ymm
|
|
// VPMINUQ.Z m512 zmm k zmm
|
|
// VPMINUQ.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPMINUQ.Z instruction to the active function.
|
|
func (c *Context) VPMINUQ_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPMINUQ_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPMINUQ_Z: Minimum of Packed Unsigned Quadword Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMINUQ.Z m128 xmm k xmm
|
|
// VPMINUQ.Z m256 ymm k ymm
|
|
// VPMINUQ.Z xmm xmm k xmm
|
|
// VPMINUQ.Z ymm ymm k ymm
|
|
// VPMINUQ.Z m512 zmm k zmm
|
|
// VPMINUQ.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPMINUQ.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMINUQ_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPMINUQ_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPMINUW: Minimum of Packed Unsigned Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMINUW m256 ymm ymm
|
|
// VPMINUW ymm ymm ymm
|
|
// VPMINUW m128 xmm xmm
|
|
// VPMINUW xmm xmm xmm
|
|
// VPMINUW m128 xmm k xmm
|
|
// VPMINUW m256 ymm k ymm
|
|
// VPMINUW xmm xmm k xmm
|
|
// VPMINUW ymm ymm k ymm
|
|
// VPMINUW m512 zmm k zmm
|
|
// VPMINUW m512 zmm zmm
|
|
// VPMINUW zmm zmm k zmm
|
|
// VPMINUW zmm zmm zmm
|
|
//
|
|
// Construct and append a VPMINUW instruction to the active function.
|
|
func (c *Context) VPMINUW(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPMINUW(ops...))
|
|
}
|
|
|
|
// VPMINUW: Minimum of Packed Unsigned Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMINUW m256 ymm ymm
|
|
// VPMINUW ymm ymm ymm
|
|
// VPMINUW m128 xmm xmm
|
|
// VPMINUW xmm xmm xmm
|
|
// VPMINUW m128 xmm k xmm
|
|
// VPMINUW m256 ymm k ymm
|
|
// VPMINUW xmm xmm k xmm
|
|
// VPMINUW ymm ymm k ymm
|
|
// VPMINUW m512 zmm k zmm
|
|
// VPMINUW m512 zmm zmm
|
|
// VPMINUW zmm zmm k zmm
|
|
// VPMINUW zmm zmm zmm
|
|
//
|
|
// Construct and append a VPMINUW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMINUW(ops ...operand.Op) { ctx.VPMINUW(ops...) }
|
|
|
|
// VPMINUW_Z: Minimum of Packed Unsigned Word Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMINUW.Z m128 xmm k xmm
|
|
// VPMINUW.Z m256 ymm k ymm
|
|
// VPMINUW.Z xmm xmm k xmm
|
|
// VPMINUW.Z ymm ymm k ymm
|
|
// VPMINUW.Z m512 zmm k zmm
|
|
// VPMINUW.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPMINUW.Z instruction to the active function.
|
|
func (c *Context) VPMINUW_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPMINUW_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPMINUW_Z: Minimum of Packed Unsigned Word Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMINUW.Z m128 xmm k xmm
|
|
// VPMINUW.Z m256 ymm k ymm
|
|
// VPMINUW.Z xmm xmm k xmm
|
|
// VPMINUW.Z ymm ymm k ymm
|
|
// VPMINUW.Z m512 zmm k zmm
|
|
// VPMINUW.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPMINUW.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMINUW_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPMINUW_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPMOVB2M: Move Signs of Packed Byte Integers to Mask Register.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVB2M xmm k
|
|
// VPMOVB2M ymm k
|
|
// VPMOVB2M zmm k
|
|
//
|
|
// Construct and append a VPMOVB2M instruction to the active function.
|
|
func (c *Context) VPMOVB2M(xyz, k operand.Op) {
|
|
c.addinstruction(x86.VPMOVB2M(xyz, k))
|
|
}
|
|
|
|
// VPMOVB2M: Move Signs of Packed Byte Integers to Mask Register.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVB2M xmm k
|
|
// VPMOVB2M ymm k
|
|
// VPMOVB2M zmm k
|
|
//
|
|
// Construct and append a VPMOVB2M instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMOVB2M(xyz, k operand.Op) { ctx.VPMOVB2M(xyz, k) }
|
|
|
|
// VPMOVD2M: Move Signs of Packed Doubleword Integers to Mask Register.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVD2M xmm k
|
|
// VPMOVD2M ymm k
|
|
// VPMOVD2M zmm k
|
|
//
|
|
// Construct and append a VPMOVD2M instruction to the active function.
|
|
func (c *Context) VPMOVD2M(xyz, k operand.Op) {
|
|
c.addinstruction(x86.VPMOVD2M(xyz, k))
|
|
}
|
|
|
|
// VPMOVD2M: Move Signs of Packed Doubleword Integers to Mask Register.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVD2M xmm k
|
|
// VPMOVD2M ymm k
|
|
// VPMOVD2M zmm k
|
|
//
|
|
// Construct and append a VPMOVD2M instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMOVD2M(xyz, k operand.Op) { ctx.VPMOVD2M(xyz, k) }
|
|
|
|
// VPMOVDB: Down Convert Packed Doubleword Values to Byte Values with Truncation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVDB xmm k m32
|
|
// VPMOVDB xmm k xmm
|
|
// VPMOVDB xmm m32
|
|
// VPMOVDB xmm xmm
|
|
// VPMOVDB ymm k m64
|
|
// VPMOVDB ymm k xmm
|
|
// VPMOVDB ymm m64
|
|
// VPMOVDB ymm xmm
|
|
// VPMOVDB zmm k m128
|
|
// VPMOVDB zmm k xmm
|
|
// VPMOVDB zmm m128
|
|
// VPMOVDB zmm xmm
|
|
//
|
|
// Construct and append a VPMOVDB instruction to the active function.
|
|
func (c *Context) VPMOVDB(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPMOVDB(ops...))
|
|
}
|
|
|
|
// VPMOVDB: Down Convert Packed Doubleword Values to Byte Values with Truncation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVDB xmm k m32
|
|
// VPMOVDB xmm k xmm
|
|
// VPMOVDB xmm m32
|
|
// VPMOVDB xmm xmm
|
|
// VPMOVDB ymm k m64
|
|
// VPMOVDB ymm k xmm
|
|
// VPMOVDB ymm m64
|
|
// VPMOVDB ymm xmm
|
|
// VPMOVDB zmm k m128
|
|
// VPMOVDB zmm k xmm
|
|
// VPMOVDB zmm m128
|
|
// VPMOVDB zmm xmm
|
|
//
|
|
// Construct and append a VPMOVDB instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMOVDB(ops ...operand.Op) { ctx.VPMOVDB(ops...) }
|
|
|
|
// VPMOVDB_Z: Down Convert Packed Doubleword Values to Byte Values with Truncation (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVDB.Z xmm k m32
|
|
// VPMOVDB.Z xmm k xmm
|
|
// VPMOVDB.Z ymm k m64
|
|
// VPMOVDB.Z ymm k xmm
|
|
// VPMOVDB.Z zmm k m128
|
|
// VPMOVDB.Z zmm k xmm
|
|
//
|
|
// Construct and append a VPMOVDB.Z instruction to the active function.
|
|
func (c *Context) VPMOVDB_Z(xyz, k, mx operand.Op) {
|
|
c.addinstruction(x86.VPMOVDB_Z(xyz, k, mx))
|
|
}
|
|
|
|
// VPMOVDB_Z: Down Convert Packed Doubleword Values to Byte Values with Truncation (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVDB.Z xmm k m32
|
|
// VPMOVDB.Z xmm k xmm
|
|
// VPMOVDB.Z ymm k m64
|
|
// VPMOVDB.Z ymm k xmm
|
|
// VPMOVDB.Z zmm k m128
|
|
// VPMOVDB.Z zmm k xmm
|
|
//
|
|
// Construct and append a VPMOVDB.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMOVDB_Z(xyz, k, mx operand.Op) { ctx.VPMOVDB_Z(xyz, k, mx) }
|
|
|
|
// VPMOVDW: Down Convert Packed Doubleword Values to Word Values with Truncation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVDW xmm k m64
|
|
// VPMOVDW xmm k xmm
|
|
// VPMOVDW xmm m64
|
|
// VPMOVDW xmm xmm
|
|
// VPMOVDW ymm k m128
|
|
// VPMOVDW ymm k xmm
|
|
// VPMOVDW ymm m128
|
|
// VPMOVDW ymm xmm
|
|
// VPMOVDW zmm k m256
|
|
// VPMOVDW zmm k ymm
|
|
// VPMOVDW zmm m256
|
|
// VPMOVDW zmm ymm
|
|
//
|
|
// Construct and append a VPMOVDW instruction to the active function.
|
|
func (c *Context) VPMOVDW(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPMOVDW(ops...))
|
|
}
|
|
|
|
// VPMOVDW: Down Convert Packed Doubleword Values to Word Values with Truncation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVDW xmm k m64
|
|
// VPMOVDW xmm k xmm
|
|
// VPMOVDW xmm m64
|
|
// VPMOVDW xmm xmm
|
|
// VPMOVDW ymm k m128
|
|
// VPMOVDW ymm k xmm
|
|
// VPMOVDW ymm m128
|
|
// VPMOVDW ymm xmm
|
|
// VPMOVDW zmm k m256
|
|
// VPMOVDW zmm k ymm
|
|
// VPMOVDW zmm m256
|
|
// VPMOVDW zmm ymm
|
|
//
|
|
// Construct and append a VPMOVDW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMOVDW(ops ...operand.Op) { ctx.VPMOVDW(ops...) }
|
|
|
|
// VPMOVDW_Z: Down Convert Packed Doubleword Values to Word Values with Truncation (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVDW.Z xmm k m64
|
|
// VPMOVDW.Z xmm k xmm
|
|
// VPMOVDW.Z ymm k m128
|
|
// VPMOVDW.Z ymm k xmm
|
|
// VPMOVDW.Z zmm k m256
|
|
// VPMOVDW.Z zmm k ymm
|
|
//
|
|
// Construct and append a VPMOVDW.Z instruction to the active function.
|
|
func (c *Context) VPMOVDW_Z(xyz, k, mxy operand.Op) {
|
|
c.addinstruction(x86.VPMOVDW_Z(xyz, k, mxy))
|
|
}
|
|
|
|
// VPMOVDW_Z: Down Convert Packed Doubleword Values to Word Values with Truncation (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVDW.Z xmm k m64
|
|
// VPMOVDW.Z xmm k xmm
|
|
// VPMOVDW.Z ymm k m128
|
|
// VPMOVDW.Z ymm k xmm
|
|
// VPMOVDW.Z zmm k m256
|
|
// VPMOVDW.Z zmm k ymm
|
|
//
|
|
// Construct and append a VPMOVDW.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMOVDW_Z(xyz, k, mxy operand.Op) { ctx.VPMOVDW_Z(xyz, k, mxy) }
|
|
|
|
// VPMOVM2B: Expand Bits of Mask Register to Packed Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVM2B k xmm
|
|
// VPMOVM2B k ymm
|
|
// VPMOVM2B k zmm
|
|
//
|
|
// Construct and append a VPMOVM2B instruction to the active function.
|
|
func (c *Context) VPMOVM2B(k, xyz operand.Op) {
|
|
c.addinstruction(x86.VPMOVM2B(k, xyz))
|
|
}
|
|
|
|
// VPMOVM2B: Expand Bits of Mask Register to Packed Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVM2B k xmm
|
|
// VPMOVM2B k ymm
|
|
// VPMOVM2B k zmm
|
|
//
|
|
// Construct and append a VPMOVM2B instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMOVM2B(k, xyz operand.Op) { ctx.VPMOVM2B(k, xyz) }
|
|
|
|
// VPMOVM2D: Expand Bits of Mask Register to Packed Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVM2D k xmm
|
|
// VPMOVM2D k ymm
|
|
// VPMOVM2D k zmm
|
|
//
|
|
// Construct and append a VPMOVM2D instruction to the active function.
|
|
func (c *Context) VPMOVM2D(k, xyz operand.Op) {
|
|
c.addinstruction(x86.VPMOVM2D(k, xyz))
|
|
}
|
|
|
|
// VPMOVM2D: Expand Bits of Mask Register to Packed Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVM2D k xmm
|
|
// VPMOVM2D k ymm
|
|
// VPMOVM2D k zmm
|
|
//
|
|
// Construct and append a VPMOVM2D instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMOVM2D(k, xyz operand.Op) { ctx.VPMOVM2D(k, xyz) }
|
|
|
|
// VPMOVM2Q: Expand Bits of Mask Register to Packed Quadword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVM2Q k xmm
|
|
// VPMOVM2Q k ymm
|
|
// VPMOVM2Q k zmm
|
|
//
|
|
// Construct and append a VPMOVM2Q instruction to the active function.
|
|
func (c *Context) VPMOVM2Q(k, xyz operand.Op) {
|
|
c.addinstruction(x86.VPMOVM2Q(k, xyz))
|
|
}
|
|
|
|
// VPMOVM2Q: Expand Bits of Mask Register to Packed Quadword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVM2Q k xmm
|
|
// VPMOVM2Q k ymm
|
|
// VPMOVM2Q k zmm
|
|
//
|
|
// Construct and append a VPMOVM2Q instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMOVM2Q(k, xyz operand.Op) { ctx.VPMOVM2Q(k, xyz) }
|
|
|
|
// VPMOVM2W: Expand Bits of Mask Register to Packed Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVM2W k xmm
|
|
// VPMOVM2W k ymm
|
|
// VPMOVM2W k zmm
|
|
//
|
|
// Construct and append a VPMOVM2W instruction to the active function.
|
|
func (c *Context) VPMOVM2W(k, xyz operand.Op) {
|
|
c.addinstruction(x86.VPMOVM2W(k, xyz))
|
|
}
|
|
|
|
// VPMOVM2W: Expand Bits of Mask Register to Packed Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVM2W k xmm
|
|
// VPMOVM2W k ymm
|
|
// VPMOVM2W k zmm
|
|
//
|
|
// Construct and append a VPMOVM2W instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMOVM2W(k, xyz operand.Op) { ctx.VPMOVM2W(k, xyz) }
|
|
|
|
// VPMOVMSKB: Move Byte Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVMSKB ymm r32
|
|
// VPMOVMSKB xmm r32
|
|
//
|
|
// Construct and append a VPMOVMSKB instruction to the active function.
|
|
func (c *Context) VPMOVMSKB(xy, r operand.Op) {
|
|
c.addinstruction(x86.VPMOVMSKB(xy, r))
|
|
}
|
|
|
|
// VPMOVMSKB: Move Byte Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVMSKB ymm r32
|
|
// VPMOVMSKB xmm r32
|
|
//
|
|
// Construct and append a VPMOVMSKB instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMOVMSKB(xy, r operand.Op) { ctx.VPMOVMSKB(xy, r) }
|
|
|
|
// VPMOVQ2M: Move Signs of Packed Quadword Integers to Mask Register.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVQ2M xmm k
|
|
// VPMOVQ2M ymm k
|
|
// VPMOVQ2M zmm k
|
|
//
|
|
// Construct and append a VPMOVQ2M instruction to the active function.
|
|
func (c *Context) VPMOVQ2M(xyz, k operand.Op) {
|
|
c.addinstruction(x86.VPMOVQ2M(xyz, k))
|
|
}
|
|
|
|
// VPMOVQ2M: Move Signs of Packed Quadword Integers to Mask Register.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVQ2M xmm k
|
|
// VPMOVQ2M ymm k
|
|
// VPMOVQ2M zmm k
|
|
//
|
|
// Construct and append a VPMOVQ2M instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMOVQ2M(xyz, k operand.Op) { ctx.VPMOVQ2M(xyz, k) }
|
|
|
|
// VPMOVQB: Down Convert Packed Quadword Values to Byte Values with Truncation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVQB xmm k m16
|
|
// VPMOVQB xmm k xmm
|
|
// VPMOVQB xmm m16
|
|
// VPMOVQB xmm xmm
|
|
// VPMOVQB ymm k m32
|
|
// VPMOVQB ymm k xmm
|
|
// VPMOVQB ymm m32
|
|
// VPMOVQB ymm xmm
|
|
// VPMOVQB zmm k m64
|
|
// VPMOVQB zmm k xmm
|
|
// VPMOVQB zmm m64
|
|
// VPMOVQB zmm xmm
|
|
//
|
|
// Construct and append a VPMOVQB instruction to the active function.
|
|
func (c *Context) VPMOVQB(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPMOVQB(ops...))
|
|
}
|
|
|
|
// VPMOVQB: Down Convert Packed Quadword Values to Byte Values with Truncation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVQB xmm k m16
|
|
// VPMOVQB xmm k xmm
|
|
// VPMOVQB xmm m16
|
|
// VPMOVQB xmm xmm
|
|
// VPMOVQB ymm k m32
|
|
// VPMOVQB ymm k xmm
|
|
// VPMOVQB ymm m32
|
|
// VPMOVQB ymm xmm
|
|
// VPMOVQB zmm k m64
|
|
// VPMOVQB zmm k xmm
|
|
// VPMOVQB zmm m64
|
|
// VPMOVQB zmm xmm
|
|
//
|
|
// Construct and append a VPMOVQB instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMOVQB(ops ...operand.Op) { ctx.VPMOVQB(ops...) }
|
|
|
|
// VPMOVQB_Z: Down Convert Packed Quadword Values to Byte Values with Truncation (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVQB.Z xmm k m16
|
|
// VPMOVQB.Z xmm k xmm
|
|
// VPMOVQB.Z ymm k m32
|
|
// VPMOVQB.Z ymm k xmm
|
|
// VPMOVQB.Z zmm k m64
|
|
// VPMOVQB.Z zmm k xmm
|
|
//
|
|
// Construct and append a VPMOVQB.Z instruction to the active function.
|
|
func (c *Context) VPMOVQB_Z(xyz, k, mx operand.Op) {
|
|
c.addinstruction(x86.VPMOVQB_Z(xyz, k, mx))
|
|
}
|
|
|
|
// VPMOVQB_Z: Down Convert Packed Quadword Values to Byte Values with Truncation (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVQB.Z xmm k m16
|
|
// VPMOVQB.Z xmm k xmm
|
|
// VPMOVQB.Z ymm k m32
|
|
// VPMOVQB.Z ymm k xmm
|
|
// VPMOVQB.Z zmm k m64
|
|
// VPMOVQB.Z zmm k xmm
|
|
//
|
|
// Construct and append a VPMOVQB.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMOVQB_Z(xyz, k, mx operand.Op) { ctx.VPMOVQB_Z(xyz, k, mx) }
|
|
|
|
// VPMOVQD: Down Convert Packed Quadword Values to Doubleword Values with Truncation.
//
// Forms:
//
//	VPMOVQD xmm k m64
//	VPMOVQD xmm k xmm
//	VPMOVQD xmm m64
//	VPMOVQD xmm xmm
//	VPMOVQD ymm k m128
//	VPMOVQD ymm k xmm
//	VPMOVQD ymm m128
//	VPMOVQD ymm xmm
//	VPMOVQD zmm k m256
//	VPMOVQD zmm k ymm
//	VPMOVQD zmm m256
//	VPMOVQD zmm ymm
//
// Construct and append a VPMOVQD instruction to the active function.
func (c *Context) VPMOVQD(ops ...operand.Op) {
	// Construction errors are recorded on the Context (see addinstruction) rather than returned.
	c.addinstruction(x86.VPMOVQD(ops...))
}

// VPMOVQD: Down Convert Packed Quadword Values to Doubleword Values with Truncation.
//
// Forms:
//
//	VPMOVQD xmm k m64
//	VPMOVQD xmm k xmm
//	VPMOVQD xmm m64
//	VPMOVQD xmm xmm
//	VPMOVQD ymm k m128
//	VPMOVQD ymm k xmm
//	VPMOVQD ymm m128
//	VPMOVQD ymm xmm
//	VPMOVQD zmm k m256
//	VPMOVQD zmm k ymm
//	VPMOVQD zmm m256
//	VPMOVQD zmm ymm
//
// Construct and append a VPMOVQD instruction to the active function.
// Operates on the global context.
func VPMOVQD(ops ...operand.Op) { ctx.VPMOVQD(ops...) }

// VPMOVQD_Z: Down Convert Packed Quadword Values to Doubleword Values with Truncation (Zeroing Masking).
//
// Forms:
//
//	VPMOVQD.Z xmm k m64
//	VPMOVQD.Z xmm k xmm
//	VPMOVQD.Z ymm k m128
//	VPMOVQD.Z ymm k xmm
//	VPMOVQD.Z zmm k m256
//	VPMOVQD.Z zmm k ymm
//
// Construct and append a VPMOVQD.Z instruction to the active function.
func (c *Context) VPMOVQD_Z(xyz, k, mxy operand.Op) {
	// Construction errors are recorded on the Context (see addinstruction) rather than returned.
	c.addinstruction(x86.VPMOVQD_Z(xyz, k, mxy))
}

// VPMOVQD_Z: Down Convert Packed Quadword Values to Doubleword Values with Truncation (Zeroing Masking).
//
// Forms:
//
//	VPMOVQD.Z xmm k m64
//	VPMOVQD.Z xmm k xmm
//	VPMOVQD.Z ymm k m128
//	VPMOVQD.Z ymm k xmm
//	VPMOVQD.Z zmm k m256
//	VPMOVQD.Z zmm k ymm
//
// Construct and append a VPMOVQD.Z instruction to the active function.
// Operates on the global context.
func VPMOVQD_Z(xyz, k, mxy operand.Op) { ctx.VPMOVQD_Z(xyz, k, mxy) }
|
|
|
|
// VPMOVQW: Down Convert Packed Quadword Values to Word Values with Truncation.
//
// Forms:
//
//	VPMOVQW xmm k m32
//	VPMOVQW xmm k xmm
//	VPMOVQW xmm m32
//	VPMOVQW xmm xmm
//	VPMOVQW ymm k m64
//	VPMOVQW ymm k xmm
//	VPMOVQW ymm m64
//	VPMOVQW ymm xmm
//	VPMOVQW zmm k m128
//	VPMOVQW zmm k xmm
//	VPMOVQW zmm m128
//	VPMOVQW zmm xmm
//
// Construct and append a VPMOVQW instruction to the active function.
func (c *Context) VPMOVQW(ops ...operand.Op) {
	// Construction errors are recorded on the Context (see addinstruction) rather than returned.
	c.addinstruction(x86.VPMOVQW(ops...))
}

// VPMOVQW: Down Convert Packed Quadword Values to Word Values with Truncation.
//
// Forms:
//
//	VPMOVQW xmm k m32
//	VPMOVQW xmm k xmm
//	VPMOVQW xmm m32
//	VPMOVQW xmm xmm
//	VPMOVQW ymm k m64
//	VPMOVQW ymm k xmm
//	VPMOVQW ymm m64
//	VPMOVQW ymm xmm
//	VPMOVQW zmm k m128
//	VPMOVQW zmm k xmm
//	VPMOVQW zmm m128
//	VPMOVQW zmm xmm
//
// Construct and append a VPMOVQW instruction to the active function.
// Operates on the global context.
func VPMOVQW(ops ...operand.Op) { ctx.VPMOVQW(ops...) }

// VPMOVQW_Z: Down Convert Packed Quadword Values to Word Values with Truncation (Zeroing Masking).
//
// Forms:
//
//	VPMOVQW.Z xmm k m32
//	VPMOVQW.Z xmm k xmm
//	VPMOVQW.Z ymm k m64
//	VPMOVQW.Z ymm k xmm
//	VPMOVQW.Z zmm k m128
//	VPMOVQW.Z zmm k xmm
//
// Construct and append a VPMOVQW.Z instruction to the active function.
func (c *Context) VPMOVQW_Z(xyz, k, mx operand.Op) {
	// Construction errors are recorded on the Context (see addinstruction) rather than returned.
	c.addinstruction(x86.VPMOVQW_Z(xyz, k, mx))
}

// VPMOVQW_Z: Down Convert Packed Quadword Values to Word Values with Truncation (Zeroing Masking).
//
// Forms:
//
//	VPMOVQW.Z xmm k m32
//	VPMOVQW.Z xmm k xmm
//	VPMOVQW.Z ymm k m64
//	VPMOVQW.Z ymm k xmm
//	VPMOVQW.Z zmm k m128
//	VPMOVQW.Z zmm k xmm
//
// Construct and append a VPMOVQW.Z instruction to the active function.
// Operates on the global context.
func VPMOVQW_Z(xyz, k, mx operand.Op) { ctx.VPMOVQW_Z(xyz, k, mx) }
|
|
|
|
// VPMOVSDB: Down Convert Packed Doubleword Values to Byte Values with Signed Saturation.
//
// Forms:
//
//	VPMOVSDB xmm k m32
//	VPMOVSDB xmm k xmm
//	VPMOVSDB xmm m32
//	VPMOVSDB xmm xmm
//	VPMOVSDB ymm k m64
//	VPMOVSDB ymm k xmm
//	VPMOVSDB ymm m64
//	VPMOVSDB ymm xmm
//	VPMOVSDB zmm k m128
//	VPMOVSDB zmm k xmm
//	VPMOVSDB zmm m128
//	VPMOVSDB zmm xmm
//
// Construct and append a VPMOVSDB instruction to the active function.
func (c *Context) VPMOVSDB(ops ...operand.Op) {
	// Construction errors are recorded on the Context (see addinstruction) rather than returned.
	c.addinstruction(x86.VPMOVSDB(ops...))
}

// VPMOVSDB: Down Convert Packed Doubleword Values to Byte Values with Signed Saturation.
//
// Forms:
//
//	VPMOVSDB xmm k m32
//	VPMOVSDB xmm k xmm
//	VPMOVSDB xmm m32
//	VPMOVSDB xmm xmm
//	VPMOVSDB ymm k m64
//	VPMOVSDB ymm k xmm
//	VPMOVSDB ymm m64
//	VPMOVSDB ymm xmm
//	VPMOVSDB zmm k m128
//	VPMOVSDB zmm k xmm
//	VPMOVSDB zmm m128
//	VPMOVSDB zmm xmm
//
// Construct and append a VPMOVSDB instruction to the active function.
// Operates on the global context.
func VPMOVSDB(ops ...operand.Op) { ctx.VPMOVSDB(ops...) }

// VPMOVSDB_Z: Down Convert Packed Doubleword Values to Byte Values with Signed Saturation (Zeroing Masking).
//
// Forms:
//
//	VPMOVSDB.Z xmm k m32
//	VPMOVSDB.Z xmm k xmm
//	VPMOVSDB.Z ymm k m64
//	VPMOVSDB.Z ymm k xmm
//	VPMOVSDB.Z zmm k m128
//	VPMOVSDB.Z zmm k xmm
//
// Construct and append a VPMOVSDB.Z instruction to the active function.
func (c *Context) VPMOVSDB_Z(xyz, k, mx operand.Op) {
	// Construction errors are recorded on the Context (see addinstruction) rather than returned.
	c.addinstruction(x86.VPMOVSDB_Z(xyz, k, mx))
}

// VPMOVSDB_Z: Down Convert Packed Doubleword Values to Byte Values with Signed Saturation (Zeroing Masking).
//
// Forms:
//
//	VPMOVSDB.Z xmm k m32
//	VPMOVSDB.Z xmm k xmm
//	VPMOVSDB.Z ymm k m64
//	VPMOVSDB.Z ymm k xmm
//	VPMOVSDB.Z zmm k m128
//	VPMOVSDB.Z zmm k xmm
//
// Construct and append a VPMOVSDB.Z instruction to the active function.
// Operates on the global context.
func VPMOVSDB_Z(xyz, k, mx operand.Op) { ctx.VPMOVSDB_Z(xyz, k, mx) }
|
|
|
|
// VPMOVSDW: Down Convert Packed Doubleword Values to Word Values with Signed Saturation.
//
// Forms:
//
//	VPMOVSDW xmm k m64
//	VPMOVSDW xmm k xmm
//	VPMOVSDW xmm m64
//	VPMOVSDW xmm xmm
//	VPMOVSDW ymm k m128
//	VPMOVSDW ymm k xmm
//	VPMOVSDW ymm m128
//	VPMOVSDW ymm xmm
//	VPMOVSDW zmm k m256
//	VPMOVSDW zmm k ymm
//	VPMOVSDW zmm m256
//	VPMOVSDW zmm ymm
//
// Construct and append a VPMOVSDW instruction to the active function.
func (c *Context) VPMOVSDW(ops ...operand.Op) {
	// Construction errors are recorded on the Context (see addinstruction) rather than returned.
	c.addinstruction(x86.VPMOVSDW(ops...))
}

// VPMOVSDW: Down Convert Packed Doubleword Values to Word Values with Signed Saturation.
//
// Forms:
//
//	VPMOVSDW xmm k m64
//	VPMOVSDW xmm k xmm
//	VPMOVSDW xmm m64
//	VPMOVSDW xmm xmm
//	VPMOVSDW ymm k m128
//	VPMOVSDW ymm k xmm
//	VPMOVSDW ymm m128
//	VPMOVSDW ymm xmm
//	VPMOVSDW zmm k m256
//	VPMOVSDW zmm k ymm
//	VPMOVSDW zmm m256
//	VPMOVSDW zmm ymm
//
// Construct and append a VPMOVSDW instruction to the active function.
// Operates on the global context.
func VPMOVSDW(ops ...operand.Op) { ctx.VPMOVSDW(ops...) }

// VPMOVSDW_Z: Down Convert Packed Doubleword Values to Word Values with Signed Saturation (Zeroing Masking).
//
// Forms:
//
//	VPMOVSDW.Z xmm k m64
//	VPMOVSDW.Z xmm k xmm
//	VPMOVSDW.Z ymm k m128
//	VPMOVSDW.Z ymm k xmm
//	VPMOVSDW.Z zmm k m256
//	VPMOVSDW.Z zmm k ymm
//
// Construct and append a VPMOVSDW.Z instruction to the active function.
func (c *Context) VPMOVSDW_Z(xyz, k, mxy operand.Op) {
	// Construction errors are recorded on the Context (see addinstruction) rather than returned.
	c.addinstruction(x86.VPMOVSDW_Z(xyz, k, mxy))
}

// VPMOVSDW_Z: Down Convert Packed Doubleword Values to Word Values with Signed Saturation (Zeroing Masking).
//
// Forms:
//
//	VPMOVSDW.Z xmm k m64
//	VPMOVSDW.Z xmm k xmm
//	VPMOVSDW.Z ymm k m128
//	VPMOVSDW.Z ymm k xmm
//	VPMOVSDW.Z zmm k m256
//	VPMOVSDW.Z zmm k ymm
//
// Construct and append a VPMOVSDW.Z instruction to the active function.
// Operates on the global context.
func VPMOVSDW_Z(xyz, k, mxy operand.Op) { ctx.VPMOVSDW_Z(xyz, k, mxy) }
|
|
|
|
// VPMOVSQB: Down Convert Packed Quadword Values to Byte Values with Signed Saturation.
//
// Forms:
//
//	VPMOVSQB xmm k m16
//	VPMOVSQB xmm k xmm
//	VPMOVSQB xmm m16
//	VPMOVSQB xmm xmm
//	VPMOVSQB ymm k m32
//	VPMOVSQB ymm k xmm
//	VPMOVSQB ymm m32
//	VPMOVSQB ymm xmm
//	VPMOVSQB zmm k m64
//	VPMOVSQB zmm k xmm
//	VPMOVSQB zmm m64
//	VPMOVSQB zmm xmm
//
// Construct and append a VPMOVSQB instruction to the active function.
func (c *Context) VPMOVSQB(ops ...operand.Op) {
	// Construction errors are recorded on the Context (see addinstruction) rather than returned.
	c.addinstruction(x86.VPMOVSQB(ops...))
}

// VPMOVSQB: Down Convert Packed Quadword Values to Byte Values with Signed Saturation.
//
// Forms:
//
//	VPMOVSQB xmm k m16
//	VPMOVSQB xmm k xmm
//	VPMOVSQB xmm m16
//	VPMOVSQB xmm xmm
//	VPMOVSQB ymm k m32
//	VPMOVSQB ymm k xmm
//	VPMOVSQB ymm m32
//	VPMOVSQB ymm xmm
//	VPMOVSQB zmm k m64
//	VPMOVSQB zmm k xmm
//	VPMOVSQB zmm m64
//	VPMOVSQB zmm xmm
//
// Construct and append a VPMOVSQB instruction to the active function.
// Operates on the global context.
func VPMOVSQB(ops ...operand.Op) { ctx.VPMOVSQB(ops...) }

// VPMOVSQB_Z: Down Convert Packed Quadword Values to Byte Values with Signed Saturation (Zeroing Masking).
//
// Forms:
//
//	VPMOVSQB.Z xmm k m16
//	VPMOVSQB.Z xmm k xmm
//	VPMOVSQB.Z ymm k m32
//	VPMOVSQB.Z ymm k xmm
//	VPMOVSQB.Z zmm k m64
//	VPMOVSQB.Z zmm k xmm
//
// Construct and append a VPMOVSQB.Z instruction to the active function.
func (c *Context) VPMOVSQB_Z(xyz, k, mx operand.Op) {
	// Construction errors are recorded on the Context (see addinstruction) rather than returned.
	c.addinstruction(x86.VPMOVSQB_Z(xyz, k, mx))
}

// VPMOVSQB_Z: Down Convert Packed Quadword Values to Byte Values with Signed Saturation (Zeroing Masking).
//
// Forms:
//
//	VPMOVSQB.Z xmm k m16
//	VPMOVSQB.Z xmm k xmm
//	VPMOVSQB.Z ymm k m32
//	VPMOVSQB.Z ymm k xmm
//	VPMOVSQB.Z zmm k m64
//	VPMOVSQB.Z zmm k xmm
//
// Construct and append a VPMOVSQB.Z instruction to the active function.
// Operates on the global context.
func VPMOVSQB_Z(xyz, k, mx operand.Op) { ctx.VPMOVSQB_Z(xyz, k, mx) }
|
|
|
|
// VPMOVSQD: Down Convert Packed Quadword Values to Doubleword Values with Signed Saturation.
//
// Forms:
//
//	VPMOVSQD xmm k m64
//	VPMOVSQD xmm k xmm
//	VPMOVSQD xmm m64
//	VPMOVSQD xmm xmm
//	VPMOVSQD ymm k m128
//	VPMOVSQD ymm k xmm
//	VPMOVSQD ymm m128
//	VPMOVSQD ymm xmm
//	VPMOVSQD zmm k m256
//	VPMOVSQD zmm k ymm
//	VPMOVSQD zmm m256
//	VPMOVSQD zmm ymm
//
// Construct and append a VPMOVSQD instruction to the active function.
func (c *Context) VPMOVSQD(ops ...operand.Op) {
	// Construction errors are recorded on the Context (see addinstruction) rather than returned.
	c.addinstruction(x86.VPMOVSQD(ops...))
}

// VPMOVSQD: Down Convert Packed Quadword Values to Doubleword Values with Signed Saturation.
//
// Forms:
//
//	VPMOVSQD xmm k m64
//	VPMOVSQD xmm k xmm
//	VPMOVSQD xmm m64
//	VPMOVSQD xmm xmm
//	VPMOVSQD ymm k m128
//	VPMOVSQD ymm k xmm
//	VPMOVSQD ymm m128
//	VPMOVSQD ymm xmm
//	VPMOVSQD zmm k m256
//	VPMOVSQD zmm k ymm
//	VPMOVSQD zmm m256
//	VPMOVSQD zmm ymm
//
// Construct and append a VPMOVSQD instruction to the active function.
// Operates on the global context.
func VPMOVSQD(ops ...operand.Op) { ctx.VPMOVSQD(ops...) }

// VPMOVSQD_Z: Down Convert Packed Quadword Values to Doubleword Values with Signed Saturation (Zeroing Masking).
//
// Forms:
//
//	VPMOVSQD.Z xmm k m64
//	VPMOVSQD.Z xmm k xmm
//	VPMOVSQD.Z ymm k m128
//	VPMOVSQD.Z ymm k xmm
//	VPMOVSQD.Z zmm k m256
//	VPMOVSQD.Z zmm k ymm
//
// Construct and append a VPMOVSQD.Z instruction to the active function.
func (c *Context) VPMOVSQD_Z(xyz, k, mxy operand.Op) {
	// Construction errors are recorded on the Context (see addinstruction) rather than returned.
	c.addinstruction(x86.VPMOVSQD_Z(xyz, k, mxy))
}

// VPMOVSQD_Z: Down Convert Packed Quadword Values to Doubleword Values with Signed Saturation (Zeroing Masking).
//
// Forms:
//
//	VPMOVSQD.Z xmm k m64
//	VPMOVSQD.Z xmm k xmm
//	VPMOVSQD.Z ymm k m128
//	VPMOVSQD.Z ymm k xmm
//	VPMOVSQD.Z zmm k m256
//	VPMOVSQD.Z zmm k ymm
//
// Construct and append a VPMOVSQD.Z instruction to the active function.
// Operates on the global context.
func VPMOVSQD_Z(xyz, k, mxy operand.Op) { ctx.VPMOVSQD_Z(xyz, k, mxy) }
|
|
|
|
// VPMOVSQW: Down Convert Packed Quadword Values to Word Values with Signed Saturation.
//
// Forms:
//
//	VPMOVSQW xmm k m32
//	VPMOVSQW xmm k xmm
//	VPMOVSQW xmm m32
//	VPMOVSQW xmm xmm
//	VPMOVSQW ymm k m64
//	VPMOVSQW ymm k xmm
//	VPMOVSQW ymm m64
//	VPMOVSQW ymm xmm
//	VPMOVSQW zmm k m128
//	VPMOVSQW zmm k xmm
//	VPMOVSQW zmm m128
//	VPMOVSQW zmm xmm
//
// Construct and append a VPMOVSQW instruction to the active function.
func (c *Context) VPMOVSQW(ops ...operand.Op) {
	// Construction errors are recorded on the Context (see addinstruction) rather than returned.
	c.addinstruction(x86.VPMOVSQW(ops...))
}

// VPMOVSQW: Down Convert Packed Quadword Values to Word Values with Signed Saturation.
//
// Forms:
//
//	VPMOVSQW xmm k m32
//	VPMOVSQW xmm k xmm
//	VPMOVSQW xmm m32
//	VPMOVSQW xmm xmm
//	VPMOVSQW ymm k m64
//	VPMOVSQW ymm k xmm
//	VPMOVSQW ymm m64
//	VPMOVSQW ymm xmm
//	VPMOVSQW zmm k m128
//	VPMOVSQW zmm k xmm
//	VPMOVSQW zmm m128
//	VPMOVSQW zmm xmm
//
// Construct and append a VPMOVSQW instruction to the active function.
// Operates on the global context.
func VPMOVSQW(ops ...operand.Op) { ctx.VPMOVSQW(ops...) }

// VPMOVSQW_Z: Down Convert Packed Quadword Values to Word Values with Signed Saturation (Zeroing Masking).
//
// Forms:
//
//	VPMOVSQW.Z xmm k m32
//	VPMOVSQW.Z xmm k xmm
//	VPMOVSQW.Z ymm k m64
//	VPMOVSQW.Z ymm k xmm
//	VPMOVSQW.Z zmm k m128
//	VPMOVSQW.Z zmm k xmm
//
// Construct and append a VPMOVSQW.Z instruction to the active function.
func (c *Context) VPMOVSQW_Z(xyz, k, mx operand.Op) {
	// Construction errors are recorded on the Context (see addinstruction) rather than returned.
	c.addinstruction(x86.VPMOVSQW_Z(xyz, k, mx))
}

// VPMOVSQW_Z: Down Convert Packed Quadword Values to Word Values with Signed Saturation (Zeroing Masking).
//
// Forms:
//
//	VPMOVSQW.Z xmm k m32
//	VPMOVSQW.Z xmm k xmm
//	VPMOVSQW.Z ymm k m64
//	VPMOVSQW.Z ymm k xmm
//	VPMOVSQW.Z zmm k m128
//	VPMOVSQW.Z zmm k xmm
//
// Construct and append a VPMOVSQW.Z instruction to the active function.
// Operates on the global context.
func VPMOVSQW_Z(xyz, k, mx operand.Op) { ctx.VPMOVSQW_Z(xyz, k, mx) }
|
|
|
|
// VPMOVSWB: Down Convert Packed Word Values to Byte Values with Signed Saturation.
//
// Forms:
//
//	VPMOVSWB xmm k m64
//	VPMOVSWB xmm k xmm
//	VPMOVSWB xmm m64
//	VPMOVSWB xmm xmm
//	VPMOVSWB ymm k m128
//	VPMOVSWB ymm k xmm
//	VPMOVSWB ymm m128
//	VPMOVSWB ymm xmm
//	VPMOVSWB zmm k m256
//	VPMOVSWB zmm k ymm
//	VPMOVSWB zmm m256
//	VPMOVSWB zmm ymm
//
// Construct and append a VPMOVSWB instruction to the active function.
func (c *Context) VPMOVSWB(ops ...operand.Op) {
	// Construction errors are recorded on the Context (see addinstruction) rather than returned.
	c.addinstruction(x86.VPMOVSWB(ops...))
}

// VPMOVSWB: Down Convert Packed Word Values to Byte Values with Signed Saturation.
//
// Forms:
//
//	VPMOVSWB xmm k m64
//	VPMOVSWB xmm k xmm
//	VPMOVSWB xmm m64
//	VPMOVSWB xmm xmm
//	VPMOVSWB ymm k m128
//	VPMOVSWB ymm k xmm
//	VPMOVSWB ymm m128
//	VPMOVSWB ymm xmm
//	VPMOVSWB zmm k m256
//	VPMOVSWB zmm k ymm
//	VPMOVSWB zmm m256
//	VPMOVSWB zmm ymm
//
// Construct and append a VPMOVSWB instruction to the active function.
// Operates on the global context.
func VPMOVSWB(ops ...operand.Op) { ctx.VPMOVSWB(ops...) }

// VPMOVSWB_Z: Down Convert Packed Word Values to Byte Values with Signed Saturation (Zeroing Masking).
//
// Forms:
//
//	VPMOVSWB.Z xmm k m64
//	VPMOVSWB.Z xmm k xmm
//	VPMOVSWB.Z ymm k m128
//	VPMOVSWB.Z ymm k xmm
//	VPMOVSWB.Z zmm k m256
//	VPMOVSWB.Z zmm k ymm
//
// Construct and append a VPMOVSWB.Z instruction to the active function.
func (c *Context) VPMOVSWB_Z(xyz, k, mxy operand.Op) {
	// Construction errors are recorded on the Context (see addinstruction) rather than returned.
	c.addinstruction(x86.VPMOVSWB_Z(xyz, k, mxy))
}

// VPMOVSWB_Z: Down Convert Packed Word Values to Byte Values with Signed Saturation (Zeroing Masking).
//
// Forms:
//
//	VPMOVSWB.Z xmm k m64
//	VPMOVSWB.Z xmm k xmm
//	VPMOVSWB.Z ymm k m128
//	VPMOVSWB.Z ymm k xmm
//	VPMOVSWB.Z zmm k m256
//	VPMOVSWB.Z zmm k ymm
//
// Construct and append a VPMOVSWB.Z instruction to the active function.
// Operates on the global context.
func VPMOVSWB_Z(xyz, k, mxy operand.Op) { ctx.VPMOVSWB_Z(xyz, k, mxy) }
|
|
|
|
// VPMOVSXBD: Move Packed Byte Integers to Doubleword Integers with Sign Extension.
//
// Forms:
//
//	VPMOVSXBD m64 ymm
//	VPMOVSXBD xmm ymm
//	VPMOVSXBD m32 xmm
//	VPMOVSXBD xmm xmm
//	VPMOVSXBD m32 k xmm
//	VPMOVSXBD m64 k ymm
//	VPMOVSXBD xmm k xmm
//	VPMOVSXBD xmm k ymm
//	VPMOVSXBD m128 k zmm
//	VPMOVSXBD m128 zmm
//	VPMOVSXBD xmm k zmm
//	VPMOVSXBD xmm zmm
//
// Construct and append a VPMOVSXBD instruction to the active function.
func (c *Context) VPMOVSXBD(ops ...operand.Op) {
	// Construction errors are recorded on the Context (see addinstruction) rather than returned.
	c.addinstruction(x86.VPMOVSXBD(ops...))
}

// VPMOVSXBD: Move Packed Byte Integers to Doubleword Integers with Sign Extension.
//
// Forms:
//
//	VPMOVSXBD m64 ymm
//	VPMOVSXBD xmm ymm
//	VPMOVSXBD m32 xmm
//	VPMOVSXBD xmm xmm
//	VPMOVSXBD m32 k xmm
//	VPMOVSXBD m64 k ymm
//	VPMOVSXBD xmm k xmm
//	VPMOVSXBD xmm k ymm
//	VPMOVSXBD m128 k zmm
//	VPMOVSXBD m128 zmm
//	VPMOVSXBD xmm k zmm
//	VPMOVSXBD xmm zmm
//
// Construct and append a VPMOVSXBD instruction to the active function.
// Operates on the global context.
func VPMOVSXBD(ops ...operand.Op) { ctx.VPMOVSXBD(ops...) }

// VPMOVSXBD_Z: Move Packed Byte Integers to Doubleword Integers with Sign Extension (Zeroing Masking).
//
// Forms:
//
//	VPMOVSXBD.Z m32 k xmm
//	VPMOVSXBD.Z m64 k ymm
//	VPMOVSXBD.Z xmm k xmm
//	VPMOVSXBD.Z xmm k ymm
//	VPMOVSXBD.Z m128 k zmm
//	VPMOVSXBD.Z xmm k zmm
//
// Construct and append a VPMOVSXBD.Z instruction to the active function.
func (c *Context) VPMOVSXBD_Z(mx, k, xyz operand.Op) {
	// Construction errors are recorded on the Context (see addinstruction) rather than returned.
	c.addinstruction(x86.VPMOVSXBD_Z(mx, k, xyz))
}

// VPMOVSXBD_Z: Move Packed Byte Integers to Doubleword Integers with Sign Extension (Zeroing Masking).
//
// Forms:
//
//	VPMOVSXBD.Z m32 k xmm
//	VPMOVSXBD.Z m64 k ymm
//	VPMOVSXBD.Z xmm k xmm
//	VPMOVSXBD.Z xmm k ymm
//	VPMOVSXBD.Z m128 k zmm
//	VPMOVSXBD.Z xmm k zmm
//
// Construct and append a VPMOVSXBD.Z instruction to the active function.
// Operates on the global context.
func VPMOVSXBD_Z(mx, k, xyz operand.Op) { ctx.VPMOVSXBD_Z(mx, k, xyz) }
|
|
|
|
// VPMOVSXBQ: Move Packed Byte Integers to Quadword Integers with Sign Extension.
//
// Forms:
//
//	VPMOVSXBQ m32 ymm
//	VPMOVSXBQ xmm ymm
//	VPMOVSXBQ m16 xmm
//	VPMOVSXBQ xmm xmm
//	VPMOVSXBQ m16 k xmm
//	VPMOVSXBQ m32 k ymm
//	VPMOVSXBQ xmm k xmm
//	VPMOVSXBQ xmm k ymm
//	VPMOVSXBQ m64 k zmm
//	VPMOVSXBQ m64 zmm
//	VPMOVSXBQ xmm k zmm
//	VPMOVSXBQ xmm zmm
//
// Construct and append a VPMOVSXBQ instruction to the active function.
func (c *Context) VPMOVSXBQ(ops ...operand.Op) {
	// Construction errors are recorded on the Context (see addinstruction) rather than returned.
	c.addinstruction(x86.VPMOVSXBQ(ops...))
}

// VPMOVSXBQ: Move Packed Byte Integers to Quadword Integers with Sign Extension.
//
// Forms:
//
//	VPMOVSXBQ m32 ymm
//	VPMOVSXBQ xmm ymm
//	VPMOVSXBQ m16 xmm
//	VPMOVSXBQ xmm xmm
//	VPMOVSXBQ m16 k xmm
//	VPMOVSXBQ m32 k ymm
//	VPMOVSXBQ xmm k xmm
//	VPMOVSXBQ xmm k ymm
//	VPMOVSXBQ m64 k zmm
//	VPMOVSXBQ m64 zmm
//	VPMOVSXBQ xmm k zmm
//	VPMOVSXBQ xmm zmm
//
// Construct and append a VPMOVSXBQ instruction to the active function.
// Operates on the global context.
func VPMOVSXBQ(ops ...operand.Op) { ctx.VPMOVSXBQ(ops...) }

// VPMOVSXBQ_Z: Move Packed Byte Integers to Quadword Integers with Sign Extension (Zeroing Masking).
//
// Forms:
//
//	VPMOVSXBQ.Z m16 k xmm
//	VPMOVSXBQ.Z m32 k ymm
//	VPMOVSXBQ.Z xmm k xmm
//	VPMOVSXBQ.Z xmm k ymm
//	VPMOVSXBQ.Z m64 k zmm
//	VPMOVSXBQ.Z xmm k zmm
//
// Construct and append a VPMOVSXBQ.Z instruction to the active function.
func (c *Context) VPMOVSXBQ_Z(mx, k, xyz operand.Op) {
	// Construction errors are recorded on the Context (see addinstruction) rather than returned.
	c.addinstruction(x86.VPMOVSXBQ_Z(mx, k, xyz))
}

// VPMOVSXBQ_Z: Move Packed Byte Integers to Quadword Integers with Sign Extension (Zeroing Masking).
//
// Forms:
//
//	VPMOVSXBQ.Z m16 k xmm
//	VPMOVSXBQ.Z m32 k ymm
//	VPMOVSXBQ.Z xmm k xmm
//	VPMOVSXBQ.Z xmm k ymm
//	VPMOVSXBQ.Z m64 k zmm
//	VPMOVSXBQ.Z xmm k zmm
//
// Construct and append a VPMOVSXBQ.Z instruction to the active function.
// Operates on the global context.
func VPMOVSXBQ_Z(mx, k, xyz operand.Op) { ctx.VPMOVSXBQ_Z(mx, k, xyz) }
|
|
|
|
// VPMOVSXBW: Move Packed Byte Integers to Word Integers with Sign Extension.
//
// Forms:
//
//	VPMOVSXBW m128 ymm
//	VPMOVSXBW xmm ymm
//	VPMOVSXBW m64 xmm
//	VPMOVSXBW xmm xmm
//	VPMOVSXBW m128 k ymm
//	VPMOVSXBW m64 k xmm
//	VPMOVSXBW xmm k xmm
//	VPMOVSXBW xmm k ymm
//	VPMOVSXBW m256 k zmm
//	VPMOVSXBW m256 zmm
//	VPMOVSXBW ymm k zmm
//	VPMOVSXBW ymm zmm
//
// Construct and append a VPMOVSXBW instruction to the active function.
func (c *Context) VPMOVSXBW(ops ...operand.Op) {
	// Construction errors are recorded on the Context (see addinstruction) rather than returned.
	c.addinstruction(x86.VPMOVSXBW(ops...))
}

// VPMOVSXBW: Move Packed Byte Integers to Word Integers with Sign Extension.
//
// Forms:
//
//	VPMOVSXBW m128 ymm
//	VPMOVSXBW xmm ymm
//	VPMOVSXBW m64 xmm
//	VPMOVSXBW xmm xmm
//	VPMOVSXBW m128 k ymm
//	VPMOVSXBW m64 k xmm
//	VPMOVSXBW xmm k xmm
//	VPMOVSXBW xmm k ymm
//	VPMOVSXBW m256 k zmm
//	VPMOVSXBW m256 zmm
//	VPMOVSXBW ymm k zmm
//	VPMOVSXBW ymm zmm
//
// Construct and append a VPMOVSXBW instruction to the active function.
// Operates on the global context.
func VPMOVSXBW(ops ...operand.Op) { ctx.VPMOVSXBW(ops...) }

// VPMOVSXBW_Z: Move Packed Byte Integers to Word Integers with Sign Extension (Zeroing Masking).
//
// Forms:
//
//	VPMOVSXBW.Z m128 k ymm
//	VPMOVSXBW.Z m64 k xmm
//	VPMOVSXBW.Z xmm k xmm
//	VPMOVSXBW.Z xmm k ymm
//	VPMOVSXBW.Z m256 k zmm
//	VPMOVSXBW.Z ymm k zmm
//
// Construct and append a VPMOVSXBW.Z instruction to the active function.
func (c *Context) VPMOVSXBW_Z(mxy, k, xyz operand.Op) {
	// Construction errors are recorded on the Context (see addinstruction) rather than returned.
	c.addinstruction(x86.VPMOVSXBW_Z(mxy, k, xyz))
}

// VPMOVSXBW_Z: Move Packed Byte Integers to Word Integers with Sign Extension (Zeroing Masking).
//
// Forms:
//
//	VPMOVSXBW.Z m128 k ymm
//	VPMOVSXBW.Z m64 k xmm
//	VPMOVSXBW.Z xmm k xmm
//	VPMOVSXBW.Z xmm k ymm
//	VPMOVSXBW.Z m256 k zmm
//	VPMOVSXBW.Z ymm k zmm
//
// Construct and append a VPMOVSXBW.Z instruction to the active function.
// Operates on the global context.
func VPMOVSXBW_Z(mxy, k, xyz operand.Op) { ctx.VPMOVSXBW_Z(mxy, k, xyz) }
|
|
|
|
// VPMOVSXDQ: Move Packed Doubleword Integers to Quadword Integers with Sign Extension.
//
// Forms:
//
//	VPMOVSXDQ m128 ymm
//	VPMOVSXDQ xmm ymm
//	VPMOVSXDQ m64 xmm
//	VPMOVSXDQ xmm xmm
//	VPMOVSXDQ m128 k ymm
//	VPMOVSXDQ m64 k xmm
//	VPMOVSXDQ xmm k xmm
//	VPMOVSXDQ xmm k ymm
//	VPMOVSXDQ m256 k zmm
//	VPMOVSXDQ m256 zmm
//	VPMOVSXDQ ymm k zmm
//	VPMOVSXDQ ymm zmm
//
// Construct and append a VPMOVSXDQ instruction to the active function.
func (c *Context) VPMOVSXDQ(ops ...operand.Op) {
	// Construction errors are recorded on the Context (see addinstruction) rather than returned.
	c.addinstruction(x86.VPMOVSXDQ(ops...))
}

// VPMOVSXDQ: Move Packed Doubleword Integers to Quadword Integers with Sign Extension.
//
// Forms:
//
//	VPMOVSXDQ m128 ymm
//	VPMOVSXDQ xmm ymm
//	VPMOVSXDQ m64 xmm
//	VPMOVSXDQ xmm xmm
//	VPMOVSXDQ m128 k ymm
//	VPMOVSXDQ m64 k xmm
//	VPMOVSXDQ xmm k xmm
//	VPMOVSXDQ xmm k ymm
//	VPMOVSXDQ m256 k zmm
//	VPMOVSXDQ m256 zmm
//	VPMOVSXDQ ymm k zmm
//	VPMOVSXDQ ymm zmm
//
// Construct and append a VPMOVSXDQ instruction to the active function.
// Operates on the global context.
func VPMOVSXDQ(ops ...operand.Op) { ctx.VPMOVSXDQ(ops...) }

// VPMOVSXDQ_Z: Move Packed Doubleword Integers to Quadword Integers with Sign Extension (Zeroing Masking).
//
// Forms:
//
//	VPMOVSXDQ.Z m128 k ymm
//	VPMOVSXDQ.Z m64 k xmm
//	VPMOVSXDQ.Z xmm k xmm
//	VPMOVSXDQ.Z xmm k ymm
//	VPMOVSXDQ.Z m256 k zmm
//	VPMOVSXDQ.Z ymm k zmm
//
// Construct and append a VPMOVSXDQ.Z instruction to the active function.
func (c *Context) VPMOVSXDQ_Z(mxy, k, xyz operand.Op) {
	// Construction errors are recorded on the Context (see addinstruction) rather than returned.
	c.addinstruction(x86.VPMOVSXDQ_Z(mxy, k, xyz))
}

// VPMOVSXDQ_Z: Move Packed Doubleword Integers to Quadword Integers with Sign Extension (Zeroing Masking).
//
// Forms:
//
//	VPMOVSXDQ.Z m128 k ymm
//	VPMOVSXDQ.Z m64 k xmm
//	VPMOVSXDQ.Z xmm k xmm
//	VPMOVSXDQ.Z xmm k ymm
//	VPMOVSXDQ.Z m256 k zmm
//	VPMOVSXDQ.Z ymm k zmm
//
// Construct and append a VPMOVSXDQ.Z instruction to the active function.
// Operates on the global context.
func VPMOVSXDQ_Z(mxy, k, xyz operand.Op) { ctx.VPMOVSXDQ_Z(mxy, k, xyz) }
|
|
|
|
// VPMOVSXWD: Move Packed Word Integers to Doubleword Integers with Sign Extension.
//
// Forms:
//
//	VPMOVSXWD m128 ymm
//	VPMOVSXWD xmm ymm
//	VPMOVSXWD m64 xmm
//	VPMOVSXWD xmm xmm
//	VPMOVSXWD m128 k ymm
//	VPMOVSXWD m64 k xmm
//	VPMOVSXWD xmm k xmm
//	VPMOVSXWD xmm k ymm
//	VPMOVSXWD m256 k zmm
//	VPMOVSXWD m256 zmm
//	VPMOVSXWD ymm k zmm
//	VPMOVSXWD ymm zmm
//
// Construct and append a VPMOVSXWD instruction to the active function.
func (c *Context) VPMOVSXWD(ops ...operand.Op) {
	// Construction errors are recorded on the Context (see addinstruction) rather than returned.
	c.addinstruction(x86.VPMOVSXWD(ops...))
}

// VPMOVSXWD: Move Packed Word Integers to Doubleword Integers with Sign Extension.
//
// Forms:
//
//	VPMOVSXWD m128 ymm
//	VPMOVSXWD xmm ymm
//	VPMOVSXWD m64 xmm
//	VPMOVSXWD xmm xmm
//	VPMOVSXWD m128 k ymm
//	VPMOVSXWD m64 k xmm
//	VPMOVSXWD xmm k xmm
//	VPMOVSXWD xmm k ymm
//	VPMOVSXWD m256 k zmm
//	VPMOVSXWD m256 zmm
//	VPMOVSXWD ymm k zmm
//	VPMOVSXWD ymm zmm
//
// Construct and append a VPMOVSXWD instruction to the active function.
// Operates on the global context.
func VPMOVSXWD(ops ...operand.Op) { ctx.VPMOVSXWD(ops...) }

// VPMOVSXWD_Z: Move Packed Word Integers to Doubleword Integers with Sign Extension (Zeroing Masking).
//
// Forms:
//
//	VPMOVSXWD.Z m128 k ymm
//	VPMOVSXWD.Z m64 k xmm
//	VPMOVSXWD.Z xmm k xmm
//	VPMOVSXWD.Z xmm k ymm
//	VPMOVSXWD.Z m256 k zmm
//	VPMOVSXWD.Z ymm k zmm
//
// Construct and append a VPMOVSXWD.Z instruction to the active function.
func (c *Context) VPMOVSXWD_Z(mxy, k, xyz operand.Op) {
	// Construction errors are recorded on the Context (see addinstruction) rather than returned.
	c.addinstruction(x86.VPMOVSXWD_Z(mxy, k, xyz))
}

// VPMOVSXWD_Z: Move Packed Word Integers to Doubleword Integers with Sign Extension (Zeroing Masking).
//
// Forms:
//
//	VPMOVSXWD.Z m128 k ymm
//	VPMOVSXWD.Z m64 k xmm
//	VPMOVSXWD.Z xmm k xmm
//	VPMOVSXWD.Z xmm k ymm
//	VPMOVSXWD.Z m256 k zmm
//	VPMOVSXWD.Z ymm k zmm
//
// Construct and append a VPMOVSXWD.Z instruction to the active function.
// Operates on the global context.
func VPMOVSXWD_Z(mxy, k, xyz operand.Op) { ctx.VPMOVSXWD_Z(mxy, k, xyz) }
|
|
|
|
// VPMOVSXWQ: Move Packed Word Integers to Quadword Integers with Sign Extension.
//
// Forms:
//
//	VPMOVSXWQ m64 ymm
//	VPMOVSXWQ xmm ymm
//	VPMOVSXWQ m32 xmm
//	VPMOVSXWQ xmm xmm
//	VPMOVSXWQ m32 k xmm
//	VPMOVSXWQ m64 k ymm
//	VPMOVSXWQ xmm k xmm
//	VPMOVSXWQ xmm k ymm
//	VPMOVSXWQ m128 k zmm
//	VPMOVSXWQ m128 zmm
//	VPMOVSXWQ xmm k zmm
//	VPMOVSXWQ xmm zmm
//
// Construct and append a VPMOVSXWQ instruction to the active function.
func (c *Context) VPMOVSXWQ(ops ...operand.Op) {
	// Construction errors are recorded on the Context (see addinstruction) rather than returned.
	c.addinstruction(x86.VPMOVSXWQ(ops...))
}

// VPMOVSXWQ: Move Packed Word Integers to Quadword Integers with Sign Extension.
//
// Forms:
//
//	VPMOVSXWQ m64 ymm
//	VPMOVSXWQ xmm ymm
//	VPMOVSXWQ m32 xmm
//	VPMOVSXWQ xmm xmm
//	VPMOVSXWQ m32 k xmm
//	VPMOVSXWQ m64 k ymm
//	VPMOVSXWQ xmm k xmm
//	VPMOVSXWQ xmm k ymm
//	VPMOVSXWQ m128 k zmm
//	VPMOVSXWQ m128 zmm
//	VPMOVSXWQ xmm k zmm
//	VPMOVSXWQ xmm zmm
//
// Construct and append a VPMOVSXWQ instruction to the active function.
// Operates on the global context.
func VPMOVSXWQ(ops ...operand.Op) { ctx.VPMOVSXWQ(ops...) }

// VPMOVSXWQ_Z: Move Packed Word Integers to Quadword Integers with Sign Extension (Zeroing Masking).
//
// Forms:
//
//	VPMOVSXWQ.Z m32 k xmm
//	VPMOVSXWQ.Z m64 k ymm
//	VPMOVSXWQ.Z xmm k xmm
//	VPMOVSXWQ.Z xmm k ymm
//	VPMOVSXWQ.Z m128 k zmm
//	VPMOVSXWQ.Z xmm k zmm
//
// Construct and append a VPMOVSXWQ.Z instruction to the active function.
func (c *Context) VPMOVSXWQ_Z(mx, k, xyz operand.Op) {
	// Construction errors are recorded on the Context (see addinstruction) rather than returned.
	c.addinstruction(x86.VPMOVSXWQ_Z(mx, k, xyz))
}

// VPMOVSXWQ_Z: Move Packed Word Integers to Quadword Integers with Sign Extension (Zeroing Masking).
//
// Forms:
//
//	VPMOVSXWQ.Z m32 k xmm
//	VPMOVSXWQ.Z m64 k ymm
//	VPMOVSXWQ.Z xmm k xmm
//	VPMOVSXWQ.Z xmm k ymm
//	VPMOVSXWQ.Z m128 k zmm
//	VPMOVSXWQ.Z xmm k zmm
//
// Construct and append a VPMOVSXWQ.Z instruction to the active function.
// Operates on the global context.
func VPMOVSXWQ_Z(mx, k, xyz operand.Op) { ctx.VPMOVSXWQ_Z(mx, k, xyz) }
|
|
|
|
// VPMOVUSDB: Down Convert Packed Doubleword Values to Byte Values with Unsigned Saturation.
//
// Forms:
//
//	VPMOVUSDB xmm k m32
//	VPMOVUSDB xmm k xmm
//	VPMOVUSDB xmm m32
//	VPMOVUSDB xmm xmm
//	VPMOVUSDB ymm k m64
//	VPMOVUSDB ymm k xmm
//	VPMOVUSDB ymm m64
//	VPMOVUSDB ymm xmm
//	VPMOVUSDB zmm k m128
//	VPMOVUSDB zmm k xmm
//	VPMOVUSDB zmm m128
//	VPMOVUSDB zmm xmm
//
// Construct and append a VPMOVUSDB instruction to the active function.
func (c *Context) VPMOVUSDB(ops ...operand.Op) {
	// Construction errors are recorded on the Context (see addinstruction) rather than returned.
	c.addinstruction(x86.VPMOVUSDB(ops...))
}

// VPMOVUSDB: Down Convert Packed Doubleword Values to Byte Values with Unsigned Saturation.
//
// Forms:
//
//	VPMOVUSDB xmm k m32
//	VPMOVUSDB xmm k xmm
//	VPMOVUSDB xmm m32
//	VPMOVUSDB xmm xmm
//	VPMOVUSDB ymm k m64
//	VPMOVUSDB ymm k xmm
//	VPMOVUSDB ymm m64
//	VPMOVUSDB ymm xmm
//	VPMOVUSDB zmm k m128
//	VPMOVUSDB zmm k xmm
//	VPMOVUSDB zmm m128
//	VPMOVUSDB zmm xmm
//
// Construct and append a VPMOVUSDB instruction to the active function.
// Operates on the global context.
func VPMOVUSDB(ops ...operand.Op) { ctx.VPMOVUSDB(ops...) }

// VPMOVUSDB_Z: Down Convert Packed Doubleword Values to Byte Values with Unsigned Saturation (Zeroing Masking).
//
// Forms:
//
//	VPMOVUSDB.Z xmm k m32
//	VPMOVUSDB.Z xmm k xmm
//	VPMOVUSDB.Z ymm k m64
//	VPMOVUSDB.Z ymm k xmm
//	VPMOVUSDB.Z zmm k m128
//	VPMOVUSDB.Z zmm k xmm
//
// Construct and append a VPMOVUSDB.Z instruction to the active function.
func (c *Context) VPMOVUSDB_Z(xyz, k, mx operand.Op) {
	// Construction errors are recorded on the Context (see addinstruction) rather than returned.
	c.addinstruction(x86.VPMOVUSDB_Z(xyz, k, mx))
}

// VPMOVUSDB_Z: Down Convert Packed Doubleword Values to Byte Values with Unsigned Saturation (Zeroing Masking).
//
// Forms:
//
//	VPMOVUSDB.Z xmm k m32
//	VPMOVUSDB.Z xmm k xmm
//	VPMOVUSDB.Z ymm k m64
//	VPMOVUSDB.Z ymm k xmm
//	VPMOVUSDB.Z zmm k m128
//	VPMOVUSDB.Z zmm k xmm
//
// Construct and append a VPMOVUSDB.Z instruction to the active function.
// Operates on the global context.
func VPMOVUSDB_Z(xyz, k, mx operand.Op) { ctx.VPMOVUSDB_Z(xyz, k, mx) }
|
|
|
|
// VPMOVUSDW: Down Convert Packed Doubleword Values to Word Values with Unsigned Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVUSDW xmm k m64
|
|
// VPMOVUSDW xmm k xmm
|
|
// VPMOVUSDW xmm m64
|
|
// VPMOVUSDW xmm xmm
|
|
// VPMOVUSDW ymm k m128
|
|
// VPMOVUSDW ymm k xmm
|
|
// VPMOVUSDW ymm m128
|
|
// VPMOVUSDW ymm xmm
|
|
// VPMOVUSDW zmm k m256
|
|
// VPMOVUSDW zmm k ymm
|
|
// VPMOVUSDW zmm m256
|
|
// VPMOVUSDW zmm ymm
|
|
//
|
|
// Construct and append a VPMOVUSDW instruction to the active function.
|
|
func (c *Context) VPMOVUSDW(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPMOVUSDW(ops...))
|
|
}
|
|
|
|
// VPMOVUSDW: Down Convert Packed Doubleword Values to Word Values with Unsigned Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVUSDW xmm k m64
|
|
// VPMOVUSDW xmm k xmm
|
|
// VPMOVUSDW xmm m64
|
|
// VPMOVUSDW xmm xmm
|
|
// VPMOVUSDW ymm k m128
|
|
// VPMOVUSDW ymm k xmm
|
|
// VPMOVUSDW ymm m128
|
|
// VPMOVUSDW ymm xmm
|
|
// VPMOVUSDW zmm k m256
|
|
// VPMOVUSDW zmm k ymm
|
|
// VPMOVUSDW zmm m256
|
|
// VPMOVUSDW zmm ymm
|
|
//
|
|
// Construct and append a VPMOVUSDW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMOVUSDW(ops ...operand.Op) { ctx.VPMOVUSDW(ops...) }
|
|
|
|
// VPMOVUSDW_Z: Down Convert Packed Doubleword Values to Word Values with Unsigned Saturation (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVUSDW.Z xmm k m64
|
|
// VPMOVUSDW.Z xmm k xmm
|
|
// VPMOVUSDW.Z ymm k m128
|
|
// VPMOVUSDW.Z ymm k xmm
|
|
// VPMOVUSDW.Z zmm k m256
|
|
// VPMOVUSDW.Z zmm k ymm
|
|
//
|
|
// Construct and append a VPMOVUSDW.Z instruction to the active function.
|
|
func (c *Context) VPMOVUSDW_Z(xyz, k, mxy operand.Op) {
|
|
c.addinstruction(x86.VPMOVUSDW_Z(xyz, k, mxy))
|
|
}
|
|
|
|
// VPMOVUSDW_Z: Down Convert Packed Doubleword Values to Word Values with Unsigned Saturation (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVUSDW.Z xmm k m64
|
|
// VPMOVUSDW.Z xmm k xmm
|
|
// VPMOVUSDW.Z ymm k m128
|
|
// VPMOVUSDW.Z ymm k xmm
|
|
// VPMOVUSDW.Z zmm k m256
|
|
// VPMOVUSDW.Z zmm k ymm
|
|
//
|
|
// Construct and append a VPMOVUSDW.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMOVUSDW_Z(xyz, k, mxy operand.Op) { ctx.VPMOVUSDW_Z(xyz, k, mxy) }
|
|
|
|
// VPMOVUSQB: Down Convert Packed Quadword Values to Byte Values with Unsigned Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVUSQB xmm k m16
|
|
// VPMOVUSQB xmm k xmm
|
|
// VPMOVUSQB xmm m16
|
|
// VPMOVUSQB xmm xmm
|
|
// VPMOVUSQB ymm k m32
|
|
// VPMOVUSQB ymm k xmm
|
|
// VPMOVUSQB ymm m32
|
|
// VPMOVUSQB ymm xmm
|
|
// VPMOVUSQB zmm k m64
|
|
// VPMOVUSQB zmm k xmm
|
|
// VPMOVUSQB zmm m64
|
|
// VPMOVUSQB zmm xmm
|
|
//
|
|
// Construct and append a VPMOVUSQB instruction to the active function.
|
|
func (c *Context) VPMOVUSQB(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPMOVUSQB(ops...))
|
|
}
|
|
|
|
// VPMOVUSQB: Down Convert Packed Quadword Values to Byte Values with Unsigned Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVUSQB xmm k m16
|
|
// VPMOVUSQB xmm k xmm
|
|
// VPMOVUSQB xmm m16
|
|
// VPMOVUSQB xmm xmm
|
|
// VPMOVUSQB ymm k m32
|
|
// VPMOVUSQB ymm k xmm
|
|
// VPMOVUSQB ymm m32
|
|
// VPMOVUSQB ymm xmm
|
|
// VPMOVUSQB zmm k m64
|
|
// VPMOVUSQB zmm k xmm
|
|
// VPMOVUSQB zmm m64
|
|
// VPMOVUSQB zmm xmm
|
|
//
|
|
// Construct and append a VPMOVUSQB instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMOVUSQB(ops ...operand.Op) { ctx.VPMOVUSQB(ops...) }
|
|
|
|
// VPMOVUSQB_Z: Down Convert Packed Quadword Values to Byte Values with Unsigned Saturation (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVUSQB.Z xmm k m16
|
|
// VPMOVUSQB.Z xmm k xmm
|
|
// VPMOVUSQB.Z ymm k m32
|
|
// VPMOVUSQB.Z ymm k xmm
|
|
// VPMOVUSQB.Z zmm k m64
|
|
// VPMOVUSQB.Z zmm k xmm
|
|
//
|
|
// Construct and append a VPMOVUSQB.Z instruction to the active function.
|
|
func (c *Context) VPMOVUSQB_Z(xyz, k, mx operand.Op) {
|
|
c.addinstruction(x86.VPMOVUSQB_Z(xyz, k, mx))
|
|
}
|
|
|
|
// VPMOVUSQB_Z: Down Convert Packed Quadword Values to Byte Values with Unsigned Saturation (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVUSQB.Z xmm k m16
|
|
// VPMOVUSQB.Z xmm k xmm
|
|
// VPMOVUSQB.Z ymm k m32
|
|
// VPMOVUSQB.Z ymm k xmm
|
|
// VPMOVUSQB.Z zmm k m64
|
|
// VPMOVUSQB.Z zmm k xmm
|
|
//
|
|
// Construct and append a VPMOVUSQB.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMOVUSQB_Z(xyz, k, mx operand.Op) { ctx.VPMOVUSQB_Z(xyz, k, mx) }
|
|
|
|
// VPMOVUSQD: Down Convert Packed Quadword Values to Doubleword Values with Unsigned Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVUSQD xmm k m64
|
|
// VPMOVUSQD xmm k xmm
|
|
// VPMOVUSQD xmm m64
|
|
// VPMOVUSQD xmm xmm
|
|
// VPMOVUSQD ymm k m128
|
|
// VPMOVUSQD ymm k xmm
|
|
// VPMOVUSQD ymm m128
|
|
// VPMOVUSQD ymm xmm
|
|
// VPMOVUSQD zmm k m256
|
|
// VPMOVUSQD zmm k ymm
|
|
// VPMOVUSQD zmm m256
|
|
// VPMOVUSQD zmm ymm
|
|
//
|
|
// Construct and append a VPMOVUSQD instruction to the active function.
|
|
func (c *Context) VPMOVUSQD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPMOVUSQD(ops...))
|
|
}
|
|
|
|
// VPMOVUSQD: Down Convert Packed Quadword Values to Doubleword Values with Unsigned Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVUSQD xmm k m64
|
|
// VPMOVUSQD xmm k xmm
|
|
// VPMOVUSQD xmm m64
|
|
// VPMOVUSQD xmm xmm
|
|
// VPMOVUSQD ymm k m128
|
|
// VPMOVUSQD ymm k xmm
|
|
// VPMOVUSQD ymm m128
|
|
// VPMOVUSQD ymm xmm
|
|
// VPMOVUSQD zmm k m256
|
|
// VPMOVUSQD zmm k ymm
|
|
// VPMOVUSQD zmm m256
|
|
// VPMOVUSQD zmm ymm
|
|
//
|
|
// Construct and append a VPMOVUSQD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMOVUSQD(ops ...operand.Op) { ctx.VPMOVUSQD(ops...) }
|
|
|
|
// VPMOVUSQD_Z: Down Convert Packed Quadword Values to Doubleword Values with Unsigned Saturation (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVUSQD.Z xmm k m64
|
|
// VPMOVUSQD.Z xmm k xmm
|
|
// VPMOVUSQD.Z ymm k m128
|
|
// VPMOVUSQD.Z ymm k xmm
|
|
// VPMOVUSQD.Z zmm k m256
|
|
// VPMOVUSQD.Z zmm k ymm
|
|
//
|
|
// Construct and append a VPMOVUSQD.Z instruction to the active function.
|
|
func (c *Context) VPMOVUSQD_Z(xyz, k, mxy operand.Op) {
|
|
c.addinstruction(x86.VPMOVUSQD_Z(xyz, k, mxy))
|
|
}
|
|
|
|
// VPMOVUSQD_Z: Down Convert Packed Quadword Values to Doubleword Values with Unsigned Saturation (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVUSQD.Z xmm k m64
|
|
// VPMOVUSQD.Z xmm k xmm
|
|
// VPMOVUSQD.Z ymm k m128
|
|
// VPMOVUSQD.Z ymm k xmm
|
|
// VPMOVUSQD.Z zmm k m256
|
|
// VPMOVUSQD.Z zmm k ymm
|
|
//
|
|
// Construct and append a VPMOVUSQD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMOVUSQD_Z(xyz, k, mxy operand.Op) { ctx.VPMOVUSQD_Z(xyz, k, mxy) }
|
|
|
|
// VPMOVUSQW: Down Convert Packed Quadword Values to Word Values with Unsigned Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVUSQW xmm k m32
|
|
// VPMOVUSQW xmm k xmm
|
|
// VPMOVUSQW xmm m32
|
|
// VPMOVUSQW xmm xmm
|
|
// VPMOVUSQW ymm k m64
|
|
// VPMOVUSQW ymm k xmm
|
|
// VPMOVUSQW ymm m64
|
|
// VPMOVUSQW ymm xmm
|
|
// VPMOVUSQW zmm k m128
|
|
// VPMOVUSQW zmm k xmm
|
|
// VPMOVUSQW zmm m128
|
|
// VPMOVUSQW zmm xmm
|
|
//
|
|
// Construct and append a VPMOVUSQW instruction to the active function.
|
|
func (c *Context) VPMOVUSQW(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPMOVUSQW(ops...))
|
|
}
|
|
|
|
// VPMOVUSQW: Down Convert Packed Quadword Values to Word Values with Unsigned Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVUSQW xmm k m32
|
|
// VPMOVUSQW xmm k xmm
|
|
// VPMOVUSQW xmm m32
|
|
// VPMOVUSQW xmm xmm
|
|
// VPMOVUSQW ymm k m64
|
|
// VPMOVUSQW ymm k xmm
|
|
// VPMOVUSQW ymm m64
|
|
// VPMOVUSQW ymm xmm
|
|
// VPMOVUSQW zmm k m128
|
|
// VPMOVUSQW zmm k xmm
|
|
// VPMOVUSQW zmm m128
|
|
// VPMOVUSQW zmm xmm
|
|
//
|
|
// Construct and append a VPMOVUSQW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMOVUSQW(ops ...operand.Op) { ctx.VPMOVUSQW(ops...) }
|
|
|
|
// VPMOVUSQW_Z: Down Convert Packed Quadword Values to Word Values with Unsigned Saturation (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVUSQW.Z xmm k m32
|
|
// VPMOVUSQW.Z xmm k xmm
|
|
// VPMOVUSQW.Z ymm k m64
|
|
// VPMOVUSQW.Z ymm k xmm
|
|
// VPMOVUSQW.Z zmm k m128
|
|
// VPMOVUSQW.Z zmm k xmm
|
|
//
|
|
// Construct and append a VPMOVUSQW.Z instruction to the active function.
|
|
func (c *Context) VPMOVUSQW_Z(xyz, k, mx operand.Op) {
|
|
c.addinstruction(x86.VPMOVUSQW_Z(xyz, k, mx))
|
|
}
|
|
|
|
// VPMOVUSQW_Z: Down Convert Packed Quadword Values to Word Values with Unsigned Saturation (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVUSQW.Z xmm k m32
|
|
// VPMOVUSQW.Z xmm k xmm
|
|
// VPMOVUSQW.Z ymm k m64
|
|
// VPMOVUSQW.Z ymm k xmm
|
|
// VPMOVUSQW.Z zmm k m128
|
|
// VPMOVUSQW.Z zmm k xmm
|
|
//
|
|
// Construct and append a VPMOVUSQW.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMOVUSQW_Z(xyz, k, mx operand.Op) { ctx.VPMOVUSQW_Z(xyz, k, mx) }
|
|
|
|
// VPMOVUSWB: Down Convert Packed Word Values to Byte Values with Unsigned Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVUSWB xmm k m64
|
|
// VPMOVUSWB xmm k xmm
|
|
// VPMOVUSWB xmm m64
|
|
// VPMOVUSWB xmm xmm
|
|
// VPMOVUSWB ymm k m128
|
|
// VPMOVUSWB ymm k xmm
|
|
// VPMOVUSWB ymm m128
|
|
// VPMOVUSWB ymm xmm
|
|
// VPMOVUSWB zmm k m256
|
|
// VPMOVUSWB zmm k ymm
|
|
// VPMOVUSWB zmm m256
|
|
// VPMOVUSWB zmm ymm
|
|
//
|
|
// Construct and append a VPMOVUSWB instruction to the active function.
|
|
func (c *Context) VPMOVUSWB(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPMOVUSWB(ops...))
|
|
}
|
|
|
|
// VPMOVUSWB: Down Convert Packed Word Values to Byte Values with Unsigned Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVUSWB xmm k m64
|
|
// VPMOVUSWB xmm k xmm
|
|
// VPMOVUSWB xmm m64
|
|
// VPMOVUSWB xmm xmm
|
|
// VPMOVUSWB ymm k m128
|
|
// VPMOVUSWB ymm k xmm
|
|
// VPMOVUSWB ymm m128
|
|
// VPMOVUSWB ymm xmm
|
|
// VPMOVUSWB zmm k m256
|
|
// VPMOVUSWB zmm k ymm
|
|
// VPMOVUSWB zmm m256
|
|
// VPMOVUSWB zmm ymm
|
|
//
|
|
// Construct and append a VPMOVUSWB instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMOVUSWB(ops ...operand.Op) { ctx.VPMOVUSWB(ops...) }
|
|
|
|
// VPMOVUSWB_Z: Down Convert Packed Word Values to Byte Values with Unsigned Saturation (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVUSWB.Z xmm k m64
|
|
// VPMOVUSWB.Z xmm k xmm
|
|
// VPMOVUSWB.Z ymm k m128
|
|
// VPMOVUSWB.Z ymm k xmm
|
|
// VPMOVUSWB.Z zmm k m256
|
|
// VPMOVUSWB.Z zmm k ymm
|
|
//
|
|
// Construct and append a VPMOVUSWB.Z instruction to the active function.
|
|
func (c *Context) VPMOVUSWB_Z(xyz, k, mxy operand.Op) {
|
|
c.addinstruction(x86.VPMOVUSWB_Z(xyz, k, mxy))
|
|
}
|
|
|
|
// VPMOVUSWB_Z: Down Convert Packed Word Values to Byte Values with Unsigned Saturation (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVUSWB.Z xmm k m64
|
|
// VPMOVUSWB.Z xmm k xmm
|
|
// VPMOVUSWB.Z ymm k m128
|
|
// VPMOVUSWB.Z ymm k xmm
|
|
// VPMOVUSWB.Z zmm k m256
|
|
// VPMOVUSWB.Z zmm k ymm
|
|
//
|
|
// Construct and append a VPMOVUSWB.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMOVUSWB_Z(xyz, k, mxy operand.Op) { ctx.VPMOVUSWB_Z(xyz, k, mxy) }
|
|
|
|
// VPMOVW2M: Move Signs of Packed Word Integers to Mask Register.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVW2M xmm k
|
|
// VPMOVW2M ymm k
|
|
// VPMOVW2M zmm k
|
|
//
|
|
// Construct and append a VPMOVW2M instruction to the active function.
|
|
func (c *Context) VPMOVW2M(xyz, k operand.Op) {
|
|
c.addinstruction(x86.VPMOVW2M(xyz, k))
|
|
}
|
|
|
|
// VPMOVW2M: Move Signs of Packed Word Integers to Mask Register.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVW2M xmm k
|
|
// VPMOVW2M ymm k
|
|
// VPMOVW2M zmm k
|
|
//
|
|
// Construct and append a VPMOVW2M instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMOVW2M(xyz, k operand.Op) { ctx.VPMOVW2M(xyz, k) }
|
|
|
|
// VPMOVWB: Down Convert Packed Word Values to Byte Values with Truncation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVWB xmm k m64
|
|
// VPMOVWB xmm k xmm
|
|
// VPMOVWB xmm m64
|
|
// VPMOVWB xmm xmm
|
|
// VPMOVWB ymm k m128
|
|
// VPMOVWB ymm k xmm
|
|
// VPMOVWB ymm m128
|
|
// VPMOVWB ymm xmm
|
|
// VPMOVWB zmm k m256
|
|
// VPMOVWB zmm k ymm
|
|
// VPMOVWB zmm m256
|
|
// VPMOVWB zmm ymm
|
|
//
|
|
// Construct and append a VPMOVWB instruction to the active function.
|
|
func (c *Context) VPMOVWB(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPMOVWB(ops...))
|
|
}
|
|
|
|
// VPMOVWB: Down Convert Packed Word Values to Byte Values with Truncation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVWB xmm k m64
|
|
// VPMOVWB xmm k xmm
|
|
// VPMOVWB xmm m64
|
|
// VPMOVWB xmm xmm
|
|
// VPMOVWB ymm k m128
|
|
// VPMOVWB ymm k xmm
|
|
// VPMOVWB ymm m128
|
|
// VPMOVWB ymm xmm
|
|
// VPMOVWB zmm k m256
|
|
// VPMOVWB zmm k ymm
|
|
// VPMOVWB zmm m256
|
|
// VPMOVWB zmm ymm
|
|
//
|
|
// Construct and append a VPMOVWB instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMOVWB(ops ...operand.Op) { ctx.VPMOVWB(ops...) }
|
|
|
|
// VPMOVWB_Z: Down Convert Packed Word Values to Byte Values with Truncation (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVWB.Z xmm k m64
|
|
// VPMOVWB.Z xmm k xmm
|
|
// VPMOVWB.Z ymm k m128
|
|
// VPMOVWB.Z ymm k xmm
|
|
// VPMOVWB.Z zmm k m256
|
|
// VPMOVWB.Z zmm k ymm
|
|
//
|
|
// Construct and append a VPMOVWB.Z instruction to the active function.
|
|
func (c *Context) VPMOVWB_Z(xyz, k, mxy operand.Op) {
|
|
c.addinstruction(x86.VPMOVWB_Z(xyz, k, mxy))
|
|
}
|
|
|
|
// VPMOVWB_Z: Down Convert Packed Word Values to Byte Values with Truncation (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVWB.Z xmm k m64
|
|
// VPMOVWB.Z xmm k xmm
|
|
// VPMOVWB.Z ymm k m128
|
|
// VPMOVWB.Z ymm k xmm
|
|
// VPMOVWB.Z zmm k m256
|
|
// VPMOVWB.Z zmm k ymm
|
|
//
|
|
// Construct and append a VPMOVWB.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMOVWB_Z(xyz, k, mxy operand.Op) { ctx.VPMOVWB_Z(xyz, k, mxy) }
|
|
|
|
// VPMOVZXBD: Move Packed Byte Integers to Doubleword Integers with Zero Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVZXBD m64 ymm
|
|
// VPMOVZXBD xmm ymm
|
|
// VPMOVZXBD m32 xmm
|
|
// VPMOVZXBD xmm xmm
|
|
// VPMOVZXBD m32 k xmm
|
|
// VPMOVZXBD m64 k ymm
|
|
// VPMOVZXBD xmm k xmm
|
|
// VPMOVZXBD xmm k ymm
|
|
// VPMOVZXBD m128 k zmm
|
|
// VPMOVZXBD m128 zmm
|
|
// VPMOVZXBD xmm k zmm
|
|
// VPMOVZXBD xmm zmm
|
|
//
|
|
// Construct and append a VPMOVZXBD instruction to the active function.
|
|
func (c *Context) VPMOVZXBD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPMOVZXBD(ops...))
|
|
}
|
|
|
|
// VPMOVZXBD: Move Packed Byte Integers to Doubleword Integers with Zero Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVZXBD m64 ymm
|
|
// VPMOVZXBD xmm ymm
|
|
// VPMOVZXBD m32 xmm
|
|
// VPMOVZXBD xmm xmm
|
|
// VPMOVZXBD m32 k xmm
|
|
// VPMOVZXBD m64 k ymm
|
|
// VPMOVZXBD xmm k xmm
|
|
// VPMOVZXBD xmm k ymm
|
|
// VPMOVZXBD m128 k zmm
|
|
// VPMOVZXBD m128 zmm
|
|
// VPMOVZXBD xmm k zmm
|
|
// VPMOVZXBD xmm zmm
|
|
//
|
|
// Construct and append a VPMOVZXBD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMOVZXBD(ops ...operand.Op) { ctx.VPMOVZXBD(ops...) }
|
|
|
|
// VPMOVZXBD_Z: Move Packed Byte Integers to Doubleword Integers with Zero Extension (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVZXBD.Z m32 k xmm
|
|
// VPMOVZXBD.Z m64 k ymm
|
|
// VPMOVZXBD.Z xmm k xmm
|
|
// VPMOVZXBD.Z xmm k ymm
|
|
// VPMOVZXBD.Z m128 k zmm
|
|
// VPMOVZXBD.Z xmm k zmm
|
|
//
|
|
// Construct and append a VPMOVZXBD.Z instruction to the active function.
|
|
func (c *Context) VPMOVZXBD_Z(mx, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VPMOVZXBD_Z(mx, k, xyz))
|
|
}
|
|
|
|
// VPMOVZXBD_Z: Move Packed Byte Integers to Doubleword Integers with Zero Extension (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVZXBD.Z m32 k xmm
|
|
// VPMOVZXBD.Z m64 k ymm
|
|
// VPMOVZXBD.Z xmm k xmm
|
|
// VPMOVZXBD.Z xmm k ymm
|
|
// VPMOVZXBD.Z m128 k zmm
|
|
// VPMOVZXBD.Z xmm k zmm
|
|
//
|
|
// Construct and append a VPMOVZXBD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMOVZXBD_Z(mx, k, xyz operand.Op) { ctx.VPMOVZXBD_Z(mx, k, xyz) }
|
|
|
|
// VPMOVZXBQ: Move Packed Byte Integers to Quadword Integers with Zero Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVZXBQ m32 ymm
|
|
// VPMOVZXBQ xmm ymm
|
|
// VPMOVZXBQ m16 xmm
|
|
// VPMOVZXBQ xmm xmm
|
|
// VPMOVZXBQ m16 k xmm
|
|
// VPMOVZXBQ m32 k ymm
|
|
// VPMOVZXBQ xmm k xmm
|
|
// VPMOVZXBQ xmm k ymm
|
|
// VPMOVZXBQ m64 k zmm
|
|
// VPMOVZXBQ m64 zmm
|
|
// VPMOVZXBQ xmm k zmm
|
|
// VPMOVZXBQ xmm zmm
|
|
//
|
|
// Construct and append a VPMOVZXBQ instruction to the active function.
|
|
func (c *Context) VPMOVZXBQ(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPMOVZXBQ(ops...))
|
|
}
|
|
|
|
// VPMOVZXBQ: Move Packed Byte Integers to Quadword Integers with Zero Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVZXBQ m32 ymm
|
|
// VPMOVZXBQ xmm ymm
|
|
// VPMOVZXBQ m16 xmm
|
|
// VPMOVZXBQ xmm xmm
|
|
// VPMOVZXBQ m16 k xmm
|
|
// VPMOVZXBQ m32 k ymm
|
|
// VPMOVZXBQ xmm k xmm
|
|
// VPMOVZXBQ xmm k ymm
|
|
// VPMOVZXBQ m64 k zmm
|
|
// VPMOVZXBQ m64 zmm
|
|
// VPMOVZXBQ xmm k zmm
|
|
// VPMOVZXBQ xmm zmm
|
|
//
|
|
// Construct and append a VPMOVZXBQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMOVZXBQ(ops ...operand.Op) { ctx.VPMOVZXBQ(ops...) }
|
|
|
|
// VPMOVZXBQ_Z: Move Packed Byte Integers to Quadword Integers with Zero Extension (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVZXBQ.Z m16 k xmm
|
|
// VPMOVZXBQ.Z m32 k ymm
|
|
// VPMOVZXBQ.Z xmm k xmm
|
|
// VPMOVZXBQ.Z xmm k ymm
|
|
// VPMOVZXBQ.Z m64 k zmm
|
|
// VPMOVZXBQ.Z xmm k zmm
|
|
//
|
|
// Construct and append a VPMOVZXBQ.Z instruction to the active function.
|
|
func (c *Context) VPMOVZXBQ_Z(mx, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VPMOVZXBQ_Z(mx, k, xyz))
|
|
}
|
|
|
|
// VPMOVZXBQ_Z: Move Packed Byte Integers to Quadword Integers with Zero Extension (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVZXBQ.Z m16 k xmm
|
|
// VPMOVZXBQ.Z m32 k ymm
|
|
// VPMOVZXBQ.Z xmm k xmm
|
|
// VPMOVZXBQ.Z xmm k ymm
|
|
// VPMOVZXBQ.Z m64 k zmm
|
|
// VPMOVZXBQ.Z xmm k zmm
|
|
//
|
|
// Construct and append a VPMOVZXBQ.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMOVZXBQ_Z(mx, k, xyz operand.Op) { ctx.VPMOVZXBQ_Z(mx, k, xyz) }
|
|
|
|
// VPMOVZXBW: Move Packed Byte Integers to Word Integers with Zero Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVZXBW m128 ymm
|
|
// VPMOVZXBW xmm ymm
|
|
// VPMOVZXBW m64 xmm
|
|
// VPMOVZXBW xmm xmm
|
|
// VPMOVZXBW m128 k ymm
|
|
// VPMOVZXBW m64 k xmm
|
|
// VPMOVZXBW xmm k xmm
|
|
// VPMOVZXBW xmm k ymm
|
|
// VPMOVZXBW m256 k zmm
|
|
// VPMOVZXBW m256 zmm
|
|
// VPMOVZXBW ymm k zmm
|
|
// VPMOVZXBW ymm zmm
|
|
//
|
|
// Construct and append a VPMOVZXBW instruction to the active function.
|
|
func (c *Context) VPMOVZXBW(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPMOVZXBW(ops...))
|
|
}
|
|
|
|
// VPMOVZXBW: Move Packed Byte Integers to Word Integers with Zero Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVZXBW m128 ymm
|
|
// VPMOVZXBW xmm ymm
|
|
// VPMOVZXBW m64 xmm
|
|
// VPMOVZXBW xmm xmm
|
|
// VPMOVZXBW m128 k ymm
|
|
// VPMOVZXBW m64 k xmm
|
|
// VPMOVZXBW xmm k xmm
|
|
// VPMOVZXBW xmm k ymm
|
|
// VPMOVZXBW m256 k zmm
|
|
// VPMOVZXBW m256 zmm
|
|
// VPMOVZXBW ymm k zmm
|
|
// VPMOVZXBW ymm zmm
|
|
//
|
|
// Construct and append a VPMOVZXBW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMOVZXBW(ops ...operand.Op) { ctx.VPMOVZXBW(ops...) }
|
|
|
|
// VPMOVZXBW_Z: Move Packed Byte Integers to Word Integers with Zero Extension (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVZXBW.Z m128 k ymm
|
|
// VPMOVZXBW.Z m64 k xmm
|
|
// VPMOVZXBW.Z xmm k xmm
|
|
// VPMOVZXBW.Z xmm k ymm
|
|
// VPMOVZXBW.Z m256 k zmm
|
|
// VPMOVZXBW.Z ymm k zmm
|
|
//
|
|
// Construct and append a VPMOVZXBW.Z instruction to the active function.
|
|
func (c *Context) VPMOVZXBW_Z(mxy, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VPMOVZXBW_Z(mxy, k, xyz))
|
|
}
|
|
|
|
// VPMOVZXBW_Z: Move Packed Byte Integers to Word Integers with Zero Extension (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVZXBW.Z m128 k ymm
|
|
// VPMOVZXBW.Z m64 k xmm
|
|
// VPMOVZXBW.Z xmm k xmm
|
|
// VPMOVZXBW.Z xmm k ymm
|
|
// VPMOVZXBW.Z m256 k zmm
|
|
// VPMOVZXBW.Z ymm k zmm
|
|
//
|
|
// Construct and append a VPMOVZXBW.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMOVZXBW_Z(mxy, k, xyz operand.Op) { ctx.VPMOVZXBW_Z(mxy, k, xyz) }
|
|
|
|
// VPMOVZXDQ: Move Packed Doubleword Integers to Quadword Integers with Zero Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVZXDQ m128 ymm
|
|
// VPMOVZXDQ xmm ymm
|
|
// VPMOVZXDQ m64 xmm
|
|
// VPMOVZXDQ xmm xmm
|
|
// VPMOVZXDQ m128 k ymm
|
|
// VPMOVZXDQ m64 k xmm
|
|
// VPMOVZXDQ xmm k xmm
|
|
// VPMOVZXDQ xmm k ymm
|
|
// VPMOVZXDQ m256 k zmm
|
|
// VPMOVZXDQ m256 zmm
|
|
// VPMOVZXDQ ymm k zmm
|
|
// VPMOVZXDQ ymm zmm
|
|
//
|
|
// Construct and append a VPMOVZXDQ instruction to the active function.
|
|
func (c *Context) VPMOVZXDQ(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPMOVZXDQ(ops...))
|
|
}
|
|
|
|
// VPMOVZXDQ: Move Packed Doubleword Integers to Quadword Integers with Zero Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVZXDQ m128 ymm
|
|
// VPMOVZXDQ xmm ymm
|
|
// VPMOVZXDQ m64 xmm
|
|
// VPMOVZXDQ xmm xmm
|
|
// VPMOVZXDQ m128 k ymm
|
|
// VPMOVZXDQ m64 k xmm
|
|
// VPMOVZXDQ xmm k xmm
|
|
// VPMOVZXDQ xmm k ymm
|
|
// VPMOVZXDQ m256 k zmm
|
|
// VPMOVZXDQ m256 zmm
|
|
// VPMOVZXDQ ymm k zmm
|
|
// VPMOVZXDQ ymm zmm
|
|
//
|
|
// Construct and append a VPMOVZXDQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMOVZXDQ(ops ...operand.Op) { ctx.VPMOVZXDQ(ops...) }
|
|
|
|
// VPMOVZXDQ_Z: Move Packed Doubleword Integers to Quadword Integers with Zero Extension (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVZXDQ.Z m128 k ymm
|
|
// VPMOVZXDQ.Z m64 k xmm
|
|
// VPMOVZXDQ.Z xmm k xmm
|
|
// VPMOVZXDQ.Z xmm k ymm
|
|
// VPMOVZXDQ.Z m256 k zmm
|
|
// VPMOVZXDQ.Z ymm k zmm
|
|
//
|
|
// Construct and append a VPMOVZXDQ.Z instruction to the active function.
|
|
func (c *Context) VPMOVZXDQ_Z(mxy, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VPMOVZXDQ_Z(mxy, k, xyz))
|
|
}
|
|
|
|
// VPMOVZXDQ_Z: Move Packed Doubleword Integers to Quadword Integers with Zero Extension (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVZXDQ.Z m128 k ymm
|
|
// VPMOVZXDQ.Z m64 k xmm
|
|
// VPMOVZXDQ.Z xmm k xmm
|
|
// VPMOVZXDQ.Z xmm k ymm
|
|
// VPMOVZXDQ.Z m256 k zmm
|
|
// VPMOVZXDQ.Z ymm k zmm
|
|
//
|
|
// Construct and append a VPMOVZXDQ.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMOVZXDQ_Z(mxy, k, xyz operand.Op) { ctx.VPMOVZXDQ_Z(mxy, k, xyz) }
|
|
|
|
// VPMOVZXWD: Move Packed Word Integers to Doubleword Integers with Zero Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVZXWD m128 ymm
|
|
// VPMOVZXWD xmm ymm
|
|
// VPMOVZXWD m64 xmm
|
|
// VPMOVZXWD xmm xmm
|
|
// VPMOVZXWD m128 k ymm
|
|
// VPMOVZXWD m64 k xmm
|
|
// VPMOVZXWD xmm k xmm
|
|
// VPMOVZXWD xmm k ymm
|
|
// VPMOVZXWD m256 k zmm
|
|
// VPMOVZXWD m256 zmm
|
|
// VPMOVZXWD ymm k zmm
|
|
// VPMOVZXWD ymm zmm
|
|
//
|
|
// Construct and append a VPMOVZXWD instruction to the active function.
|
|
func (c *Context) VPMOVZXWD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPMOVZXWD(ops...))
|
|
}
|
|
|
|
// VPMOVZXWD: Move Packed Word Integers to Doubleword Integers with Zero Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVZXWD m128 ymm
|
|
// VPMOVZXWD xmm ymm
|
|
// VPMOVZXWD m64 xmm
|
|
// VPMOVZXWD xmm xmm
|
|
// VPMOVZXWD m128 k ymm
|
|
// VPMOVZXWD m64 k xmm
|
|
// VPMOVZXWD xmm k xmm
|
|
// VPMOVZXWD xmm k ymm
|
|
// VPMOVZXWD m256 k zmm
|
|
// VPMOVZXWD m256 zmm
|
|
// VPMOVZXWD ymm k zmm
|
|
// VPMOVZXWD ymm zmm
|
|
//
|
|
// Construct and append a VPMOVZXWD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMOVZXWD(ops ...operand.Op) { ctx.VPMOVZXWD(ops...) }
|
|
|
|
// VPMOVZXWD_Z: Move Packed Word Integers to Doubleword Integers with Zero Extension (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVZXWD.Z m128 k ymm
|
|
// VPMOVZXWD.Z m64 k xmm
|
|
// VPMOVZXWD.Z xmm k xmm
|
|
// VPMOVZXWD.Z xmm k ymm
|
|
// VPMOVZXWD.Z m256 k zmm
|
|
// VPMOVZXWD.Z ymm k zmm
|
|
//
|
|
// Construct and append a VPMOVZXWD.Z instruction to the active function.
|
|
func (c *Context) VPMOVZXWD_Z(mxy, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VPMOVZXWD_Z(mxy, k, xyz))
|
|
}
|
|
|
|
// VPMOVZXWD_Z: Move Packed Word Integers to Doubleword Integers with Zero Extension (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVZXWD.Z m128 k ymm
|
|
// VPMOVZXWD.Z m64 k xmm
|
|
// VPMOVZXWD.Z xmm k xmm
|
|
// VPMOVZXWD.Z xmm k ymm
|
|
// VPMOVZXWD.Z m256 k zmm
|
|
// VPMOVZXWD.Z ymm k zmm
|
|
//
|
|
// Construct and append a VPMOVZXWD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMOVZXWD_Z(mxy, k, xyz operand.Op) { ctx.VPMOVZXWD_Z(mxy, k, xyz) }
|
|
|
|
// VPMOVZXWQ: Move Packed Word Integers to Quadword Integers with Zero Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVZXWQ m64 ymm
|
|
// VPMOVZXWQ xmm ymm
|
|
// VPMOVZXWQ m32 xmm
|
|
// VPMOVZXWQ xmm xmm
|
|
// VPMOVZXWQ m32 k xmm
|
|
// VPMOVZXWQ m64 k ymm
|
|
// VPMOVZXWQ xmm k xmm
|
|
// VPMOVZXWQ xmm k ymm
|
|
// VPMOVZXWQ m128 k zmm
|
|
// VPMOVZXWQ m128 zmm
|
|
// VPMOVZXWQ xmm k zmm
|
|
// VPMOVZXWQ xmm zmm
|
|
//
|
|
// Construct and append a VPMOVZXWQ instruction to the active function.
|
|
func (c *Context) VPMOVZXWQ(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPMOVZXWQ(ops...))
|
|
}
|
|
|
|
// VPMOVZXWQ: Move Packed Word Integers to Quadword Integers with Zero Extension.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVZXWQ m64 ymm
|
|
// VPMOVZXWQ xmm ymm
|
|
// VPMOVZXWQ m32 xmm
|
|
// VPMOVZXWQ xmm xmm
|
|
// VPMOVZXWQ m32 k xmm
|
|
// VPMOVZXWQ m64 k ymm
|
|
// VPMOVZXWQ xmm k xmm
|
|
// VPMOVZXWQ xmm k ymm
|
|
// VPMOVZXWQ m128 k zmm
|
|
// VPMOVZXWQ m128 zmm
|
|
// VPMOVZXWQ xmm k zmm
|
|
// VPMOVZXWQ xmm zmm
|
|
//
|
|
// Construct and append a VPMOVZXWQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMOVZXWQ(ops ...operand.Op) { ctx.VPMOVZXWQ(ops...) }
|
|
|
|
// VPMOVZXWQ_Z: Move Packed Word Integers to Quadword Integers with Zero Extension (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVZXWQ.Z m32 k xmm
|
|
// VPMOVZXWQ.Z m64 k ymm
|
|
// VPMOVZXWQ.Z xmm k xmm
|
|
// VPMOVZXWQ.Z xmm k ymm
|
|
// VPMOVZXWQ.Z m128 k zmm
|
|
// VPMOVZXWQ.Z xmm k zmm
|
|
//
|
|
// Construct and append a VPMOVZXWQ.Z instruction to the active function.
|
|
func (c *Context) VPMOVZXWQ_Z(mx, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VPMOVZXWQ_Z(mx, k, xyz))
|
|
}
|
|
|
|
// VPMOVZXWQ_Z: Move Packed Word Integers to Quadword Integers with Zero Extension (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMOVZXWQ.Z m32 k xmm
|
|
// VPMOVZXWQ.Z m64 k ymm
|
|
// VPMOVZXWQ.Z xmm k xmm
|
|
// VPMOVZXWQ.Z xmm k ymm
|
|
// VPMOVZXWQ.Z m128 k zmm
|
|
// VPMOVZXWQ.Z xmm k zmm
|
|
//
|
|
// Construct and append a VPMOVZXWQ.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMOVZXWQ_Z(mx, k, xyz operand.Op) { ctx.VPMOVZXWQ_Z(mx, k, xyz) }
|
|
|
|
// VPMULDQ: Multiply Packed Signed Doubleword Integers and Store Quadword Result.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULDQ m256 ymm ymm
|
|
// VPMULDQ ymm ymm ymm
|
|
// VPMULDQ m128 xmm xmm
|
|
// VPMULDQ xmm xmm xmm
|
|
// VPMULDQ m128 xmm k xmm
|
|
// VPMULDQ m256 ymm k ymm
|
|
// VPMULDQ xmm xmm k xmm
|
|
// VPMULDQ ymm ymm k ymm
|
|
// VPMULDQ m512 zmm k zmm
|
|
// VPMULDQ m512 zmm zmm
|
|
// VPMULDQ zmm zmm k zmm
|
|
// VPMULDQ zmm zmm zmm
|
|
//
|
|
// Construct and append a VPMULDQ instruction to the active function.
|
|
func (c *Context) VPMULDQ(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPMULDQ(ops...))
|
|
}
|
|
|
|
// VPMULDQ: Multiply Packed Signed Doubleword Integers and Store Quadword Result.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULDQ m256 ymm ymm
|
|
// VPMULDQ ymm ymm ymm
|
|
// VPMULDQ m128 xmm xmm
|
|
// VPMULDQ xmm xmm xmm
|
|
// VPMULDQ m128 xmm k xmm
|
|
// VPMULDQ m256 ymm k ymm
|
|
// VPMULDQ xmm xmm k xmm
|
|
// VPMULDQ ymm ymm k ymm
|
|
// VPMULDQ m512 zmm k zmm
|
|
// VPMULDQ m512 zmm zmm
|
|
// VPMULDQ zmm zmm k zmm
|
|
// VPMULDQ zmm zmm zmm
|
|
//
|
|
// Construct and append a VPMULDQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMULDQ(ops ...operand.Op) { ctx.VPMULDQ(ops...) }
|
|
|
|
// VPMULDQ_BCST: Multiply Packed Signed Doubleword Integers and Store Quadword Result (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULDQ.BCST m64 xmm k xmm
|
|
// VPMULDQ.BCST m64 xmm xmm
|
|
// VPMULDQ.BCST m64 ymm k ymm
|
|
// VPMULDQ.BCST m64 ymm ymm
|
|
// VPMULDQ.BCST m64 zmm k zmm
|
|
// VPMULDQ.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VPMULDQ.BCST instruction to the active function.
|
|
func (c *Context) VPMULDQ_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPMULDQ_BCST(ops...))
|
|
}
|
|
|
|
// VPMULDQ_BCST: Multiply Packed Signed Doubleword Integers and Store Quadword Result (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULDQ.BCST m64 xmm k xmm
|
|
// VPMULDQ.BCST m64 xmm xmm
|
|
// VPMULDQ.BCST m64 ymm k ymm
|
|
// VPMULDQ.BCST m64 ymm ymm
|
|
// VPMULDQ.BCST m64 zmm k zmm
|
|
// VPMULDQ.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VPMULDQ.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMULDQ_BCST(ops ...operand.Op) { ctx.VPMULDQ_BCST(ops...) }
|
|
|
|
// VPMULDQ_BCST_Z: Multiply Packed Signed Doubleword Integers and Store Quadword Result (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULDQ.BCST.Z m64 xmm k xmm
|
|
// VPMULDQ.BCST.Z m64 ymm k ymm
|
|
// VPMULDQ.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VPMULDQ.BCST.Z instruction to the active function.
|
|
func (c *Context) VPMULDQ_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPMULDQ_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPMULDQ_BCST_Z: Multiply Packed Signed Doubleword Integers and Store Quadword Result (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULDQ.BCST.Z m64 xmm k xmm
|
|
// VPMULDQ.BCST.Z m64 ymm k ymm
|
|
// VPMULDQ.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VPMULDQ.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMULDQ_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VPMULDQ_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VPMULDQ_Z: Multiply Packed Signed Doubleword Integers and Store Quadword Result (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULDQ.Z m128 xmm k xmm
|
|
// VPMULDQ.Z m256 ymm k ymm
|
|
// VPMULDQ.Z xmm xmm k xmm
|
|
// VPMULDQ.Z ymm ymm k ymm
|
|
// VPMULDQ.Z m512 zmm k zmm
|
|
// VPMULDQ.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPMULDQ.Z instruction to the active function.
|
|
func (c *Context) VPMULDQ_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPMULDQ_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPMULDQ_Z: Multiply Packed Signed Doubleword Integers and Store Quadword Result (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULDQ.Z m128 xmm k xmm
|
|
// VPMULDQ.Z m256 ymm k ymm
|
|
// VPMULDQ.Z xmm xmm k xmm
|
|
// VPMULDQ.Z ymm ymm k ymm
|
|
// VPMULDQ.Z m512 zmm k zmm
|
|
// VPMULDQ.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPMULDQ.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMULDQ_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPMULDQ_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPMULHRSW: Packed Multiply Signed Word Integers and Store High Result with Round and Scale.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULHRSW m256 ymm ymm
|
|
// VPMULHRSW ymm ymm ymm
|
|
// VPMULHRSW m128 xmm xmm
|
|
// VPMULHRSW xmm xmm xmm
|
|
// VPMULHRSW m128 xmm k xmm
|
|
// VPMULHRSW m256 ymm k ymm
|
|
// VPMULHRSW xmm xmm k xmm
|
|
// VPMULHRSW ymm ymm k ymm
|
|
// VPMULHRSW m512 zmm k zmm
|
|
// VPMULHRSW m512 zmm zmm
|
|
// VPMULHRSW zmm zmm k zmm
|
|
// VPMULHRSW zmm zmm zmm
|
|
//
|
|
// Construct and append a VPMULHRSW instruction to the active function.
|
|
func (c *Context) VPMULHRSW(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPMULHRSW(ops...))
|
|
}
|
|
|
|
// VPMULHRSW: Packed Multiply Signed Word Integers and Store High Result with Round and Scale.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULHRSW m256 ymm ymm
|
|
// VPMULHRSW ymm ymm ymm
|
|
// VPMULHRSW m128 xmm xmm
|
|
// VPMULHRSW xmm xmm xmm
|
|
// VPMULHRSW m128 xmm k xmm
|
|
// VPMULHRSW m256 ymm k ymm
|
|
// VPMULHRSW xmm xmm k xmm
|
|
// VPMULHRSW ymm ymm k ymm
|
|
// VPMULHRSW m512 zmm k zmm
|
|
// VPMULHRSW m512 zmm zmm
|
|
// VPMULHRSW zmm zmm k zmm
|
|
// VPMULHRSW zmm zmm zmm
|
|
//
|
|
// Construct and append a VPMULHRSW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMULHRSW(ops ...operand.Op) { ctx.VPMULHRSW(ops...) }
|
|
|
|
// VPMULHRSW_Z: Packed Multiply Signed Word Integers and Store High Result with Round and Scale (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULHRSW.Z m128 xmm k xmm
|
|
// VPMULHRSW.Z m256 ymm k ymm
|
|
// VPMULHRSW.Z xmm xmm k xmm
|
|
// VPMULHRSW.Z ymm ymm k ymm
|
|
// VPMULHRSW.Z m512 zmm k zmm
|
|
// VPMULHRSW.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPMULHRSW.Z instruction to the active function.
|
|
func (c *Context) VPMULHRSW_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPMULHRSW_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPMULHRSW_Z: Packed Multiply Signed Word Integers and Store High Result with Round and Scale (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULHRSW.Z m128 xmm k xmm
|
|
// VPMULHRSW.Z m256 ymm k ymm
|
|
// VPMULHRSW.Z xmm xmm k xmm
|
|
// VPMULHRSW.Z ymm ymm k ymm
|
|
// VPMULHRSW.Z m512 zmm k zmm
|
|
// VPMULHRSW.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPMULHRSW.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMULHRSW_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPMULHRSW_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPMULHUW: Multiply Packed Unsigned Word Integers and Store High Result.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULHUW m256 ymm ymm
|
|
// VPMULHUW ymm ymm ymm
|
|
// VPMULHUW m128 xmm xmm
|
|
// VPMULHUW xmm xmm xmm
|
|
// VPMULHUW m128 xmm k xmm
|
|
// VPMULHUW m256 ymm k ymm
|
|
// VPMULHUW xmm xmm k xmm
|
|
// VPMULHUW ymm ymm k ymm
|
|
// VPMULHUW m512 zmm k zmm
|
|
// VPMULHUW m512 zmm zmm
|
|
// VPMULHUW zmm zmm k zmm
|
|
// VPMULHUW zmm zmm zmm
|
|
//
|
|
// Construct and append a VPMULHUW instruction to the active function.
|
|
func (c *Context) VPMULHUW(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPMULHUW(ops...))
|
|
}
|
|
|
|
// VPMULHUW: Multiply Packed Unsigned Word Integers and Store High Result.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULHUW m256 ymm ymm
|
|
// VPMULHUW ymm ymm ymm
|
|
// VPMULHUW m128 xmm xmm
|
|
// VPMULHUW xmm xmm xmm
|
|
// VPMULHUW m128 xmm k xmm
|
|
// VPMULHUW m256 ymm k ymm
|
|
// VPMULHUW xmm xmm k xmm
|
|
// VPMULHUW ymm ymm k ymm
|
|
// VPMULHUW m512 zmm k zmm
|
|
// VPMULHUW m512 zmm zmm
|
|
// VPMULHUW zmm zmm k zmm
|
|
// VPMULHUW zmm zmm zmm
|
|
//
|
|
// Construct and append a VPMULHUW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMULHUW(ops ...operand.Op) { ctx.VPMULHUW(ops...) }
|
|
|
|
// VPMULHUW_Z: Multiply Packed Unsigned Word Integers and Store High Result (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULHUW.Z m128 xmm k xmm
|
|
// VPMULHUW.Z m256 ymm k ymm
|
|
// VPMULHUW.Z xmm xmm k xmm
|
|
// VPMULHUW.Z ymm ymm k ymm
|
|
// VPMULHUW.Z m512 zmm k zmm
|
|
// VPMULHUW.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPMULHUW.Z instruction to the active function.
|
|
func (c *Context) VPMULHUW_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPMULHUW_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPMULHUW_Z: Multiply Packed Unsigned Word Integers and Store High Result (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULHUW.Z m128 xmm k xmm
|
|
// VPMULHUW.Z m256 ymm k ymm
|
|
// VPMULHUW.Z xmm xmm k xmm
|
|
// VPMULHUW.Z ymm ymm k ymm
|
|
// VPMULHUW.Z m512 zmm k zmm
|
|
// VPMULHUW.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPMULHUW.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMULHUW_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPMULHUW_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPMULHW: Multiply Packed Signed Word Integers and Store High Result.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULHW m256 ymm ymm
|
|
// VPMULHW ymm ymm ymm
|
|
// VPMULHW m128 xmm xmm
|
|
// VPMULHW xmm xmm xmm
|
|
// VPMULHW m128 xmm k xmm
|
|
// VPMULHW m256 ymm k ymm
|
|
// VPMULHW xmm xmm k xmm
|
|
// VPMULHW ymm ymm k ymm
|
|
// VPMULHW m512 zmm k zmm
|
|
// VPMULHW m512 zmm zmm
|
|
// VPMULHW zmm zmm k zmm
|
|
// VPMULHW zmm zmm zmm
|
|
//
|
|
// Construct and append a VPMULHW instruction to the active function.
|
|
func (c *Context) VPMULHW(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPMULHW(ops...))
|
|
}
|
|
|
|
// VPMULHW: Multiply Packed Signed Word Integers and Store High Result.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULHW m256 ymm ymm
|
|
// VPMULHW ymm ymm ymm
|
|
// VPMULHW m128 xmm xmm
|
|
// VPMULHW xmm xmm xmm
|
|
// VPMULHW m128 xmm k xmm
|
|
// VPMULHW m256 ymm k ymm
|
|
// VPMULHW xmm xmm k xmm
|
|
// VPMULHW ymm ymm k ymm
|
|
// VPMULHW m512 zmm k zmm
|
|
// VPMULHW m512 zmm zmm
|
|
// VPMULHW zmm zmm k zmm
|
|
// VPMULHW zmm zmm zmm
|
|
//
|
|
// Construct and append a VPMULHW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMULHW(ops ...operand.Op) { ctx.VPMULHW(ops...) }
|
|
|
|
// VPMULHW_Z: Multiply Packed Signed Word Integers and Store High Result (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULHW.Z m128 xmm k xmm
|
|
// VPMULHW.Z m256 ymm k ymm
|
|
// VPMULHW.Z xmm xmm k xmm
|
|
// VPMULHW.Z ymm ymm k ymm
|
|
// VPMULHW.Z m512 zmm k zmm
|
|
// VPMULHW.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPMULHW.Z instruction to the active function.
|
|
func (c *Context) VPMULHW_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPMULHW_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPMULHW_Z: Multiply Packed Signed Word Integers and Store High Result (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULHW.Z m128 xmm k xmm
|
|
// VPMULHW.Z m256 ymm k ymm
|
|
// VPMULHW.Z xmm xmm k xmm
|
|
// VPMULHW.Z ymm ymm k ymm
|
|
// VPMULHW.Z m512 zmm k zmm
|
|
// VPMULHW.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPMULHW.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMULHW_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPMULHW_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPMULLD: Multiply Packed Signed Doubleword Integers and Store Low Result.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULLD m256 ymm ymm
|
|
// VPMULLD ymm ymm ymm
|
|
// VPMULLD m128 xmm xmm
|
|
// VPMULLD xmm xmm xmm
|
|
// VPMULLD m128 xmm k xmm
|
|
// VPMULLD m256 ymm k ymm
|
|
// VPMULLD xmm xmm k xmm
|
|
// VPMULLD ymm ymm k ymm
|
|
// VPMULLD m512 zmm k zmm
|
|
// VPMULLD m512 zmm zmm
|
|
// VPMULLD zmm zmm k zmm
|
|
// VPMULLD zmm zmm zmm
|
|
//
|
|
// Construct and append a VPMULLD instruction to the active function.
|
|
func (c *Context) VPMULLD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPMULLD(ops...))
|
|
}
|
|
|
|
// VPMULLD: Multiply Packed Signed Doubleword Integers and Store Low Result.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULLD m256 ymm ymm
|
|
// VPMULLD ymm ymm ymm
|
|
// VPMULLD m128 xmm xmm
|
|
// VPMULLD xmm xmm xmm
|
|
// VPMULLD m128 xmm k xmm
|
|
// VPMULLD m256 ymm k ymm
|
|
// VPMULLD xmm xmm k xmm
|
|
// VPMULLD ymm ymm k ymm
|
|
// VPMULLD m512 zmm k zmm
|
|
// VPMULLD m512 zmm zmm
|
|
// VPMULLD zmm zmm k zmm
|
|
// VPMULLD zmm zmm zmm
|
|
//
|
|
// Construct and append a VPMULLD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMULLD(ops ...operand.Op) { ctx.VPMULLD(ops...) }
|
|
|
|
// VPMULLD_BCST: Multiply Packed Signed Doubleword Integers and Store Low Result (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULLD.BCST m32 xmm k xmm
|
|
// VPMULLD.BCST m32 xmm xmm
|
|
// VPMULLD.BCST m32 ymm k ymm
|
|
// VPMULLD.BCST m32 ymm ymm
|
|
// VPMULLD.BCST m32 zmm k zmm
|
|
// VPMULLD.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VPMULLD.BCST instruction to the active function.
|
|
func (c *Context) VPMULLD_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPMULLD_BCST(ops...))
|
|
}
|
|
|
|
// VPMULLD_BCST: Multiply Packed Signed Doubleword Integers and Store Low Result (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULLD.BCST m32 xmm k xmm
|
|
// VPMULLD.BCST m32 xmm xmm
|
|
// VPMULLD.BCST m32 ymm k ymm
|
|
// VPMULLD.BCST m32 ymm ymm
|
|
// VPMULLD.BCST m32 zmm k zmm
|
|
// VPMULLD.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VPMULLD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMULLD_BCST(ops ...operand.Op) { ctx.VPMULLD_BCST(ops...) }
|
|
|
|
// VPMULLD_BCST_Z: Multiply Packed Signed Doubleword Integers and Store Low Result (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULLD.BCST.Z m32 xmm k xmm
|
|
// VPMULLD.BCST.Z m32 ymm k ymm
|
|
// VPMULLD.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VPMULLD.BCST.Z instruction to the active function.
|
|
func (c *Context) VPMULLD_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPMULLD_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPMULLD_BCST_Z: Multiply Packed Signed Doubleword Integers and Store Low Result (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULLD.BCST.Z m32 xmm k xmm
|
|
// VPMULLD.BCST.Z m32 ymm k ymm
|
|
// VPMULLD.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VPMULLD.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMULLD_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VPMULLD_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VPMULLD_Z: Multiply Packed Signed Doubleword Integers and Store Low Result (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULLD.Z m128 xmm k xmm
|
|
// VPMULLD.Z m256 ymm k ymm
|
|
// VPMULLD.Z xmm xmm k xmm
|
|
// VPMULLD.Z ymm ymm k ymm
|
|
// VPMULLD.Z m512 zmm k zmm
|
|
// VPMULLD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPMULLD.Z instruction to the active function.
|
|
func (c *Context) VPMULLD_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPMULLD_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPMULLD_Z: Multiply Packed Signed Doubleword Integers and Store Low Result (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULLD.Z m128 xmm k xmm
|
|
// VPMULLD.Z m256 ymm k ymm
|
|
// VPMULLD.Z xmm xmm k xmm
|
|
// VPMULLD.Z ymm ymm k ymm
|
|
// VPMULLD.Z m512 zmm k zmm
|
|
// VPMULLD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPMULLD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMULLD_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPMULLD_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPMULLQ: Multiply Packed Signed Quadword Integers and Store Low Result.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULLQ m128 xmm k xmm
|
|
// VPMULLQ m128 xmm xmm
|
|
// VPMULLQ m256 ymm k ymm
|
|
// VPMULLQ m256 ymm ymm
|
|
// VPMULLQ xmm xmm k xmm
|
|
// VPMULLQ xmm xmm xmm
|
|
// VPMULLQ ymm ymm k ymm
|
|
// VPMULLQ ymm ymm ymm
|
|
// VPMULLQ m512 zmm k zmm
|
|
// VPMULLQ m512 zmm zmm
|
|
// VPMULLQ zmm zmm k zmm
|
|
// VPMULLQ zmm zmm zmm
|
|
//
|
|
// Construct and append a VPMULLQ instruction to the active function.
|
|
func (c *Context) VPMULLQ(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPMULLQ(ops...))
|
|
}
|
|
|
|
// VPMULLQ: Multiply Packed Signed Quadword Integers and Store Low Result.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULLQ m128 xmm k xmm
|
|
// VPMULLQ m128 xmm xmm
|
|
// VPMULLQ m256 ymm k ymm
|
|
// VPMULLQ m256 ymm ymm
|
|
// VPMULLQ xmm xmm k xmm
|
|
// VPMULLQ xmm xmm xmm
|
|
// VPMULLQ ymm ymm k ymm
|
|
// VPMULLQ ymm ymm ymm
|
|
// VPMULLQ m512 zmm k zmm
|
|
// VPMULLQ m512 zmm zmm
|
|
// VPMULLQ zmm zmm k zmm
|
|
// VPMULLQ zmm zmm zmm
|
|
//
|
|
// Construct and append a VPMULLQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMULLQ(ops ...operand.Op) { ctx.VPMULLQ(ops...) }
|
|
|
|
// VPMULLQ_BCST: Multiply Packed Signed Quadword Integers and Store Low Result (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULLQ.BCST m64 xmm k xmm
|
|
// VPMULLQ.BCST m64 xmm xmm
|
|
// VPMULLQ.BCST m64 ymm k ymm
|
|
// VPMULLQ.BCST m64 ymm ymm
|
|
// VPMULLQ.BCST m64 zmm k zmm
|
|
// VPMULLQ.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VPMULLQ.BCST instruction to the active function.
|
|
func (c *Context) VPMULLQ_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPMULLQ_BCST(ops...))
|
|
}
|
|
|
|
// VPMULLQ_BCST: Multiply Packed Signed Quadword Integers and Store Low Result (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULLQ.BCST m64 xmm k xmm
|
|
// VPMULLQ.BCST m64 xmm xmm
|
|
// VPMULLQ.BCST m64 ymm k ymm
|
|
// VPMULLQ.BCST m64 ymm ymm
|
|
// VPMULLQ.BCST m64 zmm k zmm
|
|
// VPMULLQ.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VPMULLQ.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMULLQ_BCST(ops ...operand.Op) { ctx.VPMULLQ_BCST(ops...) }
|
|
|
|
// VPMULLQ_BCST_Z: Multiply Packed Signed Quadword Integers and Store Low Result (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULLQ.BCST.Z m64 xmm k xmm
|
|
// VPMULLQ.BCST.Z m64 ymm k ymm
|
|
// VPMULLQ.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VPMULLQ.BCST.Z instruction to the active function.
|
|
func (c *Context) VPMULLQ_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPMULLQ_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPMULLQ_BCST_Z: Multiply Packed Signed Quadword Integers and Store Low Result (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULLQ.BCST.Z m64 xmm k xmm
|
|
// VPMULLQ.BCST.Z m64 ymm k ymm
|
|
// VPMULLQ.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VPMULLQ.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMULLQ_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VPMULLQ_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VPMULLQ_Z: Multiply Packed Signed Quadword Integers and Store Low Result (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULLQ.Z m128 xmm k xmm
|
|
// VPMULLQ.Z m256 ymm k ymm
|
|
// VPMULLQ.Z xmm xmm k xmm
|
|
// VPMULLQ.Z ymm ymm k ymm
|
|
// VPMULLQ.Z m512 zmm k zmm
|
|
// VPMULLQ.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPMULLQ.Z instruction to the active function.
|
|
func (c *Context) VPMULLQ_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPMULLQ_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPMULLQ_Z: Multiply Packed Signed Quadword Integers and Store Low Result (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULLQ.Z m128 xmm k xmm
|
|
// VPMULLQ.Z m256 ymm k ymm
|
|
// VPMULLQ.Z xmm xmm k xmm
|
|
// VPMULLQ.Z ymm ymm k ymm
|
|
// VPMULLQ.Z m512 zmm k zmm
|
|
// VPMULLQ.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPMULLQ.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMULLQ_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPMULLQ_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPMULLW: Multiply Packed Signed Word Integers and Store Low Result.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULLW m256 ymm ymm
|
|
// VPMULLW ymm ymm ymm
|
|
// VPMULLW m128 xmm xmm
|
|
// VPMULLW xmm xmm xmm
|
|
// VPMULLW m128 xmm k xmm
|
|
// VPMULLW m256 ymm k ymm
|
|
// VPMULLW xmm xmm k xmm
|
|
// VPMULLW ymm ymm k ymm
|
|
// VPMULLW m512 zmm k zmm
|
|
// VPMULLW m512 zmm zmm
|
|
// VPMULLW zmm zmm k zmm
|
|
// VPMULLW zmm zmm zmm
|
|
//
|
|
// Construct and append a VPMULLW instruction to the active function.
|
|
func (c *Context) VPMULLW(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPMULLW(ops...))
|
|
}
|
|
|
|
// VPMULLW: Multiply Packed Signed Word Integers and Store Low Result.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULLW m256 ymm ymm
|
|
// VPMULLW ymm ymm ymm
|
|
// VPMULLW m128 xmm xmm
|
|
// VPMULLW xmm xmm xmm
|
|
// VPMULLW m128 xmm k xmm
|
|
// VPMULLW m256 ymm k ymm
|
|
// VPMULLW xmm xmm k xmm
|
|
// VPMULLW ymm ymm k ymm
|
|
// VPMULLW m512 zmm k zmm
|
|
// VPMULLW m512 zmm zmm
|
|
// VPMULLW zmm zmm k zmm
|
|
// VPMULLW zmm zmm zmm
|
|
//
|
|
// Construct and append a VPMULLW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMULLW(ops ...operand.Op) { ctx.VPMULLW(ops...) }
|
|
|
|
// VPMULLW_Z: Multiply Packed Signed Word Integers and Store Low Result (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULLW.Z m128 xmm k xmm
|
|
// VPMULLW.Z m256 ymm k ymm
|
|
// VPMULLW.Z xmm xmm k xmm
|
|
// VPMULLW.Z ymm ymm k ymm
|
|
// VPMULLW.Z m512 zmm k zmm
|
|
// VPMULLW.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPMULLW.Z instruction to the active function.
|
|
func (c *Context) VPMULLW_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPMULLW_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPMULLW_Z: Multiply Packed Signed Word Integers and Store Low Result (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULLW.Z m128 xmm k xmm
|
|
// VPMULLW.Z m256 ymm k ymm
|
|
// VPMULLW.Z xmm xmm k xmm
|
|
// VPMULLW.Z ymm ymm k ymm
|
|
// VPMULLW.Z m512 zmm k zmm
|
|
// VPMULLW.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPMULLW.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMULLW_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPMULLW_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPMULTISHIFTQB: Select Packed Unaligned Bytes from Quadword Sources.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULTISHIFTQB m128 xmm k xmm
|
|
// VPMULTISHIFTQB m128 xmm xmm
|
|
// VPMULTISHIFTQB m256 ymm k ymm
|
|
// VPMULTISHIFTQB m256 ymm ymm
|
|
// VPMULTISHIFTQB xmm xmm k xmm
|
|
// VPMULTISHIFTQB xmm xmm xmm
|
|
// VPMULTISHIFTQB ymm ymm k ymm
|
|
// VPMULTISHIFTQB ymm ymm ymm
|
|
// VPMULTISHIFTQB m512 zmm k zmm
|
|
// VPMULTISHIFTQB m512 zmm zmm
|
|
// VPMULTISHIFTQB zmm zmm k zmm
|
|
// VPMULTISHIFTQB zmm zmm zmm
|
|
//
|
|
// Construct and append a VPMULTISHIFTQB instruction to the active function.
|
|
func (c *Context) VPMULTISHIFTQB(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPMULTISHIFTQB(ops...))
|
|
}
|
|
|
|
// VPMULTISHIFTQB: Select Packed Unaligned Bytes from Quadword Sources.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULTISHIFTQB m128 xmm k xmm
|
|
// VPMULTISHIFTQB m128 xmm xmm
|
|
// VPMULTISHIFTQB m256 ymm k ymm
|
|
// VPMULTISHIFTQB m256 ymm ymm
|
|
// VPMULTISHIFTQB xmm xmm k xmm
|
|
// VPMULTISHIFTQB xmm xmm xmm
|
|
// VPMULTISHIFTQB ymm ymm k ymm
|
|
// VPMULTISHIFTQB ymm ymm ymm
|
|
// VPMULTISHIFTQB m512 zmm k zmm
|
|
// VPMULTISHIFTQB m512 zmm zmm
|
|
// VPMULTISHIFTQB zmm zmm k zmm
|
|
// VPMULTISHIFTQB zmm zmm zmm
|
|
//
|
|
// Construct and append a VPMULTISHIFTQB instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMULTISHIFTQB(ops ...operand.Op) { ctx.VPMULTISHIFTQB(ops...) }
|
|
|
|
// VPMULTISHIFTQB_BCST: Select Packed Unaligned Bytes from Quadword Sources (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULTISHIFTQB.BCST m64 xmm k xmm
|
|
// VPMULTISHIFTQB.BCST m64 xmm xmm
|
|
// VPMULTISHIFTQB.BCST m64 ymm k ymm
|
|
// VPMULTISHIFTQB.BCST m64 ymm ymm
|
|
// VPMULTISHIFTQB.BCST m64 zmm k zmm
|
|
// VPMULTISHIFTQB.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VPMULTISHIFTQB.BCST instruction to the active function.
|
|
func (c *Context) VPMULTISHIFTQB_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPMULTISHIFTQB_BCST(ops...))
|
|
}
|
|
|
|
// VPMULTISHIFTQB_BCST: Select Packed Unaligned Bytes from Quadword Sources (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULTISHIFTQB.BCST m64 xmm k xmm
|
|
// VPMULTISHIFTQB.BCST m64 xmm xmm
|
|
// VPMULTISHIFTQB.BCST m64 ymm k ymm
|
|
// VPMULTISHIFTQB.BCST m64 ymm ymm
|
|
// VPMULTISHIFTQB.BCST m64 zmm k zmm
|
|
// VPMULTISHIFTQB.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VPMULTISHIFTQB.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMULTISHIFTQB_BCST(ops ...operand.Op) { ctx.VPMULTISHIFTQB_BCST(ops...) }
|
|
|
|
// VPMULTISHIFTQB_BCST_Z: Select Packed Unaligned Bytes from Quadword Sources (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULTISHIFTQB.BCST.Z m64 xmm k xmm
|
|
// VPMULTISHIFTQB.BCST.Z m64 ymm k ymm
|
|
// VPMULTISHIFTQB.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VPMULTISHIFTQB.BCST.Z instruction to the active function.
|
|
func (c *Context) VPMULTISHIFTQB_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPMULTISHIFTQB_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPMULTISHIFTQB_BCST_Z: Select Packed Unaligned Bytes from Quadword Sources (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULTISHIFTQB.BCST.Z m64 xmm k xmm
|
|
// VPMULTISHIFTQB.BCST.Z m64 ymm k ymm
|
|
// VPMULTISHIFTQB.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VPMULTISHIFTQB.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMULTISHIFTQB_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VPMULTISHIFTQB_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VPMULTISHIFTQB_Z: Select Packed Unaligned Bytes from Quadword Sources (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULTISHIFTQB.Z m128 xmm k xmm
|
|
// VPMULTISHIFTQB.Z m256 ymm k ymm
|
|
// VPMULTISHIFTQB.Z xmm xmm k xmm
|
|
// VPMULTISHIFTQB.Z ymm ymm k ymm
|
|
// VPMULTISHIFTQB.Z m512 zmm k zmm
|
|
// VPMULTISHIFTQB.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPMULTISHIFTQB.Z instruction to the active function.
|
|
func (c *Context) VPMULTISHIFTQB_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPMULTISHIFTQB_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPMULTISHIFTQB_Z: Select Packed Unaligned Bytes from Quadword Sources (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULTISHIFTQB.Z m128 xmm k xmm
|
|
// VPMULTISHIFTQB.Z m256 ymm k ymm
|
|
// VPMULTISHIFTQB.Z xmm xmm k xmm
|
|
// VPMULTISHIFTQB.Z ymm ymm k ymm
|
|
// VPMULTISHIFTQB.Z m512 zmm k zmm
|
|
// VPMULTISHIFTQB.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPMULTISHIFTQB.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMULTISHIFTQB_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPMULTISHIFTQB_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPMULUDQ: Multiply Packed Unsigned Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULUDQ m256 ymm ymm
|
|
// VPMULUDQ ymm ymm ymm
|
|
// VPMULUDQ m128 xmm xmm
|
|
// VPMULUDQ xmm xmm xmm
|
|
// VPMULUDQ m128 xmm k xmm
|
|
// VPMULUDQ m256 ymm k ymm
|
|
// VPMULUDQ xmm xmm k xmm
|
|
// VPMULUDQ ymm ymm k ymm
|
|
// VPMULUDQ m512 zmm k zmm
|
|
// VPMULUDQ m512 zmm zmm
|
|
// VPMULUDQ zmm zmm k zmm
|
|
// VPMULUDQ zmm zmm zmm
|
|
//
|
|
// Construct and append a VPMULUDQ instruction to the active function.
|
|
func (c *Context) VPMULUDQ(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPMULUDQ(ops...))
|
|
}
|
|
|
|
// VPMULUDQ: Multiply Packed Unsigned Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULUDQ m256 ymm ymm
|
|
// VPMULUDQ ymm ymm ymm
|
|
// VPMULUDQ m128 xmm xmm
|
|
// VPMULUDQ xmm xmm xmm
|
|
// VPMULUDQ m128 xmm k xmm
|
|
// VPMULUDQ m256 ymm k ymm
|
|
// VPMULUDQ xmm xmm k xmm
|
|
// VPMULUDQ ymm ymm k ymm
|
|
// VPMULUDQ m512 zmm k zmm
|
|
// VPMULUDQ m512 zmm zmm
|
|
// VPMULUDQ zmm zmm k zmm
|
|
// VPMULUDQ zmm zmm zmm
|
|
//
|
|
// Construct and append a VPMULUDQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMULUDQ(ops ...operand.Op) { ctx.VPMULUDQ(ops...) }
|
|
|
|
// VPMULUDQ_BCST: Multiply Packed Unsigned Doubleword Integers (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULUDQ.BCST m64 xmm k xmm
|
|
// VPMULUDQ.BCST m64 xmm xmm
|
|
// VPMULUDQ.BCST m64 ymm k ymm
|
|
// VPMULUDQ.BCST m64 ymm ymm
|
|
// VPMULUDQ.BCST m64 zmm k zmm
|
|
// VPMULUDQ.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VPMULUDQ.BCST instruction to the active function.
|
|
func (c *Context) VPMULUDQ_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPMULUDQ_BCST(ops...))
|
|
}
|
|
|
|
// VPMULUDQ_BCST: Multiply Packed Unsigned Doubleword Integers (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULUDQ.BCST m64 xmm k xmm
|
|
// VPMULUDQ.BCST m64 xmm xmm
|
|
// VPMULUDQ.BCST m64 ymm k ymm
|
|
// VPMULUDQ.BCST m64 ymm ymm
|
|
// VPMULUDQ.BCST m64 zmm k zmm
|
|
// VPMULUDQ.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VPMULUDQ.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMULUDQ_BCST(ops ...operand.Op) { ctx.VPMULUDQ_BCST(ops...) }
|
|
|
|
// VPMULUDQ_BCST_Z: Multiply Packed Unsigned Doubleword Integers (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULUDQ.BCST.Z m64 xmm k xmm
|
|
// VPMULUDQ.BCST.Z m64 ymm k ymm
|
|
// VPMULUDQ.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VPMULUDQ.BCST.Z instruction to the active function.
|
|
func (c *Context) VPMULUDQ_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPMULUDQ_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPMULUDQ_BCST_Z: Multiply Packed Unsigned Doubleword Integers (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULUDQ.BCST.Z m64 xmm k xmm
|
|
// VPMULUDQ.BCST.Z m64 ymm k ymm
|
|
// VPMULUDQ.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VPMULUDQ.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMULUDQ_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VPMULUDQ_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VPMULUDQ_Z: Multiply Packed Unsigned Doubleword Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULUDQ.Z m128 xmm k xmm
|
|
// VPMULUDQ.Z m256 ymm k ymm
|
|
// VPMULUDQ.Z xmm xmm k xmm
|
|
// VPMULUDQ.Z ymm ymm k ymm
|
|
// VPMULUDQ.Z m512 zmm k zmm
|
|
// VPMULUDQ.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPMULUDQ.Z instruction to the active function.
|
|
func (c *Context) VPMULUDQ_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPMULUDQ_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPMULUDQ_Z: Multiply Packed Unsigned Doubleword Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPMULUDQ.Z m128 xmm k xmm
|
|
// VPMULUDQ.Z m256 ymm k ymm
|
|
// VPMULUDQ.Z xmm xmm k xmm
|
|
// VPMULUDQ.Z ymm ymm k ymm
|
|
// VPMULUDQ.Z m512 zmm k zmm
|
|
// VPMULUDQ.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPMULUDQ.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPMULUDQ_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPMULUDQ_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPOPCNTD: Packed Population Count for Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPOPCNTD m512 k zmm
|
|
// VPOPCNTD m512 zmm
|
|
// VPOPCNTD zmm k zmm
|
|
// VPOPCNTD zmm zmm
|
|
//
|
|
// Construct and append a VPOPCNTD instruction to the active function.
|
|
func (c *Context) VPOPCNTD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPOPCNTD(ops...))
|
|
}
|
|
|
|
// VPOPCNTD: Packed Population Count for Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPOPCNTD m512 k zmm
|
|
// VPOPCNTD m512 zmm
|
|
// VPOPCNTD zmm k zmm
|
|
// VPOPCNTD zmm zmm
|
|
//
|
|
// Construct and append a VPOPCNTD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPOPCNTD(ops ...operand.Op) { ctx.VPOPCNTD(ops...) }
|
|
|
|
// VPOPCNTD_BCST: Packed Population Count for Doubleword Integers (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPOPCNTD.BCST m32 k zmm
|
|
// VPOPCNTD.BCST m32 zmm
|
|
//
|
|
// Construct and append a VPOPCNTD.BCST instruction to the active function.
|
|
func (c *Context) VPOPCNTD_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPOPCNTD_BCST(ops...))
|
|
}
|
|
|
|
// VPOPCNTD_BCST: Packed Population Count for Doubleword Integers (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPOPCNTD.BCST m32 k zmm
|
|
// VPOPCNTD.BCST m32 zmm
|
|
//
|
|
// Construct and append a VPOPCNTD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPOPCNTD_BCST(ops ...operand.Op) { ctx.VPOPCNTD_BCST(ops...) }
|
|
|
|
// VPOPCNTD_BCST_Z: Packed Population Count for Doubleword Integers (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPOPCNTD.BCST.Z m32 k zmm
|
|
//
|
|
// Construct and append a VPOPCNTD.BCST.Z instruction to the active function.
|
|
func (c *Context) VPOPCNTD_BCST_Z(m, k, z operand.Op) {
|
|
c.addinstruction(x86.VPOPCNTD_BCST_Z(m, k, z))
|
|
}
|
|
|
|
// VPOPCNTD_BCST_Z: Packed Population Count for Doubleword Integers (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPOPCNTD.BCST.Z m32 k zmm
|
|
//
|
|
// Construct and append a VPOPCNTD.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPOPCNTD_BCST_Z(m, k, z operand.Op) { ctx.VPOPCNTD_BCST_Z(m, k, z) }
|
|
|
|
// VPOPCNTD_Z: Packed Population Count for Doubleword Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPOPCNTD.Z m512 k zmm
|
|
// VPOPCNTD.Z zmm k zmm
|
|
//
|
|
// Construct and append a VPOPCNTD.Z instruction to the active function.
|
|
func (c *Context) VPOPCNTD_Z(mz, k, z operand.Op) {
|
|
c.addinstruction(x86.VPOPCNTD_Z(mz, k, z))
|
|
}
|
|
|
|
// VPOPCNTD_Z: Packed Population Count for Doubleword Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPOPCNTD.Z m512 k zmm
|
|
// VPOPCNTD.Z zmm k zmm
|
|
//
|
|
// Construct and append a VPOPCNTD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPOPCNTD_Z(mz, k, z operand.Op) { ctx.VPOPCNTD_Z(mz, k, z) }
|
|
|
|
// VPOPCNTQ: Packed Population Count for Quadword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPOPCNTQ m512 k zmm
|
|
// VPOPCNTQ m512 zmm
|
|
// VPOPCNTQ zmm k zmm
|
|
// VPOPCNTQ zmm zmm
|
|
//
|
|
// Construct and append a VPOPCNTQ instruction to the active function.
|
|
func (c *Context) VPOPCNTQ(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPOPCNTQ(ops...))
|
|
}
|
|
|
|
// VPOPCNTQ: Packed Population Count for Quadword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPOPCNTQ m512 k zmm
|
|
// VPOPCNTQ m512 zmm
|
|
// VPOPCNTQ zmm k zmm
|
|
// VPOPCNTQ zmm zmm
|
|
//
|
|
// Construct and append a VPOPCNTQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPOPCNTQ(ops ...operand.Op) { ctx.VPOPCNTQ(ops...) }
|
|
|
|
// VPOPCNTQ_BCST: Packed Population Count for Quadword Integers (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPOPCNTQ.BCST m64 k zmm
|
|
// VPOPCNTQ.BCST m64 zmm
|
|
//
|
|
// Construct and append a VPOPCNTQ.BCST instruction to the active function.
|
|
func (c *Context) VPOPCNTQ_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPOPCNTQ_BCST(ops...))
|
|
}
|
|
|
|
// VPOPCNTQ_BCST: Packed Population Count for Quadword Integers (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPOPCNTQ.BCST m64 k zmm
|
|
// VPOPCNTQ.BCST m64 zmm
|
|
//
|
|
// Construct and append a VPOPCNTQ.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPOPCNTQ_BCST(ops ...operand.Op) { ctx.VPOPCNTQ_BCST(ops...) }
|
|
|
|
// VPOPCNTQ_BCST_Z: Packed Population Count for Quadword Integers (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPOPCNTQ.BCST.Z m64 k zmm
|
|
//
|
|
// Construct and append a VPOPCNTQ.BCST.Z instruction to the active function.
|
|
func (c *Context) VPOPCNTQ_BCST_Z(m, k, z operand.Op) {
|
|
c.addinstruction(x86.VPOPCNTQ_BCST_Z(m, k, z))
|
|
}
|
|
|
|
// VPOPCNTQ_BCST_Z: Packed Population Count for Quadword Integers (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPOPCNTQ.BCST.Z m64 k zmm
|
|
//
|
|
// Construct and append a VPOPCNTQ.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPOPCNTQ_BCST_Z(m, k, z operand.Op) { ctx.VPOPCNTQ_BCST_Z(m, k, z) }
|
|
|
|
// VPOPCNTQ_Z: Packed Population Count for Quadword Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPOPCNTQ.Z m512 k zmm
|
|
// VPOPCNTQ.Z zmm k zmm
|
|
//
|
|
// Construct and append a VPOPCNTQ.Z instruction to the active function.
|
|
func (c *Context) VPOPCNTQ_Z(mz, k, z operand.Op) {
|
|
c.addinstruction(x86.VPOPCNTQ_Z(mz, k, z))
|
|
}
|
|
|
|
// VPOPCNTQ_Z: Packed Population Count for Quadword Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPOPCNTQ.Z m512 k zmm
|
|
// VPOPCNTQ.Z zmm k zmm
|
|
//
|
|
// Construct and append a VPOPCNTQ.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPOPCNTQ_Z(mz, k, z operand.Op) { ctx.VPOPCNTQ_Z(mz, k, z) }
|
|
|
|
// VPOR: Packed Bitwise Logical OR.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPOR m256 ymm ymm
|
|
// VPOR ymm ymm ymm
|
|
// VPOR m128 xmm xmm
|
|
// VPOR xmm xmm xmm
|
|
//
|
|
// Construct and append a VPOR instruction to the active function.
|
|
func (c *Context) VPOR(mxy, xy, xy1 operand.Op) {
|
|
c.addinstruction(x86.VPOR(mxy, xy, xy1))
|
|
}
|
|
|
|
// VPOR: Packed Bitwise Logical OR.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPOR m256 ymm ymm
|
|
// VPOR ymm ymm ymm
|
|
// VPOR m128 xmm xmm
|
|
// VPOR xmm xmm xmm
|
|
//
|
|
// Construct and append a VPOR instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPOR(mxy, xy, xy1 operand.Op) { ctx.VPOR(mxy, xy, xy1) }
|
|
|
|
// VPORD: Bitwise Logical OR of Packed Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPORD m128 xmm k xmm
|
|
// VPORD m128 xmm xmm
|
|
// VPORD m256 ymm k ymm
|
|
// VPORD m256 ymm ymm
|
|
// VPORD xmm xmm k xmm
|
|
// VPORD xmm xmm xmm
|
|
// VPORD ymm ymm k ymm
|
|
// VPORD ymm ymm ymm
|
|
// VPORD m512 zmm k zmm
|
|
// VPORD m512 zmm zmm
|
|
// VPORD zmm zmm k zmm
|
|
// VPORD zmm zmm zmm
|
|
//
|
|
// Construct and append a VPORD instruction to the active function.
|
|
func (c *Context) VPORD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPORD(ops...))
|
|
}
|
|
|
|
// VPORD: Bitwise Logical OR of Packed Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPORD m128 xmm k xmm
|
|
// VPORD m128 xmm xmm
|
|
// VPORD m256 ymm k ymm
|
|
// VPORD m256 ymm ymm
|
|
// VPORD xmm xmm k xmm
|
|
// VPORD xmm xmm xmm
|
|
// VPORD ymm ymm k ymm
|
|
// VPORD ymm ymm ymm
|
|
// VPORD m512 zmm k zmm
|
|
// VPORD m512 zmm zmm
|
|
// VPORD zmm zmm k zmm
|
|
// VPORD zmm zmm zmm
|
|
//
|
|
// Construct and append a VPORD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPORD(ops ...operand.Op) { ctx.VPORD(ops...) }
|
|
|
|
// VPORD_BCST: Bitwise Logical OR of Packed Doubleword Integers (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPORD.BCST m32 xmm k xmm
|
|
// VPORD.BCST m32 xmm xmm
|
|
// VPORD.BCST m32 ymm k ymm
|
|
// VPORD.BCST m32 ymm ymm
|
|
// VPORD.BCST m32 zmm k zmm
|
|
// VPORD.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VPORD.BCST instruction to the active function.
|
|
func (c *Context) VPORD_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPORD_BCST(ops...))
|
|
}
|
|
|
|
// VPORD_BCST: Bitwise Logical OR of Packed Doubleword Integers (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPORD.BCST m32 xmm k xmm
|
|
// VPORD.BCST m32 xmm xmm
|
|
// VPORD.BCST m32 ymm k ymm
|
|
// VPORD.BCST m32 ymm ymm
|
|
// VPORD.BCST m32 zmm k zmm
|
|
// VPORD.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VPORD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPORD_BCST(ops ...operand.Op) { ctx.VPORD_BCST(ops...) }
|
|
|
|
// VPORD_BCST_Z: Bitwise Logical OR of Packed Doubleword Integers (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPORD.BCST.Z m32 xmm k xmm
|
|
// VPORD.BCST.Z m32 ymm k ymm
|
|
// VPORD.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VPORD.BCST.Z instruction to the active function.
|
|
func (c *Context) VPORD_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPORD_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPORD_BCST_Z: Bitwise Logical OR of Packed Doubleword Integers (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPORD.BCST.Z m32 xmm k xmm
|
|
// VPORD.BCST.Z m32 ymm k ymm
|
|
// VPORD.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VPORD.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPORD_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VPORD_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VPORD_Z: Bitwise Logical OR of Packed Doubleword Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPORD.Z m128 xmm k xmm
|
|
// VPORD.Z m256 ymm k ymm
|
|
// VPORD.Z xmm xmm k xmm
|
|
// VPORD.Z ymm ymm k ymm
|
|
// VPORD.Z m512 zmm k zmm
|
|
// VPORD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPORD.Z instruction to the active function.
|
|
func (c *Context) VPORD_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPORD_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPORD_Z: Bitwise Logical OR of Packed Doubleword Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPORD.Z m128 xmm k xmm
|
|
// VPORD.Z m256 ymm k ymm
|
|
// VPORD.Z xmm xmm k xmm
|
|
// VPORD.Z ymm ymm k ymm
|
|
// VPORD.Z m512 zmm k zmm
|
|
// VPORD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPORD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPORD_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPORD_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPORQ: Bitwise Logical OR of Packed Quadword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPORQ m128 xmm k xmm
|
|
// VPORQ m128 xmm xmm
|
|
// VPORQ m256 ymm k ymm
|
|
// VPORQ m256 ymm ymm
|
|
// VPORQ xmm xmm k xmm
|
|
// VPORQ xmm xmm xmm
|
|
// VPORQ ymm ymm k ymm
|
|
// VPORQ ymm ymm ymm
|
|
// VPORQ m512 zmm k zmm
|
|
// VPORQ m512 zmm zmm
|
|
// VPORQ zmm zmm k zmm
|
|
// VPORQ zmm zmm zmm
|
|
//
|
|
// Construct and append a VPORQ instruction to the active function.
|
|
func (c *Context) VPORQ(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPORQ(ops...))
|
|
}
|
|
|
|
// VPORQ: Bitwise Logical OR of Packed Quadword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPORQ m128 xmm k xmm
|
|
// VPORQ m128 xmm xmm
|
|
// VPORQ m256 ymm k ymm
|
|
// VPORQ m256 ymm ymm
|
|
// VPORQ xmm xmm k xmm
|
|
// VPORQ xmm xmm xmm
|
|
// VPORQ ymm ymm k ymm
|
|
// VPORQ ymm ymm ymm
|
|
// VPORQ m512 zmm k zmm
|
|
// VPORQ m512 zmm zmm
|
|
// VPORQ zmm zmm k zmm
|
|
// VPORQ zmm zmm zmm
|
|
//
|
|
// Construct and append a VPORQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPORQ(ops ...operand.Op) { ctx.VPORQ(ops...) }
|
|
|
|
// VPORQ_BCST: Bitwise Logical OR of Packed Quadword Integers (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPORQ.BCST m64 xmm k xmm
|
|
// VPORQ.BCST m64 xmm xmm
|
|
// VPORQ.BCST m64 ymm k ymm
|
|
// VPORQ.BCST m64 ymm ymm
|
|
// VPORQ.BCST m64 zmm k zmm
|
|
// VPORQ.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VPORQ.BCST instruction to the active function.
|
|
func (c *Context) VPORQ_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPORQ_BCST(ops...))
|
|
}
|
|
|
|
// VPORQ_BCST: Bitwise Logical OR of Packed Quadword Integers (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPORQ.BCST m64 xmm k xmm
|
|
// VPORQ.BCST m64 xmm xmm
|
|
// VPORQ.BCST m64 ymm k ymm
|
|
// VPORQ.BCST m64 ymm ymm
|
|
// VPORQ.BCST m64 zmm k zmm
|
|
// VPORQ.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VPORQ.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPORQ_BCST(ops ...operand.Op) { ctx.VPORQ_BCST(ops...) }
|
|
|
|
// VPORQ_BCST_Z: Bitwise Logical OR of Packed Quadword Integers (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPORQ.BCST.Z m64 xmm k xmm
|
|
// VPORQ.BCST.Z m64 ymm k ymm
|
|
// VPORQ.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VPORQ.BCST.Z instruction to the active function.
|
|
func (c *Context) VPORQ_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPORQ_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPORQ_BCST_Z: Bitwise Logical OR of Packed Quadword Integers (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPORQ.BCST.Z m64 xmm k xmm
|
|
// VPORQ.BCST.Z m64 ymm k ymm
|
|
// VPORQ.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VPORQ.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPORQ_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VPORQ_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VPORQ_Z: Bitwise Logical OR of Packed Quadword Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPORQ.Z m128 xmm k xmm
|
|
// VPORQ.Z m256 ymm k ymm
|
|
// VPORQ.Z xmm xmm k xmm
|
|
// VPORQ.Z ymm ymm k ymm
|
|
// VPORQ.Z m512 zmm k zmm
|
|
// VPORQ.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPORQ.Z instruction to the active function.
|
|
func (c *Context) VPORQ_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPORQ_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPORQ_Z: Bitwise Logical OR of Packed Quadword Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPORQ.Z m128 xmm k xmm
|
|
// VPORQ.Z m256 ymm k ymm
|
|
// VPORQ.Z xmm xmm k xmm
|
|
// VPORQ.Z ymm ymm k ymm
|
|
// VPORQ.Z m512 zmm k zmm
|
|
// VPORQ.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPORQ.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPORQ_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPORQ_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPROLD: Rotate Packed Doubleword Left.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPROLD imm8 m128 k xmm
|
|
// VPROLD imm8 m128 xmm
|
|
// VPROLD imm8 m256 k ymm
|
|
// VPROLD imm8 m256 ymm
|
|
// VPROLD imm8 xmm k xmm
|
|
// VPROLD imm8 xmm xmm
|
|
// VPROLD imm8 ymm k ymm
|
|
// VPROLD imm8 ymm ymm
|
|
// VPROLD imm8 m512 k zmm
|
|
// VPROLD imm8 m512 zmm
|
|
// VPROLD imm8 zmm k zmm
|
|
// VPROLD imm8 zmm zmm
|
|
//
|
|
// Construct and append a VPROLD instruction to the active function.
|
|
func (c *Context) VPROLD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPROLD(ops...))
|
|
}
|
|
|
|
// VPROLD: Rotate Packed Doubleword Left.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPROLD imm8 m128 k xmm
|
|
// VPROLD imm8 m128 xmm
|
|
// VPROLD imm8 m256 k ymm
|
|
// VPROLD imm8 m256 ymm
|
|
// VPROLD imm8 xmm k xmm
|
|
// VPROLD imm8 xmm xmm
|
|
// VPROLD imm8 ymm k ymm
|
|
// VPROLD imm8 ymm ymm
|
|
// VPROLD imm8 m512 k zmm
|
|
// VPROLD imm8 m512 zmm
|
|
// VPROLD imm8 zmm k zmm
|
|
// VPROLD imm8 zmm zmm
|
|
//
|
|
// Construct and append a VPROLD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPROLD(ops ...operand.Op) { ctx.VPROLD(ops...) }
|
|
|
|
// VPROLD_BCST: Rotate Packed Doubleword Left (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPROLD.BCST imm8 m32 k xmm
|
|
// VPROLD.BCST imm8 m32 k ymm
|
|
// VPROLD.BCST imm8 m32 xmm
|
|
// VPROLD.BCST imm8 m32 ymm
|
|
// VPROLD.BCST imm8 m32 k zmm
|
|
// VPROLD.BCST imm8 m32 zmm
|
|
//
|
|
// Construct and append a VPROLD.BCST instruction to the active function.
|
|
func (c *Context) VPROLD_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPROLD_BCST(ops...))
|
|
}
|
|
|
|
// VPROLD_BCST: Rotate Packed Doubleword Left (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPROLD.BCST imm8 m32 k xmm
|
|
// VPROLD.BCST imm8 m32 k ymm
|
|
// VPROLD.BCST imm8 m32 xmm
|
|
// VPROLD.BCST imm8 m32 ymm
|
|
// VPROLD.BCST imm8 m32 k zmm
|
|
// VPROLD.BCST imm8 m32 zmm
|
|
//
|
|
// Construct and append a VPROLD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPROLD_BCST(ops ...operand.Op) { ctx.VPROLD_BCST(ops...) }
|
|
|
|
// VPROLD_BCST_Z: Rotate Packed Doubleword Left (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPROLD.BCST.Z imm8 m32 k xmm
|
|
// VPROLD.BCST.Z imm8 m32 k ymm
|
|
// VPROLD.BCST.Z imm8 m32 k zmm
|
|
//
|
|
// Construct and append a VPROLD.BCST.Z instruction to the active function.
|
|
func (c *Context) VPROLD_BCST_Z(i, m, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VPROLD_BCST_Z(i, m, k, xyz))
|
|
}
|
|
|
|
// VPROLD_BCST_Z: Rotate Packed Doubleword Left (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPROLD.BCST.Z imm8 m32 k xmm
|
|
// VPROLD.BCST.Z imm8 m32 k ymm
|
|
// VPROLD.BCST.Z imm8 m32 k zmm
|
|
//
|
|
// Construct and append a VPROLD.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPROLD_BCST_Z(i, m, k, xyz operand.Op) { ctx.VPROLD_BCST_Z(i, m, k, xyz) }
|
|
|
|
// VPROLD_Z: Rotate Packed Doubleword Left (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPROLD.Z imm8 m128 k xmm
|
|
// VPROLD.Z imm8 m256 k ymm
|
|
// VPROLD.Z imm8 xmm k xmm
|
|
// VPROLD.Z imm8 ymm k ymm
|
|
// VPROLD.Z imm8 m512 k zmm
|
|
// VPROLD.Z imm8 zmm k zmm
|
|
//
|
|
// Construct and append a VPROLD.Z instruction to the active function.
|
|
func (c *Context) VPROLD_Z(i, mxyz, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VPROLD_Z(i, mxyz, k, xyz))
|
|
}
|
|
|
|
// VPROLD_Z: Rotate Packed Doubleword Left (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPROLD.Z imm8 m128 k xmm
|
|
// VPROLD.Z imm8 m256 k ymm
|
|
// VPROLD.Z imm8 xmm k xmm
|
|
// VPROLD.Z imm8 ymm k ymm
|
|
// VPROLD.Z imm8 m512 k zmm
|
|
// VPROLD.Z imm8 zmm k zmm
|
|
//
|
|
// Construct and append a VPROLD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPROLD_Z(i, mxyz, k, xyz operand.Op) { ctx.VPROLD_Z(i, mxyz, k, xyz) }
|
|
|
|
// VPROLQ: Rotate Packed Quadword Left.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPROLQ imm8 m128 k xmm
|
|
// VPROLQ imm8 m128 xmm
|
|
// VPROLQ imm8 m256 k ymm
|
|
// VPROLQ imm8 m256 ymm
|
|
// VPROLQ imm8 xmm k xmm
|
|
// VPROLQ imm8 xmm xmm
|
|
// VPROLQ imm8 ymm k ymm
|
|
// VPROLQ imm8 ymm ymm
|
|
// VPROLQ imm8 m512 k zmm
|
|
// VPROLQ imm8 m512 zmm
|
|
// VPROLQ imm8 zmm k zmm
|
|
// VPROLQ imm8 zmm zmm
|
|
//
|
|
// Construct and append a VPROLQ instruction to the active function.
|
|
func (c *Context) VPROLQ(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPROLQ(ops...))
|
|
}
|
|
|
|
// VPROLQ: Rotate Packed Quadword Left.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPROLQ imm8 m128 k xmm
|
|
// VPROLQ imm8 m128 xmm
|
|
// VPROLQ imm8 m256 k ymm
|
|
// VPROLQ imm8 m256 ymm
|
|
// VPROLQ imm8 xmm k xmm
|
|
// VPROLQ imm8 xmm xmm
|
|
// VPROLQ imm8 ymm k ymm
|
|
// VPROLQ imm8 ymm ymm
|
|
// VPROLQ imm8 m512 k zmm
|
|
// VPROLQ imm8 m512 zmm
|
|
// VPROLQ imm8 zmm k zmm
|
|
// VPROLQ imm8 zmm zmm
|
|
//
|
|
// Construct and append a VPROLQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPROLQ(ops ...operand.Op) { ctx.VPROLQ(ops...) }
|
|
|
|
// VPROLQ_BCST: Rotate Packed Quadword Left (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPROLQ.BCST imm8 m64 k xmm
|
|
// VPROLQ.BCST imm8 m64 k ymm
|
|
// VPROLQ.BCST imm8 m64 xmm
|
|
// VPROLQ.BCST imm8 m64 ymm
|
|
// VPROLQ.BCST imm8 m64 k zmm
|
|
// VPROLQ.BCST imm8 m64 zmm
|
|
//
|
|
// Construct and append a VPROLQ.BCST instruction to the active function.
|
|
func (c *Context) VPROLQ_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPROLQ_BCST(ops...))
|
|
}
|
|
|
|
// VPROLQ_BCST: Rotate Packed Quadword Left (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPROLQ.BCST imm8 m64 k xmm
|
|
// VPROLQ.BCST imm8 m64 k ymm
|
|
// VPROLQ.BCST imm8 m64 xmm
|
|
// VPROLQ.BCST imm8 m64 ymm
|
|
// VPROLQ.BCST imm8 m64 k zmm
|
|
// VPROLQ.BCST imm8 m64 zmm
|
|
//
|
|
// Construct and append a VPROLQ.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPROLQ_BCST(ops ...operand.Op) { ctx.VPROLQ_BCST(ops...) }
|
|
|
|
// VPROLQ_BCST_Z: Rotate Packed Quadword Left (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPROLQ.BCST.Z imm8 m64 k xmm
|
|
// VPROLQ.BCST.Z imm8 m64 k ymm
|
|
// VPROLQ.BCST.Z imm8 m64 k zmm
|
|
//
|
|
// Construct and append a VPROLQ.BCST.Z instruction to the active function.
|
|
func (c *Context) VPROLQ_BCST_Z(i, m, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VPROLQ_BCST_Z(i, m, k, xyz))
|
|
}
|
|
|
|
// VPROLQ_BCST_Z: Rotate Packed Quadword Left (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPROLQ.BCST.Z imm8 m64 k xmm
|
|
// VPROLQ.BCST.Z imm8 m64 k ymm
|
|
// VPROLQ.BCST.Z imm8 m64 k zmm
|
|
//
|
|
// Construct and append a VPROLQ.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPROLQ_BCST_Z(i, m, k, xyz operand.Op) { ctx.VPROLQ_BCST_Z(i, m, k, xyz) }
|
|
|
|
// VPROLQ_Z: Rotate Packed Quadword Left (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPROLQ.Z imm8 m128 k xmm
|
|
// VPROLQ.Z imm8 m256 k ymm
|
|
// VPROLQ.Z imm8 xmm k xmm
|
|
// VPROLQ.Z imm8 ymm k ymm
|
|
// VPROLQ.Z imm8 m512 k zmm
|
|
// VPROLQ.Z imm8 zmm k zmm
|
|
//
|
|
// Construct and append a VPROLQ.Z instruction to the active function.
|
|
func (c *Context) VPROLQ_Z(i, mxyz, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VPROLQ_Z(i, mxyz, k, xyz))
|
|
}
|
|
|
|
// VPROLQ_Z: Rotate Packed Quadword Left (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPROLQ.Z imm8 m128 k xmm
|
|
// VPROLQ.Z imm8 m256 k ymm
|
|
// VPROLQ.Z imm8 xmm k xmm
|
|
// VPROLQ.Z imm8 ymm k ymm
|
|
// VPROLQ.Z imm8 m512 k zmm
|
|
// VPROLQ.Z imm8 zmm k zmm
|
|
//
|
|
// Construct and append a VPROLQ.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPROLQ_Z(i, mxyz, k, xyz operand.Op) { ctx.VPROLQ_Z(i, mxyz, k, xyz) }
|
|
|
|
// VPROLVD: Variable Rotate Packed Doubleword Left.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPROLVD m128 xmm k xmm
|
|
// VPROLVD m128 xmm xmm
|
|
// VPROLVD m256 ymm k ymm
|
|
// VPROLVD m256 ymm ymm
|
|
// VPROLVD xmm xmm k xmm
|
|
// VPROLVD xmm xmm xmm
|
|
// VPROLVD ymm ymm k ymm
|
|
// VPROLVD ymm ymm ymm
|
|
// VPROLVD m512 zmm k zmm
|
|
// VPROLVD m512 zmm zmm
|
|
// VPROLVD zmm zmm k zmm
|
|
// VPROLVD zmm zmm zmm
|
|
//
|
|
// Construct and append a VPROLVD instruction to the active function.
|
|
func (c *Context) VPROLVD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPROLVD(ops...))
|
|
}
|
|
|
|
// VPROLVD: Variable Rotate Packed Doubleword Left.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPROLVD m128 xmm k xmm
|
|
// VPROLVD m128 xmm xmm
|
|
// VPROLVD m256 ymm k ymm
|
|
// VPROLVD m256 ymm ymm
|
|
// VPROLVD xmm xmm k xmm
|
|
// VPROLVD xmm xmm xmm
|
|
// VPROLVD ymm ymm k ymm
|
|
// VPROLVD ymm ymm ymm
|
|
// VPROLVD m512 zmm k zmm
|
|
// VPROLVD m512 zmm zmm
|
|
// VPROLVD zmm zmm k zmm
|
|
// VPROLVD zmm zmm zmm
|
|
//
|
|
// Construct and append a VPROLVD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPROLVD(ops ...operand.Op) { ctx.VPROLVD(ops...) }
|
|
|
|
// VPROLVD_BCST: Variable Rotate Packed Doubleword Left (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPROLVD.BCST m32 xmm k xmm
|
|
// VPROLVD.BCST m32 xmm xmm
|
|
// VPROLVD.BCST m32 ymm k ymm
|
|
// VPROLVD.BCST m32 ymm ymm
|
|
// VPROLVD.BCST m32 zmm k zmm
|
|
// VPROLVD.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VPROLVD.BCST instruction to the active function.
|
|
func (c *Context) VPROLVD_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPROLVD_BCST(ops...))
|
|
}
|
|
|
|
// VPROLVD_BCST: Variable Rotate Packed Doubleword Left (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPROLVD.BCST m32 xmm k xmm
|
|
// VPROLVD.BCST m32 xmm xmm
|
|
// VPROLVD.BCST m32 ymm k ymm
|
|
// VPROLVD.BCST m32 ymm ymm
|
|
// VPROLVD.BCST m32 zmm k zmm
|
|
// VPROLVD.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VPROLVD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPROLVD_BCST(ops ...operand.Op) { ctx.VPROLVD_BCST(ops...) }
|
|
|
|
// VPROLVD_BCST_Z: Variable Rotate Packed Doubleword Left (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPROLVD.BCST.Z m32 xmm k xmm
|
|
// VPROLVD.BCST.Z m32 ymm k ymm
|
|
// VPROLVD.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VPROLVD.BCST.Z instruction to the active function.
|
|
func (c *Context) VPROLVD_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPROLVD_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPROLVD_BCST_Z: Variable Rotate Packed Doubleword Left (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPROLVD.BCST.Z m32 xmm k xmm
|
|
// VPROLVD.BCST.Z m32 ymm k ymm
|
|
// VPROLVD.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VPROLVD.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPROLVD_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VPROLVD_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VPROLVD_Z: Variable Rotate Packed Doubleword Left (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPROLVD.Z m128 xmm k xmm
|
|
// VPROLVD.Z m256 ymm k ymm
|
|
// VPROLVD.Z xmm xmm k xmm
|
|
// VPROLVD.Z ymm ymm k ymm
|
|
// VPROLVD.Z m512 zmm k zmm
|
|
// VPROLVD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPROLVD.Z instruction to the active function.
|
|
func (c *Context) VPROLVD_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPROLVD_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPROLVD_Z: Variable Rotate Packed Doubleword Left (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPROLVD.Z m128 xmm k xmm
|
|
// VPROLVD.Z m256 ymm k ymm
|
|
// VPROLVD.Z xmm xmm k xmm
|
|
// VPROLVD.Z ymm ymm k ymm
|
|
// VPROLVD.Z m512 zmm k zmm
|
|
// VPROLVD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPROLVD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPROLVD_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPROLVD_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPROLVQ: Variable Rotate Packed Quadword Left.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPROLVQ m128 xmm k xmm
|
|
// VPROLVQ m128 xmm xmm
|
|
// VPROLVQ m256 ymm k ymm
|
|
// VPROLVQ m256 ymm ymm
|
|
// VPROLVQ xmm xmm k xmm
|
|
// VPROLVQ xmm xmm xmm
|
|
// VPROLVQ ymm ymm k ymm
|
|
// VPROLVQ ymm ymm ymm
|
|
// VPROLVQ m512 zmm k zmm
|
|
// VPROLVQ m512 zmm zmm
|
|
// VPROLVQ zmm zmm k zmm
|
|
// VPROLVQ zmm zmm zmm
|
|
//
|
|
// Construct and append a VPROLVQ instruction to the active function.
|
|
func (c *Context) VPROLVQ(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPROLVQ(ops...))
|
|
}
|
|
|
|
// VPROLVQ: Variable Rotate Packed Quadword Left.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPROLVQ m128 xmm k xmm
|
|
// VPROLVQ m128 xmm xmm
|
|
// VPROLVQ m256 ymm k ymm
|
|
// VPROLVQ m256 ymm ymm
|
|
// VPROLVQ xmm xmm k xmm
|
|
// VPROLVQ xmm xmm xmm
|
|
// VPROLVQ ymm ymm k ymm
|
|
// VPROLVQ ymm ymm ymm
|
|
// VPROLVQ m512 zmm k zmm
|
|
// VPROLVQ m512 zmm zmm
|
|
// VPROLVQ zmm zmm k zmm
|
|
// VPROLVQ zmm zmm zmm
|
|
//
|
|
// Construct and append a VPROLVQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPROLVQ(ops ...operand.Op) { ctx.VPROLVQ(ops...) }
|
|
|
|
// VPROLVQ_BCST: Variable Rotate Packed Quadword Left (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPROLVQ.BCST m64 xmm k xmm
|
|
// VPROLVQ.BCST m64 xmm xmm
|
|
// VPROLVQ.BCST m64 ymm k ymm
|
|
// VPROLVQ.BCST m64 ymm ymm
|
|
// VPROLVQ.BCST m64 zmm k zmm
|
|
// VPROLVQ.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VPROLVQ.BCST instruction to the active function.
|
|
func (c *Context) VPROLVQ_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPROLVQ_BCST(ops...))
|
|
}
|
|
|
|
// VPROLVQ_BCST: Variable Rotate Packed Quadword Left (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPROLVQ.BCST m64 xmm k xmm
|
|
// VPROLVQ.BCST m64 xmm xmm
|
|
// VPROLVQ.BCST m64 ymm k ymm
|
|
// VPROLVQ.BCST m64 ymm ymm
|
|
// VPROLVQ.BCST m64 zmm k zmm
|
|
// VPROLVQ.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VPROLVQ.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPROLVQ_BCST(ops ...operand.Op) { ctx.VPROLVQ_BCST(ops...) }
|
|
|
|
// VPROLVQ_BCST_Z: Variable Rotate Packed Quadword Left (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPROLVQ.BCST.Z m64 xmm k xmm
|
|
// VPROLVQ.BCST.Z m64 ymm k ymm
|
|
// VPROLVQ.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VPROLVQ.BCST.Z instruction to the active function.
|
|
func (c *Context) VPROLVQ_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPROLVQ_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPROLVQ_BCST_Z: Variable Rotate Packed Quadword Left (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPROLVQ.BCST.Z m64 xmm k xmm
|
|
// VPROLVQ.BCST.Z m64 ymm k ymm
|
|
// VPROLVQ.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VPROLVQ.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPROLVQ_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VPROLVQ_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VPROLVQ_Z: Variable Rotate Packed Quadword Left (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPROLVQ.Z m128 xmm k xmm
|
|
// VPROLVQ.Z m256 ymm k ymm
|
|
// VPROLVQ.Z xmm xmm k xmm
|
|
// VPROLVQ.Z ymm ymm k ymm
|
|
// VPROLVQ.Z m512 zmm k zmm
|
|
// VPROLVQ.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPROLVQ.Z instruction to the active function.
|
|
func (c *Context) VPROLVQ_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPROLVQ_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPROLVQ_Z: Variable Rotate Packed Quadword Left (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPROLVQ.Z m128 xmm k xmm
|
|
// VPROLVQ.Z m256 ymm k ymm
|
|
// VPROLVQ.Z xmm xmm k xmm
|
|
// VPROLVQ.Z ymm ymm k ymm
|
|
// VPROLVQ.Z m512 zmm k zmm
|
|
// VPROLVQ.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPROLVQ.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPROLVQ_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPROLVQ_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPRORD: Rotate Packed Doubleword Right.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPRORD imm8 m128 k xmm
|
|
// VPRORD imm8 m128 xmm
|
|
// VPRORD imm8 m256 k ymm
|
|
// VPRORD imm8 m256 ymm
|
|
// VPRORD imm8 xmm k xmm
|
|
// VPRORD imm8 xmm xmm
|
|
// VPRORD imm8 ymm k ymm
|
|
// VPRORD imm8 ymm ymm
|
|
// VPRORD imm8 m512 k zmm
|
|
// VPRORD imm8 m512 zmm
|
|
// VPRORD imm8 zmm k zmm
|
|
// VPRORD imm8 zmm zmm
|
|
//
|
|
// Construct and append a VPRORD instruction to the active function.
|
|
func (c *Context) VPRORD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPRORD(ops...))
|
|
}
|
|
|
|
// VPRORD: Rotate Packed Doubleword Right.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPRORD imm8 m128 k xmm
|
|
// VPRORD imm8 m128 xmm
|
|
// VPRORD imm8 m256 k ymm
|
|
// VPRORD imm8 m256 ymm
|
|
// VPRORD imm8 xmm k xmm
|
|
// VPRORD imm8 xmm xmm
|
|
// VPRORD imm8 ymm k ymm
|
|
// VPRORD imm8 ymm ymm
|
|
// VPRORD imm8 m512 k zmm
|
|
// VPRORD imm8 m512 zmm
|
|
// VPRORD imm8 zmm k zmm
|
|
// VPRORD imm8 zmm zmm
|
|
//
|
|
// Construct and append a VPRORD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPRORD(ops ...operand.Op) { ctx.VPRORD(ops...) }
|
|
|
|
// VPRORD_BCST: Rotate Packed Doubleword Right (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPRORD.BCST imm8 m32 k xmm
|
|
// VPRORD.BCST imm8 m32 k ymm
|
|
// VPRORD.BCST imm8 m32 xmm
|
|
// VPRORD.BCST imm8 m32 ymm
|
|
// VPRORD.BCST imm8 m32 k zmm
|
|
// VPRORD.BCST imm8 m32 zmm
|
|
//
|
|
// Construct and append a VPRORD.BCST instruction to the active function.
|
|
func (c *Context) VPRORD_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPRORD_BCST(ops...))
|
|
}
|
|
|
|
// VPRORD_BCST: Rotate Packed Doubleword Right (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPRORD.BCST imm8 m32 k xmm
|
|
// VPRORD.BCST imm8 m32 k ymm
|
|
// VPRORD.BCST imm8 m32 xmm
|
|
// VPRORD.BCST imm8 m32 ymm
|
|
// VPRORD.BCST imm8 m32 k zmm
|
|
// VPRORD.BCST imm8 m32 zmm
|
|
//
|
|
// Construct and append a VPRORD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPRORD_BCST(ops ...operand.Op) { ctx.VPRORD_BCST(ops...) }
|
|
|
|
// VPRORD_BCST_Z: Rotate Packed Doubleword Right (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPRORD.BCST.Z imm8 m32 k xmm
|
|
// VPRORD.BCST.Z imm8 m32 k ymm
|
|
// VPRORD.BCST.Z imm8 m32 k zmm
|
|
//
|
|
// Construct and append a VPRORD.BCST.Z instruction to the active function.
|
|
func (c *Context) VPRORD_BCST_Z(i, m, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VPRORD_BCST_Z(i, m, k, xyz))
|
|
}
|
|
|
|
// VPRORD_BCST_Z: Rotate Packed Doubleword Right (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPRORD.BCST.Z imm8 m32 k xmm
|
|
// VPRORD.BCST.Z imm8 m32 k ymm
|
|
// VPRORD.BCST.Z imm8 m32 k zmm
|
|
//
|
|
// Construct and append a VPRORD.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPRORD_BCST_Z(i, m, k, xyz operand.Op) { ctx.VPRORD_BCST_Z(i, m, k, xyz) }
|
|
|
|
// VPRORD_Z: Rotate Packed Doubleword Right (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPRORD.Z imm8 m128 k xmm
|
|
// VPRORD.Z imm8 m256 k ymm
|
|
// VPRORD.Z imm8 xmm k xmm
|
|
// VPRORD.Z imm8 ymm k ymm
|
|
// VPRORD.Z imm8 m512 k zmm
|
|
// VPRORD.Z imm8 zmm k zmm
|
|
//
|
|
// Construct and append a VPRORD.Z instruction to the active function.
|
|
func (c *Context) VPRORD_Z(i, mxyz, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VPRORD_Z(i, mxyz, k, xyz))
|
|
}
|
|
|
|
// VPRORD_Z: Rotate Packed Doubleword Right (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPRORD.Z imm8 m128 k xmm
|
|
// VPRORD.Z imm8 m256 k ymm
|
|
// VPRORD.Z imm8 xmm k xmm
|
|
// VPRORD.Z imm8 ymm k ymm
|
|
// VPRORD.Z imm8 m512 k zmm
|
|
// VPRORD.Z imm8 zmm k zmm
|
|
//
|
|
// Construct and append a VPRORD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPRORD_Z(i, mxyz, k, xyz operand.Op) { ctx.VPRORD_Z(i, mxyz, k, xyz) }
|
|
|
|
// VPRORQ: Rotate Packed Quadword Right.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPRORQ imm8 m128 k xmm
|
|
// VPRORQ imm8 m128 xmm
|
|
// VPRORQ imm8 m256 k ymm
|
|
// VPRORQ imm8 m256 ymm
|
|
// VPRORQ imm8 xmm k xmm
|
|
// VPRORQ imm8 xmm xmm
|
|
// VPRORQ imm8 ymm k ymm
|
|
// VPRORQ imm8 ymm ymm
|
|
// VPRORQ imm8 m512 k zmm
|
|
// VPRORQ imm8 m512 zmm
|
|
// VPRORQ imm8 zmm k zmm
|
|
// VPRORQ imm8 zmm zmm
|
|
//
|
|
// Construct and append a VPRORQ instruction to the active function.
|
|
func (c *Context) VPRORQ(ops ...operand.Op) {
	// Variadic: form resolution is performed by x86.VPRORQ; addinstruction
	// appends the result or records the construction error.
	c.addinstruction(x86.VPRORQ(ops...))
}
|
|
|
|
// VPRORQ: Rotate Packed Quadword Right.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPRORQ imm8 m128 k xmm
|
|
// VPRORQ imm8 m128 xmm
|
|
// VPRORQ imm8 m256 k ymm
|
|
// VPRORQ imm8 m256 ymm
|
|
// VPRORQ imm8 xmm k xmm
|
|
// VPRORQ imm8 xmm xmm
|
|
// VPRORQ imm8 ymm k ymm
|
|
// VPRORQ imm8 ymm ymm
|
|
// VPRORQ imm8 m512 k zmm
|
|
// VPRORQ imm8 m512 zmm
|
|
// VPRORQ imm8 zmm k zmm
|
|
// VPRORQ imm8 zmm zmm
|
|
//
|
|
// Construct and append a VPRORQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPRORQ(ops ...operand.Op) { ctx.VPRORQ(ops...) } // forwards to the package-level ctx
|
|
|
|
// VPRORQ_BCST: Rotate Packed Quadword Right (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPRORQ.BCST imm8 m64 k xmm
|
|
// VPRORQ.BCST imm8 m64 k ymm
|
|
// VPRORQ.BCST imm8 m64 xmm
|
|
// VPRORQ.BCST imm8 m64 ymm
|
|
// VPRORQ.BCST imm8 m64 k zmm
|
|
// VPRORQ.BCST imm8 m64 zmm
|
|
//
|
|
// Construct and append a VPRORQ.BCST instruction to the active function.
|
|
func (c *Context) VPRORQ_BCST(ops ...operand.Op) {
	// Variadic: form resolution is performed by x86.VPRORQ_BCST; addinstruction
	// appends the result or records the construction error.
	c.addinstruction(x86.VPRORQ_BCST(ops...))
}
|
|
|
|
// VPRORQ_BCST: Rotate Packed Quadword Right (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPRORQ.BCST imm8 m64 k xmm
|
|
// VPRORQ.BCST imm8 m64 k ymm
|
|
// VPRORQ.BCST imm8 m64 xmm
|
|
// VPRORQ.BCST imm8 m64 ymm
|
|
// VPRORQ.BCST imm8 m64 k zmm
|
|
// VPRORQ.BCST imm8 m64 zmm
|
|
//
|
|
// Construct and append a VPRORQ.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPRORQ_BCST(ops ...operand.Op) { ctx.VPRORQ_BCST(ops...) } // forwards to the package-level ctx
|
|
|
|
// VPRORQ_BCST_Z: Rotate Packed Quadword Right (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPRORQ.BCST.Z imm8 m64 k xmm
|
|
// VPRORQ.BCST.Z imm8 m64 k ymm
|
|
// VPRORQ.BCST.Z imm8 m64 k zmm
|
|
//
|
|
// Construct and append a VPRORQ.BCST.Z instruction to the active function.
|
|
func (c *Context) VPRORQ_BCST_Z(i, m, k, xyz operand.Op) {
	// addinstruction appends the assembled instruction to the active function,
	// or records the operand-validation error on the context.
	c.addinstruction(x86.VPRORQ_BCST_Z(i, m, k, xyz))
}
|
|
|
|
// VPRORQ_BCST_Z: Rotate Packed Quadword Right (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPRORQ.BCST.Z imm8 m64 k xmm
|
|
// VPRORQ.BCST.Z imm8 m64 k ymm
|
|
// VPRORQ.BCST.Z imm8 m64 k zmm
|
|
//
|
|
// Construct and append a VPRORQ.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPRORQ_BCST_Z(i, m, k, xyz operand.Op) { ctx.VPRORQ_BCST_Z(i, m, k, xyz) } // forwards to the package-level ctx
|
|
|
|
// VPRORQ_Z: Rotate Packed Quadword Right (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPRORQ.Z imm8 m128 k xmm
|
|
// VPRORQ.Z imm8 m256 k ymm
|
|
// VPRORQ.Z imm8 xmm k xmm
|
|
// VPRORQ.Z imm8 ymm k ymm
|
|
// VPRORQ.Z imm8 m512 k zmm
|
|
// VPRORQ.Z imm8 zmm k zmm
|
|
//
|
|
// Construct and append a VPRORQ.Z instruction to the active function.
|
|
func (c *Context) VPRORQ_Z(i, mxyz, k, xyz operand.Op) {
	// addinstruction appends the assembled instruction to the active function,
	// or records the operand-validation error on the context.
	c.addinstruction(x86.VPRORQ_Z(i, mxyz, k, xyz))
}
|
|
|
|
// VPRORQ_Z: Rotate Packed Quadword Right (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPRORQ.Z imm8 m128 k xmm
|
|
// VPRORQ.Z imm8 m256 k ymm
|
|
// VPRORQ.Z imm8 xmm k xmm
|
|
// VPRORQ.Z imm8 ymm k ymm
|
|
// VPRORQ.Z imm8 m512 k zmm
|
|
// VPRORQ.Z imm8 zmm k zmm
|
|
//
|
|
// Construct and append a VPRORQ.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPRORQ_Z(i, mxyz, k, xyz operand.Op) { ctx.VPRORQ_Z(i, mxyz, k, xyz) } // forwards to the package-level ctx
|
|
|
|
// VPRORVD: Variable Rotate Packed Doubleword Right.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPRORVD m128 xmm k xmm
|
|
// VPRORVD m128 xmm xmm
|
|
// VPRORVD m256 ymm k ymm
|
|
// VPRORVD m256 ymm ymm
|
|
// VPRORVD xmm xmm k xmm
|
|
// VPRORVD xmm xmm xmm
|
|
// VPRORVD ymm ymm k ymm
|
|
// VPRORVD ymm ymm ymm
|
|
// VPRORVD m512 zmm k zmm
|
|
// VPRORVD m512 zmm zmm
|
|
// VPRORVD zmm zmm k zmm
|
|
// VPRORVD zmm zmm zmm
|
|
//
|
|
// Construct and append a VPRORVD instruction to the active function.
|
|
func (c *Context) VPRORVD(ops ...operand.Op) {
	// Variadic: form resolution is performed by x86.VPRORVD; addinstruction
	// appends the result or records the construction error.
	c.addinstruction(x86.VPRORVD(ops...))
}
|
|
|
|
// VPRORVD: Variable Rotate Packed Doubleword Right.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPRORVD m128 xmm k xmm
|
|
// VPRORVD m128 xmm xmm
|
|
// VPRORVD m256 ymm k ymm
|
|
// VPRORVD m256 ymm ymm
|
|
// VPRORVD xmm xmm k xmm
|
|
// VPRORVD xmm xmm xmm
|
|
// VPRORVD ymm ymm k ymm
|
|
// VPRORVD ymm ymm ymm
|
|
// VPRORVD m512 zmm k zmm
|
|
// VPRORVD m512 zmm zmm
|
|
// VPRORVD zmm zmm k zmm
|
|
// VPRORVD zmm zmm zmm
|
|
//
|
|
// Construct and append a VPRORVD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPRORVD(ops ...operand.Op) { ctx.VPRORVD(ops...) } // forwards to the package-level ctx
|
|
|
|
// VPRORVD_BCST: Variable Rotate Packed Doubleword Right (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPRORVD.BCST m32 xmm k xmm
|
|
// VPRORVD.BCST m32 xmm xmm
|
|
// VPRORVD.BCST m32 ymm k ymm
|
|
// VPRORVD.BCST m32 ymm ymm
|
|
// VPRORVD.BCST m32 zmm k zmm
|
|
// VPRORVD.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VPRORVD.BCST instruction to the active function.
|
|
func (c *Context) VPRORVD_BCST(ops ...operand.Op) {
	// Variadic: form resolution is performed by x86.VPRORVD_BCST; addinstruction
	// appends the result or records the construction error.
	c.addinstruction(x86.VPRORVD_BCST(ops...))
}
|
|
|
|
// VPRORVD_BCST: Variable Rotate Packed Doubleword Right (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPRORVD.BCST m32 xmm k xmm
|
|
// VPRORVD.BCST m32 xmm xmm
|
|
// VPRORVD.BCST m32 ymm k ymm
|
|
// VPRORVD.BCST m32 ymm ymm
|
|
// VPRORVD.BCST m32 zmm k zmm
|
|
// VPRORVD.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VPRORVD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPRORVD_BCST(ops ...operand.Op) { ctx.VPRORVD_BCST(ops...) } // forwards to the package-level ctx
|
|
|
|
// VPRORVD_BCST_Z: Variable Rotate Packed Doubleword Right (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPRORVD.BCST.Z m32 xmm k xmm
|
|
// VPRORVD.BCST.Z m32 ymm k ymm
|
|
// VPRORVD.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VPRORVD.BCST.Z instruction to the active function.
|
|
func (c *Context) VPRORVD_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	// addinstruction appends the assembled instruction to the active function,
	// or records the operand-validation error on the context.
	c.addinstruction(x86.VPRORVD_BCST_Z(m, xyz, k, xyz1))
}
|
|
|
|
// VPRORVD_BCST_Z: Variable Rotate Packed Doubleword Right (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPRORVD.BCST.Z m32 xmm k xmm
|
|
// VPRORVD.BCST.Z m32 ymm k ymm
|
|
// VPRORVD.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VPRORVD.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPRORVD_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VPRORVD_BCST_Z(m, xyz, k, xyz1) } // forwards to the package-level ctx
|
|
|
|
// VPRORVD_Z: Variable Rotate Packed Doubleword Right (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPRORVD.Z m128 xmm k xmm
|
|
// VPRORVD.Z m256 ymm k ymm
|
|
// VPRORVD.Z xmm xmm k xmm
|
|
// VPRORVD.Z ymm ymm k ymm
|
|
// VPRORVD.Z m512 zmm k zmm
|
|
// VPRORVD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPRORVD.Z instruction to the active function.
|
|
func (c *Context) VPRORVD_Z(mxyz, xyz, k, xyz1 operand.Op) {
	// addinstruction appends the assembled instruction to the active function,
	// or records the operand-validation error on the context.
	c.addinstruction(x86.VPRORVD_Z(mxyz, xyz, k, xyz1))
}
|
|
|
|
// VPRORVD_Z: Variable Rotate Packed Doubleword Right (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPRORVD.Z m128 xmm k xmm
|
|
// VPRORVD.Z m256 ymm k ymm
|
|
// VPRORVD.Z xmm xmm k xmm
|
|
// VPRORVD.Z ymm ymm k ymm
|
|
// VPRORVD.Z m512 zmm k zmm
|
|
// VPRORVD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPRORVD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPRORVD_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPRORVD_Z(mxyz, xyz, k, xyz1) } // forwards to the package-level ctx
|
|
|
|
// VPRORVQ: Variable Rotate Packed Quadword Right.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPRORVQ m128 xmm k xmm
|
|
// VPRORVQ m128 xmm xmm
|
|
// VPRORVQ m256 ymm k ymm
|
|
// VPRORVQ m256 ymm ymm
|
|
// VPRORVQ xmm xmm k xmm
|
|
// VPRORVQ xmm xmm xmm
|
|
// VPRORVQ ymm ymm k ymm
|
|
// VPRORVQ ymm ymm ymm
|
|
// VPRORVQ m512 zmm k zmm
|
|
// VPRORVQ m512 zmm zmm
|
|
// VPRORVQ zmm zmm k zmm
|
|
// VPRORVQ zmm zmm zmm
|
|
//
|
|
// Construct and append a VPRORVQ instruction to the active function.
|
|
func (c *Context) VPRORVQ(ops ...operand.Op) {
	// Variadic: form resolution is performed by x86.VPRORVQ; addinstruction
	// appends the result or records the construction error.
	c.addinstruction(x86.VPRORVQ(ops...))
}
|
|
|
|
// VPRORVQ: Variable Rotate Packed Quadword Right.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPRORVQ m128 xmm k xmm
|
|
// VPRORVQ m128 xmm xmm
|
|
// VPRORVQ m256 ymm k ymm
|
|
// VPRORVQ m256 ymm ymm
|
|
// VPRORVQ xmm xmm k xmm
|
|
// VPRORVQ xmm xmm xmm
|
|
// VPRORVQ ymm ymm k ymm
|
|
// VPRORVQ ymm ymm ymm
|
|
// VPRORVQ m512 zmm k zmm
|
|
// VPRORVQ m512 zmm zmm
|
|
// VPRORVQ zmm zmm k zmm
|
|
// VPRORVQ zmm zmm zmm
|
|
//
|
|
// Construct and append a VPRORVQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPRORVQ(ops ...operand.Op) { ctx.VPRORVQ(ops...) } // forwards to the package-level ctx
|
|
|
|
// VPRORVQ_BCST: Variable Rotate Packed Quadword Right (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPRORVQ.BCST m64 xmm k xmm
|
|
// VPRORVQ.BCST m64 xmm xmm
|
|
// VPRORVQ.BCST m64 ymm k ymm
|
|
// VPRORVQ.BCST m64 ymm ymm
|
|
// VPRORVQ.BCST m64 zmm k zmm
|
|
// VPRORVQ.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VPRORVQ.BCST instruction to the active function.
|
|
func (c *Context) VPRORVQ_BCST(ops ...operand.Op) {
	// Variadic: form resolution is performed by x86.VPRORVQ_BCST; addinstruction
	// appends the result or records the construction error.
	c.addinstruction(x86.VPRORVQ_BCST(ops...))
}
|
|
|
|
// VPRORVQ_BCST: Variable Rotate Packed Quadword Right (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPRORVQ.BCST m64 xmm k xmm
|
|
// VPRORVQ.BCST m64 xmm xmm
|
|
// VPRORVQ.BCST m64 ymm k ymm
|
|
// VPRORVQ.BCST m64 ymm ymm
|
|
// VPRORVQ.BCST m64 zmm k zmm
|
|
// VPRORVQ.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VPRORVQ.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPRORVQ_BCST(ops ...operand.Op) { ctx.VPRORVQ_BCST(ops...) } // forwards to the package-level ctx
|
|
|
|
// VPRORVQ_BCST_Z: Variable Rotate Packed Quadword Right (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPRORVQ.BCST.Z m64 xmm k xmm
|
|
// VPRORVQ.BCST.Z m64 ymm k ymm
|
|
// VPRORVQ.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VPRORVQ.BCST.Z instruction to the active function.
|
|
func (c *Context) VPRORVQ_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	// addinstruction appends the assembled instruction to the active function,
	// or records the operand-validation error on the context.
	c.addinstruction(x86.VPRORVQ_BCST_Z(m, xyz, k, xyz1))
}
|
|
|
|
// VPRORVQ_BCST_Z: Variable Rotate Packed Quadword Right (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPRORVQ.BCST.Z m64 xmm k xmm
|
|
// VPRORVQ.BCST.Z m64 ymm k ymm
|
|
// VPRORVQ.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VPRORVQ.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPRORVQ_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VPRORVQ_BCST_Z(m, xyz, k, xyz1) } // forwards to the package-level ctx
|
|
|
|
// VPRORVQ_Z: Variable Rotate Packed Quadword Right (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPRORVQ.Z m128 xmm k xmm
|
|
// VPRORVQ.Z m256 ymm k ymm
|
|
// VPRORVQ.Z xmm xmm k xmm
|
|
// VPRORVQ.Z ymm ymm k ymm
|
|
// VPRORVQ.Z m512 zmm k zmm
|
|
// VPRORVQ.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPRORVQ.Z instruction to the active function.
|
|
func (c *Context) VPRORVQ_Z(mxyz, xyz, k, xyz1 operand.Op) {
	// addinstruction appends the assembled instruction to the active function,
	// or records the operand-validation error on the context.
	c.addinstruction(x86.VPRORVQ_Z(mxyz, xyz, k, xyz1))
}
|
|
|
|
// VPRORVQ_Z: Variable Rotate Packed Quadword Right (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPRORVQ.Z m128 xmm k xmm
|
|
// VPRORVQ.Z m256 ymm k ymm
|
|
// VPRORVQ.Z xmm xmm k xmm
|
|
// VPRORVQ.Z ymm ymm k ymm
|
|
// VPRORVQ.Z m512 zmm k zmm
|
|
// VPRORVQ.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPRORVQ.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPRORVQ_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPRORVQ_Z(mxyz, xyz, k, xyz1) } // forwards to the package-level ctx
|
|
|
|
// VPSADBW: Compute Sum of Absolute Differences.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSADBW m256 ymm ymm
|
|
// VPSADBW ymm ymm ymm
|
|
// VPSADBW m128 xmm xmm
|
|
// VPSADBW xmm xmm xmm
|
|
// VPSADBW m512 zmm zmm
|
|
// VPSADBW zmm zmm zmm
|
|
//
|
|
// Construct and append a VPSADBW instruction to the active function.
|
|
func (c *Context) VPSADBW(mxyz, xyz, xyz1 operand.Op) {
	// addinstruction appends the assembled instruction to the active function,
	// or records the operand-validation error on the context.
	c.addinstruction(x86.VPSADBW(mxyz, xyz, xyz1))
}
|
|
|
|
// VPSADBW: Compute Sum of Absolute Differences.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSADBW m256 ymm ymm
|
|
// VPSADBW ymm ymm ymm
|
|
// VPSADBW m128 xmm xmm
|
|
// VPSADBW xmm xmm xmm
|
|
// VPSADBW m512 zmm zmm
|
|
// VPSADBW zmm zmm zmm
|
|
//
|
|
// Construct and append a VPSADBW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSADBW(mxyz, xyz, xyz1 operand.Op) { ctx.VPSADBW(mxyz, xyz, xyz1) } // forwards to the package-level ctx
|
|
|
|
// VPSCATTERDD: Scatter Packed Doubleword Values with Signed Doubleword Indices.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSCATTERDD xmm k vm32x
|
|
// VPSCATTERDD ymm k vm32y
|
|
// VPSCATTERDD zmm k vm32z
|
|
//
|
|
// Construct and append a VPSCATTERDD instruction to the active function.
|
|
func (c *Context) VPSCATTERDD(xyz, k, v operand.Op) {
	// addinstruction appends the assembled instruction to the active function,
	// or records the operand-validation error on the context.
	c.addinstruction(x86.VPSCATTERDD(xyz, k, v))
}
|
|
|
|
// VPSCATTERDD: Scatter Packed Doubleword Values with Signed Doubleword Indices.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSCATTERDD xmm k vm32x
|
|
// VPSCATTERDD ymm k vm32y
|
|
// VPSCATTERDD zmm k vm32z
|
|
//
|
|
// Construct and append a VPSCATTERDD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSCATTERDD(xyz, k, v operand.Op) { ctx.VPSCATTERDD(xyz, k, v) } // forwards to the package-level ctx
|
|
|
|
// VPSCATTERDQ: Scatter Packed Quadword Values with Signed Doubleword Indices.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSCATTERDQ xmm k vm32x
|
|
// VPSCATTERDQ ymm k vm32x
|
|
// VPSCATTERDQ zmm k vm32y
|
|
//
|
|
// Construct and append a VPSCATTERDQ instruction to the active function.
|
|
func (c *Context) VPSCATTERDQ(xyz, k, v operand.Op) {
	// addinstruction appends the assembled instruction to the active function,
	// or records the operand-validation error on the context.
	c.addinstruction(x86.VPSCATTERDQ(xyz, k, v))
}
|
|
|
|
// VPSCATTERDQ: Scatter Packed Quadword Values with Signed Doubleword Indices.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSCATTERDQ xmm k vm32x
|
|
// VPSCATTERDQ ymm k vm32x
|
|
// VPSCATTERDQ zmm k vm32y
|
|
//
|
|
// Construct and append a VPSCATTERDQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSCATTERDQ(xyz, k, v operand.Op) { ctx.VPSCATTERDQ(xyz, k, v) } // forwards to the package-level ctx
|
|
|
|
// VPSCATTERQD: Scatter Packed Doubleword Values with Signed Quadword Indices.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSCATTERQD xmm k vm64x
|
|
// VPSCATTERQD xmm k vm64y
|
|
// VPSCATTERQD ymm k vm64z
|
|
//
|
|
// Construct and append a VPSCATTERQD instruction to the active function.
|
|
func (c *Context) VPSCATTERQD(xy, k, v operand.Op) {
	// addinstruction appends the assembled instruction to the active function,
	// or records the operand-validation error on the context.
	c.addinstruction(x86.VPSCATTERQD(xy, k, v))
}
|
|
|
|
// VPSCATTERQD: Scatter Packed Doubleword Values with Signed Quadword Indices.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSCATTERQD xmm k vm64x
|
|
// VPSCATTERQD xmm k vm64y
|
|
// VPSCATTERQD ymm k vm64z
|
|
//
|
|
// Construct and append a VPSCATTERQD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSCATTERQD(xy, k, v operand.Op) { ctx.VPSCATTERQD(xy, k, v) } // forwards to the package-level ctx
|
|
|
|
// VPSCATTERQQ: Scatter Packed Quadword Values with Signed Quadword Indices.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSCATTERQQ xmm k vm64x
|
|
// VPSCATTERQQ ymm k vm64y
|
|
// VPSCATTERQQ zmm k vm64z
|
|
//
|
|
// Construct and append a VPSCATTERQQ instruction to the active function.
|
|
func (c *Context) VPSCATTERQQ(xyz, k, v operand.Op) {
	// addinstruction appends the assembled instruction to the active function,
	// or records the operand-validation error on the context.
	c.addinstruction(x86.VPSCATTERQQ(xyz, k, v))
}
|
|
|
|
// VPSCATTERQQ: Scatter Packed Quadword Values with Signed Quadword Indices.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSCATTERQQ xmm k vm64x
|
|
// VPSCATTERQQ ymm k vm64y
|
|
// VPSCATTERQQ zmm k vm64z
|
|
//
|
|
// Construct and append a VPSCATTERQQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSCATTERQQ(xyz, k, v operand.Op) { ctx.VPSCATTERQQ(xyz, k, v) } // forwards to the package-level ctx
|
|
|
|
// VPSHUFB: Packed Shuffle Bytes.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSHUFB m256 ymm ymm
|
|
// VPSHUFB ymm ymm ymm
|
|
// VPSHUFB m128 xmm xmm
|
|
// VPSHUFB xmm xmm xmm
|
|
// VPSHUFB m128 xmm k xmm
|
|
// VPSHUFB m256 ymm k ymm
|
|
// VPSHUFB xmm xmm k xmm
|
|
// VPSHUFB ymm ymm k ymm
|
|
// VPSHUFB m512 zmm k zmm
|
|
// VPSHUFB m512 zmm zmm
|
|
// VPSHUFB zmm zmm k zmm
|
|
// VPSHUFB zmm zmm zmm
|
|
//
|
|
// Construct and append a VPSHUFB instruction to the active function.
|
|
func (c *Context) VPSHUFB(ops ...operand.Op) {
	// Variadic: form resolution is performed by x86.VPSHUFB; addinstruction
	// appends the result or records the construction error.
	c.addinstruction(x86.VPSHUFB(ops...))
}
|
|
|
|
// VPSHUFB: Packed Shuffle Bytes.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSHUFB m256 ymm ymm
|
|
// VPSHUFB ymm ymm ymm
|
|
// VPSHUFB m128 xmm xmm
|
|
// VPSHUFB xmm xmm xmm
|
|
// VPSHUFB m128 xmm k xmm
|
|
// VPSHUFB m256 ymm k ymm
|
|
// VPSHUFB xmm xmm k xmm
|
|
// VPSHUFB ymm ymm k ymm
|
|
// VPSHUFB m512 zmm k zmm
|
|
// VPSHUFB m512 zmm zmm
|
|
// VPSHUFB zmm zmm k zmm
|
|
// VPSHUFB zmm zmm zmm
|
|
//
|
|
// Construct and append a VPSHUFB instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSHUFB(ops ...operand.Op) { ctx.VPSHUFB(ops...) } // forwards to the package-level ctx
|
|
|
|
// VPSHUFB_Z: Packed Shuffle Bytes (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSHUFB.Z m128 xmm k xmm
|
|
// VPSHUFB.Z m256 ymm k ymm
|
|
// VPSHUFB.Z xmm xmm k xmm
|
|
// VPSHUFB.Z ymm ymm k ymm
|
|
// VPSHUFB.Z m512 zmm k zmm
|
|
// VPSHUFB.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPSHUFB.Z instruction to the active function.
|
|
func (c *Context) VPSHUFB_Z(mxyz, xyz, k, xyz1 operand.Op) {
	// addinstruction appends the assembled instruction to the active function,
	// or records the operand-validation error on the context.
	c.addinstruction(x86.VPSHUFB_Z(mxyz, xyz, k, xyz1))
}
|
|
|
|
// VPSHUFB_Z: Packed Shuffle Bytes (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSHUFB.Z m128 xmm k xmm
|
|
// VPSHUFB.Z m256 ymm k ymm
|
|
// VPSHUFB.Z xmm xmm k xmm
|
|
// VPSHUFB.Z ymm ymm k ymm
|
|
// VPSHUFB.Z m512 zmm k zmm
|
|
// VPSHUFB.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPSHUFB.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSHUFB_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPSHUFB_Z(mxyz, xyz, k, xyz1) } // forwards to the package-level ctx
|
|
|
|
// VPSHUFD: Shuffle Packed Doublewords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSHUFD imm8 m256 ymm
|
|
// VPSHUFD imm8 ymm ymm
|
|
// VPSHUFD imm8 m128 xmm
|
|
// VPSHUFD imm8 xmm xmm
|
|
// VPSHUFD imm8 m128 k xmm
|
|
// VPSHUFD imm8 m256 k ymm
|
|
// VPSHUFD imm8 xmm k xmm
|
|
// VPSHUFD imm8 ymm k ymm
|
|
// VPSHUFD imm8 m512 k zmm
|
|
// VPSHUFD imm8 m512 zmm
|
|
// VPSHUFD imm8 zmm k zmm
|
|
// VPSHUFD imm8 zmm zmm
|
|
//
|
|
// Construct and append a VPSHUFD instruction to the active function.
|
|
func (c *Context) VPSHUFD(ops ...operand.Op) {
	// Variadic: form resolution is performed by x86.VPSHUFD; addinstruction
	// appends the result or records the construction error.
	c.addinstruction(x86.VPSHUFD(ops...))
}
|
|
|
|
// VPSHUFD: Shuffle Packed Doublewords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSHUFD imm8 m256 ymm
|
|
// VPSHUFD imm8 ymm ymm
|
|
// VPSHUFD imm8 m128 xmm
|
|
// VPSHUFD imm8 xmm xmm
|
|
// VPSHUFD imm8 m128 k xmm
|
|
// VPSHUFD imm8 m256 k ymm
|
|
// VPSHUFD imm8 xmm k xmm
|
|
// VPSHUFD imm8 ymm k ymm
|
|
// VPSHUFD imm8 m512 k zmm
|
|
// VPSHUFD imm8 m512 zmm
|
|
// VPSHUFD imm8 zmm k zmm
|
|
// VPSHUFD imm8 zmm zmm
|
|
//
|
|
// Construct and append a VPSHUFD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSHUFD(ops ...operand.Op) { ctx.VPSHUFD(ops...) } // forwards to the package-level ctx
|
|
|
|
// VPSHUFD_BCST: Shuffle Packed Doublewords (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSHUFD.BCST imm8 m32 k xmm
|
|
// VPSHUFD.BCST imm8 m32 k ymm
|
|
// VPSHUFD.BCST imm8 m32 xmm
|
|
// VPSHUFD.BCST imm8 m32 ymm
|
|
// VPSHUFD.BCST imm8 m32 k zmm
|
|
// VPSHUFD.BCST imm8 m32 zmm
|
|
//
|
|
// Construct and append a VPSHUFD.BCST instruction to the active function.
|
|
func (c *Context) VPSHUFD_BCST(ops ...operand.Op) {
	// Variadic: form resolution is performed by x86.VPSHUFD_BCST; addinstruction
	// appends the result or records the construction error.
	c.addinstruction(x86.VPSHUFD_BCST(ops...))
}
|
|
|
|
// VPSHUFD_BCST: Shuffle Packed Doublewords (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSHUFD.BCST imm8 m32 k xmm
|
|
// VPSHUFD.BCST imm8 m32 k ymm
|
|
// VPSHUFD.BCST imm8 m32 xmm
|
|
// VPSHUFD.BCST imm8 m32 ymm
|
|
// VPSHUFD.BCST imm8 m32 k zmm
|
|
// VPSHUFD.BCST imm8 m32 zmm
|
|
//
|
|
// Construct and append a VPSHUFD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSHUFD_BCST(ops ...operand.Op) { ctx.VPSHUFD_BCST(ops...) } // forwards to the package-level ctx
|
|
|
|
// VPSHUFD_BCST_Z: Shuffle Packed Doublewords (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSHUFD.BCST.Z imm8 m32 k xmm
|
|
// VPSHUFD.BCST.Z imm8 m32 k ymm
|
|
// VPSHUFD.BCST.Z imm8 m32 k zmm
|
|
//
|
|
// Construct and append a VPSHUFD.BCST.Z instruction to the active function.
|
|
func (c *Context) VPSHUFD_BCST_Z(i, m, k, xyz operand.Op) {
	// addinstruction appends the assembled instruction to the active function,
	// or records the operand-validation error on the context.
	c.addinstruction(x86.VPSHUFD_BCST_Z(i, m, k, xyz))
}
|
|
|
|
// VPSHUFD_BCST_Z: Shuffle Packed Doublewords (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSHUFD.BCST.Z imm8 m32 k xmm
|
|
// VPSHUFD.BCST.Z imm8 m32 k ymm
|
|
// VPSHUFD.BCST.Z imm8 m32 k zmm
|
|
//
|
|
// Construct and append a VPSHUFD.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSHUFD_BCST_Z(i, m, k, xyz operand.Op) { ctx.VPSHUFD_BCST_Z(i, m, k, xyz) } // forwards to the package-level ctx
|
|
|
|
// VPSHUFD_Z: Shuffle Packed Doublewords (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSHUFD.Z imm8 m128 k xmm
|
|
// VPSHUFD.Z imm8 m256 k ymm
|
|
// VPSHUFD.Z imm8 xmm k xmm
|
|
// VPSHUFD.Z imm8 ymm k ymm
|
|
// VPSHUFD.Z imm8 m512 k zmm
|
|
// VPSHUFD.Z imm8 zmm k zmm
|
|
//
|
|
// Construct and append a VPSHUFD.Z instruction to the active function.
|
|
func (c *Context) VPSHUFD_Z(i, mxyz, k, xyz operand.Op) {
	// addinstruction appends the assembled instruction to the active function,
	// or records the operand-validation error on the context.
	c.addinstruction(x86.VPSHUFD_Z(i, mxyz, k, xyz))
}
|
|
|
|
// VPSHUFD_Z: Shuffle Packed Doublewords (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSHUFD.Z imm8 m128 k xmm
|
|
// VPSHUFD.Z imm8 m256 k ymm
|
|
// VPSHUFD.Z imm8 xmm k xmm
|
|
// VPSHUFD.Z imm8 ymm k ymm
|
|
// VPSHUFD.Z imm8 m512 k zmm
|
|
// VPSHUFD.Z imm8 zmm k zmm
|
|
//
|
|
// Construct and append a VPSHUFD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSHUFD_Z(i, mxyz, k, xyz operand.Op) { ctx.VPSHUFD_Z(i, mxyz, k, xyz) } // forwards to the package-level ctx
|
|
|
|
// VPSHUFHW: Shuffle Packed High Words.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSHUFHW imm8 m256 ymm
|
|
// VPSHUFHW imm8 ymm ymm
|
|
// VPSHUFHW imm8 m128 xmm
|
|
// VPSHUFHW imm8 xmm xmm
|
|
// VPSHUFHW imm8 m128 k xmm
|
|
// VPSHUFHW imm8 m256 k ymm
|
|
// VPSHUFHW imm8 xmm k xmm
|
|
// VPSHUFHW imm8 ymm k ymm
|
|
// VPSHUFHW imm8 m512 k zmm
|
|
// VPSHUFHW imm8 m512 zmm
|
|
// VPSHUFHW imm8 zmm k zmm
|
|
// VPSHUFHW imm8 zmm zmm
|
|
//
|
|
// Construct and append a VPSHUFHW instruction to the active function.
|
|
func (c *Context) VPSHUFHW(ops ...operand.Op) {
	// Variadic: form resolution is performed by x86.VPSHUFHW; addinstruction
	// appends the result or records the construction error.
	c.addinstruction(x86.VPSHUFHW(ops...))
}
|
|
|
|
// VPSHUFHW: Shuffle Packed High Words.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSHUFHW imm8 m256 ymm
|
|
// VPSHUFHW imm8 ymm ymm
|
|
// VPSHUFHW imm8 m128 xmm
|
|
// VPSHUFHW imm8 xmm xmm
|
|
// VPSHUFHW imm8 m128 k xmm
|
|
// VPSHUFHW imm8 m256 k ymm
|
|
// VPSHUFHW imm8 xmm k xmm
|
|
// VPSHUFHW imm8 ymm k ymm
|
|
// VPSHUFHW imm8 m512 k zmm
|
|
// VPSHUFHW imm8 m512 zmm
|
|
// VPSHUFHW imm8 zmm k zmm
|
|
// VPSHUFHW imm8 zmm zmm
|
|
//
|
|
// Construct and append a VPSHUFHW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSHUFHW(ops ...operand.Op) { ctx.VPSHUFHW(ops...) } // forwards to the package-level ctx
|
|
|
|
// VPSHUFHW_Z: Shuffle Packed High Words (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSHUFHW.Z imm8 m128 k xmm
|
|
// VPSHUFHW.Z imm8 m256 k ymm
|
|
// VPSHUFHW.Z imm8 xmm k xmm
|
|
// VPSHUFHW.Z imm8 ymm k ymm
|
|
// VPSHUFHW.Z imm8 m512 k zmm
|
|
// VPSHUFHW.Z imm8 zmm k zmm
|
|
//
|
|
// Construct and append a VPSHUFHW.Z instruction to the active function.
|
|
func (c *Context) VPSHUFHW_Z(i, mxyz, k, xyz operand.Op) {
	// addinstruction appends the assembled instruction to the active function,
	// or records the operand-validation error on the context.
	c.addinstruction(x86.VPSHUFHW_Z(i, mxyz, k, xyz))
}
|
|
|
|
// VPSHUFHW_Z: Shuffle Packed High Words (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSHUFHW.Z imm8 m128 k xmm
|
|
// VPSHUFHW.Z imm8 m256 k ymm
|
|
// VPSHUFHW.Z imm8 xmm k xmm
|
|
// VPSHUFHW.Z imm8 ymm k ymm
|
|
// VPSHUFHW.Z imm8 m512 k zmm
|
|
// VPSHUFHW.Z imm8 zmm k zmm
|
|
//
|
|
// Construct and append a VPSHUFHW.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSHUFHW_Z(i, mxyz, k, xyz operand.Op) { ctx.VPSHUFHW_Z(i, mxyz, k, xyz) } // forwards to the package-level ctx
|
|
|
|
// VPSHUFLW: Shuffle Packed Low Words.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSHUFLW imm8 m256 ymm
|
|
// VPSHUFLW imm8 ymm ymm
|
|
// VPSHUFLW imm8 m128 xmm
|
|
// VPSHUFLW imm8 xmm xmm
|
|
// VPSHUFLW imm8 m128 k xmm
|
|
// VPSHUFLW imm8 m256 k ymm
|
|
// VPSHUFLW imm8 xmm k xmm
|
|
// VPSHUFLW imm8 ymm k ymm
|
|
// VPSHUFLW imm8 m512 k zmm
|
|
// VPSHUFLW imm8 m512 zmm
|
|
// VPSHUFLW imm8 zmm k zmm
|
|
// VPSHUFLW imm8 zmm zmm
|
|
//
|
|
// Construct and append a VPSHUFLW instruction to the active function.
|
|
func (c *Context) VPSHUFLW(ops ...operand.Op) {
	// Variadic: form resolution is performed by x86.VPSHUFLW; addinstruction
	// appends the result or records the construction error.
	c.addinstruction(x86.VPSHUFLW(ops...))
}
|
|
|
|
// VPSHUFLW: Shuffle Packed Low Words.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSHUFLW imm8 m256 ymm
|
|
// VPSHUFLW imm8 ymm ymm
|
|
// VPSHUFLW imm8 m128 xmm
|
|
// VPSHUFLW imm8 xmm xmm
|
|
// VPSHUFLW imm8 m128 k xmm
|
|
// VPSHUFLW imm8 m256 k ymm
|
|
// VPSHUFLW imm8 xmm k xmm
|
|
// VPSHUFLW imm8 ymm k ymm
|
|
// VPSHUFLW imm8 m512 k zmm
|
|
// VPSHUFLW imm8 m512 zmm
|
|
// VPSHUFLW imm8 zmm k zmm
|
|
// VPSHUFLW imm8 zmm zmm
|
|
//
|
|
// Construct and append a VPSHUFLW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSHUFLW(ops ...operand.Op) { ctx.VPSHUFLW(ops...) } // forwards to the package-level ctx
|
|
|
|
// VPSHUFLW_Z: Shuffle Packed Low Words (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSHUFLW.Z imm8 m128 k xmm
|
|
// VPSHUFLW.Z imm8 m256 k ymm
|
|
// VPSHUFLW.Z imm8 xmm k xmm
|
|
// VPSHUFLW.Z imm8 ymm k ymm
|
|
// VPSHUFLW.Z imm8 m512 k zmm
|
|
// VPSHUFLW.Z imm8 zmm k zmm
|
|
//
|
|
// Construct and append a VPSHUFLW.Z instruction to the active function.
|
|
func (c *Context) VPSHUFLW_Z(i, mxyz, k, xyz operand.Op) {
	// addinstruction appends the assembled instruction to the active function,
	// or records the operand-validation error on the context.
	c.addinstruction(x86.VPSHUFLW_Z(i, mxyz, k, xyz))
}
|
|
|
|
// VPSHUFLW_Z: Shuffle Packed Low Words (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSHUFLW.Z imm8 m128 k xmm
|
|
// VPSHUFLW.Z imm8 m256 k ymm
|
|
// VPSHUFLW.Z imm8 xmm k xmm
|
|
// VPSHUFLW.Z imm8 ymm k ymm
|
|
// VPSHUFLW.Z imm8 m512 k zmm
|
|
// VPSHUFLW.Z imm8 zmm k zmm
|
|
//
|
|
// Construct and append a VPSHUFLW.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSHUFLW_Z(i, mxyz, k, xyz operand.Op) { ctx.VPSHUFLW_Z(i, mxyz, k, xyz) } // forwards to the package-level ctx
|
|
|
|
// VPSIGNB: Packed Sign of Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSIGNB m256 ymm ymm
|
|
// VPSIGNB ymm ymm ymm
|
|
// VPSIGNB m128 xmm xmm
|
|
// VPSIGNB xmm xmm xmm
|
|
//
|
|
// Construct and append a VPSIGNB instruction to the active function.
|
|
func (c *Context) VPSIGNB(mxy, xy, xy1 operand.Op) {
	// addinstruction appends the assembled instruction to the active function,
	// or records the operand-validation error on the context.
	c.addinstruction(x86.VPSIGNB(mxy, xy, xy1))
}
|
|
|
|
// VPSIGNB: Packed Sign of Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSIGNB m256 ymm ymm
|
|
// VPSIGNB ymm ymm ymm
|
|
// VPSIGNB m128 xmm xmm
|
|
// VPSIGNB xmm xmm xmm
|
|
//
|
|
// Construct and append a VPSIGNB instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSIGNB(mxy, xy, xy1 operand.Op) { ctx.VPSIGNB(mxy, xy, xy1) } // forwards to the package-level ctx
|
|
|
|
// VPSIGND: Packed Sign of Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSIGND m256 ymm ymm
|
|
// VPSIGND ymm ymm ymm
|
|
// VPSIGND m128 xmm xmm
|
|
// VPSIGND xmm xmm xmm
|
|
//
|
|
// Construct and append a VPSIGND instruction to the active function.
|
|
func (c *Context) VPSIGND(mxy, xy, xy1 operand.Op) {
	// addinstruction appends the assembled instruction to the active function,
	// or records the operand-validation error on the context.
	c.addinstruction(x86.VPSIGND(mxy, xy, xy1))
}
|
|
|
|
// VPSIGND: Packed Sign of Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSIGND m256 ymm ymm
|
|
// VPSIGND ymm ymm ymm
|
|
// VPSIGND m128 xmm xmm
|
|
// VPSIGND xmm xmm xmm
|
|
//
|
|
// Construct and append a VPSIGND instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSIGND(mxy, xy, xy1 operand.Op) { ctx.VPSIGND(mxy, xy, xy1) } // forwards to the package-level ctx
|
|
|
|
// VPSIGNW: Packed Sign of Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSIGNW m256 ymm ymm
|
|
// VPSIGNW ymm ymm ymm
|
|
// VPSIGNW m128 xmm xmm
|
|
// VPSIGNW xmm xmm xmm
|
|
//
|
|
// Construct and append a VPSIGNW instruction to the active function.
|
|
func (c *Context) VPSIGNW(mxy, xy, xy1 operand.Op) {
	// addinstruction appends the assembled instruction to the active function,
	// or records the operand-validation error on the context.
	c.addinstruction(x86.VPSIGNW(mxy, xy, xy1))
}
|
|
|
|
// VPSIGNW: Packed Sign of Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSIGNW m256 ymm ymm
|
|
// VPSIGNW ymm ymm ymm
|
|
// VPSIGNW m128 xmm xmm
|
|
// VPSIGNW xmm xmm xmm
|
|
//
|
|
// Construct and append a VPSIGNW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSIGNW(mxy, xy, xy1 operand.Op) { ctx.VPSIGNW(mxy, xy, xy1) } // forwards to the package-level ctx
|
|
|
|
// VPSLLD: Shift Packed Doubleword Data Left Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSLLD imm8 ymm ymm
|
|
// VPSLLD m128 ymm ymm
|
|
// VPSLLD xmm ymm ymm
|
|
// VPSLLD imm8 xmm xmm
|
|
// VPSLLD m128 xmm xmm
|
|
// VPSLLD xmm xmm xmm
|
|
// VPSLLD imm8 m128 k xmm
|
|
// VPSLLD imm8 m128 xmm
|
|
// VPSLLD imm8 m256 k ymm
|
|
// VPSLLD imm8 m256 ymm
|
|
// VPSLLD imm8 xmm k xmm
|
|
// VPSLLD imm8 ymm k ymm
|
|
// VPSLLD m128 xmm k xmm
|
|
// VPSLLD m128 ymm k ymm
|
|
// VPSLLD xmm xmm k xmm
|
|
// VPSLLD xmm ymm k ymm
|
|
// VPSLLD imm8 m512 k zmm
|
|
// VPSLLD imm8 m512 zmm
|
|
// VPSLLD imm8 zmm k zmm
|
|
// VPSLLD imm8 zmm zmm
|
|
// VPSLLD m128 zmm k zmm
|
|
// VPSLLD m128 zmm zmm
|
|
// VPSLLD xmm zmm k zmm
|
|
// VPSLLD xmm zmm zmm
|
|
//
|
|
// Construct and append a VPSLLD instruction to the active function.
|
|
func (c *Context) VPSLLD(ops ...operand.Op) {
	// Variadic: form resolution is performed by x86.VPSLLD; addinstruction
	// appends the result or records the construction error.
	c.addinstruction(x86.VPSLLD(ops...))
}
|
|
|
|
// VPSLLD: Shift Packed Doubleword Data Left Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSLLD imm8 ymm ymm
|
|
// VPSLLD m128 ymm ymm
|
|
// VPSLLD xmm ymm ymm
|
|
// VPSLLD imm8 xmm xmm
|
|
// VPSLLD m128 xmm xmm
|
|
// VPSLLD xmm xmm xmm
|
|
// VPSLLD imm8 m128 k xmm
|
|
// VPSLLD imm8 m128 xmm
|
|
// VPSLLD imm8 m256 k ymm
|
|
// VPSLLD imm8 m256 ymm
|
|
// VPSLLD imm8 xmm k xmm
|
|
// VPSLLD imm8 ymm k ymm
|
|
// VPSLLD m128 xmm k xmm
|
|
// VPSLLD m128 ymm k ymm
|
|
// VPSLLD xmm xmm k xmm
|
|
// VPSLLD xmm ymm k ymm
|
|
// VPSLLD imm8 m512 k zmm
|
|
// VPSLLD imm8 m512 zmm
|
|
// VPSLLD imm8 zmm k zmm
|
|
// VPSLLD imm8 zmm zmm
|
|
// VPSLLD m128 zmm k zmm
|
|
// VPSLLD m128 zmm zmm
|
|
// VPSLLD xmm zmm k zmm
|
|
// VPSLLD xmm zmm zmm
|
|
//
|
|
// Construct and append a VPSLLD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSLLD(ops ...operand.Op) { ctx.VPSLLD(ops...) } // forwards to the package-level ctx
|
|
|
|
// VPSLLDQ: Shift Packed Double Quadword Left Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSLLDQ imm8 ymm ymm
|
|
// VPSLLDQ imm8 xmm xmm
|
|
// VPSLLDQ imm8 m128 xmm
|
|
// VPSLLDQ imm8 m256 ymm
|
|
// VPSLLDQ imm8 m512 zmm
|
|
// VPSLLDQ imm8 zmm zmm
|
|
//
|
|
// Construct and append a VPSLLDQ instruction to the active function.
|
|
func (c *Context) VPSLLDQ(i, mxyz, xyz operand.Op) {
	// addinstruction appends the assembled instruction to the active function,
	// or records the operand-validation error on the context.
	c.addinstruction(x86.VPSLLDQ(i, mxyz, xyz))
}
|
|
|
|
// VPSLLDQ: Shift Packed Double Quadword Left Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSLLDQ imm8 ymm ymm
|
|
// VPSLLDQ imm8 xmm xmm
|
|
// VPSLLDQ imm8 m128 xmm
|
|
// VPSLLDQ imm8 m256 ymm
|
|
// VPSLLDQ imm8 m512 zmm
|
|
// VPSLLDQ imm8 zmm zmm
|
|
//
|
|
// Construct and append a VPSLLDQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSLLDQ(i, mxyz, xyz operand.Op) { ctx.VPSLLDQ(i, mxyz, xyz) } // forwards to the package-level ctx
|
|
|
|
// VPSLLD_BCST: Shift Packed Doubleword Data Left Logical (Broadcast).
//
// Forms:
//
//	VPSLLD.BCST imm8 m32 k xmm
//	VPSLLD.BCST imm8 m32 k ymm
//	VPSLLD.BCST imm8 m32 xmm
//	VPSLLD.BCST imm8 m32 ymm
//	VPSLLD.BCST imm8 m32 k zmm
//	VPSLLD.BCST imm8 m32 zmm
//
// Construct and append a VPSLLD.BCST instruction to the active function.
func (c *Context) VPSLLD_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VPSLLD_BCST(ops...))
}

// VPSLLD_BCST: Shift Packed Doubleword Data Left Logical (Broadcast).
//
// Forms:
//
//	VPSLLD.BCST imm8 m32 k xmm
//	VPSLLD.BCST imm8 m32 k ymm
//	VPSLLD.BCST imm8 m32 xmm
//	VPSLLD.BCST imm8 m32 ymm
//	VPSLLD.BCST imm8 m32 k zmm
//	VPSLLD.BCST imm8 m32 zmm
//
// Construct and append a VPSLLD.BCST instruction to the active function.
// Operates on the global context.
func VPSLLD_BCST(ops ...operand.Op) { ctx.VPSLLD_BCST(ops...) }
|
|
|
|
// VPSLLD_BCST_Z: Shift Packed Doubleword Data Left Logical (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VPSLLD.BCST.Z imm8 m32 k xmm
//	VPSLLD.BCST.Z imm8 m32 k ymm
//	VPSLLD.BCST.Z imm8 m32 k zmm
//
// Construct and append a VPSLLD.BCST.Z instruction to the active function.
func (c *Context) VPSLLD_BCST_Z(i, m, k, xyz operand.Op) {
	c.addinstruction(x86.VPSLLD_BCST_Z(i, m, k, xyz))
}

// VPSLLD_BCST_Z: Shift Packed Doubleword Data Left Logical (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VPSLLD.BCST.Z imm8 m32 k xmm
//	VPSLLD.BCST.Z imm8 m32 k ymm
//	VPSLLD.BCST.Z imm8 m32 k zmm
//
// Construct and append a VPSLLD.BCST.Z instruction to the active function.
// Operates on the global context.
func VPSLLD_BCST_Z(i, m, k, xyz operand.Op) { ctx.VPSLLD_BCST_Z(i, m, k, xyz) }
|
|
|
|
// VPSLLD_Z: Shift Packed Doubleword Data Left Logical (Zeroing Masking).
//
// Forms:
//
//	VPSLLD.Z imm8 m128 k xmm
//	VPSLLD.Z imm8 m256 k ymm
//	VPSLLD.Z imm8 xmm k xmm
//	VPSLLD.Z imm8 ymm k ymm
//	VPSLLD.Z m128 xmm k xmm
//	VPSLLD.Z m128 ymm k ymm
//	VPSLLD.Z xmm xmm k xmm
//	VPSLLD.Z xmm ymm k ymm
//	VPSLLD.Z imm8 m512 k zmm
//	VPSLLD.Z imm8 zmm k zmm
//	VPSLLD.Z m128 zmm k zmm
//	VPSLLD.Z xmm zmm k zmm
//
// Construct and append a VPSLLD.Z instruction to the active function.
func (c *Context) VPSLLD_Z(imx, mxyz, k, xyz operand.Op) {
	c.addinstruction(x86.VPSLLD_Z(imx, mxyz, k, xyz))
}

// VPSLLD_Z: Shift Packed Doubleword Data Left Logical (Zeroing Masking).
//
// Forms:
//
//	VPSLLD.Z imm8 m128 k xmm
//	VPSLLD.Z imm8 m256 k ymm
//	VPSLLD.Z imm8 xmm k xmm
//	VPSLLD.Z imm8 ymm k ymm
//	VPSLLD.Z m128 xmm k xmm
//	VPSLLD.Z m128 ymm k ymm
//	VPSLLD.Z xmm xmm k xmm
//	VPSLLD.Z xmm ymm k ymm
//	VPSLLD.Z imm8 m512 k zmm
//	VPSLLD.Z imm8 zmm k zmm
//	VPSLLD.Z m128 zmm k zmm
//	VPSLLD.Z xmm zmm k zmm
//
// Construct and append a VPSLLD.Z instruction to the active function.
// Operates on the global context.
func VPSLLD_Z(imx, mxyz, k, xyz operand.Op) { ctx.VPSLLD_Z(imx, mxyz, k, xyz) }
|
|
|
|
// VPSLLQ: Shift Packed Quadword Data Left Logical.
//
// Forms:
//
//	VPSLLQ imm8 ymm ymm
//	VPSLLQ m128 ymm ymm
//	VPSLLQ xmm ymm ymm
//	VPSLLQ imm8 xmm xmm
//	VPSLLQ m128 xmm xmm
//	VPSLLQ xmm xmm xmm
//	VPSLLQ imm8 m128 k xmm
//	VPSLLQ imm8 m128 xmm
//	VPSLLQ imm8 m256 k ymm
//	VPSLLQ imm8 m256 ymm
//	VPSLLQ imm8 xmm k xmm
//	VPSLLQ imm8 ymm k ymm
//	VPSLLQ m128 xmm k xmm
//	VPSLLQ m128 ymm k ymm
//	VPSLLQ xmm xmm k xmm
//	VPSLLQ xmm ymm k ymm
//	VPSLLQ imm8 m512 k zmm
//	VPSLLQ imm8 m512 zmm
//	VPSLLQ imm8 zmm k zmm
//	VPSLLQ imm8 zmm zmm
//	VPSLLQ m128 zmm k zmm
//	VPSLLQ m128 zmm zmm
//	VPSLLQ xmm zmm k zmm
//	VPSLLQ xmm zmm zmm
//
// Construct and append a VPSLLQ instruction to the active function.
func (c *Context) VPSLLQ(ops ...operand.Op) {
	c.addinstruction(x86.VPSLLQ(ops...))
}

// VPSLLQ: Shift Packed Quadword Data Left Logical.
//
// Forms:
//
//	VPSLLQ imm8 ymm ymm
//	VPSLLQ m128 ymm ymm
//	VPSLLQ xmm ymm ymm
//	VPSLLQ imm8 xmm xmm
//	VPSLLQ m128 xmm xmm
//	VPSLLQ xmm xmm xmm
//	VPSLLQ imm8 m128 k xmm
//	VPSLLQ imm8 m128 xmm
//	VPSLLQ imm8 m256 k ymm
//	VPSLLQ imm8 m256 ymm
//	VPSLLQ imm8 xmm k xmm
//	VPSLLQ imm8 ymm k ymm
//	VPSLLQ m128 xmm k xmm
//	VPSLLQ m128 ymm k ymm
//	VPSLLQ xmm xmm k xmm
//	VPSLLQ xmm ymm k ymm
//	VPSLLQ imm8 m512 k zmm
//	VPSLLQ imm8 m512 zmm
//	VPSLLQ imm8 zmm k zmm
//	VPSLLQ imm8 zmm zmm
//	VPSLLQ m128 zmm k zmm
//	VPSLLQ m128 zmm zmm
//	VPSLLQ xmm zmm k zmm
//	VPSLLQ xmm zmm zmm
//
// Construct and append a VPSLLQ instruction to the active function.
// Operates on the global context.
func VPSLLQ(ops ...operand.Op) { ctx.VPSLLQ(ops...) }
|
|
|
|
// VPSLLQ_BCST: Shift Packed Quadword Data Left Logical (Broadcast).
//
// Forms:
//
//	VPSLLQ.BCST imm8 m64 k xmm
//	VPSLLQ.BCST imm8 m64 k ymm
//	VPSLLQ.BCST imm8 m64 xmm
//	VPSLLQ.BCST imm8 m64 ymm
//	VPSLLQ.BCST imm8 m64 k zmm
//	VPSLLQ.BCST imm8 m64 zmm
//
// Construct and append a VPSLLQ.BCST instruction to the active function.
func (c *Context) VPSLLQ_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VPSLLQ_BCST(ops...))
}

// VPSLLQ_BCST: Shift Packed Quadword Data Left Logical (Broadcast).
//
// Forms:
//
//	VPSLLQ.BCST imm8 m64 k xmm
//	VPSLLQ.BCST imm8 m64 k ymm
//	VPSLLQ.BCST imm8 m64 xmm
//	VPSLLQ.BCST imm8 m64 ymm
//	VPSLLQ.BCST imm8 m64 k zmm
//	VPSLLQ.BCST imm8 m64 zmm
//
// Construct and append a VPSLLQ.BCST instruction to the active function.
// Operates on the global context.
func VPSLLQ_BCST(ops ...operand.Op) { ctx.VPSLLQ_BCST(ops...) }
|
|
|
|
// VPSLLQ_BCST_Z: Shift Packed Quadword Data Left Logical (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VPSLLQ.BCST.Z imm8 m64 k xmm
//	VPSLLQ.BCST.Z imm8 m64 k ymm
//	VPSLLQ.BCST.Z imm8 m64 k zmm
//
// Construct and append a VPSLLQ.BCST.Z instruction to the active function.
func (c *Context) VPSLLQ_BCST_Z(i, m, k, xyz operand.Op) {
	c.addinstruction(x86.VPSLLQ_BCST_Z(i, m, k, xyz))
}

// VPSLLQ_BCST_Z: Shift Packed Quadword Data Left Logical (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VPSLLQ.BCST.Z imm8 m64 k xmm
//	VPSLLQ.BCST.Z imm8 m64 k ymm
//	VPSLLQ.BCST.Z imm8 m64 k zmm
//
// Construct and append a VPSLLQ.BCST.Z instruction to the active function.
// Operates on the global context.
func VPSLLQ_BCST_Z(i, m, k, xyz operand.Op) { ctx.VPSLLQ_BCST_Z(i, m, k, xyz) }
|
|
|
|
// VPSLLQ_Z: Shift Packed Quadword Data Left Logical (Zeroing Masking).
//
// Forms:
//
//	VPSLLQ.Z imm8 m128 k xmm
//	VPSLLQ.Z imm8 m256 k ymm
//	VPSLLQ.Z imm8 xmm k xmm
//	VPSLLQ.Z imm8 ymm k ymm
//	VPSLLQ.Z m128 xmm k xmm
//	VPSLLQ.Z m128 ymm k ymm
//	VPSLLQ.Z xmm xmm k xmm
//	VPSLLQ.Z xmm ymm k ymm
//	VPSLLQ.Z imm8 m512 k zmm
//	VPSLLQ.Z imm8 zmm k zmm
//	VPSLLQ.Z m128 zmm k zmm
//	VPSLLQ.Z xmm zmm k zmm
//
// Construct and append a VPSLLQ.Z instruction to the active function.
func (c *Context) VPSLLQ_Z(imx, mxyz, k, xyz operand.Op) {
	c.addinstruction(x86.VPSLLQ_Z(imx, mxyz, k, xyz))
}

// VPSLLQ_Z: Shift Packed Quadword Data Left Logical (Zeroing Masking).
//
// Forms:
//
//	VPSLLQ.Z imm8 m128 k xmm
//	VPSLLQ.Z imm8 m256 k ymm
//	VPSLLQ.Z imm8 xmm k xmm
//	VPSLLQ.Z imm8 ymm k ymm
//	VPSLLQ.Z m128 xmm k xmm
//	VPSLLQ.Z m128 ymm k ymm
//	VPSLLQ.Z xmm xmm k xmm
//	VPSLLQ.Z xmm ymm k ymm
//	VPSLLQ.Z imm8 m512 k zmm
//	VPSLLQ.Z imm8 zmm k zmm
//	VPSLLQ.Z m128 zmm k zmm
//	VPSLLQ.Z xmm zmm k zmm
//
// Construct and append a VPSLLQ.Z instruction to the active function.
// Operates on the global context.
func VPSLLQ_Z(imx, mxyz, k, xyz operand.Op) { ctx.VPSLLQ_Z(imx, mxyz, k, xyz) }
|
|
|
|
// VPSLLVD: Variable Shift Packed Doubleword Data Left Logical.
//
// Forms:
//
//	VPSLLVD m128 xmm xmm
//	VPSLLVD m256 ymm ymm
//	VPSLLVD xmm xmm xmm
//	VPSLLVD ymm ymm ymm
//	VPSLLVD m128 xmm k xmm
//	VPSLLVD m256 ymm k ymm
//	VPSLLVD xmm xmm k xmm
//	VPSLLVD ymm ymm k ymm
//	VPSLLVD m512 zmm k zmm
//	VPSLLVD m512 zmm zmm
//	VPSLLVD zmm zmm k zmm
//	VPSLLVD zmm zmm zmm
//
// Construct and append a VPSLLVD instruction to the active function.
func (c *Context) VPSLLVD(ops ...operand.Op) {
	c.addinstruction(x86.VPSLLVD(ops...))
}

// VPSLLVD: Variable Shift Packed Doubleword Data Left Logical.
//
// Forms:
//
//	VPSLLVD m128 xmm xmm
//	VPSLLVD m256 ymm ymm
//	VPSLLVD xmm xmm xmm
//	VPSLLVD ymm ymm ymm
//	VPSLLVD m128 xmm k xmm
//	VPSLLVD m256 ymm k ymm
//	VPSLLVD xmm xmm k xmm
//	VPSLLVD ymm ymm k ymm
//	VPSLLVD m512 zmm k zmm
//	VPSLLVD m512 zmm zmm
//	VPSLLVD zmm zmm k zmm
//	VPSLLVD zmm zmm zmm
//
// Construct and append a VPSLLVD instruction to the active function.
// Operates on the global context.
func VPSLLVD(ops ...operand.Op) { ctx.VPSLLVD(ops...) }
|
|
|
|
// VPSLLVD_BCST: Variable Shift Packed Doubleword Data Left Logical (Broadcast).
//
// Forms:
//
//	VPSLLVD.BCST m32 xmm k xmm
//	VPSLLVD.BCST m32 xmm xmm
//	VPSLLVD.BCST m32 ymm k ymm
//	VPSLLVD.BCST m32 ymm ymm
//	VPSLLVD.BCST m32 zmm k zmm
//	VPSLLVD.BCST m32 zmm zmm
//
// Construct and append a VPSLLVD.BCST instruction to the active function.
func (c *Context) VPSLLVD_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VPSLLVD_BCST(ops...))
}

// VPSLLVD_BCST: Variable Shift Packed Doubleword Data Left Logical (Broadcast).
//
// Forms:
//
//	VPSLLVD.BCST m32 xmm k xmm
//	VPSLLVD.BCST m32 xmm xmm
//	VPSLLVD.BCST m32 ymm k ymm
//	VPSLLVD.BCST m32 ymm ymm
//	VPSLLVD.BCST m32 zmm k zmm
//	VPSLLVD.BCST m32 zmm zmm
//
// Construct and append a VPSLLVD.BCST instruction to the active function.
// Operates on the global context.
func VPSLLVD_BCST(ops ...operand.Op) { ctx.VPSLLVD_BCST(ops...) }
|
|
|
|
// VPSLLVD_BCST_Z: Variable Shift Packed Doubleword Data Left Logical (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VPSLLVD.BCST.Z m32 xmm k xmm
//	VPSLLVD.BCST.Z m32 ymm k ymm
//	VPSLLVD.BCST.Z m32 zmm k zmm
//
// Construct and append a VPSLLVD.BCST.Z instruction to the active function.
func (c *Context) VPSLLVD_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VPSLLVD_BCST_Z(m, xyz, k, xyz1))
}

// VPSLLVD_BCST_Z: Variable Shift Packed Doubleword Data Left Logical (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VPSLLVD.BCST.Z m32 xmm k xmm
//	VPSLLVD.BCST.Z m32 ymm k ymm
//	VPSLLVD.BCST.Z m32 zmm k zmm
//
// Construct and append a VPSLLVD.BCST.Z instruction to the active function.
// Operates on the global context.
func VPSLLVD_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VPSLLVD_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VPSLLVD_Z: Variable Shift Packed Doubleword Data Left Logical (Zeroing Masking).
//
// Forms:
//
//	VPSLLVD.Z m128 xmm k xmm
//	VPSLLVD.Z m256 ymm k ymm
//	VPSLLVD.Z xmm xmm k xmm
//	VPSLLVD.Z ymm ymm k ymm
//	VPSLLVD.Z m512 zmm k zmm
//	VPSLLVD.Z zmm zmm k zmm
//
// Construct and append a VPSLLVD.Z instruction to the active function.
func (c *Context) VPSLLVD_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VPSLLVD_Z(mxyz, xyz, k, xyz1))
}

// VPSLLVD_Z: Variable Shift Packed Doubleword Data Left Logical (Zeroing Masking).
//
// Forms:
//
//	VPSLLVD.Z m128 xmm k xmm
//	VPSLLVD.Z m256 ymm k ymm
//	VPSLLVD.Z xmm xmm k xmm
//	VPSLLVD.Z ymm ymm k ymm
//	VPSLLVD.Z m512 zmm k zmm
//	VPSLLVD.Z zmm zmm k zmm
//
// Construct and append a VPSLLVD.Z instruction to the active function.
// Operates on the global context.
func VPSLLVD_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPSLLVD_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPSLLVQ: Variable Shift Packed Quadword Data Left Logical.
//
// Forms:
//
//	VPSLLVQ m128 xmm xmm
//	VPSLLVQ m256 ymm ymm
//	VPSLLVQ xmm xmm xmm
//	VPSLLVQ ymm ymm ymm
//	VPSLLVQ m128 xmm k xmm
//	VPSLLVQ m256 ymm k ymm
//	VPSLLVQ xmm xmm k xmm
//	VPSLLVQ ymm ymm k ymm
//	VPSLLVQ m512 zmm k zmm
//	VPSLLVQ m512 zmm zmm
//	VPSLLVQ zmm zmm k zmm
//	VPSLLVQ zmm zmm zmm
//
// Construct and append a VPSLLVQ instruction to the active function.
func (c *Context) VPSLLVQ(ops ...operand.Op) {
	c.addinstruction(x86.VPSLLVQ(ops...))
}

// VPSLLVQ: Variable Shift Packed Quadword Data Left Logical.
//
// Forms:
//
//	VPSLLVQ m128 xmm xmm
//	VPSLLVQ m256 ymm ymm
//	VPSLLVQ xmm xmm xmm
//	VPSLLVQ ymm ymm ymm
//	VPSLLVQ m128 xmm k xmm
//	VPSLLVQ m256 ymm k ymm
//	VPSLLVQ xmm xmm k xmm
//	VPSLLVQ ymm ymm k ymm
//	VPSLLVQ m512 zmm k zmm
//	VPSLLVQ m512 zmm zmm
//	VPSLLVQ zmm zmm k zmm
//	VPSLLVQ zmm zmm zmm
//
// Construct and append a VPSLLVQ instruction to the active function.
// Operates on the global context.
func VPSLLVQ(ops ...operand.Op) { ctx.VPSLLVQ(ops...) }
|
|
|
|
// VPSLLVQ_BCST: Variable Shift Packed Quadword Data Left Logical (Broadcast).
//
// Forms:
//
//	VPSLLVQ.BCST m64 xmm k xmm
//	VPSLLVQ.BCST m64 xmm xmm
//	VPSLLVQ.BCST m64 ymm k ymm
//	VPSLLVQ.BCST m64 ymm ymm
//	VPSLLVQ.BCST m64 zmm k zmm
//	VPSLLVQ.BCST m64 zmm zmm
//
// Construct and append a VPSLLVQ.BCST instruction to the active function.
func (c *Context) VPSLLVQ_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VPSLLVQ_BCST(ops...))
}

// VPSLLVQ_BCST: Variable Shift Packed Quadword Data Left Logical (Broadcast).
//
// Forms:
//
//	VPSLLVQ.BCST m64 xmm k xmm
//	VPSLLVQ.BCST m64 xmm xmm
//	VPSLLVQ.BCST m64 ymm k ymm
//	VPSLLVQ.BCST m64 ymm ymm
//	VPSLLVQ.BCST m64 zmm k zmm
//	VPSLLVQ.BCST m64 zmm zmm
//
// Construct and append a VPSLLVQ.BCST instruction to the active function.
// Operates on the global context.
func VPSLLVQ_BCST(ops ...operand.Op) { ctx.VPSLLVQ_BCST(ops...) }
|
|
|
|
// VPSLLVQ_BCST_Z: Variable Shift Packed Quadword Data Left Logical (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VPSLLVQ.BCST.Z m64 xmm k xmm
//	VPSLLVQ.BCST.Z m64 ymm k ymm
//	VPSLLVQ.BCST.Z m64 zmm k zmm
//
// Construct and append a VPSLLVQ.BCST.Z instruction to the active function.
func (c *Context) VPSLLVQ_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VPSLLVQ_BCST_Z(m, xyz, k, xyz1))
}

// VPSLLVQ_BCST_Z: Variable Shift Packed Quadword Data Left Logical (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VPSLLVQ.BCST.Z m64 xmm k xmm
//	VPSLLVQ.BCST.Z m64 ymm k ymm
//	VPSLLVQ.BCST.Z m64 zmm k zmm
//
// Construct and append a VPSLLVQ.BCST.Z instruction to the active function.
// Operates on the global context.
func VPSLLVQ_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VPSLLVQ_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VPSLLVQ_Z: Variable Shift Packed Quadword Data Left Logical (Zeroing Masking).
//
// Forms:
//
//	VPSLLVQ.Z m128 xmm k xmm
//	VPSLLVQ.Z m256 ymm k ymm
//	VPSLLVQ.Z xmm xmm k xmm
//	VPSLLVQ.Z ymm ymm k ymm
//	VPSLLVQ.Z m512 zmm k zmm
//	VPSLLVQ.Z zmm zmm k zmm
//
// Construct and append a VPSLLVQ.Z instruction to the active function.
func (c *Context) VPSLLVQ_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VPSLLVQ_Z(mxyz, xyz, k, xyz1))
}

// VPSLLVQ_Z: Variable Shift Packed Quadword Data Left Logical (Zeroing Masking).
//
// Forms:
//
//	VPSLLVQ.Z m128 xmm k xmm
//	VPSLLVQ.Z m256 ymm k ymm
//	VPSLLVQ.Z xmm xmm k xmm
//	VPSLLVQ.Z ymm ymm k ymm
//	VPSLLVQ.Z m512 zmm k zmm
//	VPSLLVQ.Z zmm zmm k zmm
//
// Construct and append a VPSLLVQ.Z instruction to the active function.
// Operates on the global context.
func VPSLLVQ_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPSLLVQ_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPSLLVW: Variable Shift Packed Word Data Left Logical.
//
// Forms:
//
//	VPSLLVW m128 xmm k xmm
//	VPSLLVW m128 xmm xmm
//	VPSLLVW m256 ymm k ymm
//	VPSLLVW m256 ymm ymm
//	VPSLLVW xmm xmm k xmm
//	VPSLLVW xmm xmm xmm
//	VPSLLVW ymm ymm k ymm
//	VPSLLVW ymm ymm ymm
//	VPSLLVW m512 zmm k zmm
//	VPSLLVW m512 zmm zmm
//	VPSLLVW zmm zmm k zmm
//	VPSLLVW zmm zmm zmm
//
// Construct and append a VPSLLVW instruction to the active function.
func (c *Context) VPSLLVW(ops ...operand.Op) {
	c.addinstruction(x86.VPSLLVW(ops...))
}

// VPSLLVW: Variable Shift Packed Word Data Left Logical.
//
// Forms:
//
//	VPSLLVW m128 xmm k xmm
//	VPSLLVW m128 xmm xmm
//	VPSLLVW m256 ymm k ymm
//	VPSLLVW m256 ymm ymm
//	VPSLLVW xmm xmm k xmm
//	VPSLLVW xmm xmm xmm
//	VPSLLVW ymm ymm k ymm
//	VPSLLVW ymm ymm ymm
//	VPSLLVW m512 zmm k zmm
//	VPSLLVW m512 zmm zmm
//	VPSLLVW zmm zmm k zmm
//	VPSLLVW zmm zmm zmm
//
// Construct and append a VPSLLVW instruction to the active function.
// Operates on the global context.
func VPSLLVW(ops ...operand.Op) { ctx.VPSLLVW(ops...) }
|
|
|
|
// VPSLLVW_Z: Variable Shift Packed Word Data Left Logical (Zeroing Masking).
//
// Forms:
//
//	VPSLLVW.Z m128 xmm k xmm
//	VPSLLVW.Z m256 ymm k ymm
//	VPSLLVW.Z xmm xmm k xmm
//	VPSLLVW.Z ymm ymm k ymm
//	VPSLLVW.Z m512 zmm k zmm
//	VPSLLVW.Z zmm zmm k zmm
//
// Construct and append a VPSLLVW.Z instruction to the active function.
func (c *Context) VPSLLVW_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VPSLLVW_Z(mxyz, xyz, k, xyz1))
}

// VPSLLVW_Z: Variable Shift Packed Word Data Left Logical (Zeroing Masking).
//
// Forms:
//
//	VPSLLVW.Z m128 xmm k xmm
//	VPSLLVW.Z m256 ymm k ymm
//	VPSLLVW.Z xmm xmm k xmm
//	VPSLLVW.Z ymm ymm k ymm
//	VPSLLVW.Z m512 zmm k zmm
//	VPSLLVW.Z zmm zmm k zmm
//
// Construct and append a VPSLLVW.Z instruction to the active function.
// Operates on the global context.
func VPSLLVW_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPSLLVW_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPSLLW: Shift Packed Word Data Left Logical.
//
// Forms:
//
//	VPSLLW imm8 ymm ymm
//	VPSLLW m128 ymm ymm
//	VPSLLW xmm ymm ymm
//	VPSLLW imm8 xmm xmm
//	VPSLLW m128 xmm xmm
//	VPSLLW xmm xmm xmm
//	VPSLLW imm8 m128 k xmm
//	VPSLLW imm8 m128 xmm
//	VPSLLW imm8 m256 k ymm
//	VPSLLW imm8 m256 ymm
//	VPSLLW imm8 xmm k xmm
//	VPSLLW imm8 ymm k ymm
//	VPSLLW m128 xmm k xmm
//	VPSLLW m128 ymm k ymm
//	VPSLLW xmm xmm k xmm
//	VPSLLW xmm ymm k ymm
//	VPSLLW imm8 m512 k zmm
//	VPSLLW imm8 m512 zmm
//	VPSLLW imm8 zmm k zmm
//	VPSLLW imm8 zmm zmm
//	VPSLLW m128 zmm k zmm
//	VPSLLW m128 zmm zmm
//	VPSLLW xmm zmm k zmm
//	VPSLLW xmm zmm zmm
//
// Construct and append a VPSLLW instruction to the active function.
func (c *Context) VPSLLW(ops ...operand.Op) {
	c.addinstruction(x86.VPSLLW(ops...))
}

// VPSLLW: Shift Packed Word Data Left Logical.
//
// Forms:
//
//	VPSLLW imm8 ymm ymm
//	VPSLLW m128 ymm ymm
//	VPSLLW xmm ymm ymm
//	VPSLLW imm8 xmm xmm
//	VPSLLW m128 xmm xmm
//	VPSLLW xmm xmm xmm
//	VPSLLW imm8 m128 k xmm
//	VPSLLW imm8 m128 xmm
//	VPSLLW imm8 m256 k ymm
//	VPSLLW imm8 m256 ymm
//	VPSLLW imm8 xmm k xmm
//	VPSLLW imm8 ymm k ymm
//	VPSLLW m128 xmm k xmm
//	VPSLLW m128 ymm k ymm
//	VPSLLW xmm xmm k xmm
//	VPSLLW xmm ymm k ymm
//	VPSLLW imm8 m512 k zmm
//	VPSLLW imm8 m512 zmm
//	VPSLLW imm8 zmm k zmm
//	VPSLLW imm8 zmm zmm
//	VPSLLW m128 zmm k zmm
//	VPSLLW m128 zmm zmm
//	VPSLLW xmm zmm k zmm
//	VPSLLW xmm zmm zmm
//
// Construct and append a VPSLLW instruction to the active function.
// Operates on the global context.
func VPSLLW(ops ...operand.Op) { ctx.VPSLLW(ops...) }
|
|
|
|
// VPSLLW_Z: Shift Packed Word Data Left Logical (Zeroing Masking).
//
// Forms:
//
//	VPSLLW.Z imm8 m128 k xmm
//	VPSLLW.Z imm8 m256 k ymm
//	VPSLLW.Z imm8 xmm k xmm
//	VPSLLW.Z imm8 ymm k ymm
//	VPSLLW.Z m128 xmm k xmm
//	VPSLLW.Z m128 ymm k ymm
//	VPSLLW.Z xmm xmm k xmm
//	VPSLLW.Z xmm ymm k ymm
//	VPSLLW.Z imm8 m512 k zmm
//	VPSLLW.Z imm8 zmm k zmm
//	VPSLLW.Z m128 zmm k zmm
//	VPSLLW.Z xmm zmm k zmm
//
// Construct and append a VPSLLW.Z instruction to the active function.
func (c *Context) VPSLLW_Z(imx, mxyz, k, xyz operand.Op) {
	c.addinstruction(x86.VPSLLW_Z(imx, mxyz, k, xyz))
}

// VPSLLW_Z: Shift Packed Word Data Left Logical (Zeroing Masking).
//
// Forms:
//
//	VPSLLW.Z imm8 m128 k xmm
//	VPSLLW.Z imm8 m256 k ymm
//	VPSLLW.Z imm8 xmm k xmm
//	VPSLLW.Z imm8 ymm k ymm
//	VPSLLW.Z m128 xmm k xmm
//	VPSLLW.Z m128 ymm k ymm
//	VPSLLW.Z xmm xmm k xmm
//	VPSLLW.Z xmm ymm k ymm
//	VPSLLW.Z imm8 m512 k zmm
//	VPSLLW.Z imm8 zmm k zmm
//	VPSLLW.Z m128 zmm k zmm
//	VPSLLW.Z xmm zmm k zmm
//
// Construct and append a VPSLLW.Z instruction to the active function.
// Operates on the global context.
func VPSLLW_Z(imx, mxyz, k, xyz operand.Op) { ctx.VPSLLW_Z(imx, mxyz, k, xyz) }
|
|
|
|
// VPSRAD: Shift Packed Doubleword Data Right Arithmetic.
//
// Forms:
//
//	VPSRAD imm8 ymm ymm
//	VPSRAD m128 ymm ymm
//	VPSRAD xmm ymm ymm
//	VPSRAD imm8 xmm xmm
//	VPSRAD m128 xmm xmm
//	VPSRAD xmm xmm xmm
//	VPSRAD imm8 m128 k xmm
//	VPSRAD imm8 m128 xmm
//	VPSRAD imm8 m256 k ymm
//	VPSRAD imm8 m256 ymm
//	VPSRAD imm8 xmm k xmm
//	VPSRAD imm8 ymm k ymm
//	VPSRAD m128 xmm k xmm
//	VPSRAD m128 ymm k ymm
//	VPSRAD xmm xmm k xmm
//	VPSRAD xmm ymm k ymm
//	VPSRAD imm8 m512 k zmm
//	VPSRAD imm8 m512 zmm
//	VPSRAD imm8 zmm k zmm
//	VPSRAD imm8 zmm zmm
//	VPSRAD m128 zmm k zmm
//	VPSRAD m128 zmm zmm
//	VPSRAD xmm zmm k zmm
//	VPSRAD xmm zmm zmm
//
// Construct and append a VPSRAD instruction to the active function.
func (c *Context) VPSRAD(ops ...operand.Op) {
	c.addinstruction(x86.VPSRAD(ops...))
}

// VPSRAD: Shift Packed Doubleword Data Right Arithmetic.
//
// Forms:
//
//	VPSRAD imm8 ymm ymm
//	VPSRAD m128 ymm ymm
//	VPSRAD xmm ymm ymm
//	VPSRAD imm8 xmm xmm
//	VPSRAD m128 xmm xmm
//	VPSRAD xmm xmm xmm
//	VPSRAD imm8 m128 k xmm
//	VPSRAD imm8 m128 xmm
//	VPSRAD imm8 m256 k ymm
//	VPSRAD imm8 m256 ymm
//	VPSRAD imm8 xmm k xmm
//	VPSRAD imm8 ymm k ymm
//	VPSRAD m128 xmm k xmm
//	VPSRAD m128 ymm k ymm
//	VPSRAD xmm xmm k xmm
//	VPSRAD xmm ymm k ymm
//	VPSRAD imm8 m512 k zmm
//	VPSRAD imm8 m512 zmm
//	VPSRAD imm8 zmm k zmm
//	VPSRAD imm8 zmm zmm
//	VPSRAD m128 zmm k zmm
//	VPSRAD m128 zmm zmm
//	VPSRAD xmm zmm k zmm
//	VPSRAD xmm zmm zmm
//
// Construct and append a VPSRAD instruction to the active function.
// Operates on the global context.
func VPSRAD(ops ...operand.Op) { ctx.VPSRAD(ops...) }
|
|
|
|
// VPSRAD_BCST: Shift Packed Doubleword Data Right Arithmetic (Broadcast).
//
// Forms:
//
//	VPSRAD.BCST imm8 m32 k xmm
//	VPSRAD.BCST imm8 m32 k ymm
//	VPSRAD.BCST imm8 m32 xmm
//	VPSRAD.BCST imm8 m32 ymm
//	VPSRAD.BCST imm8 m32 k zmm
//	VPSRAD.BCST imm8 m32 zmm
//
// Construct and append a VPSRAD.BCST instruction to the active function.
func (c *Context) VPSRAD_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VPSRAD_BCST(ops...))
}

// VPSRAD_BCST: Shift Packed Doubleword Data Right Arithmetic (Broadcast).
//
// Forms:
//
//	VPSRAD.BCST imm8 m32 k xmm
//	VPSRAD.BCST imm8 m32 k ymm
//	VPSRAD.BCST imm8 m32 xmm
//	VPSRAD.BCST imm8 m32 ymm
//	VPSRAD.BCST imm8 m32 k zmm
//	VPSRAD.BCST imm8 m32 zmm
//
// Construct and append a VPSRAD.BCST instruction to the active function.
// Operates on the global context.
func VPSRAD_BCST(ops ...operand.Op) { ctx.VPSRAD_BCST(ops...) }
|
|
|
|
// VPSRAD_BCST_Z: Shift Packed Doubleword Data Right Arithmetic (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VPSRAD.BCST.Z imm8 m32 k xmm
//	VPSRAD.BCST.Z imm8 m32 k ymm
//	VPSRAD.BCST.Z imm8 m32 k zmm
//
// Construct and append a VPSRAD.BCST.Z instruction to the active function.
func (c *Context) VPSRAD_BCST_Z(i, m, k, xyz operand.Op) {
	c.addinstruction(x86.VPSRAD_BCST_Z(i, m, k, xyz))
}

// VPSRAD_BCST_Z: Shift Packed Doubleword Data Right Arithmetic (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VPSRAD.BCST.Z imm8 m32 k xmm
//	VPSRAD.BCST.Z imm8 m32 k ymm
//	VPSRAD.BCST.Z imm8 m32 k zmm
//
// Construct and append a VPSRAD.BCST.Z instruction to the active function.
// Operates on the global context.
func VPSRAD_BCST_Z(i, m, k, xyz operand.Op) { ctx.VPSRAD_BCST_Z(i, m, k, xyz) }
|
|
|
|
// VPSRAD_Z: Shift Packed Doubleword Data Right Arithmetic (Zeroing Masking).
//
// Forms:
//
//	VPSRAD.Z imm8 m128 k xmm
//	VPSRAD.Z imm8 m256 k ymm
//	VPSRAD.Z imm8 xmm k xmm
//	VPSRAD.Z imm8 ymm k ymm
//	VPSRAD.Z m128 xmm k xmm
//	VPSRAD.Z m128 ymm k ymm
//	VPSRAD.Z xmm xmm k xmm
//	VPSRAD.Z xmm ymm k ymm
//	VPSRAD.Z imm8 m512 k zmm
//	VPSRAD.Z imm8 zmm k zmm
//	VPSRAD.Z m128 zmm k zmm
//	VPSRAD.Z xmm zmm k zmm
//
// Construct and append a VPSRAD.Z instruction to the active function.
func (c *Context) VPSRAD_Z(imx, mxyz, k, xyz operand.Op) {
	c.addinstruction(x86.VPSRAD_Z(imx, mxyz, k, xyz))
}

// VPSRAD_Z: Shift Packed Doubleword Data Right Arithmetic (Zeroing Masking).
//
// Forms:
//
//	VPSRAD.Z imm8 m128 k xmm
//	VPSRAD.Z imm8 m256 k ymm
//	VPSRAD.Z imm8 xmm k xmm
//	VPSRAD.Z imm8 ymm k ymm
//	VPSRAD.Z m128 xmm k xmm
//	VPSRAD.Z m128 ymm k ymm
//	VPSRAD.Z xmm xmm k xmm
//	VPSRAD.Z xmm ymm k ymm
//	VPSRAD.Z imm8 m512 k zmm
//	VPSRAD.Z imm8 zmm k zmm
//	VPSRAD.Z m128 zmm k zmm
//	VPSRAD.Z xmm zmm k zmm
//
// Construct and append a VPSRAD.Z instruction to the active function.
// Operates on the global context.
func VPSRAD_Z(imx, mxyz, k, xyz operand.Op) { ctx.VPSRAD_Z(imx, mxyz, k, xyz) }
|
|
|
|
// VPSRAQ: Shift Packed Quadword Data Right Arithmetic.
//
// Forms:
//
//	VPSRAQ imm8 m128 k xmm
//	VPSRAQ imm8 m128 xmm
//	VPSRAQ imm8 m256 k ymm
//	VPSRAQ imm8 m256 ymm
//	VPSRAQ imm8 xmm k xmm
//	VPSRAQ imm8 xmm xmm
//	VPSRAQ imm8 ymm k ymm
//	VPSRAQ imm8 ymm ymm
//	VPSRAQ m128 xmm k xmm
//	VPSRAQ m128 xmm xmm
//	VPSRAQ m128 ymm k ymm
//	VPSRAQ m128 ymm ymm
//	VPSRAQ xmm xmm k xmm
//	VPSRAQ xmm xmm xmm
//	VPSRAQ xmm ymm k ymm
//	VPSRAQ xmm ymm ymm
//	VPSRAQ imm8 m512 k zmm
//	VPSRAQ imm8 m512 zmm
//	VPSRAQ imm8 zmm k zmm
//	VPSRAQ imm8 zmm zmm
//	VPSRAQ m128 zmm k zmm
//	VPSRAQ m128 zmm zmm
//	VPSRAQ xmm zmm k zmm
//	VPSRAQ xmm zmm zmm
//
// Construct and append a VPSRAQ instruction to the active function.
func (c *Context) VPSRAQ(ops ...operand.Op) {
	c.addinstruction(x86.VPSRAQ(ops...))
}

// VPSRAQ: Shift Packed Quadword Data Right Arithmetic.
//
// Forms:
//
//	VPSRAQ imm8 m128 k xmm
//	VPSRAQ imm8 m128 xmm
//	VPSRAQ imm8 m256 k ymm
//	VPSRAQ imm8 m256 ymm
//	VPSRAQ imm8 xmm k xmm
//	VPSRAQ imm8 xmm xmm
//	VPSRAQ imm8 ymm k ymm
//	VPSRAQ imm8 ymm ymm
//	VPSRAQ m128 xmm k xmm
//	VPSRAQ m128 xmm xmm
//	VPSRAQ m128 ymm k ymm
//	VPSRAQ m128 ymm ymm
//	VPSRAQ xmm xmm k xmm
//	VPSRAQ xmm xmm xmm
//	VPSRAQ xmm ymm k ymm
//	VPSRAQ xmm ymm ymm
//	VPSRAQ imm8 m512 k zmm
//	VPSRAQ imm8 m512 zmm
//	VPSRAQ imm8 zmm k zmm
//	VPSRAQ imm8 zmm zmm
//	VPSRAQ m128 zmm k zmm
//	VPSRAQ m128 zmm zmm
//	VPSRAQ xmm zmm k zmm
//	VPSRAQ xmm zmm zmm
//
// Construct and append a VPSRAQ instruction to the active function.
// Operates on the global context.
func VPSRAQ(ops ...operand.Op) { ctx.VPSRAQ(ops...) }
|
|
|
|
// VPSRAQ_BCST: Shift Packed Quadword Data Right Arithmetic (Broadcast).
//
// Forms:
//
//	VPSRAQ.BCST imm8 m64 k xmm
//	VPSRAQ.BCST imm8 m64 k ymm
//	VPSRAQ.BCST imm8 m64 xmm
//	VPSRAQ.BCST imm8 m64 ymm
//	VPSRAQ.BCST imm8 m64 k zmm
//	VPSRAQ.BCST imm8 m64 zmm
//
// Construct and append a VPSRAQ.BCST instruction to the active function.
func (c *Context) VPSRAQ_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VPSRAQ_BCST(ops...))
}

// VPSRAQ_BCST: Shift Packed Quadword Data Right Arithmetic (Broadcast).
//
// Forms:
//
//	VPSRAQ.BCST imm8 m64 k xmm
//	VPSRAQ.BCST imm8 m64 k ymm
//	VPSRAQ.BCST imm8 m64 xmm
//	VPSRAQ.BCST imm8 m64 ymm
//	VPSRAQ.BCST imm8 m64 k zmm
//	VPSRAQ.BCST imm8 m64 zmm
//
// Construct and append a VPSRAQ.BCST instruction to the active function.
// Operates on the global context.
func VPSRAQ_BCST(ops ...operand.Op) { ctx.VPSRAQ_BCST(ops...) }
|
|
|
|
// VPSRAQ_BCST_Z: Shift Packed Quadword Data Right Arithmetic (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VPSRAQ.BCST.Z imm8 m64 k xmm
//	VPSRAQ.BCST.Z imm8 m64 k ymm
//	VPSRAQ.BCST.Z imm8 m64 k zmm
//
// Construct and append a VPSRAQ.BCST.Z instruction to the active function.
func (c *Context) VPSRAQ_BCST_Z(i, m, k, xyz operand.Op) {
	c.addinstruction(x86.VPSRAQ_BCST_Z(i, m, k, xyz))
}

// VPSRAQ_BCST_Z: Shift Packed Quadword Data Right Arithmetic (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VPSRAQ.BCST.Z imm8 m64 k xmm
//	VPSRAQ.BCST.Z imm8 m64 k ymm
//	VPSRAQ.BCST.Z imm8 m64 k zmm
//
// Construct and append a VPSRAQ.BCST.Z instruction to the active function.
// Operates on the global context.
func VPSRAQ_BCST_Z(i, m, k, xyz operand.Op) { ctx.VPSRAQ_BCST_Z(i, m, k, xyz) }
|
|
|
|
// VPSRAQ_Z: Shift Packed Quadword Data Right Arithmetic (Zeroing Masking).
//
// Forms:
//
//	VPSRAQ.Z imm8 m128 k xmm
//	VPSRAQ.Z imm8 m256 k ymm
//	VPSRAQ.Z imm8 xmm k xmm
//	VPSRAQ.Z imm8 ymm k ymm
//	VPSRAQ.Z m128 xmm k xmm
//	VPSRAQ.Z m128 ymm k ymm
//	VPSRAQ.Z xmm xmm k xmm
//	VPSRAQ.Z xmm ymm k ymm
//	VPSRAQ.Z imm8 m512 k zmm
//	VPSRAQ.Z imm8 zmm k zmm
//	VPSRAQ.Z m128 zmm k zmm
//	VPSRAQ.Z xmm zmm k zmm
//
// Construct and append a VPSRAQ.Z instruction to the active function.
func (c *Context) VPSRAQ_Z(imx, mxyz, k, xyz operand.Op) {
	c.addinstruction(x86.VPSRAQ_Z(imx, mxyz, k, xyz))
}

// VPSRAQ_Z: Shift Packed Quadword Data Right Arithmetic (Zeroing Masking).
//
// Forms:
//
//	VPSRAQ.Z imm8 m128 k xmm
//	VPSRAQ.Z imm8 m256 k ymm
//	VPSRAQ.Z imm8 xmm k xmm
//	VPSRAQ.Z imm8 ymm k ymm
//	VPSRAQ.Z m128 xmm k xmm
//	VPSRAQ.Z m128 ymm k ymm
//	VPSRAQ.Z xmm xmm k xmm
//	VPSRAQ.Z xmm ymm k ymm
//	VPSRAQ.Z imm8 m512 k zmm
//	VPSRAQ.Z imm8 zmm k zmm
//	VPSRAQ.Z m128 zmm k zmm
//	VPSRAQ.Z xmm zmm k zmm
//
// Construct and append a VPSRAQ.Z instruction to the active function.
// Operates on the global context.
func VPSRAQ_Z(imx, mxyz, k, xyz operand.Op) { ctx.VPSRAQ_Z(imx, mxyz, k, xyz) }
|
|
|
|
// VPSRAVD: Variable Shift Packed Doubleword Data Right Arithmetic.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRAVD m128 xmm xmm
|
|
// VPSRAVD m256 ymm ymm
|
|
// VPSRAVD xmm xmm xmm
|
|
// VPSRAVD ymm ymm ymm
|
|
// VPSRAVD m128 xmm k xmm
|
|
// VPSRAVD m256 ymm k ymm
|
|
// VPSRAVD xmm xmm k xmm
|
|
// VPSRAVD ymm ymm k ymm
|
|
// VPSRAVD m512 zmm k zmm
|
|
// VPSRAVD m512 zmm zmm
|
|
// VPSRAVD zmm zmm k zmm
|
|
// VPSRAVD zmm zmm zmm
|
|
//
|
|
// Construct and append a VPSRAVD instruction to the active function.
|
|
func (c *Context) VPSRAVD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPSRAVD(ops...))
|
|
}
|
|
|
|
// VPSRAVD: Variable Shift Packed Doubleword Data Right Arithmetic.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRAVD m128 xmm xmm
|
|
// VPSRAVD m256 ymm ymm
|
|
// VPSRAVD xmm xmm xmm
|
|
// VPSRAVD ymm ymm ymm
|
|
// VPSRAVD m128 xmm k xmm
|
|
// VPSRAVD m256 ymm k ymm
|
|
// VPSRAVD xmm xmm k xmm
|
|
// VPSRAVD ymm ymm k ymm
|
|
// VPSRAVD m512 zmm k zmm
|
|
// VPSRAVD m512 zmm zmm
|
|
// VPSRAVD zmm zmm k zmm
|
|
// VPSRAVD zmm zmm zmm
|
|
//
|
|
// Construct and append a VPSRAVD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSRAVD(ops ...operand.Op) { ctx.VPSRAVD(ops...) }
|
|
|
|
// VPSRAVD_BCST: Variable Shift Packed Doubleword Data Right Arithmetic (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRAVD.BCST m32 xmm k xmm
|
|
// VPSRAVD.BCST m32 xmm xmm
|
|
// VPSRAVD.BCST m32 ymm k ymm
|
|
// VPSRAVD.BCST m32 ymm ymm
|
|
// VPSRAVD.BCST m32 zmm k zmm
|
|
// VPSRAVD.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VPSRAVD.BCST instruction to the active function.
|
|
func (c *Context) VPSRAVD_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPSRAVD_BCST(ops...))
|
|
}
|
|
|
|
// VPSRAVD_BCST: Variable Shift Packed Doubleword Data Right Arithmetic (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRAVD.BCST m32 xmm k xmm
|
|
// VPSRAVD.BCST m32 xmm xmm
|
|
// VPSRAVD.BCST m32 ymm k ymm
|
|
// VPSRAVD.BCST m32 ymm ymm
|
|
// VPSRAVD.BCST m32 zmm k zmm
|
|
// VPSRAVD.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VPSRAVD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSRAVD_BCST(ops ...operand.Op) { ctx.VPSRAVD_BCST(ops...) }
|
|
|
|
// VPSRAVD_BCST_Z: Variable Shift Packed Doubleword Data Right Arithmetic (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRAVD.BCST.Z m32 xmm k xmm
|
|
// VPSRAVD.BCST.Z m32 ymm k ymm
|
|
// VPSRAVD.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VPSRAVD.BCST.Z instruction to the active function.
|
|
func (c *Context) VPSRAVD_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPSRAVD_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPSRAVD_BCST_Z: Variable Shift Packed Doubleword Data Right Arithmetic (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRAVD.BCST.Z m32 xmm k xmm
|
|
// VPSRAVD.BCST.Z m32 ymm k ymm
|
|
// VPSRAVD.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VPSRAVD.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSRAVD_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VPSRAVD_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VPSRAVD_Z: Variable Shift Packed Doubleword Data Right Arithmetic (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRAVD.Z m128 xmm k xmm
|
|
// VPSRAVD.Z m256 ymm k ymm
|
|
// VPSRAVD.Z xmm xmm k xmm
|
|
// VPSRAVD.Z ymm ymm k ymm
|
|
// VPSRAVD.Z m512 zmm k zmm
|
|
// VPSRAVD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPSRAVD.Z instruction to the active function.
|
|
func (c *Context) VPSRAVD_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPSRAVD_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPSRAVD_Z: Variable Shift Packed Doubleword Data Right Arithmetic (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRAVD.Z m128 xmm k xmm
|
|
// VPSRAVD.Z m256 ymm k ymm
|
|
// VPSRAVD.Z xmm xmm k xmm
|
|
// VPSRAVD.Z ymm ymm k ymm
|
|
// VPSRAVD.Z m512 zmm k zmm
|
|
// VPSRAVD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPSRAVD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSRAVD_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPSRAVD_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPSRAVQ: Variable Shift Packed Quadword Data Right Arithmetic.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRAVQ m128 xmm k xmm
|
|
// VPSRAVQ m128 xmm xmm
|
|
// VPSRAVQ m256 ymm k ymm
|
|
// VPSRAVQ m256 ymm ymm
|
|
// VPSRAVQ xmm xmm k xmm
|
|
// VPSRAVQ xmm xmm xmm
|
|
// VPSRAVQ ymm ymm k ymm
|
|
// VPSRAVQ ymm ymm ymm
|
|
// VPSRAVQ m512 zmm k zmm
|
|
// VPSRAVQ m512 zmm zmm
|
|
// VPSRAVQ zmm zmm k zmm
|
|
// VPSRAVQ zmm zmm zmm
|
|
//
|
|
// Construct and append a VPSRAVQ instruction to the active function.
|
|
func (c *Context) VPSRAVQ(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPSRAVQ(ops...))
|
|
}
|
|
|
|
// VPSRAVQ: Variable Shift Packed Quadword Data Right Arithmetic.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRAVQ m128 xmm k xmm
|
|
// VPSRAVQ m128 xmm xmm
|
|
// VPSRAVQ m256 ymm k ymm
|
|
// VPSRAVQ m256 ymm ymm
|
|
// VPSRAVQ xmm xmm k xmm
|
|
// VPSRAVQ xmm xmm xmm
|
|
// VPSRAVQ ymm ymm k ymm
|
|
// VPSRAVQ ymm ymm ymm
|
|
// VPSRAVQ m512 zmm k zmm
|
|
// VPSRAVQ m512 zmm zmm
|
|
// VPSRAVQ zmm zmm k zmm
|
|
// VPSRAVQ zmm zmm zmm
|
|
//
|
|
// Construct and append a VPSRAVQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSRAVQ(ops ...operand.Op) { ctx.VPSRAVQ(ops...) }
|
|
|
|
// VPSRAVQ_BCST: Variable Shift Packed Quadword Data Right Arithmetic (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRAVQ.BCST m64 xmm k xmm
|
|
// VPSRAVQ.BCST m64 xmm xmm
|
|
// VPSRAVQ.BCST m64 ymm k ymm
|
|
// VPSRAVQ.BCST m64 ymm ymm
|
|
// VPSRAVQ.BCST m64 zmm k zmm
|
|
// VPSRAVQ.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VPSRAVQ.BCST instruction to the active function.
|
|
func (c *Context) VPSRAVQ_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPSRAVQ_BCST(ops...))
|
|
}
|
|
|
|
// VPSRAVQ_BCST: Variable Shift Packed Quadword Data Right Arithmetic (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRAVQ.BCST m64 xmm k xmm
|
|
// VPSRAVQ.BCST m64 xmm xmm
|
|
// VPSRAVQ.BCST m64 ymm k ymm
|
|
// VPSRAVQ.BCST m64 ymm ymm
|
|
// VPSRAVQ.BCST m64 zmm k zmm
|
|
// VPSRAVQ.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VPSRAVQ.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSRAVQ_BCST(ops ...operand.Op) { ctx.VPSRAVQ_BCST(ops...) }
|
|
|
|
// VPSRAVQ_BCST_Z: Variable Shift Packed Quadword Data Right Arithmetic (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRAVQ.BCST.Z m64 xmm k xmm
|
|
// VPSRAVQ.BCST.Z m64 ymm k ymm
|
|
// VPSRAVQ.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VPSRAVQ.BCST.Z instruction to the active function.
|
|
func (c *Context) VPSRAVQ_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPSRAVQ_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPSRAVQ_BCST_Z: Variable Shift Packed Quadword Data Right Arithmetic (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRAVQ.BCST.Z m64 xmm k xmm
|
|
// VPSRAVQ.BCST.Z m64 ymm k ymm
|
|
// VPSRAVQ.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VPSRAVQ.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSRAVQ_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VPSRAVQ_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VPSRAVQ_Z: Variable Shift Packed Quadword Data Right Arithmetic (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRAVQ.Z m128 xmm k xmm
|
|
// VPSRAVQ.Z m256 ymm k ymm
|
|
// VPSRAVQ.Z xmm xmm k xmm
|
|
// VPSRAVQ.Z ymm ymm k ymm
|
|
// VPSRAVQ.Z m512 zmm k zmm
|
|
// VPSRAVQ.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPSRAVQ.Z instruction to the active function.
|
|
func (c *Context) VPSRAVQ_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPSRAVQ_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPSRAVQ_Z: Variable Shift Packed Quadword Data Right Arithmetic (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRAVQ.Z m128 xmm k xmm
|
|
// VPSRAVQ.Z m256 ymm k ymm
|
|
// VPSRAVQ.Z xmm xmm k xmm
|
|
// VPSRAVQ.Z ymm ymm k ymm
|
|
// VPSRAVQ.Z m512 zmm k zmm
|
|
// VPSRAVQ.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPSRAVQ.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSRAVQ_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPSRAVQ_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPSRAVW: Variable Shift Packed Word Data Right Arithmetic.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRAVW m128 xmm k xmm
|
|
// VPSRAVW m128 xmm xmm
|
|
// VPSRAVW m256 ymm k ymm
|
|
// VPSRAVW m256 ymm ymm
|
|
// VPSRAVW xmm xmm k xmm
|
|
// VPSRAVW xmm xmm xmm
|
|
// VPSRAVW ymm ymm k ymm
|
|
// VPSRAVW ymm ymm ymm
|
|
// VPSRAVW m512 zmm k zmm
|
|
// VPSRAVW m512 zmm zmm
|
|
// VPSRAVW zmm zmm k zmm
|
|
// VPSRAVW zmm zmm zmm
|
|
//
|
|
// Construct and append a VPSRAVW instruction to the active function.
|
|
func (c *Context) VPSRAVW(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPSRAVW(ops...))
|
|
}
|
|
|
|
// VPSRAVW: Variable Shift Packed Word Data Right Arithmetic.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRAVW m128 xmm k xmm
|
|
// VPSRAVW m128 xmm xmm
|
|
// VPSRAVW m256 ymm k ymm
|
|
// VPSRAVW m256 ymm ymm
|
|
// VPSRAVW xmm xmm k xmm
|
|
// VPSRAVW xmm xmm xmm
|
|
// VPSRAVW ymm ymm k ymm
|
|
// VPSRAVW ymm ymm ymm
|
|
// VPSRAVW m512 zmm k zmm
|
|
// VPSRAVW m512 zmm zmm
|
|
// VPSRAVW zmm zmm k zmm
|
|
// VPSRAVW zmm zmm zmm
|
|
//
|
|
// Construct and append a VPSRAVW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSRAVW(ops ...operand.Op) { ctx.VPSRAVW(ops...) }
|
|
|
|
// VPSRAVW_Z: Variable Shift Packed Word Data Right Arithmetic (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRAVW.Z m128 xmm k xmm
|
|
// VPSRAVW.Z m256 ymm k ymm
|
|
// VPSRAVW.Z xmm xmm k xmm
|
|
// VPSRAVW.Z ymm ymm k ymm
|
|
// VPSRAVW.Z m512 zmm k zmm
|
|
// VPSRAVW.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPSRAVW.Z instruction to the active function.
|
|
func (c *Context) VPSRAVW_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPSRAVW_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPSRAVW_Z: Variable Shift Packed Word Data Right Arithmetic (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRAVW.Z m128 xmm k xmm
|
|
// VPSRAVW.Z m256 ymm k ymm
|
|
// VPSRAVW.Z xmm xmm k xmm
|
|
// VPSRAVW.Z ymm ymm k ymm
|
|
// VPSRAVW.Z m512 zmm k zmm
|
|
// VPSRAVW.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPSRAVW.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSRAVW_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPSRAVW_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPSRAW: Shift Packed Word Data Right Arithmetic.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRAW imm8 ymm ymm
|
|
// VPSRAW m128 ymm ymm
|
|
// VPSRAW xmm ymm ymm
|
|
// VPSRAW imm8 xmm xmm
|
|
// VPSRAW m128 xmm xmm
|
|
// VPSRAW xmm xmm xmm
|
|
// VPSRAW imm8 m128 k xmm
|
|
// VPSRAW imm8 m128 xmm
|
|
// VPSRAW imm8 m256 k ymm
|
|
// VPSRAW imm8 m256 ymm
|
|
// VPSRAW imm8 xmm k xmm
|
|
// VPSRAW imm8 ymm k ymm
|
|
// VPSRAW m128 xmm k xmm
|
|
// VPSRAW m128 ymm k ymm
|
|
// VPSRAW xmm xmm k xmm
|
|
// VPSRAW xmm ymm k ymm
|
|
// VPSRAW imm8 m512 k zmm
|
|
// VPSRAW imm8 m512 zmm
|
|
// VPSRAW imm8 zmm k zmm
|
|
// VPSRAW imm8 zmm zmm
|
|
// VPSRAW m128 zmm k zmm
|
|
// VPSRAW m128 zmm zmm
|
|
// VPSRAW xmm zmm k zmm
|
|
// VPSRAW xmm zmm zmm
|
|
//
|
|
// Construct and append a VPSRAW instruction to the active function.
|
|
func (c *Context) VPSRAW(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPSRAW(ops...))
|
|
}
|
|
|
|
// VPSRAW: Shift Packed Word Data Right Arithmetic.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRAW imm8 ymm ymm
|
|
// VPSRAW m128 ymm ymm
|
|
// VPSRAW xmm ymm ymm
|
|
// VPSRAW imm8 xmm xmm
|
|
// VPSRAW m128 xmm xmm
|
|
// VPSRAW xmm xmm xmm
|
|
// VPSRAW imm8 m128 k xmm
|
|
// VPSRAW imm8 m128 xmm
|
|
// VPSRAW imm8 m256 k ymm
|
|
// VPSRAW imm8 m256 ymm
|
|
// VPSRAW imm8 xmm k xmm
|
|
// VPSRAW imm8 ymm k ymm
|
|
// VPSRAW m128 xmm k xmm
|
|
// VPSRAW m128 ymm k ymm
|
|
// VPSRAW xmm xmm k xmm
|
|
// VPSRAW xmm ymm k ymm
|
|
// VPSRAW imm8 m512 k zmm
|
|
// VPSRAW imm8 m512 zmm
|
|
// VPSRAW imm8 zmm k zmm
|
|
// VPSRAW imm8 zmm zmm
|
|
// VPSRAW m128 zmm k zmm
|
|
// VPSRAW m128 zmm zmm
|
|
// VPSRAW xmm zmm k zmm
|
|
// VPSRAW xmm zmm zmm
|
|
//
|
|
// Construct and append a VPSRAW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSRAW(ops ...operand.Op) { ctx.VPSRAW(ops...) }
|
|
|
|
// VPSRAW_Z: Shift Packed Word Data Right Arithmetic (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRAW.Z imm8 m128 k xmm
|
|
// VPSRAW.Z imm8 m256 k ymm
|
|
// VPSRAW.Z imm8 xmm k xmm
|
|
// VPSRAW.Z imm8 ymm k ymm
|
|
// VPSRAW.Z m128 xmm k xmm
|
|
// VPSRAW.Z m128 ymm k ymm
|
|
// VPSRAW.Z xmm xmm k xmm
|
|
// VPSRAW.Z xmm ymm k ymm
|
|
// VPSRAW.Z imm8 m512 k zmm
|
|
// VPSRAW.Z imm8 zmm k zmm
|
|
// VPSRAW.Z m128 zmm k zmm
|
|
// VPSRAW.Z xmm zmm k zmm
|
|
//
|
|
// Construct and append a VPSRAW.Z instruction to the active function.
|
|
func (c *Context) VPSRAW_Z(imx, mxyz, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VPSRAW_Z(imx, mxyz, k, xyz))
|
|
}
|
|
|
|
// VPSRAW_Z: Shift Packed Word Data Right Arithmetic (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRAW.Z imm8 m128 k xmm
|
|
// VPSRAW.Z imm8 m256 k ymm
|
|
// VPSRAW.Z imm8 xmm k xmm
|
|
// VPSRAW.Z imm8 ymm k ymm
|
|
// VPSRAW.Z m128 xmm k xmm
|
|
// VPSRAW.Z m128 ymm k ymm
|
|
// VPSRAW.Z xmm xmm k xmm
|
|
// VPSRAW.Z xmm ymm k ymm
|
|
// VPSRAW.Z imm8 m512 k zmm
|
|
// VPSRAW.Z imm8 zmm k zmm
|
|
// VPSRAW.Z m128 zmm k zmm
|
|
// VPSRAW.Z xmm zmm k zmm
|
|
//
|
|
// Construct and append a VPSRAW.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSRAW_Z(imx, mxyz, k, xyz operand.Op) { ctx.VPSRAW_Z(imx, mxyz, k, xyz) }
|
|
|
|
// VPSRLD: Shift Packed Doubleword Data Right Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRLD imm8 ymm ymm
|
|
// VPSRLD m128 ymm ymm
|
|
// VPSRLD xmm ymm ymm
|
|
// VPSRLD imm8 xmm xmm
|
|
// VPSRLD m128 xmm xmm
|
|
// VPSRLD xmm xmm xmm
|
|
// VPSRLD imm8 m128 k xmm
|
|
// VPSRLD imm8 m128 xmm
|
|
// VPSRLD imm8 m256 k ymm
|
|
// VPSRLD imm8 m256 ymm
|
|
// VPSRLD imm8 xmm k xmm
|
|
// VPSRLD imm8 ymm k ymm
|
|
// VPSRLD m128 xmm k xmm
|
|
// VPSRLD m128 ymm k ymm
|
|
// VPSRLD xmm xmm k xmm
|
|
// VPSRLD xmm ymm k ymm
|
|
// VPSRLD imm8 m512 k zmm
|
|
// VPSRLD imm8 m512 zmm
|
|
// VPSRLD imm8 zmm k zmm
|
|
// VPSRLD imm8 zmm zmm
|
|
// VPSRLD m128 zmm k zmm
|
|
// VPSRLD m128 zmm zmm
|
|
// VPSRLD xmm zmm k zmm
|
|
// VPSRLD xmm zmm zmm
|
|
//
|
|
// Construct and append a VPSRLD instruction to the active function.
|
|
func (c *Context) VPSRLD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPSRLD(ops...))
|
|
}
|
|
|
|
// VPSRLD: Shift Packed Doubleword Data Right Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRLD imm8 ymm ymm
|
|
// VPSRLD m128 ymm ymm
|
|
// VPSRLD xmm ymm ymm
|
|
// VPSRLD imm8 xmm xmm
|
|
// VPSRLD m128 xmm xmm
|
|
// VPSRLD xmm xmm xmm
|
|
// VPSRLD imm8 m128 k xmm
|
|
// VPSRLD imm8 m128 xmm
|
|
// VPSRLD imm8 m256 k ymm
|
|
// VPSRLD imm8 m256 ymm
|
|
// VPSRLD imm8 xmm k xmm
|
|
// VPSRLD imm8 ymm k ymm
|
|
// VPSRLD m128 xmm k xmm
|
|
// VPSRLD m128 ymm k ymm
|
|
// VPSRLD xmm xmm k xmm
|
|
// VPSRLD xmm ymm k ymm
|
|
// VPSRLD imm8 m512 k zmm
|
|
// VPSRLD imm8 m512 zmm
|
|
// VPSRLD imm8 zmm k zmm
|
|
// VPSRLD imm8 zmm zmm
|
|
// VPSRLD m128 zmm k zmm
|
|
// VPSRLD m128 zmm zmm
|
|
// VPSRLD xmm zmm k zmm
|
|
// VPSRLD xmm zmm zmm
|
|
//
|
|
// Construct and append a VPSRLD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSRLD(ops ...operand.Op) { ctx.VPSRLD(ops...) }
|
|
|
|
// VPSRLDQ: Shift Packed Double Quadword Right Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRLDQ imm8 ymm ymm
|
|
// VPSRLDQ imm8 xmm xmm
|
|
// VPSRLDQ imm8 m128 xmm
|
|
// VPSRLDQ imm8 m256 ymm
|
|
// VPSRLDQ imm8 m512 zmm
|
|
// VPSRLDQ imm8 zmm zmm
|
|
//
|
|
// Construct and append a VPSRLDQ instruction to the active function.
|
|
func (c *Context) VPSRLDQ(i, mxyz, xyz operand.Op) {
|
|
c.addinstruction(x86.VPSRLDQ(i, mxyz, xyz))
|
|
}
|
|
|
|
// VPSRLDQ: Shift Packed Double Quadword Right Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRLDQ imm8 ymm ymm
|
|
// VPSRLDQ imm8 xmm xmm
|
|
// VPSRLDQ imm8 m128 xmm
|
|
// VPSRLDQ imm8 m256 ymm
|
|
// VPSRLDQ imm8 m512 zmm
|
|
// VPSRLDQ imm8 zmm zmm
|
|
//
|
|
// Construct and append a VPSRLDQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSRLDQ(i, mxyz, xyz operand.Op) { ctx.VPSRLDQ(i, mxyz, xyz) }
|
|
|
|
// VPSRLD_BCST: Shift Packed Doubleword Data Right Logical (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRLD.BCST imm8 m32 k xmm
|
|
// VPSRLD.BCST imm8 m32 k ymm
|
|
// VPSRLD.BCST imm8 m32 xmm
|
|
// VPSRLD.BCST imm8 m32 ymm
|
|
// VPSRLD.BCST imm8 m32 k zmm
|
|
// VPSRLD.BCST imm8 m32 zmm
|
|
//
|
|
// Construct and append a VPSRLD.BCST instruction to the active function.
|
|
func (c *Context) VPSRLD_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPSRLD_BCST(ops...))
|
|
}
|
|
|
|
// VPSRLD_BCST: Shift Packed Doubleword Data Right Logical (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRLD.BCST imm8 m32 k xmm
|
|
// VPSRLD.BCST imm8 m32 k ymm
|
|
// VPSRLD.BCST imm8 m32 xmm
|
|
// VPSRLD.BCST imm8 m32 ymm
|
|
// VPSRLD.BCST imm8 m32 k zmm
|
|
// VPSRLD.BCST imm8 m32 zmm
|
|
//
|
|
// Construct and append a VPSRLD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSRLD_BCST(ops ...operand.Op) { ctx.VPSRLD_BCST(ops...) }
|
|
|
|
// VPSRLD_BCST_Z: Shift Packed Doubleword Data Right Logical (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRLD.BCST.Z imm8 m32 k xmm
|
|
// VPSRLD.BCST.Z imm8 m32 k ymm
|
|
// VPSRLD.BCST.Z imm8 m32 k zmm
|
|
//
|
|
// Construct and append a VPSRLD.BCST.Z instruction to the active function.
|
|
func (c *Context) VPSRLD_BCST_Z(i, m, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VPSRLD_BCST_Z(i, m, k, xyz))
|
|
}
|
|
|
|
// VPSRLD_BCST_Z: Shift Packed Doubleword Data Right Logical (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRLD.BCST.Z imm8 m32 k xmm
|
|
// VPSRLD.BCST.Z imm8 m32 k ymm
|
|
// VPSRLD.BCST.Z imm8 m32 k zmm
|
|
//
|
|
// Construct and append a VPSRLD.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSRLD_BCST_Z(i, m, k, xyz operand.Op) { ctx.VPSRLD_BCST_Z(i, m, k, xyz) }
|
|
|
|
// VPSRLD_Z: Shift Packed Doubleword Data Right Logical (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRLD.Z imm8 m128 k xmm
|
|
// VPSRLD.Z imm8 m256 k ymm
|
|
// VPSRLD.Z imm8 xmm k xmm
|
|
// VPSRLD.Z imm8 ymm k ymm
|
|
// VPSRLD.Z m128 xmm k xmm
|
|
// VPSRLD.Z m128 ymm k ymm
|
|
// VPSRLD.Z xmm xmm k xmm
|
|
// VPSRLD.Z xmm ymm k ymm
|
|
// VPSRLD.Z imm8 m512 k zmm
|
|
// VPSRLD.Z imm8 zmm k zmm
|
|
// VPSRLD.Z m128 zmm k zmm
|
|
// VPSRLD.Z xmm zmm k zmm
|
|
//
|
|
// Construct and append a VPSRLD.Z instruction to the active function.
|
|
func (c *Context) VPSRLD_Z(imx, mxyz, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VPSRLD_Z(imx, mxyz, k, xyz))
|
|
}
|
|
|
|
// VPSRLD_Z: Shift Packed Doubleword Data Right Logical (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRLD.Z imm8 m128 k xmm
|
|
// VPSRLD.Z imm8 m256 k ymm
|
|
// VPSRLD.Z imm8 xmm k xmm
|
|
// VPSRLD.Z imm8 ymm k ymm
|
|
// VPSRLD.Z m128 xmm k xmm
|
|
// VPSRLD.Z m128 ymm k ymm
|
|
// VPSRLD.Z xmm xmm k xmm
|
|
// VPSRLD.Z xmm ymm k ymm
|
|
// VPSRLD.Z imm8 m512 k zmm
|
|
// VPSRLD.Z imm8 zmm k zmm
|
|
// VPSRLD.Z m128 zmm k zmm
|
|
// VPSRLD.Z xmm zmm k zmm
|
|
//
|
|
// Construct and append a VPSRLD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSRLD_Z(imx, mxyz, k, xyz operand.Op) { ctx.VPSRLD_Z(imx, mxyz, k, xyz) }
|
|
|
|
// VPSRLQ: Shift Packed Quadword Data Right Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRLQ imm8 ymm ymm
|
|
// VPSRLQ m128 ymm ymm
|
|
// VPSRLQ xmm ymm ymm
|
|
// VPSRLQ imm8 xmm xmm
|
|
// VPSRLQ m128 xmm xmm
|
|
// VPSRLQ xmm xmm xmm
|
|
// VPSRLQ imm8 m128 k xmm
|
|
// VPSRLQ imm8 m128 xmm
|
|
// VPSRLQ imm8 m256 k ymm
|
|
// VPSRLQ imm8 m256 ymm
|
|
// VPSRLQ imm8 xmm k xmm
|
|
// VPSRLQ imm8 ymm k ymm
|
|
// VPSRLQ m128 xmm k xmm
|
|
// VPSRLQ m128 ymm k ymm
|
|
// VPSRLQ xmm xmm k xmm
|
|
// VPSRLQ xmm ymm k ymm
|
|
// VPSRLQ imm8 m512 k zmm
|
|
// VPSRLQ imm8 m512 zmm
|
|
// VPSRLQ imm8 zmm k zmm
|
|
// VPSRLQ imm8 zmm zmm
|
|
// VPSRLQ m128 zmm k zmm
|
|
// VPSRLQ m128 zmm zmm
|
|
// VPSRLQ xmm zmm k zmm
|
|
// VPSRLQ xmm zmm zmm
|
|
//
|
|
// Construct and append a VPSRLQ instruction to the active function.
|
|
func (c *Context) VPSRLQ(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPSRLQ(ops...))
|
|
}
|
|
|
|
// VPSRLQ: Shift Packed Quadword Data Right Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRLQ imm8 ymm ymm
|
|
// VPSRLQ m128 ymm ymm
|
|
// VPSRLQ xmm ymm ymm
|
|
// VPSRLQ imm8 xmm xmm
|
|
// VPSRLQ m128 xmm xmm
|
|
// VPSRLQ xmm xmm xmm
|
|
// VPSRLQ imm8 m128 k xmm
|
|
// VPSRLQ imm8 m128 xmm
|
|
// VPSRLQ imm8 m256 k ymm
|
|
// VPSRLQ imm8 m256 ymm
|
|
// VPSRLQ imm8 xmm k xmm
|
|
// VPSRLQ imm8 ymm k ymm
|
|
// VPSRLQ m128 xmm k xmm
|
|
// VPSRLQ m128 ymm k ymm
|
|
// VPSRLQ xmm xmm k xmm
|
|
// VPSRLQ xmm ymm k ymm
|
|
// VPSRLQ imm8 m512 k zmm
|
|
// VPSRLQ imm8 m512 zmm
|
|
// VPSRLQ imm8 zmm k zmm
|
|
// VPSRLQ imm8 zmm zmm
|
|
// VPSRLQ m128 zmm k zmm
|
|
// VPSRLQ m128 zmm zmm
|
|
// VPSRLQ xmm zmm k zmm
|
|
// VPSRLQ xmm zmm zmm
|
|
//
|
|
// Construct and append a VPSRLQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSRLQ(ops ...operand.Op) { ctx.VPSRLQ(ops...) }
|
|
|
|
// VPSRLQ_BCST: Shift Packed Quadword Data Right Logical (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRLQ.BCST imm8 m64 k xmm
|
|
// VPSRLQ.BCST imm8 m64 k ymm
|
|
// VPSRLQ.BCST imm8 m64 xmm
|
|
// VPSRLQ.BCST imm8 m64 ymm
|
|
// VPSRLQ.BCST imm8 m64 k zmm
|
|
// VPSRLQ.BCST imm8 m64 zmm
|
|
//
|
|
// Construct and append a VPSRLQ.BCST instruction to the active function.
|
|
func (c *Context) VPSRLQ_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPSRLQ_BCST(ops...))
|
|
}
|
|
|
|
// VPSRLQ_BCST: Shift Packed Quadword Data Right Logical (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRLQ.BCST imm8 m64 k xmm
|
|
// VPSRLQ.BCST imm8 m64 k ymm
|
|
// VPSRLQ.BCST imm8 m64 xmm
|
|
// VPSRLQ.BCST imm8 m64 ymm
|
|
// VPSRLQ.BCST imm8 m64 k zmm
|
|
// VPSRLQ.BCST imm8 m64 zmm
|
|
//
|
|
// Construct and append a VPSRLQ.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSRLQ_BCST(ops ...operand.Op) { ctx.VPSRLQ_BCST(ops...) }
|
|
|
|
// VPSRLQ_BCST_Z: Shift Packed Quadword Data Right Logical (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRLQ.BCST.Z imm8 m64 k xmm
|
|
// VPSRLQ.BCST.Z imm8 m64 k ymm
|
|
// VPSRLQ.BCST.Z imm8 m64 k zmm
|
|
//
|
|
// Construct and append a VPSRLQ.BCST.Z instruction to the active function.
|
|
func (c *Context) VPSRLQ_BCST_Z(i, m, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VPSRLQ_BCST_Z(i, m, k, xyz))
|
|
}
|
|
|
|
// VPSRLQ_BCST_Z: Shift Packed Quadword Data Right Logical (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRLQ.BCST.Z imm8 m64 k xmm
|
|
// VPSRLQ.BCST.Z imm8 m64 k ymm
|
|
// VPSRLQ.BCST.Z imm8 m64 k zmm
|
|
//
|
|
// Construct and append a VPSRLQ.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSRLQ_BCST_Z(i, m, k, xyz operand.Op) { ctx.VPSRLQ_BCST_Z(i, m, k, xyz) }
|
|
|
|
// VPSRLQ_Z: Shift Packed Quadword Data Right Logical (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRLQ.Z imm8 m128 k xmm
|
|
// VPSRLQ.Z imm8 m256 k ymm
|
|
// VPSRLQ.Z imm8 xmm k xmm
|
|
// VPSRLQ.Z imm8 ymm k ymm
|
|
// VPSRLQ.Z m128 xmm k xmm
|
|
// VPSRLQ.Z m128 ymm k ymm
|
|
// VPSRLQ.Z xmm xmm k xmm
|
|
// VPSRLQ.Z xmm ymm k ymm
|
|
// VPSRLQ.Z imm8 m512 k zmm
|
|
// VPSRLQ.Z imm8 zmm k zmm
|
|
// VPSRLQ.Z m128 zmm k zmm
|
|
// VPSRLQ.Z xmm zmm k zmm
|
|
//
|
|
// Construct and append a VPSRLQ.Z instruction to the active function.
|
|
func (c *Context) VPSRLQ_Z(imx, mxyz, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VPSRLQ_Z(imx, mxyz, k, xyz))
|
|
}
|
|
|
|
// VPSRLQ_Z: Shift Packed Quadword Data Right Logical (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRLQ.Z imm8 m128 k xmm
|
|
// VPSRLQ.Z imm8 m256 k ymm
|
|
// VPSRLQ.Z imm8 xmm k xmm
|
|
// VPSRLQ.Z imm8 ymm k ymm
|
|
// VPSRLQ.Z m128 xmm k xmm
|
|
// VPSRLQ.Z m128 ymm k ymm
|
|
// VPSRLQ.Z xmm xmm k xmm
|
|
// VPSRLQ.Z xmm ymm k ymm
|
|
// VPSRLQ.Z imm8 m512 k zmm
|
|
// VPSRLQ.Z imm8 zmm k zmm
|
|
// VPSRLQ.Z m128 zmm k zmm
|
|
// VPSRLQ.Z xmm zmm k zmm
|
|
//
|
|
// Construct and append a VPSRLQ.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSRLQ_Z(imx, mxyz, k, xyz operand.Op) { ctx.VPSRLQ_Z(imx, mxyz, k, xyz) }
|
|
|
|
// VPSRLVD: Variable Shift Packed Doubleword Data Right Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRLVD m128 xmm xmm
|
|
// VPSRLVD m256 ymm ymm
|
|
// VPSRLVD xmm xmm xmm
|
|
// VPSRLVD ymm ymm ymm
|
|
// VPSRLVD m128 xmm k xmm
|
|
// VPSRLVD m256 ymm k ymm
|
|
// VPSRLVD xmm xmm k xmm
|
|
// VPSRLVD ymm ymm k ymm
|
|
// VPSRLVD m512 zmm k zmm
|
|
// VPSRLVD m512 zmm zmm
|
|
// VPSRLVD zmm zmm k zmm
|
|
// VPSRLVD zmm zmm zmm
|
|
//
|
|
// Construct and append a VPSRLVD instruction to the active function.
|
|
func (c *Context) VPSRLVD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPSRLVD(ops...))
|
|
}
|
|
|
|
// VPSRLVD: Variable Shift Packed Doubleword Data Right Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRLVD m128 xmm xmm
|
|
// VPSRLVD m256 ymm ymm
|
|
// VPSRLVD xmm xmm xmm
|
|
// VPSRLVD ymm ymm ymm
|
|
// VPSRLVD m128 xmm k xmm
|
|
// VPSRLVD m256 ymm k ymm
|
|
// VPSRLVD xmm xmm k xmm
|
|
// VPSRLVD ymm ymm k ymm
|
|
// VPSRLVD m512 zmm k zmm
|
|
// VPSRLVD m512 zmm zmm
|
|
// VPSRLVD zmm zmm k zmm
|
|
// VPSRLVD zmm zmm zmm
|
|
//
|
|
// Construct and append a VPSRLVD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSRLVD(ops ...operand.Op) { ctx.VPSRLVD(ops...) }
|
|
|
|
// VPSRLVD_BCST: Variable Shift Packed Doubleword Data Right Logical (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRLVD.BCST m32 xmm k xmm
|
|
// VPSRLVD.BCST m32 xmm xmm
|
|
// VPSRLVD.BCST m32 ymm k ymm
|
|
// VPSRLVD.BCST m32 ymm ymm
|
|
// VPSRLVD.BCST m32 zmm k zmm
|
|
// VPSRLVD.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VPSRLVD.BCST instruction to the active function.
|
|
func (c *Context) VPSRLVD_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPSRLVD_BCST(ops...))
|
|
}
|
|
|
|
// VPSRLVD_BCST: Variable Shift Packed Doubleword Data Right Logical (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRLVD.BCST m32 xmm k xmm
|
|
// VPSRLVD.BCST m32 xmm xmm
|
|
// VPSRLVD.BCST m32 ymm k ymm
|
|
// VPSRLVD.BCST m32 ymm ymm
|
|
// VPSRLVD.BCST m32 zmm k zmm
|
|
// VPSRLVD.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VPSRLVD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSRLVD_BCST(ops ...operand.Op) { ctx.VPSRLVD_BCST(ops...) }
|
|
|
|
// VPSRLVD_BCST_Z: Variable Shift Packed Doubleword Data Right Logical (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRLVD.BCST.Z m32 xmm k xmm
|
|
// VPSRLVD.BCST.Z m32 ymm k ymm
|
|
// VPSRLVD.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VPSRLVD.BCST.Z instruction to the active function.
|
|
func (c *Context) VPSRLVD_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPSRLVD_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPSRLVD_BCST_Z: Variable Shift Packed Doubleword Data Right Logical (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRLVD.BCST.Z m32 xmm k xmm
|
|
// VPSRLVD.BCST.Z m32 ymm k ymm
|
|
// VPSRLVD.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VPSRLVD.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSRLVD_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VPSRLVD_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VPSRLVD_Z: Variable Shift Packed Doubleword Data Right Logical (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRLVD.Z m128 xmm k xmm
|
|
// VPSRLVD.Z m256 ymm k ymm
|
|
// VPSRLVD.Z xmm xmm k xmm
|
|
// VPSRLVD.Z ymm ymm k ymm
|
|
// VPSRLVD.Z m512 zmm k zmm
|
|
// VPSRLVD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPSRLVD.Z instruction to the active function.
|
|
func (c *Context) VPSRLVD_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPSRLVD_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPSRLVD_Z: Variable Shift Packed Doubleword Data Right Logical (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRLVD.Z m128 xmm k xmm
|
|
// VPSRLVD.Z m256 ymm k ymm
|
|
// VPSRLVD.Z xmm xmm k xmm
|
|
// VPSRLVD.Z ymm ymm k ymm
|
|
// VPSRLVD.Z m512 zmm k zmm
|
|
// VPSRLVD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPSRLVD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSRLVD_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPSRLVD_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPSRLVQ: Variable Shift Packed Quadword Data Right Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRLVQ m128 xmm xmm
|
|
// VPSRLVQ m256 ymm ymm
|
|
// VPSRLVQ xmm xmm xmm
|
|
// VPSRLVQ ymm ymm ymm
|
|
// VPSRLVQ m128 xmm k xmm
|
|
// VPSRLVQ m256 ymm k ymm
|
|
// VPSRLVQ xmm xmm k xmm
|
|
// VPSRLVQ ymm ymm k ymm
|
|
// VPSRLVQ m512 zmm k zmm
|
|
// VPSRLVQ m512 zmm zmm
|
|
// VPSRLVQ zmm zmm k zmm
|
|
// VPSRLVQ zmm zmm zmm
|
|
//
|
|
// Construct and append a VPSRLVQ instruction to the active function.
|
|
func (c *Context) VPSRLVQ(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPSRLVQ(ops...))
|
|
}
|
|
|
|
// VPSRLVQ: Variable Shift Packed Quadword Data Right Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRLVQ m128 xmm xmm
|
|
// VPSRLVQ m256 ymm ymm
|
|
// VPSRLVQ xmm xmm xmm
|
|
// VPSRLVQ ymm ymm ymm
|
|
// VPSRLVQ m128 xmm k xmm
|
|
// VPSRLVQ m256 ymm k ymm
|
|
// VPSRLVQ xmm xmm k xmm
|
|
// VPSRLVQ ymm ymm k ymm
|
|
// VPSRLVQ m512 zmm k zmm
|
|
// VPSRLVQ m512 zmm zmm
|
|
// VPSRLVQ zmm zmm k zmm
|
|
// VPSRLVQ zmm zmm zmm
|
|
//
|
|
// Construct and append a VPSRLVQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSRLVQ(ops ...operand.Op) { ctx.VPSRLVQ(ops...) }
|
|
|
|
// VPSRLVQ_BCST: Variable Shift Packed Quadword Data Right Logical (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRLVQ.BCST m64 xmm k xmm
|
|
// VPSRLVQ.BCST m64 xmm xmm
|
|
// VPSRLVQ.BCST m64 ymm k ymm
|
|
// VPSRLVQ.BCST m64 ymm ymm
|
|
// VPSRLVQ.BCST m64 zmm k zmm
|
|
// VPSRLVQ.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VPSRLVQ.BCST instruction to the active function.
|
|
func (c *Context) VPSRLVQ_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPSRLVQ_BCST(ops...))
|
|
}
|
|
|
|
// VPSRLVQ_BCST: Variable Shift Packed Quadword Data Right Logical (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRLVQ.BCST m64 xmm k xmm
|
|
// VPSRLVQ.BCST m64 xmm xmm
|
|
// VPSRLVQ.BCST m64 ymm k ymm
|
|
// VPSRLVQ.BCST m64 ymm ymm
|
|
// VPSRLVQ.BCST m64 zmm k zmm
|
|
// VPSRLVQ.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VPSRLVQ.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSRLVQ_BCST(ops ...operand.Op) {
	ctx.VPSRLVQ_BCST(ops...)
}
|
|
|
|
// VPSRLVQ_BCST_Z: Variable Shift Packed Quadword Data Right Logical (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRLVQ.BCST.Z m64 xmm k xmm
|
|
// VPSRLVQ.BCST.Z m64 ymm k ymm
|
|
// VPSRLVQ.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VPSRLVQ.BCST.Z instruction to the active function.
|
|
func (c *Context) VPSRLVQ_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPSRLVQ_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPSRLVQ_BCST_Z: Variable Shift Packed Quadword Data Right Logical (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRLVQ.BCST.Z m64 xmm k xmm
|
|
// VPSRLVQ.BCST.Z m64 ymm k ymm
|
|
// VPSRLVQ.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VPSRLVQ.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSRLVQ_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	ctx.VPSRLVQ_BCST_Z(m, xyz, k, xyz1)
}
|
|
|
|
// VPSRLVQ_Z: Variable Shift Packed Quadword Data Right Logical (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRLVQ.Z m128 xmm k xmm
|
|
// VPSRLVQ.Z m256 ymm k ymm
|
|
// VPSRLVQ.Z xmm xmm k xmm
|
|
// VPSRLVQ.Z ymm ymm k ymm
|
|
// VPSRLVQ.Z m512 zmm k zmm
|
|
// VPSRLVQ.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPSRLVQ.Z instruction to the active function.
|
|
func (c *Context) VPSRLVQ_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPSRLVQ_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPSRLVQ_Z: Variable Shift Packed Quadword Data Right Logical (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRLVQ.Z m128 xmm k xmm
|
|
// VPSRLVQ.Z m256 ymm k ymm
|
|
// VPSRLVQ.Z xmm xmm k xmm
|
|
// VPSRLVQ.Z ymm ymm k ymm
|
|
// VPSRLVQ.Z m512 zmm k zmm
|
|
// VPSRLVQ.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPSRLVQ.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSRLVQ_Z(mxyz, xyz, k, xyz1 operand.Op) {
	ctx.VPSRLVQ_Z(mxyz, xyz, k, xyz1)
}
|
|
|
|
// VPSRLVW: Variable Shift Packed Word Data Right Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRLVW m128 xmm k xmm
|
|
// VPSRLVW m128 xmm xmm
|
|
// VPSRLVW m256 ymm k ymm
|
|
// VPSRLVW m256 ymm ymm
|
|
// VPSRLVW xmm xmm k xmm
|
|
// VPSRLVW xmm xmm xmm
|
|
// VPSRLVW ymm ymm k ymm
|
|
// VPSRLVW ymm ymm ymm
|
|
// VPSRLVW m512 zmm k zmm
|
|
// VPSRLVW m512 zmm zmm
|
|
// VPSRLVW zmm zmm k zmm
|
|
// VPSRLVW zmm zmm zmm
|
|
//
|
|
// Construct and append a VPSRLVW instruction to the active function.
|
|
func (c *Context) VPSRLVW(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPSRLVW(ops...))
|
|
}
|
|
|
|
// VPSRLVW: Variable Shift Packed Word Data Right Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRLVW m128 xmm k xmm
|
|
// VPSRLVW m128 xmm xmm
|
|
// VPSRLVW m256 ymm k ymm
|
|
// VPSRLVW m256 ymm ymm
|
|
// VPSRLVW xmm xmm k xmm
|
|
// VPSRLVW xmm xmm xmm
|
|
// VPSRLVW ymm ymm k ymm
|
|
// VPSRLVW ymm ymm ymm
|
|
// VPSRLVW m512 zmm k zmm
|
|
// VPSRLVW m512 zmm zmm
|
|
// VPSRLVW zmm zmm k zmm
|
|
// VPSRLVW zmm zmm zmm
|
|
//
|
|
// Construct and append a VPSRLVW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSRLVW(ops ...operand.Op) {
	ctx.VPSRLVW(ops...)
}
|
|
|
|
// VPSRLVW_Z: Variable Shift Packed Word Data Right Logical (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRLVW.Z m128 xmm k xmm
|
|
// VPSRLVW.Z m256 ymm k ymm
|
|
// VPSRLVW.Z xmm xmm k xmm
|
|
// VPSRLVW.Z ymm ymm k ymm
|
|
// VPSRLVW.Z m512 zmm k zmm
|
|
// VPSRLVW.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPSRLVW.Z instruction to the active function.
|
|
func (c *Context) VPSRLVW_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPSRLVW_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPSRLVW_Z: Variable Shift Packed Word Data Right Logical (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRLVW.Z m128 xmm k xmm
|
|
// VPSRLVW.Z m256 ymm k ymm
|
|
// VPSRLVW.Z xmm xmm k xmm
|
|
// VPSRLVW.Z ymm ymm k ymm
|
|
// VPSRLVW.Z m512 zmm k zmm
|
|
// VPSRLVW.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPSRLVW.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSRLVW_Z(mxyz, xyz, k, xyz1 operand.Op) {
	ctx.VPSRLVW_Z(mxyz, xyz, k, xyz1)
}
|
|
|
|
// VPSRLW: Shift Packed Word Data Right Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRLW imm8 ymm ymm
|
|
// VPSRLW m128 ymm ymm
|
|
// VPSRLW xmm ymm ymm
|
|
// VPSRLW imm8 xmm xmm
|
|
// VPSRLW m128 xmm xmm
|
|
// VPSRLW xmm xmm xmm
|
|
// VPSRLW imm8 m128 k xmm
|
|
// VPSRLW imm8 m128 xmm
|
|
// VPSRLW imm8 m256 k ymm
|
|
// VPSRLW imm8 m256 ymm
|
|
// VPSRLW imm8 xmm k xmm
|
|
// VPSRLW imm8 ymm k ymm
|
|
// VPSRLW m128 xmm k xmm
|
|
// VPSRLW m128 ymm k ymm
|
|
// VPSRLW xmm xmm k xmm
|
|
// VPSRLW xmm ymm k ymm
|
|
// VPSRLW imm8 m512 k zmm
|
|
// VPSRLW imm8 m512 zmm
|
|
// VPSRLW imm8 zmm k zmm
|
|
// VPSRLW imm8 zmm zmm
|
|
// VPSRLW m128 zmm k zmm
|
|
// VPSRLW m128 zmm zmm
|
|
// VPSRLW xmm zmm k zmm
|
|
// VPSRLW xmm zmm zmm
|
|
//
|
|
// Construct and append a VPSRLW instruction to the active function.
|
|
func (c *Context) VPSRLW(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPSRLW(ops...))
|
|
}
|
|
|
|
// VPSRLW: Shift Packed Word Data Right Logical.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRLW imm8 ymm ymm
|
|
// VPSRLW m128 ymm ymm
|
|
// VPSRLW xmm ymm ymm
|
|
// VPSRLW imm8 xmm xmm
|
|
// VPSRLW m128 xmm xmm
|
|
// VPSRLW xmm xmm xmm
|
|
// VPSRLW imm8 m128 k xmm
|
|
// VPSRLW imm8 m128 xmm
|
|
// VPSRLW imm8 m256 k ymm
|
|
// VPSRLW imm8 m256 ymm
|
|
// VPSRLW imm8 xmm k xmm
|
|
// VPSRLW imm8 ymm k ymm
|
|
// VPSRLW m128 xmm k xmm
|
|
// VPSRLW m128 ymm k ymm
|
|
// VPSRLW xmm xmm k xmm
|
|
// VPSRLW xmm ymm k ymm
|
|
// VPSRLW imm8 m512 k zmm
|
|
// VPSRLW imm8 m512 zmm
|
|
// VPSRLW imm8 zmm k zmm
|
|
// VPSRLW imm8 zmm zmm
|
|
// VPSRLW m128 zmm k zmm
|
|
// VPSRLW m128 zmm zmm
|
|
// VPSRLW xmm zmm k zmm
|
|
// VPSRLW xmm zmm zmm
|
|
//
|
|
// Construct and append a VPSRLW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSRLW(ops ...operand.Op) {
	ctx.VPSRLW(ops...)
}
|
|
|
|
// VPSRLW_Z: Shift Packed Word Data Right Logical (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRLW.Z imm8 m128 k xmm
|
|
// VPSRLW.Z imm8 m256 k ymm
|
|
// VPSRLW.Z imm8 xmm k xmm
|
|
// VPSRLW.Z imm8 ymm k ymm
|
|
// VPSRLW.Z m128 xmm k xmm
|
|
// VPSRLW.Z m128 ymm k ymm
|
|
// VPSRLW.Z xmm xmm k xmm
|
|
// VPSRLW.Z xmm ymm k ymm
|
|
// VPSRLW.Z imm8 m512 k zmm
|
|
// VPSRLW.Z imm8 zmm k zmm
|
|
// VPSRLW.Z m128 zmm k zmm
|
|
// VPSRLW.Z xmm zmm k zmm
|
|
//
|
|
// Construct and append a VPSRLW.Z instruction to the active function.
|
|
func (c *Context) VPSRLW_Z(imx, mxyz, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VPSRLW_Z(imx, mxyz, k, xyz))
|
|
}
|
|
|
|
// VPSRLW_Z: Shift Packed Word Data Right Logical (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSRLW.Z imm8 m128 k xmm
|
|
// VPSRLW.Z imm8 m256 k ymm
|
|
// VPSRLW.Z imm8 xmm k xmm
|
|
// VPSRLW.Z imm8 ymm k ymm
|
|
// VPSRLW.Z m128 xmm k xmm
|
|
// VPSRLW.Z m128 ymm k ymm
|
|
// VPSRLW.Z xmm xmm k xmm
|
|
// VPSRLW.Z xmm ymm k ymm
|
|
// VPSRLW.Z imm8 m512 k zmm
|
|
// VPSRLW.Z imm8 zmm k zmm
|
|
// VPSRLW.Z m128 zmm k zmm
|
|
// VPSRLW.Z xmm zmm k zmm
|
|
//
|
|
// Construct and append a VPSRLW.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSRLW_Z(imx, mxyz, k, xyz operand.Op) {
	ctx.VPSRLW_Z(imx, mxyz, k, xyz)
}
|
|
|
|
// VPSUBB: Subtract Packed Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSUBB m256 ymm ymm
|
|
// VPSUBB ymm ymm ymm
|
|
// VPSUBB m128 xmm xmm
|
|
// VPSUBB xmm xmm xmm
|
|
// VPSUBB m128 xmm k xmm
|
|
// VPSUBB m256 ymm k ymm
|
|
// VPSUBB xmm xmm k xmm
|
|
// VPSUBB ymm ymm k ymm
|
|
// VPSUBB m512 zmm k zmm
|
|
// VPSUBB m512 zmm zmm
|
|
// VPSUBB zmm zmm k zmm
|
|
// VPSUBB zmm zmm zmm
|
|
//
|
|
// Construct and append a VPSUBB instruction to the active function.
|
|
func (c *Context) VPSUBB(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPSUBB(ops...))
|
|
}
|
|
|
|
// VPSUBB: Subtract Packed Byte Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSUBB m256 ymm ymm
|
|
// VPSUBB ymm ymm ymm
|
|
// VPSUBB m128 xmm xmm
|
|
// VPSUBB xmm xmm xmm
|
|
// VPSUBB m128 xmm k xmm
|
|
// VPSUBB m256 ymm k ymm
|
|
// VPSUBB xmm xmm k xmm
|
|
// VPSUBB ymm ymm k ymm
|
|
// VPSUBB m512 zmm k zmm
|
|
// VPSUBB m512 zmm zmm
|
|
// VPSUBB zmm zmm k zmm
|
|
// VPSUBB zmm zmm zmm
|
|
//
|
|
// Construct and append a VPSUBB instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSUBB(ops ...operand.Op) {
	ctx.VPSUBB(ops...)
}
|
|
|
|
// VPSUBB_Z: Subtract Packed Byte Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSUBB.Z m128 xmm k xmm
|
|
// VPSUBB.Z m256 ymm k ymm
|
|
// VPSUBB.Z xmm xmm k xmm
|
|
// VPSUBB.Z ymm ymm k ymm
|
|
// VPSUBB.Z m512 zmm k zmm
|
|
// VPSUBB.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPSUBB.Z instruction to the active function.
|
|
func (c *Context) VPSUBB_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPSUBB_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPSUBB_Z: Subtract Packed Byte Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSUBB.Z m128 xmm k xmm
|
|
// VPSUBB.Z m256 ymm k ymm
|
|
// VPSUBB.Z xmm xmm k xmm
|
|
// VPSUBB.Z ymm ymm k ymm
|
|
// VPSUBB.Z m512 zmm k zmm
|
|
// VPSUBB.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPSUBB.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSUBB_Z(mxyz, xyz, k, xyz1 operand.Op) {
	ctx.VPSUBB_Z(mxyz, xyz, k, xyz1)
}
|
|
|
|
// VPSUBD: Subtract Packed Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSUBD m256 ymm ymm
|
|
// VPSUBD ymm ymm ymm
|
|
// VPSUBD m128 xmm xmm
|
|
// VPSUBD xmm xmm xmm
|
|
// VPSUBD m128 xmm k xmm
|
|
// VPSUBD m256 ymm k ymm
|
|
// VPSUBD xmm xmm k xmm
|
|
// VPSUBD ymm ymm k ymm
|
|
// VPSUBD m512 zmm k zmm
|
|
// VPSUBD m512 zmm zmm
|
|
// VPSUBD zmm zmm k zmm
|
|
// VPSUBD zmm zmm zmm
|
|
//
|
|
// Construct and append a VPSUBD instruction to the active function.
|
|
func (c *Context) VPSUBD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPSUBD(ops...))
|
|
}
|
|
|
|
// VPSUBD: Subtract Packed Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSUBD m256 ymm ymm
|
|
// VPSUBD ymm ymm ymm
|
|
// VPSUBD m128 xmm xmm
|
|
// VPSUBD xmm xmm xmm
|
|
// VPSUBD m128 xmm k xmm
|
|
// VPSUBD m256 ymm k ymm
|
|
// VPSUBD xmm xmm k xmm
|
|
// VPSUBD ymm ymm k ymm
|
|
// VPSUBD m512 zmm k zmm
|
|
// VPSUBD m512 zmm zmm
|
|
// VPSUBD zmm zmm k zmm
|
|
// VPSUBD zmm zmm zmm
|
|
//
|
|
// Construct and append a VPSUBD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSUBD(ops ...operand.Op) {
	ctx.VPSUBD(ops...)
}
|
|
|
|
// VPSUBD_BCST: Subtract Packed Doubleword Integers (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSUBD.BCST m32 xmm k xmm
|
|
// VPSUBD.BCST m32 xmm xmm
|
|
// VPSUBD.BCST m32 ymm k ymm
|
|
// VPSUBD.BCST m32 ymm ymm
|
|
// VPSUBD.BCST m32 zmm k zmm
|
|
// VPSUBD.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VPSUBD.BCST instruction to the active function.
|
|
func (c *Context) VPSUBD_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPSUBD_BCST(ops...))
|
|
}
|
|
|
|
// VPSUBD_BCST: Subtract Packed Doubleword Integers (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSUBD.BCST m32 xmm k xmm
|
|
// VPSUBD.BCST m32 xmm xmm
|
|
// VPSUBD.BCST m32 ymm k ymm
|
|
// VPSUBD.BCST m32 ymm ymm
|
|
// VPSUBD.BCST m32 zmm k zmm
|
|
// VPSUBD.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VPSUBD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSUBD_BCST(ops ...operand.Op) {
	ctx.VPSUBD_BCST(ops...)
}
|
|
|
|
// VPSUBD_BCST_Z: Subtract Packed Doubleword Integers (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSUBD.BCST.Z m32 xmm k xmm
|
|
// VPSUBD.BCST.Z m32 ymm k ymm
|
|
// VPSUBD.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VPSUBD.BCST.Z instruction to the active function.
|
|
func (c *Context) VPSUBD_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPSUBD_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPSUBD_BCST_Z: Subtract Packed Doubleword Integers (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSUBD.BCST.Z m32 xmm k xmm
|
|
// VPSUBD.BCST.Z m32 ymm k ymm
|
|
// VPSUBD.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VPSUBD.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSUBD_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	ctx.VPSUBD_BCST_Z(m, xyz, k, xyz1)
}
|
|
|
|
// VPSUBD_Z: Subtract Packed Doubleword Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSUBD.Z m128 xmm k xmm
|
|
// VPSUBD.Z m256 ymm k ymm
|
|
// VPSUBD.Z xmm xmm k xmm
|
|
// VPSUBD.Z ymm ymm k ymm
|
|
// VPSUBD.Z m512 zmm k zmm
|
|
// VPSUBD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPSUBD.Z instruction to the active function.
|
|
func (c *Context) VPSUBD_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPSUBD_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPSUBD_Z: Subtract Packed Doubleword Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSUBD.Z m128 xmm k xmm
|
|
// VPSUBD.Z m256 ymm k ymm
|
|
// VPSUBD.Z xmm xmm k xmm
|
|
// VPSUBD.Z ymm ymm k ymm
|
|
// VPSUBD.Z m512 zmm k zmm
|
|
// VPSUBD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPSUBD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSUBD_Z(mxyz, xyz, k, xyz1 operand.Op) {
	ctx.VPSUBD_Z(mxyz, xyz, k, xyz1)
}
|
|
|
|
// VPSUBQ: Subtract Packed Quadword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSUBQ m256 ymm ymm
|
|
// VPSUBQ ymm ymm ymm
|
|
// VPSUBQ m128 xmm xmm
|
|
// VPSUBQ xmm xmm xmm
|
|
// VPSUBQ m128 xmm k xmm
|
|
// VPSUBQ m256 ymm k ymm
|
|
// VPSUBQ xmm xmm k xmm
|
|
// VPSUBQ ymm ymm k ymm
|
|
// VPSUBQ m512 zmm k zmm
|
|
// VPSUBQ m512 zmm zmm
|
|
// VPSUBQ zmm zmm k zmm
|
|
// VPSUBQ zmm zmm zmm
|
|
//
|
|
// Construct and append a VPSUBQ instruction to the active function.
|
|
func (c *Context) VPSUBQ(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPSUBQ(ops...))
|
|
}
|
|
|
|
// VPSUBQ: Subtract Packed Quadword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSUBQ m256 ymm ymm
|
|
// VPSUBQ ymm ymm ymm
|
|
// VPSUBQ m128 xmm xmm
|
|
// VPSUBQ xmm xmm xmm
|
|
// VPSUBQ m128 xmm k xmm
|
|
// VPSUBQ m256 ymm k ymm
|
|
// VPSUBQ xmm xmm k xmm
|
|
// VPSUBQ ymm ymm k ymm
|
|
// VPSUBQ m512 zmm k zmm
|
|
// VPSUBQ m512 zmm zmm
|
|
// VPSUBQ zmm zmm k zmm
|
|
// VPSUBQ zmm zmm zmm
|
|
//
|
|
// Construct and append a VPSUBQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSUBQ(ops ...operand.Op) {
	ctx.VPSUBQ(ops...)
}
|
|
|
|
// VPSUBQ_BCST: Subtract Packed Quadword Integers (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSUBQ.BCST m64 xmm k xmm
|
|
// VPSUBQ.BCST m64 xmm xmm
|
|
// VPSUBQ.BCST m64 ymm k ymm
|
|
// VPSUBQ.BCST m64 ymm ymm
|
|
// VPSUBQ.BCST m64 zmm k zmm
|
|
// VPSUBQ.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VPSUBQ.BCST instruction to the active function.
|
|
func (c *Context) VPSUBQ_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPSUBQ_BCST(ops...))
|
|
}
|
|
|
|
// VPSUBQ_BCST: Subtract Packed Quadword Integers (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSUBQ.BCST m64 xmm k xmm
|
|
// VPSUBQ.BCST m64 xmm xmm
|
|
// VPSUBQ.BCST m64 ymm k ymm
|
|
// VPSUBQ.BCST m64 ymm ymm
|
|
// VPSUBQ.BCST m64 zmm k zmm
|
|
// VPSUBQ.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VPSUBQ.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSUBQ_BCST(ops ...operand.Op) {
	ctx.VPSUBQ_BCST(ops...)
}
|
|
|
|
// VPSUBQ_BCST_Z: Subtract Packed Quadword Integers (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSUBQ.BCST.Z m64 xmm k xmm
|
|
// VPSUBQ.BCST.Z m64 ymm k ymm
|
|
// VPSUBQ.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VPSUBQ.BCST.Z instruction to the active function.
|
|
func (c *Context) VPSUBQ_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPSUBQ_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPSUBQ_BCST_Z: Subtract Packed Quadword Integers (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSUBQ.BCST.Z m64 xmm k xmm
|
|
// VPSUBQ.BCST.Z m64 ymm k ymm
|
|
// VPSUBQ.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VPSUBQ.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSUBQ_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	ctx.VPSUBQ_BCST_Z(m, xyz, k, xyz1)
}
|
|
|
|
// VPSUBQ_Z: Subtract Packed Quadword Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSUBQ.Z m128 xmm k xmm
|
|
// VPSUBQ.Z m256 ymm k ymm
|
|
// VPSUBQ.Z xmm xmm k xmm
|
|
// VPSUBQ.Z ymm ymm k ymm
|
|
// VPSUBQ.Z m512 zmm k zmm
|
|
// VPSUBQ.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPSUBQ.Z instruction to the active function.
|
|
func (c *Context) VPSUBQ_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPSUBQ_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPSUBQ_Z: Subtract Packed Quadword Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSUBQ.Z m128 xmm k xmm
|
|
// VPSUBQ.Z m256 ymm k ymm
|
|
// VPSUBQ.Z xmm xmm k xmm
|
|
// VPSUBQ.Z ymm ymm k ymm
|
|
// VPSUBQ.Z m512 zmm k zmm
|
|
// VPSUBQ.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPSUBQ.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSUBQ_Z(mxyz, xyz, k, xyz1 operand.Op) {
	ctx.VPSUBQ_Z(mxyz, xyz, k, xyz1)
}
|
|
|
|
// VPSUBSB: Subtract Packed Signed Byte Integers with Signed Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSUBSB m256 ymm ymm
|
|
// VPSUBSB ymm ymm ymm
|
|
// VPSUBSB m128 xmm xmm
|
|
// VPSUBSB xmm xmm xmm
|
|
// VPSUBSB m128 xmm k xmm
|
|
// VPSUBSB m256 ymm k ymm
|
|
// VPSUBSB xmm xmm k xmm
|
|
// VPSUBSB ymm ymm k ymm
|
|
// VPSUBSB m512 zmm k zmm
|
|
// VPSUBSB m512 zmm zmm
|
|
// VPSUBSB zmm zmm k zmm
|
|
// VPSUBSB zmm zmm zmm
|
|
//
|
|
// Construct and append a VPSUBSB instruction to the active function.
|
|
func (c *Context) VPSUBSB(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPSUBSB(ops...))
|
|
}
|
|
|
|
// VPSUBSB: Subtract Packed Signed Byte Integers with Signed Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSUBSB m256 ymm ymm
|
|
// VPSUBSB ymm ymm ymm
|
|
// VPSUBSB m128 xmm xmm
|
|
// VPSUBSB xmm xmm xmm
|
|
// VPSUBSB m128 xmm k xmm
|
|
// VPSUBSB m256 ymm k ymm
|
|
// VPSUBSB xmm xmm k xmm
|
|
// VPSUBSB ymm ymm k ymm
|
|
// VPSUBSB m512 zmm k zmm
|
|
// VPSUBSB m512 zmm zmm
|
|
// VPSUBSB zmm zmm k zmm
|
|
// VPSUBSB zmm zmm zmm
|
|
//
|
|
// Construct and append a VPSUBSB instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSUBSB(ops ...operand.Op) {
	ctx.VPSUBSB(ops...)
}
|
|
|
|
// VPSUBSB_Z: Subtract Packed Signed Byte Integers with Signed Saturation (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSUBSB.Z m128 xmm k xmm
|
|
// VPSUBSB.Z m256 ymm k ymm
|
|
// VPSUBSB.Z xmm xmm k xmm
|
|
// VPSUBSB.Z ymm ymm k ymm
|
|
// VPSUBSB.Z m512 zmm k zmm
|
|
// VPSUBSB.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPSUBSB.Z instruction to the active function.
|
|
func (c *Context) VPSUBSB_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPSUBSB_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPSUBSB_Z: Subtract Packed Signed Byte Integers with Signed Saturation (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSUBSB.Z m128 xmm k xmm
|
|
// VPSUBSB.Z m256 ymm k ymm
|
|
// VPSUBSB.Z xmm xmm k xmm
|
|
// VPSUBSB.Z ymm ymm k ymm
|
|
// VPSUBSB.Z m512 zmm k zmm
|
|
// VPSUBSB.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPSUBSB.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSUBSB_Z(mxyz, xyz, k, xyz1 operand.Op) {
	ctx.VPSUBSB_Z(mxyz, xyz, k, xyz1)
}
|
|
|
|
// VPSUBSW: Subtract Packed Signed Word Integers with Signed Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSUBSW m256 ymm ymm
|
|
// VPSUBSW ymm ymm ymm
|
|
// VPSUBSW m128 xmm xmm
|
|
// VPSUBSW xmm xmm xmm
|
|
// VPSUBSW m128 xmm k xmm
|
|
// VPSUBSW m256 ymm k ymm
|
|
// VPSUBSW xmm xmm k xmm
|
|
// VPSUBSW ymm ymm k ymm
|
|
// VPSUBSW m512 zmm k zmm
|
|
// VPSUBSW m512 zmm zmm
|
|
// VPSUBSW zmm zmm k zmm
|
|
// VPSUBSW zmm zmm zmm
|
|
//
|
|
// Construct and append a VPSUBSW instruction to the active function.
|
|
func (c *Context) VPSUBSW(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPSUBSW(ops...))
|
|
}
|
|
|
|
// VPSUBSW: Subtract Packed Signed Word Integers with Signed Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSUBSW m256 ymm ymm
|
|
// VPSUBSW ymm ymm ymm
|
|
// VPSUBSW m128 xmm xmm
|
|
// VPSUBSW xmm xmm xmm
|
|
// VPSUBSW m128 xmm k xmm
|
|
// VPSUBSW m256 ymm k ymm
|
|
// VPSUBSW xmm xmm k xmm
|
|
// VPSUBSW ymm ymm k ymm
|
|
// VPSUBSW m512 zmm k zmm
|
|
// VPSUBSW m512 zmm zmm
|
|
// VPSUBSW zmm zmm k zmm
|
|
// VPSUBSW zmm zmm zmm
|
|
//
|
|
// Construct and append a VPSUBSW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSUBSW(ops ...operand.Op) {
	ctx.VPSUBSW(ops...)
}
|
|
|
|
// VPSUBSW_Z: Subtract Packed Signed Word Integers with Signed Saturation (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSUBSW.Z m128 xmm k xmm
|
|
// VPSUBSW.Z m256 ymm k ymm
|
|
// VPSUBSW.Z xmm xmm k xmm
|
|
// VPSUBSW.Z ymm ymm k ymm
|
|
// VPSUBSW.Z m512 zmm k zmm
|
|
// VPSUBSW.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPSUBSW.Z instruction to the active function.
|
|
func (c *Context) VPSUBSW_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPSUBSW_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPSUBSW_Z: Subtract Packed Signed Word Integers with Signed Saturation (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSUBSW.Z m128 xmm k xmm
|
|
// VPSUBSW.Z m256 ymm k ymm
|
|
// VPSUBSW.Z xmm xmm k xmm
|
|
// VPSUBSW.Z ymm ymm k ymm
|
|
// VPSUBSW.Z m512 zmm k zmm
|
|
// VPSUBSW.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPSUBSW.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSUBSW_Z(mxyz, xyz, k, xyz1 operand.Op) {
	ctx.VPSUBSW_Z(mxyz, xyz, k, xyz1)
}
|
|
|
|
// VPSUBUSB: Subtract Packed Unsigned Byte Integers with Unsigned Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSUBUSB m256 ymm ymm
|
|
// VPSUBUSB ymm ymm ymm
|
|
// VPSUBUSB m128 xmm xmm
|
|
// VPSUBUSB xmm xmm xmm
|
|
// VPSUBUSB m128 xmm k xmm
|
|
// VPSUBUSB m256 ymm k ymm
|
|
// VPSUBUSB xmm xmm k xmm
|
|
// VPSUBUSB ymm ymm k ymm
|
|
// VPSUBUSB m512 zmm k zmm
|
|
// VPSUBUSB m512 zmm zmm
|
|
// VPSUBUSB zmm zmm k zmm
|
|
// VPSUBUSB zmm zmm zmm
|
|
//
|
|
// Construct and append a VPSUBUSB instruction to the active function.
|
|
func (c *Context) VPSUBUSB(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPSUBUSB(ops...))
|
|
}
|
|
|
|
// VPSUBUSB: Subtract Packed Unsigned Byte Integers with Unsigned Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSUBUSB m256 ymm ymm
|
|
// VPSUBUSB ymm ymm ymm
|
|
// VPSUBUSB m128 xmm xmm
|
|
// VPSUBUSB xmm xmm xmm
|
|
// VPSUBUSB m128 xmm k xmm
|
|
// VPSUBUSB m256 ymm k ymm
|
|
// VPSUBUSB xmm xmm k xmm
|
|
// VPSUBUSB ymm ymm k ymm
|
|
// VPSUBUSB m512 zmm k zmm
|
|
// VPSUBUSB m512 zmm zmm
|
|
// VPSUBUSB zmm zmm k zmm
|
|
// VPSUBUSB zmm zmm zmm
|
|
//
|
|
// Construct and append a VPSUBUSB instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSUBUSB(ops ...operand.Op) {
	ctx.VPSUBUSB(ops...)
}
|
|
|
|
// VPSUBUSB_Z: Subtract Packed Unsigned Byte Integers with Unsigned Saturation (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSUBUSB.Z m128 xmm k xmm
|
|
// VPSUBUSB.Z m256 ymm k ymm
|
|
// VPSUBUSB.Z xmm xmm k xmm
|
|
// VPSUBUSB.Z ymm ymm k ymm
|
|
// VPSUBUSB.Z m512 zmm k zmm
|
|
// VPSUBUSB.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPSUBUSB.Z instruction to the active function.
|
|
func (c *Context) VPSUBUSB_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPSUBUSB_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPSUBUSB_Z: Subtract Packed Unsigned Byte Integers with Unsigned Saturation (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSUBUSB.Z m128 xmm k xmm
|
|
// VPSUBUSB.Z m256 ymm k ymm
|
|
// VPSUBUSB.Z xmm xmm k xmm
|
|
// VPSUBUSB.Z ymm ymm k ymm
|
|
// VPSUBUSB.Z m512 zmm k zmm
|
|
// VPSUBUSB.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPSUBUSB.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSUBUSB_Z(mxyz, xyz, k, xyz1 operand.Op) {
	ctx.VPSUBUSB_Z(mxyz, xyz, k, xyz1)
}
|
|
|
|
// VPSUBUSW: Subtract Packed Unsigned Word Integers with Unsigned Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSUBUSW m256 ymm ymm
|
|
// VPSUBUSW ymm ymm ymm
|
|
// VPSUBUSW m128 xmm xmm
|
|
// VPSUBUSW xmm xmm xmm
|
|
// VPSUBUSW m128 xmm k xmm
|
|
// VPSUBUSW m256 ymm k ymm
|
|
// VPSUBUSW xmm xmm k xmm
|
|
// VPSUBUSW ymm ymm k ymm
|
|
// VPSUBUSW m512 zmm k zmm
|
|
// VPSUBUSW m512 zmm zmm
|
|
// VPSUBUSW zmm zmm k zmm
|
|
// VPSUBUSW zmm zmm zmm
|
|
//
|
|
// Construct and append a VPSUBUSW instruction to the active function.
|
|
func (c *Context) VPSUBUSW(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPSUBUSW(ops...))
|
|
}
|
|
|
|
// VPSUBUSW: Subtract Packed Unsigned Word Integers with Unsigned Saturation.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSUBUSW m256 ymm ymm
|
|
// VPSUBUSW ymm ymm ymm
|
|
// VPSUBUSW m128 xmm xmm
|
|
// VPSUBUSW xmm xmm xmm
|
|
// VPSUBUSW m128 xmm k xmm
|
|
// VPSUBUSW m256 ymm k ymm
|
|
// VPSUBUSW xmm xmm k xmm
|
|
// VPSUBUSW ymm ymm k ymm
|
|
// VPSUBUSW m512 zmm k zmm
|
|
// VPSUBUSW m512 zmm zmm
|
|
// VPSUBUSW zmm zmm k zmm
|
|
// VPSUBUSW zmm zmm zmm
|
|
//
|
|
// Construct and append a VPSUBUSW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSUBUSW(ops ...operand.Op) {
	ctx.VPSUBUSW(ops...)
}
|
|
|
|
// VPSUBUSW_Z: Subtract Packed Unsigned Word Integers with Unsigned Saturation (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSUBUSW.Z m128 xmm k xmm
|
|
// VPSUBUSW.Z m256 ymm k ymm
|
|
// VPSUBUSW.Z xmm xmm k xmm
|
|
// VPSUBUSW.Z ymm ymm k ymm
|
|
// VPSUBUSW.Z m512 zmm k zmm
|
|
// VPSUBUSW.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPSUBUSW.Z instruction to the active function.
|
|
func (c *Context) VPSUBUSW_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPSUBUSW_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPSUBUSW_Z: Subtract Packed Unsigned Word Integers with Unsigned Saturation (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSUBUSW.Z m128 xmm k xmm
|
|
// VPSUBUSW.Z m256 ymm k ymm
|
|
// VPSUBUSW.Z xmm xmm k xmm
|
|
// VPSUBUSW.Z ymm ymm k ymm
|
|
// VPSUBUSW.Z m512 zmm k zmm
|
|
// VPSUBUSW.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPSUBUSW.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSUBUSW_Z(mxyz, xyz, k, xyz1 operand.Op) {
	ctx.VPSUBUSW_Z(mxyz, xyz, k, xyz1)
}
|
|
|
|
// VPSUBW: Subtract Packed Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSUBW m256 ymm ymm
|
|
// VPSUBW ymm ymm ymm
|
|
// VPSUBW m128 xmm xmm
|
|
// VPSUBW xmm xmm xmm
|
|
// VPSUBW m128 xmm k xmm
|
|
// VPSUBW m256 ymm k ymm
|
|
// VPSUBW xmm xmm k xmm
|
|
// VPSUBW ymm ymm k ymm
|
|
// VPSUBW m512 zmm k zmm
|
|
// VPSUBW m512 zmm zmm
|
|
// VPSUBW zmm zmm k zmm
|
|
// VPSUBW zmm zmm zmm
|
|
//
|
|
// Construct and append a VPSUBW instruction to the active function.
|
|
func (c *Context) VPSUBW(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPSUBW(ops...))
|
|
}
|
|
|
|
// VPSUBW: Subtract Packed Word Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSUBW m256 ymm ymm
|
|
// VPSUBW ymm ymm ymm
|
|
// VPSUBW m128 xmm xmm
|
|
// VPSUBW xmm xmm xmm
|
|
// VPSUBW m128 xmm k xmm
|
|
// VPSUBW m256 ymm k ymm
|
|
// VPSUBW xmm xmm k xmm
|
|
// VPSUBW ymm ymm k ymm
|
|
// VPSUBW m512 zmm k zmm
|
|
// VPSUBW m512 zmm zmm
|
|
// VPSUBW zmm zmm k zmm
|
|
// VPSUBW zmm zmm zmm
|
|
//
|
|
// Construct and append a VPSUBW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSUBW(ops ...operand.Op) {
	ctx.VPSUBW(ops...)
}
|
|
|
|
// VPSUBW_Z: Subtract Packed Word Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSUBW.Z m128 xmm k xmm
|
|
// VPSUBW.Z m256 ymm k ymm
|
|
// VPSUBW.Z xmm xmm k xmm
|
|
// VPSUBW.Z ymm ymm k ymm
|
|
// VPSUBW.Z m512 zmm k zmm
|
|
// VPSUBW.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPSUBW.Z instruction to the active function.
|
|
func (c *Context) VPSUBW_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPSUBW_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPSUBW_Z: Subtract Packed Word Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPSUBW.Z m128 xmm k xmm
|
|
// VPSUBW.Z m256 ymm k ymm
|
|
// VPSUBW.Z xmm xmm k xmm
|
|
// VPSUBW.Z ymm ymm k ymm
|
|
// VPSUBW.Z m512 zmm k zmm
|
|
// VPSUBW.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPSUBW.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPSUBW_Z(mxyz, xyz, k, xyz1 operand.Op) {
	ctx.VPSUBW_Z(mxyz, xyz, k, xyz1)
}
|
|
|
|
// VPTERNLOGD: Bitwise Ternary Logical Operation on Doubleword Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPTERNLOGD imm8 m128 xmm k xmm
|
|
// VPTERNLOGD imm8 m128 xmm xmm
|
|
// VPTERNLOGD imm8 m256 ymm k ymm
|
|
// VPTERNLOGD imm8 m256 ymm ymm
|
|
// VPTERNLOGD imm8 xmm xmm k xmm
|
|
// VPTERNLOGD imm8 xmm xmm xmm
|
|
// VPTERNLOGD imm8 ymm ymm k ymm
|
|
// VPTERNLOGD imm8 ymm ymm ymm
|
|
// VPTERNLOGD imm8 m512 zmm k zmm
|
|
// VPTERNLOGD imm8 m512 zmm zmm
|
|
// VPTERNLOGD imm8 zmm zmm k zmm
|
|
// VPTERNLOGD imm8 zmm zmm zmm
|
|
//
|
|
// Construct and append a VPTERNLOGD instruction to the active function.
|
|
func (c *Context) VPTERNLOGD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPTERNLOGD(ops...))
|
|
}
|
|
|
|
// VPTERNLOGD: Bitwise Ternary Logical Operation on Doubleword Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPTERNLOGD imm8 m128 xmm k xmm
|
|
// VPTERNLOGD imm8 m128 xmm xmm
|
|
// VPTERNLOGD imm8 m256 ymm k ymm
|
|
// VPTERNLOGD imm8 m256 ymm ymm
|
|
// VPTERNLOGD imm8 xmm xmm k xmm
|
|
// VPTERNLOGD imm8 xmm xmm xmm
|
|
// VPTERNLOGD imm8 ymm ymm k ymm
|
|
// VPTERNLOGD imm8 ymm ymm ymm
|
|
// VPTERNLOGD imm8 m512 zmm k zmm
|
|
// VPTERNLOGD imm8 m512 zmm zmm
|
|
// VPTERNLOGD imm8 zmm zmm k zmm
|
|
// VPTERNLOGD imm8 zmm zmm zmm
|
|
//
|
|
// Construct and append a VPTERNLOGD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPTERNLOGD(ops ...operand.Op) {
	ctx.VPTERNLOGD(ops...)
}
|
|
|
|
// VPTERNLOGD_BCST: Bitwise Ternary Logical Operation on Doubleword Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPTERNLOGD.BCST imm8 m32 xmm k xmm
|
|
// VPTERNLOGD.BCST imm8 m32 xmm xmm
|
|
// VPTERNLOGD.BCST imm8 m32 ymm k ymm
|
|
// VPTERNLOGD.BCST imm8 m32 ymm ymm
|
|
// VPTERNLOGD.BCST imm8 m32 zmm k zmm
|
|
// VPTERNLOGD.BCST imm8 m32 zmm zmm
|
|
//
|
|
// Construct and append a VPTERNLOGD.BCST instruction to the active function.
|
|
func (c *Context) VPTERNLOGD_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPTERNLOGD_BCST(ops...))
|
|
}
|
|
|
|
// VPTERNLOGD_BCST: Bitwise Ternary Logical Operation on Doubleword Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPTERNLOGD.BCST imm8 m32 xmm k xmm
|
|
// VPTERNLOGD.BCST imm8 m32 xmm xmm
|
|
// VPTERNLOGD.BCST imm8 m32 ymm k ymm
|
|
// VPTERNLOGD.BCST imm8 m32 ymm ymm
|
|
// VPTERNLOGD.BCST imm8 m32 zmm k zmm
|
|
// VPTERNLOGD.BCST imm8 m32 zmm zmm
|
|
//
|
|
// Construct and append a VPTERNLOGD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPTERNLOGD_BCST(ops ...operand.Op) {
	ctx.VPTERNLOGD_BCST(ops...)
}
|
|
|
|
// VPTERNLOGD_BCST_Z: Bitwise Ternary Logical Operation on Doubleword Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPTERNLOGD.BCST.Z imm8 m32 xmm k xmm
|
|
// VPTERNLOGD.BCST.Z imm8 m32 ymm k ymm
|
|
// VPTERNLOGD.BCST.Z imm8 m32 zmm k zmm
|
|
//
|
|
// Construct and append a VPTERNLOGD.BCST.Z instruction to the active function.
|
|
func (c *Context) VPTERNLOGD_BCST_Z(i, m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPTERNLOGD_BCST_Z(i, m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPTERNLOGD_BCST_Z: Bitwise Ternary Logical Operation on Doubleword Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPTERNLOGD.BCST.Z imm8 m32 xmm k xmm
|
|
// VPTERNLOGD.BCST.Z imm8 m32 ymm k ymm
|
|
// VPTERNLOGD.BCST.Z imm8 m32 zmm k zmm
|
|
//
|
|
// Construct and append a VPTERNLOGD.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPTERNLOGD_BCST_Z(i, m, xyz, k, xyz1 operand.Op) { ctx.VPTERNLOGD_BCST_Z(i, m, xyz, k, xyz1) }
|
|
|
|
// VPTERNLOGD_Z: Bitwise Ternary Logical Operation on Doubleword Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPTERNLOGD.Z imm8 m128 xmm k xmm
|
|
// VPTERNLOGD.Z imm8 m256 ymm k ymm
|
|
// VPTERNLOGD.Z imm8 xmm xmm k xmm
|
|
// VPTERNLOGD.Z imm8 ymm ymm k ymm
|
|
// VPTERNLOGD.Z imm8 m512 zmm k zmm
|
|
// VPTERNLOGD.Z imm8 zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPTERNLOGD.Z instruction to the active function.
|
|
func (c *Context) VPTERNLOGD_Z(i, mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPTERNLOGD_Z(i, mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPTERNLOGD_Z: Bitwise Ternary Logical Operation on Doubleword Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPTERNLOGD.Z imm8 m128 xmm k xmm
|
|
// VPTERNLOGD.Z imm8 m256 ymm k ymm
|
|
// VPTERNLOGD.Z imm8 xmm xmm k xmm
|
|
// VPTERNLOGD.Z imm8 ymm ymm k ymm
|
|
// VPTERNLOGD.Z imm8 m512 zmm k zmm
|
|
// VPTERNLOGD.Z imm8 zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPTERNLOGD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPTERNLOGD_Z(i, mxyz, xyz, k, xyz1 operand.Op) { ctx.VPTERNLOGD_Z(i, mxyz, xyz, k, xyz1) }
|
|
|
|
// VPTERNLOGQ: Bitwise Ternary Logical Operation on Quadword Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPTERNLOGQ imm8 m128 xmm k xmm
|
|
// VPTERNLOGQ imm8 m128 xmm xmm
|
|
// VPTERNLOGQ imm8 m256 ymm k ymm
|
|
// VPTERNLOGQ imm8 m256 ymm ymm
|
|
// VPTERNLOGQ imm8 xmm xmm k xmm
|
|
// VPTERNLOGQ imm8 xmm xmm xmm
|
|
// VPTERNLOGQ imm8 ymm ymm k ymm
|
|
// VPTERNLOGQ imm8 ymm ymm ymm
|
|
// VPTERNLOGQ imm8 m512 zmm k zmm
|
|
// VPTERNLOGQ imm8 m512 zmm zmm
|
|
// VPTERNLOGQ imm8 zmm zmm k zmm
|
|
// VPTERNLOGQ imm8 zmm zmm zmm
|
|
//
|
|
// Construct and append a VPTERNLOGQ instruction to the active function.
|
|
func (c *Context) VPTERNLOGQ(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPTERNLOGQ(ops...))
|
|
}
|
|
|
|
// VPTERNLOGQ: Bitwise Ternary Logical Operation on Quadword Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPTERNLOGQ imm8 m128 xmm k xmm
|
|
// VPTERNLOGQ imm8 m128 xmm xmm
|
|
// VPTERNLOGQ imm8 m256 ymm k ymm
|
|
// VPTERNLOGQ imm8 m256 ymm ymm
|
|
// VPTERNLOGQ imm8 xmm xmm k xmm
|
|
// VPTERNLOGQ imm8 xmm xmm xmm
|
|
// VPTERNLOGQ imm8 ymm ymm k ymm
|
|
// VPTERNLOGQ imm8 ymm ymm ymm
|
|
// VPTERNLOGQ imm8 m512 zmm k zmm
|
|
// VPTERNLOGQ imm8 m512 zmm zmm
|
|
// VPTERNLOGQ imm8 zmm zmm k zmm
|
|
// VPTERNLOGQ imm8 zmm zmm zmm
|
|
//
|
|
// Construct and append a VPTERNLOGQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPTERNLOGQ(ops ...operand.Op) { ctx.VPTERNLOGQ(ops...) }
|
|
|
|
// VPTERNLOGQ_BCST: Bitwise Ternary Logical Operation on Quadword Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPTERNLOGQ.BCST imm8 m64 xmm k xmm
|
|
// VPTERNLOGQ.BCST imm8 m64 xmm xmm
|
|
// VPTERNLOGQ.BCST imm8 m64 ymm k ymm
|
|
// VPTERNLOGQ.BCST imm8 m64 ymm ymm
|
|
// VPTERNLOGQ.BCST imm8 m64 zmm k zmm
|
|
// VPTERNLOGQ.BCST imm8 m64 zmm zmm
|
|
//
|
|
// Construct and append a VPTERNLOGQ.BCST instruction to the active function.
|
|
func (c *Context) VPTERNLOGQ_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPTERNLOGQ_BCST(ops...))
|
|
}
|
|
|
|
// VPTERNLOGQ_BCST: Bitwise Ternary Logical Operation on Quadword Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPTERNLOGQ.BCST imm8 m64 xmm k xmm
|
|
// VPTERNLOGQ.BCST imm8 m64 xmm xmm
|
|
// VPTERNLOGQ.BCST imm8 m64 ymm k ymm
|
|
// VPTERNLOGQ.BCST imm8 m64 ymm ymm
|
|
// VPTERNLOGQ.BCST imm8 m64 zmm k zmm
|
|
// VPTERNLOGQ.BCST imm8 m64 zmm zmm
|
|
//
|
|
// Construct and append a VPTERNLOGQ.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPTERNLOGQ_BCST(ops ...operand.Op) { ctx.VPTERNLOGQ_BCST(ops...) }
|
|
|
|
// VPTERNLOGQ_BCST_Z: Bitwise Ternary Logical Operation on Quadword Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPTERNLOGQ.BCST.Z imm8 m64 xmm k xmm
|
|
// VPTERNLOGQ.BCST.Z imm8 m64 ymm k ymm
|
|
// VPTERNLOGQ.BCST.Z imm8 m64 zmm k zmm
|
|
//
|
|
// Construct and append a VPTERNLOGQ.BCST.Z instruction to the active function.
|
|
func (c *Context) VPTERNLOGQ_BCST_Z(i, m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPTERNLOGQ_BCST_Z(i, m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPTERNLOGQ_BCST_Z: Bitwise Ternary Logical Operation on Quadword Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPTERNLOGQ.BCST.Z imm8 m64 xmm k xmm
|
|
// VPTERNLOGQ.BCST.Z imm8 m64 ymm k ymm
|
|
// VPTERNLOGQ.BCST.Z imm8 m64 zmm k zmm
|
|
//
|
|
// Construct and append a VPTERNLOGQ.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPTERNLOGQ_BCST_Z(i, m, xyz, k, xyz1 operand.Op) { ctx.VPTERNLOGQ_BCST_Z(i, m, xyz, k, xyz1) }
|
|
|
|
// VPTERNLOGQ_Z: Bitwise Ternary Logical Operation on Quadword Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPTERNLOGQ.Z imm8 m128 xmm k xmm
|
|
// VPTERNLOGQ.Z imm8 m256 ymm k ymm
|
|
// VPTERNLOGQ.Z imm8 xmm xmm k xmm
|
|
// VPTERNLOGQ.Z imm8 ymm ymm k ymm
|
|
// VPTERNLOGQ.Z imm8 m512 zmm k zmm
|
|
// VPTERNLOGQ.Z imm8 zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPTERNLOGQ.Z instruction to the active function.
|
|
func (c *Context) VPTERNLOGQ_Z(i, mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPTERNLOGQ_Z(i, mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPTERNLOGQ_Z: Bitwise Ternary Logical Operation on Quadword Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPTERNLOGQ.Z imm8 m128 xmm k xmm
|
|
// VPTERNLOGQ.Z imm8 m256 ymm k ymm
|
|
// VPTERNLOGQ.Z imm8 xmm xmm k xmm
|
|
// VPTERNLOGQ.Z imm8 ymm ymm k ymm
|
|
// VPTERNLOGQ.Z imm8 m512 zmm k zmm
|
|
// VPTERNLOGQ.Z imm8 zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPTERNLOGQ.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPTERNLOGQ_Z(i, mxyz, xyz, k, xyz1 operand.Op) { ctx.VPTERNLOGQ_Z(i, mxyz, xyz, k, xyz1) }
|
|
|
|
// VPTEST: Packed Logical Compare.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPTEST m128 xmm
|
|
// VPTEST m256 ymm
|
|
// VPTEST xmm xmm
|
|
// VPTEST ymm ymm
|
|
//
|
|
// Construct and append a VPTEST instruction to the active function.
|
|
func (c *Context) VPTEST(mxy, xy operand.Op) {
|
|
c.addinstruction(x86.VPTEST(mxy, xy))
|
|
}
|
|
|
|
// VPTEST: Packed Logical Compare.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPTEST m128 xmm
|
|
// VPTEST m256 ymm
|
|
// VPTEST xmm xmm
|
|
// VPTEST ymm ymm
|
|
//
|
|
// Construct and append a VPTEST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPTEST(mxy, xy operand.Op) { ctx.VPTEST(mxy, xy) }
|
|
|
|
// VPTESTMB: Logical AND of Packed Byte Integer Values and Set Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPTESTMB m128 xmm k k
|
|
// VPTESTMB m128 xmm k
|
|
// VPTESTMB m256 ymm k k
|
|
// VPTESTMB m256 ymm k
|
|
// VPTESTMB xmm xmm k k
|
|
// VPTESTMB xmm xmm k
|
|
// VPTESTMB ymm ymm k k
|
|
// VPTESTMB ymm ymm k
|
|
// VPTESTMB m512 zmm k k
|
|
// VPTESTMB m512 zmm k
|
|
// VPTESTMB zmm zmm k k
|
|
// VPTESTMB zmm zmm k
|
|
//
|
|
// Construct and append a VPTESTMB instruction to the active function.
|
|
func (c *Context) VPTESTMB(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPTESTMB(ops...))
|
|
}
|
|
|
|
// VPTESTMB: Logical AND of Packed Byte Integer Values and Set Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPTESTMB m128 xmm k k
|
|
// VPTESTMB m128 xmm k
|
|
// VPTESTMB m256 ymm k k
|
|
// VPTESTMB m256 ymm k
|
|
// VPTESTMB xmm xmm k k
|
|
// VPTESTMB xmm xmm k
|
|
// VPTESTMB ymm ymm k k
|
|
// VPTESTMB ymm ymm k
|
|
// VPTESTMB m512 zmm k k
|
|
// VPTESTMB m512 zmm k
|
|
// VPTESTMB zmm zmm k k
|
|
// VPTESTMB zmm zmm k
|
|
//
|
|
// Construct and append a VPTESTMB instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPTESTMB(ops ...operand.Op) { ctx.VPTESTMB(ops...) }
|
|
|
|
// VPTESTMD: Logical AND of Packed Doubleword Integer Values and Set Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPTESTMD m128 xmm k k
|
|
// VPTESTMD m128 xmm k
|
|
// VPTESTMD m256 ymm k k
|
|
// VPTESTMD m256 ymm k
|
|
// VPTESTMD xmm xmm k k
|
|
// VPTESTMD xmm xmm k
|
|
// VPTESTMD ymm ymm k k
|
|
// VPTESTMD ymm ymm k
|
|
// VPTESTMD m512 zmm k k
|
|
// VPTESTMD m512 zmm k
|
|
// VPTESTMD zmm zmm k k
|
|
// VPTESTMD zmm zmm k
|
|
//
|
|
// Construct and append a VPTESTMD instruction to the active function.
|
|
func (c *Context) VPTESTMD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPTESTMD(ops...))
|
|
}
|
|
|
|
// VPTESTMD: Logical AND of Packed Doubleword Integer Values and Set Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPTESTMD m128 xmm k k
|
|
// VPTESTMD m128 xmm k
|
|
// VPTESTMD m256 ymm k k
|
|
// VPTESTMD m256 ymm k
|
|
// VPTESTMD xmm xmm k k
|
|
// VPTESTMD xmm xmm k
|
|
// VPTESTMD ymm ymm k k
|
|
// VPTESTMD ymm ymm k
|
|
// VPTESTMD m512 zmm k k
|
|
// VPTESTMD m512 zmm k
|
|
// VPTESTMD zmm zmm k k
|
|
// VPTESTMD zmm zmm k
|
|
//
|
|
// Construct and append a VPTESTMD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPTESTMD(ops ...operand.Op) { ctx.VPTESTMD(ops...) }
|
|
|
|
// VPTESTMD_BCST: Logical AND of Packed Doubleword Integer Values and Set Mask (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPTESTMD.BCST m32 xmm k k
|
|
// VPTESTMD.BCST m32 xmm k
|
|
// VPTESTMD.BCST m32 ymm k k
|
|
// VPTESTMD.BCST m32 ymm k
|
|
// VPTESTMD.BCST m32 zmm k k
|
|
// VPTESTMD.BCST m32 zmm k
|
|
//
|
|
// Construct and append a VPTESTMD.BCST instruction to the active function.
|
|
func (c *Context) VPTESTMD_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPTESTMD_BCST(ops...))
|
|
}
|
|
|
|
// VPTESTMD_BCST: Logical AND of Packed Doubleword Integer Values and Set Mask (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPTESTMD.BCST m32 xmm k k
|
|
// VPTESTMD.BCST m32 xmm k
|
|
// VPTESTMD.BCST m32 ymm k k
|
|
// VPTESTMD.BCST m32 ymm k
|
|
// VPTESTMD.BCST m32 zmm k k
|
|
// VPTESTMD.BCST m32 zmm k
|
|
//
|
|
// Construct and append a VPTESTMD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPTESTMD_BCST(ops ...operand.Op) { ctx.VPTESTMD_BCST(ops...) }
|
|
|
|
// VPTESTMQ: Logical AND of Packed Quadword Integer Values and Set Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPTESTMQ m128 xmm k k
|
|
// VPTESTMQ m128 xmm k
|
|
// VPTESTMQ m256 ymm k k
|
|
// VPTESTMQ m256 ymm k
|
|
// VPTESTMQ xmm xmm k k
|
|
// VPTESTMQ xmm xmm k
|
|
// VPTESTMQ ymm ymm k k
|
|
// VPTESTMQ ymm ymm k
|
|
// VPTESTMQ m512 zmm k k
|
|
// VPTESTMQ m512 zmm k
|
|
// VPTESTMQ zmm zmm k k
|
|
// VPTESTMQ zmm zmm k
|
|
//
|
|
// Construct and append a VPTESTMQ instruction to the active function.
|
|
func (c *Context) VPTESTMQ(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPTESTMQ(ops...))
|
|
}
|
|
|
|
// VPTESTMQ: Logical AND of Packed Quadword Integer Values and Set Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPTESTMQ m128 xmm k k
|
|
// VPTESTMQ m128 xmm k
|
|
// VPTESTMQ m256 ymm k k
|
|
// VPTESTMQ m256 ymm k
|
|
// VPTESTMQ xmm xmm k k
|
|
// VPTESTMQ xmm xmm k
|
|
// VPTESTMQ ymm ymm k k
|
|
// VPTESTMQ ymm ymm k
|
|
// VPTESTMQ m512 zmm k k
|
|
// VPTESTMQ m512 zmm k
|
|
// VPTESTMQ zmm zmm k k
|
|
// VPTESTMQ zmm zmm k
|
|
//
|
|
// Construct and append a VPTESTMQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPTESTMQ(ops ...operand.Op) { ctx.VPTESTMQ(ops...) }
|
|
|
|
// VPTESTMQ_BCST: Logical AND of Packed Quadword Integer Values and Set Mask (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPTESTMQ.BCST m64 xmm k k
|
|
// VPTESTMQ.BCST m64 xmm k
|
|
// VPTESTMQ.BCST m64 ymm k k
|
|
// VPTESTMQ.BCST m64 ymm k
|
|
// VPTESTMQ.BCST m64 zmm k k
|
|
// VPTESTMQ.BCST m64 zmm k
|
|
//
|
|
// Construct and append a VPTESTMQ.BCST instruction to the active function.
|
|
func (c *Context) VPTESTMQ_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPTESTMQ_BCST(ops...))
|
|
}
|
|
|
|
// VPTESTMQ_BCST: Logical AND of Packed Quadword Integer Values and Set Mask (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPTESTMQ.BCST m64 xmm k k
|
|
// VPTESTMQ.BCST m64 xmm k
|
|
// VPTESTMQ.BCST m64 ymm k k
|
|
// VPTESTMQ.BCST m64 ymm k
|
|
// VPTESTMQ.BCST m64 zmm k k
|
|
// VPTESTMQ.BCST m64 zmm k
|
|
//
|
|
// Construct and append a VPTESTMQ.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPTESTMQ_BCST(ops ...operand.Op) { ctx.VPTESTMQ_BCST(ops...) }
|
|
|
|
// VPTESTMW: Logical AND of Packed Word Integer Values and Set Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPTESTMW m128 xmm k k
|
|
// VPTESTMW m128 xmm k
|
|
// VPTESTMW m256 ymm k k
|
|
// VPTESTMW m256 ymm k
|
|
// VPTESTMW xmm xmm k k
|
|
// VPTESTMW xmm xmm k
|
|
// VPTESTMW ymm ymm k k
|
|
// VPTESTMW ymm ymm k
|
|
// VPTESTMW m512 zmm k k
|
|
// VPTESTMW m512 zmm k
|
|
// VPTESTMW zmm zmm k k
|
|
// VPTESTMW zmm zmm k
|
|
//
|
|
// Construct and append a VPTESTMW instruction to the active function.
|
|
func (c *Context) VPTESTMW(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPTESTMW(ops...))
|
|
}
|
|
|
|
// VPTESTMW: Logical AND of Packed Word Integer Values and Set Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPTESTMW m128 xmm k k
|
|
// VPTESTMW m128 xmm k
|
|
// VPTESTMW m256 ymm k k
|
|
// VPTESTMW m256 ymm k
|
|
// VPTESTMW xmm xmm k k
|
|
// VPTESTMW xmm xmm k
|
|
// VPTESTMW ymm ymm k k
|
|
// VPTESTMW ymm ymm k
|
|
// VPTESTMW m512 zmm k k
|
|
// VPTESTMW m512 zmm k
|
|
// VPTESTMW zmm zmm k k
|
|
// VPTESTMW zmm zmm k
|
|
//
|
|
// Construct and append a VPTESTMW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPTESTMW(ops ...operand.Op) { ctx.VPTESTMW(ops...) }
|
|
|
|
// VPTESTNMB: Logical NAND of Packed Byte Integer Values and Set Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPTESTNMB m512 zmm k k
|
|
// VPTESTNMB m512 zmm k
|
|
// VPTESTNMB zmm zmm k k
|
|
// VPTESTNMB zmm zmm k
|
|
// VPTESTNMB m128 xmm k k
|
|
// VPTESTNMB m128 xmm k
|
|
// VPTESTNMB m256 ymm k k
|
|
// VPTESTNMB m256 ymm k
|
|
// VPTESTNMB xmm xmm k k
|
|
// VPTESTNMB xmm xmm k
|
|
// VPTESTNMB ymm ymm k k
|
|
// VPTESTNMB ymm ymm k
|
|
//
|
|
// Construct and append a VPTESTNMB instruction to the active function.
|
|
func (c *Context) VPTESTNMB(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPTESTNMB(ops...))
|
|
}
|
|
|
|
// VPTESTNMB: Logical NAND of Packed Byte Integer Values and Set Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPTESTNMB m512 zmm k k
|
|
// VPTESTNMB m512 zmm k
|
|
// VPTESTNMB zmm zmm k k
|
|
// VPTESTNMB zmm zmm k
|
|
// VPTESTNMB m128 xmm k k
|
|
// VPTESTNMB m128 xmm k
|
|
// VPTESTNMB m256 ymm k k
|
|
// VPTESTNMB m256 ymm k
|
|
// VPTESTNMB xmm xmm k k
|
|
// VPTESTNMB xmm xmm k
|
|
// VPTESTNMB ymm ymm k k
|
|
// VPTESTNMB ymm ymm k
|
|
//
|
|
// Construct and append a VPTESTNMB instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPTESTNMB(ops ...operand.Op) { ctx.VPTESTNMB(ops...) }
|
|
|
|
// VPTESTNMD: Logical NAND of Packed Doubleword Integer Values and Set Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPTESTNMD m128 xmm k k
|
|
// VPTESTNMD m128 xmm k
|
|
// VPTESTNMD m256 ymm k k
|
|
// VPTESTNMD m256 ymm k
|
|
// VPTESTNMD xmm xmm k k
|
|
// VPTESTNMD xmm xmm k
|
|
// VPTESTNMD ymm ymm k k
|
|
// VPTESTNMD ymm ymm k
|
|
// VPTESTNMD m512 zmm k k
|
|
// VPTESTNMD m512 zmm k
|
|
// VPTESTNMD zmm zmm k k
|
|
// VPTESTNMD zmm zmm k
|
|
//
|
|
// Construct and append a VPTESTNMD instruction to the active function.
|
|
func (c *Context) VPTESTNMD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPTESTNMD(ops...))
|
|
}
|
|
|
|
// VPTESTNMD: Logical NAND of Packed Doubleword Integer Values and Set Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPTESTNMD m128 xmm k k
|
|
// VPTESTNMD m128 xmm k
|
|
// VPTESTNMD m256 ymm k k
|
|
// VPTESTNMD m256 ymm k
|
|
// VPTESTNMD xmm xmm k k
|
|
// VPTESTNMD xmm xmm k
|
|
// VPTESTNMD ymm ymm k k
|
|
// VPTESTNMD ymm ymm k
|
|
// VPTESTNMD m512 zmm k k
|
|
// VPTESTNMD m512 zmm k
|
|
// VPTESTNMD zmm zmm k k
|
|
// VPTESTNMD zmm zmm k
|
|
//
|
|
// Construct and append a VPTESTNMD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPTESTNMD(ops ...operand.Op) { ctx.VPTESTNMD(ops...) }
|
|
|
|
// VPTESTNMD_BCST: Logical NAND of Packed Doubleword Integer Values and Set Mask (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPTESTNMD.BCST m32 xmm k k
|
|
// VPTESTNMD.BCST m32 xmm k
|
|
// VPTESTNMD.BCST m32 ymm k k
|
|
// VPTESTNMD.BCST m32 ymm k
|
|
// VPTESTNMD.BCST m32 zmm k k
|
|
// VPTESTNMD.BCST m32 zmm k
|
|
//
|
|
// Construct and append a VPTESTNMD.BCST instruction to the active function.
|
|
func (c *Context) VPTESTNMD_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPTESTNMD_BCST(ops...))
|
|
}
|
|
|
|
// VPTESTNMD_BCST: Logical NAND of Packed Doubleword Integer Values and Set Mask (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPTESTNMD.BCST m32 xmm k k
|
|
// VPTESTNMD.BCST m32 xmm k
|
|
// VPTESTNMD.BCST m32 ymm k k
|
|
// VPTESTNMD.BCST m32 ymm k
|
|
// VPTESTNMD.BCST m32 zmm k k
|
|
// VPTESTNMD.BCST m32 zmm k
|
|
//
|
|
// Construct and append a VPTESTNMD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPTESTNMD_BCST(ops ...operand.Op) { ctx.VPTESTNMD_BCST(ops...) }
|
|
|
|
// VPTESTNMQ: Logical NAND of Packed Quadword Integer Values and Set Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPTESTNMQ m128 xmm k k
|
|
// VPTESTNMQ m128 xmm k
|
|
// VPTESTNMQ m256 ymm k k
|
|
// VPTESTNMQ m256 ymm k
|
|
// VPTESTNMQ xmm xmm k k
|
|
// VPTESTNMQ xmm xmm k
|
|
// VPTESTNMQ ymm ymm k k
|
|
// VPTESTNMQ ymm ymm k
|
|
// VPTESTNMQ m512 zmm k k
|
|
// VPTESTNMQ m512 zmm k
|
|
// VPTESTNMQ zmm zmm k k
|
|
// VPTESTNMQ zmm zmm k
|
|
//
|
|
// Construct and append a VPTESTNMQ instruction to the active function.
|
|
func (c *Context) VPTESTNMQ(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPTESTNMQ(ops...))
|
|
}
|
|
|
|
// VPTESTNMQ: Logical NAND of Packed Quadword Integer Values and Set Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPTESTNMQ m128 xmm k k
|
|
// VPTESTNMQ m128 xmm k
|
|
// VPTESTNMQ m256 ymm k k
|
|
// VPTESTNMQ m256 ymm k
|
|
// VPTESTNMQ xmm xmm k k
|
|
// VPTESTNMQ xmm xmm k
|
|
// VPTESTNMQ ymm ymm k k
|
|
// VPTESTNMQ ymm ymm k
|
|
// VPTESTNMQ m512 zmm k k
|
|
// VPTESTNMQ m512 zmm k
|
|
// VPTESTNMQ zmm zmm k k
|
|
// VPTESTNMQ zmm zmm k
|
|
//
|
|
// Construct and append a VPTESTNMQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPTESTNMQ(ops ...operand.Op) { ctx.VPTESTNMQ(ops...) }
|
|
|
|
// VPTESTNMQ_BCST: Logical NAND of Packed Quadword Integer Values and Set Mask (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPTESTNMQ.BCST m64 xmm k k
|
|
// VPTESTNMQ.BCST m64 xmm k
|
|
// VPTESTNMQ.BCST m64 ymm k k
|
|
// VPTESTNMQ.BCST m64 ymm k
|
|
// VPTESTNMQ.BCST m64 zmm k k
|
|
// VPTESTNMQ.BCST m64 zmm k
|
|
//
|
|
// Construct and append a VPTESTNMQ.BCST instruction to the active function.
|
|
func (c *Context) VPTESTNMQ_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPTESTNMQ_BCST(ops...))
|
|
}
|
|
|
|
// VPTESTNMQ_BCST: Logical NAND of Packed Quadword Integer Values and Set Mask (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPTESTNMQ.BCST m64 xmm k k
|
|
// VPTESTNMQ.BCST m64 xmm k
|
|
// VPTESTNMQ.BCST m64 ymm k k
|
|
// VPTESTNMQ.BCST m64 ymm k
|
|
// VPTESTNMQ.BCST m64 zmm k k
|
|
// VPTESTNMQ.BCST m64 zmm k
|
|
//
|
|
// Construct and append a VPTESTNMQ.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPTESTNMQ_BCST(ops ...operand.Op) { ctx.VPTESTNMQ_BCST(ops...) }
|
|
|
|
// VPTESTNMW: Logical NAND of Packed Word Integer Values and Set Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPTESTNMW m512 zmm k k
|
|
// VPTESTNMW m512 zmm k
|
|
// VPTESTNMW zmm zmm k k
|
|
// VPTESTNMW zmm zmm k
|
|
// VPTESTNMW m128 xmm k k
|
|
// VPTESTNMW m128 xmm k
|
|
// VPTESTNMW m256 ymm k k
|
|
// VPTESTNMW m256 ymm k
|
|
// VPTESTNMW xmm xmm k k
|
|
// VPTESTNMW xmm xmm k
|
|
// VPTESTNMW ymm ymm k k
|
|
// VPTESTNMW ymm ymm k
|
|
//
|
|
// Construct and append a VPTESTNMW instruction to the active function.
|
|
func (c *Context) VPTESTNMW(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPTESTNMW(ops...))
|
|
}
|
|
|
|
// VPTESTNMW: Logical NAND of Packed Word Integer Values and Set Mask.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPTESTNMW m512 zmm k k
|
|
// VPTESTNMW m512 zmm k
|
|
// VPTESTNMW zmm zmm k k
|
|
// VPTESTNMW zmm zmm k
|
|
// VPTESTNMW m128 xmm k k
|
|
// VPTESTNMW m128 xmm k
|
|
// VPTESTNMW m256 ymm k k
|
|
// VPTESTNMW m256 ymm k
|
|
// VPTESTNMW xmm xmm k k
|
|
// VPTESTNMW xmm xmm k
|
|
// VPTESTNMW ymm ymm k k
|
|
// VPTESTNMW ymm ymm k
|
|
//
|
|
// Construct and append a VPTESTNMW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPTESTNMW(ops ...operand.Op) { ctx.VPTESTNMW(ops...) }
|
|
|
|
// VPUNPCKHBW: Unpack and Interleave High-Order Bytes into Words.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPUNPCKHBW m256 ymm ymm
|
|
// VPUNPCKHBW ymm ymm ymm
|
|
// VPUNPCKHBW m128 xmm xmm
|
|
// VPUNPCKHBW xmm xmm xmm
|
|
// VPUNPCKHBW m128 xmm k xmm
|
|
// VPUNPCKHBW m256 ymm k ymm
|
|
// VPUNPCKHBW xmm xmm k xmm
|
|
// VPUNPCKHBW ymm ymm k ymm
|
|
// VPUNPCKHBW m512 zmm k zmm
|
|
// VPUNPCKHBW m512 zmm zmm
|
|
// VPUNPCKHBW zmm zmm k zmm
|
|
// VPUNPCKHBW zmm zmm zmm
|
|
//
|
|
// Construct and append a VPUNPCKHBW instruction to the active function.
|
|
func (c *Context) VPUNPCKHBW(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPUNPCKHBW(ops...))
|
|
}
|
|
|
|
// VPUNPCKHBW: Unpack and Interleave High-Order Bytes into Words.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPUNPCKHBW m256 ymm ymm
|
|
// VPUNPCKHBW ymm ymm ymm
|
|
// VPUNPCKHBW m128 xmm xmm
|
|
// VPUNPCKHBW xmm xmm xmm
|
|
// VPUNPCKHBW m128 xmm k xmm
|
|
// VPUNPCKHBW m256 ymm k ymm
|
|
// VPUNPCKHBW xmm xmm k xmm
|
|
// VPUNPCKHBW ymm ymm k ymm
|
|
// VPUNPCKHBW m512 zmm k zmm
|
|
// VPUNPCKHBW m512 zmm zmm
|
|
// VPUNPCKHBW zmm zmm k zmm
|
|
// VPUNPCKHBW zmm zmm zmm
|
|
//
|
|
// Construct and append a VPUNPCKHBW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPUNPCKHBW(ops ...operand.Op) { ctx.VPUNPCKHBW(ops...) }
|
|
|
|
// VPUNPCKHBW_Z: Unpack and Interleave High-Order Bytes into Words (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPUNPCKHBW.Z m128 xmm k xmm
|
|
// VPUNPCKHBW.Z m256 ymm k ymm
|
|
// VPUNPCKHBW.Z xmm xmm k xmm
|
|
// VPUNPCKHBW.Z ymm ymm k ymm
|
|
// VPUNPCKHBW.Z m512 zmm k zmm
|
|
// VPUNPCKHBW.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPUNPCKHBW.Z instruction to the active function.
|
|
func (c *Context) VPUNPCKHBW_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPUNPCKHBW_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPUNPCKHBW_Z: Unpack and Interleave High-Order Bytes into Words (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPUNPCKHBW.Z m128 xmm k xmm
|
|
// VPUNPCKHBW.Z m256 ymm k ymm
|
|
// VPUNPCKHBW.Z xmm xmm k xmm
|
|
// VPUNPCKHBW.Z ymm ymm k ymm
|
|
// VPUNPCKHBW.Z m512 zmm k zmm
|
|
// VPUNPCKHBW.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPUNPCKHBW.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPUNPCKHBW_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPUNPCKHBW_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPUNPCKHDQ: Unpack and Interleave High-Order Doublewords into Quadwords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPUNPCKHDQ m256 ymm ymm
|
|
// VPUNPCKHDQ ymm ymm ymm
|
|
// VPUNPCKHDQ m128 xmm xmm
|
|
// VPUNPCKHDQ xmm xmm xmm
|
|
// VPUNPCKHDQ m128 xmm k xmm
|
|
// VPUNPCKHDQ m256 ymm k ymm
|
|
// VPUNPCKHDQ xmm xmm k xmm
|
|
// VPUNPCKHDQ ymm ymm k ymm
|
|
// VPUNPCKHDQ m512 zmm k zmm
|
|
// VPUNPCKHDQ m512 zmm zmm
|
|
// VPUNPCKHDQ zmm zmm k zmm
|
|
// VPUNPCKHDQ zmm zmm zmm
|
|
//
|
|
// Construct and append a VPUNPCKHDQ instruction to the active function.
|
|
func (c *Context) VPUNPCKHDQ(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPUNPCKHDQ(ops...))
|
|
}
|
|
|
|
// VPUNPCKHDQ: Unpack and Interleave High-Order Doublewords into Quadwords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPUNPCKHDQ m256 ymm ymm
|
|
// VPUNPCKHDQ ymm ymm ymm
|
|
// VPUNPCKHDQ m128 xmm xmm
|
|
// VPUNPCKHDQ xmm xmm xmm
|
|
// VPUNPCKHDQ m128 xmm k xmm
|
|
// VPUNPCKHDQ m256 ymm k ymm
|
|
// VPUNPCKHDQ xmm xmm k xmm
|
|
// VPUNPCKHDQ ymm ymm k ymm
|
|
// VPUNPCKHDQ m512 zmm k zmm
|
|
// VPUNPCKHDQ m512 zmm zmm
|
|
// VPUNPCKHDQ zmm zmm k zmm
|
|
// VPUNPCKHDQ zmm zmm zmm
|
|
//
|
|
// Construct and append a VPUNPCKHDQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPUNPCKHDQ(ops ...operand.Op) { ctx.VPUNPCKHDQ(ops...) }
|
|
|
|
// VPUNPCKHDQ_BCST: Unpack and Interleave High-Order Doublewords into Quadwords (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPUNPCKHDQ.BCST m32 xmm k xmm
|
|
// VPUNPCKHDQ.BCST m32 xmm xmm
|
|
// VPUNPCKHDQ.BCST m32 ymm k ymm
|
|
// VPUNPCKHDQ.BCST m32 ymm ymm
|
|
// VPUNPCKHDQ.BCST m32 zmm k zmm
|
|
// VPUNPCKHDQ.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VPUNPCKHDQ.BCST instruction to the active function.
|
|
func (c *Context) VPUNPCKHDQ_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPUNPCKHDQ_BCST(ops...))
|
|
}
|
|
|
|
// VPUNPCKHDQ_BCST: Unpack and Interleave High-Order Doublewords into Quadwords (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPUNPCKHDQ.BCST m32 xmm k xmm
|
|
// VPUNPCKHDQ.BCST m32 xmm xmm
|
|
// VPUNPCKHDQ.BCST m32 ymm k ymm
|
|
// VPUNPCKHDQ.BCST m32 ymm ymm
|
|
// VPUNPCKHDQ.BCST m32 zmm k zmm
|
|
// VPUNPCKHDQ.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VPUNPCKHDQ.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPUNPCKHDQ_BCST(ops ...operand.Op) { ctx.VPUNPCKHDQ_BCST(ops...) }
|
|
|
|
// VPUNPCKHDQ_BCST_Z: Unpack and Interleave High-Order Doublewords into Quadwords (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPUNPCKHDQ.BCST.Z m32 xmm k xmm
|
|
// VPUNPCKHDQ.BCST.Z m32 ymm k ymm
|
|
// VPUNPCKHDQ.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VPUNPCKHDQ.BCST.Z instruction to the active function.
|
|
func (c *Context) VPUNPCKHDQ_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPUNPCKHDQ_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPUNPCKHDQ_BCST_Z: Unpack and Interleave High-Order Doublewords into Quadwords (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPUNPCKHDQ.BCST.Z m32 xmm k xmm
|
|
// VPUNPCKHDQ.BCST.Z m32 ymm k ymm
|
|
// VPUNPCKHDQ.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VPUNPCKHDQ.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPUNPCKHDQ_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VPUNPCKHDQ_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VPUNPCKHDQ_Z: Unpack and Interleave High-Order Doublewords into Quadwords (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPUNPCKHDQ.Z m128 xmm k xmm
|
|
// VPUNPCKHDQ.Z m256 ymm k ymm
|
|
// VPUNPCKHDQ.Z xmm xmm k xmm
|
|
// VPUNPCKHDQ.Z ymm ymm k ymm
|
|
// VPUNPCKHDQ.Z m512 zmm k zmm
|
|
// VPUNPCKHDQ.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPUNPCKHDQ.Z instruction to the active function.
|
|
func (c *Context) VPUNPCKHDQ_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPUNPCKHDQ_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPUNPCKHDQ_Z: Unpack and Interleave High-Order Doublewords into Quadwords (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPUNPCKHDQ.Z m128 xmm k xmm
|
|
// VPUNPCKHDQ.Z m256 ymm k ymm
|
|
// VPUNPCKHDQ.Z xmm xmm k xmm
|
|
// VPUNPCKHDQ.Z ymm ymm k ymm
|
|
// VPUNPCKHDQ.Z m512 zmm k zmm
|
|
// VPUNPCKHDQ.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPUNPCKHDQ.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPUNPCKHDQ_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPUNPCKHDQ_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPUNPCKHQDQ: Unpack and Interleave High-Order Quadwords into Double Quadwords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPUNPCKHQDQ m256 ymm ymm
|
|
// VPUNPCKHQDQ ymm ymm ymm
|
|
// VPUNPCKHQDQ m128 xmm xmm
|
|
// VPUNPCKHQDQ xmm xmm xmm
|
|
// VPUNPCKHQDQ m128 xmm k xmm
|
|
// VPUNPCKHQDQ m256 ymm k ymm
|
|
// VPUNPCKHQDQ xmm xmm k xmm
|
|
// VPUNPCKHQDQ ymm ymm k ymm
|
|
// VPUNPCKHQDQ m512 zmm k zmm
|
|
// VPUNPCKHQDQ m512 zmm zmm
|
|
// VPUNPCKHQDQ zmm zmm k zmm
|
|
// VPUNPCKHQDQ zmm zmm zmm
|
|
//
|
|
// Construct and append a VPUNPCKHQDQ instruction to the active function.
|
|
func (c *Context) VPUNPCKHQDQ(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPUNPCKHQDQ(ops...))
|
|
}
|
|
|
|
// VPUNPCKHQDQ: Unpack and Interleave High-Order Quadwords into Double Quadwords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPUNPCKHQDQ m256 ymm ymm
|
|
// VPUNPCKHQDQ ymm ymm ymm
|
|
// VPUNPCKHQDQ m128 xmm xmm
|
|
// VPUNPCKHQDQ xmm xmm xmm
|
|
// VPUNPCKHQDQ m128 xmm k xmm
|
|
// VPUNPCKHQDQ m256 ymm k ymm
|
|
// VPUNPCKHQDQ xmm xmm k xmm
|
|
// VPUNPCKHQDQ ymm ymm k ymm
|
|
// VPUNPCKHQDQ m512 zmm k zmm
|
|
// VPUNPCKHQDQ m512 zmm zmm
|
|
// VPUNPCKHQDQ zmm zmm k zmm
|
|
// VPUNPCKHQDQ zmm zmm zmm
|
|
//
|
|
// Construct and append a VPUNPCKHQDQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPUNPCKHQDQ(ops ...operand.Op) { ctx.VPUNPCKHQDQ(ops...) }
|
|
|
|
// VPUNPCKHQDQ_BCST: Unpack and Interleave High-Order Quadwords into Double Quadwords (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPUNPCKHQDQ.BCST m64 xmm k xmm
|
|
// VPUNPCKHQDQ.BCST m64 xmm xmm
|
|
// VPUNPCKHQDQ.BCST m64 ymm k ymm
|
|
// VPUNPCKHQDQ.BCST m64 ymm ymm
|
|
// VPUNPCKHQDQ.BCST m64 zmm k zmm
|
|
// VPUNPCKHQDQ.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VPUNPCKHQDQ.BCST instruction to the active function.
|
|
func (c *Context) VPUNPCKHQDQ_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPUNPCKHQDQ_BCST(ops...))
|
|
}
|
|
|
|
// VPUNPCKHQDQ_BCST: Unpack and Interleave High-Order Quadwords into Double Quadwords (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPUNPCKHQDQ.BCST m64 xmm k xmm
|
|
// VPUNPCKHQDQ.BCST m64 xmm xmm
|
|
// VPUNPCKHQDQ.BCST m64 ymm k ymm
|
|
// VPUNPCKHQDQ.BCST m64 ymm ymm
|
|
// VPUNPCKHQDQ.BCST m64 zmm k zmm
|
|
// VPUNPCKHQDQ.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VPUNPCKHQDQ.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPUNPCKHQDQ_BCST(ops ...operand.Op) { ctx.VPUNPCKHQDQ_BCST(ops...) }
|
|
|
|
// VPUNPCKHQDQ_BCST_Z: Unpack and Interleave High-Order Quadwords into Double Quadwords (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPUNPCKHQDQ.BCST.Z m64 xmm k xmm
|
|
// VPUNPCKHQDQ.BCST.Z m64 ymm k ymm
|
|
// VPUNPCKHQDQ.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VPUNPCKHQDQ.BCST.Z instruction to the active function.
|
|
func (c *Context) VPUNPCKHQDQ_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPUNPCKHQDQ_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPUNPCKHQDQ_BCST_Z: Unpack and Interleave High-Order Quadwords into Double Quadwords (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPUNPCKHQDQ.BCST.Z m64 xmm k xmm
|
|
// VPUNPCKHQDQ.BCST.Z m64 ymm k ymm
|
|
// VPUNPCKHQDQ.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VPUNPCKHQDQ.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPUNPCKHQDQ_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VPUNPCKHQDQ_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VPUNPCKHQDQ_Z: Unpack and Interleave High-Order Quadwords into Double Quadwords (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPUNPCKHQDQ.Z m128 xmm k xmm
|
|
// VPUNPCKHQDQ.Z m256 ymm k ymm
|
|
// VPUNPCKHQDQ.Z xmm xmm k xmm
|
|
// VPUNPCKHQDQ.Z ymm ymm k ymm
|
|
// VPUNPCKHQDQ.Z m512 zmm k zmm
|
|
// VPUNPCKHQDQ.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPUNPCKHQDQ.Z instruction to the active function.
|
|
func (c *Context) VPUNPCKHQDQ_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPUNPCKHQDQ_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPUNPCKHQDQ_Z: Unpack and Interleave High-Order Quadwords into Double Quadwords (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPUNPCKHQDQ.Z m128 xmm k xmm
|
|
// VPUNPCKHQDQ.Z m256 ymm k ymm
|
|
// VPUNPCKHQDQ.Z xmm xmm k xmm
|
|
// VPUNPCKHQDQ.Z ymm ymm k ymm
|
|
// VPUNPCKHQDQ.Z m512 zmm k zmm
|
|
// VPUNPCKHQDQ.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPUNPCKHQDQ.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPUNPCKHQDQ_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPUNPCKHQDQ_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPUNPCKHWD: Unpack and Interleave High-Order Words into Doublewords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPUNPCKHWD m256 ymm ymm
|
|
// VPUNPCKHWD ymm ymm ymm
|
|
// VPUNPCKHWD m128 xmm xmm
|
|
// VPUNPCKHWD xmm xmm xmm
|
|
// VPUNPCKHWD m128 xmm k xmm
|
|
// VPUNPCKHWD m256 ymm k ymm
|
|
// VPUNPCKHWD xmm xmm k xmm
|
|
// VPUNPCKHWD ymm ymm k ymm
|
|
// VPUNPCKHWD m512 zmm k zmm
|
|
// VPUNPCKHWD m512 zmm zmm
|
|
// VPUNPCKHWD zmm zmm k zmm
|
|
// VPUNPCKHWD zmm zmm zmm
|
|
//
|
|
// Construct and append a VPUNPCKHWD instruction to the active function.
|
|
func (c *Context) VPUNPCKHWD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPUNPCKHWD(ops...))
|
|
}
|
|
|
|
// VPUNPCKHWD: Unpack and Interleave High-Order Words into Doublewords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPUNPCKHWD m256 ymm ymm
|
|
// VPUNPCKHWD ymm ymm ymm
|
|
// VPUNPCKHWD m128 xmm xmm
|
|
// VPUNPCKHWD xmm xmm xmm
|
|
// VPUNPCKHWD m128 xmm k xmm
|
|
// VPUNPCKHWD m256 ymm k ymm
|
|
// VPUNPCKHWD xmm xmm k xmm
|
|
// VPUNPCKHWD ymm ymm k ymm
|
|
// VPUNPCKHWD m512 zmm k zmm
|
|
// VPUNPCKHWD m512 zmm zmm
|
|
// VPUNPCKHWD zmm zmm k zmm
|
|
// VPUNPCKHWD zmm zmm zmm
|
|
//
|
|
// Construct and append a VPUNPCKHWD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPUNPCKHWD(ops ...operand.Op) { ctx.VPUNPCKHWD(ops...) }
|
|
|
|
// VPUNPCKHWD_Z: Unpack and Interleave High-Order Words into Doublewords (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPUNPCKHWD.Z m128 xmm k xmm
|
|
// VPUNPCKHWD.Z m256 ymm k ymm
|
|
// VPUNPCKHWD.Z xmm xmm k xmm
|
|
// VPUNPCKHWD.Z ymm ymm k ymm
|
|
// VPUNPCKHWD.Z m512 zmm k zmm
|
|
// VPUNPCKHWD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPUNPCKHWD.Z instruction to the active function.
|
|
func (c *Context) VPUNPCKHWD_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPUNPCKHWD_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPUNPCKHWD_Z: Unpack and Interleave High-Order Words into Doublewords (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPUNPCKHWD.Z m128 xmm k xmm
|
|
// VPUNPCKHWD.Z m256 ymm k ymm
|
|
// VPUNPCKHWD.Z xmm xmm k xmm
|
|
// VPUNPCKHWD.Z ymm ymm k ymm
|
|
// VPUNPCKHWD.Z m512 zmm k zmm
|
|
// VPUNPCKHWD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPUNPCKHWD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPUNPCKHWD_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPUNPCKHWD_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPUNPCKLBW: Unpack and Interleave Low-Order Bytes into Words.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPUNPCKLBW m256 ymm ymm
|
|
// VPUNPCKLBW ymm ymm ymm
|
|
// VPUNPCKLBW m128 xmm xmm
|
|
// VPUNPCKLBW xmm xmm xmm
|
|
// VPUNPCKLBW m128 xmm k xmm
|
|
// VPUNPCKLBW m256 ymm k ymm
|
|
// VPUNPCKLBW xmm xmm k xmm
|
|
// VPUNPCKLBW ymm ymm k ymm
|
|
// VPUNPCKLBW m512 zmm k zmm
|
|
// VPUNPCKLBW m512 zmm zmm
|
|
// VPUNPCKLBW zmm zmm k zmm
|
|
// VPUNPCKLBW zmm zmm zmm
|
|
//
|
|
// Construct and append a VPUNPCKLBW instruction to the active function.
|
|
func (c *Context) VPUNPCKLBW(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPUNPCKLBW(ops...))
|
|
}
|
|
|
|
// VPUNPCKLBW: Unpack and Interleave Low-Order Bytes into Words.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPUNPCKLBW m256 ymm ymm
|
|
// VPUNPCKLBW ymm ymm ymm
|
|
// VPUNPCKLBW m128 xmm xmm
|
|
// VPUNPCKLBW xmm xmm xmm
|
|
// VPUNPCKLBW m128 xmm k xmm
|
|
// VPUNPCKLBW m256 ymm k ymm
|
|
// VPUNPCKLBW xmm xmm k xmm
|
|
// VPUNPCKLBW ymm ymm k ymm
|
|
// VPUNPCKLBW m512 zmm k zmm
|
|
// VPUNPCKLBW m512 zmm zmm
|
|
// VPUNPCKLBW zmm zmm k zmm
|
|
// VPUNPCKLBW zmm zmm zmm
|
|
//
|
|
// Construct and append a VPUNPCKLBW instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPUNPCKLBW(ops ...operand.Op) { ctx.VPUNPCKLBW(ops...) }
|
|
|
|
// VPUNPCKLBW_Z: Unpack and Interleave Low-Order Bytes into Words (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPUNPCKLBW.Z m128 xmm k xmm
|
|
// VPUNPCKLBW.Z m256 ymm k ymm
|
|
// VPUNPCKLBW.Z xmm xmm k xmm
|
|
// VPUNPCKLBW.Z ymm ymm k ymm
|
|
// VPUNPCKLBW.Z m512 zmm k zmm
|
|
// VPUNPCKLBW.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPUNPCKLBW.Z instruction to the active function.
|
|
func (c *Context) VPUNPCKLBW_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPUNPCKLBW_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPUNPCKLBW_Z: Unpack and Interleave Low-Order Bytes into Words (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPUNPCKLBW.Z m128 xmm k xmm
|
|
// VPUNPCKLBW.Z m256 ymm k ymm
|
|
// VPUNPCKLBW.Z xmm xmm k xmm
|
|
// VPUNPCKLBW.Z ymm ymm k ymm
|
|
// VPUNPCKLBW.Z m512 zmm k zmm
|
|
// VPUNPCKLBW.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPUNPCKLBW.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPUNPCKLBW_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPUNPCKLBW_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPUNPCKLDQ: Unpack and Interleave Low-Order Doublewords into Quadwords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPUNPCKLDQ m256 ymm ymm
|
|
// VPUNPCKLDQ ymm ymm ymm
|
|
// VPUNPCKLDQ m128 xmm xmm
|
|
// VPUNPCKLDQ xmm xmm xmm
|
|
// VPUNPCKLDQ m128 xmm k xmm
|
|
// VPUNPCKLDQ m256 ymm k ymm
|
|
// VPUNPCKLDQ xmm xmm k xmm
|
|
// VPUNPCKLDQ ymm ymm k ymm
|
|
// VPUNPCKLDQ m512 zmm k zmm
|
|
// VPUNPCKLDQ m512 zmm zmm
|
|
// VPUNPCKLDQ zmm zmm k zmm
|
|
// VPUNPCKLDQ zmm zmm zmm
|
|
//
|
|
// Construct and append a VPUNPCKLDQ instruction to the active function.
|
|
func (c *Context) VPUNPCKLDQ(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPUNPCKLDQ(ops...))
|
|
}
|
|
|
|
// VPUNPCKLDQ: Unpack and Interleave Low-Order Doublewords into Quadwords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPUNPCKLDQ m256 ymm ymm
|
|
// VPUNPCKLDQ ymm ymm ymm
|
|
// VPUNPCKLDQ m128 xmm xmm
|
|
// VPUNPCKLDQ xmm xmm xmm
|
|
// VPUNPCKLDQ m128 xmm k xmm
|
|
// VPUNPCKLDQ m256 ymm k ymm
|
|
// VPUNPCKLDQ xmm xmm k xmm
|
|
// VPUNPCKLDQ ymm ymm k ymm
|
|
// VPUNPCKLDQ m512 zmm k zmm
|
|
// VPUNPCKLDQ m512 zmm zmm
|
|
// VPUNPCKLDQ zmm zmm k zmm
|
|
// VPUNPCKLDQ zmm zmm zmm
|
|
//
|
|
// Construct and append a VPUNPCKLDQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPUNPCKLDQ(ops ...operand.Op) { ctx.VPUNPCKLDQ(ops...) }
|
|
|
|
// VPUNPCKLDQ_BCST: Unpack and Interleave Low-Order Doublewords into Quadwords (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPUNPCKLDQ.BCST m32 xmm k xmm
|
|
// VPUNPCKLDQ.BCST m32 xmm xmm
|
|
// VPUNPCKLDQ.BCST m32 ymm k ymm
|
|
// VPUNPCKLDQ.BCST m32 ymm ymm
|
|
// VPUNPCKLDQ.BCST m32 zmm k zmm
|
|
// VPUNPCKLDQ.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VPUNPCKLDQ.BCST instruction to the active function.
|
|
func (c *Context) VPUNPCKLDQ_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPUNPCKLDQ_BCST(ops...))
|
|
}
|
|
|
|
// VPUNPCKLDQ_BCST: Unpack and Interleave Low-Order Doublewords into Quadwords (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPUNPCKLDQ.BCST m32 xmm k xmm
|
|
// VPUNPCKLDQ.BCST m32 xmm xmm
|
|
// VPUNPCKLDQ.BCST m32 ymm k ymm
|
|
// VPUNPCKLDQ.BCST m32 ymm ymm
|
|
// VPUNPCKLDQ.BCST m32 zmm k zmm
|
|
// VPUNPCKLDQ.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VPUNPCKLDQ.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPUNPCKLDQ_BCST(ops ...operand.Op) { ctx.VPUNPCKLDQ_BCST(ops...) }
|
|
|
|
// VPUNPCKLDQ_BCST_Z: Unpack and Interleave Low-Order Doublewords into Quadwords (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPUNPCKLDQ.BCST.Z m32 xmm k xmm
|
|
// VPUNPCKLDQ.BCST.Z m32 ymm k ymm
|
|
// VPUNPCKLDQ.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VPUNPCKLDQ.BCST.Z instruction to the active function.
|
|
func (c *Context) VPUNPCKLDQ_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPUNPCKLDQ_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPUNPCKLDQ_BCST_Z: Unpack and Interleave Low-Order Doublewords into Quadwords (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPUNPCKLDQ.BCST.Z m32 xmm k xmm
|
|
// VPUNPCKLDQ.BCST.Z m32 ymm k ymm
|
|
// VPUNPCKLDQ.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VPUNPCKLDQ.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPUNPCKLDQ_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VPUNPCKLDQ_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VPUNPCKLDQ_Z: Unpack and Interleave Low-Order Doublewords into Quadwords (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPUNPCKLDQ.Z m128 xmm k xmm
|
|
// VPUNPCKLDQ.Z m256 ymm k ymm
|
|
// VPUNPCKLDQ.Z xmm xmm k xmm
|
|
// VPUNPCKLDQ.Z ymm ymm k ymm
|
|
// VPUNPCKLDQ.Z m512 zmm k zmm
|
|
// VPUNPCKLDQ.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPUNPCKLDQ.Z instruction to the active function.
|
|
func (c *Context) VPUNPCKLDQ_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPUNPCKLDQ_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPUNPCKLDQ_Z: Unpack and Interleave Low-Order Doublewords into Quadwords (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPUNPCKLDQ.Z m128 xmm k xmm
|
|
// VPUNPCKLDQ.Z m256 ymm k ymm
|
|
// VPUNPCKLDQ.Z xmm xmm k xmm
|
|
// VPUNPCKLDQ.Z ymm ymm k ymm
|
|
// VPUNPCKLDQ.Z m512 zmm k zmm
|
|
// VPUNPCKLDQ.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPUNPCKLDQ.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPUNPCKLDQ_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPUNPCKLDQ_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPUNPCKLQDQ: Unpack and Interleave Low-Order Quadwords into Double Quadwords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPUNPCKLQDQ m256 ymm ymm
|
|
// VPUNPCKLQDQ ymm ymm ymm
|
|
// VPUNPCKLQDQ m128 xmm xmm
|
|
// VPUNPCKLQDQ xmm xmm xmm
|
|
// VPUNPCKLQDQ m128 xmm k xmm
|
|
// VPUNPCKLQDQ m256 ymm k ymm
|
|
// VPUNPCKLQDQ xmm xmm k xmm
|
|
// VPUNPCKLQDQ ymm ymm k ymm
|
|
// VPUNPCKLQDQ m512 zmm k zmm
|
|
// VPUNPCKLQDQ m512 zmm zmm
|
|
// VPUNPCKLQDQ zmm zmm k zmm
|
|
// VPUNPCKLQDQ zmm zmm zmm
|
|
//
|
|
// Construct and append a VPUNPCKLQDQ instruction to the active function.
|
|
func (c *Context) VPUNPCKLQDQ(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPUNPCKLQDQ(ops...))
|
|
}
|
|
|
|
// VPUNPCKLQDQ: Unpack and Interleave Low-Order Quadwords into Double Quadwords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPUNPCKLQDQ m256 ymm ymm
|
|
// VPUNPCKLQDQ ymm ymm ymm
|
|
// VPUNPCKLQDQ m128 xmm xmm
|
|
// VPUNPCKLQDQ xmm xmm xmm
|
|
// VPUNPCKLQDQ m128 xmm k xmm
|
|
// VPUNPCKLQDQ m256 ymm k ymm
|
|
// VPUNPCKLQDQ xmm xmm k xmm
|
|
// VPUNPCKLQDQ ymm ymm k ymm
|
|
// VPUNPCKLQDQ m512 zmm k zmm
|
|
// VPUNPCKLQDQ m512 zmm zmm
|
|
// VPUNPCKLQDQ zmm zmm k zmm
|
|
// VPUNPCKLQDQ zmm zmm zmm
|
|
//
|
|
// Construct and append a VPUNPCKLQDQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPUNPCKLQDQ(ops ...operand.Op) { ctx.VPUNPCKLQDQ(ops...) }
|
|
|
|
// VPUNPCKLQDQ_BCST: Unpack and Interleave Low-Order Quadwords into Double Quadwords (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPUNPCKLQDQ.BCST m64 xmm k xmm
|
|
// VPUNPCKLQDQ.BCST m64 xmm xmm
|
|
// VPUNPCKLQDQ.BCST m64 ymm k ymm
|
|
// VPUNPCKLQDQ.BCST m64 ymm ymm
|
|
// VPUNPCKLQDQ.BCST m64 zmm k zmm
|
|
// VPUNPCKLQDQ.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VPUNPCKLQDQ.BCST instruction to the active function.
|
|
func (c *Context) VPUNPCKLQDQ_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPUNPCKLQDQ_BCST(ops...))
|
|
}
|
|
|
|
// VPUNPCKLQDQ_BCST: Unpack and Interleave Low-Order Quadwords into Double Quadwords (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPUNPCKLQDQ.BCST m64 xmm k xmm
|
|
// VPUNPCKLQDQ.BCST m64 xmm xmm
|
|
// VPUNPCKLQDQ.BCST m64 ymm k ymm
|
|
// VPUNPCKLQDQ.BCST m64 ymm ymm
|
|
// VPUNPCKLQDQ.BCST m64 zmm k zmm
|
|
// VPUNPCKLQDQ.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VPUNPCKLQDQ.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPUNPCKLQDQ_BCST(ops ...operand.Op) { ctx.VPUNPCKLQDQ_BCST(ops...) }
|
|
|
|
// VPUNPCKLQDQ_BCST_Z: Unpack and Interleave Low-Order Quadwords into Double Quadwords (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPUNPCKLQDQ.BCST.Z m64 xmm k xmm
|
|
// VPUNPCKLQDQ.BCST.Z m64 ymm k ymm
|
|
// VPUNPCKLQDQ.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VPUNPCKLQDQ.BCST.Z instruction to the active function.
|
|
func (c *Context) VPUNPCKLQDQ_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPUNPCKLQDQ_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPUNPCKLQDQ_BCST_Z: Unpack and Interleave Low-Order Quadwords into Double Quadwords (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPUNPCKLQDQ.BCST.Z m64 xmm k xmm
|
|
// VPUNPCKLQDQ.BCST.Z m64 ymm k ymm
|
|
// VPUNPCKLQDQ.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VPUNPCKLQDQ.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPUNPCKLQDQ_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VPUNPCKLQDQ_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VPUNPCKLQDQ_Z: Unpack and Interleave Low-Order Quadwords into Double Quadwords (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPUNPCKLQDQ.Z m128 xmm k xmm
|
|
// VPUNPCKLQDQ.Z m256 ymm k ymm
|
|
// VPUNPCKLQDQ.Z xmm xmm k xmm
|
|
// VPUNPCKLQDQ.Z ymm ymm k ymm
|
|
// VPUNPCKLQDQ.Z m512 zmm k zmm
|
|
// VPUNPCKLQDQ.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPUNPCKLQDQ.Z instruction to the active function.
|
|
func (c *Context) VPUNPCKLQDQ_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPUNPCKLQDQ_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPUNPCKLQDQ_Z: Unpack and Interleave Low-Order Quadwords into Double Quadwords (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPUNPCKLQDQ.Z m128 xmm k xmm
|
|
// VPUNPCKLQDQ.Z m256 ymm k ymm
|
|
// VPUNPCKLQDQ.Z xmm xmm k xmm
|
|
// VPUNPCKLQDQ.Z ymm ymm k ymm
|
|
// VPUNPCKLQDQ.Z m512 zmm k zmm
|
|
// VPUNPCKLQDQ.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPUNPCKLQDQ.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPUNPCKLQDQ_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPUNPCKLQDQ_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPUNPCKLWD: Unpack and Interleave Low-Order Words into Doublewords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPUNPCKLWD m256 ymm ymm
|
|
// VPUNPCKLWD ymm ymm ymm
|
|
// VPUNPCKLWD m128 xmm xmm
|
|
// VPUNPCKLWD xmm xmm xmm
|
|
// VPUNPCKLWD m128 xmm k xmm
|
|
// VPUNPCKLWD m256 ymm k ymm
|
|
// VPUNPCKLWD xmm xmm k xmm
|
|
// VPUNPCKLWD ymm ymm k ymm
|
|
// VPUNPCKLWD m512 zmm k zmm
|
|
// VPUNPCKLWD m512 zmm zmm
|
|
// VPUNPCKLWD zmm zmm k zmm
|
|
// VPUNPCKLWD zmm zmm zmm
|
|
//
|
|
// Construct and append a VPUNPCKLWD instruction to the active function.
|
|
func (c *Context) VPUNPCKLWD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPUNPCKLWD(ops...))
|
|
}
|
|
|
|
// VPUNPCKLWD: Unpack and Interleave Low-Order Words into Doublewords.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPUNPCKLWD m256 ymm ymm
|
|
// VPUNPCKLWD ymm ymm ymm
|
|
// VPUNPCKLWD m128 xmm xmm
|
|
// VPUNPCKLWD xmm xmm xmm
|
|
// VPUNPCKLWD m128 xmm k xmm
|
|
// VPUNPCKLWD m256 ymm k ymm
|
|
// VPUNPCKLWD xmm xmm k xmm
|
|
// VPUNPCKLWD ymm ymm k ymm
|
|
// VPUNPCKLWD m512 zmm k zmm
|
|
// VPUNPCKLWD m512 zmm zmm
|
|
// VPUNPCKLWD zmm zmm k zmm
|
|
// VPUNPCKLWD zmm zmm zmm
|
|
//
|
|
// Construct and append a VPUNPCKLWD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPUNPCKLWD(ops ...operand.Op) { ctx.VPUNPCKLWD(ops...) }
|
|
|
|
// VPUNPCKLWD_Z: Unpack and Interleave Low-Order Words into Doublewords (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPUNPCKLWD.Z m128 xmm k xmm
|
|
// VPUNPCKLWD.Z m256 ymm k ymm
|
|
// VPUNPCKLWD.Z xmm xmm k xmm
|
|
// VPUNPCKLWD.Z ymm ymm k ymm
|
|
// VPUNPCKLWD.Z m512 zmm k zmm
|
|
// VPUNPCKLWD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPUNPCKLWD.Z instruction to the active function.
|
|
func (c *Context) VPUNPCKLWD_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPUNPCKLWD_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPUNPCKLWD_Z: Unpack and Interleave Low-Order Words into Doublewords (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPUNPCKLWD.Z m128 xmm k xmm
|
|
// VPUNPCKLWD.Z m256 ymm k ymm
|
|
// VPUNPCKLWD.Z xmm xmm k xmm
|
|
// VPUNPCKLWD.Z ymm ymm k ymm
|
|
// VPUNPCKLWD.Z m512 zmm k zmm
|
|
// VPUNPCKLWD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPUNPCKLWD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPUNPCKLWD_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPUNPCKLWD_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPXOR: Packed Bitwise Logical Exclusive OR.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPXOR m256 ymm ymm
|
|
// VPXOR ymm ymm ymm
|
|
// VPXOR m128 xmm xmm
|
|
// VPXOR xmm xmm xmm
|
|
//
|
|
// Construct and append a VPXOR instruction to the active function.
|
|
func (c *Context) VPXOR(mxy, xy, xy1 operand.Op) {
|
|
c.addinstruction(x86.VPXOR(mxy, xy, xy1))
|
|
}
|
|
|
|
// VPXOR: Packed Bitwise Logical Exclusive OR.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPXOR m256 ymm ymm
|
|
// VPXOR ymm ymm ymm
|
|
// VPXOR m128 xmm xmm
|
|
// VPXOR xmm xmm xmm
|
|
//
|
|
// Construct and append a VPXOR instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPXOR(mxy, xy, xy1 operand.Op) { ctx.VPXOR(mxy, xy, xy1) }
|
|
|
|
// VPXORD: Bitwise Logical Exclusive OR of Packed Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPXORD m128 xmm k xmm
|
|
// VPXORD m128 xmm xmm
|
|
// VPXORD m256 ymm k ymm
|
|
// VPXORD m256 ymm ymm
|
|
// VPXORD xmm xmm k xmm
|
|
// VPXORD xmm xmm xmm
|
|
// VPXORD ymm ymm k ymm
|
|
// VPXORD ymm ymm ymm
|
|
// VPXORD m512 zmm k zmm
|
|
// VPXORD m512 zmm zmm
|
|
// VPXORD zmm zmm k zmm
|
|
// VPXORD zmm zmm zmm
|
|
//
|
|
// Construct and append a VPXORD instruction to the active function.
|
|
func (c *Context) VPXORD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPXORD(ops...))
|
|
}
|
|
|
|
// VPXORD: Bitwise Logical Exclusive OR of Packed Doubleword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPXORD m128 xmm k xmm
|
|
// VPXORD m128 xmm xmm
|
|
// VPXORD m256 ymm k ymm
|
|
// VPXORD m256 ymm ymm
|
|
// VPXORD xmm xmm k xmm
|
|
// VPXORD xmm xmm xmm
|
|
// VPXORD ymm ymm k ymm
|
|
// VPXORD ymm ymm ymm
|
|
// VPXORD m512 zmm k zmm
|
|
// VPXORD m512 zmm zmm
|
|
// VPXORD zmm zmm k zmm
|
|
// VPXORD zmm zmm zmm
|
|
//
|
|
// Construct and append a VPXORD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPXORD(ops ...operand.Op) { ctx.VPXORD(ops...) }
|
|
|
|
// VPXORD_BCST: Bitwise Logical Exclusive OR of Packed Doubleword Integers (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPXORD.BCST m32 xmm k xmm
|
|
// VPXORD.BCST m32 xmm xmm
|
|
// VPXORD.BCST m32 ymm k ymm
|
|
// VPXORD.BCST m32 ymm ymm
|
|
// VPXORD.BCST m32 zmm k zmm
|
|
// VPXORD.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VPXORD.BCST instruction to the active function.
|
|
func (c *Context) VPXORD_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPXORD_BCST(ops...))
|
|
}
|
|
|
|
// VPXORD_BCST: Bitwise Logical Exclusive OR of Packed Doubleword Integers (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPXORD.BCST m32 xmm k xmm
|
|
// VPXORD.BCST m32 xmm xmm
|
|
// VPXORD.BCST m32 ymm k ymm
|
|
// VPXORD.BCST m32 ymm ymm
|
|
// VPXORD.BCST m32 zmm k zmm
|
|
// VPXORD.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VPXORD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPXORD_BCST(ops ...operand.Op) { ctx.VPXORD_BCST(ops...) }
|
|
|
|
// VPXORD_BCST_Z: Bitwise Logical Exclusive OR of Packed Doubleword Integers (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPXORD.BCST.Z m32 xmm k xmm
|
|
// VPXORD.BCST.Z m32 ymm k ymm
|
|
// VPXORD.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VPXORD.BCST.Z instruction to the active function.
|
|
func (c *Context) VPXORD_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPXORD_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPXORD_BCST_Z: Bitwise Logical Exclusive OR of Packed Doubleword Integers (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPXORD.BCST.Z m32 xmm k xmm
|
|
// VPXORD.BCST.Z m32 ymm k ymm
|
|
// VPXORD.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VPXORD.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPXORD_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VPXORD_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VPXORD_Z: Bitwise Logical Exclusive OR of Packed Doubleword Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPXORD.Z m128 xmm k xmm
|
|
// VPXORD.Z m256 ymm k ymm
|
|
// VPXORD.Z xmm xmm k xmm
|
|
// VPXORD.Z ymm ymm k ymm
|
|
// VPXORD.Z m512 zmm k zmm
|
|
// VPXORD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPXORD.Z instruction to the active function.
|
|
func (c *Context) VPXORD_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPXORD_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPXORD_Z: Bitwise Logical Exclusive OR of Packed Doubleword Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPXORD.Z m128 xmm k xmm
|
|
// VPXORD.Z m256 ymm k ymm
|
|
// VPXORD.Z xmm xmm k xmm
|
|
// VPXORD.Z ymm ymm k ymm
|
|
// VPXORD.Z m512 zmm k zmm
|
|
// VPXORD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPXORD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPXORD_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPXORD_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VPXORQ: Bitwise Logical Exclusive OR of Packed Quadword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPXORQ m128 xmm k xmm
|
|
// VPXORQ m128 xmm xmm
|
|
// VPXORQ m256 ymm k ymm
|
|
// VPXORQ m256 ymm ymm
|
|
// VPXORQ xmm xmm k xmm
|
|
// VPXORQ xmm xmm xmm
|
|
// VPXORQ ymm ymm k ymm
|
|
// VPXORQ ymm ymm ymm
|
|
// VPXORQ m512 zmm k zmm
|
|
// VPXORQ m512 zmm zmm
|
|
// VPXORQ zmm zmm k zmm
|
|
// VPXORQ zmm zmm zmm
|
|
//
|
|
// Construct and append a VPXORQ instruction to the active function.
|
|
func (c *Context) VPXORQ(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPXORQ(ops...))
|
|
}
|
|
|
|
// VPXORQ: Bitwise Logical Exclusive OR of Packed Quadword Integers.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPXORQ m128 xmm k xmm
|
|
// VPXORQ m128 xmm xmm
|
|
// VPXORQ m256 ymm k ymm
|
|
// VPXORQ m256 ymm ymm
|
|
// VPXORQ xmm xmm k xmm
|
|
// VPXORQ xmm xmm xmm
|
|
// VPXORQ ymm ymm k ymm
|
|
// VPXORQ ymm ymm ymm
|
|
// VPXORQ m512 zmm k zmm
|
|
// VPXORQ m512 zmm zmm
|
|
// VPXORQ zmm zmm k zmm
|
|
// VPXORQ zmm zmm zmm
|
|
//
|
|
// Construct and append a VPXORQ instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPXORQ(ops ...operand.Op) { ctx.VPXORQ(ops...) }
|
|
|
|
// VPXORQ_BCST: Bitwise Logical Exclusive OR of Packed Quadword Integers (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPXORQ.BCST m64 xmm k xmm
|
|
// VPXORQ.BCST m64 xmm xmm
|
|
// VPXORQ.BCST m64 ymm k ymm
|
|
// VPXORQ.BCST m64 ymm ymm
|
|
// VPXORQ.BCST m64 zmm k zmm
|
|
// VPXORQ.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VPXORQ.BCST instruction to the active function.
|
|
func (c *Context) VPXORQ_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VPXORQ_BCST(ops...))
|
|
}
|
|
|
|
// VPXORQ_BCST: Bitwise Logical Exclusive OR of Packed Quadword Integers (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPXORQ.BCST m64 xmm k xmm
|
|
// VPXORQ.BCST m64 xmm xmm
|
|
// VPXORQ.BCST m64 ymm k ymm
|
|
// VPXORQ.BCST m64 ymm ymm
|
|
// VPXORQ.BCST m64 zmm k zmm
|
|
// VPXORQ.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VPXORQ.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPXORQ_BCST(ops ...operand.Op) { ctx.VPXORQ_BCST(ops...) }
|
|
|
|
// VPXORQ_BCST_Z: Bitwise Logical Exclusive OR of Packed Quadword Integers (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPXORQ.BCST.Z m64 xmm k xmm
|
|
// VPXORQ.BCST.Z m64 ymm k ymm
|
|
// VPXORQ.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VPXORQ.BCST.Z instruction to the active function.
|
|
func (c *Context) VPXORQ_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPXORQ_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPXORQ_BCST_Z: Bitwise Logical Exclusive OR of Packed Quadword Integers (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPXORQ.BCST.Z m64 xmm k xmm
|
|
// VPXORQ.BCST.Z m64 ymm k ymm
|
|
// VPXORQ.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VPXORQ.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPXORQ_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VPXORQ_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VPXORQ_Z: Bitwise Logical Exclusive OR of Packed Quadword Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPXORQ.Z m128 xmm k xmm
|
|
// VPXORQ.Z m256 ymm k ymm
|
|
// VPXORQ.Z xmm xmm k xmm
|
|
// VPXORQ.Z ymm ymm k ymm
|
|
// VPXORQ.Z m512 zmm k zmm
|
|
// VPXORQ.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPXORQ.Z instruction to the active function.
|
|
func (c *Context) VPXORQ_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VPXORQ_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VPXORQ_Z: Bitwise Logical Exclusive OR of Packed Quadword Integers (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VPXORQ.Z m128 xmm k xmm
|
|
// VPXORQ.Z m256 ymm k ymm
|
|
// VPXORQ.Z xmm xmm k xmm
|
|
// VPXORQ.Z ymm ymm k ymm
|
|
// VPXORQ.Z m512 zmm k zmm
|
|
// VPXORQ.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VPXORQ.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VPXORQ_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPXORQ_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VRANGEPD: Range Restriction Calculation For Packed Pairs of Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRANGEPD imm8 m128 xmm k xmm
|
|
// VRANGEPD imm8 m128 xmm xmm
|
|
// VRANGEPD imm8 m256 ymm k ymm
|
|
// VRANGEPD imm8 m256 ymm ymm
|
|
// VRANGEPD imm8 xmm xmm k xmm
|
|
// VRANGEPD imm8 xmm xmm xmm
|
|
// VRANGEPD imm8 ymm ymm k ymm
|
|
// VRANGEPD imm8 ymm ymm ymm
|
|
// VRANGEPD imm8 m512 zmm k zmm
|
|
// VRANGEPD imm8 m512 zmm zmm
|
|
// VRANGEPD imm8 zmm zmm k zmm
|
|
// VRANGEPD imm8 zmm zmm zmm
|
|
//
|
|
// Construct and append a VRANGEPD instruction to the active function.
|
|
func (c *Context) VRANGEPD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VRANGEPD(ops...))
|
|
}
|
|
|
|
// VRANGEPD: Range Restriction Calculation For Packed Pairs of Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRANGEPD imm8 m128 xmm k xmm
|
|
// VRANGEPD imm8 m128 xmm xmm
|
|
// VRANGEPD imm8 m256 ymm k ymm
|
|
// VRANGEPD imm8 m256 ymm ymm
|
|
// VRANGEPD imm8 xmm xmm k xmm
|
|
// VRANGEPD imm8 xmm xmm xmm
|
|
// VRANGEPD imm8 ymm ymm k ymm
|
|
// VRANGEPD imm8 ymm ymm ymm
|
|
// VRANGEPD imm8 m512 zmm k zmm
|
|
// VRANGEPD imm8 m512 zmm zmm
|
|
// VRANGEPD imm8 zmm zmm k zmm
|
|
// VRANGEPD imm8 zmm zmm zmm
|
|
//
|
|
// Construct and append a VRANGEPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VRANGEPD(ops ...operand.Op) { ctx.VRANGEPD(ops...) }
|
|
|
|
// VRANGEPD_BCST: Range Restriction Calculation For Packed Pairs of Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRANGEPD.BCST imm8 m64 xmm k xmm
|
|
// VRANGEPD.BCST imm8 m64 xmm xmm
|
|
// VRANGEPD.BCST imm8 m64 ymm k ymm
|
|
// VRANGEPD.BCST imm8 m64 ymm ymm
|
|
// VRANGEPD.BCST imm8 m64 zmm k zmm
|
|
// VRANGEPD.BCST imm8 m64 zmm zmm
|
|
//
|
|
// Construct and append a VRANGEPD.BCST instruction to the active function.
|
|
func (c *Context) VRANGEPD_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VRANGEPD_BCST(ops...))
|
|
}
|
|
|
|
// VRANGEPD_BCST: Range Restriction Calculation For Packed Pairs of Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRANGEPD.BCST imm8 m64 xmm k xmm
|
|
// VRANGEPD.BCST imm8 m64 xmm xmm
|
|
// VRANGEPD.BCST imm8 m64 ymm k ymm
|
|
// VRANGEPD.BCST imm8 m64 ymm ymm
|
|
// VRANGEPD.BCST imm8 m64 zmm k zmm
|
|
// VRANGEPD.BCST imm8 m64 zmm zmm
|
|
//
|
|
// Construct and append a VRANGEPD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VRANGEPD_BCST(ops ...operand.Op) { ctx.VRANGEPD_BCST(ops...) }
|
|
|
|
// VRANGEPD_BCST_Z: Range Restriction Calculation For Packed Pairs of Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRANGEPD.BCST.Z imm8 m64 xmm k xmm
|
|
// VRANGEPD.BCST.Z imm8 m64 ymm k ymm
|
|
// VRANGEPD.BCST.Z imm8 m64 zmm k zmm
|
|
//
|
|
// Construct and append a VRANGEPD.BCST.Z instruction to the active function.
|
|
func (c *Context) VRANGEPD_BCST_Z(i, m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VRANGEPD_BCST_Z(i, m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VRANGEPD_BCST_Z: Range Restriction Calculation For Packed Pairs of Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRANGEPD.BCST.Z imm8 m64 xmm k xmm
|
|
// VRANGEPD.BCST.Z imm8 m64 ymm k ymm
|
|
// VRANGEPD.BCST.Z imm8 m64 zmm k zmm
|
|
//
|
|
// Construct and append a VRANGEPD.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VRANGEPD_BCST_Z(i, m, xyz, k, xyz1 operand.Op) { ctx.VRANGEPD_BCST_Z(i, m, xyz, k, xyz1) }
|
|
|
|
// VRANGEPD_SAE: Range Restriction Calculation For Packed Pairs of Double-Precision Floating-Point Values (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRANGEPD.SAE imm8 zmm zmm k zmm
|
|
// VRANGEPD.SAE imm8 zmm zmm zmm
|
|
//
|
|
// Construct and append a VRANGEPD.SAE instruction to the active function.
|
|
func (c *Context) VRANGEPD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VRANGEPD_SAE(ops...))
|
|
}
|
|
|
|
// VRANGEPD_SAE: Range Restriction Calculation For Packed Pairs of Double-Precision Floating-Point Values (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRANGEPD.SAE imm8 zmm zmm k zmm
|
|
// VRANGEPD.SAE imm8 zmm zmm zmm
|
|
//
|
|
// Construct and append a VRANGEPD.SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VRANGEPD_SAE(ops ...operand.Op) { ctx.VRANGEPD_SAE(ops...) }
|
|
|
|
// VRANGEPD_SAE_Z: Range Restriction Calculation For Packed Pairs of Double-Precision Floating-Point Values (Suppress All Exceptions, Zeroing Masking).
//
// Forms:
//
//	VRANGEPD.SAE.Z imm8 zmm zmm k zmm
//
// Construct and append a VRANGEPD.SAE.Z instruction to the active function.
func (c *Context) VRANGEPD_SAE_Z(i, z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VRANGEPD_SAE_Z(i, z, z1, k, z2))
}

// VRANGEPD_SAE_Z: Range Restriction Calculation For Packed Pairs of Double-Precision Floating-Point Values (Suppress All Exceptions, Zeroing Masking).
//
// Forms:
//
//	VRANGEPD.SAE.Z imm8 zmm zmm k zmm
//
// Construct and append a VRANGEPD.SAE.Z instruction to the active function.
// Operates on the global context.
func VRANGEPD_SAE_Z(i, z, z1, k, z2 operand.Op) { ctx.VRANGEPD_SAE_Z(i, z, z1, k, z2) }
|
|
|
|
// VRANGEPD_Z: Range Restriction Calculation For Packed Pairs of Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VRANGEPD.Z imm8 m128 xmm k xmm
//	VRANGEPD.Z imm8 m256 ymm k ymm
//	VRANGEPD.Z imm8 xmm xmm k xmm
//	VRANGEPD.Z imm8 ymm ymm k ymm
//	VRANGEPD.Z imm8 m512 zmm k zmm
//	VRANGEPD.Z imm8 zmm zmm k zmm
//
// Construct and append a VRANGEPD.Z instruction to the active function.
func (c *Context) VRANGEPD_Z(i, mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VRANGEPD_Z(i, mxyz, xyz, k, xyz1))
}

// VRANGEPD_Z: Range Restriction Calculation For Packed Pairs of Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VRANGEPD.Z imm8 m128 xmm k xmm
//	VRANGEPD.Z imm8 m256 ymm k ymm
//	VRANGEPD.Z imm8 xmm xmm k xmm
//	VRANGEPD.Z imm8 ymm ymm k ymm
//	VRANGEPD.Z imm8 m512 zmm k zmm
//	VRANGEPD.Z imm8 zmm zmm k zmm
//
// Construct and append a VRANGEPD.Z instruction to the active function.
// Operates on the global context.
func VRANGEPD_Z(i, mxyz, xyz, k, xyz1 operand.Op) { ctx.VRANGEPD_Z(i, mxyz, xyz, k, xyz1) }
|
|
|
|
// VRANGEPS: Range Restriction Calculation For Packed Pairs of Single-Precision Floating-Point Values.
//
// Forms:
//
//	VRANGEPS imm8 m128 xmm k xmm
//	VRANGEPS imm8 m128 xmm xmm
//	VRANGEPS imm8 m256 ymm k ymm
//	VRANGEPS imm8 m256 ymm ymm
//	VRANGEPS imm8 xmm xmm k xmm
//	VRANGEPS imm8 xmm xmm xmm
//	VRANGEPS imm8 ymm ymm k ymm
//	VRANGEPS imm8 ymm ymm ymm
//	VRANGEPS imm8 m512 zmm k zmm
//	VRANGEPS imm8 m512 zmm zmm
//	VRANGEPS imm8 zmm zmm k zmm
//	VRANGEPS imm8 zmm zmm zmm
//
// Construct and append a VRANGEPS instruction to the active function.
func (c *Context) VRANGEPS(ops ...operand.Op) {
	c.addinstruction(x86.VRANGEPS(ops...))
}

// VRANGEPS: Range Restriction Calculation For Packed Pairs of Single-Precision Floating-Point Values.
//
// Forms:
//
//	VRANGEPS imm8 m128 xmm k xmm
//	VRANGEPS imm8 m128 xmm xmm
//	VRANGEPS imm8 m256 ymm k ymm
//	VRANGEPS imm8 m256 ymm ymm
//	VRANGEPS imm8 xmm xmm k xmm
//	VRANGEPS imm8 xmm xmm xmm
//	VRANGEPS imm8 ymm ymm k ymm
//	VRANGEPS imm8 ymm ymm ymm
//	VRANGEPS imm8 m512 zmm k zmm
//	VRANGEPS imm8 m512 zmm zmm
//	VRANGEPS imm8 zmm zmm k zmm
//	VRANGEPS imm8 zmm zmm zmm
//
// Construct and append a VRANGEPS instruction to the active function.
// Operates on the global context.
func VRANGEPS(ops ...operand.Op) { ctx.VRANGEPS(ops...) }
|
|
|
|
// VRANGEPS_BCST: Range Restriction Calculation For Packed Pairs of Single-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VRANGEPS.BCST imm8 m32 xmm k xmm
//	VRANGEPS.BCST imm8 m32 xmm xmm
//	VRANGEPS.BCST imm8 m32 ymm k ymm
//	VRANGEPS.BCST imm8 m32 ymm ymm
//	VRANGEPS.BCST imm8 m32 zmm k zmm
//	VRANGEPS.BCST imm8 m32 zmm zmm
//
// Construct and append a VRANGEPS.BCST instruction to the active function.
func (c *Context) VRANGEPS_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VRANGEPS_BCST(ops...))
}

// VRANGEPS_BCST: Range Restriction Calculation For Packed Pairs of Single-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VRANGEPS.BCST imm8 m32 xmm k xmm
//	VRANGEPS.BCST imm8 m32 xmm xmm
//	VRANGEPS.BCST imm8 m32 ymm k ymm
//	VRANGEPS.BCST imm8 m32 ymm ymm
//	VRANGEPS.BCST imm8 m32 zmm k zmm
//	VRANGEPS.BCST imm8 m32 zmm zmm
//
// Construct and append a VRANGEPS.BCST instruction to the active function.
// Operates on the global context.
func VRANGEPS_BCST(ops ...operand.Op) { ctx.VRANGEPS_BCST(ops...) }
|
|
|
|
// VRANGEPS_BCST_Z: Range Restriction Calculation For Packed Pairs of Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VRANGEPS.BCST.Z imm8 m32 xmm k xmm
//	VRANGEPS.BCST.Z imm8 m32 ymm k ymm
//	VRANGEPS.BCST.Z imm8 m32 zmm k zmm
//
// Construct and append a VRANGEPS.BCST.Z instruction to the active function.
func (c *Context) VRANGEPS_BCST_Z(i, m, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VRANGEPS_BCST_Z(i, m, xyz, k, xyz1))
}

// VRANGEPS_BCST_Z: Range Restriction Calculation For Packed Pairs of Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VRANGEPS.BCST.Z imm8 m32 xmm k xmm
//	VRANGEPS.BCST.Z imm8 m32 ymm k ymm
//	VRANGEPS.BCST.Z imm8 m32 zmm k zmm
//
// Construct and append a VRANGEPS.BCST.Z instruction to the active function.
// Operates on the global context.
func VRANGEPS_BCST_Z(i, m, xyz, k, xyz1 operand.Op) { ctx.VRANGEPS_BCST_Z(i, m, xyz, k, xyz1) }
|
|
|
|
// VRANGEPS_SAE: Range Restriction Calculation For Packed Pairs of Single-Precision Floating-Point Values (Suppress All Exceptions).
//
// Forms:
//
//	VRANGEPS.SAE imm8 zmm zmm k zmm
//	VRANGEPS.SAE imm8 zmm zmm zmm
//
// Construct and append a VRANGEPS.SAE instruction to the active function.
func (c *Context) VRANGEPS_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VRANGEPS_SAE(ops...))
}

// VRANGEPS_SAE: Range Restriction Calculation For Packed Pairs of Single-Precision Floating-Point Values (Suppress All Exceptions).
//
// Forms:
//
//	VRANGEPS.SAE imm8 zmm zmm k zmm
//	VRANGEPS.SAE imm8 zmm zmm zmm
//
// Construct and append a VRANGEPS.SAE instruction to the active function.
// Operates on the global context.
func VRANGEPS_SAE(ops ...operand.Op) { ctx.VRANGEPS_SAE(ops...) }
|
|
|
|
// VRANGEPS_SAE_Z: Range Restriction Calculation For Packed Pairs of Single-Precision Floating-Point Values (Suppress All Exceptions, Zeroing Masking).
//
// Forms:
//
//	VRANGEPS.SAE.Z imm8 zmm zmm k zmm
//
// Construct and append a VRANGEPS.SAE.Z instruction to the active function.
func (c *Context) VRANGEPS_SAE_Z(i, z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VRANGEPS_SAE_Z(i, z, z1, k, z2))
}

// VRANGEPS_SAE_Z: Range Restriction Calculation For Packed Pairs of Single-Precision Floating-Point Values (Suppress All Exceptions, Zeroing Masking).
//
// Forms:
//
//	VRANGEPS.SAE.Z imm8 zmm zmm k zmm
//
// Construct and append a VRANGEPS.SAE.Z instruction to the active function.
// Operates on the global context.
func VRANGEPS_SAE_Z(i, z, z1, k, z2 operand.Op) { ctx.VRANGEPS_SAE_Z(i, z, z1, k, z2) }
|
|
|
|
// VRANGEPS_Z: Range Restriction Calculation For Packed Pairs of Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VRANGEPS.Z imm8 m128 xmm k xmm
//	VRANGEPS.Z imm8 m256 ymm k ymm
//	VRANGEPS.Z imm8 xmm xmm k xmm
//	VRANGEPS.Z imm8 ymm ymm k ymm
//	VRANGEPS.Z imm8 m512 zmm k zmm
//	VRANGEPS.Z imm8 zmm zmm k zmm
//
// Construct and append a VRANGEPS.Z instruction to the active function.
func (c *Context) VRANGEPS_Z(i, mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VRANGEPS_Z(i, mxyz, xyz, k, xyz1))
}

// VRANGEPS_Z: Range Restriction Calculation For Packed Pairs of Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VRANGEPS.Z imm8 m128 xmm k xmm
//	VRANGEPS.Z imm8 m256 ymm k ymm
//	VRANGEPS.Z imm8 xmm xmm k xmm
//	VRANGEPS.Z imm8 ymm ymm k ymm
//	VRANGEPS.Z imm8 m512 zmm k zmm
//	VRANGEPS.Z imm8 zmm zmm k zmm
//
// Construct and append a VRANGEPS.Z instruction to the active function.
// Operates on the global context.
func VRANGEPS_Z(i, mxyz, xyz, k, xyz1 operand.Op) { ctx.VRANGEPS_Z(i, mxyz, xyz, k, xyz1) }
|
|
|
|
// VRANGESD: Range Restriction Calculation For a pair of Scalar Double-Precision Floating-Point Values.
//
// Forms:
//
//	VRANGESD imm8 m64 xmm k xmm
//	VRANGESD imm8 m64 xmm xmm
//	VRANGESD imm8 xmm xmm k xmm
//	VRANGESD imm8 xmm xmm xmm
//
// Construct and append a VRANGESD instruction to the active function.
func (c *Context) VRANGESD(ops ...operand.Op) {
	c.addinstruction(x86.VRANGESD(ops...))
}

// VRANGESD: Range Restriction Calculation For a pair of Scalar Double-Precision Floating-Point Values.
//
// Forms:
//
//	VRANGESD imm8 m64 xmm k xmm
//	VRANGESD imm8 m64 xmm xmm
//	VRANGESD imm8 xmm xmm k xmm
//	VRANGESD imm8 xmm xmm xmm
//
// Construct and append a VRANGESD instruction to the active function.
// Operates on the global context.
func VRANGESD(ops ...operand.Op) { ctx.VRANGESD(ops...) }
|
|
|
|
// VRANGESD_SAE: Range Restriction Calculation For a pair of Scalar Double-Precision Floating-Point Values (Suppress All Exceptions).
//
// Forms:
//
//	VRANGESD.SAE imm8 xmm xmm k xmm
//	VRANGESD.SAE imm8 xmm xmm xmm
//
// Construct and append a VRANGESD.SAE instruction to the active function.
func (c *Context) VRANGESD_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VRANGESD_SAE(ops...))
}

// VRANGESD_SAE: Range Restriction Calculation For a pair of Scalar Double-Precision Floating-Point Values (Suppress All Exceptions).
//
// Forms:
//
//	VRANGESD.SAE imm8 xmm xmm k xmm
//	VRANGESD.SAE imm8 xmm xmm xmm
//
// Construct and append a VRANGESD.SAE instruction to the active function.
// Operates on the global context.
func VRANGESD_SAE(ops ...operand.Op) { ctx.VRANGESD_SAE(ops...) }
|
|
|
|
// VRANGESD_SAE_Z: Range Restriction Calculation For a pair of Scalar Double-Precision Floating-Point Values (Suppress All Exceptions, Zeroing Masking).
//
// Forms:
//
//	VRANGESD.SAE.Z imm8 xmm xmm k xmm
//
// Construct and append a VRANGESD.SAE.Z instruction to the active function.
func (c *Context) VRANGESD_SAE_Z(i, x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VRANGESD_SAE_Z(i, x, x1, k, x2))
}

// VRANGESD_SAE_Z: Range Restriction Calculation For a pair of Scalar Double-Precision Floating-Point Values (Suppress All Exceptions, Zeroing Masking).
//
// Forms:
//
//	VRANGESD.SAE.Z imm8 xmm xmm k xmm
//
// Construct and append a VRANGESD.SAE.Z instruction to the active function.
// Operates on the global context.
func VRANGESD_SAE_Z(i, x, x1, k, x2 operand.Op) { ctx.VRANGESD_SAE_Z(i, x, x1, k, x2) }
|
|
|
|
// VRANGESD_Z: Range Restriction Calculation For a pair of Scalar Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VRANGESD.Z imm8 m64 xmm k xmm
//	VRANGESD.Z imm8 xmm xmm k xmm
//
// Construct and append a VRANGESD.Z instruction to the active function.
func (c *Context) VRANGESD_Z(i, mx, x, k, x1 operand.Op) {
	c.addinstruction(x86.VRANGESD_Z(i, mx, x, k, x1))
}

// VRANGESD_Z: Range Restriction Calculation For a pair of Scalar Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VRANGESD.Z imm8 m64 xmm k xmm
//	VRANGESD.Z imm8 xmm xmm k xmm
//
// Construct and append a VRANGESD.Z instruction to the active function.
// Operates on the global context.
func VRANGESD_Z(i, mx, x, k, x1 operand.Op) { ctx.VRANGESD_Z(i, mx, x, k, x1) }
|
|
|
|
// VRANGESS: Range Restriction Calculation For a pair of Scalar Single-Precision Floating-Point Values.
//
// Forms:
//
//	VRANGESS imm8 m32 xmm k xmm
//	VRANGESS imm8 m32 xmm xmm
//	VRANGESS imm8 xmm xmm k xmm
//	VRANGESS imm8 xmm xmm xmm
//
// Construct and append a VRANGESS instruction to the active function.
func (c *Context) VRANGESS(ops ...operand.Op) {
	c.addinstruction(x86.VRANGESS(ops...))
}

// VRANGESS: Range Restriction Calculation For a pair of Scalar Single-Precision Floating-Point Values.
//
// Forms:
//
//	VRANGESS imm8 m32 xmm k xmm
//	VRANGESS imm8 m32 xmm xmm
//	VRANGESS imm8 xmm xmm k xmm
//	VRANGESS imm8 xmm xmm xmm
//
// Construct and append a VRANGESS instruction to the active function.
// Operates on the global context.
func VRANGESS(ops ...operand.Op) { ctx.VRANGESS(ops...) }
|
|
|
|
// VRANGESS_SAE: Range Restriction Calculation For a pair of Scalar Single-Precision Floating-Point Values (Suppress All Exceptions).
//
// Forms:
//
//	VRANGESS.SAE imm8 xmm xmm k xmm
//	VRANGESS.SAE imm8 xmm xmm xmm
//
// Construct and append a VRANGESS.SAE instruction to the active function.
func (c *Context) VRANGESS_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VRANGESS_SAE(ops...))
}

// VRANGESS_SAE: Range Restriction Calculation For a pair of Scalar Single-Precision Floating-Point Values (Suppress All Exceptions).
//
// Forms:
//
//	VRANGESS.SAE imm8 xmm xmm k xmm
//	VRANGESS.SAE imm8 xmm xmm xmm
//
// Construct and append a VRANGESS.SAE instruction to the active function.
// Operates on the global context.
func VRANGESS_SAE(ops ...operand.Op) { ctx.VRANGESS_SAE(ops...) }
|
|
|
|
// VRANGESS_SAE_Z: Range Restriction Calculation For a pair of Scalar Single-Precision Floating-Point Values (Suppress All Exceptions, Zeroing Masking).
//
// Forms:
//
//	VRANGESS.SAE.Z imm8 xmm xmm k xmm
//
// Construct and append a VRANGESS.SAE.Z instruction to the active function.
func (c *Context) VRANGESS_SAE_Z(i, x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VRANGESS_SAE_Z(i, x, x1, k, x2))
}

// VRANGESS_SAE_Z: Range Restriction Calculation For a pair of Scalar Single-Precision Floating-Point Values (Suppress All Exceptions, Zeroing Masking).
//
// Forms:
//
//	VRANGESS.SAE.Z imm8 xmm xmm k xmm
//
// Construct and append a VRANGESS.SAE.Z instruction to the active function.
// Operates on the global context.
func VRANGESS_SAE_Z(i, x, x1, k, x2 operand.Op) { ctx.VRANGESS_SAE_Z(i, x, x1, k, x2) }
|
|
|
|
// VRANGESS_Z: Range Restriction Calculation For a pair of Scalar Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VRANGESS.Z imm8 m32 xmm k xmm
//	VRANGESS.Z imm8 xmm xmm k xmm
//
// Construct and append a VRANGESS.Z instruction to the active function.
func (c *Context) VRANGESS_Z(i, mx, x, k, x1 operand.Op) {
	c.addinstruction(x86.VRANGESS_Z(i, mx, x, k, x1))
}

// VRANGESS_Z: Range Restriction Calculation For a pair of Scalar Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VRANGESS.Z imm8 m32 xmm k xmm
//	VRANGESS.Z imm8 xmm xmm k xmm
//
// Construct and append a VRANGESS.Z instruction to the active function.
// Operates on the global context.
func VRANGESS_Z(i, mx, x, k, x1 operand.Op) { ctx.VRANGESS_Z(i, mx, x, k, x1) }
|
|
|
|
// VRCP14PD: Compute Approximate Reciprocals of Packed Double-Precision Floating-Point Values.
//
// Forms:
//
//	VRCP14PD m128 k xmm
//	VRCP14PD m128 xmm
//	VRCP14PD m256 k ymm
//	VRCP14PD m256 ymm
//	VRCP14PD xmm k xmm
//	VRCP14PD xmm xmm
//	VRCP14PD ymm k ymm
//	VRCP14PD ymm ymm
//	VRCP14PD m512 k zmm
//	VRCP14PD m512 zmm
//	VRCP14PD zmm k zmm
//	VRCP14PD zmm zmm
//
// Construct and append a VRCP14PD instruction to the active function.
func (c *Context) VRCP14PD(ops ...operand.Op) {
	c.addinstruction(x86.VRCP14PD(ops...))
}

// VRCP14PD: Compute Approximate Reciprocals of Packed Double-Precision Floating-Point Values.
//
// Forms:
//
//	VRCP14PD m128 k xmm
//	VRCP14PD m128 xmm
//	VRCP14PD m256 k ymm
//	VRCP14PD m256 ymm
//	VRCP14PD xmm k xmm
//	VRCP14PD xmm xmm
//	VRCP14PD ymm k ymm
//	VRCP14PD ymm ymm
//	VRCP14PD m512 k zmm
//	VRCP14PD m512 zmm
//	VRCP14PD zmm k zmm
//	VRCP14PD zmm zmm
//
// Construct and append a VRCP14PD instruction to the active function.
// Operates on the global context.
func VRCP14PD(ops ...operand.Op) { ctx.VRCP14PD(ops...) }
|
|
|
|
// VRCP14PD_BCST: Compute Approximate Reciprocals of Packed Double-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VRCP14PD.BCST m64 k xmm
//	VRCP14PD.BCST m64 k ymm
//	VRCP14PD.BCST m64 xmm
//	VRCP14PD.BCST m64 ymm
//	VRCP14PD.BCST m64 k zmm
//	VRCP14PD.BCST m64 zmm
//
// Construct and append a VRCP14PD.BCST instruction to the active function.
func (c *Context) VRCP14PD_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VRCP14PD_BCST(ops...))
}

// VRCP14PD_BCST: Compute Approximate Reciprocals of Packed Double-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VRCP14PD.BCST m64 k xmm
//	VRCP14PD.BCST m64 k ymm
//	VRCP14PD.BCST m64 xmm
//	VRCP14PD.BCST m64 ymm
//	VRCP14PD.BCST m64 k zmm
//	VRCP14PD.BCST m64 zmm
//
// Construct and append a VRCP14PD.BCST instruction to the active function.
// Operates on the global context.
func VRCP14PD_BCST(ops ...operand.Op) { ctx.VRCP14PD_BCST(ops...) }
|
|
|
|
// VRCP14PD_BCST_Z: Compute Approximate Reciprocals of Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VRCP14PD.BCST.Z m64 k xmm
//	VRCP14PD.BCST.Z m64 k ymm
//	VRCP14PD.BCST.Z m64 k zmm
//
// Construct and append a VRCP14PD.BCST.Z instruction to the active function.
func (c *Context) VRCP14PD_BCST_Z(m, k, xyz operand.Op) {
	c.addinstruction(x86.VRCP14PD_BCST_Z(m, k, xyz))
}

// VRCP14PD_BCST_Z: Compute Approximate Reciprocals of Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VRCP14PD.BCST.Z m64 k xmm
//	VRCP14PD.BCST.Z m64 k ymm
//	VRCP14PD.BCST.Z m64 k zmm
//
// Construct and append a VRCP14PD.BCST.Z instruction to the active function.
// Operates on the global context.
func VRCP14PD_BCST_Z(m, k, xyz operand.Op) { ctx.VRCP14PD_BCST_Z(m, k, xyz) }
|
|
|
|
// VRCP14PD_Z: Compute Approximate Reciprocals of Packed Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VRCP14PD.Z m128 k xmm
//	VRCP14PD.Z m256 k ymm
//	VRCP14PD.Z xmm k xmm
//	VRCP14PD.Z ymm k ymm
//	VRCP14PD.Z m512 k zmm
//	VRCP14PD.Z zmm k zmm
//
// Construct and append a VRCP14PD.Z instruction to the active function.
func (c *Context) VRCP14PD_Z(mxyz, k, xyz operand.Op) {
	c.addinstruction(x86.VRCP14PD_Z(mxyz, k, xyz))
}

// VRCP14PD_Z: Compute Approximate Reciprocals of Packed Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VRCP14PD.Z m128 k xmm
//	VRCP14PD.Z m256 k ymm
//	VRCP14PD.Z xmm k xmm
//	VRCP14PD.Z ymm k ymm
//	VRCP14PD.Z m512 k zmm
//	VRCP14PD.Z zmm k zmm
//
// Construct and append a VRCP14PD.Z instruction to the active function.
// Operates on the global context.
func VRCP14PD_Z(mxyz, k, xyz operand.Op) { ctx.VRCP14PD_Z(mxyz, k, xyz) }
|
|
|
|
// VRCP14PS: Compute Approximate Reciprocals of Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	VRCP14PS m128 k xmm
//	VRCP14PS m128 xmm
//	VRCP14PS m256 k ymm
//	VRCP14PS m256 ymm
//	VRCP14PS xmm k xmm
//	VRCP14PS xmm xmm
//	VRCP14PS ymm k ymm
//	VRCP14PS ymm ymm
//	VRCP14PS m512 k zmm
//	VRCP14PS m512 zmm
//	VRCP14PS zmm k zmm
//	VRCP14PS zmm zmm
//
// Construct and append a VRCP14PS instruction to the active function.
func (c *Context) VRCP14PS(ops ...operand.Op) {
	c.addinstruction(x86.VRCP14PS(ops...))
}

// VRCP14PS: Compute Approximate Reciprocals of Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	VRCP14PS m128 k xmm
//	VRCP14PS m128 xmm
//	VRCP14PS m256 k ymm
//	VRCP14PS m256 ymm
//	VRCP14PS xmm k xmm
//	VRCP14PS xmm xmm
//	VRCP14PS ymm k ymm
//	VRCP14PS ymm ymm
//	VRCP14PS m512 k zmm
//	VRCP14PS m512 zmm
//	VRCP14PS zmm k zmm
//	VRCP14PS zmm zmm
//
// Construct and append a VRCP14PS instruction to the active function.
// Operates on the global context.
func VRCP14PS(ops ...operand.Op) { ctx.VRCP14PS(ops...) }
|
|
|
|
// VRCP14PS_BCST: Compute Approximate Reciprocals of Packed Single-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VRCP14PS.BCST m32 k xmm
//	VRCP14PS.BCST m32 k ymm
//	VRCP14PS.BCST m32 xmm
//	VRCP14PS.BCST m32 ymm
//	VRCP14PS.BCST m32 k zmm
//	VRCP14PS.BCST m32 zmm
//
// Construct and append a VRCP14PS.BCST instruction to the active function.
func (c *Context) VRCP14PS_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VRCP14PS_BCST(ops...))
}

// VRCP14PS_BCST: Compute Approximate Reciprocals of Packed Single-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VRCP14PS.BCST m32 k xmm
//	VRCP14PS.BCST m32 k ymm
//	VRCP14PS.BCST m32 xmm
//	VRCP14PS.BCST m32 ymm
//	VRCP14PS.BCST m32 k zmm
//	VRCP14PS.BCST m32 zmm
//
// Construct and append a VRCP14PS.BCST instruction to the active function.
// Operates on the global context.
func VRCP14PS_BCST(ops ...operand.Op) { ctx.VRCP14PS_BCST(ops...) }
|
|
|
|
// VRCP14PS_BCST_Z: Compute Approximate Reciprocals of Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VRCP14PS.BCST.Z m32 k xmm
//	VRCP14PS.BCST.Z m32 k ymm
//	VRCP14PS.BCST.Z m32 k zmm
//
// Construct and append a VRCP14PS.BCST.Z instruction to the active function.
func (c *Context) VRCP14PS_BCST_Z(m, k, xyz operand.Op) {
	c.addinstruction(x86.VRCP14PS_BCST_Z(m, k, xyz))
}

// VRCP14PS_BCST_Z: Compute Approximate Reciprocals of Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VRCP14PS.BCST.Z m32 k xmm
//	VRCP14PS.BCST.Z m32 k ymm
//	VRCP14PS.BCST.Z m32 k zmm
//
// Construct and append a VRCP14PS.BCST.Z instruction to the active function.
// Operates on the global context.
func VRCP14PS_BCST_Z(m, k, xyz operand.Op) { ctx.VRCP14PS_BCST_Z(m, k, xyz) }
|
|
|
|
// VRCP14PS_Z: Compute Approximate Reciprocals of Packed Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VRCP14PS.Z m128 k xmm
//	VRCP14PS.Z m256 k ymm
//	VRCP14PS.Z xmm k xmm
//	VRCP14PS.Z ymm k ymm
//	VRCP14PS.Z m512 k zmm
//	VRCP14PS.Z zmm k zmm
//
// Construct and append a VRCP14PS.Z instruction to the active function.
func (c *Context) VRCP14PS_Z(mxyz, k, xyz operand.Op) {
	c.addinstruction(x86.VRCP14PS_Z(mxyz, k, xyz))
}

// VRCP14PS_Z: Compute Approximate Reciprocals of Packed Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VRCP14PS.Z m128 k xmm
//	VRCP14PS.Z m256 k ymm
//	VRCP14PS.Z xmm k xmm
//	VRCP14PS.Z ymm k ymm
//	VRCP14PS.Z m512 k zmm
//	VRCP14PS.Z zmm k zmm
//
// Construct and append a VRCP14PS.Z instruction to the active function.
// Operates on the global context.
func VRCP14PS_Z(mxyz, k, xyz operand.Op) { ctx.VRCP14PS_Z(mxyz, k, xyz) }
|
|
|
|
// VRCP14SD: Compute Approximate Reciprocal of a Scalar Double-Precision Floating-Point Value.
//
// Forms:
//
//	VRCP14SD m64 xmm k xmm
//	VRCP14SD m64 xmm xmm
//	VRCP14SD xmm xmm k xmm
//	VRCP14SD xmm xmm xmm
//
// Construct and append a VRCP14SD instruction to the active function.
func (c *Context) VRCP14SD(ops ...operand.Op) {
	c.addinstruction(x86.VRCP14SD(ops...))
}

// VRCP14SD: Compute Approximate Reciprocal of a Scalar Double-Precision Floating-Point Value.
//
// Forms:
//
//	VRCP14SD m64 xmm k xmm
//	VRCP14SD m64 xmm xmm
//	VRCP14SD xmm xmm k xmm
//	VRCP14SD xmm xmm xmm
//
// Construct and append a VRCP14SD instruction to the active function.
// Operates on the global context.
func VRCP14SD(ops ...operand.Op) { ctx.VRCP14SD(ops...) }
|
|
|
|
// VRCP14SD_Z: Compute Approximate Reciprocal of a Scalar Double-Precision Floating-Point Value (Zeroing Masking).
//
// Forms:
//
//	VRCP14SD.Z m64 xmm k xmm
//	VRCP14SD.Z xmm xmm k xmm
//
// Construct and append a VRCP14SD.Z instruction to the active function.
func (c *Context) VRCP14SD_Z(mx, x, k, x1 operand.Op) {
	c.addinstruction(x86.VRCP14SD_Z(mx, x, k, x1))
}

// VRCP14SD_Z: Compute Approximate Reciprocal of a Scalar Double-Precision Floating-Point Value (Zeroing Masking).
//
// Forms:
//
//	VRCP14SD.Z m64 xmm k xmm
//	VRCP14SD.Z xmm xmm k xmm
//
// Construct and append a VRCP14SD.Z instruction to the active function.
// Operates on the global context.
func VRCP14SD_Z(mx, x, k, x1 operand.Op) { ctx.VRCP14SD_Z(mx, x, k, x1) }
|
|
|
|
// VRCP14SS: Compute Approximate Reciprocal of a Scalar Single-Precision Floating-Point Value.
//
// Forms:
//
//	VRCP14SS m32 xmm k xmm
//	VRCP14SS m32 xmm xmm
//	VRCP14SS xmm xmm k xmm
//	VRCP14SS xmm xmm xmm
//
// Construct and append a VRCP14SS instruction to the active function.
func (c *Context) VRCP14SS(ops ...operand.Op) {
	c.addinstruction(x86.VRCP14SS(ops...))
}

// VRCP14SS: Compute Approximate Reciprocal of a Scalar Single-Precision Floating-Point Value.
//
// Forms:
//
//	VRCP14SS m32 xmm k xmm
//	VRCP14SS m32 xmm xmm
//	VRCP14SS xmm xmm k xmm
//	VRCP14SS xmm xmm xmm
//
// Construct and append a VRCP14SS instruction to the active function.
// Operates on the global context.
func VRCP14SS(ops ...operand.Op) { ctx.VRCP14SS(ops...) }
|
|
|
|
// VRCP14SS_Z: Compute Approximate Reciprocal of a Scalar Single-Precision Floating-Point Value (Zeroing Masking).
//
// Forms:
//
//	VRCP14SS.Z m32 xmm k xmm
//	VRCP14SS.Z xmm xmm k xmm
//
// Construct and append a VRCP14SS.Z instruction to the active function.
func (c *Context) VRCP14SS_Z(mx, x, k, x1 operand.Op) {
	c.addinstruction(x86.VRCP14SS_Z(mx, x, k, x1))
}

// VRCP14SS_Z: Compute Approximate Reciprocal of a Scalar Single-Precision Floating-Point Value (Zeroing Masking).
//
// Forms:
//
//	VRCP14SS.Z m32 xmm k xmm
//	VRCP14SS.Z xmm xmm k xmm
//
// Construct and append a VRCP14SS.Z instruction to the active function.
// Operates on the global context.
func VRCP14SS_Z(mx, x, k, x1 operand.Op) { ctx.VRCP14SS_Z(mx, x, k, x1) }
|
|
|
|
// VRCP28PD: Approximation to the Reciprocal of Packed Double-Precision Floating-Point Values with Less Than 2^-28 Relative Error.
//
// Forms:
//
//	VRCP28PD m512 k zmm
//	VRCP28PD m512 zmm
//	VRCP28PD zmm k zmm
//	VRCP28PD zmm zmm
//
// Construct and append a VRCP28PD instruction to the active function.
func (c *Context) VRCP28PD(ops ...operand.Op) {
	c.addinstruction(x86.VRCP28PD(ops...))
}

// VRCP28PD: Approximation to the Reciprocal of Packed Double-Precision Floating-Point Values with Less Than 2^-28 Relative Error.
//
// Forms:
//
//	VRCP28PD m512 k zmm
//	VRCP28PD m512 zmm
//	VRCP28PD zmm k zmm
//	VRCP28PD zmm zmm
//
// Construct and append a VRCP28PD instruction to the active function.
// Operates on the global context.
func VRCP28PD(ops ...operand.Op) { ctx.VRCP28PD(ops...) }
|
|
|
|
// VRCP28PD_BCST: Approximation to the Reciprocal of Packed Double-Precision Floating-Point Values with Less Than 2^-28 Relative Error (Broadcast).
//
// Forms:
//
//	VRCP28PD.BCST m64 k zmm
//	VRCP28PD.BCST m64 zmm
//
// Construct and append a VRCP28PD.BCST instruction to the active function.
func (c *Context) VRCP28PD_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VRCP28PD_BCST(ops...))
}

// VRCP28PD_BCST: Approximation to the Reciprocal of Packed Double-Precision Floating-Point Values with Less Than 2^-28 Relative Error (Broadcast).
//
// Forms:
//
//	VRCP28PD.BCST m64 k zmm
//	VRCP28PD.BCST m64 zmm
//
// Construct and append a VRCP28PD.BCST instruction to the active function.
// Operates on the global context.
func VRCP28PD_BCST(ops ...operand.Op) { ctx.VRCP28PD_BCST(ops...) }
|
|
|
|
// VRCP28PD_BCST_Z: Approximation to the Reciprocal of Packed Double-Precision Floating-Point Values with Less Than 2^-28 Relative Error (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VRCP28PD.BCST.Z m64 k zmm
//
// Construct and append a VRCP28PD.BCST.Z instruction to the active function.
func (c *Context) VRCP28PD_BCST_Z(m, k, z operand.Op) {
	c.addinstruction(x86.VRCP28PD_BCST_Z(m, k, z))
}

// VRCP28PD_BCST_Z: Approximation to the Reciprocal of Packed Double-Precision Floating-Point Values with Less Than 2^-28 Relative Error (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VRCP28PD.BCST.Z m64 k zmm
//
// Construct and append a VRCP28PD.BCST.Z instruction to the active function.
// Operates on the global context.
func VRCP28PD_BCST_Z(m, k, z operand.Op) { ctx.VRCP28PD_BCST_Z(m, k, z) }
|
|
|
|
// VRCP28PD_SAE: Approximation to the Reciprocal of Packed Double-Precision Floating-Point Values with Less Than 2^-28 Relative Error (Suppress All Exceptions).
//
// Forms:
//
//	VRCP28PD.SAE zmm k zmm
//	VRCP28PD.SAE zmm zmm
//
// Construct and append a VRCP28PD.SAE instruction to the active function.
func (c *Context) VRCP28PD_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VRCP28PD_SAE(ops...))
}

// VRCP28PD_SAE: Approximation to the Reciprocal of Packed Double-Precision Floating-Point Values with Less Than 2^-28 Relative Error (Suppress All Exceptions).
//
// Forms:
//
//	VRCP28PD.SAE zmm k zmm
//	VRCP28PD.SAE zmm zmm
//
// Construct and append a VRCP28PD.SAE instruction to the active function.
// Operates on the global context.
func VRCP28PD_SAE(ops ...operand.Op) { ctx.VRCP28PD_SAE(ops...) }
|
|
|
|
// VRCP28PD_SAE_Z: Approximation to the Reciprocal of Packed Double-Precision Floating-Point Values with Less Than 2^-28 Relative Error (Suppress All Exceptions, Zeroing Masking).
//
// Forms:
//
//	VRCP28PD.SAE.Z zmm k zmm
//
// Construct and append a VRCP28PD.SAE.Z instruction to the active function.
func (c *Context) VRCP28PD_SAE_Z(z, k, z1 operand.Op) {
	c.addinstruction(x86.VRCP28PD_SAE_Z(z, k, z1))
}

// VRCP28PD_SAE_Z: Approximation to the Reciprocal of Packed Double-Precision Floating-Point Values with Less Than 2^-28 Relative Error (Suppress All Exceptions, Zeroing Masking).
//
// Forms:
//
//	VRCP28PD.SAE.Z zmm k zmm
//
// Construct and append a VRCP28PD.SAE.Z instruction to the active function.
// Operates on the global context.
func VRCP28PD_SAE_Z(z, k, z1 operand.Op) { ctx.VRCP28PD_SAE_Z(z, k, z1) }
|
|
|
|
// VRCP28PD_Z: Approximation to the Reciprocal of Packed Double-Precision Floating-Point Values with Less Than 2^-28 Relative Error (Zeroing Masking).
//
// Forms:
//
//	VRCP28PD.Z m512 k zmm
//	VRCP28PD.Z zmm k zmm
//
// Construct and append a VRCP28PD.Z instruction to the active function.
func (c *Context) VRCP28PD_Z(mz, k, z operand.Op) {
	c.addinstruction(x86.VRCP28PD_Z(mz, k, z))
}

// VRCP28PD_Z: Approximation to the Reciprocal of Packed Double-Precision Floating-Point Values with Less Than 2^-28 Relative Error (Zeroing Masking).
//
// Forms:
//
//	VRCP28PD.Z m512 k zmm
//	VRCP28PD.Z zmm k zmm
//
// Construct and append a VRCP28PD.Z instruction to the active function.
// Operates on the global context.
func VRCP28PD_Z(mz, k, z operand.Op) { ctx.VRCP28PD_Z(mz, k, z) }
// VRCP28PS: Approximation to the Reciprocal of Packed Single-Precision Floating-Point Values with Less Than 2^-28 Relative Error.
//
// Forms:
//
//	VRCP28PS m512 k zmm
//	VRCP28PS m512 zmm
//	VRCP28PS zmm k zmm
//	VRCP28PS zmm zmm
//
// Construct and append a VRCP28PS instruction to the active function.
func (c *Context) VRCP28PS(ops ...operand.Op) {
	c.addinstruction(x86.VRCP28PS(ops...))
}

// VRCP28PS: Approximation to the Reciprocal of Packed Single-Precision Floating-Point Values with Less Than 2^-28 Relative Error.
//
// Forms:
//
//	VRCP28PS m512 k zmm
//	VRCP28PS m512 zmm
//	VRCP28PS zmm k zmm
//	VRCP28PS zmm zmm
//
// Construct and append a VRCP28PS instruction to the active function.
// Operates on the global context.
func VRCP28PS(ops ...operand.Op) { ctx.VRCP28PS(ops...) }
// VRCP28PS_BCST: Approximation to the Reciprocal of Packed Single-Precision Floating-Point Values with Less Than 2^-28 Relative Error (Broadcast).
//
// Forms:
//
//	VRCP28PS.BCST m32 k zmm
//	VRCP28PS.BCST m32 zmm
//
// Construct and append a VRCP28PS.BCST instruction to the active function.
func (c *Context) VRCP28PS_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VRCP28PS_BCST(ops...))
}

// VRCP28PS_BCST: Approximation to the Reciprocal of Packed Single-Precision Floating-Point Values with Less Than 2^-28 Relative Error (Broadcast).
//
// Forms:
//
//	VRCP28PS.BCST m32 k zmm
//	VRCP28PS.BCST m32 zmm
//
// Construct and append a VRCP28PS.BCST instruction to the active function.
// Operates on the global context.
func VRCP28PS_BCST(ops ...operand.Op) { ctx.VRCP28PS_BCST(ops...) }
// VRCP28PS_BCST_Z: Approximation to the Reciprocal of Packed Single-Precision Floating-Point Values with Less Than 2^-28 Relative Error (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VRCP28PS.BCST.Z m32 k zmm
//
// Construct and append a VRCP28PS.BCST.Z instruction to the active function.
func (c *Context) VRCP28PS_BCST_Z(m, k, z operand.Op) {
	c.addinstruction(x86.VRCP28PS_BCST_Z(m, k, z))
}

// VRCP28PS_BCST_Z: Approximation to the Reciprocal of Packed Single-Precision Floating-Point Values with Less Than 2^-28 Relative Error (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VRCP28PS.BCST.Z m32 k zmm
//
// Construct and append a VRCP28PS.BCST.Z instruction to the active function.
// Operates on the global context.
func VRCP28PS_BCST_Z(m, k, z operand.Op) { ctx.VRCP28PS_BCST_Z(m, k, z) }
// VRCP28PS_SAE: Approximation to the Reciprocal of Packed Single-Precision Floating-Point Values with Less Than 2^-28 Relative Error (Suppress All Exceptions).
//
// Forms:
//
//	VRCP28PS.SAE zmm k zmm
//	VRCP28PS.SAE zmm zmm
//
// Construct and append a VRCP28PS.SAE instruction to the active function.
func (c *Context) VRCP28PS_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VRCP28PS_SAE(ops...))
}

// VRCP28PS_SAE: Approximation to the Reciprocal of Packed Single-Precision Floating-Point Values with Less Than 2^-28 Relative Error (Suppress All Exceptions).
//
// Forms:
//
//	VRCP28PS.SAE zmm k zmm
//	VRCP28PS.SAE zmm zmm
//
// Construct and append a VRCP28PS.SAE instruction to the active function.
// Operates on the global context.
func VRCP28PS_SAE(ops ...operand.Op) { ctx.VRCP28PS_SAE(ops...) }
// VRCP28PS_SAE_Z: Approximation to the Reciprocal of Packed Single-Precision Floating-Point Values with Less Than 2^-28 Relative Error (Suppress All Exceptions, Zeroing Masking).
//
// Forms:
//
//	VRCP28PS.SAE.Z zmm k zmm
//
// Construct and append a VRCP28PS.SAE.Z instruction to the active function.
func (c *Context) VRCP28PS_SAE_Z(z, k, z1 operand.Op) {
	c.addinstruction(x86.VRCP28PS_SAE_Z(z, k, z1))
}

// VRCP28PS_SAE_Z: Approximation to the Reciprocal of Packed Single-Precision Floating-Point Values with Less Than 2^-28 Relative Error (Suppress All Exceptions, Zeroing Masking).
//
// Forms:
//
//	VRCP28PS.SAE.Z zmm k zmm
//
// Construct and append a VRCP28PS.SAE.Z instruction to the active function.
// Operates on the global context.
func VRCP28PS_SAE_Z(z, k, z1 operand.Op) { ctx.VRCP28PS_SAE_Z(z, k, z1) }
// VRCP28PS_Z: Approximation to the Reciprocal of Packed Single-Precision Floating-Point Values with Less Than 2^-28 Relative Error (Zeroing Masking).
//
// Forms:
//
//	VRCP28PS.Z m512 k zmm
//	VRCP28PS.Z zmm k zmm
//
// Construct and append a VRCP28PS.Z instruction to the active function.
func (c *Context) VRCP28PS_Z(mz, k, z operand.Op) {
	c.addinstruction(x86.VRCP28PS_Z(mz, k, z))
}

// VRCP28PS_Z: Approximation to the Reciprocal of Packed Single-Precision Floating-Point Values with Less Than 2^-28 Relative Error (Zeroing Masking).
//
// Forms:
//
//	VRCP28PS.Z m512 k zmm
//	VRCP28PS.Z zmm k zmm
//
// Construct and append a VRCP28PS.Z instruction to the active function.
// Operates on the global context.
func VRCP28PS_Z(mz, k, z operand.Op) { ctx.VRCP28PS_Z(mz, k, z) }
// VRCP28SD: Approximation to the Reciprocal of a Scalar Double-Precision Floating-Point Value with Less Than 2^-28 Relative Error.
//
// Forms:
//
//	VRCP28SD m64 xmm k xmm
//	VRCP28SD m64 xmm xmm
//	VRCP28SD xmm xmm k xmm
//	VRCP28SD xmm xmm xmm
//
// Construct and append a VRCP28SD instruction to the active function.
func (c *Context) VRCP28SD(ops ...operand.Op) {
	c.addinstruction(x86.VRCP28SD(ops...))
}

// VRCP28SD: Approximation to the Reciprocal of a Scalar Double-Precision Floating-Point Value with Less Than 2^-28 Relative Error.
//
// Forms:
//
//	VRCP28SD m64 xmm k xmm
//	VRCP28SD m64 xmm xmm
//	VRCP28SD xmm xmm k xmm
//	VRCP28SD xmm xmm xmm
//
// Construct and append a VRCP28SD instruction to the active function.
// Operates on the global context.
func VRCP28SD(ops ...operand.Op) { ctx.VRCP28SD(ops...) }
// VRCP28SD_SAE: Approximation to the Reciprocal of a Scalar Double-Precision Floating-Point Value with Less Than 2^-28 Relative Error (Suppress All Exceptions).
//
// Forms:
//
//	VRCP28SD.SAE xmm xmm k xmm
//	VRCP28SD.SAE xmm xmm xmm
//
// Construct and append a VRCP28SD.SAE instruction to the active function.
func (c *Context) VRCP28SD_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VRCP28SD_SAE(ops...))
}

// VRCP28SD_SAE: Approximation to the Reciprocal of a Scalar Double-Precision Floating-Point Value with Less Than 2^-28 Relative Error (Suppress All Exceptions).
//
// Forms:
//
//	VRCP28SD.SAE xmm xmm k xmm
//	VRCP28SD.SAE xmm xmm xmm
//
// Construct and append a VRCP28SD.SAE instruction to the active function.
// Operates on the global context.
func VRCP28SD_SAE(ops ...operand.Op) { ctx.VRCP28SD_SAE(ops...) }
// VRCP28SD_SAE_Z: Approximation to the Reciprocal of a Scalar Double-Precision Floating-Point Value with Less Than 2^-28 Relative Error (Suppress All Exceptions, Zeroing Masking).
//
// Forms:
//
//	VRCP28SD.SAE.Z xmm xmm k xmm
//
// Construct and append a VRCP28SD.SAE.Z instruction to the active function.
func (c *Context) VRCP28SD_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VRCP28SD_SAE_Z(x, x1, k, x2))
}

// VRCP28SD_SAE_Z: Approximation to the Reciprocal of a Scalar Double-Precision Floating-Point Value with Less Than 2^-28 Relative Error (Suppress All Exceptions, Zeroing Masking).
//
// Forms:
//
//	VRCP28SD.SAE.Z xmm xmm k xmm
//
// Construct and append a VRCP28SD.SAE.Z instruction to the active function.
// Operates on the global context.
func VRCP28SD_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VRCP28SD_SAE_Z(x, x1, k, x2) }
// VRCP28SD_Z: Approximation to the Reciprocal of a Scalar Double-Precision Floating-Point Value with Less Than 2^-28 Relative Error (Zeroing Masking).
//
// Forms:
//
//	VRCP28SD.Z m64 xmm k xmm
//	VRCP28SD.Z xmm xmm k xmm
//
// Construct and append a VRCP28SD.Z instruction to the active function.
func (c *Context) VRCP28SD_Z(mx, x, k, x1 operand.Op) {
	c.addinstruction(x86.VRCP28SD_Z(mx, x, k, x1))
}

// VRCP28SD_Z: Approximation to the Reciprocal of a Scalar Double-Precision Floating-Point Value with Less Than 2^-28 Relative Error (Zeroing Masking).
//
// Forms:
//
//	VRCP28SD.Z m64 xmm k xmm
//	VRCP28SD.Z xmm xmm k xmm
//
// Construct and append a VRCP28SD.Z instruction to the active function.
// Operates on the global context.
func VRCP28SD_Z(mx, x, k, x1 operand.Op) { ctx.VRCP28SD_Z(mx, x, k, x1) }
// VRCP28SS: Approximation to the Reciprocal of a Scalar Single-Precision Floating-Point Value with Less Than 2^-28 Relative Error.
//
// Forms:
//
//	VRCP28SS m32 xmm k xmm
//	VRCP28SS m32 xmm xmm
//	VRCP28SS xmm xmm k xmm
//	VRCP28SS xmm xmm xmm
//
// Construct and append a VRCP28SS instruction to the active function.
func (c *Context) VRCP28SS(ops ...operand.Op) {
	c.addinstruction(x86.VRCP28SS(ops...))
}

// VRCP28SS: Approximation to the Reciprocal of a Scalar Single-Precision Floating-Point Value with Less Than 2^-28 Relative Error.
//
// Forms:
//
//	VRCP28SS m32 xmm k xmm
//	VRCP28SS m32 xmm xmm
//	VRCP28SS xmm xmm k xmm
//	VRCP28SS xmm xmm xmm
//
// Construct and append a VRCP28SS instruction to the active function.
// Operates on the global context.
func VRCP28SS(ops ...operand.Op) { ctx.VRCP28SS(ops...) }
// VRCP28SS_SAE: Approximation to the Reciprocal of a Scalar Single-Precision Floating-Point Value with Less Than 2^-28 Relative Error (Suppress All Exceptions).
//
// Forms:
//
//	VRCP28SS.SAE xmm xmm k xmm
//	VRCP28SS.SAE xmm xmm xmm
//
// Construct and append a VRCP28SS.SAE instruction to the active function.
func (c *Context) VRCP28SS_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VRCP28SS_SAE(ops...))
}

// VRCP28SS_SAE: Approximation to the Reciprocal of a Scalar Single-Precision Floating-Point Value with Less Than 2^-28 Relative Error (Suppress All Exceptions).
//
// Forms:
//
//	VRCP28SS.SAE xmm xmm k xmm
//	VRCP28SS.SAE xmm xmm xmm
//
// Construct and append a VRCP28SS.SAE instruction to the active function.
// Operates on the global context.
func VRCP28SS_SAE(ops ...operand.Op) { ctx.VRCP28SS_SAE(ops...) }
// VRCP28SS_SAE_Z: Approximation to the Reciprocal of a Scalar Single-Precision Floating-Point Value with Less Than 2^-28 Relative Error (Suppress All Exceptions, Zeroing Masking).
//
// Forms:
//
//	VRCP28SS.SAE.Z xmm xmm k xmm
//
// Construct and append a VRCP28SS.SAE.Z instruction to the active function.
func (c *Context) VRCP28SS_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VRCP28SS_SAE_Z(x, x1, k, x2))
}

// VRCP28SS_SAE_Z: Approximation to the Reciprocal of a Scalar Single-Precision Floating-Point Value with Less Than 2^-28 Relative Error (Suppress All Exceptions, Zeroing Masking).
//
// Forms:
//
//	VRCP28SS.SAE.Z xmm xmm k xmm
//
// Construct and append a VRCP28SS.SAE.Z instruction to the active function.
// Operates on the global context.
func VRCP28SS_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VRCP28SS_SAE_Z(x, x1, k, x2) }
// VRCP28SS_Z: Approximation to the Reciprocal of a Scalar Single-Precision Floating-Point Value with Less Than 2^-28 Relative Error (Zeroing Masking).
//
// Forms:
//
//	VRCP28SS.Z m32 xmm k xmm
//	VRCP28SS.Z xmm xmm k xmm
//
// Construct and append a VRCP28SS.Z instruction to the active function.
func (c *Context) VRCP28SS_Z(mx, x, k, x1 operand.Op) {
	c.addinstruction(x86.VRCP28SS_Z(mx, x, k, x1))
}

// VRCP28SS_Z: Approximation to the Reciprocal of a Scalar Single-Precision Floating-Point Value with Less Than 2^-28 Relative Error (Zeroing Masking).
//
// Forms:
//
//	VRCP28SS.Z m32 xmm k xmm
//	VRCP28SS.Z xmm xmm k xmm
//
// Construct and append a VRCP28SS.Z instruction to the active function.
// Operates on the global context.
func VRCP28SS_Z(mx, x, k, x1 operand.Op) { ctx.VRCP28SS_Z(mx, x, k, x1) }
// VRCPPS: Compute Approximate Reciprocals of Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	VRCPPS m128 xmm
//	VRCPPS m256 ymm
//	VRCPPS xmm xmm
//	VRCPPS ymm ymm
//
// Construct and append a VRCPPS instruction to the active function.
func (c *Context) VRCPPS(mxy, xy operand.Op) {
	c.addinstruction(x86.VRCPPS(mxy, xy))
}

// VRCPPS: Compute Approximate Reciprocals of Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	VRCPPS m128 xmm
//	VRCPPS m256 ymm
//	VRCPPS xmm xmm
//	VRCPPS ymm ymm
//
// Construct and append a VRCPPS instruction to the active function.
// Operates on the global context.
func VRCPPS(mxy, xy operand.Op) { ctx.VRCPPS(mxy, xy) }
// VRCPSS: Compute Approximate Reciprocal of Scalar Single-Precision Floating-Point Values.
//
// Forms:
//
//	VRCPSS m32 xmm xmm
//	VRCPSS xmm xmm xmm
//
// Construct and append a VRCPSS instruction to the active function.
func (c *Context) VRCPSS(mx, x, x1 operand.Op) {
	c.addinstruction(x86.VRCPSS(mx, x, x1))
}

// VRCPSS: Compute Approximate Reciprocal of Scalar Single-Precision Floating-Point Values.
//
// Forms:
//
//	VRCPSS m32 xmm xmm
//	VRCPSS xmm xmm xmm
//
// Construct and append a VRCPSS instruction to the active function.
// Operates on the global context.
func VRCPSS(mx, x, x1 operand.Op) { ctx.VRCPSS(mx, x, x1) }
// VREDUCEPD: Perform Reduction Transformation on Packed Double-Precision Floating-Point Values.
//
// Forms:
//
//	VREDUCEPD imm8 m128 k xmm
//	VREDUCEPD imm8 m128 xmm
//	VREDUCEPD imm8 m256 k ymm
//	VREDUCEPD imm8 m256 ymm
//	VREDUCEPD imm8 xmm k xmm
//	VREDUCEPD imm8 xmm xmm
//	VREDUCEPD imm8 ymm k ymm
//	VREDUCEPD imm8 ymm ymm
//	VREDUCEPD imm8 m512 k zmm
//	VREDUCEPD imm8 m512 zmm
//	VREDUCEPD imm8 zmm k zmm
//	VREDUCEPD imm8 zmm zmm
//
// Construct and append a VREDUCEPD instruction to the active function.
func (c *Context) VREDUCEPD(ops ...operand.Op) {
	c.addinstruction(x86.VREDUCEPD(ops...))
}

// VREDUCEPD: Perform Reduction Transformation on Packed Double-Precision Floating-Point Values.
//
// Forms:
//
//	VREDUCEPD imm8 m128 k xmm
//	VREDUCEPD imm8 m128 xmm
//	VREDUCEPD imm8 m256 k ymm
//	VREDUCEPD imm8 m256 ymm
//	VREDUCEPD imm8 xmm k xmm
//	VREDUCEPD imm8 xmm xmm
//	VREDUCEPD imm8 ymm k ymm
//	VREDUCEPD imm8 ymm ymm
//	VREDUCEPD imm8 m512 k zmm
//	VREDUCEPD imm8 m512 zmm
//	VREDUCEPD imm8 zmm k zmm
//	VREDUCEPD imm8 zmm zmm
//
// Construct and append a VREDUCEPD instruction to the active function.
// Operates on the global context.
func VREDUCEPD(ops ...operand.Op) { ctx.VREDUCEPD(ops...) }
// VREDUCEPD_BCST: Perform Reduction Transformation on Packed Double-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VREDUCEPD.BCST imm8 m64 k xmm
//	VREDUCEPD.BCST imm8 m64 k ymm
//	VREDUCEPD.BCST imm8 m64 xmm
//	VREDUCEPD.BCST imm8 m64 ymm
//	VREDUCEPD.BCST imm8 m64 k zmm
//	VREDUCEPD.BCST imm8 m64 zmm
//
// Construct and append a VREDUCEPD.BCST instruction to the active function.
func (c *Context) VREDUCEPD_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VREDUCEPD_BCST(ops...))
}

// VREDUCEPD_BCST: Perform Reduction Transformation on Packed Double-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VREDUCEPD.BCST imm8 m64 k xmm
//	VREDUCEPD.BCST imm8 m64 k ymm
//	VREDUCEPD.BCST imm8 m64 xmm
//	VREDUCEPD.BCST imm8 m64 ymm
//	VREDUCEPD.BCST imm8 m64 k zmm
//	VREDUCEPD.BCST imm8 m64 zmm
//
// Construct and append a VREDUCEPD.BCST instruction to the active function.
// Operates on the global context.
func VREDUCEPD_BCST(ops ...operand.Op) { ctx.VREDUCEPD_BCST(ops...) }
// VREDUCEPD_BCST_Z: Perform Reduction Transformation on Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VREDUCEPD.BCST.Z imm8 m64 k xmm
//	VREDUCEPD.BCST.Z imm8 m64 k ymm
//	VREDUCEPD.BCST.Z imm8 m64 k zmm
//
// Construct and append a VREDUCEPD.BCST.Z instruction to the active function.
func (c *Context) VREDUCEPD_BCST_Z(i, m, k, xyz operand.Op) {
	c.addinstruction(x86.VREDUCEPD_BCST_Z(i, m, k, xyz))
}

// VREDUCEPD_BCST_Z: Perform Reduction Transformation on Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VREDUCEPD.BCST.Z imm8 m64 k xmm
//	VREDUCEPD.BCST.Z imm8 m64 k ymm
//	VREDUCEPD.BCST.Z imm8 m64 k zmm
//
// Construct and append a VREDUCEPD.BCST.Z instruction to the active function.
// Operates on the global context.
func VREDUCEPD_BCST_Z(i, m, k, xyz operand.Op) { ctx.VREDUCEPD_BCST_Z(i, m, k, xyz) }
// VREDUCEPD_Z: Perform Reduction Transformation on Packed Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VREDUCEPD.Z imm8 m128 k xmm
//	VREDUCEPD.Z imm8 m256 k ymm
//	VREDUCEPD.Z imm8 xmm k xmm
//	VREDUCEPD.Z imm8 ymm k ymm
//	VREDUCEPD.Z imm8 m512 k zmm
//	VREDUCEPD.Z imm8 zmm k zmm
//
// Construct and append a VREDUCEPD.Z instruction to the active function.
func (c *Context) VREDUCEPD_Z(i, mxyz, k, xyz operand.Op) {
	c.addinstruction(x86.VREDUCEPD_Z(i, mxyz, k, xyz))
}

// VREDUCEPD_Z: Perform Reduction Transformation on Packed Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VREDUCEPD.Z imm8 m128 k xmm
//	VREDUCEPD.Z imm8 m256 k ymm
//	VREDUCEPD.Z imm8 xmm k xmm
//	VREDUCEPD.Z imm8 ymm k ymm
//	VREDUCEPD.Z imm8 m512 k zmm
//	VREDUCEPD.Z imm8 zmm k zmm
//
// Construct and append a VREDUCEPD.Z instruction to the active function.
// Operates on the global context.
func VREDUCEPD_Z(i, mxyz, k, xyz operand.Op) { ctx.VREDUCEPD_Z(i, mxyz, k, xyz) }
// VREDUCEPS: Perform Reduction Transformation on Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	VREDUCEPS imm8 m128 k xmm
//	VREDUCEPS imm8 m128 xmm
//	VREDUCEPS imm8 m256 k ymm
//	VREDUCEPS imm8 m256 ymm
//	VREDUCEPS imm8 xmm k xmm
//	VREDUCEPS imm8 xmm xmm
//	VREDUCEPS imm8 ymm k ymm
//	VREDUCEPS imm8 ymm ymm
//	VREDUCEPS imm8 m512 k zmm
//	VREDUCEPS imm8 m512 zmm
//	VREDUCEPS imm8 zmm k zmm
//	VREDUCEPS imm8 zmm zmm
//
// Construct and append a VREDUCEPS instruction to the active function.
func (c *Context) VREDUCEPS(ops ...operand.Op) {
	c.addinstruction(x86.VREDUCEPS(ops...))
}

// VREDUCEPS: Perform Reduction Transformation on Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	VREDUCEPS imm8 m128 k xmm
//	VREDUCEPS imm8 m128 xmm
//	VREDUCEPS imm8 m256 k ymm
//	VREDUCEPS imm8 m256 ymm
//	VREDUCEPS imm8 xmm k xmm
//	VREDUCEPS imm8 xmm xmm
//	VREDUCEPS imm8 ymm k ymm
//	VREDUCEPS imm8 ymm ymm
//	VREDUCEPS imm8 m512 k zmm
//	VREDUCEPS imm8 m512 zmm
//	VREDUCEPS imm8 zmm k zmm
//	VREDUCEPS imm8 zmm zmm
//
// Construct and append a VREDUCEPS instruction to the active function.
// Operates on the global context.
func VREDUCEPS(ops ...operand.Op) { ctx.VREDUCEPS(ops...) }
// VREDUCEPS_BCST: Perform Reduction Transformation on Packed Single-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VREDUCEPS.BCST imm8 m32 k xmm
//	VREDUCEPS.BCST imm8 m32 k ymm
//	VREDUCEPS.BCST imm8 m32 xmm
//	VREDUCEPS.BCST imm8 m32 ymm
//	VREDUCEPS.BCST imm8 m32 k zmm
//	VREDUCEPS.BCST imm8 m32 zmm
//
// Construct and append a VREDUCEPS.BCST instruction to the active function.
func (c *Context) VREDUCEPS_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VREDUCEPS_BCST(ops...))
}

// VREDUCEPS_BCST: Perform Reduction Transformation on Packed Single-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VREDUCEPS.BCST imm8 m32 k xmm
//	VREDUCEPS.BCST imm8 m32 k ymm
//	VREDUCEPS.BCST imm8 m32 xmm
//	VREDUCEPS.BCST imm8 m32 ymm
//	VREDUCEPS.BCST imm8 m32 k zmm
//	VREDUCEPS.BCST imm8 m32 zmm
//
// Construct and append a VREDUCEPS.BCST instruction to the active function.
// Operates on the global context.
func VREDUCEPS_BCST(ops ...operand.Op) { ctx.VREDUCEPS_BCST(ops...) }
// VREDUCEPS_BCST_Z: Perform Reduction Transformation on Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VREDUCEPS.BCST.Z imm8 m32 k xmm
//	VREDUCEPS.BCST.Z imm8 m32 k ymm
//	VREDUCEPS.BCST.Z imm8 m32 k zmm
//
// Construct and append a VREDUCEPS.BCST.Z instruction to the active function.
func (c *Context) VREDUCEPS_BCST_Z(i, m, k, xyz operand.Op) {
	c.addinstruction(x86.VREDUCEPS_BCST_Z(i, m, k, xyz))
}

// VREDUCEPS_BCST_Z: Perform Reduction Transformation on Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VREDUCEPS.BCST.Z imm8 m32 k xmm
//	VREDUCEPS.BCST.Z imm8 m32 k ymm
//	VREDUCEPS.BCST.Z imm8 m32 k zmm
//
// Construct and append a VREDUCEPS.BCST.Z instruction to the active function.
// Operates on the global context.
func VREDUCEPS_BCST_Z(i, m, k, xyz operand.Op) { ctx.VREDUCEPS_BCST_Z(i, m, k, xyz) }
// VREDUCEPS_Z: Perform Reduction Transformation on Packed Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VREDUCEPS.Z imm8 m128 k xmm
//	VREDUCEPS.Z imm8 m256 k ymm
//	VREDUCEPS.Z imm8 xmm k xmm
//	VREDUCEPS.Z imm8 ymm k ymm
//	VREDUCEPS.Z imm8 m512 k zmm
//	VREDUCEPS.Z imm8 zmm k zmm
//
// Construct and append a VREDUCEPS.Z instruction to the active function.
func (c *Context) VREDUCEPS_Z(i, mxyz, k, xyz operand.Op) {
	c.addinstruction(x86.VREDUCEPS_Z(i, mxyz, k, xyz))
}

// VREDUCEPS_Z: Perform Reduction Transformation on Packed Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VREDUCEPS.Z imm8 m128 k xmm
//	VREDUCEPS.Z imm8 m256 k ymm
//	VREDUCEPS.Z imm8 xmm k xmm
//	VREDUCEPS.Z imm8 ymm k ymm
//	VREDUCEPS.Z imm8 m512 k zmm
//	VREDUCEPS.Z imm8 zmm k zmm
//
// Construct and append a VREDUCEPS.Z instruction to the active function.
// Operates on the global context.
func VREDUCEPS_Z(i, mxyz, k, xyz operand.Op) { ctx.VREDUCEPS_Z(i, mxyz, k, xyz) }
// VREDUCESD: Perform Reduction Transformation on a Scalar Double-Precision Floating-Point Value.
//
// Forms:
//
//	VREDUCESD imm8 m64 xmm k xmm
//	VREDUCESD imm8 m64 xmm xmm
//	VREDUCESD imm8 xmm xmm k xmm
//	VREDUCESD imm8 xmm xmm xmm
//
// Construct and append a VREDUCESD instruction to the active function.
func (c *Context) VREDUCESD(ops ...operand.Op) {
	c.addinstruction(x86.VREDUCESD(ops...))
}

// VREDUCESD: Perform Reduction Transformation on a Scalar Double-Precision Floating-Point Value.
//
// Forms:
//
//	VREDUCESD imm8 m64 xmm k xmm
//	VREDUCESD imm8 m64 xmm xmm
//	VREDUCESD imm8 xmm xmm k xmm
//	VREDUCESD imm8 xmm xmm xmm
//
// Construct and append a VREDUCESD instruction to the active function.
// Operates on the global context.
func VREDUCESD(ops ...operand.Op) { ctx.VREDUCESD(ops...) }
// VREDUCESD_Z: Perform Reduction Transformation on a Scalar Double-Precision Floating-Point Value (Zeroing Masking).
//
// Forms:
//
//	VREDUCESD.Z imm8 m64 xmm k xmm
//	VREDUCESD.Z imm8 xmm xmm k xmm
//
// Construct and append a VREDUCESD.Z instruction to the active function.
func (c *Context) VREDUCESD_Z(i, mx, x, k, x1 operand.Op) {
	c.addinstruction(x86.VREDUCESD_Z(i, mx, x, k, x1))
}

// VREDUCESD_Z: Perform Reduction Transformation on a Scalar Double-Precision Floating-Point Value (Zeroing Masking).
//
// Forms:
//
//	VREDUCESD.Z imm8 m64 xmm k xmm
//	VREDUCESD.Z imm8 xmm xmm k xmm
//
// Construct and append a VREDUCESD.Z instruction to the active function.
// Operates on the global context.
func VREDUCESD_Z(i, mx, x, k, x1 operand.Op) { ctx.VREDUCESD_Z(i, mx, x, k, x1) }
// VREDUCESS: Perform Reduction Transformation on a Scalar Single-Precision Floating-Point Value.
//
// Forms:
//
//	VREDUCESS imm8 m32 xmm k xmm
//	VREDUCESS imm8 m32 xmm xmm
//	VREDUCESS imm8 xmm xmm k xmm
//	VREDUCESS imm8 xmm xmm xmm
//
// Construct and append a VREDUCESS instruction to the active function.
func (c *Context) VREDUCESS(ops ...operand.Op) {
	c.addinstruction(x86.VREDUCESS(ops...))
}

// VREDUCESS: Perform Reduction Transformation on a Scalar Single-Precision Floating-Point Value.
//
// Forms:
//
//	VREDUCESS imm8 m32 xmm k xmm
//	VREDUCESS imm8 m32 xmm xmm
//	VREDUCESS imm8 xmm xmm k xmm
//	VREDUCESS imm8 xmm xmm xmm
//
// Construct and append a VREDUCESS instruction to the active function.
// Operates on the global context.
func VREDUCESS(ops ...operand.Op) { ctx.VREDUCESS(ops...) }
// VREDUCESS_Z: Perform Reduction Transformation on a Scalar Single-Precision Floating-Point Value (Zeroing Masking).
//
// Forms:
//
//	VREDUCESS.Z imm8 m32 xmm k xmm
//	VREDUCESS.Z imm8 xmm xmm k xmm
//
// Construct and append a VREDUCESS.Z instruction to the active function.
func (c *Context) VREDUCESS_Z(i, mx, x, k, x1 operand.Op) {
	c.addinstruction(x86.VREDUCESS_Z(i, mx, x, k, x1))
}

// VREDUCESS_Z: Perform Reduction Transformation on a Scalar Single-Precision Floating-Point Value (Zeroing Masking).
//
// Forms:
//
//	VREDUCESS.Z imm8 m32 xmm k xmm
//	VREDUCESS.Z imm8 xmm xmm k xmm
//
// Construct and append a VREDUCESS.Z instruction to the active function.
// Operates on the global context.
func VREDUCESS_Z(i, mx, x, k, x1 operand.Op) { ctx.VREDUCESS_Z(i, mx, x, k, x1) }
// VRNDSCALEPD: Round Packed Double-Precision Floating-Point Values To Include A Given Number Of Fraction Bits.
//
// Forms:
//
//	VRNDSCALEPD imm8 m128 k xmm
//	VRNDSCALEPD imm8 m128 xmm
//	VRNDSCALEPD imm8 m256 k ymm
//	VRNDSCALEPD imm8 m256 ymm
//	VRNDSCALEPD imm8 xmm k xmm
//	VRNDSCALEPD imm8 xmm xmm
//	VRNDSCALEPD imm8 ymm k ymm
//	VRNDSCALEPD imm8 ymm ymm
//	VRNDSCALEPD imm8 m512 k zmm
//	VRNDSCALEPD imm8 m512 zmm
//	VRNDSCALEPD imm8 zmm k zmm
//	VRNDSCALEPD imm8 zmm zmm
//
// Construct and append a VRNDSCALEPD instruction to the active function.
func (c *Context) VRNDSCALEPD(ops ...operand.Op) {
	c.addinstruction(x86.VRNDSCALEPD(ops...))
}

// VRNDSCALEPD: Round Packed Double-Precision Floating-Point Values To Include A Given Number Of Fraction Bits.
//
// Forms:
//
//	VRNDSCALEPD imm8 m128 k xmm
//	VRNDSCALEPD imm8 m128 xmm
//	VRNDSCALEPD imm8 m256 k ymm
//	VRNDSCALEPD imm8 m256 ymm
//	VRNDSCALEPD imm8 xmm k xmm
//	VRNDSCALEPD imm8 xmm xmm
//	VRNDSCALEPD imm8 ymm k ymm
//	VRNDSCALEPD imm8 ymm ymm
//	VRNDSCALEPD imm8 m512 k zmm
//	VRNDSCALEPD imm8 m512 zmm
//	VRNDSCALEPD imm8 zmm k zmm
//	VRNDSCALEPD imm8 zmm zmm
//
// Construct and append a VRNDSCALEPD instruction to the active function.
// Operates on the global context.
func VRNDSCALEPD(ops ...operand.Op) { ctx.VRNDSCALEPD(ops...) }
// VRNDSCALEPD_BCST: Round Packed Double-Precision Floating-Point Values To Include A Given Number Of Fraction Bits (Broadcast).
//
// Forms:
//
//	VRNDSCALEPD.BCST imm8 m64 k xmm
//	VRNDSCALEPD.BCST imm8 m64 k ymm
//	VRNDSCALEPD.BCST imm8 m64 xmm
//	VRNDSCALEPD.BCST imm8 m64 ymm
//	VRNDSCALEPD.BCST imm8 m64 k zmm
//	VRNDSCALEPD.BCST imm8 m64 zmm
//
// Construct and append a VRNDSCALEPD.BCST instruction to the active function.
func (c *Context) VRNDSCALEPD_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VRNDSCALEPD_BCST(ops...))
}

// VRNDSCALEPD_BCST: Round Packed Double-Precision Floating-Point Values To Include A Given Number Of Fraction Bits (Broadcast).
//
// Forms:
//
//	VRNDSCALEPD.BCST imm8 m64 k xmm
//	VRNDSCALEPD.BCST imm8 m64 k ymm
//	VRNDSCALEPD.BCST imm8 m64 xmm
//	VRNDSCALEPD.BCST imm8 m64 ymm
//	VRNDSCALEPD.BCST imm8 m64 k zmm
//	VRNDSCALEPD.BCST imm8 m64 zmm
//
// Construct and append a VRNDSCALEPD.BCST instruction to the active function.
// Operates on the global context.
func VRNDSCALEPD_BCST(ops ...operand.Op) { ctx.VRNDSCALEPD_BCST(ops...) }
// VRNDSCALEPD_BCST_Z: Round Packed Double-Precision Floating-Point Values To Include A Given Number Of Fraction Bits (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VRNDSCALEPD.BCST.Z imm8 m64 k xmm
//	VRNDSCALEPD.BCST.Z imm8 m64 k ymm
//	VRNDSCALEPD.BCST.Z imm8 m64 k zmm
//
// Construct and append a VRNDSCALEPD.BCST.Z instruction to the active function.
func (c *Context) VRNDSCALEPD_BCST_Z(i, m, k, xyz operand.Op) {
	c.addinstruction(x86.VRNDSCALEPD_BCST_Z(i, m, k, xyz))
}

// VRNDSCALEPD_BCST_Z: Round Packed Double-Precision Floating-Point Values To Include A Given Number Of Fraction Bits (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VRNDSCALEPD.BCST.Z imm8 m64 k xmm
//	VRNDSCALEPD.BCST.Z imm8 m64 k ymm
//	VRNDSCALEPD.BCST.Z imm8 m64 k zmm
//
// Construct and append a VRNDSCALEPD.BCST.Z instruction to the active function.
// Operates on the global context.
func VRNDSCALEPD_BCST_Z(i, m, k, xyz operand.Op) { ctx.VRNDSCALEPD_BCST_Z(i, m, k, xyz) }
// VRNDSCALEPD_SAE: Round Packed Double-Precision Floating-Point Values To Include A Given Number Of Fraction Bits (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRNDSCALEPD.SAE imm8 zmm k zmm
|
|
// VRNDSCALEPD.SAE imm8 zmm zmm
|
|
//
|
|
// Construct and append a VRNDSCALEPD.SAE instruction to the active function.
|
|
func (c *Context) VRNDSCALEPD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VRNDSCALEPD_SAE(ops...))
|
|
}
|
|
|
|
// VRNDSCALEPD_SAE: Round Packed Double-Precision Floating-Point Values To Include A Given Number Of Fraction Bits (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRNDSCALEPD.SAE imm8 zmm k zmm
|
|
// VRNDSCALEPD.SAE imm8 zmm zmm
|
|
//
|
|
// Construct and append a VRNDSCALEPD.SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VRNDSCALEPD_SAE(ops ...operand.Op) { ctx.VRNDSCALEPD_SAE(ops...) }
|
|
|
|
// VRNDSCALEPD_SAE_Z: Round Packed Double-Precision Floating-Point Values To Include A Given Number Of Fraction Bits (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRNDSCALEPD.SAE.Z imm8 zmm k zmm
|
|
//
|
|
// Construct and append a VRNDSCALEPD.SAE.Z instruction to the active function.
|
|
func (c *Context) VRNDSCALEPD_SAE_Z(i, z, k, z1 operand.Op) {
|
|
c.addinstruction(x86.VRNDSCALEPD_SAE_Z(i, z, k, z1))
|
|
}
|
|
|
|
// VRNDSCALEPD_SAE_Z: Round Packed Double-Precision Floating-Point Values To Include A Given Number Of Fraction Bits (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRNDSCALEPD.SAE.Z imm8 zmm k zmm
|
|
//
|
|
// Construct and append a VRNDSCALEPD.SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VRNDSCALEPD_SAE_Z(i, z, k, z1 operand.Op) { ctx.VRNDSCALEPD_SAE_Z(i, z, k, z1) }
|
|
|
|
// VRNDSCALEPD_Z: Round Packed Double-Precision Floating-Point Values To Include A Given Number Of Fraction Bits (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRNDSCALEPD.Z imm8 m128 k xmm
|
|
// VRNDSCALEPD.Z imm8 m256 k ymm
|
|
// VRNDSCALEPD.Z imm8 xmm k xmm
|
|
// VRNDSCALEPD.Z imm8 ymm k ymm
|
|
// VRNDSCALEPD.Z imm8 m512 k zmm
|
|
// VRNDSCALEPD.Z imm8 zmm k zmm
|
|
//
|
|
// Construct and append a VRNDSCALEPD.Z instruction to the active function.
|
|
func (c *Context) VRNDSCALEPD_Z(i, mxyz, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VRNDSCALEPD_Z(i, mxyz, k, xyz))
|
|
}
|
|
|
|
// VRNDSCALEPD_Z: Round Packed Double-Precision Floating-Point Values To Include A Given Number Of Fraction Bits (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRNDSCALEPD.Z imm8 m128 k xmm
|
|
// VRNDSCALEPD.Z imm8 m256 k ymm
|
|
// VRNDSCALEPD.Z imm8 xmm k xmm
|
|
// VRNDSCALEPD.Z imm8 ymm k ymm
|
|
// VRNDSCALEPD.Z imm8 m512 k zmm
|
|
// VRNDSCALEPD.Z imm8 zmm k zmm
|
|
//
|
|
// Construct and append a VRNDSCALEPD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VRNDSCALEPD_Z(i, mxyz, k, xyz operand.Op) { ctx.VRNDSCALEPD_Z(i, mxyz, k, xyz) }
|
|
|
|
// VRNDSCALEPS: Round Packed Single-Precision Floating-Point Values To Include A Given Number Of Fraction Bits.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRNDSCALEPS imm8 m128 k xmm
|
|
// VRNDSCALEPS imm8 m128 xmm
|
|
// VRNDSCALEPS imm8 m256 k ymm
|
|
// VRNDSCALEPS imm8 m256 ymm
|
|
// VRNDSCALEPS imm8 xmm k xmm
|
|
// VRNDSCALEPS imm8 xmm xmm
|
|
// VRNDSCALEPS imm8 ymm k ymm
|
|
// VRNDSCALEPS imm8 ymm ymm
|
|
// VRNDSCALEPS imm8 m512 k zmm
|
|
// VRNDSCALEPS imm8 m512 zmm
|
|
// VRNDSCALEPS imm8 zmm k zmm
|
|
// VRNDSCALEPS imm8 zmm zmm
|
|
//
|
|
// Construct and append a VRNDSCALEPS instruction to the active function.
|
|
func (c *Context) VRNDSCALEPS(ops ...operand.Op) {
|
|
c.addinstruction(x86.VRNDSCALEPS(ops...))
|
|
}
|
|
|
|
// VRNDSCALEPS: Round Packed Single-Precision Floating-Point Values To Include A Given Number Of Fraction Bits.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRNDSCALEPS imm8 m128 k xmm
|
|
// VRNDSCALEPS imm8 m128 xmm
|
|
// VRNDSCALEPS imm8 m256 k ymm
|
|
// VRNDSCALEPS imm8 m256 ymm
|
|
// VRNDSCALEPS imm8 xmm k xmm
|
|
// VRNDSCALEPS imm8 xmm xmm
|
|
// VRNDSCALEPS imm8 ymm k ymm
|
|
// VRNDSCALEPS imm8 ymm ymm
|
|
// VRNDSCALEPS imm8 m512 k zmm
|
|
// VRNDSCALEPS imm8 m512 zmm
|
|
// VRNDSCALEPS imm8 zmm k zmm
|
|
// VRNDSCALEPS imm8 zmm zmm
|
|
//
|
|
// Construct and append a VRNDSCALEPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VRNDSCALEPS(ops ...operand.Op) { ctx.VRNDSCALEPS(ops...) }
|
|
|
|
// VRNDSCALEPS_BCST: Round Packed Single-Precision Floating-Point Values To Include A Given Number Of Fraction Bits (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRNDSCALEPS.BCST imm8 m32 k xmm
|
|
// VRNDSCALEPS.BCST imm8 m32 k ymm
|
|
// VRNDSCALEPS.BCST imm8 m32 xmm
|
|
// VRNDSCALEPS.BCST imm8 m32 ymm
|
|
// VRNDSCALEPS.BCST imm8 m32 k zmm
|
|
// VRNDSCALEPS.BCST imm8 m32 zmm
|
|
//
|
|
// Construct and append a VRNDSCALEPS.BCST instruction to the active function.
|
|
func (c *Context) VRNDSCALEPS_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VRNDSCALEPS_BCST(ops...))
|
|
}
|
|
|
|
// VRNDSCALEPS_BCST: Round Packed Single-Precision Floating-Point Values To Include A Given Number Of Fraction Bits (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRNDSCALEPS.BCST imm8 m32 k xmm
|
|
// VRNDSCALEPS.BCST imm8 m32 k ymm
|
|
// VRNDSCALEPS.BCST imm8 m32 xmm
|
|
// VRNDSCALEPS.BCST imm8 m32 ymm
|
|
// VRNDSCALEPS.BCST imm8 m32 k zmm
|
|
// VRNDSCALEPS.BCST imm8 m32 zmm
|
|
//
|
|
// Construct and append a VRNDSCALEPS.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VRNDSCALEPS_BCST(ops ...operand.Op) { ctx.VRNDSCALEPS_BCST(ops...) }
|
|
|
|
// VRNDSCALEPS_BCST_Z: Round Packed Single-Precision Floating-Point Values To Include A Given Number Of Fraction Bits (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRNDSCALEPS.BCST.Z imm8 m32 k xmm
|
|
// VRNDSCALEPS.BCST.Z imm8 m32 k ymm
|
|
// VRNDSCALEPS.BCST.Z imm8 m32 k zmm
|
|
//
|
|
// Construct and append a VRNDSCALEPS.BCST.Z instruction to the active function.
|
|
func (c *Context) VRNDSCALEPS_BCST_Z(i, m, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VRNDSCALEPS_BCST_Z(i, m, k, xyz))
|
|
}
|
|
|
|
// VRNDSCALEPS_BCST_Z: Round Packed Single-Precision Floating-Point Values To Include A Given Number Of Fraction Bits (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRNDSCALEPS.BCST.Z imm8 m32 k xmm
|
|
// VRNDSCALEPS.BCST.Z imm8 m32 k ymm
|
|
// VRNDSCALEPS.BCST.Z imm8 m32 k zmm
|
|
//
|
|
// Construct and append a VRNDSCALEPS.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VRNDSCALEPS_BCST_Z(i, m, k, xyz operand.Op) { ctx.VRNDSCALEPS_BCST_Z(i, m, k, xyz) }
|
|
|
|
// VRNDSCALEPS_SAE: Round Packed Single-Precision Floating-Point Values To Include A Given Number Of Fraction Bits (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRNDSCALEPS.SAE imm8 zmm k zmm
|
|
// VRNDSCALEPS.SAE imm8 zmm zmm
|
|
//
|
|
// Construct and append a VRNDSCALEPS.SAE instruction to the active function.
|
|
func (c *Context) VRNDSCALEPS_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VRNDSCALEPS_SAE(ops...))
|
|
}
|
|
|
|
// VRNDSCALEPS_SAE: Round Packed Single-Precision Floating-Point Values To Include A Given Number Of Fraction Bits (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRNDSCALEPS.SAE imm8 zmm k zmm
|
|
// VRNDSCALEPS.SAE imm8 zmm zmm
|
|
//
|
|
// Construct and append a VRNDSCALEPS.SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VRNDSCALEPS_SAE(ops ...operand.Op) { ctx.VRNDSCALEPS_SAE(ops...) }
|
|
|
|
// VRNDSCALEPS_SAE_Z: Round Packed Single-Precision Floating-Point Values To Include A Given Number Of Fraction Bits (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRNDSCALEPS.SAE.Z imm8 zmm k zmm
|
|
//
|
|
// Construct and append a VRNDSCALEPS.SAE.Z instruction to the active function.
|
|
func (c *Context) VRNDSCALEPS_SAE_Z(i, z, k, z1 operand.Op) {
|
|
c.addinstruction(x86.VRNDSCALEPS_SAE_Z(i, z, k, z1))
|
|
}
|
|
|
|
// VRNDSCALEPS_SAE_Z: Round Packed Single-Precision Floating-Point Values To Include A Given Number Of Fraction Bits (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRNDSCALEPS.SAE.Z imm8 zmm k zmm
|
|
//
|
|
// Construct and append a VRNDSCALEPS.SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VRNDSCALEPS_SAE_Z(i, z, k, z1 operand.Op) { ctx.VRNDSCALEPS_SAE_Z(i, z, k, z1) }
|
|
|
|
// VRNDSCALEPS_Z: Round Packed Single-Precision Floating-Point Values To Include A Given Number Of Fraction Bits (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRNDSCALEPS.Z imm8 m128 k xmm
|
|
// VRNDSCALEPS.Z imm8 m256 k ymm
|
|
// VRNDSCALEPS.Z imm8 xmm k xmm
|
|
// VRNDSCALEPS.Z imm8 ymm k ymm
|
|
// VRNDSCALEPS.Z imm8 m512 k zmm
|
|
// VRNDSCALEPS.Z imm8 zmm k zmm
|
|
//
|
|
// Construct and append a VRNDSCALEPS.Z instruction to the active function.
|
|
func (c *Context) VRNDSCALEPS_Z(i, mxyz, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VRNDSCALEPS_Z(i, mxyz, k, xyz))
|
|
}
|
|
|
|
// VRNDSCALEPS_Z: Round Packed Single-Precision Floating-Point Values To Include A Given Number Of Fraction Bits (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRNDSCALEPS.Z imm8 m128 k xmm
|
|
// VRNDSCALEPS.Z imm8 m256 k ymm
|
|
// VRNDSCALEPS.Z imm8 xmm k xmm
|
|
// VRNDSCALEPS.Z imm8 ymm k ymm
|
|
// VRNDSCALEPS.Z imm8 m512 k zmm
|
|
// VRNDSCALEPS.Z imm8 zmm k zmm
|
|
//
|
|
// Construct and append a VRNDSCALEPS.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VRNDSCALEPS_Z(i, mxyz, k, xyz operand.Op) { ctx.VRNDSCALEPS_Z(i, mxyz, k, xyz) }
|
|
|
|
// VRNDSCALESD: Round Scalar Double-Precision Floating-Point Value To Include A Given Number Of Fraction Bits.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRNDSCALESD imm8 m64 xmm k xmm
|
|
// VRNDSCALESD imm8 m64 xmm xmm
|
|
// VRNDSCALESD imm8 xmm xmm k xmm
|
|
// VRNDSCALESD imm8 xmm xmm xmm
|
|
//
|
|
// Construct and append a VRNDSCALESD instruction to the active function.
|
|
func (c *Context) VRNDSCALESD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VRNDSCALESD(ops...))
|
|
}
|
|
|
|
// VRNDSCALESD: Round Scalar Double-Precision Floating-Point Value To Include A Given Number Of Fraction Bits.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRNDSCALESD imm8 m64 xmm k xmm
|
|
// VRNDSCALESD imm8 m64 xmm xmm
|
|
// VRNDSCALESD imm8 xmm xmm k xmm
|
|
// VRNDSCALESD imm8 xmm xmm xmm
|
|
//
|
|
// Construct and append a VRNDSCALESD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VRNDSCALESD(ops ...operand.Op) { ctx.VRNDSCALESD(ops...) }
|
|
|
|
// VRNDSCALESD_SAE: Round Scalar Double-Precision Floating-Point Value To Include A Given Number Of Fraction Bits (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRNDSCALESD.SAE imm8 xmm xmm k xmm
|
|
// VRNDSCALESD.SAE imm8 xmm xmm xmm
|
|
//
|
|
// Construct and append a VRNDSCALESD.SAE instruction to the active function.
|
|
func (c *Context) VRNDSCALESD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VRNDSCALESD_SAE(ops...))
|
|
}
|
|
|
|
// VRNDSCALESD_SAE: Round Scalar Double-Precision Floating-Point Value To Include A Given Number Of Fraction Bits (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRNDSCALESD.SAE imm8 xmm xmm k xmm
|
|
// VRNDSCALESD.SAE imm8 xmm xmm xmm
|
|
//
|
|
// Construct and append a VRNDSCALESD.SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VRNDSCALESD_SAE(ops ...operand.Op) { ctx.VRNDSCALESD_SAE(ops...) }
|
|
|
|
// VRNDSCALESD_SAE_Z: Round Scalar Double-Precision Floating-Point Value To Include A Given Number Of Fraction Bits (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRNDSCALESD.SAE.Z imm8 xmm xmm k xmm
|
|
//
|
|
// Construct and append a VRNDSCALESD.SAE.Z instruction to the active function.
|
|
func (c *Context) VRNDSCALESD_SAE_Z(i, x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VRNDSCALESD_SAE_Z(i, x, x1, k, x2))
|
|
}
|
|
|
|
// VRNDSCALESD_SAE_Z: Round Scalar Double-Precision Floating-Point Value To Include A Given Number Of Fraction Bits (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRNDSCALESD.SAE.Z imm8 xmm xmm k xmm
|
|
//
|
|
// Construct and append a VRNDSCALESD.SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VRNDSCALESD_SAE_Z(i, x, x1, k, x2 operand.Op) { ctx.VRNDSCALESD_SAE_Z(i, x, x1, k, x2) }
|
|
|
|
// VRNDSCALESD_Z: Round Scalar Double-Precision Floating-Point Value To Include A Given Number Of Fraction Bits (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRNDSCALESD.Z imm8 m64 xmm k xmm
|
|
// VRNDSCALESD.Z imm8 xmm xmm k xmm
|
|
//
|
|
// Construct and append a VRNDSCALESD.Z instruction to the active function.
|
|
func (c *Context) VRNDSCALESD_Z(i, mx, x, k, x1 operand.Op) {
|
|
c.addinstruction(x86.VRNDSCALESD_Z(i, mx, x, k, x1))
|
|
}
|
|
|
|
// VRNDSCALESD_Z: Round Scalar Double-Precision Floating-Point Value To Include A Given Number Of Fraction Bits (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRNDSCALESD.Z imm8 m64 xmm k xmm
|
|
// VRNDSCALESD.Z imm8 xmm xmm k xmm
|
|
//
|
|
// Construct and append a VRNDSCALESD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VRNDSCALESD_Z(i, mx, x, k, x1 operand.Op) { ctx.VRNDSCALESD_Z(i, mx, x, k, x1) }
|
|
|
|
// VRNDSCALESS: Round Scalar Single-Precision Floating-Point Value To Include A Given Number Of Fraction Bits.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRNDSCALESS imm8 m32 xmm k xmm
|
|
// VRNDSCALESS imm8 m32 xmm xmm
|
|
// VRNDSCALESS imm8 xmm xmm k xmm
|
|
// VRNDSCALESS imm8 xmm xmm xmm
|
|
//
|
|
// Construct and append a VRNDSCALESS instruction to the active function.
|
|
func (c *Context) VRNDSCALESS(ops ...operand.Op) {
|
|
c.addinstruction(x86.VRNDSCALESS(ops...))
|
|
}
|
|
|
|
// VRNDSCALESS: Round Scalar Single-Precision Floating-Point Value To Include A Given Number Of Fraction Bits.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRNDSCALESS imm8 m32 xmm k xmm
|
|
// VRNDSCALESS imm8 m32 xmm xmm
|
|
// VRNDSCALESS imm8 xmm xmm k xmm
|
|
// VRNDSCALESS imm8 xmm xmm xmm
|
|
//
|
|
// Construct and append a VRNDSCALESS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VRNDSCALESS(ops ...operand.Op) { ctx.VRNDSCALESS(ops...) }
|
|
|
|
// VRNDSCALESS_SAE: Round Scalar Single-Precision Floating-Point Value To Include A Given Number Of Fraction Bits (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRNDSCALESS.SAE imm8 xmm xmm k xmm
|
|
// VRNDSCALESS.SAE imm8 xmm xmm xmm
|
|
//
|
|
// Construct and append a VRNDSCALESS.SAE instruction to the active function.
|
|
func (c *Context) VRNDSCALESS_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VRNDSCALESS_SAE(ops...))
|
|
}
|
|
|
|
// VRNDSCALESS_SAE: Round Scalar Single-Precision Floating-Point Value To Include A Given Number Of Fraction Bits (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRNDSCALESS.SAE imm8 xmm xmm k xmm
|
|
// VRNDSCALESS.SAE imm8 xmm xmm xmm
|
|
//
|
|
// Construct and append a VRNDSCALESS.SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VRNDSCALESS_SAE(ops ...operand.Op) { ctx.VRNDSCALESS_SAE(ops...) }
|
|
|
|
// VRNDSCALESS_SAE_Z: Round Scalar Single-Precision Floating-Point Value To Include A Given Number Of Fraction Bits (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRNDSCALESS.SAE.Z imm8 xmm xmm k xmm
|
|
//
|
|
// Construct and append a VRNDSCALESS.SAE.Z instruction to the active function.
|
|
func (c *Context) VRNDSCALESS_SAE_Z(i, x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VRNDSCALESS_SAE_Z(i, x, x1, k, x2))
|
|
}
|
|
|
|
// VRNDSCALESS_SAE_Z: Round Scalar Single-Precision Floating-Point Value To Include A Given Number Of Fraction Bits (Suppress All Exceptions, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRNDSCALESS.SAE.Z imm8 xmm xmm k xmm
|
|
//
|
|
// Construct and append a VRNDSCALESS.SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VRNDSCALESS_SAE_Z(i, x, x1, k, x2 operand.Op) { ctx.VRNDSCALESS_SAE_Z(i, x, x1, k, x2) }
|
|
|
|
// VRNDSCALESS_Z: Round Scalar Single-Precision Floating-Point Value To Include A Given Number Of Fraction Bits (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRNDSCALESS.Z imm8 m32 xmm k xmm
|
|
// VRNDSCALESS.Z imm8 xmm xmm k xmm
|
|
//
|
|
// Construct and append a VRNDSCALESS.Z instruction to the active function.
|
|
func (c *Context) VRNDSCALESS_Z(i, mx, x, k, x1 operand.Op) {
|
|
c.addinstruction(x86.VRNDSCALESS_Z(i, mx, x, k, x1))
|
|
}
|
|
|
|
// VRNDSCALESS_Z: Round Scalar Single-Precision Floating-Point Value To Include A Given Number Of Fraction Bits (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRNDSCALESS.Z imm8 m32 xmm k xmm
|
|
// VRNDSCALESS.Z imm8 xmm xmm k xmm
|
|
//
|
|
// Construct and append a VRNDSCALESS.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VRNDSCALESS_Z(i, mx, x, k, x1 operand.Op) { ctx.VRNDSCALESS_Z(i, mx, x, k, x1) }
|
|
|
|
// VROUNDPD: Round Packed Double Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VROUNDPD imm8 m128 xmm
|
|
// VROUNDPD imm8 m256 ymm
|
|
// VROUNDPD imm8 xmm xmm
|
|
// VROUNDPD imm8 ymm ymm
|
|
//
|
|
// Construct and append a VROUNDPD instruction to the active function.
|
|
func (c *Context) VROUNDPD(i, mxy, xy operand.Op) {
|
|
c.addinstruction(x86.VROUNDPD(i, mxy, xy))
|
|
}
|
|
|
|
// VROUNDPD: Round Packed Double Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VROUNDPD imm8 m128 xmm
|
|
// VROUNDPD imm8 m256 ymm
|
|
// VROUNDPD imm8 xmm xmm
|
|
// VROUNDPD imm8 ymm ymm
|
|
//
|
|
// Construct and append a VROUNDPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VROUNDPD(i, mxy, xy operand.Op) { ctx.VROUNDPD(i, mxy, xy) }
|
|
|
|
// VROUNDPS: Round Packed Single Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VROUNDPS imm8 m128 xmm
|
|
// VROUNDPS imm8 m256 ymm
|
|
// VROUNDPS imm8 xmm xmm
|
|
// VROUNDPS imm8 ymm ymm
|
|
//
|
|
// Construct and append a VROUNDPS instruction to the active function.
|
|
func (c *Context) VROUNDPS(i, mxy, xy operand.Op) {
|
|
c.addinstruction(x86.VROUNDPS(i, mxy, xy))
|
|
}
|
|
|
|
// VROUNDPS: Round Packed Single Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VROUNDPS imm8 m128 xmm
|
|
// VROUNDPS imm8 m256 ymm
|
|
// VROUNDPS imm8 xmm xmm
|
|
// VROUNDPS imm8 ymm ymm
|
|
//
|
|
// Construct and append a VROUNDPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VROUNDPS(i, mxy, xy operand.Op) { ctx.VROUNDPS(i, mxy, xy) }
|
|
|
|
// VROUNDSD: Round Scalar Double Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VROUNDSD imm8 m64 xmm xmm
|
|
// VROUNDSD imm8 xmm xmm xmm
|
|
//
|
|
// Construct and append a VROUNDSD instruction to the active function.
|
|
func (c *Context) VROUNDSD(i, mx, x, x1 operand.Op) {
|
|
c.addinstruction(x86.VROUNDSD(i, mx, x, x1))
|
|
}
|
|
|
|
// VROUNDSD: Round Scalar Double Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VROUNDSD imm8 m64 xmm xmm
|
|
// VROUNDSD imm8 xmm xmm xmm
|
|
//
|
|
// Construct and append a VROUNDSD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VROUNDSD(i, mx, x, x1 operand.Op) { ctx.VROUNDSD(i, mx, x, x1) }
|
|
|
|
// VROUNDSS: Round Scalar Single Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VROUNDSS imm8 m32 xmm xmm
|
|
// VROUNDSS imm8 xmm xmm xmm
|
|
//
|
|
// Construct and append a VROUNDSS instruction to the active function.
|
|
func (c *Context) VROUNDSS(i, mx, x, x1 operand.Op) {
|
|
c.addinstruction(x86.VROUNDSS(i, mx, x, x1))
|
|
}
|
|
|
|
// VROUNDSS: Round Scalar Single Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VROUNDSS imm8 m32 xmm xmm
|
|
// VROUNDSS imm8 xmm xmm xmm
|
|
//
|
|
// Construct and append a VROUNDSS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VROUNDSS(i, mx, x, x1 operand.Op) { ctx.VROUNDSS(i, mx, x, x1) }
|
|
|
|
// VRSQRT14PD: Compute Approximate Reciprocals of Square Roots of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRSQRT14PD m128 k xmm
|
|
// VRSQRT14PD m128 xmm
|
|
// VRSQRT14PD m256 k ymm
|
|
// VRSQRT14PD m256 ymm
|
|
// VRSQRT14PD xmm k xmm
|
|
// VRSQRT14PD xmm xmm
|
|
// VRSQRT14PD ymm k ymm
|
|
// VRSQRT14PD ymm ymm
|
|
// VRSQRT14PD m512 k zmm
|
|
// VRSQRT14PD m512 zmm
|
|
// VRSQRT14PD zmm k zmm
|
|
// VRSQRT14PD zmm zmm
|
|
//
|
|
// Construct and append a VRSQRT14PD instruction to the active function.
|
|
func (c *Context) VRSQRT14PD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VRSQRT14PD(ops...))
|
|
}
|
|
|
|
// VRSQRT14PD: Compute Approximate Reciprocals of Square Roots of Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRSQRT14PD m128 k xmm
|
|
// VRSQRT14PD m128 xmm
|
|
// VRSQRT14PD m256 k ymm
|
|
// VRSQRT14PD m256 ymm
|
|
// VRSQRT14PD xmm k xmm
|
|
// VRSQRT14PD xmm xmm
|
|
// VRSQRT14PD ymm k ymm
|
|
// VRSQRT14PD ymm ymm
|
|
// VRSQRT14PD m512 k zmm
|
|
// VRSQRT14PD m512 zmm
|
|
// VRSQRT14PD zmm k zmm
|
|
// VRSQRT14PD zmm zmm
|
|
//
|
|
// Construct and append a VRSQRT14PD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VRSQRT14PD(ops ...operand.Op) { ctx.VRSQRT14PD(ops...) }
|
|
|
|
// VRSQRT14PD_BCST: Compute Approximate Reciprocals of Square Roots of Packed Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRSQRT14PD.BCST m64 k xmm
|
|
// VRSQRT14PD.BCST m64 k ymm
|
|
// VRSQRT14PD.BCST m64 xmm
|
|
// VRSQRT14PD.BCST m64 ymm
|
|
// VRSQRT14PD.BCST m64 k zmm
|
|
// VRSQRT14PD.BCST m64 zmm
|
|
//
|
|
// Construct and append a VRSQRT14PD.BCST instruction to the active function.
|
|
func (c *Context) VRSQRT14PD_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VRSQRT14PD_BCST(ops...))
|
|
}
|
|
|
|
// VRSQRT14PD_BCST: Compute Approximate Reciprocals of Square Roots of Packed Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRSQRT14PD.BCST m64 k xmm
|
|
// VRSQRT14PD.BCST m64 k ymm
|
|
// VRSQRT14PD.BCST m64 xmm
|
|
// VRSQRT14PD.BCST m64 ymm
|
|
// VRSQRT14PD.BCST m64 k zmm
|
|
// VRSQRT14PD.BCST m64 zmm
|
|
//
|
|
// Construct and append a VRSQRT14PD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VRSQRT14PD_BCST(ops ...operand.Op) { ctx.VRSQRT14PD_BCST(ops...) }
|
|
|
|
// VRSQRT14PD_BCST_Z: Compute Approximate Reciprocals of Square Roots of Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRSQRT14PD.BCST.Z m64 k xmm
|
|
// VRSQRT14PD.BCST.Z m64 k ymm
|
|
// VRSQRT14PD.BCST.Z m64 k zmm
|
|
//
|
|
// Construct and append a VRSQRT14PD.BCST.Z instruction to the active function.
|
|
func (c *Context) VRSQRT14PD_BCST_Z(m, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VRSQRT14PD_BCST_Z(m, k, xyz))
|
|
}
|
|
|
|
// VRSQRT14PD_BCST_Z: Compute Approximate Reciprocals of Square Roots of Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRSQRT14PD.BCST.Z m64 k xmm
|
|
// VRSQRT14PD.BCST.Z m64 k ymm
|
|
// VRSQRT14PD.BCST.Z m64 k zmm
|
|
//
|
|
// Construct and append a VRSQRT14PD.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VRSQRT14PD_BCST_Z(m, k, xyz operand.Op) { ctx.VRSQRT14PD_BCST_Z(m, k, xyz) }
|
|
|
|
// VRSQRT14PD_Z: Compute Approximate Reciprocals of Square Roots of Packed Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRSQRT14PD.Z m128 k xmm
|
|
// VRSQRT14PD.Z m256 k ymm
|
|
// VRSQRT14PD.Z xmm k xmm
|
|
// VRSQRT14PD.Z ymm k ymm
|
|
// VRSQRT14PD.Z m512 k zmm
|
|
// VRSQRT14PD.Z zmm k zmm
|
|
//
|
|
// Construct and append a VRSQRT14PD.Z instruction to the active function.
|
|
func (c *Context) VRSQRT14PD_Z(mxyz, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VRSQRT14PD_Z(mxyz, k, xyz))
|
|
}
|
|
|
|
// VRSQRT14PD_Z: Compute Approximate Reciprocals of Square Roots of Packed Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRSQRT14PD.Z m128 k xmm
|
|
// VRSQRT14PD.Z m256 k ymm
|
|
// VRSQRT14PD.Z xmm k xmm
|
|
// VRSQRT14PD.Z ymm k ymm
|
|
// VRSQRT14PD.Z m512 k zmm
|
|
// VRSQRT14PD.Z zmm k zmm
|
|
//
|
|
// Construct and append a VRSQRT14PD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VRSQRT14PD_Z(mxyz, k, xyz operand.Op) { ctx.VRSQRT14PD_Z(mxyz, k, xyz) }
|
|
|
|
// VRSQRT14PS: Compute Approximate Reciprocals of Square Roots of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRSQRT14PS m128 k xmm
|
|
// VRSQRT14PS m128 xmm
|
|
// VRSQRT14PS m256 k ymm
|
|
// VRSQRT14PS m256 ymm
|
|
// VRSQRT14PS xmm k xmm
|
|
// VRSQRT14PS xmm xmm
|
|
// VRSQRT14PS ymm k ymm
|
|
// VRSQRT14PS ymm ymm
|
|
// VRSQRT14PS m512 k zmm
|
|
// VRSQRT14PS m512 zmm
|
|
// VRSQRT14PS zmm k zmm
|
|
// VRSQRT14PS zmm zmm
|
|
//
|
|
// Construct and append a VRSQRT14PS instruction to the active function.
|
|
func (c *Context) VRSQRT14PS(ops ...operand.Op) {
|
|
c.addinstruction(x86.VRSQRT14PS(ops...))
|
|
}
|
|
|
|
// VRSQRT14PS: Compute Approximate Reciprocals of Square Roots of Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRSQRT14PS m128 k xmm
|
|
// VRSQRT14PS m128 xmm
|
|
// VRSQRT14PS m256 k ymm
|
|
// VRSQRT14PS m256 ymm
|
|
// VRSQRT14PS xmm k xmm
|
|
// VRSQRT14PS xmm xmm
|
|
// VRSQRT14PS ymm k ymm
|
|
// VRSQRT14PS ymm ymm
|
|
// VRSQRT14PS m512 k zmm
|
|
// VRSQRT14PS m512 zmm
|
|
// VRSQRT14PS zmm k zmm
|
|
// VRSQRT14PS zmm zmm
|
|
//
|
|
// Construct and append a VRSQRT14PS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VRSQRT14PS(ops ...operand.Op) { ctx.VRSQRT14PS(ops...) }
|
|
|
|
// VRSQRT14PS_BCST: Compute Approximate Reciprocals of Square Roots of Packed Single-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRSQRT14PS.BCST m32 k xmm
|
|
// VRSQRT14PS.BCST m32 k ymm
|
|
// VRSQRT14PS.BCST m32 xmm
|
|
// VRSQRT14PS.BCST m32 ymm
|
|
// VRSQRT14PS.BCST m32 k zmm
|
|
// VRSQRT14PS.BCST m32 zmm
|
|
//
|
|
// Construct and append a VRSQRT14PS.BCST instruction to the active function.
|
|
func (c *Context) VRSQRT14PS_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VRSQRT14PS_BCST(ops...))
|
|
}
|
|
|
|
// VRSQRT14PS_BCST: Compute Approximate Reciprocals of Square Roots of Packed Single-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRSQRT14PS.BCST m32 k xmm
|
|
// VRSQRT14PS.BCST m32 k ymm
|
|
// VRSQRT14PS.BCST m32 xmm
|
|
// VRSQRT14PS.BCST m32 ymm
|
|
// VRSQRT14PS.BCST m32 k zmm
|
|
// VRSQRT14PS.BCST m32 zmm
|
|
//
|
|
// Construct and append a VRSQRT14PS.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VRSQRT14PS_BCST(ops ...operand.Op) { ctx.VRSQRT14PS_BCST(ops...) }
|
|
|
|
// VRSQRT14PS_BCST_Z: Compute Approximate Reciprocals of Square Roots of Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRSQRT14PS.BCST.Z m32 k xmm
|
|
// VRSQRT14PS.BCST.Z m32 k ymm
|
|
// VRSQRT14PS.BCST.Z m32 k zmm
|
|
//
|
|
// Construct and append a VRSQRT14PS.BCST.Z instruction to the active function.
|
|
func (c *Context) VRSQRT14PS_BCST_Z(m, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VRSQRT14PS_BCST_Z(m, k, xyz))
|
|
}
|
|
|
|
// VRSQRT14PS_BCST_Z: Compute Approximate Reciprocals of Square Roots of Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRSQRT14PS.BCST.Z m32 k xmm
|
|
// VRSQRT14PS.BCST.Z m32 k ymm
|
|
// VRSQRT14PS.BCST.Z m32 k zmm
|
|
//
|
|
// Construct and append a VRSQRT14PS.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VRSQRT14PS_BCST_Z(m, k, xyz operand.Op) { ctx.VRSQRT14PS_BCST_Z(m, k, xyz) }
|
|
|
|
// VRSQRT14PS_Z: Compute Approximate Reciprocals of Square Roots of Packed Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRSQRT14PS.Z m128 k xmm
|
|
// VRSQRT14PS.Z m256 k ymm
|
|
// VRSQRT14PS.Z xmm k xmm
|
|
// VRSQRT14PS.Z ymm k ymm
|
|
// VRSQRT14PS.Z m512 k zmm
|
|
// VRSQRT14PS.Z zmm k zmm
|
|
//
|
|
// Construct and append a VRSQRT14PS.Z instruction to the active function.
|
|
func (c *Context) VRSQRT14PS_Z(mxyz, k, xyz operand.Op) {
|
|
c.addinstruction(x86.VRSQRT14PS_Z(mxyz, k, xyz))
|
|
}
|
|
|
|
// VRSQRT14PS_Z: Compute Approximate Reciprocals of Square Roots of Packed Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRSQRT14PS.Z m128 k xmm
|
|
// VRSQRT14PS.Z m256 k ymm
|
|
// VRSQRT14PS.Z xmm k xmm
|
|
// VRSQRT14PS.Z ymm k ymm
|
|
// VRSQRT14PS.Z m512 k zmm
|
|
// VRSQRT14PS.Z zmm k zmm
|
|
//
|
|
// Construct and append a VRSQRT14PS.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VRSQRT14PS_Z(mxyz, k, xyz operand.Op) { ctx.VRSQRT14PS_Z(mxyz, k, xyz) }
|
|
|
|
// VRSQRT14SD: Compute Approximate Reciprocal of a Square Root of a Scalar Double-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRSQRT14SD m64 xmm k xmm
|
|
// VRSQRT14SD m64 xmm xmm
|
|
// VRSQRT14SD xmm xmm k xmm
|
|
// VRSQRT14SD xmm xmm xmm
|
|
//
|
|
// Construct and append a VRSQRT14SD instruction to the active function.
|
|
func (c *Context) VRSQRT14SD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VRSQRT14SD(ops...))
|
|
}
|
|
|
|
// VRSQRT14SD: Compute Approximate Reciprocal of a Square Root of a Scalar Double-Precision Floating-Point Value.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRSQRT14SD m64 xmm k xmm
|
|
// VRSQRT14SD m64 xmm xmm
|
|
// VRSQRT14SD xmm xmm k xmm
|
|
// VRSQRT14SD xmm xmm xmm
|
|
//
|
|
// Construct and append a VRSQRT14SD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VRSQRT14SD(ops ...operand.Op) { ctx.VRSQRT14SD(ops...) }
|
|
|
|
// VRSQRT14SD_Z: Compute Approximate Reciprocal of a Square Root of a Scalar Double-Precision Floating-Point Value (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRSQRT14SD.Z m64 xmm k xmm
|
|
// VRSQRT14SD.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VRSQRT14SD.Z instruction to the active function.
|
|
func (c *Context) VRSQRT14SD_Z(mx, x, k, x1 operand.Op) {
|
|
c.addinstruction(x86.VRSQRT14SD_Z(mx, x, k, x1))
|
|
}
|
|
|
|
// VRSQRT14SD_Z: Compute Approximate Reciprocal of a Square Root of a Scalar Double-Precision Floating-Point Value (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VRSQRT14SD.Z m64 xmm k xmm
|
|
// VRSQRT14SD.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VRSQRT14SD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VRSQRT14SD_Z(mx, x, k, x1 operand.Op) { ctx.VRSQRT14SD_Z(mx, x, k, x1) }
|
|
|
|
// VRSQRT14SS: Compute Approximate Reciprocal of a Square Root of a Scalar Single-Precision Floating-Point Value.
//
// Forms:
//
//	VRSQRT14SS m32 xmm k xmm
//	VRSQRT14SS m32 xmm xmm
//	VRSQRT14SS xmm xmm k xmm
//	VRSQRT14SS xmm xmm xmm
//
// Construct and append a VRSQRT14SS instruction to the active function.
func (c *Context) VRSQRT14SS(ops ...operand.Op) {
	c.addinstruction(x86.VRSQRT14SS(ops...))
}

// VRSQRT14SS: Compute Approximate Reciprocal of a Square Root of a Scalar Single-Precision Floating-Point Value.
//
// Forms:
//
//	VRSQRT14SS m32 xmm k xmm
//	VRSQRT14SS m32 xmm xmm
//	VRSQRT14SS xmm xmm k xmm
//	VRSQRT14SS xmm xmm xmm
//
// Construct and append a VRSQRT14SS instruction to the active function.
// Operates on the global context.
func VRSQRT14SS(ops ...operand.Op) { ctx.VRSQRT14SS(ops...) }
|
|
|
|
// VRSQRT14SS_Z: Compute Approximate Reciprocal of a Square Root of a Scalar Single-Precision Floating-Point Value (Zeroing Masking).
//
// Forms:
//
//	VRSQRT14SS.Z m32 xmm k xmm
//	VRSQRT14SS.Z xmm xmm k xmm
//
// Construct and append a VRSQRT14SS.Z instruction to the active function.
func (c *Context) VRSQRT14SS_Z(mx, x, k, x1 operand.Op) {
	c.addinstruction(x86.VRSQRT14SS_Z(mx, x, k, x1))
}

// VRSQRT14SS_Z: Compute Approximate Reciprocal of a Square Root of a Scalar Single-Precision Floating-Point Value (Zeroing Masking).
//
// Forms:
//
//	VRSQRT14SS.Z m32 xmm k xmm
//	VRSQRT14SS.Z xmm xmm k xmm
//
// Construct and append a VRSQRT14SS.Z instruction to the active function.
// Operates on the global context.
func VRSQRT14SS_Z(mx, x, k, x1 operand.Op) { ctx.VRSQRT14SS_Z(mx, x, k, x1) }
|
|
|
|
// VRSQRT28PD: Approximation to the Reciprocal Square Root of Packed Double-Precision Floating-Point Values with Less Than 2^-28 Relative Error.
//
// Forms:
//
//	VRSQRT28PD m512 k zmm
//	VRSQRT28PD m512 zmm
//	VRSQRT28PD zmm k zmm
//	VRSQRT28PD zmm zmm
//
// Construct and append a VRSQRT28PD instruction to the active function.
func (c *Context) VRSQRT28PD(ops ...operand.Op) {
	c.addinstruction(x86.VRSQRT28PD(ops...))
}

// VRSQRT28PD: Approximation to the Reciprocal Square Root of Packed Double-Precision Floating-Point Values with Less Than 2^-28 Relative Error.
//
// Forms:
//
//	VRSQRT28PD m512 k zmm
//	VRSQRT28PD m512 zmm
//	VRSQRT28PD zmm k zmm
//	VRSQRT28PD zmm zmm
//
// Construct and append a VRSQRT28PD instruction to the active function.
// Operates on the global context.
func VRSQRT28PD(ops ...operand.Op) { ctx.VRSQRT28PD(ops...) }
|
|
|
|
// VRSQRT28PD_BCST: Approximation to the Reciprocal Square Root of Packed Double-Precision Floating-Point Values with Less Than 2^-28 Relative Error (Broadcast).
//
// Forms:
//
//	VRSQRT28PD.BCST m64 k zmm
//	VRSQRT28PD.BCST m64 zmm
//
// Construct and append a VRSQRT28PD.BCST instruction to the active function.
func (c *Context) VRSQRT28PD_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VRSQRT28PD_BCST(ops...))
}

// VRSQRT28PD_BCST: Approximation to the Reciprocal Square Root of Packed Double-Precision Floating-Point Values with Less Than 2^-28 Relative Error (Broadcast).
//
// Forms:
//
//	VRSQRT28PD.BCST m64 k zmm
//	VRSQRT28PD.BCST m64 zmm
//
// Construct and append a VRSQRT28PD.BCST instruction to the active function.
// Operates on the global context.
func VRSQRT28PD_BCST(ops ...operand.Op) { ctx.VRSQRT28PD_BCST(ops...) }
|
|
|
|
// VRSQRT28PD_BCST_Z: Approximation to the Reciprocal Square Root of Packed Double-Precision Floating-Point Values with Less Than 2^-28 Relative Error (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VRSQRT28PD.BCST.Z m64 k zmm
//
// Construct and append a VRSQRT28PD.BCST.Z instruction to the active function.
func (c *Context) VRSQRT28PD_BCST_Z(m, k, z operand.Op) {
	c.addinstruction(x86.VRSQRT28PD_BCST_Z(m, k, z))
}

// VRSQRT28PD_BCST_Z: Approximation to the Reciprocal Square Root of Packed Double-Precision Floating-Point Values with Less Than 2^-28 Relative Error (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VRSQRT28PD.BCST.Z m64 k zmm
//
// Construct and append a VRSQRT28PD.BCST.Z instruction to the active function.
// Operates on the global context.
func VRSQRT28PD_BCST_Z(m, k, z operand.Op) { ctx.VRSQRT28PD_BCST_Z(m, k, z) }
|
|
|
|
// VRSQRT28PD_SAE: Approximation to the Reciprocal Square Root of Packed Double-Precision Floating-Point Values with Less Than 2^-28 Relative Error (Suppress All Exceptions).
//
// Forms:
//
//	VRSQRT28PD.SAE zmm k zmm
//	VRSQRT28PD.SAE zmm zmm
//
// Construct and append a VRSQRT28PD.SAE instruction to the active function.
func (c *Context) VRSQRT28PD_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VRSQRT28PD_SAE(ops...))
}

// VRSQRT28PD_SAE: Approximation to the Reciprocal Square Root of Packed Double-Precision Floating-Point Values with Less Than 2^-28 Relative Error (Suppress All Exceptions).
//
// Forms:
//
//	VRSQRT28PD.SAE zmm k zmm
//	VRSQRT28PD.SAE zmm zmm
//
// Construct and append a VRSQRT28PD.SAE instruction to the active function.
// Operates on the global context.
func VRSQRT28PD_SAE(ops ...operand.Op) { ctx.VRSQRT28PD_SAE(ops...) }
|
|
|
|
// VRSQRT28PD_SAE_Z: Approximation to the Reciprocal Square Root of Packed Double-Precision Floating-Point Values with Less Than 2^-28 Relative Error (Suppress All Exceptions, Zeroing Masking).
//
// Forms:
//
//	VRSQRT28PD.SAE.Z zmm k zmm
//
// Construct and append a VRSQRT28PD.SAE.Z instruction to the active function.
func (c *Context) VRSQRT28PD_SAE_Z(z, k, z1 operand.Op) {
	c.addinstruction(x86.VRSQRT28PD_SAE_Z(z, k, z1))
}

// VRSQRT28PD_SAE_Z: Approximation to the Reciprocal Square Root of Packed Double-Precision Floating-Point Values with Less Than 2^-28 Relative Error (Suppress All Exceptions, Zeroing Masking).
//
// Forms:
//
//	VRSQRT28PD.SAE.Z zmm k zmm
//
// Construct and append a VRSQRT28PD.SAE.Z instruction to the active function.
// Operates on the global context.
func VRSQRT28PD_SAE_Z(z, k, z1 operand.Op) { ctx.VRSQRT28PD_SAE_Z(z, k, z1) }
|
|
|
|
// VRSQRT28PD_Z: Approximation to the Reciprocal Square Root of Packed Double-Precision Floating-Point Values with Less Than 2^-28 Relative Error (Zeroing Masking).
//
// Forms:
//
//	VRSQRT28PD.Z m512 k zmm
//	VRSQRT28PD.Z zmm k zmm
//
// Construct and append a VRSQRT28PD.Z instruction to the active function.
func (c *Context) VRSQRT28PD_Z(mz, k, z operand.Op) {
	c.addinstruction(x86.VRSQRT28PD_Z(mz, k, z))
}

// VRSQRT28PD_Z: Approximation to the Reciprocal Square Root of Packed Double-Precision Floating-Point Values with Less Than 2^-28 Relative Error (Zeroing Masking).
//
// Forms:
//
//	VRSQRT28PD.Z m512 k zmm
//	VRSQRT28PD.Z zmm k zmm
//
// Construct and append a VRSQRT28PD.Z instruction to the active function.
// Operates on the global context.
func VRSQRT28PD_Z(mz, k, z operand.Op) { ctx.VRSQRT28PD_Z(mz, k, z) }
|
|
|
|
// VRSQRT28PS: Approximation to the Reciprocal Square Root of Packed Single-Precision Floating-Point Values with Less Than 2^-28 Relative Error.
//
// Forms:
//
//	VRSQRT28PS m512 k zmm
//	VRSQRT28PS m512 zmm
//	VRSQRT28PS zmm k zmm
//	VRSQRT28PS zmm zmm
//
// Construct and append a VRSQRT28PS instruction to the active function.
func (c *Context) VRSQRT28PS(ops ...operand.Op) {
	c.addinstruction(x86.VRSQRT28PS(ops...))
}

// VRSQRT28PS: Approximation to the Reciprocal Square Root of Packed Single-Precision Floating-Point Values with Less Than 2^-28 Relative Error.
//
// Forms:
//
//	VRSQRT28PS m512 k zmm
//	VRSQRT28PS m512 zmm
//	VRSQRT28PS zmm k zmm
//	VRSQRT28PS zmm zmm
//
// Construct and append a VRSQRT28PS instruction to the active function.
// Operates on the global context.
func VRSQRT28PS(ops ...operand.Op) { ctx.VRSQRT28PS(ops...) }
|
|
|
|
// VRSQRT28PS_BCST: Approximation to the Reciprocal Square Root of Packed Single-Precision Floating-Point Values with Less Than 2^-28 Relative Error (Broadcast).
//
// Forms:
//
//	VRSQRT28PS.BCST m32 k zmm
//	VRSQRT28PS.BCST m32 zmm
//
// Construct and append a VRSQRT28PS.BCST instruction to the active function.
func (c *Context) VRSQRT28PS_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VRSQRT28PS_BCST(ops...))
}

// VRSQRT28PS_BCST: Approximation to the Reciprocal Square Root of Packed Single-Precision Floating-Point Values with Less Than 2^-28 Relative Error (Broadcast).
//
// Forms:
//
//	VRSQRT28PS.BCST m32 k zmm
//	VRSQRT28PS.BCST m32 zmm
//
// Construct and append a VRSQRT28PS.BCST instruction to the active function.
// Operates on the global context.
func VRSQRT28PS_BCST(ops ...operand.Op) { ctx.VRSQRT28PS_BCST(ops...) }
|
|
|
|
// VRSQRT28PS_BCST_Z: Approximation to the Reciprocal Square Root of Packed Single-Precision Floating-Point Values with Less Than 2^-28 Relative Error (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VRSQRT28PS.BCST.Z m32 k zmm
//
// Construct and append a VRSQRT28PS.BCST.Z instruction to the active function.
func (c *Context) VRSQRT28PS_BCST_Z(m, k, z operand.Op) {
	c.addinstruction(x86.VRSQRT28PS_BCST_Z(m, k, z))
}

// VRSQRT28PS_BCST_Z: Approximation to the Reciprocal Square Root of Packed Single-Precision Floating-Point Values with Less Than 2^-28 Relative Error (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VRSQRT28PS.BCST.Z m32 k zmm
//
// Construct and append a VRSQRT28PS.BCST.Z instruction to the active function.
// Operates on the global context.
func VRSQRT28PS_BCST_Z(m, k, z operand.Op) { ctx.VRSQRT28PS_BCST_Z(m, k, z) }
|
|
|
|
// VRSQRT28PS_SAE: Approximation to the Reciprocal Square Root of Packed Single-Precision Floating-Point Values with Less Than 2^-28 Relative Error (Suppress All Exceptions).
//
// Forms:
//
//	VRSQRT28PS.SAE zmm k zmm
//	VRSQRT28PS.SAE zmm zmm
//
// Construct and append a VRSQRT28PS.SAE instruction to the active function.
func (c *Context) VRSQRT28PS_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VRSQRT28PS_SAE(ops...))
}

// VRSQRT28PS_SAE: Approximation to the Reciprocal Square Root of Packed Single-Precision Floating-Point Values with Less Than 2^-28 Relative Error (Suppress All Exceptions).
//
// Forms:
//
//	VRSQRT28PS.SAE zmm k zmm
//	VRSQRT28PS.SAE zmm zmm
//
// Construct and append a VRSQRT28PS.SAE instruction to the active function.
// Operates on the global context.
func VRSQRT28PS_SAE(ops ...operand.Op) { ctx.VRSQRT28PS_SAE(ops...) }
|
|
|
|
// VRSQRT28PS_SAE_Z: Approximation to the Reciprocal Square Root of Packed Single-Precision Floating-Point Values with Less Than 2^-28 Relative Error (Suppress All Exceptions, Zeroing Masking).
//
// Forms:
//
//	VRSQRT28PS.SAE.Z zmm k zmm
//
// Construct and append a VRSQRT28PS.SAE.Z instruction to the active function.
func (c *Context) VRSQRT28PS_SAE_Z(z, k, z1 operand.Op) {
	c.addinstruction(x86.VRSQRT28PS_SAE_Z(z, k, z1))
}

// VRSQRT28PS_SAE_Z: Approximation to the Reciprocal Square Root of Packed Single-Precision Floating-Point Values with Less Than 2^-28 Relative Error (Suppress All Exceptions, Zeroing Masking).
//
// Forms:
//
//	VRSQRT28PS.SAE.Z zmm k zmm
//
// Construct and append a VRSQRT28PS.SAE.Z instruction to the active function.
// Operates on the global context.
func VRSQRT28PS_SAE_Z(z, k, z1 operand.Op) { ctx.VRSQRT28PS_SAE_Z(z, k, z1) }
|
|
|
|
// VRSQRT28PS_Z: Approximation to the Reciprocal Square Root of Packed Single-Precision Floating-Point Values with Less Than 2^-28 Relative Error (Zeroing Masking).
//
// Forms:
//
//	VRSQRT28PS.Z m512 k zmm
//	VRSQRT28PS.Z zmm k zmm
//
// Construct and append a VRSQRT28PS.Z instruction to the active function.
func (c *Context) VRSQRT28PS_Z(mz, k, z operand.Op) {
	c.addinstruction(x86.VRSQRT28PS_Z(mz, k, z))
}

// VRSQRT28PS_Z: Approximation to the Reciprocal Square Root of Packed Single-Precision Floating-Point Values with Less Than 2^-28 Relative Error (Zeroing Masking).
//
// Forms:
//
//	VRSQRT28PS.Z m512 k zmm
//	VRSQRT28PS.Z zmm k zmm
//
// Construct and append a VRSQRT28PS.Z instruction to the active function.
// Operates on the global context.
func VRSQRT28PS_Z(mz, k, z operand.Op) { ctx.VRSQRT28PS_Z(mz, k, z) }
|
|
|
|
// VRSQRT28SD: Approximation to the Reciprocal Square Root of a Scalar Double-Precision Floating-Point Value with Less Than 2^-28 Relative Error.
//
// Forms:
//
//	VRSQRT28SD m64 xmm k xmm
//	VRSQRT28SD m64 xmm xmm
//	VRSQRT28SD xmm xmm k xmm
//	VRSQRT28SD xmm xmm xmm
//
// Construct and append a VRSQRT28SD instruction to the active function.
func (c *Context) VRSQRT28SD(ops ...operand.Op) {
	c.addinstruction(x86.VRSQRT28SD(ops...))
}

// VRSQRT28SD: Approximation to the Reciprocal Square Root of a Scalar Double-Precision Floating-Point Value with Less Than 2^-28 Relative Error.
//
// Forms:
//
//	VRSQRT28SD m64 xmm k xmm
//	VRSQRT28SD m64 xmm xmm
//	VRSQRT28SD xmm xmm k xmm
//	VRSQRT28SD xmm xmm xmm
//
// Construct and append a VRSQRT28SD instruction to the active function.
// Operates on the global context.
func VRSQRT28SD(ops ...operand.Op) { ctx.VRSQRT28SD(ops...) }
|
|
|
|
// VRSQRT28SD_SAE: Approximation to the Reciprocal Square Root of a Scalar Double-Precision Floating-Point Value with Less Than 2^-28 Relative Error (Suppress All Exceptions).
//
// Forms:
//
//	VRSQRT28SD.SAE xmm xmm k xmm
//	VRSQRT28SD.SAE xmm xmm xmm
//
// Construct and append a VRSQRT28SD.SAE instruction to the active function.
func (c *Context) VRSQRT28SD_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VRSQRT28SD_SAE(ops...))
}

// VRSQRT28SD_SAE: Approximation to the Reciprocal Square Root of a Scalar Double-Precision Floating-Point Value with Less Than 2^-28 Relative Error (Suppress All Exceptions).
//
// Forms:
//
//	VRSQRT28SD.SAE xmm xmm k xmm
//	VRSQRT28SD.SAE xmm xmm xmm
//
// Construct and append a VRSQRT28SD.SAE instruction to the active function.
// Operates on the global context.
func VRSQRT28SD_SAE(ops ...operand.Op) { ctx.VRSQRT28SD_SAE(ops...) }
|
|
|
|
// VRSQRT28SD_SAE_Z: Approximation to the Reciprocal Square Root of a Scalar Double-Precision Floating-Point Value with Less Than 2^-28 Relative Error (Suppress All Exceptions, Zeroing Masking).
//
// Forms:
//
//	VRSQRT28SD.SAE.Z xmm xmm k xmm
//
// Construct and append a VRSQRT28SD.SAE.Z instruction to the active function.
func (c *Context) VRSQRT28SD_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VRSQRT28SD_SAE_Z(x, x1, k, x2))
}

// VRSQRT28SD_SAE_Z: Approximation to the Reciprocal Square Root of a Scalar Double-Precision Floating-Point Value with Less Than 2^-28 Relative Error (Suppress All Exceptions, Zeroing Masking).
//
// Forms:
//
//	VRSQRT28SD.SAE.Z xmm xmm k xmm
//
// Construct and append a VRSQRT28SD.SAE.Z instruction to the active function.
// Operates on the global context.
func VRSQRT28SD_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VRSQRT28SD_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VRSQRT28SD_Z: Approximation to the Reciprocal Square Root of a Scalar Double-Precision Floating-Point Value with Less Than 2^-28 Relative Error (Zeroing Masking).
//
// Forms:
//
//	VRSQRT28SD.Z m64 xmm k xmm
//	VRSQRT28SD.Z xmm xmm k xmm
//
// Construct and append a VRSQRT28SD.Z instruction to the active function.
func (c *Context) VRSQRT28SD_Z(mx, x, k, x1 operand.Op) {
	c.addinstruction(x86.VRSQRT28SD_Z(mx, x, k, x1))
}

// VRSQRT28SD_Z: Approximation to the Reciprocal Square Root of a Scalar Double-Precision Floating-Point Value with Less Than 2^-28 Relative Error (Zeroing Masking).
//
// Forms:
//
//	VRSQRT28SD.Z m64 xmm k xmm
//	VRSQRT28SD.Z xmm xmm k xmm
//
// Construct and append a VRSQRT28SD.Z instruction to the active function.
// Operates on the global context.
func VRSQRT28SD_Z(mx, x, k, x1 operand.Op) { ctx.VRSQRT28SD_Z(mx, x, k, x1) }
|
|
|
|
// VRSQRT28SS: Approximation to the Reciprocal Square Root of a Scalar Single-Precision Floating-Point Value with Less Than 2^-28 Relative Error.
//
// Forms:
//
//	VRSQRT28SS m32 xmm k xmm
//	VRSQRT28SS m32 xmm xmm
//	VRSQRT28SS xmm xmm k xmm
//	VRSQRT28SS xmm xmm xmm
//
// Construct and append a VRSQRT28SS instruction to the active function.
func (c *Context) VRSQRT28SS(ops ...operand.Op) {
	c.addinstruction(x86.VRSQRT28SS(ops...))
}

// VRSQRT28SS: Approximation to the Reciprocal Square Root of a Scalar Single-Precision Floating-Point Value with Less Than 2^-28 Relative Error.
//
// Forms:
//
//	VRSQRT28SS m32 xmm k xmm
//	VRSQRT28SS m32 xmm xmm
//	VRSQRT28SS xmm xmm k xmm
//	VRSQRT28SS xmm xmm xmm
//
// Construct and append a VRSQRT28SS instruction to the active function.
// Operates on the global context.
func VRSQRT28SS(ops ...operand.Op) { ctx.VRSQRT28SS(ops...) }
|
|
|
|
// VRSQRT28SS_SAE: Approximation to the Reciprocal Square Root of a Scalar Single-Precision Floating-Point Value with Less Than 2^-28 Relative Error (Suppress All Exceptions).
//
// Forms:
//
//	VRSQRT28SS.SAE xmm xmm k xmm
//	VRSQRT28SS.SAE xmm xmm xmm
//
// Construct and append a VRSQRT28SS.SAE instruction to the active function.
func (c *Context) VRSQRT28SS_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VRSQRT28SS_SAE(ops...))
}

// VRSQRT28SS_SAE: Approximation to the Reciprocal Square Root of a Scalar Single-Precision Floating-Point Value with Less Than 2^-28 Relative Error (Suppress All Exceptions).
//
// Forms:
//
//	VRSQRT28SS.SAE xmm xmm k xmm
//	VRSQRT28SS.SAE xmm xmm xmm
//
// Construct and append a VRSQRT28SS.SAE instruction to the active function.
// Operates on the global context.
func VRSQRT28SS_SAE(ops ...operand.Op) { ctx.VRSQRT28SS_SAE(ops...) }
|
|
|
|
// VRSQRT28SS_SAE_Z: Approximation to the Reciprocal Square Root of a Scalar Single-Precision Floating-Point Value with Less Than 2^-28 Relative Error (Suppress All Exceptions, Zeroing Masking).
//
// Forms:
//
//	VRSQRT28SS.SAE.Z xmm xmm k xmm
//
// Construct and append a VRSQRT28SS.SAE.Z instruction to the active function.
func (c *Context) VRSQRT28SS_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VRSQRT28SS_SAE_Z(x, x1, k, x2))
}

// VRSQRT28SS_SAE_Z: Approximation to the Reciprocal Square Root of a Scalar Single-Precision Floating-Point Value with Less Than 2^-28 Relative Error (Suppress All Exceptions, Zeroing Masking).
//
// Forms:
//
//	VRSQRT28SS.SAE.Z xmm xmm k xmm
//
// Construct and append a VRSQRT28SS.SAE.Z instruction to the active function.
// Operates on the global context.
func VRSQRT28SS_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VRSQRT28SS_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VRSQRT28SS_Z: Approximation to the Reciprocal Square Root of a Scalar Single-Precision Floating-Point Value with Less Than 2^-28 Relative Error (Zeroing Masking).
//
// Forms:
//
//	VRSQRT28SS.Z m32 xmm k xmm
//	VRSQRT28SS.Z xmm xmm k xmm
//
// Construct and append a VRSQRT28SS.Z instruction to the active function.
func (c *Context) VRSQRT28SS_Z(mx, x, k, x1 operand.Op) {
	c.addinstruction(x86.VRSQRT28SS_Z(mx, x, k, x1))
}

// VRSQRT28SS_Z: Approximation to the Reciprocal Square Root of a Scalar Single-Precision Floating-Point Value with Less Than 2^-28 Relative Error (Zeroing Masking).
//
// Forms:
//
//	VRSQRT28SS.Z m32 xmm k xmm
//	VRSQRT28SS.Z xmm xmm k xmm
//
// Construct and append a VRSQRT28SS.Z instruction to the active function.
// Operates on the global context.
func VRSQRT28SS_Z(mx, x, k, x1 operand.Op) { ctx.VRSQRT28SS_Z(mx, x, k, x1) }
|
|
|
|
// VRSQRTPS: Compute Reciprocals of Square Roots of Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	VRSQRTPS m128 xmm
//	VRSQRTPS m256 ymm
//	VRSQRTPS xmm xmm
//	VRSQRTPS ymm ymm
//
// Construct and append a VRSQRTPS instruction to the active function.
func (c *Context) VRSQRTPS(mxy, xy operand.Op) {
	c.addinstruction(x86.VRSQRTPS(mxy, xy))
}

// VRSQRTPS: Compute Reciprocals of Square Roots of Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	VRSQRTPS m128 xmm
//	VRSQRTPS m256 ymm
//	VRSQRTPS xmm xmm
//	VRSQRTPS ymm ymm
//
// Construct and append a VRSQRTPS instruction to the active function.
// Operates on the global context.
func VRSQRTPS(mxy, xy operand.Op) { ctx.VRSQRTPS(mxy, xy) }
|
|
|
|
// VRSQRTSS: Compute Reciprocal of Square Root of Scalar Single-Precision Floating-Point Value.
//
// Forms:
//
//	VRSQRTSS m32 xmm xmm
//	VRSQRTSS xmm xmm xmm
//
// Construct and append a VRSQRTSS instruction to the active function.
func (c *Context) VRSQRTSS(mx, x, x1 operand.Op) {
	c.addinstruction(x86.VRSQRTSS(mx, x, x1))
}

// VRSQRTSS: Compute Reciprocal of Square Root of Scalar Single-Precision Floating-Point Value.
//
// Forms:
//
//	VRSQRTSS m32 xmm xmm
//	VRSQRTSS xmm xmm xmm
//
// Construct and append a VRSQRTSS instruction to the active function.
// Operates on the global context.
func VRSQRTSS(mx, x, x1 operand.Op) { ctx.VRSQRTSS(mx, x, x1) }
|
|
|
|
// VSCALEFPD: Scale Packed Double-Precision Floating-Point Values With Double-Precision Floating-Point Values.
//
// Forms:
//
//	VSCALEFPD m128 xmm k xmm
//	VSCALEFPD m128 xmm xmm
//	VSCALEFPD m256 ymm k ymm
//	VSCALEFPD m256 ymm ymm
//	VSCALEFPD xmm xmm k xmm
//	VSCALEFPD xmm xmm xmm
//	VSCALEFPD ymm ymm k ymm
//	VSCALEFPD ymm ymm ymm
//	VSCALEFPD m512 zmm k zmm
//	VSCALEFPD m512 zmm zmm
//	VSCALEFPD zmm zmm k zmm
//	VSCALEFPD zmm zmm zmm
//
// Construct and append a VSCALEFPD instruction to the active function.
func (c *Context) VSCALEFPD(ops ...operand.Op) {
	c.addinstruction(x86.VSCALEFPD(ops...))
}

// VSCALEFPD: Scale Packed Double-Precision Floating-Point Values With Double-Precision Floating-Point Values.
//
// Forms:
//
//	VSCALEFPD m128 xmm k xmm
//	VSCALEFPD m128 xmm xmm
//	VSCALEFPD m256 ymm k ymm
//	VSCALEFPD m256 ymm ymm
//	VSCALEFPD xmm xmm k xmm
//	VSCALEFPD xmm xmm xmm
//	VSCALEFPD ymm ymm k ymm
//	VSCALEFPD ymm ymm ymm
//	VSCALEFPD m512 zmm k zmm
//	VSCALEFPD m512 zmm zmm
//	VSCALEFPD zmm zmm k zmm
//	VSCALEFPD zmm zmm zmm
//
// Construct and append a VSCALEFPD instruction to the active function.
// Operates on the global context.
func VSCALEFPD(ops ...operand.Op) { ctx.VSCALEFPD(ops...) }
|
|
|
|
// VSCALEFPD_BCST: Scale Packed Double-Precision Floating-Point Values With Double-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VSCALEFPD.BCST m64 xmm k xmm
//	VSCALEFPD.BCST m64 xmm xmm
//	VSCALEFPD.BCST m64 ymm k ymm
//	VSCALEFPD.BCST m64 ymm ymm
//	VSCALEFPD.BCST m64 zmm k zmm
//	VSCALEFPD.BCST m64 zmm zmm
//
// Construct and append a VSCALEFPD.BCST instruction to the active function.
func (c *Context) VSCALEFPD_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VSCALEFPD_BCST(ops...))
}

// VSCALEFPD_BCST: Scale Packed Double-Precision Floating-Point Values With Double-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VSCALEFPD.BCST m64 xmm k xmm
//	VSCALEFPD.BCST m64 xmm xmm
//	VSCALEFPD.BCST m64 ymm k ymm
//	VSCALEFPD.BCST m64 ymm ymm
//	VSCALEFPD.BCST m64 zmm k zmm
//	VSCALEFPD.BCST m64 zmm zmm
//
// Construct and append a VSCALEFPD.BCST instruction to the active function.
// Operates on the global context.
func VSCALEFPD_BCST(ops ...operand.Op) { ctx.VSCALEFPD_BCST(ops...) }
|
|
|
|
// VSCALEFPD_BCST_Z: Scale Packed Double-Precision Floating-Point Values With Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VSCALEFPD.BCST.Z m64 xmm k xmm
//	VSCALEFPD.BCST.Z m64 ymm k ymm
//	VSCALEFPD.BCST.Z m64 zmm k zmm
//
// Construct and append a VSCALEFPD.BCST.Z instruction to the active function.
func (c *Context) VSCALEFPD_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VSCALEFPD_BCST_Z(m, xyz, k, xyz1))
}

// VSCALEFPD_BCST_Z: Scale Packed Double-Precision Floating-Point Values With Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VSCALEFPD.BCST.Z m64 xmm k xmm
//	VSCALEFPD.BCST.Z m64 ymm k ymm
//	VSCALEFPD.BCST.Z m64 zmm k zmm
//
// Construct and append a VSCALEFPD.BCST.Z instruction to the active function.
// Operates on the global context.
func VSCALEFPD_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VSCALEFPD_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VSCALEFPD_RD_SAE: Scale Packed Double-Precision Floating-Point Values With Double-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VSCALEFPD.RD_SAE zmm zmm k zmm
//	VSCALEFPD.RD_SAE zmm zmm zmm
//
// Construct and append a VSCALEFPD.RD_SAE instruction to the active function.
func (c *Context) VSCALEFPD_RD_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VSCALEFPD_RD_SAE(ops...))
}

// VSCALEFPD_RD_SAE: Scale Packed Double-Precision Floating-Point Values With Double-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VSCALEFPD.RD_SAE zmm zmm k zmm
//	VSCALEFPD.RD_SAE zmm zmm zmm
//
// Construct and append a VSCALEFPD.RD_SAE instruction to the active function.
// Operates on the global context.
func VSCALEFPD_RD_SAE(ops ...operand.Op) { ctx.VSCALEFPD_RD_SAE(ops...) }
|
|
|
|
// VSCALEFPD_RD_SAE_Z: Scale Packed Double-Precision Floating-Point Values With Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VSCALEFPD.RD_SAE.Z zmm zmm k zmm
//
// Construct and append a VSCALEFPD.RD_SAE.Z instruction to the active function.
func (c *Context) VSCALEFPD_RD_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VSCALEFPD_RD_SAE_Z(z, z1, k, z2))
}

// VSCALEFPD_RD_SAE_Z: Scale Packed Double-Precision Floating-Point Values With Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VSCALEFPD.RD_SAE.Z zmm zmm k zmm
//
// Construct and append a VSCALEFPD.RD_SAE.Z instruction to the active function.
// Operates on the global context.
func VSCALEFPD_RD_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VSCALEFPD_RD_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VSCALEFPD_RN_SAE: Scale Packed Double-Precision Floating-Point Values With Double-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VSCALEFPD.RN_SAE zmm zmm k zmm
//	VSCALEFPD.RN_SAE zmm zmm zmm
//
// Construct and append a VSCALEFPD.RN_SAE instruction to the active function.
func (c *Context) VSCALEFPD_RN_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VSCALEFPD_RN_SAE(ops...))
}

// VSCALEFPD_RN_SAE: Scale Packed Double-Precision Floating-Point Values With Double-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VSCALEFPD.RN_SAE zmm zmm k zmm
//	VSCALEFPD.RN_SAE zmm zmm zmm
//
// Construct and append a VSCALEFPD.RN_SAE instruction to the active function.
// Operates on the global context.
func VSCALEFPD_RN_SAE(ops ...operand.Op) { ctx.VSCALEFPD_RN_SAE(ops...) }
|
|
|
|
// VSCALEFPD_RN_SAE_Z: Scale Packed Double-Precision Floating-Point Values With Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VSCALEFPD.RN_SAE.Z zmm zmm k zmm
//
// Construct and append a VSCALEFPD.RN_SAE.Z instruction to the active function.
func (c *Context) VSCALEFPD_RN_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VSCALEFPD_RN_SAE_Z(z, z1, k, z2))
}

// VSCALEFPD_RN_SAE_Z: Scale Packed Double-Precision Floating-Point Values With Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VSCALEFPD.RN_SAE.Z zmm zmm k zmm
//
// Construct and append a VSCALEFPD.RN_SAE.Z instruction to the active function.
// Operates on the global context.
func VSCALEFPD_RN_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VSCALEFPD_RN_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VSCALEFPD_RU_SAE: Scale Packed Double-Precision Floating-Point Values With Double-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VSCALEFPD.RU_SAE zmm zmm k zmm
//	VSCALEFPD.RU_SAE zmm zmm zmm
//
// Construct and append a VSCALEFPD.RU_SAE instruction to the active function.
func (c *Context) VSCALEFPD_RU_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VSCALEFPD_RU_SAE(ops...))
}

// VSCALEFPD_RU_SAE: Scale Packed Double-Precision Floating-Point Values With Double-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VSCALEFPD.RU_SAE zmm zmm k zmm
//	VSCALEFPD.RU_SAE zmm zmm zmm
//
// Construct and append a VSCALEFPD.RU_SAE instruction to the active function.
// Operates on the global context.
func VSCALEFPD_RU_SAE(ops ...operand.Op) { ctx.VSCALEFPD_RU_SAE(ops...) }
|
|
|
|
// VSCALEFPD_RU_SAE_Z: Scale Packed Double-Precision Floating-Point Values With Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VSCALEFPD.RU_SAE.Z zmm zmm k zmm
//
// Construct and append a VSCALEFPD.RU_SAE.Z instruction to the active function.
func (c *Context) VSCALEFPD_RU_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VSCALEFPD_RU_SAE_Z(z, z1, k, z2))
}

// VSCALEFPD_RU_SAE_Z: Scale Packed Double-Precision Floating-Point Values With Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VSCALEFPD.RU_SAE.Z zmm zmm k zmm
//
// Construct and append a VSCALEFPD.RU_SAE.Z instruction to the active function.
// Operates on the global context.
func VSCALEFPD_RU_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VSCALEFPD_RU_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VSCALEFPD_RZ_SAE: Scale Packed Double-Precision Floating-Point Values With Double-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VSCALEFPD.RZ_SAE zmm zmm k zmm
//	VSCALEFPD.RZ_SAE zmm zmm zmm
//
// Construct and append a VSCALEFPD.RZ_SAE instruction to the active function.
func (c *Context) VSCALEFPD_RZ_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VSCALEFPD_RZ_SAE(ops...))
}

// VSCALEFPD_RZ_SAE: Scale Packed Double-Precision Floating-Point Values With Double-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VSCALEFPD.RZ_SAE zmm zmm k zmm
//	VSCALEFPD.RZ_SAE zmm zmm zmm
//
// Construct and append a VSCALEFPD.RZ_SAE instruction to the active function.
// Operates on the global context.
func VSCALEFPD_RZ_SAE(ops ...operand.Op) { ctx.VSCALEFPD_RZ_SAE(ops...) }
|
|
|
|
// VSCALEFPD_RZ_SAE_Z: Scale Packed Double-Precision Floating-Point Values With Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VSCALEFPD.RZ_SAE.Z zmm zmm k zmm
//
// Construct and append a VSCALEFPD.RZ_SAE.Z instruction to the active function.
func (c *Context) VSCALEFPD_RZ_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VSCALEFPD_RZ_SAE_Z(z, z1, k, z2))
}

// VSCALEFPD_RZ_SAE_Z: Scale Packed Double-Precision Floating-Point Values With Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VSCALEFPD.RZ_SAE.Z zmm zmm k zmm
//
// Construct and append a VSCALEFPD.RZ_SAE.Z instruction to the active function.
// Operates on the global context.
func VSCALEFPD_RZ_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VSCALEFPD_RZ_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VSCALEFPD_Z: Scale Packed Double-Precision Floating-Point Values With Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VSCALEFPD.Z m128 xmm k xmm
//	VSCALEFPD.Z m256 ymm k ymm
//	VSCALEFPD.Z xmm  xmm k xmm
//	VSCALEFPD.Z ymm  ymm k ymm
//	VSCALEFPD.Z m512 zmm k zmm
//	VSCALEFPD.Z zmm  zmm k zmm
//
// Construct and append a VSCALEFPD.Z instruction to the active function.
func (c *Context) VSCALEFPD_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VSCALEFPD_Z(mxyz, xyz, k, xyz1))
}

// VSCALEFPD_Z: Scale Packed Double-Precision Floating-Point Values With Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VSCALEFPD.Z m128 xmm k xmm
//	VSCALEFPD.Z m256 ymm k ymm
//	VSCALEFPD.Z xmm  xmm k xmm
//	VSCALEFPD.Z ymm  ymm k ymm
//	VSCALEFPD.Z m512 zmm k zmm
//	VSCALEFPD.Z zmm  zmm k zmm
//
// Construct and append a VSCALEFPD.Z instruction to the active function.
// Operates on the global context.
func VSCALEFPD_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VSCALEFPD_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VSCALEFPS: Scale Packed Single-Precision Floating-Point Values With Single-Precision Floating-Point Values.
//
// Forms:
//
//	VSCALEFPS m128 xmm k xmm
//	VSCALEFPS m128 xmm xmm
//	VSCALEFPS m256 ymm k ymm
//	VSCALEFPS m256 ymm ymm
//	VSCALEFPS xmm  xmm k xmm
//	VSCALEFPS xmm  xmm xmm
//	VSCALEFPS ymm  ymm k ymm
//	VSCALEFPS ymm  ymm ymm
//	VSCALEFPS m512 zmm k zmm
//	VSCALEFPS m512 zmm zmm
//	VSCALEFPS zmm  zmm k zmm
//	VSCALEFPS zmm  zmm zmm
//
// Construct and append a VSCALEFPS instruction to the active function.
func (c *Context) VSCALEFPS(ops ...operand.Op) {
	c.addinstruction(x86.VSCALEFPS(ops...))
}

// VSCALEFPS: Scale Packed Single-Precision Floating-Point Values With Single-Precision Floating-Point Values.
//
// Forms:
//
//	VSCALEFPS m128 xmm k xmm
//	VSCALEFPS m128 xmm xmm
//	VSCALEFPS m256 ymm k ymm
//	VSCALEFPS m256 ymm ymm
//	VSCALEFPS xmm  xmm k xmm
//	VSCALEFPS xmm  xmm xmm
//	VSCALEFPS ymm  ymm k ymm
//	VSCALEFPS ymm  ymm ymm
//	VSCALEFPS m512 zmm k zmm
//	VSCALEFPS m512 zmm zmm
//	VSCALEFPS zmm  zmm k zmm
//	VSCALEFPS zmm  zmm zmm
//
// Construct and append a VSCALEFPS instruction to the active function.
// Operates on the global context.
func VSCALEFPS(ops ...operand.Op) { ctx.VSCALEFPS(ops...) }
|
|
|
|
// VSCALEFPS_BCST: Scale Packed Single-Precision Floating-Point Values With Single-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VSCALEFPS.BCST m32 xmm k xmm
//	VSCALEFPS.BCST m32 xmm xmm
//	VSCALEFPS.BCST m32 ymm k ymm
//	VSCALEFPS.BCST m32 ymm ymm
//	VSCALEFPS.BCST m32 zmm k zmm
//	VSCALEFPS.BCST m32 zmm zmm
//
// Construct and append a VSCALEFPS.BCST instruction to the active function.
func (c *Context) VSCALEFPS_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VSCALEFPS_BCST(ops...))
}

// VSCALEFPS_BCST: Scale Packed Single-Precision Floating-Point Values With Single-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VSCALEFPS.BCST m32 xmm k xmm
//	VSCALEFPS.BCST m32 xmm xmm
//	VSCALEFPS.BCST m32 ymm k ymm
//	VSCALEFPS.BCST m32 ymm ymm
//	VSCALEFPS.BCST m32 zmm k zmm
//	VSCALEFPS.BCST m32 zmm zmm
//
// Construct and append a VSCALEFPS.BCST instruction to the active function.
// Operates on the global context.
func VSCALEFPS_BCST(ops ...operand.Op) { ctx.VSCALEFPS_BCST(ops...) }
|
|
|
|
// VSCALEFPS_BCST_Z: Scale Packed Single-Precision Floating-Point Values With Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VSCALEFPS.BCST.Z m32 xmm k xmm
//	VSCALEFPS.BCST.Z m32 ymm k ymm
//	VSCALEFPS.BCST.Z m32 zmm k zmm
//
// Construct and append a VSCALEFPS.BCST.Z instruction to the active function.
func (c *Context) VSCALEFPS_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VSCALEFPS_BCST_Z(m, xyz, k, xyz1))
}

// VSCALEFPS_BCST_Z: Scale Packed Single-Precision Floating-Point Values With Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VSCALEFPS.BCST.Z m32 xmm k xmm
//	VSCALEFPS.BCST.Z m32 ymm k ymm
//	VSCALEFPS.BCST.Z m32 zmm k zmm
//
// Construct and append a VSCALEFPS.BCST.Z instruction to the active function.
// Operates on the global context.
func VSCALEFPS_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VSCALEFPS_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VSCALEFPS_RD_SAE: Scale Packed Single-Precision Floating-Point Values With Single-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VSCALEFPS.RD_SAE zmm zmm k zmm
//	VSCALEFPS.RD_SAE zmm zmm zmm
//
// Construct and append a VSCALEFPS.RD_SAE instruction to the active function.
func (c *Context) VSCALEFPS_RD_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VSCALEFPS_RD_SAE(ops...))
}

// VSCALEFPS_RD_SAE: Scale Packed Single-Precision Floating-Point Values With Single-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VSCALEFPS.RD_SAE zmm zmm k zmm
//	VSCALEFPS.RD_SAE zmm zmm zmm
//
// Construct and append a VSCALEFPS.RD_SAE instruction to the active function.
// Operates on the global context.
func VSCALEFPS_RD_SAE(ops ...operand.Op) { ctx.VSCALEFPS_RD_SAE(ops...) }
|
|
|
|
// VSCALEFPS_RD_SAE_Z: Scale Packed Single-Precision Floating-Point Values With Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VSCALEFPS.RD_SAE.Z zmm zmm k zmm
//
// Construct and append a VSCALEFPS.RD_SAE.Z instruction to the active function.
func (c *Context) VSCALEFPS_RD_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VSCALEFPS_RD_SAE_Z(z, z1, k, z2))
}

// VSCALEFPS_RD_SAE_Z: Scale Packed Single-Precision Floating-Point Values With Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VSCALEFPS.RD_SAE.Z zmm zmm k zmm
//
// Construct and append a VSCALEFPS.RD_SAE.Z instruction to the active function.
// Operates on the global context.
func VSCALEFPS_RD_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VSCALEFPS_RD_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VSCALEFPS_RN_SAE: Scale Packed Single-Precision Floating-Point Values With Single-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VSCALEFPS.RN_SAE zmm zmm k zmm
//	VSCALEFPS.RN_SAE zmm zmm zmm
//
// Construct and append a VSCALEFPS.RN_SAE instruction to the active function.
func (c *Context) VSCALEFPS_RN_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VSCALEFPS_RN_SAE(ops...))
}

// VSCALEFPS_RN_SAE: Scale Packed Single-Precision Floating-Point Values With Single-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VSCALEFPS.RN_SAE zmm zmm k zmm
//	VSCALEFPS.RN_SAE zmm zmm zmm
//
// Construct and append a VSCALEFPS.RN_SAE instruction to the active function.
// Operates on the global context.
func VSCALEFPS_RN_SAE(ops ...operand.Op) { ctx.VSCALEFPS_RN_SAE(ops...) }
|
|
|
|
// VSCALEFPS_RN_SAE_Z: Scale Packed Single-Precision Floating-Point Values With Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VSCALEFPS.RN_SAE.Z zmm zmm k zmm
//
// Construct and append a VSCALEFPS.RN_SAE.Z instruction to the active function.
func (c *Context) VSCALEFPS_RN_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VSCALEFPS_RN_SAE_Z(z, z1, k, z2))
}

// VSCALEFPS_RN_SAE_Z: Scale Packed Single-Precision Floating-Point Values With Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VSCALEFPS.RN_SAE.Z zmm zmm k zmm
//
// Construct and append a VSCALEFPS.RN_SAE.Z instruction to the active function.
// Operates on the global context.
func VSCALEFPS_RN_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VSCALEFPS_RN_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VSCALEFPS_RU_SAE: Scale Packed Single-Precision Floating-Point Values With Single-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VSCALEFPS.RU_SAE zmm zmm k zmm
//	VSCALEFPS.RU_SAE zmm zmm zmm
//
// Construct and append a VSCALEFPS.RU_SAE instruction to the active function.
func (c *Context) VSCALEFPS_RU_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VSCALEFPS_RU_SAE(ops...))
}

// VSCALEFPS_RU_SAE: Scale Packed Single-Precision Floating-Point Values With Single-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VSCALEFPS.RU_SAE zmm zmm k zmm
//	VSCALEFPS.RU_SAE zmm zmm zmm
//
// Construct and append a VSCALEFPS.RU_SAE instruction to the active function.
// Operates on the global context.
func VSCALEFPS_RU_SAE(ops ...operand.Op) { ctx.VSCALEFPS_RU_SAE(ops...) }
|
|
|
|
// VSCALEFPS_RU_SAE_Z: Scale Packed Single-Precision Floating-Point Values With Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VSCALEFPS.RU_SAE.Z zmm zmm k zmm
//
// Construct and append a VSCALEFPS.RU_SAE.Z instruction to the active function.
func (c *Context) VSCALEFPS_RU_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VSCALEFPS_RU_SAE_Z(z, z1, k, z2))
}

// VSCALEFPS_RU_SAE_Z: Scale Packed Single-Precision Floating-Point Values With Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VSCALEFPS.RU_SAE.Z zmm zmm k zmm
//
// Construct and append a VSCALEFPS.RU_SAE.Z instruction to the active function.
// Operates on the global context.
func VSCALEFPS_RU_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VSCALEFPS_RU_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VSCALEFPS_RZ_SAE: Scale Packed Single-Precision Floating-Point Values With Single-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VSCALEFPS.RZ_SAE zmm zmm k zmm
//	VSCALEFPS.RZ_SAE zmm zmm zmm
//
// Construct and append a VSCALEFPS.RZ_SAE instruction to the active function.
func (c *Context) VSCALEFPS_RZ_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VSCALEFPS_RZ_SAE(ops...))
}

// VSCALEFPS_RZ_SAE: Scale Packed Single-Precision Floating-Point Values With Single-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VSCALEFPS.RZ_SAE zmm zmm k zmm
//	VSCALEFPS.RZ_SAE zmm zmm zmm
//
// Construct and append a VSCALEFPS.RZ_SAE instruction to the active function.
// Operates on the global context.
func VSCALEFPS_RZ_SAE(ops ...operand.Op) { ctx.VSCALEFPS_RZ_SAE(ops...) }
|
|
|
|
// VSCALEFPS_RZ_SAE_Z: Scale Packed Single-Precision Floating-Point Values With Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VSCALEFPS.RZ_SAE.Z zmm zmm k zmm
//
// Construct and append a VSCALEFPS.RZ_SAE.Z instruction to the active function.
func (c *Context) VSCALEFPS_RZ_SAE_Z(z, z1, k, z2 operand.Op) {
	c.addinstruction(x86.VSCALEFPS_RZ_SAE_Z(z, z1, k, z2))
}

// VSCALEFPS_RZ_SAE_Z: Scale Packed Single-Precision Floating-Point Values With Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VSCALEFPS.RZ_SAE.Z zmm zmm k zmm
//
// Construct and append a VSCALEFPS.RZ_SAE.Z instruction to the active function.
// Operates on the global context.
func VSCALEFPS_RZ_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VSCALEFPS_RZ_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VSCALEFPS_Z: Scale Packed Single-Precision Floating-Point Values With Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VSCALEFPS.Z m128 xmm k xmm
//	VSCALEFPS.Z m256 ymm k ymm
//	VSCALEFPS.Z xmm  xmm k xmm
//	VSCALEFPS.Z ymm  ymm k ymm
//	VSCALEFPS.Z m512 zmm k zmm
//	VSCALEFPS.Z zmm  zmm k zmm
//
// Construct and append a VSCALEFPS.Z instruction to the active function.
func (c *Context) VSCALEFPS_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VSCALEFPS_Z(mxyz, xyz, k, xyz1))
}

// VSCALEFPS_Z: Scale Packed Single-Precision Floating-Point Values With Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VSCALEFPS.Z m128 xmm k xmm
//	VSCALEFPS.Z m256 ymm k ymm
//	VSCALEFPS.Z xmm  xmm k xmm
//	VSCALEFPS.Z ymm  ymm k ymm
//	VSCALEFPS.Z m512 zmm k zmm
//	VSCALEFPS.Z zmm  zmm k zmm
//
// Construct and append a VSCALEFPS.Z instruction to the active function.
// Operates on the global context.
func VSCALEFPS_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VSCALEFPS_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VSCALEFSD: Scale Scalar Double-Precision Floating-Point Value With a Double-Precision Floating-Point Value.
//
// Forms:
//
//	VSCALEFSD m64 xmm k xmm
//	VSCALEFSD m64 xmm xmm
//	VSCALEFSD xmm xmm k xmm
//	VSCALEFSD xmm xmm xmm
//
// Construct and append a VSCALEFSD instruction to the active function.
func (c *Context) VSCALEFSD(ops ...operand.Op) {
	c.addinstruction(x86.VSCALEFSD(ops...))
}

// VSCALEFSD: Scale Scalar Double-Precision Floating-Point Value With a Double-Precision Floating-Point Value.
//
// Forms:
//
//	VSCALEFSD m64 xmm k xmm
//	VSCALEFSD m64 xmm xmm
//	VSCALEFSD xmm xmm k xmm
//	VSCALEFSD xmm xmm xmm
//
// Construct and append a VSCALEFSD instruction to the active function.
// Operates on the global context.
func VSCALEFSD(ops ...operand.Op) { ctx.VSCALEFSD(ops...) }
|
|
|
|
// VSCALEFSD_RD_SAE: Scale Scalar Double-Precision Floating-Point Value With a Double-Precision Floating-Point Value (Round Towards Negative Infinity).
//
// Forms:
//
//	VSCALEFSD.RD_SAE xmm xmm k xmm
//	VSCALEFSD.RD_SAE xmm xmm xmm
//
// Construct and append a VSCALEFSD.RD_SAE instruction to the active function.
func (c *Context) VSCALEFSD_RD_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VSCALEFSD_RD_SAE(ops...))
}

// VSCALEFSD_RD_SAE: Scale Scalar Double-Precision Floating-Point Value With a Double-Precision Floating-Point Value (Round Towards Negative Infinity).
//
// Forms:
//
//	VSCALEFSD.RD_SAE xmm xmm k xmm
//	VSCALEFSD.RD_SAE xmm xmm xmm
//
// Construct and append a VSCALEFSD.RD_SAE instruction to the active function.
// Operates on the global context.
func VSCALEFSD_RD_SAE(ops ...operand.Op) { ctx.VSCALEFSD_RD_SAE(ops...) }
|
|
|
|
// VSCALEFSD_RD_SAE_Z: Scale Scalar Double-Precision Floating-Point Value With a Double-Precision Floating-Point Value (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VSCALEFSD.RD_SAE.Z xmm xmm k xmm
//
// Construct and append a VSCALEFSD.RD_SAE.Z instruction to the active function.
func (c *Context) VSCALEFSD_RD_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VSCALEFSD_RD_SAE_Z(x, x1, k, x2))
}

// VSCALEFSD_RD_SAE_Z: Scale Scalar Double-Precision Floating-Point Value With a Double-Precision Floating-Point Value (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VSCALEFSD.RD_SAE.Z xmm xmm k xmm
//
// Construct and append a VSCALEFSD.RD_SAE.Z instruction to the active function.
// Operates on the global context.
func VSCALEFSD_RD_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VSCALEFSD_RD_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VSCALEFSD_RN_SAE: Scale Scalar Double-Precision Floating-Point Value With a Double-Precision Floating-Point Value (Round Towards Nearest).
//
// Forms:
//
//	VSCALEFSD.RN_SAE xmm xmm k xmm
//	VSCALEFSD.RN_SAE xmm xmm xmm
//
// Construct and append a VSCALEFSD.RN_SAE instruction to the active function.
func (c *Context) VSCALEFSD_RN_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VSCALEFSD_RN_SAE(ops...))
}

// VSCALEFSD_RN_SAE: Scale Scalar Double-Precision Floating-Point Value With a Double-Precision Floating-Point Value (Round Towards Nearest).
//
// Forms:
//
//	VSCALEFSD.RN_SAE xmm xmm k xmm
//	VSCALEFSD.RN_SAE xmm xmm xmm
//
// Construct and append a VSCALEFSD.RN_SAE instruction to the active function.
// Operates on the global context.
func VSCALEFSD_RN_SAE(ops ...operand.Op) { ctx.VSCALEFSD_RN_SAE(ops...) }
|
|
|
|
// VSCALEFSD_RN_SAE_Z: Scale Scalar Double-Precision Floating-Point Value With a Double-Precision Floating-Point Value (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VSCALEFSD.RN_SAE.Z xmm xmm k xmm
//
// Construct and append a VSCALEFSD.RN_SAE.Z instruction to the active function.
func (c *Context) VSCALEFSD_RN_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VSCALEFSD_RN_SAE_Z(x, x1, k, x2))
}

// VSCALEFSD_RN_SAE_Z: Scale Scalar Double-Precision Floating-Point Value With a Double-Precision Floating-Point Value (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VSCALEFSD.RN_SAE.Z xmm xmm k xmm
//
// Construct and append a VSCALEFSD.RN_SAE.Z instruction to the active function.
// Operates on the global context.
func VSCALEFSD_RN_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VSCALEFSD_RN_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VSCALEFSD_RU_SAE: Scale Scalar Double-Precision Floating-Point Value With a Double-Precision Floating-Point Value (Round Towards Positive Infinity).
//
// Forms:
//
//	VSCALEFSD.RU_SAE xmm xmm k xmm
//	VSCALEFSD.RU_SAE xmm xmm xmm
//
// Construct and append a VSCALEFSD.RU_SAE instruction to the active function.
func (c *Context) VSCALEFSD_RU_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VSCALEFSD_RU_SAE(ops...))
}

// VSCALEFSD_RU_SAE: Scale Scalar Double-Precision Floating-Point Value With a Double-Precision Floating-Point Value (Round Towards Positive Infinity).
//
// Forms:
//
//	VSCALEFSD.RU_SAE xmm xmm k xmm
//	VSCALEFSD.RU_SAE xmm xmm xmm
//
// Construct and append a VSCALEFSD.RU_SAE instruction to the active function.
// Operates on the global context.
func VSCALEFSD_RU_SAE(ops ...operand.Op) { ctx.VSCALEFSD_RU_SAE(ops...) }
|
|
|
|
// VSCALEFSD_RU_SAE_Z: Scale Scalar Double-Precision Floating-Point Value With a Double-Precision Floating-Point Value (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VSCALEFSD.RU_SAE.Z xmm xmm k xmm
//
// Construct and append a VSCALEFSD.RU_SAE.Z instruction to the active function.
func (c *Context) VSCALEFSD_RU_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VSCALEFSD_RU_SAE_Z(x, x1, k, x2))
}

// VSCALEFSD_RU_SAE_Z: Scale Scalar Double-Precision Floating-Point Value With a Double-Precision Floating-Point Value (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VSCALEFSD.RU_SAE.Z xmm xmm k xmm
//
// Construct and append a VSCALEFSD.RU_SAE.Z instruction to the active function.
// Operates on the global context.
func VSCALEFSD_RU_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VSCALEFSD_RU_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VSCALEFSD_RZ_SAE: Scale Scalar Double-Precision Floating-Point Value With a Double-Precision Floating-Point Value (Round Towards Zero).
//
// Forms:
//
//	VSCALEFSD.RZ_SAE xmm xmm k xmm
//	VSCALEFSD.RZ_SAE xmm xmm xmm
//
// Construct and append a VSCALEFSD.RZ_SAE instruction to the active function.
func (c *Context) VSCALEFSD_RZ_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VSCALEFSD_RZ_SAE(ops...))
}

// VSCALEFSD_RZ_SAE: Scale Scalar Double-Precision Floating-Point Value With a Double-Precision Floating-Point Value (Round Towards Zero).
//
// Forms:
//
//	VSCALEFSD.RZ_SAE xmm xmm k xmm
//	VSCALEFSD.RZ_SAE xmm xmm xmm
//
// Construct and append a VSCALEFSD.RZ_SAE instruction to the active function.
// Operates on the global context.
func VSCALEFSD_RZ_SAE(ops ...operand.Op) { ctx.VSCALEFSD_RZ_SAE(ops...) }
|
|
|
|
// VSCALEFSD_RZ_SAE_Z: Scale Scalar Double-Precision Floating-Point Value With a Double-Precision Floating-Point Value (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VSCALEFSD.RZ_SAE.Z xmm xmm k xmm
//
// Construct and append a VSCALEFSD.RZ_SAE.Z instruction to the active function.
func (c *Context) VSCALEFSD_RZ_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VSCALEFSD_RZ_SAE_Z(x, x1, k, x2))
}

// VSCALEFSD_RZ_SAE_Z: Scale Scalar Double-Precision Floating-Point Value With a Double-Precision Floating-Point Value (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VSCALEFSD.RZ_SAE.Z xmm xmm k xmm
//
// Construct and append a VSCALEFSD.RZ_SAE.Z instruction to the active function.
// Operates on the global context.
func VSCALEFSD_RZ_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VSCALEFSD_RZ_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VSCALEFSD_Z: Scale Scalar Double-Precision Floating-Point Value With a Double-Precision Floating-Point Value (Zeroing Masking).
//
// Forms:
//
//	VSCALEFSD.Z m64 xmm k xmm
//	VSCALEFSD.Z xmm xmm k xmm
//
// Construct and append a VSCALEFSD.Z instruction to the active function.
func (c *Context) VSCALEFSD_Z(mx, x, k, x1 operand.Op) {
	c.addinstruction(x86.VSCALEFSD_Z(mx, x, k, x1))
}

// VSCALEFSD_Z: Scale Scalar Double-Precision Floating-Point Value With a Double-Precision Floating-Point Value (Zeroing Masking).
//
// Forms:
//
//	VSCALEFSD.Z m64 xmm k xmm
//	VSCALEFSD.Z xmm xmm k xmm
//
// Construct and append a VSCALEFSD.Z instruction to the active function.
// Operates on the global context.
func VSCALEFSD_Z(mx, x, k, x1 operand.Op) { ctx.VSCALEFSD_Z(mx, x, k, x1) }
|
|
|
|
// VSCALEFSS: Scale Scalar Single-Precision Floating-Point Value With a Single-Precision Floating-Point Value.
//
// Forms:
//
//	VSCALEFSS m32 xmm k xmm
//	VSCALEFSS m32 xmm xmm
//	VSCALEFSS xmm xmm k xmm
//	VSCALEFSS xmm xmm xmm
//
// Construct and append a VSCALEFSS instruction to the active function.
func (c *Context) VSCALEFSS(ops ...operand.Op) {
	c.addinstruction(x86.VSCALEFSS(ops...))
}

// VSCALEFSS: Scale Scalar Single-Precision Floating-Point Value With a Single-Precision Floating-Point Value.
//
// Forms:
//
//	VSCALEFSS m32 xmm k xmm
//	VSCALEFSS m32 xmm xmm
//	VSCALEFSS xmm xmm k xmm
//	VSCALEFSS xmm xmm xmm
//
// Construct and append a VSCALEFSS instruction to the active function.
// Operates on the global context.
func VSCALEFSS(ops ...operand.Op) { ctx.VSCALEFSS(ops...) }
|
|
|
|
// VSCALEFSS_RD_SAE: Scale Scalar Single-Precision Floating-Point Value With a Single-Precision Floating-Point Value (Round Towards Negative Infinity).
//
// Forms:
//
//	VSCALEFSS.RD_SAE xmm xmm k xmm
//	VSCALEFSS.RD_SAE xmm xmm xmm
//
// Construct and append a VSCALEFSS.RD_SAE instruction to the active function.
func (c *Context) VSCALEFSS_RD_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VSCALEFSS_RD_SAE(ops...))
}

// VSCALEFSS_RD_SAE: Scale Scalar Single-Precision Floating-Point Value With a Single-Precision Floating-Point Value (Round Towards Negative Infinity).
//
// Forms:
//
//	VSCALEFSS.RD_SAE xmm xmm k xmm
//	VSCALEFSS.RD_SAE xmm xmm xmm
//
// Construct and append a VSCALEFSS.RD_SAE instruction to the active function.
// Operates on the global context.
func VSCALEFSS_RD_SAE(ops ...operand.Op) { ctx.VSCALEFSS_RD_SAE(ops...) }
|
|
|
|
// VSCALEFSS_RD_SAE_Z: Scale Scalar Single-Precision Floating-Point Value With a Single-Precision Floating-Point Value (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VSCALEFSS.RD_SAE.Z xmm xmm k xmm
//
// Construct and append a VSCALEFSS.RD_SAE.Z instruction to the active function.
func (c *Context) VSCALEFSS_RD_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VSCALEFSS_RD_SAE_Z(x, x1, k, x2))
}

// VSCALEFSS_RD_SAE_Z: Scale Scalar Single-Precision Floating-Point Value With a Single-Precision Floating-Point Value (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VSCALEFSS.RD_SAE.Z xmm xmm k xmm
//
// Construct and append a VSCALEFSS.RD_SAE.Z instruction to the active function.
// Operates on the global context.
func VSCALEFSS_RD_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VSCALEFSS_RD_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VSCALEFSS_RN_SAE: Scale Scalar Single-Precision Floating-Point Value With a Single-Precision Floating-Point Value (Round Towards Nearest).
//
// Forms:
//
//	VSCALEFSS.RN_SAE xmm xmm k xmm
//	VSCALEFSS.RN_SAE xmm xmm xmm
//
// Construct and append a VSCALEFSS.RN_SAE instruction to the active function.
func (c *Context) VSCALEFSS_RN_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VSCALEFSS_RN_SAE(ops...))
}

// VSCALEFSS_RN_SAE: Scale Scalar Single-Precision Floating-Point Value With a Single-Precision Floating-Point Value (Round Towards Nearest).
//
// Forms:
//
//	VSCALEFSS.RN_SAE xmm xmm k xmm
//	VSCALEFSS.RN_SAE xmm xmm xmm
//
// Construct and append a VSCALEFSS.RN_SAE instruction to the active function.
// Operates on the global context.
func VSCALEFSS_RN_SAE(ops ...operand.Op) { ctx.VSCALEFSS_RN_SAE(ops...) }
|
|
|
|
// VSCALEFSS_RN_SAE_Z: Scale Scalar Single-Precision Floating-Point Value With a Single-Precision Floating-Point Value (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VSCALEFSS.RN_SAE.Z xmm xmm k xmm
//
// Construct and append a VSCALEFSS.RN_SAE.Z instruction to the active function.
func (c *Context) VSCALEFSS_RN_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VSCALEFSS_RN_SAE_Z(x, x1, k, x2))
}

// VSCALEFSS_RN_SAE_Z: Scale Scalar Single-Precision Floating-Point Value With a Single-Precision Floating-Point Value (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VSCALEFSS.RN_SAE.Z xmm xmm k xmm
//
// Construct and append a VSCALEFSS.RN_SAE.Z instruction to the active function.
// Operates on the global context.
func VSCALEFSS_RN_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VSCALEFSS_RN_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VSCALEFSS_RU_SAE: Scale Scalar Single-Precision Floating-Point Value With a Single-Precision Floating-Point Value (Round Towards Positive Infinity).
//
// Forms:
//
//	VSCALEFSS.RU_SAE xmm xmm k xmm
//	VSCALEFSS.RU_SAE xmm xmm xmm
//
// Construct and append a VSCALEFSS.RU_SAE instruction to the active function.
func (c *Context) VSCALEFSS_RU_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VSCALEFSS_RU_SAE(ops...))
}

// VSCALEFSS_RU_SAE: Scale Scalar Single-Precision Floating-Point Value With a Single-Precision Floating-Point Value (Round Towards Positive Infinity).
//
// Forms:
//
//	VSCALEFSS.RU_SAE xmm xmm k xmm
//	VSCALEFSS.RU_SAE xmm xmm xmm
//
// Construct and append a VSCALEFSS.RU_SAE instruction to the active function.
// Operates on the global context.
func VSCALEFSS_RU_SAE(ops ...operand.Op) { ctx.VSCALEFSS_RU_SAE(ops...) }
|
|
|
|
// VSCALEFSS_RU_SAE_Z: Scale Scalar Single-Precision Floating-Point Value With a Single-Precision Floating-Point Value (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VSCALEFSS.RU_SAE.Z xmm xmm k xmm
//
// Construct and append a VSCALEFSS.RU_SAE.Z instruction to the active function.
func (c *Context) VSCALEFSS_RU_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VSCALEFSS_RU_SAE_Z(x, x1, k, x2))
}

// VSCALEFSS_RU_SAE_Z: Scale Scalar Single-Precision Floating-Point Value With a Single-Precision Floating-Point Value (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VSCALEFSS.RU_SAE.Z xmm xmm k xmm
//
// Construct and append a VSCALEFSS.RU_SAE.Z instruction to the active function.
// Operates on the global context.
func VSCALEFSS_RU_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VSCALEFSS_RU_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VSCALEFSS_RZ_SAE: Scale Scalar Single-Precision Floating-Point Value With a Single-Precision Floating-Point Value (Round Towards Zero).
//
// Forms:
//
//	VSCALEFSS.RZ_SAE xmm xmm k xmm
//	VSCALEFSS.RZ_SAE xmm xmm xmm
//
// Construct and append a VSCALEFSS.RZ_SAE instruction to the active function.
func (c *Context) VSCALEFSS_RZ_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VSCALEFSS_RZ_SAE(ops...))
}

// VSCALEFSS_RZ_SAE: Scale Scalar Single-Precision Floating-Point Value With a Single-Precision Floating-Point Value (Round Towards Zero).
//
// Forms:
//
//	VSCALEFSS.RZ_SAE xmm xmm k xmm
//	VSCALEFSS.RZ_SAE xmm xmm xmm
//
// Construct and append a VSCALEFSS.RZ_SAE instruction to the active function.
// Operates on the global context.
func VSCALEFSS_RZ_SAE(ops ...operand.Op) { ctx.VSCALEFSS_RZ_SAE(ops...) }
|
|
|
|
// VSCALEFSS_RZ_SAE_Z: Scale Scalar Single-Precision Floating-Point Value With a Single-Precision Floating-Point Value (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VSCALEFSS.RZ_SAE.Z xmm xmm k xmm
//
// Construct and append a VSCALEFSS.RZ_SAE.Z instruction to the active function.
func (c *Context) VSCALEFSS_RZ_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VSCALEFSS_RZ_SAE_Z(x, x1, k, x2))
}

// VSCALEFSS_RZ_SAE_Z: Scale Scalar Single-Precision Floating-Point Value With a Single-Precision Floating-Point Value (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VSCALEFSS.RZ_SAE.Z xmm xmm k xmm
//
// Construct and append a VSCALEFSS.RZ_SAE.Z instruction to the active function.
// Operates on the global context.
func VSCALEFSS_RZ_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VSCALEFSS_RZ_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VSCALEFSS_Z: Scale Scalar Single-Precision Floating-Point Value With a Single-Precision Floating-Point Value (Zeroing Masking).
//
// Forms:
//
//	VSCALEFSS.Z m32 xmm k xmm
//	VSCALEFSS.Z xmm xmm k xmm
//
// Construct and append a VSCALEFSS.Z instruction to the active function.
func (c *Context) VSCALEFSS_Z(mx, x, k, x1 operand.Op) {
	c.addinstruction(x86.VSCALEFSS_Z(mx, x, k, x1))
}

// VSCALEFSS_Z: Scale Scalar Single-Precision Floating-Point Value With a Single-Precision Floating-Point Value (Zeroing Masking).
//
// Forms:
//
//	VSCALEFSS.Z m32 xmm k xmm
//	VSCALEFSS.Z xmm xmm k xmm
//
// Construct and append a VSCALEFSS.Z instruction to the active function.
// Operates on the global context.
func VSCALEFSS_Z(mx, x, k, x1 operand.Op) { ctx.VSCALEFSS_Z(mx, x, k, x1) }
|
|
|
|
// VSCATTERDPD: Scatter Packed Double-Precision Floating-Point Values with Signed Doubleword Indices.
//
// Forms:
//
//	VSCATTERDPD xmm k vm32x
//	VSCATTERDPD ymm k vm32x
//	VSCATTERDPD zmm k vm32y
//
// Construct and append a VSCATTERDPD instruction to the active function.
func (c *Context) VSCATTERDPD(xyz, k, v operand.Op) {
	c.addinstruction(x86.VSCATTERDPD(xyz, k, v))
}

// VSCATTERDPD: Scatter Packed Double-Precision Floating-Point Values with Signed Doubleword Indices.
//
// Forms:
//
//	VSCATTERDPD xmm k vm32x
//	VSCATTERDPD ymm k vm32x
//	VSCATTERDPD zmm k vm32y
//
// Construct and append a VSCATTERDPD instruction to the active function.
// Operates on the global context.
func VSCATTERDPD(xyz, k, v operand.Op) { ctx.VSCATTERDPD(xyz, k, v) }
|
|
|
|
// VSCATTERDPS: Scatter Packed Single-Precision Floating-Point Values with Signed Doubleword Indices.
//
// Forms:
//
//	VSCATTERDPS xmm k vm32x
//	VSCATTERDPS ymm k vm32y
//	VSCATTERDPS zmm k vm32z
//
// Construct and append a VSCATTERDPS instruction to the active function.
func (c *Context) VSCATTERDPS(xyz, k, v operand.Op) {
	c.addinstruction(x86.VSCATTERDPS(xyz, k, v))
}

// VSCATTERDPS: Scatter Packed Single-Precision Floating-Point Values with Signed Doubleword Indices.
//
// Forms:
//
//	VSCATTERDPS xmm k vm32x
//	VSCATTERDPS ymm k vm32y
//	VSCATTERDPS zmm k vm32z
//
// Construct and append a VSCATTERDPS instruction to the active function.
// Operates on the global context.
func VSCATTERDPS(xyz, k, v operand.Op) { ctx.VSCATTERDPS(xyz, k, v) }
|
|
|
|
// VSCATTERQPD: Scatter Packed Double-Precision Floating-Point Values with Signed Quadword Indices.
//
// Forms:
//
//	VSCATTERQPD xmm k vm64x
//	VSCATTERQPD ymm k vm64y
//	VSCATTERQPD zmm k vm64z
//
// Construct and append a VSCATTERQPD instruction to the active function.
func (c *Context) VSCATTERQPD(xyz, k, v operand.Op) {
	c.addinstruction(x86.VSCATTERQPD(xyz, k, v))
}

// VSCATTERQPD: Scatter Packed Double-Precision Floating-Point Values with Signed Quadword Indices.
//
// Forms:
//
//	VSCATTERQPD xmm k vm64x
//	VSCATTERQPD ymm k vm64y
//	VSCATTERQPD zmm k vm64z
//
// Construct and append a VSCATTERQPD instruction to the active function.
// Operates on the global context.
func VSCATTERQPD(xyz, k, v operand.Op) { ctx.VSCATTERQPD(xyz, k, v) }
|
|
|
|
// VSCATTERQPS: Scatter Packed Single-Precision Floating-Point Values with Signed Quadword Indices.
//
// Forms:
//
//	VSCATTERQPS xmm k vm64x
//	VSCATTERQPS xmm k vm64y
//	VSCATTERQPS ymm k vm64z
//
// Construct and append a VSCATTERQPS instruction to the active function.
func (c *Context) VSCATTERQPS(xy, k, v operand.Op) {
	c.addinstruction(x86.VSCATTERQPS(xy, k, v))
}

// VSCATTERQPS: Scatter Packed Single-Precision Floating-Point Values with Signed Quadword Indices.
//
// Forms:
//
//	VSCATTERQPS xmm k vm64x
//	VSCATTERQPS xmm k vm64y
//	VSCATTERQPS ymm k vm64z
//
// Construct and append a VSCATTERQPS instruction to the active function.
// Operates on the global context.
func VSCATTERQPS(xy, k, v operand.Op) { ctx.VSCATTERQPS(xy, k, v) }
|
|
|
|
// VSHUFF32X4: Shuffle 128-Bit Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	VSHUFF32X4 imm8 m256 ymm k ymm
//	VSHUFF32X4 imm8 m256 ymm ymm
//	VSHUFF32X4 imm8 ymm ymm k ymm
//	VSHUFF32X4 imm8 ymm ymm ymm
//	VSHUFF32X4 imm8 m512 zmm k zmm
//	VSHUFF32X4 imm8 m512 zmm zmm
//	VSHUFF32X4 imm8 zmm zmm k zmm
//	VSHUFF32X4 imm8 zmm zmm zmm
//
// Construct and append a VSHUFF32X4 instruction to the active function.
func (c *Context) VSHUFF32X4(ops ...operand.Op) {
	c.addinstruction(x86.VSHUFF32X4(ops...))
}

// VSHUFF32X4: Shuffle 128-Bit Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	VSHUFF32X4 imm8 m256 ymm k ymm
//	VSHUFF32X4 imm8 m256 ymm ymm
//	VSHUFF32X4 imm8 ymm ymm k ymm
//	VSHUFF32X4 imm8 ymm ymm ymm
//	VSHUFF32X4 imm8 m512 zmm k zmm
//	VSHUFF32X4 imm8 m512 zmm zmm
//	VSHUFF32X4 imm8 zmm zmm k zmm
//	VSHUFF32X4 imm8 zmm zmm zmm
//
// Construct and append a VSHUFF32X4 instruction to the active function.
// Operates on the global context.
func VSHUFF32X4(ops ...operand.Op) { ctx.VSHUFF32X4(ops...) }
|
|
|
|
// VSHUFF32X4_BCST: Shuffle 128-Bit Packed Single-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VSHUFF32X4.BCST imm8 m32 ymm k ymm
//	VSHUFF32X4.BCST imm8 m32 ymm ymm
//	VSHUFF32X4.BCST imm8 m32 zmm k zmm
//	VSHUFF32X4.BCST imm8 m32 zmm zmm
//
// Construct and append a VSHUFF32X4.BCST instruction to the active function.
func (c *Context) VSHUFF32X4_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VSHUFF32X4_BCST(ops...))
}

// VSHUFF32X4_BCST: Shuffle 128-Bit Packed Single-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VSHUFF32X4.BCST imm8 m32 ymm k ymm
//	VSHUFF32X4.BCST imm8 m32 ymm ymm
//	VSHUFF32X4.BCST imm8 m32 zmm k zmm
//	VSHUFF32X4.BCST imm8 m32 zmm zmm
//
// Construct and append a VSHUFF32X4.BCST instruction to the active function.
// Operates on the global context.
func VSHUFF32X4_BCST(ops ...operand.Op) { ctx.VSHUFF32X4_BCST(ops...) }
|
|
|
|
// VSHUFF32X4_BCST_Z: Shuffle 128-Bit Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VSHUFF32X4.BCST.Z imm8 m32 ymm k ymm
//	VSHUFF32X4.BCST.Z imm8 m32 zmm k zmm
//
// Construct and append a VSHUFF32X4.BCST.Z instruction to the active function.
func (c *Context) VSHUFF32X4_BCST_Z(i, m, yz, k, yz1 operand.Op) {
	c.addinstruction(x86.VSHUFF32X4_BCST_Z(i, m, yz, k, yz1))
}

// VSHUFF32X4_BCST_Z: Shuffle 128-Bit Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VSHUFF32X4.BCST.Z imm8 m32 ymm k ymm
//	VSHUFF32X4.BCST.Z imm8 m32 zmm k zmm
//
// Construct and append a VSHUFF32X4.BCST.Z instruction to the active function.
// Operates on the global context.
func VSHUFF32X4_BCST_Z(i, m, yz, k, yz1 operand.Op) { ctx.VSHUFF32X4_BCST_Z(i, m, yz, k, yz1) }
|
|
|
|
// VSHUFF32X4_Z: Shuffle 128-Bit Packed Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VSHUFF32X4.Z imm8 m256 ymm k ymm
//	VSHUFF32X4.Z imm8 ymm ymm k ymm
//	VSHUFF32X4.Z imm8 m512 zmm k zmm
//	VSHUFF32X4.Z imm8 zmm zmm k zmm
//
// Construct and append a VSHUFF32X4.Z instruction to the active function.
func (c *Context) VSHUFF32X4_Z(i, myz, yz, k, yz1 operand.Op) {
	c.addinstruction(x86.VSHUFF32X4_Z(i, myz, yz, k, yz1))
}

// VSHUFF32X4_Z: Shuffle 128-Bit Packed Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VSHUFF32X4.Z imm8 m256 ymm k ymm
//	VSHUFF32X4.Z imm8 ymm ymm k ymm
//	VSHUFF32X4.Z imm8 m512 zmm k zmm
//	VSHUFF32X4.Z imm8 zmm zmm k zmm
//
// Construct and append a VSHUFF32X4.Z instruction to the active function.
// Operates on the global context.
func VSHUFF32X4_Z(i, myz, yz, k, yz1 operand.Op) { ctx.VSHUFF32X4_Z(i, myz, yz, k, yz1) }
|
|
|
|
// VSHUFF64X2: Shuffle 128-Bit Packed Double-Precision Floating-Point Values.
//
// Forms:
//
//	VSHUFF64X2 imm8 m256 ymm k ymm
//	VSHUFF64X2 imm8 m256 ymm ymm
//	VSHUFF64X2 imm8 ymm ymm k ymm
//	VSHUFF64X2 imm8 ymm ymm ymm
//	VSHUFF64X2 imm8 m512 zmm k zmm
//	VSHUFF64X2 imm8 m512 zmm zmm
//	VSHUFF64X2 imm8 zmm zmm k zmm
//	VSHUFF64X2 imm8 zmm zmm zmm
//
// Construct and append a VSHUFF64X2 instruction to the active function.
func (c *Context) VSHUFF64X2(ops ...operand.Op) {
	c.addinstruction(x86.VSHUFF64X2(ops...))
}

// VSHUFF64X2: Shuffle 128-Bit Packed Double-Precision Floating-Point Values.
//
// Forms:
//
//	VSHUFF64X2 imm8 m256 ymm k ymm
//	VSHUFF64X2 imm8 m256 ymm ymm
//	VSHUFF64X2 imm8 ymm ymm k ymm
//	VSHUFF64X2 imm8 ymm ymm ymm
//	VSHUFF64X2 imm8 m512 zmm k zmm
//	VSHUFF64X2 imm8 m512 zmm zmm
//	VSHUFF64X2 imm8 zmm zmm k zmm
//	VSHUFF64X2 imm8 zmm zmm zmm
//
// Construct and append a VSHUFF64X2 instruction to the active function.
// Operates on the global context.
func VSHUFF64X2(ops ...operand.Op) { ctx.VSHUFF64X2(ops...) }
|
|
|
|
// VSHUFF64X2_BCST: Shuffle 128-Bit Packed Double-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VSHUFF64X2.BCST imm8 m64 ymm k ymm
//	VSHUFF64X2.BCST imm8 m64 ymm ymm
//	VSHUFF64X2.BCST imm8 m64 zmm k zmm
//	VSHUFF64X2.BCST imm8 m64 zmm zmm
//
// Construct and append a VSHUFF64X2.BCST instruction to the active function.
func (c *Context) VSHUFF64X2_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VSHUFF64X2_BCST(ops...))
}

// VSHUFF64X2_BCST: Shuffle 128-Bit Packed Double-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VSHUFF64X2.BCST imm8 m64 ymm k ymm
//	VSHUFF64X2.BCST imm8 m64 ymm ymm
//	VSHUFF64X2.BCST imm8 m64 zmm k zmm
//	VSHUFF64X2.BCST imm8 m64 zmm zmm
//
// Construct and append a VSHUFF64X2.BCST instruction to the active function.
// Operates on the global context.
func VSHUFF64X2_BCST(ops ...operand.Op) { ctx.VSHUFF64X2_BCST(ops...) }
|
|
|
|
// VSHUFF64X2_BCST_Z: Shuffle 128-Bit Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VSHUFF64X2.BCST.Z imm8 m64 ymm k ymm
//	VSHUFF64X2.BCST.Z imm8 m64 zmm k zmm
//
// Construct and append a VSHUFF64X2.BCST.Z instruction to the active function.
func (c *Context) VSHUFF64X2_BCST_Z(i, m, yz, k, yz1 operand.Op) {
	c.addinstruction(x86.VSHUFF64X2_BCST_Z(i, m, yz, k, yz1))
}

// VSHUFF64X2_BCST_Z: Shuffle 128-Bit Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VSHUFF64X2.BCST.Z imm8 m64 ymm k ymm
//	VSHUFF64X2.BCST.Z imm8 m64 zmm k zmm
//
// Construct and append a VSHUFF64X2.BCST.Z instruction to the active function.
// Operates on the global context.
func VSHUFF64X2_BCST_Z(i, m, yz, k, yz1 operand.Op) { ctx.VSHUFF64X2_BCST_Z(i, m, yz, k, yz1) }
|
|
|
|
// VSHUFF64X2_Z: Shuffle 128-Bit Packed Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VSHUFF64X2.Z imm8 m256 ymm k ymm
//	VSHUFF64X2.Z imm8 ymm ymm k ymm
//	VSHUFF64X2.Z imm8 m512 zmm k zmm
//	VSHUFF64X2.Z imm8 zmm zmm k zmm
//
// Construct and append a VSHUFF64X2.Z instruction to the active function.
func (c *Context) VSHUFF64X2_Z(i, myz, yz, k, yz1 operand.Op) {
	c.addinstruction(x86.VSHUFF64X2_Z(i, myz, yz, k, yz1))
}

// VSHUFF64X2_Z: Shuffle 128-Bit Packed Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VSHUFF64X2.Z imm8 m256 ymm k ymm
//	VSHUFF64X2.Z imm8 ymm ymm k ymm
//	VSHUFF64X2.Z imm8 m512 zmm k zmm
//	VSHUFF64X2.Z imm8 zmm zmm k zmm
//
// Construct and append a VSHUFF64X2.Z instruction to the active function.
// Operates on the global context.
func VSHUFF64X2_Z(i, myz, yz, k, yz1 operand.Op) { ctx.VSHUFF64X2_Z(i, myz, yz, k, yz1) }
|
|
|
|
// VSHUFI32X4: Shuffle 128-Bit Packed Doubleword Integer Values.
//
// Forms:
//
//	VSHUFI32X4 imm8 m256 ymm k ymm
//	VSHUFI32X4 imm8 m256 ymm ymm
//	VSHUFI32X4 imm8 ymm ymm k ymm
//	VSHUFI32X4 imm8 ymm ymm ymm
//	VSHUFI32X4 imm8 m512 zmm k zmm
//	VSHUFI32X4 imm8 m512 zmm zmm
//	VSHUFI32X4 imm8 zmm zmm k zmm
//	VSHUFI32X4 imm8 zmm zmm zmm
//
// Construct and append a VSHUFI32X4 instruction to the active function.
func (c *Context) VSHUFI32X4(ops ...operand.Op) {
	c.addinstruction(x86.VSHUFI32X4(ops...))
}

// VSHUFI32X4: Shuffle 128-Bit Packed Doubleword Integer Values.
//
// Forms:
//
//	VSHUFI32X4 imm8 m256 ymm k ymm
//	VSHUFI32X4 imm8 m256 ymm ymm
//	VSHUFI32X4 imm8 ymm ymm k ymm
//	VSHUFI32X4 imm8 ymm ymm ymm
//	VSHUFI32X4 imm8 m512 zmm k zmm
//	VSHUFI32X4 imm8 m512 zmm zmm
//	VSHUFI32X4 imm8 zmm zmm k zmm
//	VSHUFI32X4 imm8 zmm zmm zmm
//
// Construct and append a VSHUFI32X4 instruction to the active function.
// Operates on the global context.
func VSHUFI32X4(ops ...operand.Op) { ctx.VSHUFI32X4(ops...) }
|
|
|
|
// VSHUFI32X4_BCST: Shuffle 128-Bit Packed Doubleword Integer Values (Broadcast).
//
// Forms:
//
//	VSHUFI32X4.BCST imm8 m32 ymm k ymm
//	VSHUFI32X4.BCST imm8 m32 ymm ymm
//	VSHUFI32X4.BCST imm8 m32 zmm k zmm
//	VSHUFI32X4.BCST imm8 m32 zmm zmm
//
// Construct and append a VSHUFI32X4.BCST instruction to the active function.
func (c *Context) VSHUFI32X4_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VSHUFI32X4_BCST(ops...))
}

// VSHUFI32X4_BCST: Shuffle 128-Bit Packed Doubleword Integer Values (Broadcast).
//
// Forms:
//
//	VSHUFI32X4.BCST imm8 m32 ymm k ymm
//	VSHUFI32X4.BCST imm8 m32 ymm ymm
//	VSHUFI32X4.BCST imm8 m32 zmm k zmm
//	VSHUFI32X4.BCST imm8 m32 zmm zmm
//
// Construct and append a VSHUFI32X4.BCST instruction to the active function.
// Operates on the global context.
func VSHUFI32X4_BCST(ops ...operand.Op) { ctx.VSHUFI32X4_BCST(ops...) }
|
|
|
|
// VSHUFI32X4_BCST_Z: Shuffle 128-Bit Packed Doubleword Integer Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VSHUFI32X4.BCST.Z imm8 m32 ymm k ymm
//	VSHUFI32X4.BCST.Z imm8 m32 zmm k zmm
//
// Construct and append a VSHUFI32X4.BCST.Z instruction to the active function.
func (c *Context) VSHUFI32X4_BCST_Z(i, m, yz, k, yz1 operand.Op) {
	c.addinstruction(x86.VSHUFI32X4_BCST_Z(i, m, yz, k, yz1))
}

// VSHUFI32X4_BCST_Z: Shuffle 128-Bit Packed Doubleword Integer Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VSHUFI32X4.BCST.Z imm8 m32 ymm k ymm
//	VSHUFI32X4.BCST.Z imm8 m32 zmm k zmm
//
// Construct and append a VSHUFI32X4.BCST.Z instruction to the active function.
// Operates on the global context.
func VSHUFI32X4_BCST_Z(i, m, yz, k, yz1 operand.Op) { ctx.VSHUFI32X4_BCST_Z(i, m, yz, k, yz1) }
|
|
|
|
// VSHUFI32X4_Z: Shuffle 128-Bit Packed Doubleword Integer Values (Zeroing Masking).
//
// Forms:
//
//	VSHUFI32X4.Z imm8 m256 ymm k ymm
//	VSHUFI32X4.Z imm8 ymm ymm k ymm
//	VSHUFI32X4.Z imm8 m512 zmm k zmm
//	VSHUFI32X4.Z imm8 zmm zmm k zmm
//
// Construct and append a VSHUFI32X4.Z instruction to the active function.
func (c *Context) VSHUFI32X4_Z(i, myz, yz, k, yz1 operand.Op) {
	c.addinstruction(x86.VSHUFI32X4_Z(i, myz, yz, k, yz1))
}

// VSHUFI32X4_Z: Shuffle 128-Bit Packed Doubleword Integer Values (Zeroing Masking).
//
// Forms:
//
//	VSHUFI32X4.Z imm8 m256 ymm k ymm
//	VSHUFI32X4.Z imm8 ymm ymm k ymm
//	VSHUFI32X4.Z imm8 m512 zmm k zmm
//	VSHUFI32X4.Z imm8 zmm zmm k zmm
//
// Construct and append a VSHUFI32X4.Z instruction to the active function.
// Operates on the global context.
func VSHUFI32X4_Z(i, myz, yz, k, yz1 operand.Op) { ctx.VSHUFI32X4_Z(i, myz, yz, k, yz1) }
|
|
|
|
// VSHUFI64X2: Shuffle 128-Bit Packed Quadword Integer Values.
//
// Forms:
//
//	VSHUFI64X2 imm8 m256 ymm k ymm
//	VSHUFI64X2 imm8 m256 ymm ymm
//	VSHUFI64X2 imm8 ymm ymm k ymm
//	VSHUFI64X2 imm8 ymm ymm ymm
//	VSHUFI64X2 imm8 m512 zmm k zmm
//	VSHUFI64X2 imm8 m512 zmm zmm
//	VSHUFI64X2 imm8 zmm zmm k zmm
//	VSHUFI64X2 imm8 zmm zmm zmm
//
// Construct and append a VSHUFI64X2 instruction to the active function.
func (c *Context) VSHUFI64X2(ops ...operand.Op) {
	c.addinstruction(x86.VSHUFI64X2(ops...))
}

// VSHUFI64X2: Shuffle 128-Bit Packed Quadword Integer Values.
//
// Forms:
//
//	VSHUFI64X2 imm8 m256 ymm k ymm
//	VSHUFI64X2 imm8 m256 ymm ymm
//	VSHUFI64X2 imm8 ymm ymm k ymm
//	VSHUFI64X2 imm8 ymm ymm ymm
//	VSHUFI64X2 imm8 m512 zmm k zmm
//	VSHUFI64X2 imm8 m512 zmm zmm
//	VSHUFI64X2 imm8 zmm zmm k zmm
//	VSHUFI64X2 imm8 zmm zmm zmm
//
// Construct and append a VSHUFI64X2 instruction to the active function.
// Operates on the global context.
func VSHUFI64X2(ops ...operand.Op) { ctx.VSHUFI64X2(ops...) }
|
|
|
|
// VSHUFI64X2_BCST: Shuffle 128-Bit Packed Quadword Integer Values (Broadcast).
//
// Forms:
//
//	VSHUFI64X2.BCST imm8 m64 ymm k ymm
//	VSHUFI64X2.BCST imm8 m64 ymm ymm
//	VSHUFI64X2.BCST imm8 m64 zmm k zmm
//	VSHUFI64X2.BCST imm8 m64 zmm zmm
//
// Construct and append a VSHUFI64X2.BCST instruction to the active function.
func (c *Context) VSHUFI64X2_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VSHUFI64X2_BCST(ops...))
}

// VSHUFI64X2_BCST: Shuffle 128-Bit Packed Quadword Integer Values (Broadcast).
//
// Forms:
//
//	VSHUFI64X2.BCST imm8 m64 ymm k ymm
//	VSHUFI64X2.BCST imm8 m64 ymm ymm
//	VSHUFI64X2.BCST imm8 m64 zmm k zmm
//	VSHUFI64X2.BCST imm8 m64 zmm zmm
//
// Construct and append a VSHUFI64X2.BCST instruction to the active function.
// Operates on the global context.
func VSHUFI64X2_BCST(ops ...operand.Op) { ctx.VSHUFI64X2_BCST(ops...) }
|
|
|
|
// VSHUFI64X2_BCST_Z: Shuffle 128-Bit Packed Quadword Integer Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VSHUFI64X2.BCST.Z imm8 m64 ymm k ymm
//	VSHUFI64X2.BCST.Z imm8 m64 zmm k zmm
//
// Construct and append a VSHUFI64X2.BCST.Z instruction to the active function.
func (c *Context) VSHUFI64X2_BCST_Z(i, m, yz, k, yz1 operand.Op) {
	c.addinstruction(x86.VSHUFI64X2_BCST_Z(i, m, yz, k, yz1))
}

// VSHUFI64X2_BCST_Z: Shuffle 128-Bit Packed Quadword Integer Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VSHUFI64X2.BCST.Z imm8 m64 ymm k ymm
//	VSHUFI64X2.BCST.Z imm8 m64 zmm k zmm
//
// Construct and append a VSHUFI64X2.BCST.Z instruction to the active function.
// Operates on the global context.
func VSHUFI64X2_BCST_Z(i, m, yz, k, yz1 operand.Op) { ctx.VSHUFI64X2_BCST_Z(i, m, yz, k, yz1) }
|
|
|
|
// VSHUFI64X2_Z: Shuffle 128-Bit Packed Quadword Integer Values (Zeroing Masking).
//
// Forms:
//
//	VSHUFI64X2.Z imm8 m256 ymm k ymm
//	VSHUFI64X2.Z imm8 ymm ymm k ymm
//	VSHUFI64X2.Z imm8 m512 zmm k zmm
//	VSHUFI64X2.Z imm8 zmm zmm k zmm
//
// Construct and append a VSHUFI64X2.Z instruction to the active function.
func (c *Context) VSHUFI64X2_Z(i, myz, yz, k, yz1 operand.Op) {
	c.addinstruction(x86.VSHUFI64X2_Z(i, myz, yz, k, yz1))
}

// VSHUFI64X2_Z: Shuffle 128-Bit Packed Quadword Integer Values (Zeroing Masking).
//
// Forms:
//
//	VSHUFI64X2.Z imm8 m256 ymm k ymm
//	VSHUFI64X2.Z imm8 ymm ymm k ymm
//	VSHUFI64X2.Z imm8 m512 zmm k zmm
//	VSHUFI64X2.Z imm8 zmm zmm k zmm
//
// Construct and append a VSHUFI64X2.Z instruction to the active function.
// Operates on the global context.
func VSHUFI64X2_Z(i, myz, yz, k, yz1 operand.Op) { ctx.VSHUFI64X2_Z(i, myz, yz, k, yz1) }
|
|
|
|
// VSHUFPD: Shuffle Packed Double-Precision Floating-Point Values.
//
// Forms:
//
//	VSHUFPD imm8 m128 xmm xmm
//	VSHUFPD imm8 m256 ymm ymm
//	VSHUFPD imm8 xmm xmm xmm
//	VSHUFPD imm8 ymm ymm ymm
//	VSHUFPD imm8 m128 xmm k xmm
//	VSHUFPD imm8 m256 ymm k ymm
//	VSHUFPD imm8 xmm xmm k xmm
//	VSHUFPD imm8 ymm ymm k ymm
//	VSHUFPD imm8 m512 zmm k zmm
//	VSHUFPD imm8 m512 zmm zmm
//	VSHUFPD imm8 zmm zmm k zmm
//	VSHUFPD imm8 zmm zmm zmm
//
// Construct and append a VSHUFPD instruction to the active function.
func (c *Context) VSHUFPD(ops ...operand.Op) {
	c.addinstruction(x86.VSHUFPD(ops...))
}

// VSHUFPD: Shuffle Packed Double-Precision Floating-Point Values.
//
// Forms:
//
//	VSHUFPD imm8 m128 xmm xmm
//	VSHUFPD imm8 m256 ymm ymm
//	VSHUFPD imm8 xmm xmm xmm
//	VSHUFPD imm8 ymm ymm ymm
//	VSHUFPD imm8 m128 xmm k xmm
//	VSHUFPD imm8 m256 ymm k ymm
//	VSHUFPD imm8 xmm xmm k xmm
//	VSHUFPD imm8 ymm ymm k ymm
//	VSHUFPD imm8 m512 zmm k zmm
//	VSHUFPD imm8 m512 zmm zmm
//	VSHUFPD imm8 zmm zmm k zmm
//	VSHUFPD imm8 zmm zmm zmm
//
// Construct and append a VSHUFPD instruction to the active function.
// Operates on the global context.
func VSHUFPD(ops ...operand.Op) { ctx.VSHUFPD(ops...) }
|
|
|
|
// VSHUFPD_BCST: Shuffle Packed Double-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VSHUFPD.BCST imm8 m64 xmm k xmm
//	VSHUFPD.BCST imm8 m64 xmm xmm
//	VSHUFPD.BCST imm8 m64 ymm k ymm
//	VSHUFPD.BCST imm8 m64 ymm ymm
//	VSHUFPD.BCST imm8 m64 zmm k zmm
//	VSHUFPD.BCST imm8 m64 zmm zmm
//
// Construct and append a VSHUFPD.BCST instruction to the active function.
func (c *Context) VSHUFPD_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VSHUFPD_BCST(ops...))
}

// VSHUFPD_BCST: Shuffle Packed Double-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VSHUFPD.BCST imm8 m64 xmm k xmm
//	VSHUFPD.BCST imm8 m64 xmm xmm
//	VSHUFPD.BCST imm8 m64 ymm k ymm
//	VSHUFPD.BCST imm8 m64 ymm ymm
//	VSHUFPD.BCST imm8 m64 zmm k zmm
//	VSHUFPD.BCST imm8 m64 zmm zmm
//
// Construct and append a VSHUFPD.BCST instruction to the active function.
// Operates on the global context.
func VSHUFPD_BCST(ops ...operand.Op) { ctx.VSHUFPD_BCST(ops...) }
|
|
|
|
// VSHUFPD_BCST_Z: Shuffle Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VSHUFPD.BCST.Z imm8 m64 xmm k xmm
//	VSHUFPD.BCST.Z imm8 m64 ymm k ymm
//	VSHUFPD.BCST.Z imm8 m64 zmm k zmm
//
// Construct and append a VSHUFPD.BCST.Z instruction to the active function.
func (c *Context) VSHUFPD_BCST_Z(i, m, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VSHUFPD_BCST_Z(i, m, xyz, k, xyz1))
}

// VSHUFPD_BCST_Z: Shuffle Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VSHUFPD.BCST.Z imm8 m64 xmm k xmm
//	VSHUFPD.BCST.Z imm8 m64 ymm k ymm
//	VSHUFPD.BCST.Z imm8 m64 zmm k zmm
//
// Construct and append a VSHUFPD.BCST.Z instruction to the active function.
// Operates on the global context.
func VSHUFPD_BCST_Z(i, m, xyz, k, xyz1 operand.Op) { ctx.VSHUFPD_BCST_Z(i, m, xyz, k, xyz1) }
|
|
|
|
// VSHUFPD_Z: Shuffle Packed Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VSHUFPD.Z imm8 m128 xmm k xmm
//	VSHUFPD.Z imm8 m256 ymm k ymm
//	VSHUFPD.Z imm8 xmm xmm k xmm
//	VSHUFPD.Z imm8 ymm ymm k ymm
//	VSHUFPD.Z imm8 m512 zmm k zmm
//	VSHUFPD.Z imm8 zmm zmm k zmm
//
// Construct and append a VSHUFPD.Z instruction to the active function.
func (c *Context) VSHUFPD_Z(i, mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VSHUFPD_Z(i, mxyz, xyz, k, xyz1))
}

// VSHUFPD_Z: Shuffle Packed Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VSHUFPD.Z imm8 m128 xmm k xmm
//	VSHUFPD.Z imm8 m256 ymm k ymm
//	VSHUFPD.Z imm8 xmm xmm k xmm
//	VSHUFPD.Z imm8 ymm ymm k ymm
//	VSHUFPD.Z imm8 m512 zmm k zmm
//	VSHUFPD.Z imm8 zmm zmm k zmm
//
// Construct and append a VSHUFPD.Z instruction to the active function.
// Operates on the global context.
func VSHUFPD_Z(i, mxyz, xyz, k, xyz1 operand.Op) { ctx.VSHUFPD_Z(i, mxyz, xyz, k, xyz1) }
|
|
|
|
// VSHUFPS: Shuffle Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	VSHUFPS imm8 m128 xmm xmm
//	VSHUFPS imm8 m256 ymm ymm
//	VSHUFPS imm8 xmm xmm xmm
//	VSHUFPS imm8 ymm ymm ymm
//	VSHUFPS imm8 m128 xmm k xmm
//	VSHUFPS imm8 m256 ymm k ymm
//	VSHUFPS imm8 xmm xmm k xmm
//	VSHUFPS imm8 ymm ymm k ymm
//	VSHUFPS imm8 m512 zmm k zmm
//	VSHUFPS imm8 m512 zmm zmm
//	VSHUFPS imm8 zmm zmm k zmm
//	VSHUFPS imm8 zmm zmm zmm
//
// Construct and append a VSHUFPS instruction to the active function.
func (c *Context) VSHUFPS(ops ...operand.Op) {
	c.addinstruction(x86.VSHUFPS(ops...))
}

// VSHUFPS: Shuffle Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	VSHUFPS imm8 m128 xmm xmm
//	VSHUFPS imm8 m256 ymm ymm
//	VSHUFPS imm8 xmm xmm xmm
//	VSHUFPS imm8 ymm ymm ymm
//	VSHUFPS imm8 m128 xmm k xmm
//	VSHUFPS imm8 m256 ymm k ymm
//	VSHUFPS imm8 xmm xmm k xmm
//	VSHUFPS imm8 ymm ymm k ymm
//	VSHUFPS imm8 m512 zmm k zmm
//	VSHUFPS imm8 m512 zmm zmm
//	VSHUFPS imm8 zmm zmm k zmm
//	VSHUFPS imm8 zmm zmm zmm
//
// Construct and append a VSHUFPS instruction to the active function.
// Operates on the global context.
func VSHUFPS(ops ...operand.Op) { ctx.VSHUFPS(ops...) }
|
|
|
|
// VSHUFPS_BCST: Shuffle Packed Single-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VSHUFPS.BCST imm8 m32 xmm k xmm
//	VSHUFPS.BCST imm8 m32 xmm xmm
//	VSHUFPS.BCST imm8 m32 ymm k ymm
//	VSHUFPS.BCST imm8 m32 ymm ymm
//	VSHUFPS.BCST imm8 m32 zmm k zmm
//	VSHUFPS.BCST imm8 m32 zmm zmm
//
// Construct and append a VSHUFPS.BCST instruction to the active function.
func (c *Context) VSHUFPS_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VSHUFPS_BCST(ops...))
}

// VSHUFPS_BCST: Shuffle Packed Single-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VSHUFPS.BCST imm8 m32 xmm k xmm
//	VSHUFPS.BCST imm8 m32 xmm xmm
//	VSHUFPS.BCST imm8 m32 ymm k ymm
//	VSHUFPS.BCST imm8 m32 ymm ymm
//	VSHUFPS.BCST imm8 m32 zmm k zmm
//	VSHUFPS.BCST imm8 m32 zmm zmm
//
// Construct and append a VSHUFPS.BCST instruction to the active function.
// Operates on the global context.
func VSHUFPS_BCST(ops ...operand.Op) { ctx.VSHUFPS_BCST(ops...) }
|
|
|
|
// VSHUFPS_BCST_Z: Shuffle Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VSHUFPS.BCST.Z imm8 m32 xmm k xmm
//	VSHUFPS.BCST.Z imm8 m32 ymm k ymm
//	VSHUFPS.BCST.Z imm8 m32 zmm k zmm
//
// Construct and append a VSHUFPS.BCST.Z instruction to the active function.
func (c *Context) VSHUFPS_BCST_Z(i, m, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VSHUFPS_BCST_Z(i, m, xyz, k, xyz1))
}

// VSHUFPS_BCST_Z: Shuffle Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VSHUFPS.BCST.Z imm8 m32 xmm k xmm
//	VSHUFPS.BCST.Z imm8 m32 ymm k ymm
//	VSHUFPS.BCST.Z imm8 m32 zmm k zmm
//
// Construct and append a VSHUFPS.BCST.Z instruction to the active function.
// Operates on the global context.
func VSHUFPS_BCST_Z(i, m, xyz, k, xyz1 operand.Op) { ctx.VSHUFPS_BCST_Z(i, m, xyz, k, xyz1) }
|
|
|
|
// VSHUFPS_Z: Shuffle Packed Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VSHUFPS.Z imm8 m128 xmm k xmm
//	VSHUFPS.Z imm8 m256 ymm k ymm
//	VSHUFPS.Z imm8 xmm xmm k xmm
//	VSHUFPS.Z imm8 ymm ymm k ymm
//	VSHUFPS.Z imm8 m512 zmm k zmm
//	VSHUFPS.Z imm8 zmm zmm k zmm
//
// Construct and append a VSHUFPS.Z instruction to the active function.
func (c *Context) VSHUFPS_Z(i, mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VSHUFPS_Z(i, mxyz, xyz, k, xyz1))
}

// VSHUFPS_Z: Shuffle Packed Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VSHUFPS.Z imm8 m128 xmm k xmm
//	VSHUFPS.Z imm8 m256 ymm k ymm
//	VSHUFPS.Z imm8 xmm xmm k xmm
//	VSHUFPS.Z imm8 ymm ymm k ymm
//	VSHUFPS.Z imm8 m512 zmm k zmm
//	VSHUFPS.Z imm8 zmm zmm k zmm
//
// Construct and append a VSHUFPS.Z instruction to the active function.
// Operates on the global context.
func VSHUFPS_Z(i, mxyz, xyz, k, xyz1 operand.Op) { ctx.VSHUFPS_Z(i, mxyz, xyz, k, xyz1) }
|
|
|
|
// VSQRTPD: Compute Square Roots of Packed Double-Precision Floating-Point Values.
//
// Forms:
//
//	VSQRTPD m128 xmm
//	VSQRTPD m256 ymm
//	VSQRTPD xmm xmm
//	VSQRTPD ymm ymm
//	VSQRTPD m128 k xmm
//	VSQRTPD m256 k ymm
//	VSQRTPD xmm k xmm
//	VSQRTPD ymm k ymm
//	VSQRTPD m512 k zmm
//	VSQRTPD m512 zmm
//	VSQRTPD zmm k zmm
//	VSQRTPD zmm zmm
//
// Construct and append a VSQRTPD instruction to the active function.
func (c *Context) VSQRTPD(ops ...operand.Op) {
	c.addinstruction(x86.VSQRTPD(ops...))
}

// VSQRTPD: Compute Square Roots of Packed Double-Precision Floating-Point Values.
//
// Forms:
//
//	VSQRTPD m128 xmm
//	VSQRTPD m256 ymm
//	VSQRTPD xmm xmm
//	VSQRTPD ymm ymm
//	VSQRTPD m128 k xmm
//	VSQRTPD m256 k ymm
//	VSQRTPD xmm k xmm
//	VSQRTPD ymm k ymm
//	VSQRTPD m512 k zmm
//	VSQRTPD m512 zmm
//	VSQRTPD zmm k zmm
//	VSQRTPD zmm zmm
//
// Construct and append a VSQRTPD instruction to the active function.
// Operates on the global context.
func VSQRTPD(ops ...operand.Op) { ctx.VSQRTPD(ops...) }
|
|
|
|
// VSQRTPD_BCST: Compute Square Roots of Packed Double-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VSQRTPD.BCST m64 k xmm
//	VSQRTPD.BCST m64 k ymm
//	VSQRTPD.BCST m64 xmm
//	VSQRTPD.BCST m64 ymm
//	VSQRTPD.BCST m64 k zmm
//	VSQRTPD.BCST m64 zmm
//
// Construct and append a VSQRTPD.BCST instruction to the active function.
func (c *Context) VSQRTPD_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VSQRTPD_BCST(ops...))
}

// VSQRTPD_BCST: Compute Square Roots of Packed Double-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VSQRTPD.BCST m64 k xmm
//	VSQRTPD.BCST m64 k ymm
//	VSQRTPD.BCST m64 xmm
//	VSQRTPD.BCST m64 ymm
//	VSQRTPD.BCST m64 k zmm
//	VSQRTPD.BCST m64 zmm
//
// Construct and append a VSQRTPD.BCST instruction to the active function.
// Operates on the global context.
func VSQRTPD_BCST(ops ...operand.Op) { ctx.VSQRTPD_BCST(ops...) }
|
|
|
|
// VSQRTPD_BCST_Z: Compute Square Roots of Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VSQRTPD.BCST.Z m64 k xmm
//	VSQRTPD.BCST.Z m64 k ymm
//	VSQRTPD.BCST.Z m64 k zmm
//
// Construct and append a VSQRTPD.BCST.Z instruction to the active function.
func (c *Context) VSQRTPD_BCST_Z(m, k, xyz operand.Op) {
	c.addinstruction(x86.VSQRTPD_BCST_Z(m, k, xyz))
}

// VSQRTPD_BCST_Z: Compute Square Roots of Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VSQRTPD.BCST.Z m64 k xmm
//	VSQRTPD.BCST.Z m64 k ymm
//	VSQRTPD.BCST.Z m64 k zmm
//
// Construct and append a VSQRTPD.BCST.Z instruction to the active function.
// Operates on the global context.
func VSQRTPD_BCST_Z(m, k, xyz operand.Op) { ctx.VSQRTPD_BCST_Z(m, k, xyz) }
|
|
|
|
// VSQRTPD_RD_SAE: Compute Square Roots of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VSQRTPD.RD_SAE zmm k zmm
//	VSQRTPD.RD_SAE zmm zmm
//
// Construct and append a VSQRTPD.RD_SAE instruction to the active function.
func (c *Context) VSQRTPD_RD_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VSQRTPD_RD_SAE(ops...))
}

// VSQRTPD_RD_SAE: Compute Square Roots of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VSQRTPD.RD_SAE zmm k zmm
//	VSQRTPD.RD_SAE zmm zmm
//
// Construct and append a VSQRTPD.RD_SAE instruction to the active function.
// Operates on the global context.
func VSQRTPD_RD_SAE(ops ...operand.Op) { ctx.VSQRTPD_RD_SAE(ops...) }

// VSQRTPD_RD_SAE_Z: Compute Square Roots of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VSQRTPD.RD_SAE.Z zmm k zmm
//
// Construct and append a VSQRTPD.RD_SAE.Z instruction to the active function.
func (c *Context) VSQRTPD_RD_SAE_Z(z, k, z1 operand.Op) {
	c.addinstruction(x86.VSQRTPD_RD_SAE_Z(z, k, z1))
}

// VSQRTPD_RD_SAE_Z: Compute Square Roots of Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VSQRTPD.RD_SAE.Z zmm k zmm
//
// Construct and append a VSQRTPD.RD_SAE.Z instruction to the active function.
// Operates on the global context.
func VSQRTPD_RD_SAE_Z(z, k, z1 operand.Op) { ctx.VSQRTPD_RD_SAE_Z(z, k, z1) }
|
|
|
|
// VSQRTPD_RN_SAE: Compute Square Roots of Packed Double-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VSQRTPD.RN_SAE zmm k zmm
//	VSQRTPD.RN_SAE zmm zmm
//
// Construct and append a VSQRTPD.RN_SAE instruction to the active function.
func (c *Context) VSQRTPD_RN_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VSQRTPD_RN_SAE(ops...))
}

// VSQRTPD_RN_SAE: Compute Square Roots of Packed Double-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VSQRTPD.RN_SAE zmm k zmm
//	VSQRTPD.RN_SAE zmm zmm
//
// Construct and append a VSQRTPD.RN_SAE instruction to the active function.
// Operates on the global context.
func VSQRTPD_RN_SAE(ops ...operand.Op) { ctx.VSQRTPD_RN_SAE(ops...) }

// VSQRTPD_RN_SAE_Z: Compute Square Roots of Packed Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VSQRTPD.RN_SAE.Z zmm k zmm
//
// Construct and append a VSQRTPD.RN_SAE.Z instruction to the active function.
func (c *Context) VSQRTPD_RN_SAE_Z(z, k, z1 operand.Op) {
	c.addinstruction(x86.VSQRTPD_RN_SAE_Z(z, k, z1))
}

// VSQRTPD_RN_SAE_Z: Compute Square Roots of Packed Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VSQRTPD.RN_SAE.Z zmm k zmm
//
// Construct and append a VSQRTPD.RN_SAE.Z instruction to the active function.
// Operates on the global context.
func VSQRTPD_RN_SAE_Z(z, k, z1 operand.Op) { ctx.VSQRTPD_RN_SAE_Z(z, k, z1) }
|
|
|
|
// VSQRTPD_RU_SAE: Compute Square Roots of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VSQRTPD.RU_SAE zmm k zmm
//	VSQRTPD.RU_SAE zmm zmm
//
// Construct and append a VSQRTPD.RU_SAE instruction to the active function.
func (c *Context) VSQRTPD_RU_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VSQRTPD_RU_SAE(ops...))
}

// VSQRTPD_RU_SAE: Compute Square Roots of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VSQRTPD.RU_SAE zmm k zmm
//	VSQRTPD.RU_SAE zmm zmm
//
// Construct and append a VSQRTPD.RU_SAE instruction to the active function.
// Operates on the global context.
func VSQRTPD_RU_SAE(ops ...operand.Op) { ctx.VSQRTPD_RU_SAE(ops...) }

// VSQRTPD_RU_SAE_Z: Compute Square Roots of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VSQRTPD.RU_SAE.Z zmm k zmm
//
// Construct and append a VSQRTPD.RU_SAE.Z instruction to the active function.
func (c *Context) VSQRTPD_RU_SAE_Z(z, k, z1 operand.Op) {
	c.addinstruction(x86.VSQRTPD_RU_SAE_Z(z, k, z1))
}

// VSQRTPD_RU_SAE_Z: Compute Square Roots of Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VSQRTPD.RU_SAE.Z zmm k zmm
//
// Construct and append a VSQRTPD.RU_SAE.Z instruction to the active function.
// Operates on the global context.
func VSQRTPD_RU_SAE_Z(z, k, z1 operand.Op) { ctx.VSQRTPD_RU_SAE_Z(z, k, z1) }
|
|
|
|
// VSQRTPD_RZ_SAE: Compute Square Roots of Packed Double-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VSQRTPD.RZ_SAE zmm k zmm
//	VSQRTPD.RZ_SAE zmm zmm
//
// Construct and append a VSQRTPD.RZ_SAE instruction to the active function.
func (c *Context) VSQRTPD_RZ_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VSQRTPD_RZ_SAE(ops...))
}

// VSQRTPD_RZ_SAE: Compute Square Roots of Packed Double-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VSQRTPD.RZ_SAE zmm k zmm
//	VSQRTPD.RZ_SAE zmm zmm
//
// Construct and append a VSQRTPD.RZ_SAE instruction to the active function.
// Operates on the global context.
func VSQRTPD_RZ_SAE(ops ...operand.Op) { ctx.VSQRTPD_RZ_SAE(ops...) }

// VSQRTPD_RZ_SAE_Z: Compute Square Roots of Packed Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VSQRTPD.RZ_SAE.Z zmm k zmm
//
// Construct and append a VSQRTPD.RZ_SAE.Z instruction to the active function.
func (c *Context) VSQRTPD_RZ_SAE_Z(z, k, z1 operand.Op) {
	c.addinstruction(x86.VSQRTPD_RZ_SAE_Z(z, k, z1))
}

// VSQRTPD_RZ_SAE_Z: Compute Square Roots of Packed Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VSQRTPD.RZ_SAE.Z zmm k zmm
//
// Construct and append a VSQRTPD.RZ_SAE.Z instruction to the active function.
// Operates on the global context.
func VSQRTPD_RZ_SAE_Z(z, k, z1 operand.Op) { ctx.VSQRTPD_RZ_SAE_Z(z, k, z1) }
|
|
|
|
// VSQRTPD_Z: Compute Square Roots of Packed Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VSQRTPD.Z m128 k xmm
//	VSQRTPD.Z m256 k ymm
//	VSQRTPD.Z xmm k xmm
//	VSQRTPD.Z ymm k ymm
//	VSQRTPD.Z m512 k zmm
//	VSQRTPD.Z zmm k zmm
//
// Construct and append a VSQRTPD.Z instruction to the active function.
func (c *Context) VSQRTPD_Z(mxyz, k, xyz operand.Op) {
	c.addinstruction(x86.VSQRTPD_Z(mxyz, k, xyz))
}

// VSQRTPD_Z: Compute Square Roots of Packed Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VSQRTPD.Z m128 k xmm
//	VSQRTPD.Z m256 k ymm
//	VSQRTPD.Z xmm k xmm
//	VSQRTPD.Z ymm k ymm
//	VSQRTPD.Z m512 k zmm
//	VSQRTPD.Z zmm k zmm
//
// Construct and append a VSQRTPD.Z instruction to the active function.
// Operates on the global context.
func VSQRTPD_Z(mxyz, k, xyz operand.Op) { ctx.VSQRTPD_Z(mxyz, k, xyz) }
|
|
|
|
// VSQRTPS: Compute Square Roots of Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	VSQRTPS m128 xmm
//	VSQRTPS m256 ymm
//	VSQRTPS xmm xmm
//	VSQRTPS ymm ymm
//	VSQRTPS m128 k xmm
//	VSQRTPS m256 k ymm
//	VSQRTPS xmm k xmm
//	VSQRTPS ymm k ymm
//	VSQRTPS m512 k zmm
//	VSQRTPS m512 zmm
//	VSQRTPS zmm k zmm
//	VSQRTPS zmm zmm
//
// Construct and append a VSQRTPS instruction to the active function.
func (c *Context) VSQRTPS(ops ...operand.Op) {
	c.addinstruction(x86.VSQRTPS(ops...))
}

// VSQRTPS: Compute Square Roots of Packed Single-Precision Floating-Point Values.
//
// Forms:
//
//	VSQRTPS m128 xmm
//	VSQRTPS m256 ymm
//	VSQRTPS xmm xmm
//	VSQRTPS ymm ymm
//	VSQRTPS m128 k xmm
//	VSQRTPS m256 k ymm
//	VSQRTPS xmm k xmm
//	VSQRTPS ymm k ymm
//	VSQRTPS m512 k zmm
//	VSQRTPS m512 zmm
//	VSQRTPS zmm k zmm
//	VSQRTPS zmm zmm
//
// Construct and append a VSQRTPS instruction to the active function.
// Operates on the global context.
func VSQRTPS(ops ...operand.Op) { ctx.VSQRTPS(ops...) }
|
|
|
|
// VSQRTPS_BCST: Compute Square Roots of Packed Single-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VSQRTPS.BCST m32 k xmm
//	VSQRTPS.BCST m32 k ymm
//	VSQRTPS.BCST m32 xmm
//	VSQRTPS.BCST m32 ymm
//	VSQRTPS.BCST m32 k zmm
//	VSQRTPS.BCST m32 zmm
//
// Construct and append a VSQRTPS.BCST instruction to the active function.
func (c *Context) VSQRTPS_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VSQRTPS_BCST(ops...))
}

// VSQRTPS_BCST: Compute Square Roots of Packed Single-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VSQRTPS.BCST m32 k xmm
//	VSQRTPS.BCST m32 k ymm
//	VSQRTPS.BCST m32 xmm
//	VSQRTPS.BCST m32 ymm
//	VSQRTPS.BCST m32 k zmm
//	VSQRTPS.BCST m32 zmm
//
// Construct and append a VSQRTPS.BCST instruction to the active function.
// Operates on the global context.
func VSQRTPS_BCST(ops ...operand.Op) { ctx.VSQRTPS_BCST(ops...) }
|
|
|
|
// VSQRTPS_BCST_Z: Compute Square Roots of Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VSQRTPS.BCST.Z m32 k xmm
//	VSQRTPS.BCST.Z m32 k ymm
//	VSQRTPS.BCST.Z m32 k zmm
//
// Construct and append a VSQRTPS.BCST.Z instruction to the active function.
func (c *Context) VSQRTPS_BCST_Z(m, k, xyz operand.Op) {
	c.addinstruction(x86.VSQRTPS_BCST_Z(m, k, xyz))
}

// VSQRTPS_BCST_Z: Compute Square Roots of Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VSQRTPS.BCST.Z m32 k xmm
//	VSQRTPS.BCST.Z m32 k ymm
//	VSQRTPS.BCST.Z m32 k zmm
//
// Construct and append a VSQRTPS.BCST.Z instruction to the active function.
// Operates on the global context.
func VSQRTPS_BCST_Z(m, k, xyz operand.Op) { ctx.VSQRTPS_BCST_Z(m, k, xyz) }
|
|
|
|
// VSQRTPS_RD_SAE: Compute Square Roots of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VSQRTPS.RD_SAE zmm k zmm
//	VSQRTPS.RD_SAE zmm zmm
//
// Construct and append a VSQRTPS.RD_SAE instruction to the active function.
func (c *Context) VSQRTPS_RD_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VSQRTPS_RD_SAE(ops...))
}

// VSQRTPS_RD_SAE: Compute Square Roots of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity).
//
// Forms:
//
//	VSQRTPS.RD_SAE zmm k zmm
//	VSQRTPS.RD_SAE zmm zmm
//
// Construct and append a VSQRTPS.RD_SAE instruction to the active function.
// Operates on the global context.
func VSQRTPS_RD_SAE(ops ...operand.Op) { ctx.VSQRTPS_RD_SAE(ops...) }

// VSQRTPS_RD_SAE_Z: Compute Square Roots of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VSQRTPS.RD_SAE.Z zmm k zmm
//
// Construct and append a VSQRTPS.RD_SAE.Z instruction to the active function.
func (c *Context) VSQRTPS_RD_SAE_Z(z, k, z1 operand.Op) {
	c.addinstruction(x86.VSQRTPS_RD_SAE_Z(z, k, z1))
}

// VSQRTPS_RD_SAE_Z: Compute Square Roots of Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VSQRTPS.RD_SAE.Z zmm k zmm
//
// Construct and append a VSQRTPS.RD_SAE.Z instruction to the active function.
// Operates on the global context.
func VSQRTPS_RD_SAE_Z(z, k, z1 operand.Op) { ctx.VSQRTPS_RD_SAE_Z(z, k, z1) }
|
|
|
|
// VSQRTPS_RN_SAE: Compute Square Roots of Packed Single-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VSQRTPS.RN_SAE zmm k zmm
//	VSQRTPS.RN_SAE zmm zmm
//
// Construct and append a VSQRTPS.RN_SAE instruction to the active function.
func (c *Context) VSQRTPS_RN_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VSQRTPS_RN_SAE(ops...))
}

// VSQRTPS_RN_SAE: Compute Square Roots of Packed Single-Precision Floating-Point Values (Round Towards Nearest).
//
// Forms:
//
//	VSQRTPS.RN_SAE zmm k zmm
//	VSQRTPS.RN_SAE zmm zmm
//
// Construct and append a VSQRTPS.RN_SAE instruction to the active function.
// Operates on the global context.
func VSQRTPS_RN_SAE(ops ...operand.Op) { ctx.VSQRTPS_RN_SAE(ops...) }

// VSQRTPS_RN_SAE_Z: Compute Square Roots of Packed Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VSQRTPS.RN_SAE.Z zmm k zmm
//
// Construct and append a VSQRTPS.RN_SAE.Z instruction to the active function.
func (c *Context) VSQRTPS_RN_SAE_Z(z, k, z1 operand.Op) {
	c.addinstruction(x86.VSQRTPS_RN_SAE_Z(z, k, z1))
}

// VSQRTPS_RN_SAE_Z: Compute Square Roots of Packed Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VSQRTPS.RN_SAE.Z zmm k zmm
//
// Construct and append a VSQRTPS.RN_SAE.Z instruction to the active function.
// Operates on the global context.
func VSQRTPS_RN_SAE_Z(z, k, z1 operand.Op) { ctx.VSQRTPS_RN_SAE_Z(z, k, z1) }
|
|
|
|
// VSQRTPS_RU_SAE: Compute Square Roots of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VSQRTPS.RU_SAE zmm k zmm
//	VSQRTPS.RU_SAE zmm zmm
//
// Construct and append a VSQRTPS.RU_SAE instruction to the active function.
func (c *Context) VSQRTPS_RU_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VSQRTPS_RU_SAE(ops...))
}

// VSQRTPS_RU_SAE: Compute Square Roots of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity).
//
// Forms:
//
//	VSQRTPS.RU_SAE zmm k zmm
//	VSQRTPS.RU_SAE zmm zmm
//
// Construct and append a VSQRTPS.RU_SAE instruction to the active function.
// Operates on the global context.
func VSQRTPS_RU_SAE(ops ...operand.Op) { ctx.VSQRTPS_RU_SAE(ops...) }

// VSQRTPS_RU_SAE_Z: Compute Square Roots of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VSQRTPS.RU_SAE.Z zmm k zmm
//
// Construct and append a VSQRTPS.RU_SAE.Z instruction to the active function.
func (c *Context) VSQRTPS_RU_SAE_Z(z, k, z1 operand.Op) {
	c.addinstruction(x86.VSQRTPS_RU_SAE_Z(z, k, z1))
}

// VSQRTPS_RU_SAE_Z: Compute Square Roots of Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VSQRTPS.RU_SAE.Z zmm k zmm
//
// Construct and append a VSQRTPS.RU_SAE.Z instruction to the active function.
// Operates on the global context.
func VSQRTPS_RU_SAE_Z(z, k, z1 operand.Op) { ctx.VSQRTPS_RU_SAE_Z(z, k, z1) }
|
|
|
|
// VSQRTPS_RZ_SAE: Compute Square Roots of Packed Single-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VSQRTPS.RZ_SAE zmm k zmm
//	VSQRTPS.RZ_SAE zmm zmm
//
// Construct and append a VSQRTPS.RZ_SAE instruction to the active function.
func (c *Context) VSQRTPS_RZ_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VSQRTPS_RZ_SAE(ops...))
}

// VSQRTPS_RZ_SAE: Compute Square Roots of Packed Single-Precision Floating-Point Values (Round Towards Zero).
//
// Forms:
//
//	VSQRTPS.RZ_SAE zmm k zmm
//	VSQRTPS.RZ_SAE zmm zmm
//
// Construct and append a VSQRTPS.RZ_SAE instruction to the active function.
// Operates on the global context.
func VSQRTPS_RZ_SAE(ops ...operand.Op) { ctx.VSQRTPS_RZ_SAE(ops...) }

// VSQRTPS_RZ_SAE_Z: Compute Square Roots of Packed Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VSQRTPS.RZ_SAE.Z zmm k zmm
//
// Construct and append a VSQRTPS.RZ_SAE.Z instruction to the active function.
func (c *Context) VSQRTPS_RZ_SAE_Z(z, k, z1 operand.Op) {
	c.addinstruction(x86.VSQRTPS_RZ_SAE_Z(z, k, z1))
}

// VSQRTPS_RZ_SAE_Z: Compute Square Roots of Packed Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VSQRTPS.RZ_SAE.Z zmm k zmm
//
// Construct and append a VSQRTPS.RZ_SAE.Z instruction to the active function.
// Operates on the global context.
func VSQRTPS_RZ_SAE_Z(z, k, z1 operand.Op) { ctx.VSQRTPS_RZ_SAE_Z(z, k, z1) }
|
|
|
|
// VSQRTPS_Z: Compute Square Roots of Packed Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VSQRTPS.Z m128 k xmm
//	VSQRTPS.Z m256 k ymm
//	VSQRTPS.Z xmm k xmm
//	VSQRTPS.Z ymm k ymm
//	VSQRTPS.Z m512 k zmm
//	VSQRTPS.Z zmm k zmm
//
// Construct and append a VSQRTPS.Z instruction to the active function.
func (c *Context) VSQRTPS_Z(mxyz, k, xyz operand.Op) {
	c.addinstruction(x86.VSQRTPS_Z(mxyz, k, xyz))
}

// VSQRTPS_Z: Compute Square Roots of Packed Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VSQRTPS.Z m128 k xmm
//	VSQRTPS.Z m256 k ymm
//	VSQRTPS.Z xmm k xmm
//	VSQRTPS.Z ymm k ymm
//	VSQRTPS.Z m512 k zmm
//	VSQRTPS.Z zmm k zmm
//
// Construct and append a VSQRTPS.Z instruction to the active function.
// Operates on the global context.
func VSQRTPS_Z(mxyz, k, xyz operand.Op) { ctx.VSQRTPS_Z(mxyz, k, xyz) }
|
|
|
|
// VSQRTSD: Compute Square Root of Scalar Double-Precision Floating-Point Value.
//
// Forms:
//
//	VSQRTSD m64 xmm xmm
//	VSQRTSD xmm xmm xmm
//	VSQRTSD m64 xmm k xmm
//	VSQRTSD xmm xmm k xmm
//
// Construct and append a VSQRTSD instruction to the active function.
func (c *Context) VSQRTSD(ops ...operand.Op) {
	c.addinstruction(x86.VSQRTSD(ops...))
}

// VSQRTSD: Compute Square Root of Scalar Double-Precision Floating-Point Value.
//
// Forms:
//
//	VSQRTSD m64 xmm xmm
//	VSQRTSD xmm xmm xmm
//	VSQRTSD m64 xmm k xmm
//	VSQRTSD xmm xmm k xmm
//
// Construct and append a VSQRTSD instruction to the active function.
// Operates on the global context.
func VSQRTSD(ops ...operand.Op) { ctx.VSQRTSD(ops...) }
|
|
|
|
// VSQRTSD_RD_SAE: Compute Square Root of Scalar Double-Precision Floating-Point Value (Round Towards Negative Infinity).
//
// Forms:
//
//	VSQRTSD.RD_SAE xmm xmm k xmm
//	VSQRTSD.RD_SAE xmm xmm xmm
//
// Construct and append a VSQRTSD.RD_SAE instruction to the active function.
func (c *Context) VSQRTSD_RD_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VSQRTSD_RD_SAE(ops...))
}

// VSQRTSD_RD_SAE: Compute Square Root of Scalar Double-Precision Floating-Point Value (Round Towards Negative Infinity).
//
// Forms:
//
//	VSQRTSD.RD_SAE xmm xmm k xmm
//	VSQRTSD.RD_SAE xmm xmm xmm
//
// Construct and append a VSQRTSD.RD_SAE instruction to the active function.
// Operates on the global context.
func VSQRTSD_RD_SAE(ops ...operand.Op) { ctx.VSQRTSD_RD_SAE(ops...) }

// VSQRTSD_RD_SAE_Z: Compute Square Root of Scalar Double-Precision Floating-Point Value (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VSQRTSD.RD_SAE.Z xmm xmm k xmm
//
// Construct and append a VSQRTSD.RD_SAE.Z instruction to the active function.
func (c *Context) VSQRTSD_RD_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VSQRTSD_RD_SAE_Z(x, x1, k, x2))
}

// VSQRTSD_RD_SAE_Z: Compute Square Root of Scalar Double-Precision Floating-Point Value (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VSQRTSD.RD_SAE.Z xmm xmm k xmm
//
// Construct and append a VSQRTSD.RD_SAE.Z instruction to the active function.
// Operates on the global context.
func VSQRTSD_RD_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VSQRTSD_RD_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VSQRTSD_RN_SAE: Compute Square Root of Scalar Double-Precision Floating-Point Value (Round Towards Nearest).
//
// Forms:
//
//	VSQRTSD.RN_SAE xmm xmm k xmm
//	VSQRTSD.RN_SAE xmm xmm xmm
//
// Construct and append a VSQRTSD.RN_SAE instruction to the active function.
func (c *Context) VSQRTSD_RN_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VSQRTSD_RN_SAE(ops...))
}

// VSQRTSD_RN_SAE: Compute Square Root of Scalar Double-Precision Floating-Point Value (Round Towards Nearest).
//
// Forms:
//
//	VSQRTSD.RN_SAE xmm xmm k xmm
//	VSQRTSD.RN_SAE xmm xmm xmm
//
// Construct and append a VSQRTSD.RN_SAE instruction to the active function.
// Operates on the global context.
func VSQRTSD_RN_SAE(ops ...operand.Op) { ctx.VSQRTSD_RN_SAE(ops...) }

// VSQRTSD_RN_SAE_Z: Compute Square Root of Scalar Double-Precision Floating-Point Value (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VSQRTSD.RN_SAE.Z xmm xmm k xmm
//
// Construct and append a VSQRTSD.RN_SAE.Z instruction to the active function.
func (c *Context) VSQRTSD_RN_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VSQRTSD_RN_SAE_Z(x, x1, k, x2))
}

// VSQRTSD_RN_SAE_Z: Compute Square Root of Scalar Double-Precision Floating-Point Value (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VSQRTSD.RN_SAE.Z xmm xmm k xmm
//
// Construct and append a VSQRTSD.RN_SAE.Z instruction to the active function.
// Operates on the global context.
func VSQRTSD_RN_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VSQRTSD_RN_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VSQRTSD_RU_SAE: Compute Square Root of Scalar Double-Precision Floating-Point Value (Round Towards Positive Infinity).
//
// Forms:
//
//	VSQRTSD.RU_SAE xmm xmm k xmm
//	VSQRTSD.RU_SAE xmm xmm xmm
//
// Construct and append a VSQRTSD.RU_SAE instruction to the active function.
func (c *Context) VSQRTSD_RU_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VSQRTSD_RU_SAE(ops...))
}

// VSQRTSD_RU_SAE: Compute Square Root of Scalar Double-Precision Floating-Point Value (Round Towards Positive Infinity).
//
// Forms:
//
//	VSQRTSD.RU_SAE xmm xmm k xmm
//	VSQRTSD.RU_SAE xmm xmm xmm
//
// Construct and append a VSQRTSD.RU_SAE instruction to the active function.
// Operates on the global context.
func VSQRTSD_RU_SAE(ops ...operand.Op) { ctx.VSQRTSD_RU_SAE(ops...) }

// VSQRTSD_RU_SAE_Z: Compute Square Root of Scalar Double-Precision Floating-Point Value (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VSQRTSD.RU_SAE.Z xmm xmm k xmm
//
// Construct and append a VSQRTSD.RU_SAE.Z instruction to the active function.
func (c *Context) VSQRTSD_RU_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VSQRTSD_RU_SAE_Z(x, x1, k, x2))
}

// VSQRTSD_RU_SAE_Z: Compute Square Root of Scalar Double-Precision Floating-Point Value (Round Towards Positive Infinity, Zeroing Masking).
//
// Forms:
//
//	VSQRTSD.RU_SAE.Z xmm xmm k xmm
//
// Construct and append a VSQRTSD.RU_SAE.Z instruction to the active function.
// Operates on the global context.
func VSQRTSD_RU_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VSQRTSD_RU_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VSQRTSD_RZ_SAE: Compute Square Root of Scalar Double-Precision Floating-Point Value (Round Towards Zero).
//
// Forms:
//
//	VSQRTSD.RZ_SAE xmm xmm k xmm
//	VSQRTSD.RZ_SAE xmm xmm xmm
//
// Construct and append a VSQRTSD.RZ_SAE instruction to the active function.
func (c *Context) VSQRTSD_RZ_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VSQRTSD_RZ_SAE(ops...))
}

// VSQRTSD_RZ_SAE: Compute Square Root of Scalar Double-Precision Floating-Point Value (Round Towards Zero).
//
// Forms:
//
//	VSQRTSD.RZ_SAE xmm xmm k xmm
//	VSQRTSD.RZ_SAE xmm xmm xmm
//
// Construct and append a VSQRTSD.RZ_SAE instruction to the active function.
// Operates on the global context.
func VSQRTSD_RZ_SAE(ops ...operand.Op) { ctx.VSQRTSD_RZ_SAE(ops...) }

// VSQRTSD_RZ_SAE_Z: Compute Square Root of Scalar Double-Precision Floating-Point Value (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VSQRTSD.RZ_SAE.Z xmm xmm k xmm
//
// Construct and append a VSQRTSD.RZ_SAE.Z instruction to the active function.
func (c *Context) VSQRTSD_RZ_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VSQRTSD_RZ_SAE_Z(x, x1, k, x2))
}

// VSQRTSD_RZ_SAE_Z: Compute Square Root of Scalar Double-Precision Floating-Point Value (Round Towards Zero, Zeroing Masking).
//
// Forms:
//
//	VSQRTSD.RZ_SAE.Z xmm xmm k xmm
//
// Construct and append a VSQRTSD.RZ_SAE.Z instruction to the active function.
// Operates on the global context.
func VSQRTSD_RZ_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VSQRTSD_RZ_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VSQRTSD_Z: Compute Square Root of Scalar Double-Precision Floating-Point Value (Zeroing Masking).
//
// Forms:
//
//	VSQRTSD.Z m64 xmm k xmm
//	VSQRTSD.Z xmm xmm k xmm
//
// Construct and append a VSQRTSD.Z instruction to the active function.
func (c *Context) VSQRTSD_Z(mx, x, k, x1 operand.Op) {
	c.addinstruction(x86.VSQRTSD_Z(mx, x, k, x1))
}

// VSQRTSD_Z: Compute Square Root of Scalar Double-Precision Floating-Point Value (Zeroing Masking).
//
// Forms:
//
//	VSQRTSD.Z m64 xmm k xmm
//	VSQRTSD.Z xmm xmm k xmm
//
// Construct and append a VSQRTSD.Z instruction to the active function.
// Operates on the global context.
func VSQRTSD_Z(mx, x, k, x1 operand.Op) { ctx.VSQRTSD_Z(mx, x, k, x1) }
|
|
|
|
// VSQRTSS: Compute Square Root of Scalar Single-Precision Floating-Point Value.
//
// Forms:
//
//	VSQRTSS m32 xmm xmm
//	VSQRTSS xmm xmm xmm
//	VSQRTSS m32 xmm k xmm
//	VSQRTSS xmm xmm k xmm
//
// Construct and append a VSQRTSS instruction to the active function.
func (c *Context) VSQRTSS(ops ...operand.Op) {
	c.addinstruction(x86.VSQRTSS(ops...))
}

// VSQRTSS: Compute Square Root of Scalar Single-Precision Floating-Point Value.
//
// Forms:
//
//	VSQRTSS m32 xmm xmm
//	VSQRTSS xmm xmm xmm
//	VSQRTSS m32 xmm k xmm
//	VSQRTSS xmm xmm k xmm
//
// Construct and append a VSQRTSS instruction to the active function.
// Operates on the global context.
func VSQRTSS(ops ...operand.Op) { ctx.VSQRTSS(ops...) }
|
|
|
|
// VSQRTSS_RD_SAE: Compute Square Root of Scalar Single-Precision Floating-Point Value (Round Towards Negative Infinity).
//
// Forms:
//
//	VSQRTSS.RD_SAE xmm xmm k xmm
//	VSQRTSS.RD_SAE xmm xmm xmm
//
// Construct and append a VSQRTSS.RD_SAE instruction to the active function.
func (c *Context) VSQRTSS_RD_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VSQRTSS_RD_SAE(ops...))
}

// VSQRTSS_RD_SAE: Compute Square Root of Scalar Single-Precision Floating-Point Value (Round Towards Negative Infinity).
//
// Forms:
//
//	VSQRTSS.RD_SAE xmm xmm k xmm
//	VSQRTSS.RD_SAE xmm xmm xmm
//
// Construct and append a VSQRTSS.RD_SAE instruction to the active function.
// Operates on the global context.
func VSQRTSS_RD_SAE(ops ...operand.Op) { ctx.VSQRTSS_RD_SAE(ops...) }

// VSQRTSS_RD_SAE_Z: Compute Square Root of Scalar Single-Precision Floating-Point Value (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VSQRTSS.RD_SAE.Z xmm xmm k xmm
//
// Construct and append a VSQRTSS.RD_SAE.Z instruction to the active function.
func (c *Context) VSQRTSS_RD_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VSQRTSS_RD_SAE_Z(x, x1, k, x2))
}

// VSQRTSS_RD_SAE_Z: Compute Square Root of Scalar Single-Precision Floating-Point Value (Round Towards Negative Infinity, Zeroing Masking).
//
// Forms:
//
//	VSQRTSS.RD_SAE.Z xmm xmm k xmm
//
// Construct and append a VSQRTSS.RD_SAE.Z instruction to the active function.
// Operates on the global context.
func VSQRTSS_RD_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VSQRTSS_RD_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VSQRTSS_RN_SAE: Compute Square Root of Scalar Single-Precision Floating-Point Value (Round Towards Nearest).
//
// Forms:
//
//	VSQRTSS.RN_SAE xmm xmm k xmm
//	VSQRTSS.RN_SAE xmm xmm xmm
//
// Construct and append a VSQRTSS.RN_SAE instruction to the active function.
func (c *Context) VSQRTSS_RN_SAE(ops ...operand.Op) {
	c.addinstruction(x86.VSQRTSS_RN_SAE(ops...))
}

// VSQRTSS_RN_SAE: Compute Square Root of Scalar Single-Precision Floating-Point Value (Round Towards Nearest).
//
// Forms:
//
//	VSQRTSS.RN_SAE xmm xmm k xmm
//	VSQRTSS.RN_SAE xmm xmm xmm
//
// Construct and append a VSQRTSS.RN_SAE instruction to the active function.
// Operates on the global context.
func VSQRTSS_RN_SAE(ops ...operand.Op) { ctx.VSQRTSS_RN_SAE(ops...) }

// VSQRTSS_RN_SAE_Z: Compute Square Root of Scalar Single-Precision Floating-Point Value (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VSQRTSS.RN_SAE.Z xmm xmm k xmm
//
// Construct and append a VSQRTSS.RN_SAE.Z instruction to the active function.
func (c *Context) VSQRTSS_RN_SAE_Z(x, x1, k, x2 operand.Op) {
	c.addinstruction(x86.VSQRTSS_RN_SAE_Z(x, x1, k, x2))
}

// VSQRTSS_RN_SAE_Z: Compute Square Root of Scalar Single-Precision Floating-Point Value (Round Towards Nearest, Zeroing Masking).
//
// Forms:
//
//	VSQRTSS.RN_SAE.Z xmm xmm k xmm
//
// Construct and append a VSQRTSS.RN_SAE.Z instruction to the active function.
// Operates on the global context.
func VSQRTSS_RN_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VSQRTSS_RN_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VSQRTSS_RU_SAE: Compute Square Root of Scalar Single-Precision Floating-Point Value (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSQRTSS.RU_SAE xmm xmm k xmm
|
|
// VSQRTSS.RU_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VSQRTSS.RU_SAE instruction to the active function.
|
|
func (c *Context) VSQRTSS_RU_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VSQRTSS_RU_SAE(ops...))
|
|
}
|
|
|
|
// VSQRTSS_RU_SAE: Compute Square Root of Scalar Single-Precision Floating-Point Value (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSQRTSS.RU_SAE xmm xmm k xmm
|
|
// VSQRTSS.RU_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VSQRTSS.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VSQRTSS_RU_SAE(ops ...operand.Op) { ctx.VSQRTSS_RU_SAE(ops...) }
|
|
|
|
// VSQRTSS_RU_SAE_Z: Compute Square Root of Scalar Single-Precision Floating-Point Value (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSQRTSS.RU_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VSQRTSS.RU_SAE.Z instruction to the active function.
|
|
func (c *Context) VSQRTSS_RU_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VSQRTSS_RU_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VSQRTSS_RU_SAE_Z: Compute Square Root of Scalar Single-Precision Floating-Point Value (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSQRTSS.RU_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VSQRTSS.RU_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VSQRTSS_RU_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VSQRTSS_RU_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VSQRTSS_RZ_SAE: Compute Square Root of Scalar Single-Precision Floating-Point Value (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSQRTSS.RZ_SAE xmm xmm k xmm
|
|
// VSQRTSS.RZ_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VSQRTSS.RZ_SAE instruction to the active function.
|
|
func (c *Context) VSQRTSS_RZ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VSQRTSS_RZ_SAE(ops...))
|
|
}
|
|
|
|
// VSQRTSS_RZ_SAE: Compute Square Root of Scalar Single-Precision Floating-Point Value (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSQRTSS.RZ_SAE xmm xmm k xmm
|
|
// VSQRTSS.RZ_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VSQRTSS.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VSQRTSS_RZ_SAE(ops ...operand.Op) { ctx.VSQRTSS_RZ_SAE(ops...) }
|
|
|
|
// VSQRTSS_RZ_SAE_Z: Compute Square Root of Scalar Single-Precision Floating-Point Value (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSQRTSS.RZ_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VSQRTSS.RZ_SAE.Z instruction to the active function.
|
|
func (c *Context) VSQRTSS_RZ_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VSQRTSS_RZ_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VSQRTSS_RZ_SAE_Z: Compute Square Root of Scalar Single-Precision Floating-Point Value (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSQRTSS.RZ_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VSQRTSS.RZ_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VSQRTSS_RZ_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VSQRTSS_RZ_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VSQRTSS_Z: Compute Square Root of Scalar Single-Precision Floating-Point Value (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSQRTSS.Z m32 xmm k xmm
|
|
// VSQRTSS.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VSQRTSS.Z instruction to the active function.
|
|
func (c *Context) VSQRTSS_Z(mx, x, k, x1 operand.Op) {
|
|
c.addinstruction(x86.VSQRTSS_Z(mx, x, k, x1))
|
|
}
|
|
|
|
// VSQRTSS_Z: Compute Square Root of Scalar Single-Precision Floating-Point Value (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSQRTSS.Z m32 xmm k xmm
|
|
// VSQRTSS.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VSQRTSS.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VSQRTSS_Z(mx, x, k, x1 operand.Op) { ctx.VSQRTSS_Z(mx, x, k, x1) }
|
|
|
|
// VSTMXCSR: Store MXCSR Register State.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSTMXCSR m32
|
|
//
|
|
// Construct and append a VSTMXCSR instruction to the active function.
|
|
func (c *Context) VSTMXCSR(m operand.Op) {
|
|
c.addinstruction(x86.VSTMXCSR(m))
|
|
}
|
|
|
|
// VSTMXCSR: Store MXCSR Register State.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSTMXCSR m32
|
|
//
|
|
// Construct and append a VSTMXCSR instruction to the active function.
|
|
// Operates on the global context.
|
|
func VSTMXCSR(m operand.Op) { ctx.VSTMXCSR(m) }
|
|
|
|
// VSUBPD: Subtract Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBPD m128 xmm xmm
|
|
// VSUBPD m256 ymm ymm
|
|
// VSUBPD xmm xmm xmm
|
|
// VSUBPD ymm ymm ymm
|
|
// VSUBPD m128 xmm k xmm
|
|
// VSUBPD m256 ymm k ymm
|
|
// VSUBPD xmm xmm k xmm
|
|
// VSUBPD ymm ymm k ymm
|
|
// VSUBPD m512 zmm k zmm
|
|
// VSUBPD m512 zmm zmm
|
|
// VSUBPD zmm zmm k zmm
|
|
// VSUBPD zmm zmm zmm
|
|
//
|
|
// Construct and append a VSUBPD instruction to the active function.
|
|
func (c *Context) VSUBPD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VSUBPD(ops...))
|
|
}
|
|
|
|
// VSUBPD: Subtract Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBPD m128 xmm xmm
|
|
// VSUBPD m256 ymm ymm
|
|
// VSUBPD xmm xmm xmm
|
|
// VSUBPD ymm ymm ymm
|
|
// VSUBPD m128 xmm k xmm
|
|
// VSUBPD m256 ymm k ymm
|
|
// VSUBPD xmm xmm k xmm
|
|
// VSUBPD ymm ymm k ymm
|
|
// VSUBPD m512 zmm k zmm
|
|
// VSUBPD m512 zmm zmm
|
|
// VSUBPD zmm zmm k zmm
|
|
// VSUBPD zmm zmm zmm
|
|
//
|
|
// Construct and append a VSUBPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VSUBPD(ops ...operand.Op) { ctx.VSUBPD(ops...) }
|
|
|
|
// VSUBPD_BCST: Subtract Packed Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBPD.BCST m64 xmm k xmm
|
|
// VSUBPD.BCST m64 xmm xmm
|
|
// VSUBPD.BCST m64 ymm k ymm
|
|
// VSUBPD.BCST m64 ymm ymm
|
|
// VSUBPD.BCST m64 zmm k zmm
|
|
// VSUBPD.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VSUBPD.BCST instruction to the active function.
|
|
func (c *Context) VSUBPD_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VSUBPD_BCST(ops...))
|
|
}
|
|
|
|
// VSUBPD_BCST: Subtract Packed Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBPD.BCST m64 xmm k xmm
|
|
// VSUBPD.BCST m64 xmm xmm
|
|
// VSUBPD.BCST m64 ymm k ymm
|
|
// VSUBPD.BCST m64 ymm ymm
|
|
// VSUBPD.BCST m64 zmm k zmm
|
|
// VSUBPD.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VSUBPD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VSUBPD_BCST(ops ...operand.Op) { ctx.VSUBPD_BCST(ops...) }
|
|
|
|
// VSUBPD_BCST_Z: Subtract Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBPD.BCST.Z m64 xmm k xmm
|
|
// VSUBPD.BCST.Z m64 ymm k ymm
|
|
// VSUBPD.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VSUBPD.BCST.Z instruction to the active function.
|
|
func (c *Context) VSUBPD_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VSUBPD_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VSUBPD_BCST_Z: Subtract Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBPD.BCST.Z m64 xmm k xmm
|
|
// VSUBPD.BCST.Z m64 ymm k ymm
|
|
// VSUBPD.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VSUBPD.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VSUBPD_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VSUBPD_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VSUBPD_RD_SAE: Subtract Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBPD.RD_SAE zmm zmm k zmm
|
|
// VSUBPD.RD_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VSUBPD.RD_SAE instruction to the active function.
|
|
func (c *Context) VSUBPD_RD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VSUBPD_RD_SAE(ops...))
|
|
}
|
|
|
|
// VSUBPD_RD_SAE: Subtract Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBPD.RD_SAE zmm zmm k zmm
|
|
// VSUBPD.RD_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VSUBPD.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VSUBPD_RD_SAE(ops ...operand.Op) { ctx.VSUBPD_RD_SAE(ops...) }
|
|
|
|
// VSUBPD_RD_SAE_Z: Subtract Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBPD.RD_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VSUBPD.RD_SAE.Z instruction to the active function.
|
|
func (c *Context) VSUBPD_RD_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VSUBPD_RD_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VSUBPD_RD_SAE_Z: Subtract Packed Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBPD.RD_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VSUBPD.RD_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VSUBPD_RD_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VSUBPD_RD_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VSUBPD_RN_SAE: Subtract Packed Double-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBPD.RN_SAE zmm zmm k zmm
|
|
// VSUBPD.RN_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VSUBPD.RN_SAE instruction to the active function.
|
|
func (c *Context) VSUBPD_RN_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VSUBPD_RN_SAE(ops...))
|
|
}
|
|
|
|
// VSUBPD_RN_SAE: Subtract Packed Double-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBPD.RN_SAE zmm zmm k zmm
|
|
// VSUBPD.RN_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VSUBPD.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VSUBPD_RN_SAE(ops ...operand.Op) { ctx.VSUBPD_RN_SAE(ops...) }
|
|
|
|
// VSUBPD_RN_SAE_Z: Subtract Packed Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBPD.RN_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VSUBPD.RN_SAE.Z instruction to the active function.
|
|
func (c *Context) VSUBPD_RN_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VSUBPD_RN_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VSUBPD_RN_SAE_Z: Subtract Packed Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBPD.RN_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VSUBPD.RN_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VSUBPD_RN_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VSUBPD_RN_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VSUBPD_RU_SAE: Subtract Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBPD.RU_SAE zmm zmm k zmm
|
|
// VSUBPD.RU_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VSUBPD.RU_SAE instruction to the active function.
|
|
func (c *Context) VSUBPD_RU_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VSUBPD_RU_SAE(ops...))
|
|
}
|
|
|
|
// VSUBPD_RU_SAE: Subtract Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBPD.RU_SAE zmm zmm k zmm
|
|
// VSUBPD.RU_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VSUBPD.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VSUBPD_RU_SAE(ops ...operand.Op) { ctx.VSUBPD_RU_SAE(ops...) }
|
|
|
|
// VSUBPD_RU_SAE_Z: Subtract Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBPD.RU_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VSUBPD.RU_SAE.Z instruction to the active function.
|
|
func (c *Context) VSUBPD_RU_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VSUBPD_RU_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VSUBPD_RU_SAE_Z: Subtract Packed Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBPD.RU_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VSUBPD.RU_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VSUBPD_RU_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VSUBPD_RU_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VSUBPD_RZ_SAE: Subtract Packed Double-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBPD.RZ_SAE zmm zmm k zmm
|
|
// VSUBPD.RZ_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VSUBPD.RZ_SAE instruction to the active function.
|
|
func (c *Context) VSUBPD_RZ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VSUBPD_RZ_SAE(ops...))
|
|
}
|
|
|
|
// VSUBPD_RZ_SAE: Subtract Packed Double-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBPD.RZ_SAE zmm zmm k zmm
|
|
// VSUBPD.RZ_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VSUBPD.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VSUBPD_RZ_SAE(ops ...operand.Op) { ctx.VSUBPD_RZ_SAE(ops...) }
|
|
|
|
// VSUBPD_RZ_SAE_Z: Subtract Packed Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBPD.RZ_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VSUBPD.RZ_SAE.Z instruction to the active function.
|
|
func (c *Context) VSUBPD_RZ_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VSUBPD_RZ_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VSUBPD_RZ_SAE_Z: Subtract Packed Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBPD.RZ_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VSUBPD.RZ_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VSUBPD_RZ_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VSUBPD_RZ_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VSUBPD_Z: Subtract Packed Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBPD.Z m128 xmm k xmm
|
|
// VSUBPD.Z m256 ymm k ymm
|
|
// VSUBPD.Z xmm xmm k xmm
|
|
// VSUBPD.Z ymm ymm k ymm
|
|
// VSUBPD.Z m512 zmm k zmm
|
|
// VSUBPD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VSUBPD.Z instruction to the active function.
|
|
func (c *Context) VSUBPD_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VSUBPD_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VSUBPD_Z: Subtract Packed Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBPD.Z m128 xmm k xmm
|
|
// VSUBPD.Z m256 ymm k ymm
|
|
// VSUBPD.Z xmm xmm k xmm
|
|
// VSUBPD.Z ymm ymm k ymm
|
|
// VSUBPD.Z m512 zmm k zmm
|
|
// VSUBPD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VSUBPD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VSUBPD_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VSUBPD_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VSUBPS: Subtract Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBPS m128 xmm xmm
|
|
// VSUBPS m256 ymm ymm
|
|
// VSUBPS xmm xmm xmm
|
|
// VSUBPS ymm ymm ymm
|
|
// VSUBPS m128 xmm k xmm
|
|
// VSUBPS m256 ymm k ymm
|
|
// VSUBPS xmm xmm k xmm
|
|
// VSUBPS ymm ymm k ymm
|
|
// VSUBPS m512 zmm k zmm
|
|
// VSUBPS m512 zmm zmm
|
|
// VSUBPS zmm zmm k zmm
|
|
// VSUBPS zmm zmm zmm
|
|
//
|
|
// Construct and append a VSUBPS instruction to the active function.
|
|
func (c *Context) VSUBPS(ops ...operand.Op) {
|
|
c.addinstruction(x86.VSUBPS(ops...))
|
|
}
|
|
|
|
// VSUBPS: Subtract Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBPS m128 xmm xmm
|
|
// VSUBPS m256 ymm ymm
|
|
// VSUBPS xmm xmm xmm
|
|
// VSUBPS ymm ymm ymm
|
|
// VSUBPS m128 xmm k xmm
|
|
// VSUBPS m256 ymm k ymm
|
|
// VSUBPS xmm xmm k xmm
|
|
// VSUBPS ymm ymm k ymm
|
|
// VSUBPS m512 zmm k zmm
|
|
// VSUBPS m512 zmm zmm
|
|
// VSUBPS zmm zmm k zmm
|
|
// VSUBPS zmm zmm zmm
|
|
//
|
|
// Construct and append a VSUBPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VSUBPS(ops ...operand.Op) { ctx.VSUBPS(ops...) }
|
|
|
|
// VSUBPS_BCST: Subtract Packed Single-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBPS.BCST m32 xmm k xmm
|
|
// VSUBPS.BCST m32 xmm xmm
|
|
// VSUBPS.BCST m32 ymm k ymm
|
|
// VSUBPS.BCST m32 ymm ymm
|
|
// VSUBPS.BCST m32 zmm k zmm
|
|
// VSUBPS.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VSUBPS.BCST instruction to the active function.
|
|
func (c *Context) VSUBPS_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VSUBPS_BCST(ops...))
|
|
}
|
|
|
|
// VSUBPS_BCST: Subtract Packed Single-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBPS.BCST m32 xmm k xmm
|
|
// VSUBPS.BCST m32 xmm xmm
|
|
// VSUBPS.BCST m32 ymm k ymm
|
|
// VSUBPS.BCST m32 ymm ymm
|
|
// VSUBPS.BCST m32 zmm k zmm
|
|
// VSUBPS.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VSUBPS.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VSUBPS_BCST(ops ...operand.Op) { ctx.VSUBPS_BCST(ops...) }
|
|
|
|
// VSUBPS_BCST_Z: Subtract Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBPS.BCST.Z m32 xmm k xmm
|
|
// VSUBPS.BCST.Z m32 ymm k ymm
|
|
// VSUBPS.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VSUBPS.BCST.Z instruction to the active function.
|
|
func (c *Context) VSUBPS_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VSUBPS_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VSUBPS_BCST_Z: Subtract Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBPS.BCST.Z m32 xmm k xmm
|
|
// VSUBPS.BCST.Z m32 ymm k ymm
|
|
// VSUBPS.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VSUBPS.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VSUBPS_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VSUBPS_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VSUBPS_RD_SAE: Subtract Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBPS.RD_SAE zmm zmm k zmm
|
|
// VSUBPS.RD_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VSUBPS.RD_SAE instruction to the active function.
|
|
func (c *Context) VSUBPS_RD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VSUBPS_RD_SAE(ops...))
|
|
}
|
|
|
|
// VSUBPS_RD_SAE: Subtract Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBPS.RD_SAE zmm zmm k zmm
|
|
// VSUBPS.RD_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VSUBPS.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VSUBPS_RD_SAE(ops ...operand.Op) { ctx.VSUBPS_RD_SAE(ops...) }
|
|
|
|
// VSUBPS_RD_SAE_Z: Subtract Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBPS.RD_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VSUBPS.RD_SAE.Z instruction to the active function.
|
|
func (c *Context) VSUBPS_RD_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VSUBPS_RD_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VSUBPS_RD_SAE_Z: Subtract Packed Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBPS.RD_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VSUBPS.RD_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VSUBPS_RD_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VSUBPS_RD_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VSUBPS_RN_SAE: Subtract Packed Single-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBPS.RN_SAE zmm zmm k zmm
|
|
// VSUBPS.RN_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VSUBPS.RN_SAE instruction to the active function.
|
|
func (c *Context) VSUBPS_RN_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VSUBPS_RN_SAE(ops...))
|
|
}
|
|
|
|
// VSUBPS_RN_SAE: Subtract Packed Single-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBPS.RN_SAE zmm zmm k zmm
|
|
// VSUBPS.RN_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VSUBPS.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VSUBPS_RN_SAE(ops ...operand.Op) { ctx.VSUBPS_RN_SAE(ops...) }
|
|
|
|
// VSUBPS_RN_SAE_Z: Subtract Packed Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBPS.RN_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VSUBPS.RN_SAE.Z instruction to the active function.
|
|
func (c *Context) VSUBPS_RN_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VSUBPS_RN_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VSUBPS_RN_SAE_Z: Subtract Packed Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBPS.RN_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VSUBPS.RN_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VSUBPS_RN_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VSUBPS_RN_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VSUBPS_RU_SAE: Subtract Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBPS.RU_SAE zmm zmm k zmm
|
|
// VSUBPS.RU_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VSUBPS.RU_SAE instruction to the active function.
|
|
func (c *Context) VSUBPS_RU_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VSUBPS_RU_SAE(ops...))
|
|
}
|
|
|
|
// VSUBPS_RU_SAE: Subtract Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBPS.RU_SAE zmm zmm k zmm
|
|
// VSUBPS.RU_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VSUBPS.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VSUBPS_RU_SAE(ops ...operand.Op) { ctx.VSUBPS_RU_SAE(ops...) }
|
|
|
|
// VSUBPS_RU_SAE_Z: Subtract Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBPS.RU_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VSUBPS.RU_SAE.Z instruction to the active function.
|
|
func (c *Context) VSUBPS_RU_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VSUBPS_RU_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VSUBPS_RU_SAE_Z: Subtract Packed Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBPS.RU_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VSUBPS.RU_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VSUBPS_RU_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VSUBPS_RU_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VSUBPS_RZ_SAE: Subtract Packed Single-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBPS.RZ_SAE zmm zmm k zmm
|
|
// VSUBPS.RZ_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VSUBPS.RZ_SAE instruction to the active function.
|
|
func (c *Context) VSUBPS_RZ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VSUBPS_RZ_SAE(ops...))
|
|
}
|
|
|
|
// VSUBPS_RZ_SAE: Subtract Packed Single-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBPS.RZ_SAE zmm zmm k zmm
|
|
// VSUBPS.RZ_SAE zmm zmm zmm
|
|
//
|
|
// Construct and append a VSUBPS.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VSUBPS_RZ_SAE(ops ...operand.Op) { ctx.VSUBPS_RZ_SAE(ops...) }
|
|
|
|
// VSUBPS_RZ_SAE_Z: Subtract Packed Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBPS.RZ_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VSUBPS.RZ_SAE.Z instruction to the active function.
|
|
func (c *Context) VSUBPS_RZ_SAE_Z(z, z1, k, z2 operand.Op) {
|
|
c.addinstruction(x86.VSUBPS_RZ_SAE_Z(z, z1, k, z2))
|
|
}
|
|
|
|
// VSUBPS_RZ_SAE_Z: Subtract Packed Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBPS.RZ_SAE.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VSUBPS.RZ_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VSUBPS_RZ_SAE_Z(z, z1, k, z2 operand.Op) { ctx.VSUBPS_RZ_SAE_Z(z, z1, k, z2) }
|
|
|
|
// VSUBPS_Z: Subtract Packed Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBPS.Z m128 xmm k xmm
|
|
// VSUBPS.Z m256 ymm k ymm
|
|
// VSUBPS.Z xmm xmm k xmm
|
|
// VSUBPS.Z ymm ymm k ymm
|
|
// VSUBPS.Z m512 zmm k zmm
|
|
// VSUBPS.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VSUBPS.Z instruction to the active function.
|
|
func (c *Context) VSUBPS_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VSUBPS_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VSUBPS_Z: Subtract Packed Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBPS.Z m128 xmm k xmm
|
|
// VSUBPS.Z m256 ymm k ymm
|
|
// VSUBPS.Z xmm xmm k xmm
|
|
// VSUBPS.Z ymm ymm k ymm
|
|
// VSUBPS.Z m512 zmm k zmm
|
|
// VSUBPS.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VSUBPS.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VSUBPS_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VSUBPS_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VSUBSD: Subtract Scalar Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBSD m64 xmm xmm
|
|
// VSUBSD xmm xmm xmm
|
|
// VSUBSD m64 xmm k xmm
|
|
// VSUBSD xmm xmm k xmm
|
|
//
|
|
// Construct and append a VSUBSD instruction to the active function.
|
|
func (c *Context) VSUBSD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VSUBSD(ops...))
|
|
}
|
|
|
|
// VSUBSD: Subtract Scalar Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBSD m64 xmm xmm
|
|
// VSUBSD xmm xmm xmm
|
|
// VSUBSD m64 xmm k xmm
|
|
// VSUBSD xmm xmm k xmm
|
|
//
|
|
// Construct and append a VSUBSD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VSUBSD(ops ...operand.Op) { ctx.VSUBSD(ops...) }
|
|
|
|
// VSUBSD_RD_SAE: Subtract Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBSD.RD_SAE xmm xmm k xmm
|
|
// VSUBSD.RD_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VSUBSD.RD_SAE instruction to the active function.
|
|
func (c *Context) VSUBSD_RD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VSUBSD_RD_SAE(ops...))
|
|
}
|
|
|
|
// VSUBSD_RD_SAE: Subtract Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBSD.RD_SAE xmm xmm k xmm
|
|
// VSUBSD.RD_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VSUBSD.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VSUBSD_RD_SAE(ops ...operand.Op) { ctx.VSUBSD_RD_SAE(ops...) }
|
|
|
|
// VSUBSD_RD_SAE_Z: Subtract Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBSD.RD_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VSUBSD.RD_SAE.Z instruction to the active function.
|
|
func (c *Context) VSUBSD_RD_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VSUBSD_RD_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VSUBSD_RD_SAE_Z: Subtract Scalar Double-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBSD.RD_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VSUBSD.RD_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VSUBSD_RD_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VSUBSD_RD_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VSUBSD_RN_SAE: Subtract Scalar Double-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBSD.RN_SAE xmm xmm k xmm
|
|
// VSUBSD.RN_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VSUBSD.RN_SAE instruction to the active function.
|
|
func (c *Context) VSUBSD_RN_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VSUBSD_RN_SAE(ops...))
|
|
}
|
|
|
|
// VSUBSD_RN_SAE: Subtract Scalar Double-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBSD.RN_SAE xmm xmm k xmm
|
|
// VSUBSD.RN_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VSUBSD.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VSUBSD_RN_SAE(ops ...operand.Op) { ctx.VSUBSD_RN_SAE(ops...) }
|
|
|
|
// VSUBSD_RN_SAE_Z: Subtract Scalar Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBSD.RN_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VSUBSD.RN_SAE.Z instruction to the active function.
|
|
func (c *Context) VSUBSD_RN_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VSUBSD_RN_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VSUBSD_RN_SAE_Z: Subtract Scalar Double-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBSD.RN_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VSUBSD.RN_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VSUBSD_RN_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VSUBSD_RN_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VSUBSD_RU_SAE: Subtract Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBSD.RU_SAE xmm xmm k xmm
|
|
// VSUBSD.RU_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VSUBSD.RU_SAE instruction to the active function.
|
|
func (c *Context) VSUBSD_RU_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VSUBSD_RU_SAE(ops...))
|
|
}
|
|
|
|
// VSUBSD_RU_SAE: Subtract Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBSD.RU_SAE xmm xmm k xmm
|
|
// VSUBSD.RU_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VSUBSD.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VSUBSD_RU_SAE(ops ...operand.Op) { ctx.VSUBSD_RU_SAE(ops...) }
|
|
|
|
// VSUBSD_RU_SAE_Z: Subtract Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBSD.RU_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VSUBSD.RU_SAE.Z instruction to the active function.
|
|
func (c *Context) VSUBSD_RU_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VSUBSD_RU_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VSUBSD_RU_SAE_Z: Subtract Scalar Double-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBSD.RU_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VSUBSD.RU_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VSUBSD_RU_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VSUBSD_RU_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VSUBSD_RZ_SAE: Subtract Scalar Double-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBSD.RZ_SAE xmm xmm k xmm
|
|
// VSUBSD.RZ_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VSUBSD.RZ_SAE instruction to the active function.
|
|
func (c *Context) VSUBSD_RZ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VSUBSD_RZ_SAE(ops...))
|
|
}
|
|
|
|
// VSUBSD_RZ_SAE: Subtract Scalar Double-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBSD.RZ_SAE xmm xmm k xmm
|
|
// VSUBSD.RZ_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VSUBSD.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VSUBSD_RZ_SAE(ops ...operand.Op) { ctx.VSUBSD_RZ_SAE(ops...) }
|
|
|
|
// VSUBSD_RZ_SAE_Z: Subtract Scalar Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBSD.RZ_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VSUBSD.RZ_SAE.Z instruction to the active function.
|
|
func (c *Context) VSUBSD_RZ_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VSUBSD_RZ_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VSUBSD_RZ_SAE_Z: Subtract Scalar Double-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBSD.RZ_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VSUBSD.RZ_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VSUBSD_RZ_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VSUBSD_RZ_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VSUBSD_Z: Subtract Scalar Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBSD.Z m64 xmm k xmm
|
|
// VSUBSD.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VSUBSD.Z instruction to the active function.
|
|
func (c *Context) VSUBSD_Z(mx, x, k, x1 operand.Op) {
|
|
c.addinstruction(x86.VSUBSD_Z(mx, x, k, x1))
|
|
}
|
|
|
|
// VSUBSD_Z: Subtract Scalar Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBSD.Z m64 xmm k xmm
|
|
// VSUBSD.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VSUBSD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VSUBSD_Z(mx, x, k, x1 operand.Op) { ctx.VSUBSD_Z(mx, x, k, x1) }
|
|
|
|
// VSUBSS: Subtract Scalar Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBSS m32 xmm xmm
|
|
// VSUBSS xmm xmm xmm
|
|
// VSUBSS m32 xmm k xmm
|
|
// VSUBSS xmm xmm k xmm
|
|
//
|
|
// Construct and append a VSUBSS instruction to the active function.
|
|
func (c *Context) VSUBSS(ops ...operand.Op) {
|
|
c.addinstruction(x86.VSUBSS(ops...))
|
|
}
|
|
|
|
// VSUBSS: Subtract Scalar Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBSS m32 xmm xmm
|
|
// VSUBSS xmm xmm xmm
|
|
// VSUBSS m32 xmm k xmm
|
|
// VSUBSS xmm xmm k xmm
|
|
//
|
|
// Construct and append a VSUBSS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VSUBSS(ops ...operand.Op) { ctx.VSUBSS(ops...) }
|
|
|
|
// VSUBSS_RD_SAE: Subtract Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBSS.RD_SAE xmm xmm k xmm
|
|
// VSUBSS.RD_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VSUBSS.RD_SAE instruction to the active function.
|
|
func (c *Context) VSUBSS_RD_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VSUBSS_RD_SAE(ops...))
|
|
}
|
|
|
|
// VSUBSS_RD_SAE: Subtract Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBSS.RD_SAE xmm xmm k xmm
|
|
// VSUBSS.RD_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VSUBSS.RD_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VSUBSS_RD_SAE(ops ...operand.Op) { ctx.VSUBSS_RD_SAE(ops...) }
|
|
|
|
// VSUBSS_RD_SAE_Z: Subtract Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBSS.RD_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VSUBSS.RD_SAE.Z instruction to the active function.
|
|
func (c *Context) VSUBSS_RD_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VSUBSS_RD_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VSUBSS_RD_SAE_Z: Subtract Scalar Single-Precision Floating-Point Values (Round Towards Negative Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBSS.RD_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VSUBSS.RD_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VSUBSS_RD_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VSUBSS_RD_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VSUBSS_RN_SAE: Subtract Scalar Single-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBSS.RN_SAE xmm xmm k xmm
|
|
// VSUBSS.RN_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VSUBSS.RN_SAE instruction to the active function.
|
|
func (c *Context) VSUBSS_RN_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VSUBSS_RN_SAE(ops...))
|
|
}
|
|
|
|
// VSUBSS_RN_SAE: Subtract Scalar Single-Precision Floating-Point Values (Round Towards Nearest).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBSS.RN_SAE xmm xmm k xmm
|
|
// VSUBSS.RN_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VSUBSS.RN_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VSUBSS_RN_SAE(ops ...operand.Op) { ctx.VSUBSS_RN_SAE(ops...) }
|
|
|
|
// VSUBSS_RN_SAE_Z: Subtract Scalar Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBSS.RN_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VSUBSS.RN_SAE.Z instruction to the active function.
|
|
func (c *Context) VSUBSS_RN_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VSUBSS_RN_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VSUBSS_RN_SAE_Z: Subtract Scalar Single-Precision Floating-Point Values (Round Towards Nearest, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBSS.RN_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VSUBSS.RN_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VSUBSS_RN_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VSUBSS_RN_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VSUBSS_RU_SAE: Subtract Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBSS.RU_SAE xmm xmm k xmm
|
|
// VSUBSS.RU_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VSUBSS.RU_SAE instruction to the active function.
|
|
func (c *Context) VSUBSS_RU_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VSUBSS_RU_SAE(ops...))
|
|
}
|
|
|
|
// VSUBSS_RU_SAE: Subtract Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBSS.RU_SAE xmm xmm k xmm
|
|
// VSUBSS.RU_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VSUBSS.RU_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VSUBSS_RU_SAE(ops ...operand.Op) { ctx.VSUBSS_RU_SAE(ops...) }
|
|
|
|
// VSUBSS_RU_SAE_Z: Subtract Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBSS.RU_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VSUBSS.RU_SAE.Z instruction to the active function.
|
|
func (c *Context) VSUBSS_RU_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VSUBSS_RU_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VSUBSS_RU_SAE_Z: Subtract Scalar Single-Precision Floating-Point Values (Round Towards Positive Infinity, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBSS.RU_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VSUBSS.RU_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VSUBSS_RU_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VSUBSS_RU_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VSUBSS_RZ_SAE: Subtract Scalar Single-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBSS.RZ_SAE xmm xmm k xmm
|
|
// VSUBSS.RZ_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VSUBSS.RZ_SAE instruction to the active function.
|
|
func (c *Context) VSUBSS_RZ_SAE(ops ...operand.Op) {
|
|
c.addinstruction(x86.VSUBSS_RZ_SAE(ops...))
|
|
}
|
|
|
|
// VSUBSS_RZ_SAE: Subtract Scalar Single-Precision Floating-Point Values (Round Towards Zero).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBSS.RZ_SAE xmm xmm k xmm
|
|
// VSUBSS.RZ_SAE xmm xmm xmm
|
|
//
|
|
// Construct and append a VSUBSS.RZ_SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VSUBSS_RZ_SAE(ops ...operand.Op) { ctx.VSUBSS_RZ_SAE(ops...) }
|
|
|
|
// VSUBSS_RZ_SAE_Z: Subtract Scalar Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBSS.RZ_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VSUBSS.RZ_SAE.Z instruction to the active function.
|
|
func (c *Context) VSUBSS_RZ_SAE_Z(x, x1, k, x2 operand.Op) {
|
|
c.addinstruction(x86.VSUBSS_RZ_SAE_Z(x, x1, k, x2))
|
|
}
|
|
|
|
// VSUBSS_RZ_SAE_Z: Subtract Scalar Single-Precision Floating-Point Values (Round Towards Zero, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBSS.RZ_SAE.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VSUBSS.RZ_SAE.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VSUBSS_RZ_SAE_Z(x, x1, k, x2 operand.Op) { ctx.VSUBSS_RZ_SAE_Z(x, x1, k, x2) }
|
|
|
|
// VSUBSS_Z: Subtract Scalar Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBSS.Z m32 xmm k xmm
|
|
// VSUBSS.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VSUBSS.Z instruction to the active function.
|
|
func (c *Context) VSUBSS_Z(mx, x, k, x1 operand.Op) {
|
|
c.addinstruction(x86.VSUBSS_Z(mx, x, k, x1))
|
|
}
|
|
|
|
// VSUBSS_Z: Subtract Scalar Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VSUBSS.Z m32 xmm k xmm
|
|
// VSUBSS.Z xmm xmm k xmm
|
|
//
|
|
// Construct and append a VSUBSS.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VSUBSS_Z(mx, x, k, x1 operand.Op) { ctx.VSUBSS_Z(mx, x, k, x1) }
|
|
|
|
// VTESTPD: Packed Double-Precision Floating-Point Bit Test.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VTESTPD m128 xmm
|
|
// VTESTPD m256 ymm
|
|
// VTESTPD xmm xmm
|
|
// VTESTPD ymm ymm
|
|
//
|
|
// Construct and append a VTESTPD instruction to the active function.
|
|
func (c *Context) VTESTPD(mxy, xy operand.Op) {
|
|
c.addinstruction(x86.VTESTPD(mxy, xy))
|
|
}
|
|
|
|
// VTESTPD: Packed Double-Precision Floating-Point Bit Test.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VTESTPD m128 xmm
|
|
// VTESTPD m256 ymm
|
|
// VTESTPD xmm xmm
|
|
// VTESTPD ymm ymm
|
|
//
|
|
// Construct and append a VTESTPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VTESTPD(mxy, xy operand.Op) { ctx.VTESTPD(mxy, xy) }
|
|
|
|
// VTESTPS: Packed Single-Precision Floating-Point Bit Test.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VTESTPS m128 xmm
|
|
// VTESTPS m256 ymm
|
|
// VTESTPS xmm xmm
|
|
// VTESTPS ymm ymm
|
|
//
|
|
// Construct and append a VTESTPS instruction to the active function.
|
|
func (c *Context) VTESTPS(mxy, xy operand.Op) {
|
|
c.addinstruction(x86.VTESTPS(mxy, xy))
|
|
}
|
|
|
|
// VTESTPS: Packed Single-Precision Floating-Point Bit Test.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VTESTPS m128 xmm
|
|
// VTESTPS m256 ymm
|
|
// VTESTPS xmm xmm
|
|
// VTESTPS ymm ymm
|
|
//
|
|
// Construct and append a VTESTPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VTESTPS(mxy, xy operand.Op) { ctx.VTESTPS(mxy, xy) }
|
|
|
|
// VUCOMISD: Unordered Compare Scalar Double-Precision Floating-Point Values and Set EFLAGS.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VUCOMISD m64 xmm
|
|
// VUCOMISD xmm xmm
|
|
//
|
|
// Construct and append a VUCOMISD instruction to the active function.
|
|
func (c *Context) VUCOMISD(mx, x operand.Op) {
|
|
c.addinstruction(x86.VUCOMISD(mx, x))
|
|
}
|
|
|
|
// VUCOMISD: Unordered Compare Scalar Double-Precision Floating-Point Values and Set EFLAGS.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VUCOMISD m64 xmm
|
|
// VUCOMISD xmm xmm
|
|
//
|
|
// Construct and append a VUCOMISD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VUCOMISD(mx, x operand.Op) { ctx.VUCOMISD(mx, x) }
|
|
|
|
// VUCOMISD_SAE: Unordered Compare Scalar Double-Precision Floating-Point Values and Set EFLAGS (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VUCOMISD.SAE xmm xmm
|
|
//
|
|
// Construct and append a VUCOMISD.SAE instruction to the active function.
|
|
func (c *Context) VUCOMISD_SAE(x, x1 operand.Op) {
|
|
c.addinstruction(x86.VUCOMISD_SAE(x, x1))
|
|
}
|
|
|
|
// VUCOMISD_SAE: Unordered Compare Scalar Double-Precision Floating-Point Values and Set EFLAGS (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VUCOMISD.SAE xmm xmm
|
|
//
|
|
// Construct and append a VUCOMISD.SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VUCOMISD_SAE(x, x1 operand.Op) { ctx.VUCOMISD_SAE(x, x1) }
|
|
|
|
// VUCOMISS: Unordered Compare Scalar Single-Precision Floating-Point Values and Set EFLAGS.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VUCOMISS m32 xmm
|
|
// VUCOMISS xmm xmm
|
|
//
|
|
// Construct and append a VUCOMISS instruction to the active function.
|
|
func (c *Context) VUCOMISS(mx, x operand.Op) {
|
|
c.addinstruction(x86.VUCOMISS(mx, x))
|
|
}
|
|
|
|
// VUCOMISS: Unordered Compare Scalar Single-Precision Floating-Point Values and Set EFLAGS.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VUCOMISS m32 xmm
|
|
// VUCOMISS xmm xmm
|
|
//
|
|
// Construct and append a VUCOMISS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VUCOMISS(mx, x operand.Op) { ctx.VUCOMISS(mx, x) }
|
|
|
|
// VUCOMISS_SAE: Unordered Compare Scalar Single-Precision Floating-Point Values and Set EFLAGS (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VUCOMISS.SAE xmm xmm
|
|
//
|
|
// Construct and append a VUCOMISS.SAE instruction to the active function.
|
|
func (c *Context) VUCOMISS_SAE(x, x1 operand.Op) {
|
|
c.addinstruction(x86.VUCOMISS_SAE(x, x1))
|
|
}
|
|
|
|
// VUCOMISS_SAE: Unordered Compare Scalar Single-Precision Floating-Point Values and Set EFLAGS (Suppress All Exceptions).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VUCOMISS.SAE xmm xmm
|
|
//
|
|
// Construct and append a VUCOMISS.SAE instruction to the active function.
|
|
// Operates on the global context.
|
|
func VUCOMISS_SAE(x, x1 operand.Op) { ctx.VUCOMISS_SAE(x, x1) }
|
|
|
|
// VUNPCKHPD: Unpack and Interleave High Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VUNPCKHPD m128 xmm xmm
|
|
// VUNPCKHPD m256 ymm ymm
|
|
// VUNPCKHPD xmm xmm xmm
|
|
// VUNPCKHPD ymm ymm ymm
|
|
// VUNPCKHPD m128 xmm k xmm
|
|
// VUNPCKHPD m256 ymm k ymm
|
|
// VUNPCKHPD xmm xmm k xmm
|
|
// VUNPCKHPD ymm ymm k ymm
|
|
// VUNPCKHPD m512 zmm k zmm
|
|
// VUNPCKHPD m512 zmm zmm
|
|
// VUNPCKHPD zmm zmm k zmm
|
|
// VUNPCKHPD zmm zmm zmm
|
|
//
|
|
// Construct and append a VUNPCKHPD instruction to the active function.
|
|
func (c *Context) VUNPCKHPD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VUNPCKHPD(ops...))
|
|
}
|
|
|
|
// VUNPCKHPD: Unpack and Interleave High Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VUNPCKHPD m128 xmm xmm
|
|
// VUNPCKHPD m256 ymm ymm
|
|
// VUNPCKHPD xmm xmm xmm
|
|
// VUNPCKHPD ymm ymm ymm
|
|
// VUNPCKHPD m128 xmm k xmm
|
|
// VUNPCKHPD m256 ymm k ymm
|
|
// VUNPCKHPD xmm xmm k xmm
|
|
// VUNPCKHPD ymm ymm k ymm
|
|
// VUNPCKHPD m512 zmm k zmm
|
|
// VUNPCKHPD m512 zmm zmm
|
|
// VUNPCKHPD zmm zmm k zmm
|
|
// VUNPCKHPD zmm zmm zmm
|
|
//
|
|
// Construct and append a VUNPCKHPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VUNPCKHPD(ops ...operand.Op) { ctx.VUNPCKHPD(ops...) }
|
|
|
|
// VUNPCKHPD_BCST: Unpack and Interleave High Packed Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VUNPCKHPD.BCST m64 xmm k xmm
|
|
// VUNPCKHPD.BCST m64 xmm xmm
|
|
// VUNPCKHPD.BCST m64 ymm k ymm
|
|
// VUNPCKHPD.BCST m64 ymm ymm
|
|
// VUNPCKHPD.BCST m64 zmm k zmm
|
|
// VUNPCKHPD.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VUNPCKHPD.BCST instruction to the active function.
|
|
func (c *Context) VUNPCKHPD_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VUNPCKHPD_BCST(ops...))
|
|
}
|
|
|
|
// VUNPCKHPD_BCST: Unpack and Interleave High Packed Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VUNPCKHPD.BCST m64 xmm k xmm
|
|
// VUNPCKHPD.BCST m64 xmm xmm
|
|
// VUNPCKHPD.BCST m64 ymm k ymm
|
|
// VUNPCKHPD.BCST m64 ymm ymm
|
|
// VUNPCKHPD.BCST m64 zmm k zmm
|
|
// VUNPCKHPD.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VUNPCKHPD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VUNPCKHPD_BCST(ops ...operand.Op) { ctx.VUNPCKHPD_BCST(ops...) }
|
|
|
|
// VUNPCKHPD_BCST_Z: Unpack and Interleave High Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VUNPCKHPD.BCST.Z m64 xmm k xmm
|
|
// VUNPCKHPD.BCST.Z m64 ymm k ymm
|
|
// VUNPCKHPD.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VUNPCKHPD.BCST.Z instruction to the active function.
|
|
func (c *Context) VUNPCKHPD_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VUNPCKHPD_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VUNPCKHPD_BCST_Z: Unpack and Interleave High Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VUNPCKHPD.BCST.Z m64 xmm k xmm
|
|
// VUNPCKHPD.BCST.Z m64 ymm k ymm
|
|
// VUNPCKHPD.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VUNPCKHPD.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VUNPCKHPD_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VUNPCKHPD_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VUNPCKHPD_Z: Unpack and Interleave High Packed Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VUNPCKHPD.Z m128 xmm k xmm
|
|
// VUNPCKHPD.Z m256 ymm k ymm
|
|
// VUNPCKHPD.Z xmm xmm k xmm
|
|
// VUNPCKHPD.Z ymm ymm k ymm
|
|
// VUNPCKHPD.Z m512 zmm k zmm
|
|
// VUNPCKHPD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VUNPCKHPD.Z instruction to the active function.
|
|
func (c *Context) VUNPCKHPD_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VUNPCKHPD_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VUNPCKHPD_Z: Unpack and Interleave High Packed Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VUNPCKHPD.Z m128 xmm k xmm
|
|
// VUNPCKHPD.Z m256 ymm k ymm
|
|
// VUNPCKHPD.Z xmm xmm k xmm
|
|
// VUNPCKHPD.Z ymm ymm k ymm
|
|
// VUNPCKHPD.Z m512 zmm k zmm
|
|
// VUNPCKHPD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VUNPCKHPD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VUNPCKHPD_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VUNPCKHPD_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VUNPCKHPS: Unpack and Interleave High Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VUNPCKHPS m128 xmm xmm
|
|
// VUNPCKHPS m256 ymm ymm
|
|
// VUNPCKHPS xmm xmm xmm
|
|
// VUNPCKHPS ymm ymm ymm
|
|
// VUNPCKHPS m128 xmm k xmm
|
|
// VUNPCKHPS m256 ymm k ymm
|
|
// VUNPCKHPS xmm xmm k xmm
|
|
// VUNPCKHPS ymm ymm k ymm
|
|
// VUNPCKHPS m512 zmm k zmm
|
|
// VUNPCKHPS m512 zmm zmm
|
|
// VUNPCKHPS zmm zmm k zmm
|
|
// VUNPCKHPS zmm zmm zmm
|
|
//
|
|
// Construct and append a VUNPCKHPS instruction to the active function.
|
|
func (c *Context) VUNPCKHPS(ops ...operand.Op) {
|
|
c.addinstruction(x86.VUNPCKHPS(ops...))
|
|
}
|
|
|
|
// VUNPCKHPS: Unpack and Interleave High Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VUNPCKHPS m128 xmm xmm
|
|
// VUNPCKHPS m256 ymm ymm
|
|
// VUNPCKHPS xmm xmm xmm
|
|
// VUNPCKHPS ymm ymm ymm
|
|
// VUNPCKHPS m128 xmm k xmm
|
|
// VUNPCKHPS m256 ymm k ymm
|
|
// VUNPCKHPS xmm xmm k xmm
|
|
// VUNPCKHPS ymm ymm k ymm
|
|
// VUNPCKHPS m512 zmm k zmm
|
|
// VUNPCKHPS m512 zmm zmm
|
|
// VUNPCKHPS zmm zmm k zmm
|
|
// VUNPCKHPS zmm zmm zmm
|
|
//
|
|
// Construct and append a VUNPCKHPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VUNPCKHPS(ops ...operand.Op) { ctx.VUNPCKHPS(ops...) }
|
|
|
|
// VUNPCKHPS_BCST: Unpack and Interleave High Packed Single-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VUNPCKHPS.BCST m32 xmm k xmm
|
|
// VUNPCKHPS.BCST m32 xmm xmm
|
|
// VUNPCKHPS.BCST m32 ymm k ymm
|
|
// VUNPCKHPS.BCST m32 ymm ymm
|
|
// VUNPCKHPS.BCST m32 zmm k zmm
|
|
// VUNPCKHPS.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VUNPCKHPS.BCST instruction to the active function.
|
|
func (c *Context) VUNPCKHPS_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VUNPCKHPS_BCST(ops...))
|
|
}
|
|
|
|
// VUNPCKHPS_BCST: Unpack and Interleave High Packed Single-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VUNPCKHPS.BCST m32 xmm k xmm
|
|
// VUNPCKHPS.BCST m32 xmm xmm
|
|
// VUNPCKHPS.BCST m32 ymm k ymm
|
|
// VUNPCKHPS.BCST m32 ymm ymm
|
|
// VUNPCKHPS.BCST m32 zmm k zmm
|
|
// VUNPCKHPS.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VUNPCKHPS.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VUNPCKHPS_BCST(ops ...operand.Op) { ctx.VUNPCKHPS_BCST(ops...) }
|
|
|
|
// VUNPCKHPS_BCST_Z: Unpack and Interleave High Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VUNPCKHPS.BCST.Z m32 xmm k xmm
|
|
// VUNPCKHPS.BCST.Z m32 ymm k ymm
|
|
// VUNPCKHPS.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VUNPCKHPS.BCST.Z instruction to the active function.
|
|
func (c *Context) VUNPCKHPS_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VUNPCKHPS_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VUNPCKHPS_BCST_Z: Unpack and Interleave High Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VUNPCKHPS.BCST.Z m32 xmm k xmm
|
|
// VUNPCKHPS.BCST.Z m32 ymm k ymm
|
|
// VUNPCKHPS.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VUNPCKHPS.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VUNPCKHPS_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VUNPCKHPS_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VUNPCKHPS_Z: Unpack and Interleave High Packed Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VUNPCKHPS.Z m128 xmm k xmm
|
|
// VUNPCKHPS.Z m256 ymm k ymm
|
|
// VUNPCKHPS.Z xmm xmm k xmm
|
|
// VUNPCKHPS.Z ymm ymm k ymm
|
|
// VUNPCKHPS.Z m512 zmm k zmm
|
|
// VUNPCKHPS.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VUNPCKHPS.Z instruction to the active function.
|
|
func (c *Context) VUNPCKHPS_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VUNPCKHPS_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VUNPCKHPS_Z: Unpack and Interleave High Packed Single-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VUNPCKHPS.Z m128 xmm k xmm
|
|
// VUNPCKHPS.Z m256 ymm k ymm
|
|
// VUNPCKHPS.Z xmm xmm k xmm
|
|
// VUNPCKHPS.Z ymm ymm k ymm
|
|
// VUNPCKHPS.Z m512 zmm k zmm
|
|
// VUNPCKHPS.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VUNPCKHPS.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VUNPCKHPS_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VUNPCKHPS_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VUNPCKLPD: Unpack and Interleave Low Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VUNPCKLPD m128 xmm xmm
|
|
// VUNPCKLPD m256 ymm ymm
|
|
// VUNPCKLPD xmm xmm xmm
|
|
// VUNPCKLPD ymm ymm ymm
|
|
// VUNPCKLPD m128 xmm k xmm
|
|
// VUNPCKLPD m256 ymm k ymm
|
|
// VUNPCKLPD xmm xmm k xmm
|
|
// VUNPCKLPD ymm ymm k ymm
|
|
// VUNPCKLPD m512 zmm k zmm
|
|
// VUNPCKLPD m512 zmm zmm
|
|
// VUNPCKLPD zmm zmm k zmm
|
|
// VUNPCKLPD zmm zmm zmm
|
|
//
|
|
// Construct and append a VUNPCKLPD instruction to the active function.
|
|
func (c *Context) VUNPCKLPD(ops ...operand.Op) {
|
|
c.addinstruction(x86.VUNPCKLPD(ops...))
|
|
}
|
|
|
|
// VUNPCKLPD: Unpack and Interleave Low Packed Double-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VUNPCKLPD m128 xmm xmm
|
|
// VUNPCKLPD m256 ymm ymm
|
|
// VUNPCKLPD xmm xmm xmm
|
|
// VUNPCKLPD ymm ymm ymm
|
|
// VUNPCKLPD m128 xmm k xmm
|
|
// VUNPCKLPD m256 ymm k ymm
|
|
// VUNPCKLPD xmm xmm k xmm
|
|
// VUNPCKLPD ymm ymm k ymm
|
|
// VUNPCKLPD m512 zmm k zmm
|
|
// VUNPCKLPD m512 zmm zmm
|
|
// VUNPCKLPD zmm zmm k zmm
|
|
// VUNPCKLPD zmm zmm zmm
|
|
//
|
|
// Construct and append a VUNPCKLPD instruction to the active function.
|
|
// Operates on the global context.
|
|
func VUNPCKLPD(ops ...operand.Op) { ctx.VUNPCKLPD(ops...) }
|
|
|
|
// VUNPCKLPD_BCST: Unpack and Interleave Low Packed Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VUNPCKLPD.BCST m64 xmm k xmm
|
|
// VUNPCKLPD.BCST m64 xmm xmm
|
|
// VUNPCKLPD.BCST m64 ymm k ymm
|
|
// VUNPCKLPD.BCST m64 ymm ymm
|
|
// VUNPCKLPD.BCST m64 zmm k zmm
|
|
// VUNPCKLPD.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VUNPCKLPD.BCST instruction to the active function.
|
|
func (c *Context) VUNPCKLPD_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VUNPCKLPD_BCST(ops...))
|
|
}
|
|
|
|
// VUNPCKLPD_BCST: Unpack and Interleave Low Packed Double-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VUNPCKLPD.BCST m64 xmm k xmm
|
|
// VUNPCKLPD.BCST m64 xmm xmm
|
|
// VUNPCKLPD.BCST m64 ymm k ymm
|
|
// VUNPCKLPD.BCST m64 ymm ymm
|
|
// VUNPCKLPD.BCST m64 zmm k zmm
|
|
// VUNPCKLPD.BCST m64 zmm zmm
|
|
//
|
|
// Construct and append a VUNPCKLPD.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VUNPCKLPD_BCST(ops ...operand.Op) { ctx.VUNPCKLPD_BCST(ops...) }
|
|
|
|
// VUNPCKLPD_BCST_Z: Unpack and Interleave Low Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VUNPCKLPD.BCST.Z m64 xmm k xmm
|
|
// VUNPCKLPD.BCST.Z m64 ymm k ymm
|
|
// VUNPCKLPD.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VUNPCKLPD.BCST.Z instruction to the active function.
|
|
func (c *Context) VUNPCKLPD_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VUNPCKLPD_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VUNPCKLPD_BCST_Z: Unpack and Interleave Low Packed Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VUNPCKLPD.BCST.Z m64 xmm k xmm
|
|
// VUNPCKLPD.BCST.Z m64 ymm k ymm
|
|
// VUNPCKLPD.BCST.Z m64 zmm k zmm
|
|
//
|
|
// Construct and append a VUNPCKLPD.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VUNPCKLPD_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VUNPCKLPD_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VUNPCKLPD_Z: Unpack and Interleave Low Packed Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VUNPCKLPD.Z m128 xmm k xmm
|
|
// VUNPCKLPD.Z m256 ymm k ymm
|
|
// VUNPCKLPD.Z xmm xmm k xmm
|
|
// VUNPCKLPD.Z ymm ymm k ymm
|
|
// VUNPCKLPD.Z m512 zmm k zmm
|
|
// VUNPCKLPD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VUNPCKLPD.Z instruction to the active function.
|
|
func (c *Context) VUNPCKLPD_Z(mxyz, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VUNPCKLPD_Z(mxyz, xyz, k, xyz1))
|
|
}
|
|
|
|
// VUNPCKLPD_Z: Unpack and Interleave Low Packed Double-Precision Floating-Point Values (Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VUNPCKLPD.Z m128 xmm k xmm
|
|
// VUNPCKLPD.Z m256 ymm k ymm
|
|
// VUNPCKLPD.Z xmm xmm k xmm
|
|
// VUNPCKLPD.Z ymm ymm k ymm
|
|
// VUNPCKLPD.Z m512 zmm k zmm
|
|
// VUNPCKLPD.Z zmm zmm k zmm
|
|
//
|
|
// Construct and append a VUNPCKLPD.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VUNPCKLPD_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VUNPCKLPD_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VUNPCKLPS: Unpack and Interleave Low Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VUNPCKLPS m128 xmm xmm
|
|
// VUNPCKLPS m256 ymm ymm
|
|
// VUNPCKLPS xmm xmm xmm
|
|
// VUNPCKLPS ymm ymm ymm
|
|
// VUNPCKLPS m128 xmm k xmm
|
|
// VUNPCKLPS m256 ymm k ymm
|
|
// VUNPCKLPS xmm xmm k xmm
|
|
// VUNPCKLPS ymm ymm k ymm
|
|
// VUNPCKLPS m512 zmm k zmm
|
|
// VUNPCKLPS m512 zmm zmm
|
|
// VUNPCKLPS zmm zmm k zmm
|
|
// VUNPCKLPS zmm zmm zmm
|
|
//
|
|
// Construct and append a VUNPCKLPS instruction to the active function.
|
|
func (c *Context) VUNPCKLPS(ops ...operand.Op) {
|
|
c.addinstruction(x86.VUNPCKLPS(ops...))
|
|
}
|
|
|
|
// VUNPCKLPS: Unpack and Interleave Low Packed Single-Precision Floating-Point Values.
|
|
//
|
|
// Forms:
|
|
//
|
|
// VUNPCKLPS m128 xmm xmm
|
|
// VUNPCKLPS m256 ymm ymm
|
|
// VUNPCKLPS xmm xmm xmm
|
|
// VUNPCKLPS ymm ymm ymm
|
|
// VUNPCKLPS m128 xmm k xmm
|
|
// VUNPCKLPS m256 ymm k ymm
|
|
// VUNPCKLPS xmm xmm k xmm
|
|
// VUNPCKLPS ymm ymm k ymm
|
|
// VUNPCKLPS m512 zmm k zmm
|
|
// VUNPCKLPS m512 zmm zmm
|
|
// VUNPCKLPS zmm zmm k zmm
|
|
// VUNPCKLPS zmm zmm zmm
|
|
//
|
|
// Construct and append a VUNPCKLPS instruction to the active function.
|
|
// Operates on the global context.
|
|
func VUNPCKLPS(ops ...operand.Op) { ctx.VUNPCKLPS(ops...) }
|
|
|
|
// VUNPCKLPS_BCST: Unpack and Interleave Low Packed Single-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VUNPCKLPS.BCST m32 xmm k xmm
|
|
// VUNPCKLPS.BCST m32 xmm xmm
|
|
// VUNPCKLPS.BCST m32 ymm k ymm
|
|
// VUNPCKLPS.BCST m32 ymm ymm
|
|
// VUNPCKLPS.BCST m32 zmm k zmm
|
|
// VUNPCKLPS.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VUNPCKLPS.BCST instruction to the active function.
|
|
func (c *Context) VUNPCKLPS_BCST(ops ...operand.Op) {
|
|
c.addinstruction(x86.VUNPCKLPS_BCST(ops...))
|
|
}
|
|
|
|
// VUNPCKLPS_BCST: Unpack and Interleave Low Packed Single-Precision Floating-Point Values (Broadcast).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VUNPCKLPS.BCST m32 xmm k xmm
|
|
// VUNPCKLPS.BCST m32 xmm xmm
|
|
// VUNPCKLPS.BCST m32 ymm k ymm
|
|
// VUNPCKLPS.BCST m32 ymm ymm
|
|
// VUNPCKLPS.BCST m32 zmm k zmm
|
|
// VUNPCKLPS.BCST m32 zmm zmm
|
|
//
|
|
// Construct and append a VUNPCKLPS.BCST instruction to the active function.
|
|
// Operates on the global context.
|
|
func VUNPCKLPS_BCST(ops ...operand.Op) { ctx.VUNPCKLPS_BCST(ops...) }
|
|
|
|
// VUNPCKLPS_BCST_Z: Unpack and Interleave Low Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VUNPCKLPS.BCST.Z m32 xmm k xmm
|
|
// VUNPCKLPS.BCST.Z m32 ymm k ymm
|
|
// VUNPCKLPS.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VUNPCKLPS.BCST.Z instruction to the active function.
|
|
func (c *Context) VUNPCKLPS_BCST_Z(m, xyz, k, xyz1 operand.Op) {
|
|
c.addinstruction(x86.VUNPCKLPS_BCST_Z(m, xyz, k, xyz1))
|
|
}
|
|
|
|
// VUNPCKLPS_BCST_Z: Unpack and Interleave Low Packed Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
|
|
//
|
|
// Forms:
|
|
//
|
|
// VUNPCKLPS.BCST.Z m32 xmm k xmm
|
|
// VUNPCKLPS.BCST.Z m32 ymm k ymm
|
|
// VUNPCKLPS.BCST.Z m32 zmm k zmm
|
|
//
|
|
// Construct and append a VUNPCKLPS.BCST.Z instruction to the active function.
|
|
// Operates on the global context.
|
|
func VUNPCKLPS_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VUNPCKLPS_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VUNPCKLPS_Z: Unpack and Interleave Low Packed Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VUNPCKLPS.Z m128 xmm k xmm
//	VUNPCKLPS.Z m256 ymm k ymm
//	VUNPCKLPS.Z xmm xmm k xmm
//	VUNPCKLPS.Z ymm ymm k ymm
//	VUNPCKLPS.Z m512 zmm k zmm
//	VUNPCKLPS.Z zmm zmm k zmm
//
// Construct and append a VUNPCKLPS.Z instruction to the active function.
func (c *Context) VUNPCKLPS_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VUNPCKLPS_Z(mxyz, xyz, k, xyz1))
}

// VUNPCKLPS_Z: Unpack and Interleave Low Packed Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VUNPCKLPS.Z m128 xmm k xmm
//	VUNPCKLPS.Z m256 ymm k ymm
//	VUNPCKLPS.Z xmm xmm k xmm
//	VUNPCKLPS.Z ymm ymm k ymm
//	VUNPCKLPS.Z m512 zmm k zmm
//	VUNPCKLPS.Z zmm zmm k zmm
//
// Construct and append a VUNPCKLPS.Z instruction to the active function.
// Operates on the global context.
func VUNPCKLPS_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VUNPCKLPS_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VXORPD: Bitwise Logical XOR for Double-Precision Floating-Point Values.
//
// Forms:
//
//	VXORPD m128 xmm xmm
//	VXORPD m256 ymm ymm
//	VXORPD xmm xmm xmm
//	VXORPD ymm ymm ymm
//	VXORPD m128 xmm k xmm
//	VXORPD m256 ymm k ymm
//	VXORPD xmm xmm k xmm
//	VXORPD ymm ymm k ymm
//	VXORPD m512 zmm k zmm
//	VXORPD m512 zmm zmm
//	VXORPD zmm zmm k zmm
//	VXORPD zmm zmm zmm
//
// Construct and append a VXORPD instruction to the active function.
func (c *Context) VXORPD(ops ...operand.Op) {
	c.addinstruction(x86.VXORPD(ops...))
}

// VXORPD: Bitwise Logical XOR for Double-Precision Floating-Point Values.
//
// Forms:
//
//	VXORPD m128 xmm xmm
//	VXORPD m256 ymm ymm
//	VXORPD xmm xmm xmm
//	VXORPD ymm ymm ymm
//	VXORPD m128 xmm k xmm
//	VXORPD m256 ymm k ymm
//	VXORPD xmm xmm k xmm
//	VXORPD ymm ymm k ymm
//	VXORPD m512 zmm k zmm
//	VXORPD m512 zmm zmm
//	VXORPD zmm zmm k zmm
//	VXORPD zmm zmm zmm
//
// Construct and append a VXORPD instruction to the active function.
// Operates on the global context.
func VXORPD(ops ...operand.Op) { ctx.VXORPD(ops...) }
|
|
|
|
// VXORPD_BCST: Bitwise Logical XOR for Double-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VXORPD.BCST m64 xmm k xmm
//	VXORPD.BCST m64 xmm xmm
//	VXORPD.BCST m64 ymm k ymm
//	VXORPD.BCST m64 ymm ymm
//	VXORPD.BCST m64 zmm k zmm
//	VXORPD.BCST m64 zmm zmm
//
// Construct and append a VXORPD.BCST instruction to the active function.
func (c *Context) VXORPD_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VXORPD_BCST(ops...))
}

// VXORPD_BCST: Bitwise Logical XOR for Double-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VXORPD.BCST m64 xmm k xmm
//	VXORPD.BCST m64 xmm xmm
//	VXORPD.BCST m64 ymm k ymm
//	VXORPD.BCST m64 ymm ymm
//	VXORPD.BCST m64 zmm k zmm
//	VXORPD.BCST m64 zmm zmm
//
// Construct and append a VXORPD.BCST instruction to the active function.
// Operates on the global context.
func VXORPD_BCST(ops ...operand.Op) { ctx.VXORPD_BCST(ops...) }
|
|
|
|
// VXORPD_BCST_Z: Bitwise Logical XOR for Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VXORPD.BCST.Z m64 xmm k xmm
//	VXORPD.BCST.Z m64 ymm k ymm
//	VXORPD.BCST.Z m64 zmm k zmm
//
// Construct and append a VXORPD.BCST.Z instruction to the active function.
func (c *Context) VXORPD_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VXORPD_BCST_Z(m, xyz, k, xyz1))
}

// VXORPD_BCST_Z: Bitwise Logical XOR for Double-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VXORPD.BCST.Z m64 xmm k xmm
//	VXORPD.BCST.Z m64 ymm k ymm
//	VXORPD.BCST.Z m64 zmm k zmm
//
// Construct and append a VXORPD.BCST.Z instruction to the active function.
// Operates on the global context.
func VXORPD_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VXORPD_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VXORPD_Z: Bitwise Logical XOR for Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VXORPD.Z m128 xmm k xmm
//	VXORPD.Z m256 ymm k ymm
//	VXORPD.Z xmm xmm k xmm
//	VXORPD.Z ymm ymm k ymm
//	VXORPD.Z m512 zmm k zmm
//	VXORPD.Z zmm zmm k zmm
//
// Construct and append a VXORPD.Z instruction to the active function.
func (c *Context) VXORPD_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VXORPD_Z(mxyz, xyz, k, xyz1))
}

// VXORPD_Z: Bitwise Logical XOR for Double-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VXORPD.Z m128 xmm k xmm
//	VXORPD.Z m256 ymm k ymm
//	VXORPD.Z xmm xmm k xmm
//	VXORPD.Z ymm ymm k ymm
//	VXORPD.Z m512 zmm k zmm
//	VXORPD.Z zmm zmm k zmm
//
// Construct and append a VXORPD.Z instruction to the active function.
// Operates on the global context.
func VXORPD_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VXORPD_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VXORPS: Bitwise Logical XOR for Single-Precision Floating-Point Values.
//
// Forms:
//
//	VXORPS m128 xmm xmm
//	VXORPS m256 ymm ymm
//	VXORPS xmm xmm xmm
//	VXORPS ymm ymm ymm
//	VXORPS m128 xmm k xmm
//	VXORPS m256 ymm k ymm
//	VXORPS xmm xmm k xmm
//	VXORPS ymm ymm k ymm
//	VXORPS m512 zmm k zmm
//	VXORPS m512 zmm zmm
//	VXORPS zmm zmm k zmm
//	VXORPS zmm zmm zmm
//
// Construct and append a VXORPS instruction to the active function.
func (c *Context) VXORPS(ops ...operand.Op) {
	c.addinstruction(x86.VXORPS(ops...))
}

// VXORPS: Bitwise Logical XOR for Single-Precision Floating-Point Values.
//
// Forms:
//
//	VXORPS m128 xmm xmm
//	VXORPS m256 ymm ymm
//	VXORPS xmm xmm xmm
//	VXORPS ymm ymm ymm
//	VXORPS m128 xmm k xmm
//	VXORPS m256 ymm k ymm
//	VXORPS xmm xmm k xmm
//	VXORPS ymm ymm k ymm
//	VXORPS m512 zmm k zmm
//	VXORPS m512 zmm zmm
//	VXORPS zmm zmm k zmm
//	VXORPS zmm zmm zmm
//
// Construct and append a VXORPS instruction to the active function.
// Operates on the global context.
func VXORPS(ops ...operand.Op) { ctx.VXORPS(ops...) }
|
|
|
|
// VXORPS_BCST: Bitwise Logical XOR for Single-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VXORPS.BCST m32 xmm k xmm
//	VXORPS.BCST m32 xmm xmm
//	VXORPS.BCST m32 ymm k ymm
//	VXORPS.BCST m32 ymm ymm
//	VXORPS.BCST m32 zmm k zmm
//	VXORPS.BCST m32 zmm zmm
//
// Construct and append a VXORPS.BCST instruction to the active function.
func (c *Context) VXORPS_BCST(ops ...operand.Op) {
	c.addinstruction(x86.VXORPS_BCST(ops...))
}

// VXORPS_BCST: Bitwise Logical XOR for Single-Precision Floating-Point Values (Broadcast).
//
// Forms:
//
//	VXORPS.BCST m32 xmm k xmm
//	VXORPS.BCST m32 xmm xmm
//	VXORPS.BCST m32 ymm k ymm
//	VXORPS.BCST m32 ymm ymm
//	VXORPS.BCST m32 zmm k zmm
//	VXORPS.BCST m32 zmm zmm
//
// Construct and append a VXORPS.BCST instruction to the active function.
// Operates on the global context.
func VXORPS_BCST(ops ...operand.Op) { ctx.VXORPS_BCST(ops...) }
|
|
|
|
// VXORPS_BCST_Z: Bitwise Logical XOR for Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VXORPS.BCST.Z m32 xmm k xmm
//	VXORPS.BCST.Z m32 ymm k ymm
//	VXORPS.BCST.Z m32 zmm k zmm
//
// Construct and append a VXORPS.BCST.Z instruction to the active function.
func (c *Context) VXORPS_BCST_Z(m, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VXORPS_BCST_Z(m, xyz, k, xyz1))
}

// VXORPS_BCST_Z: Bitwise Logical XOR for Single-Precision Floating-Point Values (Broadcast, Zeroing Masking).
//
// Forms:
//
//	VXORPS.BCST.Z m32 xmm k xmm
//	VXORPS.BCST.Z m32 ymm k ymm
//	VXORPS.BCST.Z m32 zmm k zmm
//
// Construct and append a VXORPS.BCST.Z instruction to the active function.
// Operates on the global context.
func VXORPS_BCST_Z(m, xyz, k, xyz1 operand.Op) { ctx.VXORPS_BCST_Z(m, xyz, k, xyz1) }
|
|
|
|
// VXORPS_Z: Bitwise Logical XOR for Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VXORPS.Z m128 xmm k xmm
//	VXORPS.Z m256 ymm k ymm
//	VXORPS.Z xmm xmm k xmm
//	VXORPS.Z ymm ymm k ymm
//	VXORPS.Z m512 zmm k zmm
//	VXORPS.Z zmm zmm k zmm
//
// Construct and append a VXORPS.Z instruction to the active function.
func (c *Context) VXORPS_Z(mxyz, xyz, k, xyz1 operand.Op) {
	c.addinstruction(x86.VXORPS_Z(mxyz, xyz, k, xyz1))
}

// VXORPS_Z: Bitwise Logical XOR for Single-Precision Floating-Point Values (Zeroing Masking).
//
// Forms:
//
//	VXORPS.Z m128 xmm k xmm
//	VXORPS.Z m256 ymm k ymm
//	VXORPS.Z xmm xmm k xmm
//	VXORPS.Z ymm ymm k ymm
//	VXORPS.Z m512 zmm k zmm
//	VXORPS.Z zmm zmm k zmm
//
// Construct and append a VXORPS.Z instruction to the active function.
// Operates on the global context.
func VXORPS_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VXORPS_Z(mxyz, xyz, k, xyz1) }
|
|
|
|
// VZEROALL: Zero All YMM Registers.
//
// Forms:
//
//	VZEROALL
//
// Construct and append a VZEROALL instruction to the active function.
func (c *Context) VZEROALL() {
	c.addinstruction(x86.VZEROALL())
}

// VZEROALL: Zero All YMM Registers.
//
// Forms:
//
//	VZEROALL
//
// Construct and append a VZEROALL instruction to the active function.
// Operates on the global context.
func VZEROALL() { ctx.VZEROALL() }
|
|
|
|
// VZEROUPPER: Zero Upper Bits of YMM Registers.
//
// Forms:
//
//	VZEROUPPER
//
// Construct and append a VZEROUPPER instruction to the active function.
func (c *Context) VZEROUPPER() {
	c.addinstruction(x86.VZEROUPPER())
}

// VZEROUPPER: Zero Upper Bits of YMM Registers.
//
// Forms:
//
//	VZEROUPPER
//
// Construct and append a VZEROUPPER instruction to the active function.
// Operates on the global context.
func VZEROUPPER() { ctx.VZEROUPPER() }
|
|
|
|
// XADDB: Exchange and Add.
//
// Forms:
//
//	XADDB r8 m8
//	XADDB r8 r8
//
// Construct and append a XADDB instruction to the active function.
func (c *Context) XADDB(r, mr operand.Op) {
	c.addinstruction(x86.XADDB(r, mr))
}

// XADDB: Exchange and Add.
//
// Forms:
//
//	XADDB r8 m8
//	XADDB r8 r8
//
// Construct and append a XADDB instruction to the active function.
// Operates on the global context.
func XADDB(r, mr operand.Op) { ctx.XADDB(r, mr) }
|
|
|
|
// XADDL: Exchange and Add.
//
// Forms:
//
//	XADDL r32 m32
//	XADDL r32 r32
//
// Construct and append a XADDL instruction to the active function.
func (c *Context) XADDL(r, mr operand.Op) {
	c.addinstruction(x86.XADDL(r, mr))
}

// XADDL: Exchange and Add.
//
// Forms:
//
//	XADDL r32 m32
//	XADDL r32 r32
//
// Construct and append a XADDL instruction to the active function.
// Operates on the global context.
func XADDL(r, mr operand.Op) { ctx.XADDL(r, mr) }
|
|
|
|
// XADDQ: Exchange and Add.
//
// Forms:
//
//	XADDQ r64 m64
//	XADDQ r64 r64
//
// Construct and append a XADDQ instruction to the active function.
func (c *Context) XADDQ(r, mr operand.Op) {
	c.addinstruction(x86.XADDQ(r, mr))
}

// XADDQ: Exchange and Add.
//
// Forms:
//
//	XADDQ r64 m64
//	XADDQ r64 r64
//
// Construct and append a XADDQ instruction to the active function.
// Operates on the global context.
func XADDQ(r, mr operand.Op) { ctx.XADDQ(r, mr) }
|
|
|
|
// XADDW: Exchange and Add.
//
// Forms:
//
//	XADDW r16 m16
//	XADDW r16 r16
//
// Construct and append a XADDW instruction to the active function.
func (c *Context) XADDW(r, mr operand.Op) {
	c.addinstruction(x86.XADDW(r, mr))
}

// XADDW: Exchange and Add.
//
// Forms:
//
//	XADDW r16 m16
//	XADDW r16 r16
//
// Construct and append a XADDW instruction to the active function.
// Operates on the global context.
func XADDW(r, mr operand.Op) { ctx.XADDW(r, mr) }
|
|
|
|
// XCHGB: Exchange Register/Memory with Register.
//
// Forms:
//
//	XCHGB m8 r8
//	XCHGB r8 m8
//	XCHGB r8 r8
//
// Construct and append a XCHGB instruction to the active function.
func (c *Context) XCHGB(mr, mr1 operand.Op) {
	c.addinstruction(x86.XCHGB(mr, mr1))
}

// XCHGB: Exchange Register/Memory with Register.
//
// Forms:
//
//	XCHGB m8 r8
//	XCHGB r8 m8
//	XCHGB r8 r8
//
// Construct and append a XCHGB instruction to the active function.
// Operates on the global context.
func XCHGB(mr, mr1 operand.Op) { ctx.XCHGB(mr, mr1) }
|
|
|
|
// XCHGL: Exchange Register/Memory with Register.
//
// Forms:
//
//	XCHGL eax r32
//	XCHGL m32 r32
//	XCHGL r32 eax
//	XCHGL r32 m32
//	XCHGL r32 r32
//
// Construct and append a XCHGL instruction to the active function.
func (c *Context) XCHGL(emr, emr1 operand.Op) {
	c.addinstruction(x86.XCHGL(emr, emr1))
}

// XCHGL: Exchange Register/Memory with Register.
//
// Forms:
//
//	XCHGL eax r32
//	XCHGL m32 r32
//	XCHGL r32 eax
//	XCHGL r32 m32
//	XCHGL r32 r32
//
// Construct and append a XCHGL instruction to the active function.
// Operates on the global context.
func XCHGL(emr, emr1 operand.Op) { ctx.XCHGL(emr, emr1) }
|
|
|
|
// XCHGQ: Exchange Register/Memory with Register.
//
// Forms:
//
//	XCHGQ m64 r64
//	XCHGQ r64 m64
//	XCHGQ r64 r64
//	XCHGQ r64 rax
//	XCHGQ rax r64
//
// Construct and append a XCHGQ instruction to the active function.
func (c *Context) XCHGQ(mr, mr1 operand.Op) {
	c.addinstruction(x86.XCHGQ(mr, mr1))
}

// XCHGQ: Exchange Register/Memory with Register.
//
// Forms:
//
//	XCHGQ m64 r64
//	XCHGQ r64 m64
//	XCHGQ r64 r64
//	XCHGQ r64 rax
//	XCHGQ rax r64
//
// Construct and append a XCHGQ instruction to the active function.
// Operates on the global context.
func XCHGQ(mr, mr1 operand.Op) { ctx.XCHGQ(mr, mr1) }
|
|
|
|
// XCHGW: Exchange Register/Memory with Register.
//
// Forms:
//
//	XCHGW ax r16
//	XCHGW m16 r16
//	XCHGW r16 ax
//	XCHGW r16 m16
//	XCHGW r16 r16
//
// Construct and append a XCHGW instruction to the active function.
func (c *Context) XCHGW(amr, amr1 operand.Op) {
	c.addinstruction(x86.XCHGW(amr, amr1))
}

// XCHGW: Exchange Register/Memory with Register.
//
// Forms:
//
//	XCHGW ax r16
//	XCHGW m16 r16
//	XCHGW r16 ax
//	XCHGW r16 m16
//	XCHGW r16 r16
//
// Construct and append a XCHGW instruction to the active function.
// Operates on the global context.
func XCHGW(amr, amr1 operand.Op) { ctx.XCHGW(amr, amr1) }
|
|
|
|
// XGETBV: Get Value of Extended Control Register.
//
// Forms:
//
//	XGETBV
//
// Construct and append a XGETBV instruction to the active function.
func (c *Context) XGETBV() {
	c.addinstruction(x86.XGETBV())
}

// XGETBV: Get Value of Extended Control Register.
//
// Forms:
//
//	XGETBV
//
// Construct and append a XGETBV instruction to the active function.
// Operates on the global context.
func XGETBV() { ctx.XGETBV() }
|
|
|
|
// XLAT: Table Look-up Translation.
//
// Forms:
//
//	XLAT
//
// Construct and append a XLAT instruction to the active function.
func (c *Context) XLAT() {
	c.addinstruction(x86.XLAT())
}

// XLAT: Table Look-up Translation.
//
// Forms:
//
//	XLAT
//
// Construct and append a XLAT instruction to the active function.
// Operates on the global context.
func XLAT() { ctx.XLAT() }
|
|
|
|
// XORB: Logical Exclusive OR.
//
// Forms:
//
//	XORB imm8 al
//	XORB imm8 m8
//	XORB imm8 r8
//	XORB m8 r8
//	XORB r8 m8
//	XORB r8 r8
//
// Construct and append a XORB instruction to the active function.
func (c *Context) XORB(imr, amr operand.Op) {
	c.addinstruction(x86.XORB(imr, amr))
}

// XORB: Logical Exclusive OR.
//
// Forms:
//
//	XORB imm8 al
//	XORB imm8 m8
//	XORB imm8 r8
//	XORB m8 r8
//	XORB r8 m8
//	XORB r8 r8
//
// Construct and append a XORB instruction to the active function.
// Operates on the global context.
func XORB(imr, amr operand.Op) { ctx.XORB(imr, amr) }
|
|
|
|
// XORL: Logical Exclusive OR.
//
// Forms:
//
//	XORL imm32 eax
//	XORL imm32 m32
//	XORL imm32 r32
//	XORL imm8 m32
//	XORL imm8 r32
//	XORL m32 r32
//	XORL r32 m32
//	XORL r32 r32
//
// Construct and append a XORL instruction to the active function.
func (c *Context) XORL(imr, emr operand.Op) {
	c.addinstruction(x86.XORL(imr, emr))
}

// XORL: Logical Exclusive OR.
//
// Forms:
//
//	XORL imm32 eax
//	XORL imm32 m32
//	XORL imm32 r32
//	XORL imm8 m32
//	XORL imm8 r32
//	XORL m32 r32
//	XORL r32 m32
//	XORL r32 r32
//
// Construct and append a XORL instruction to the active function.
// Operates on the global context.
func XORL(imr, emr operand.Op) { ctx.XORL(imr, emr) }
|
|
|
|
// XORPD: Bitwise Logical XOR for Double-Precision Floating-Point Values.
//
// Forms:
//
//	XORPD m128 xmm
//	XORPD xmm xmm
//
// Construct and append a XORPD instruction to the active function.
func (c *Context) XORPD(mx, x operand.Op) {
	c.addinstruction(x86.XORPD(mx, x))
}

// XORPD: Bitwise Logical XOR for Double-Precision Floating-Point Values.
//
// Forms:
//
//	XORPD m128 xmm
//	XORPD xmm xmm
//
// Construct and append a XORPD instruction to the active function.
// Operates on the global context.
func XORPD(mx, x operand.Op) { ctx.XORPD(mx, x) }
|
|
|
|
// XORPS: Bitwise Logical XOR for Single-Precision Floating-Point Values.
//
// Forms:
//
//	XORPS m128 xmm
//	XORPS xmm xmm
//
// Construct and append a XORPS instruction to the active function.
func (c *Context) XORPS(mx, x operand.Op) {
	c.addinstruction(x86.XORPS(mx, x))
}

// XORPS: Bitwise Logical XOR for Single-Precision Floating-Point Values.
//
// Forms:
//
//	XORPS m128 xmm
//	XORPS xmm xmm
//
// Construct and append a XORPS instruction to the active function.
// Operates on the global context.
func XORPS(mx, x operand.Op) { ctx.XORPS(mx, x) }
|
|
|
|
// XORQ: Logical Exclusive OR.
//
// Forms:
//
//	XORQ imm32 m64
//	XORQ imm32 r64
//	XORQ imm32 rax
//	XORQ imm8 m64
//	XORQ imm8 r64
//	XORQ m64 r64
//	XORQ r64 m64
//	XORQ r64 r64
//
// Construct and append a XORQ instruction to the active function.
func (c *Context) XORQ(imr, mr operand.Op) {
	c.addinstruction(x86.XORQ(imr, mr))
}

// XORQ: Logical Exclusive OR.
//
// Forms:
//
//	XORQ imm32 m64
//	XORQ imm32 r64
//	XORQ imm32 rax
//	XORQ imm8 m64
//	XORQ imm8 r64
//	XORQ m64 r64
//	XORQ r64 m64
//	XORQ r64 r64
//
// Construct and append a XORQ instruction to the active function.
// Operates on the global context.
func XORQ(imr, mr operand.Op) { ctx.XORQ(imr, mr) }
|
|
|
|
// XORW: Logical Exclusive OR.
//
// Forms:
//
//	XORW imm16 ax
//	XORW imm16 m16
//	XORW imm16 r16
//	XORW imm8 m16
//	XORW imm8 r16
//	XORW m16 r16
//	XORW r16 m16
//	XORW r16 r16
//
// Construct and append a XORW instruction to the active function.
func (c *Context) XORW(imr, amr operand.Op) {
	c.addinstruction(x86.XORW(imr, amr))
}

// XORW: Logical Exclusive OR.
//
// Forms:
//
//	XORW imm16 ax
//	XORW imm16 m16
//	XORW imm16 r16
//	XORW imm8 m16
//	XORW imm8 r16
//	XORW m16 r16
//	XORW r16 m16
//	XORW r16 r16
//
// Construct and append a XORW instruction to the active function.
// Operates on the global context.
func XORW(imr, amr operand.Op) { ctx.XORW(imr, amr) }
|