A32: Implement ASIMD VQSUB instruction
parent 16ff880f8f
commit f3845cea9a
8 changed files with 157 additions and 184 deletions
@@ -16,22 +16,16 @@ using namespace Xbyak::util;
 namespace {
 
 void EmitVectorSaturatedNative(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst, void (Xbyak::CodeGenerator::*saturated_fn)(const Xbyak::Mmx& mmx, const Xbyak::Operand&), void (Xbyak::CodeGenerator::*unsaturated_fn)(const Xbyak::Mmx& mmx, const Xbyak::Operand&), void (Xbyak::CodeGenerator::*sub_fn)(const Xbyak::Mmx& mmx, const Xbyak::Operand&)) {
-    const auto overflow_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp);
-
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
 
     const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
     const Xbyak::Xmm addend = ctx.reg_alloc.UseXmm(args[1]);
+    const Xbyak::Reg8 overflow = ctx.reg_alloc.ScratchGpr().cvt8();
 
-    if (overflow_inst) {
     code.movaps(xmm0, result);
-    }
 
     (code.*saturated_fn)(result, addend);
 
-    if (overflow_inst) {
-        const Xbyak::Reg8 overflow = ctx.reg_alloc.ScratchGpr().cvt8();
-
     (code.*unsaturated_fn)(xmm0, addend);
     (code.*sub_fn)(xmm0, result);
 
     if (code.DoesCpuSupport(Xbyak::util::Cpu::tSSE41)) {
@@ -45,10 +39,7 @@ void EmitVectorSaturatedNative(BlockOfCode& code, EmitContext& ctx, IR::Inst* in
         code.test(overflow.cvt32(), overflow.cvt32());
     }
     code.setnz(overflow);
-
-    ctx.reg_alloc.DefineValue(overflow_inst, overflow);
-    ctx.EraseInstruction(overflow_inst);
-    }
+    code.or_(code.byte[code.r15 + code.GetJitStateInfo().offsetof_fpsr_qc], overflow);
 
     ctx.reg_alloc.DefineValue(inst, result);
 }
@@ -63,13 +54,12 @@ void EmitVectorSignedSaturated(BlockOfCode& code, EmitContext& ctx, IR::Inst* in
     static_assert(esize == 32 || esize == 64);
     constexpr u64 msb_mask = esize == 32 ? 0x8000000080000000 : 0x8000000000000000;
 
-    const auto overflow_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp);
-
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
 
     const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
     const Xbyak::Xmm arg = ctx.reg_alloc.UseXmm(args[1]);
     const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
+    const Xbyak::Reg8 overflow = ctx.reg_alloc.ScratchGpr().cvt8();
 
     // TODO AVX-512: vpternlog, vpsraq
     // TODO AVX2 implementation
@@ -106,9 +96,6 @@ void EmitVectorSignedSaturated(BlockOfCode& code, EmitContext& ctx, IR::Inst* in
     }
     code.pxor(tmp, code.MConst(xword, msb_mask, msb_mask));
 
-    if (overflow_inst) {
-        const Xbyak::Reg8 overflow = ctx.reg_alloc.ScratchGpr().cvt8();
-
     if (code.DoesCpuSupport(Xbyak::util::Cpu::tSSE41)) {
         code.ptest(xmm0, code.MConst(xword, msb_mask, msb_mask));
     } else {
@@ -120,10 +107,7 @@ void EmitVectorSignedSaturated(BlockOfCode& code, EmitContext& ctx, IR::Inst* in
         code.test(overflow.cvt32(), overflow.cvt32());
     }
     code.setnz(overflow);
-
-    ctx.reg_alloc.DefineValue(overflow_inst, overflow);
-    ctx.EraseInstruction(overflow_inst);
-    }
+    code.or_(code.byte[code.r15 + code.GetJitStateInfo().offsetof_fpsr_qc], overflow);
 
     if (code.DoesCpuSupport(Xbyak::util::Cpu::tSSE41)) {
         if constexpr (esize == 32) {
@@ -190,13 +174,12 @@ void EmitX64::EmitVectorUnsignedSaturatedAdd16(EmitContext& ctx, IR::Inst* inst)
 }
 
 void EmitX64::EmitVectorUnsignedSaturatedAdd32(EmitContext& ctx, IR::Inst* inst) {
-    const auto overflow_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp);
-
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
 
     const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
     const Xbyak::Xmm addend = ctx.reg_alloc.UseXmm(args[1]);
     const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
+    const Xbyak::Reg8 overflow = ctx.reg_alloc.ScratchGpr().cvt8();
 
     // TODO AVX2, AVX-512: vpternlog
 
@@ -213,9 +196,6 @@ void EmitX64::EmitVectorUnsignedSaturatedAdd32(EmitContext& ctx, IR::Inst* inst)
 
     code.por(result, tmp);
 
-    if (overflow_inst) {
-        const Xbyak::Reg8 overflow = ctx.reg_alloc.ScratchGpr().cvt8();
-
     if (code.DoesCpuSupport(Xbyak::util::Cpu::tSSE41)) {
         code.ptest(tmp, tmp);
     } else {
@@ -223,22 +203,18 @@ void EmitX64::EmitVectorUnsignedSaturatedAdd32(EmitContext& ctx, IR::Inst* inst)
         code.test(overflow.cvt32(), overflow.cvt32());
     }
     code.setnz(overflow);
-
-    ctx.reg_alloc.DefineValue(overflow_inst, overflow);
-    ctx.EraseInstruction(overflow_inst);
-    }
+    code.or_(code.byte[code.r15 + code.GetJitStateInfo().offsetof_fpsr_qc], overflow);
 
     ctx.reg_alloc.DefineValue(inst, result);
 }
 
 void EmitX64::EmitVectorUnsignedSaturatedAdd64(EmitContext& ctx, IR::Inst* inst) {
-    const auto overflow_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp);
-
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
 
     const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
     const Xbyak::Xmm addend = ctx.reg_alloc.UseXmm(args[1]);
     const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
+    const Xbyak::Reg8 overflow = ctx.reg_alloc.ScratchGpr().cvt8();
 
     // TODO AVX2, AVX-512: vpternlog
 
@@ -256,9 +232,6 @@ void EmitX64::EmitVectorUnsignedSaturatedAdd64(EmitContext& ctx, IR::Inst* inst)
 
     code.por(result, tmp);
 
-    if (overflow_inst) {
-        const Xbyak::Reg8 overflow = ctx.reg_alloc.ScratchGpr().cvt8();
-
     if (code.DoesCpuSupport(Xbyak::util::Cpu::tSSE41)) {
         code.ptest(tmp, tmp);
     } else {
@@ -266,10 +239,7 @@ void EmitX64::EmitVectorUnsignedSaturatedAdd64(EmitContext& ctx, IR::Inst* inst)
         code.test(overflow.cvt32(), overflow.cvt32());
     }
     code.setnz(overflow);
-
-    ctx.reg_alloc.DefineValue(overflow_inst, overflow);
-    ctx.EraseInstruction(overflow_inst);
-    }
+    code.or_(code.byte[code.r15 + code.GetJitStateInfo().offsetof_fpsr_qc], overflow);
 
     ctx.reg_alloc.DefineValue(inst, result);
 }
@@ -283,13 +253,12 @@ void EmitX64::EmitVectorUnsignedSaturatedSub16(EmitContext& ctx, IR::Inst* inst)
 }
 
 void EmitX64::EmitVectorUnsignedSaturatedSub32(EmitContext& ctx, IR::Inst* inst) {
-    const auto overflow_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp);
-
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
 
     const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
     const Xbyak::Xmm subtrahend = ctx.reg_alloc.UseXmm(args[1]);
     const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
+    const Xbyak::Reg8 overflow = ctx.reg_alloc.ScratchGpr().cvt8();
 
     // TODO AVX2, AVX-512: vpternlog
 
@@ -304,9 +273,6 @@ void EmitX64::EmitVectorUnsignedSaturatedSub32(EmitContext& ctx, IR::Inst* inst)
     code.psubd(tmp, xmm0);
     code.psrad(tmp, 31);
 
-    if (overflow_inst) {
-        const Xbyak::Reg8 overflow = ctx.reg_alloc.ScratchGpr().cvt8();
-
     if (code.DoesCpuSupport(Xbyak::util::Cpu::tSSE41)) {
         code.ptest(tmp, tmp);
     } else {
@@ -314,24 +280,19 @@ void EmitX64::EmitVectorUnsignedSaturatedSub32(EmitContext& ctx, IR::Inst* inst)
         code.test(overflow.cvt32(), overflow.cvt32());
     }
     code.setnz(overflow);
-
-    ctx.reg_alloc.DefineValue(overflow_inst, overflow);
-    ctx.EraseInstruction(overflow_inst);
-    }
+    code.or_(code.byte[code.r15 + code.GetJitStateInfo().offsetof_fpsr_qc], overflow);
 
     code.pandn(tmp, result);
 
     ctx.reg_alloc.DefineValue(inst, tmp);
 }
 
 void EmitX64::EmitVectorUnsignedSaturatedSub64(EmitContext& ctx, IR::Inst* inst) {
-    const auto overflow_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp);
-
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
 
     const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
     const Xbyak::Xmm subtrahend = ctx.reg_alloc.UseXmm(args[1]);
     const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
+    const Xbyak::Reg8 overflow = ctx.reg_alloc.ScratchGpr().cvt8();
 
     // TODO AVX2, AVX-512: vpternlog
 
@@ -347,9 +308,6 @@ void EmitX64::EmitVectorUnsignedSaturatedSub64(EmitContext& ctx, IR::Inst* inst)
     code.psrad(tmp, 31);
     code.pshufd(tmp, tmp, 0b11110101);
 
-    if (overflow_inst) {
-        const Xbyak::Reg8 overflow = ctx.reg_alloc.ScratchGpr().cvt8();
-
     if (code.DoesCpuSupport(Xbyak::util::Cpu::tSSE41)) {
         code.ptest(tmp, tmp);
     } else {
@@ -357,13 +315,9 @@ void EmitX64::EmitVectorUnsignedSaturatedSub64(EmitContext& ctx, IR::Inst* inst)
         code.test(overflow.cvt32(), overflow.cvt32());
     }
     code.setnz(overflow);
-
-    ctx.reg_alloc.DefineValue(overflow_inst, overflow);
-    ctx.EraseInstruction(overflow_inst);
-    }
+    code.or_(code.byte[code.r15 + code.GetJitStateInfo().offsetof_fpsr_qc], overflow);
 
     code.pandn(tmp, result);
 
     ctx.reg_alloc.DefineValue(inst, tmp);
 }
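Note on the backend hunks above: each emitter now detects saturation unconditionally and ORs the outcome into the sticky FPSR.QC byte at `r15 + offsetof_fpsr_qc`, instead of materialising an overflow value only when a `GetOverflowFromOp` pseudo-op asked for it. The underlying idea is that a lane saturated exactly when the saturated and wrapping results differ. A minimal scalar sketch of that idea in plain C++ (one unsigned 8-bit lane; illustrative only, not dynarmic code):

#include <cstdint>
#include <cstdio>
#include <utility>

// Scalar model of one unsigned 8-bit lane of a saturating subtract.
// The real emitter makes the same comparison lane-wise on SSE registers
// via the saturated_fn/unsaturated_fn pair shown in the diff.
struct LaneResult {
    std::uint8_t value;
    bool saturated;  // feeds the sticky QC flag
};

LaneResult UnsignedSaturatedSub8(std::uint8_t a, std::uint8_t b) {
    const std::uint8_t wrapped = static_cast<std::uint8_t>(a - b);                            // wrapping subtract
    const std::uint8_t clamped = a < b ? std::uint8_t{0} : static_cast<std::uint8_t>(a - b);  // saturating subtract
    return {clamped, clamped != wrapped};  // saturated iff the two results differ
}

int main() {
    const std::pair<int, int> cases[] = {{10, 3}, {3, 10}};
    bool qc = false;
    for (const auto& [a, b] : cases) {
        const LaneResult r = UnsignedSaturatedSub8(static_cast<std::uint8_t>(a), static_cast<std::uint8_t>(b));
        qc |= r.saturated;  // same accumulation the emitted or_ performs
        std::printf("%d - %d -> %u (saturated: %d)\n", a, b, static_cast<unsigned>(r.value), r.saturated ? 1 : 0);
    }
    std::printf("QC = %d\n", qc ? 1 : 0);
}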
@@ -11,7 +11,7 @@ INST(asimd_VBSL, "VBSL", "111100110D01nnnndddd000
 INST(asimd_VBIT, "VBIT", "111100110D10nnnndddd0001NQM1mmmm") // ASIMD
 INST(asimd_VBIF, "VBIF", "111100110D11nnnndddd0001NQM1mmmm") // ASIMD
 INST(asimd_VHSUB, "VHSUB", "1111001U0Dzznnnndddd0010NQM0mmmm") // ASIMD
-//INST(asimd_VQSUB, "VQSUB", "1111001U0-CC--------0010---1----") // ASIMD
+INST(asimd_VQSUB, "VQSUB", "1111001U0Dzznnnndddd0010NQM1mmmm") // ASIMD
 //INST(asimd_VCGT_reg, "VCGT (register)", "1111001U0-CC--------0011---0----") // ASIMD
 //INST(asimd_VCGE_reg, "VCGE (register)", "1111001U0-CC--------0011---1----") // ASIMD
 //INST(asimd_VSHL_reg, "VSHL (register)", "1111001U0-CC--------0100---0----") // ASIMD
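Note: enabling the decoder entry is all that is needed to route the encoding to the new handler; the `U`, `D`, `zz`, `nnnn`, `dddd`, `N`, `Q`, `M`, `mmmm` fields are read straight off the listed bit pattern (bit 31 is the leftmost character) and passed to `asimd_VQSUB(U, D, sz, Vn, Vd, N, Q, M, Vm)`. A rough sketch of that field mapping as ordinary C++ (hand-written hypothetical helper, not dynarmic's generated decoder):

#include <cstdint>

// Field extraction for "1111001U0Dzznnnndddd0010NQM1mmmm" (VQSUB),
// written out by hand purely to illustrate the table entry above.
struct VqsubFields {
    bool U, D, N, Q, M;
    unsigned sz, Vn, Vd, Vm;
};

constexpr unsigned Bits(std::uint32_t insn, unsigned hi, unsigned lo) {
    return (insn >> lo) & ((1u << (hi - lo + 1u)) - 1u);
}

constexpr VqsubFields DecodeVqsub(std::uint32_t insn) {
    return {
        /*U=*/ Bits(insn, 24, 24) != 0,
        /*D=*/ Bits(insn, 22, 22) != 0,
        /*N=*/ Bits(insn, 7, 7) != 0,
        /*Q=*/ Bits(insn, 6, 6) != 0,
        /*M=*/ Bits(insn, 5, 5) != 0,
        /*sz=*/ Bits(insn, 21, 20),
        /*Vn=*/ Bits(insn, 19, 16),
        /*Vd=*/ Bits(insn, 15, 12),
        /*Vm=*/ Bits(insn, 3, 0),
    };
}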
@@ -74,9 +74,8 @@ bool ArmTranslatorVisitor::asimd_VQADD(bool U, bool D, size_t sz, size_t Vn, siz
 
     const IR::U128 reg_n = ir.GetVector(n);
     const IR::U128 reg_m = ir.GetVector(m);
-    const auto result = U ? ir.VectorUnsignedSaturatedAdd(esize, reg_n, reg_m) : ir.VectorSignedSaturatedAdd(esize, reg_n, reg_m);
-    ir.OrQFlag(result.overflow);
-    ir.SetVector(d, result.result);
+    const IR::U128 result = U ? ir.VectorUnsignedSaturatedAdd(esize, reg_n, reg_m) : ir.VectorSignedSaturatedAdd(esize, reg_n, reg_m);
+    ir.SetVector(d, result);
 
     return true;
 }
@@ -173,4 +172,26 @@ bool ArmTranslatorVisitor::asimd_VHSUB(bool U, bool D, size_t sz, size_t Vn, siz
     return true;
 }
 
+bool ArmTranslatorVisitor::asimd_VQSUB(bool U, bool D, size_t sz, size_t Vn, size_t Vd, bool N, bool Q, bool M, size_t Vm) {
+    if (Q && (Common::Bit<0>(Vd) || Common::Bit<0>(Vn) || Common::Bit<0>(Vm))) {
+        return UndefinedInstruction();
+    }
+
+    if (sz == 0b11) {
+        return UndefinedInstruction();
+    }
+
+    const size_t esize = 8 << sz;
+    const auto d = ToVector(Q, Vd, D);
+    const auto m = ToVector(Q, Vm, M);
+    const auto n = ToVector(Q, Vn, N);
+
+    const IR::U128 reg_n = ir.GetVector(n);
+    const IR::U128 reg_m = ir.GetVector(m);
+    const IR::U128 result = U ? ir.VectorUnsignedSaturatedSub(esize, reg_n, reg_m) : ir.VectorSignedSaturatedSub(esize, reg_n, reg_m);
+    ir.SetVector(d, result);
+
+    return true;
+}
+
 } // namespace Dynarmic::A32
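Note: the new `asimd_VQSUB` handler mirrors `asimd_VQAD­D` directly: reject the encodings the translator treats as undefined, compute `esize = 8 << sz`, fetch the two source vectors, and emit either the unsigned or the signed saturated subtract depending on `U`. For reference, the signed flavour clamps each lane to its representable range rather than wrapping; a small stand-alone model of one 16-bit lane (illustrative only, the real semantics live in the IR opcodes used above):

#include <algorithm>
#include <cassert>
#include <cstdint>

// Reference model of one signed 16-bit lane of a saturating subtract.
std::int16_t SignedSaturatedSub16(std::int16_t a, std::int16_t b) {
    const std::int32_t wide = std::int32_t{a} - std::int32_t{b};  // exact result
    const std::int32_t lo = INT16_MIN;
    const std::int32_t hi = INT16_MAX;
    return static_cast<std::int16_t>(std::clamp(wide, lo, hi));   // clamp instead of wrap
}

int main() {
    assert(SignedSaturatedSub16(100, 50) == 50);               // no saturation
    assert(SignedSaturatedSub16(-30000, 10000) == INT16_MIN);  // clamps at -32768
    assert(SignedSaturatedSub16(30000, -10000) == INT16_MAX);  // clamps at 32767
    return 0;
}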
@@ -446,6 +446,7 @@ struct ArmTranslatorVisitor final {
     bool asimd_VBIT(bool D, size_t Vn, size_t Vd, bool N, bool Q, bool M, size_t Vm);
     bool asimd_VBIF(bool D, size_t Vn, size_t Vd, bool N, bool Q, bool M, size_t Vm);
     bool asimd_VHSUB(bool U, bool D, size_t sz, size_t Vn, size_t Vd, bool N, bool Q, bool M, size_t Vm);
+    bool asimd_VQSUB(bool U, bool D, size_t sz, size_t Vn, size_t Vd, bool N, bool Q, bool M, size_t Vm);
 
     // Advanced SIMD two register, miscellaneous
     bool asimd_VSWP(bool D, size_t Vd, bool Q, bool M, size_t Vm);
@@ -335,7 +335,7 @@ bool SaturatingArithmeticOperation(TranslatorVisitor& v, bool Q, Imm<2> size, Ve
     const IR::U128 operand1 = v.V(datasize, Vn);
     const IR::U128 operand2 = v.V(datasize, Vm);
 
-    const auto result = [&] {
+    const IR::U128 result = [&] {
         if (sign == Signedness::Signed) {
             if (op == Operation::Add) {
                 return v.ir.VectorSignedSaturatedAdd(esize, operand1, operand2);
@@ -351,9 +351,7 @@ bool SaturatingArithmeticOperation(TranslatorVisitor& v, bool Q, Imm<2> size, Ve
             return v.ir.VectorUnsignedSaturatedSub(esize, operand1, operand2);
     }();
 
-    v.ir.OrQC(result.overflow);
-
-    v.V(datasize, Vd, result.result);
+    v.V(datasize, Vd, result);
     return true;
 }
@@ -635,8 +635,7 @@ ResultAndOverflow<U32> IREmitter::UnsignedSaturation(const U32& a, size_t bit_si
     return {result, overflow};
 }
 
-ResultAndOverflow<U128> IREmitter::VectorSignedSaturatedAdd(size_t esize, const U128& a, const U128& b) {
-    const auto result = [&]{
+U128 IREmitter::VectorSignedSaturatedAdd(size_t esize, const U128& a, const U128& b) {
     switch (esize) {
     case 8:
         return Inst<U128>(Opcode::VectorSignedSaturatedAdd8, a, b);
@@ -649,13 +648,9 @@ ResultAndOverflow<U128> IREmitter::VectorSignedSaturatedAdd(size_t esize, const
     default:
         UNREACHABLE();
     }
-    }();
-    const auto overflow = Inst<U1>(Opcode::GetOverflowFromOp, result);
-    return {result, overflow};
 }
 
-ResultAndOverflow<U128> IREmitter::VectorSignedSaturatedSub(size_t esize, const U128& a, const U128& b) {
-    const auto result = [&]{
+U128 IREmitter::VectorSignedSaturatedSub(size_t esize, const U128& a, const U128& b) {
     switch (esize) {
     case 8:
         return Inst<U128>(Opcode::VectorSignedSaturatedSub8, a, b);
@@ -668,13 +663,9 @@ ResultAndOverflow<U128> IREmitter::VectorSignedSaturatedSub(size_t esize, const
     default:
         UNREACHABLE();
     }
-    }();
-    const auto overflow = Inst<U1>(Opcode::GetOverflowFromOp, result);
-    return {result, overflow};
 }
 
-ResultAndOverflow<U128> IREmitter::VectorUnsignedSaturatedAdd(size_t esize, const U128& a, const U128& b) {
-    const auto result = [&]{
+U128 IREmitter::VectorUnsignedSaturatedAdd(size_t esize, const U128& a, const U128& b) {
     switch (esize) {
     case 8:
         return Inst<U128>(Opcode::VectorUnsignedSaturatedAdd8, a, b);
@@ -687,13 +678,9 @@ ResultAndOverflow<U128> IREmitter::VectorUnsignedSaturatedAdd(size_t esize, cons
     default:
         UNREACHABLE();
     }
-    }();
-    const auto overflow = Inst<U1>(Opcode::GetOverflowFromOp, result);
-    return {result, overflow};
 }
 
-ResultAndOverflow<U128> IREmitter::VectorUnsignedSaturatedSub(size_t esize, const U128& a, const U128& b) {
-    const auto result = [&]{
+U128 IREmitter::VectorUnsignedSaturatedSub(size_t esize, const U128& a, const U128& b) {
     switch (esize) {
     case 8:
         return Inst<U128>(Opcode::VectorUnsignedSaturatedSub8, a, b);
@@ -706,12 +693,8 @@ ResultAndOverflow<U128> IREmitter::VectorUnsignedSaturatedSub(size_t esize, cons
     default:
         UNREACHABLE();
     }
-    }();
-    const auto overflow = Inst<U1>(Opcode::GetOverflowFromOp, result);
-    return {result, overflow};
 }
 
 ResultAndGE<U32> IREmitter::PackedAddU8(const U32& a, const U32& b) {
     const auto result = Inst<U32>(Opcode::PackedAddU8, a, b);
     const auto ge = Inst<U32>(Opcode::GetGEFromOp, result);
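Note on the IR interface change above: the four vector saturating helpers no longer hand back a separate overflow token for callers to OR into Q/QC; the opcodes themselves are flagged as writing FPSR.QC (see the `WritesToFPSRCumulativeSaturationBit` hunks below), and QC is sticky: the backend ORs a 0/1 byte into it on every saturating op, so once any lane of any instruction saturates the bit stays set until the guest clears it. A tiny runnable model of that sticky accumulation (assumed semantics of the emitted `or_` into `offsetof_fpsr_qc`; not dynarmic code):

#include <cstdint>
#include <cstdio>

// Minimal model of the sticky FPSR.QC byte the backend ORs into.
struct JitStateModel {
    std::uint8_t fpsr_qc = 0;  // nonzero once any saturating op has saturated
};

// Stand-in for one emitted saturating vector op reporting whether it saturated.
void RunSaturatingOp(JitStateModel& state, bool saturated_this_time) {
    state.fpsr_qc |= static_cast<std::uint8_t>(saturated_this_time ? 1 : 0);
}

int main() {
    JitStateModel state;
    RunSaturatingOp(state, false);
    RunSaturatingOp(state, true);   // QC becomes set here...
    RunSaturatingOp(state, false);  // ...and stays set afterwards
    std::printf("QC = %u\n", static_cast<unsigned>(state.fpsr_qc));  // prints 1
}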
@@ -166,10 +166,10 @@ public:
     ResultAndOverflow<UAny> UnsignedSaturatedSub(const UAny& a, const UAny& b);
     ResultAndOverflow<U32> UnsignedSaturation(const U32& a, size_t bit_size_to_saturate_to);
 
-    ResultAndOverflow<U128> VectorSignedSaturatedAdd(size_t esize, const U128& a, const U128& b);
-    ResultAndOverflow<U128> VectorSignedSaturatedSub(size_t esize, const U128& a, const U128& b);
-    ResultAndOverflow<U128> VectorUnsignedSaturatedAdd(size_t esize, const U128& a, const U128& b);
-    ResultAndOverflow<U128> VectorUnsignedSaturatedSub(size_t esize, const U128& a, const U128& b);
+    U128 VectorSignedSaturatedAdd(size_t esize, const U128& a, const U128& b);
+    U128 VectorSignedSaturatedSub(size_t esize, const U128& a, const U128& b);
+    U128 VectorUnsignedSaturatedAdd(size_t esize, const U128& a, const U128& b);
+    U128 VectorUnsignedSaturatedSub(size_t esize, const U128& a, const U128& b);
 
     ResultAndGE<U32> PackedAddU8(const U32& a, const U32& b);
     ResultAndGE<U32> PackedAddS8(const U32& a, const U32& b);
@@ -421,6 +421,10 @@ bool Inst::WritesToFPSRCumulativeSaturationBit() const {
     case Opcode::VectorSignedSaturatedAccumulateUnsigned16:
     case Opcode::VectorSignedSaturatedAccumulateUnsigned32:
     case Opcode::VectorSignedSaturatedAccumulateUnsigned64:
+    case Opcode::VectorSignedSaturatedAdd8:
+    case Opcode::VectorSignedSaturatedAdd16:
+    case Opcode::VectorSignedSaturatedAdd32:
+    case Opcode::VectorSignedSaturatedAdd64:
     case Opcode::VectorSignedSaturatedDoublingMultiply16:
     case Opcode::VectorSignedSaturatedDoublingMultiply32:
     case Opcode::VectorSignedSaturatedDoublingMultiplyLong16:
@@ -443,10 +447,18 @@ bool Inst::WritesToFPSRCumulativeSaturationBit() const {
     case Opcode::VectorSignedSaturatedShiftLeftUnsigned16:
     case Opcode::VectorSignedSaturatedShiftLeftUnsigned32:
     case Opcode::VectorSignedSaturatedShiftLeftUnsigned64:
+    case Opcode::VectorSignedSaturatedSub8:
+    case Opcode::VectorSignedSaturatedSub16:
+    case Opcode::VectorSignedSaturatedSub32:
+    case Opcode::VectorSignedSaturatedSub64:
     case Opcode::VectorUnsignedSaturatedAccumulateSigned8:
     case Opcode::VectorUnsignedSaturatedAccumulateSigned16:
     case Opcode::VectorUnsignedSaturatedAccumulateSigned32:
     case Opcode::VectorUnsignedSaturatedAccumulateSigned64:
+    case Opcode::VectorUnsignedSaturatedAdd8:
+    case Opcode::VectorUnsignedSaturatedAdd16:
+    case Opcode::VectorUnsignedSaturatedAdd32:
+    case Opcode::VectorUnsignedSaturatedAdd64:
     case Opcode::VectorUnsignedSaturatedNarrow16:
     case Opcode::VectorUnsignedSaturatedNarrow32:
     case Opcode::VectorUnsignedSaturatedNarrow64:
@@ -454,6 +466,10 @@ bool Inst::WritesToFPSRCumulativeSaturationBit() const {
     case Opcode::VectorUnsignedSaturatedShiftLeft16:
     case Opcode::VectorUnsignedSaturatedShiftLeft32:
     case Opcode::VectorUnsignedSaturatedShiftLeft64:
+    case Opcode::VectorUnsignedSaturatedSub8:
+    case Opcode::VectorUnsignedSaturatedSub16:
+    case Opcode::VectorUnsignedSaturatedSub32:
+    case Opcode::VectorUnsignedSaturatedSub64:
         return true;
 
     default:
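Note: tagging the new saturating add/sub opcodes in `WritesToFPSRCumulativeSaturationBit` is what lets the rest of the pipeline know these instructions have an observable effect besides their vector result. Any pass that removes unused instructions has to respect that. A minimal sketch of such a check (the predicate name comes from this file; everything else is hypothetical illustration, not dynarmic's optimizer):

// Hypothetical, simplified instruction record for illustration only.
struct InstModel {
    int use_count = 0;       // how many other instructions read the result
    bool writes_qc = false;  // would come from the opcode table in dynarmic

    bool HasUses() const { return use_count > 0; }
    bool WritesToFPSRCumulativeSaturationBit() const { return writes_qc; }
};

// A dead-code-elimination-style filter must keep an instruction whose only
// observable effect is setting QC, even if nothing reads its vector result.
bool CanDiscard(const InstModel& inst) {
    return !inst.HasUses() && !inst.WritesToFPSRCumulativeSaturationBit();
}

int main() {
    const InstModel unused_plain_op{0, false};
    const InstModel unused_vqsub{0, true};  // e.g. VectorUnsignedSaturatedSub32 with an unread result
    return (CanDiscard(unused_plain_op) && !CanDiscard(unused_vqsub)) ? 0 : 1;
}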