From f3845cea9aa8a56e77d933e0c69056523ff965cd Mon Sep 17 00:00:00 2001
From: MerryMage
Date: Sat, 30 May 2020 16:10:51 +0100
Subject: [PATCH] A32: Implement ASIMD VQSUB instruction

---
 .../x64/emit_x64_vector_saturation.cpp        | 160 +++++++-----
 src/frontend/A32/decoder/asimd.inc            |   2 +-
 .../A32/translate/impl/asimd_three_same.cpp   |  27 ++-
 .../A32/translate/impl/translate_arm.h        |   1 +
 .../A64/translate/impl/simd_three_same.cpp    |   6 +-
 src/frontend/ir/ir_emitter.cpp                | 121 ++++++-------
 src/frontend/ir/ir_emitter.h                  |   8 +-
 src/frontend/ir/microinstruction.cpp          |  16 ++
 8 files changed, 157 insertions(+), 184 deletions(-)

diff --git a/src/backend/x64/emit_x64_vector_saturation.cpp b/src/backend/x64/emit_x64_vector_saturation.cpp
index 763462e4..8bbf54ef 100644
--- a/src/backend/x64/emit_x64_vector_saturation.cpp
+++ b/src/backend/x64/emit_x64_vector_saturation.cpp
@@ -16,39 +16,30 @@ using namespace Xbyak::util;
 
 namespace {
 
 void EmitVectorSaturatedNative(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst, void (Xbyak::CodeGenerator::*saturated_fn)(const Xbyak::Mmx& mmx, const Xbyak::Operand&), void (Xbyak::CodeGenerator::*unsaturated_fn)(const Xbyak::Mmx& mmx, const Xbyak::Operand&), void (Xbyak::CodeGenerator::*sub_fn)(const Xbyak::Mmx& mmx, const Xbyak::Operand&)) {
-    const auto overflow_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp);
-
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
     const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
     const Xbyak::Xmm addend = ctx.reg_alloc.UseXmm(args[1]);
+    const Xbyak::Reg8 overflow = ctx.reg_alloc.ScratchGpr().cvt8();
 
-    if (overflow_inst) {
-        code.movaps(xmm0, result);
-    }
+    code.movaps(xmm0, result);
 
     (code.*saturated_fn)(result, addend);
 
-    if (overflow_inst) {
-        const Xbyak::Reg8 overflow = ctx.reg_alloc.ScratchGpr().cvt8();
-
-        (code.*unsaturated_fn)(xmm0, addend);
-        (code.*sub_fn)(xmm0, result);
-        if (code.DoesCpuSupport(Xbyak::util::Cpu::tSSE41)) {
-            code.ptest(xmm0, xmm0);
-        } else {
-            const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
-            code.pxor(tmp, tmp);
-            code.pcmpeqw(xmm0, tmp);
-            code.pmovmskb(overflow.cvt32(), xmm0);
-            code.xor_(overflow.cvt32(), 0xFFFF);
-            code.test(overflow.cvt32(), overflow.cvt32());
-        }
-        code.setnz(overflow);
-
-        ctx.reg_alloc.DefineValue(overflow_inst, overflow);
-        ctx.EraseInstruction(overflow_inst);
+    (code.*unsaturated_fn)(xmm0, addend);
+    (code.*sub_fn)(xmm0, result);
+    if (code.DoesCpuSupport(Xbyak::util::Cpu::tSSE41)) {
+        code.ptest(xmm0, xmm0);
+    } else {
+        const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
+        code.pxor(tmp, tmp);
+        code.pcmpeqw(xmm0, tmp);
+        code.pmovmskb(overflow.cvt32(), xmm0);
+        code.xor_(overflow.cvt32(), 0xFFFF);
+        code.test(overflow.cvt32(), overflow.cvt32());
     }
+    code.setnz(overflow);
+    code.or_(code.byte[code.r15 + code.GetJitStateInfo().offsetof_fpsr_qc], overflow);
 
     ctx.reg_alloc.DefineValue(inst, result);
 }
@@ -63,13 +54,12 @@ void EmitVectorSignedSaturated(BlockOfCode& code, EmitContext& ctx, IR::Inst* in
     static_assert(esize == 32 || esize == 64);
     constexpr u64 msb_mask = esize == 32 ? 0x8000000080000000 : 0x8000000000000000;
 
-    const auto overflow_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp);
-
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
     const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
     const Xbyak::Xmm arg = ctx.reg_alloc.UseXmm(args[1]);
     const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
+    const Xbyak::Reg8 overflow = ctx.reg_alloc.ScratchGpr().cvt8();
 
     // TODO AVX-512: vpternlog, vpsraq
     // TODO AVX2 implementation
 
@@ -106,24 +96,18 @@ void EmitVectorSignedSaturated(BlockOfCode& code, EmitContext& ctx, IR::Inst* in
     }
     code.pxor(tmp, code.MConst(xword, msb_mask, msb_mask));
 
-    if (overflow_inst) {
-        const Xbyak::Reg8 overflow = ctx.reg_alloc.ScratchGpr().cvt8();
-
-        if (code.DoesCpuSupport(Xbyak::util::Cpu::tSSE41)) {
-            code.ptest(xmm0, code.MConst(xword, msb_mask, msb_mask));
+    if (code.DoesCpuSupport(Xbyak::util::Cpu::tSSE41)) {
+        code.ptest(xmm0, code.MConst(xword, msb_mask, msb_mask));
+    } else {
+        if constexpr (esize == 32) {
+            code.movmskps(overflow.cvt32(), xmm0);
         } else {
-            if constexpr (esize == 32) {
-                code.movmskps(overflow.cvt32(), xmm0);
-            } else {
-                code.movmskpd(overflow.cvt32(), xmm0);
-            }
-            code.test(overflow.cvt32(), overflow.cvt32());
+            code.movmskpd(overflow.cvt32(), xmm0);
         }
-        code.setnz(overflow);
-
-        ctx.reg_alloc.DefineValue(overflow_inst, overflow);
-        ctx.EraseInstruction(overflow_inst);
+        code.test(overflow.cvt32(), overflow.cvt32());
     }
+    code.setnz(overflow);
+    code.or_(code.byte[code.r15 + code.GetJitStateInfo().offsetof_fpsr_qc], overflow);
 
     if (code.DoesCpuSupport(Xbyak::util::Cpu::tSSE41)) {
         if constexpr (esize == 32) {
@@ -190,13 +174,12 @@ void EmitX64::EmitVectorUnsignedSaturatedAdd16(EmitContext& ctx, IR::Inst* inst)
 }
 
 void EmitX64::EmitVectorUnsignedSaturatedAdd32(EmitContext& ctx, IR::Inst* inst) {
-    const auto overflow_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp);
-
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
     const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
     const Xbyak::Xmm addend = ctx.reg_alloc.UseXmm(args[1]);
     const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
+    const Xbyak::Reg8 overflow = ctx.reg_alloc.ScratchGpr().cvt8();
 
     // TODO AVX2, AVX-512: vpternlog
 
@@ -213,32 +196,25 @@ void EmitX64::EmitVectorUnsignedSaturatedAdd32(EmitContext& ctx, IR::Inst* inst)
 
     code.por(result, tmp);
 
-    if (overflow_inst) {
-        const Xbyak::Reg8 overflow = ctx.reg_alloc.ScratchGpr().cvt8();
-
-        if (code.DoesCpuSupport(Xbyak::util::Cpu::tSSE41)) {
-            code.ptest(tmp, tmp);
-        } else {
-            code.movmskps(overflow.cvt32(), tmp);
-            code.test(overflow.cvt32(), overflow.cvt32());
-        }
-        code.setnz(overflow);
-
-        ctx.reg_alloc.DefineValue(overflow_inst, overflow);
-        ctx.EraseInstruction(overflow_inst);
+    if (code.DoesCpuSupport(Xbyak::util::Cpu::tSSE41)) {
+        code.ptest(tmp, tmp);
+    } else {
+        code.movmskps(overflow.cvt32(), tmp);
+        code.test(overflow.cvt32(), overflow.cvt32());
     }
+    code.setnz(overflow);
+    code.or_(code.byte[code.r15 + code.GetJitStateInfo().offsetof_fpsr_qc], overflow);
 
     ctx.reg_alloc.DefineValue(inst, result);
 }
 
 void EmitX64::EmitVectorUnsignedSaturatedAdd64(EmitContext& ctx, IR::Inst* inst) {
-    const auto overflow_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp);
-
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
     const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
     const Xbyak::Xmm addend = ctx.reg_alloc.UseXmm(args[1]);
     const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
+    const Xbyak::Reg8 overflow = ctx.reg_alloc.ScratchGpr().cvt8();
 
     // TODO AVX2, AVX-512: vpternlog
 
@@ -256,20 +232,14 @@ void EmitX64::EmitVectorUnsignedSaturatedAdd64(EmitContext& ctx, IR::Inst* inst)
 
     code.por(result, tmp);
 
-    if (overflow_inst) {
-        const Xbyak::Reg8 overflow = ctx.reg_alloc.ScratchGpr().cvt8();
-
-        if (code.DoesCpuSupport(Xbyak::util::Cpu::tSSE41)) {
-            code.ptest(tmp, tmp);
-        } else {
-            code.movmskpd(overflow.cvt32(), tmp);
-            code.test(overflow.cvt32(), overflow.cvt32());
-        }
-        code.setnz(overflow);
-
-        ctx.reg_alloc.DefineValue(overflow_inst, overflow);
-        ctx.EraseInstruction(overflow_inst);
+    if (code.DoesCpuSupport(Xbyak::util::Cpu::tSSE41)) {
+        code.ptest(tmp, tmp);
+    } else {
+        code.movmskpd(overflow.cvt32(), tmp);
+        code.test(overflow.cvt32(), overflow.cvt32());
     }
+    code.setnz(overflow);
+    code.or_(code.byte[code.r15 + code.GetJitStateInfo().offsetof_fpsr_qc], overflow);
 
     ctx.reg_alloc.DefineValue(inst, result);
 }
@@ -283,13 +253,12 @@ void EmitX64::EmitVectorUnsignedSaturatedSub16(EmitContext& ctx, IR::Inst* inst)
 }
 
 void EmitX64::EmitVectorUnsignedSaturatedSub32(EmitContext& ctx, IR::Inst* inst) {
-    const auto overflow_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp);
-
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
     const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
     const Xbyak::Xmm subtrahend = ctx.reg_alloc.UseXmm(args[1]);
     const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
+    const Xbyak::Reg8 overflow = ctx.reg_alloc.ScratchGpr().cvt8();
 
     // TODO AVX2, AVX-512: vpternlog
 
@@ -304,34 +273,26 @@ void EmitX64::EmitVectorUnsignedSaturatedSub32(EmitContext& ctx, IR::Inst* inst)
     code.psubd(tmp, xmm0);
     code.psrad(tmp, 31);
 
-    if (overflow_inst) {
-        const Xbyak::Reg8 overflow = ctx.reg_alloc.ScratchGpr().cvt8();
-
-        if (code.DoesCpuSupport(Xbyak::util::Cpu::tSSE41)) {
-            code.ptest(tmp, tmp);
-        } else {
-            code.movmskps(overflow.cvt32(), tmp);
-            code.test(overflow.cvt32(), overflow.cvt32());
-        }
-        code.setnz(overflow);
-
-        ctx.reg_alloc.DefineValue(overflow_inst, overflow);
-        ctx.EraseInstruction(overflow_inst);
+    if (code.DoesCpuSupport(Xbyak::util::Cpu::tSSE41)) {
+        code.ptest(tmp, tmp);
+    } else {
+        code.movmskps(overflow.cvt32(), tmp);
+        code.test(overflow.cvt32(), overflow.cvt32());
     }
+    code.setnz(overflow);
+    code.or_(code.byte[code.r15 + code.GetJitStateInfo().offsetof_fpsr_qc], overflow);
 
     code.pandn(tmp, result);
-
     ctx.reg_alloc.DefineValue(inst, tmp);
 }
 
 void EmitX64::EmitVectorUnsignedSaturatedSub64(EmitContext& ctx, IR::Inst* inst) {
-    const auto overflow_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp);
-
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
     const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
     const Xbyak::Xmm subtrahend = ctx.reg_alloc.UseXmm(args[1]);
     const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
+    const Xbyak::Reg8 overflow = ctx.reg_alloc.ScratchGpr().cvt8();
 
     // TODO AVX2, AVX-512: vpternlog
 
@@ -347,23 +308,16 @@ void EmitX64::EmitVectorUnsignedSaturatedSub64(EmitContext& ctx, IR::Inst* inst)
     code.psrad(tmp, 31);
     code.pshufd(tmp, tmp, 0b11110101);
 
-    if (overflow_inst) {
-        const Xbyak::Reg8 overflow = ctx.reg_alloc.ScratchGpr().cvt8();
-
-        if (code.DoesCpuSupport(Xbyak::util::Cpu::tSSE41)) {
-            code.ptest(tmp, tmp);
-        } else {
-            code.movmskpd(overflow.cvt32(), tmp);
-            code.test(overflow.cvt32(), overflow.cvt32());
-        }
-        code.setnz(overflow);
-
-        ctx.reg_alloc.DefineValue(overflow_inst, overflow);
-        ctx.EraseInstruction(overflow_inst);
+    if (code.DoesCpuSupport(Xbyak::util::Cpu::tSSE41)) {
+        code.ptest(tmp, tmp);
+    } else {
+        code.movmskpd(overflow.cvt32(), tmp);
+        code.test(overflow.cvt32(), overflow.cvt32());
     }
+    code.setnz(overflow);
+    code.or_(code.byte[code.r15 + code.GetJitStateInfo().offsetof_fpsr_qc], overflow);
 
     code.pandn(tmp, result);
-
     ctx.reg_alloc.DefineValue(inst, tmp);
 }
 
diff --git a/src/frontend/A32/decoder/asimd.inc b/src/frontend/A32/decoder/asimd.inc
index 89ae7ec6..4e9f870f 100644
--- a/src/frontend/A32/decoder/asimd.inc
+++ b/src/frontend/A32/decoder/asimd.inc
@@ -11,7 +11,7 @@ INST(asimd_VBSL,         "VBSL",                "111100110D01nnnndddd000
 INST(asimd_VBIT,         "VBIT",                "111100110D10nnnndddd0001NQM1mmmm") // ASIMD
 INST(asimd_VBIF,         "VBIF",                "111100110D11nnnndddd0001NQM1mmmm") // ASIMD
 INST(asimd_VHSUB,        "VHSUB",               "1111001U0Dzznnnndddd0010NQM0mmmm") // ASIMD
-//INST(asimd_VQSUB,        "VQSUB",               "1111001U0-CC--------0010---1----") // ASIMD
+INST(asimd_VQSUB,        "VQSUB",               "1111001U0Dzznnnndddd0010NQM1mmmm") // ASIMD
 //INST(asimd_VCGT_reg,     "VCGT (register)",     "1111001U0-CC--------0011---0----") // ASIMD
 //INST(asimd_VCGE_reg,     "VCGE (register)",     "1111001U0-CC--------0011---1----") // ASIMD
 //INST(asimd_VSHL_reg,     "VSHL (register)",     "1111001U0-CC--------0100---0----") // ASIMD
diff --git a/src/frontend/A32/translate/impl/asimd_three_same.cpp b/src/frontend/A32/translate/impl/asimd_three_same.cpp
index d041d152..8186d998 100644
--- a/src/frontend/A32/translate/impl/asimd_three_same.cpp
+++ b/src/frontend/A32/translate/impl/asimd_three_same.cpp
@@ -74,9 +74,8 @@ bool ArmTranslatorVisitor::asimd_VQADD(bool U, bool D, size_t sz, size_t Vn, siz
 
     const IR::U128 reg_n = ir.GetVector(n);
     const IR::U128 reg_m = ir.GetVector(m);
-    const auto result = U ? ir.VectorUnsignedSaturatedAdd(esize, reg_n, reg_m) : ir.VectorSignedSaturatedAdd(esize, reg_n, reg_m);
-    ir.OrQFlag(result.overflow);
-    ir.SetVector(d, result.result);
+    const IR::U128 result = U ? ir.VectorUnsignedSaturatedAdd(esize, reg_n, reg_m) : ir.VectorSignedSaturatedAdd(esize, reg_n, reg_m);
+    ir.SetVector(d, result);
 
     return true;
 }
@@ -173,4 +172,26 @@ bool ArmTranslatorVisitor::asimd_VHSUB(bool U, bool D, size_t sz, size_t Vn, siz
     return true;
 }
 
+bool ArmTranslatorVisitor::asimd_VQSUB(bool U, bool D, size_t sz, size_t Vn, size_t Vd, bool N, bool Q, bool M, size_t Vm) {
+    if (Q && (Common::Bit<0>(Vd) || Common::Bit<0>(Vn) || Common::Bit<0>(Vm))) {
+        return UndefinedInstruction();
+    }
+
+    if (sz == 0b11) {
+        return UndefinedInstruction();
+    }
+
+    const size_t esize = 8 << sz;
+    const auto d = ToVector(Q, Vd, D);
+    const auto m = ToVector(Q, Vm, M);
+    const auto n = ToVector(Q, Vn, N);
+
+    const IR::U128 reg_n = ir.GetVector(n);
+    const IR::U128 reg_m = ir.GetVector(m);
+    const IR::U128 result = U ? ir.VectorUnsignedSaturatedSub(esize, reg_n, reg_m) : ir.VectorSignedSaturatedSub(esize, reg_n, reg_m);
+    ir.SetVector(d, result);
+
+    return true;
+}
+
 } // namespace Dynarmic::A32
diff --git a/src/frontend/A32/translate/impl/translate_arm.h b/src/frontend/A32/translate/impl/translate_arm.h
index 571900c7..745010e3 100644
--- a/src/frontend/A32/translate/impl/translate_arm.h
+++ b/src/frontend/A32/translate/impl/translate_arm.h
@@ -446,6 +446,7 @@ struct ArmTranslatorVisitor final {
     bool asimd_VBIT(bool D, size_t Vn, size_t Vd, bool N, bool Q, bool M, size_t Vm);
     bool asimd_VBIF(bool D, size_t Vn, size_t Vd, bool N, bool Q, bool M, size_t Vm);
     bool asimd_VHSUB(bool U, bool D, size_t sz, size_t Vn, size_t Vd, bool N, bool Q, bool M, size_t Vm);
+    bool asimd_VQSUB(bool U, bool D, size_t sz, size_t Vn, size_t Vd, bool N, bool Q, bool M, size_t Vm);
 
     // Advanced SIMD two register, miscellaneous
     bool asimd_VSWP(bool D, size_t Vd, bool Q, bool M, size_t Vm);
diff --git a/src/frontend/A64/translate/impl/simd_three_same.cpp b/src/frontend/A64/translate/impl/simd_three_same.cpp
index 50e54495..62d5f938 100644
--- a/src/frontend/A64/translate/impl/simd_three_same.cpp
+++ b/src/frontend/A64/translate/impl/simd_three_same.cpp
@@ -335,7 +335,7 @@ bool SaturatingArithmeticOperation(TranslatorVisitor& v, bool Q, Imm<2> size, Ve
     const IR::U128 operand1 = v.V(datasize, Vn);
     const IR::U128 operand2 = v.V(datasize, Vm);
 
-    const auto result = [&] {
+    const IR::U128 result = [&] {
         if (sign == Signedness::Signed) {
             if (op == Operation::Add) {
                 return v.ir.VectorSignedSaturatedAdd(esize, operand1, operand2);
@@ -351,9 +351,7 @@ bool SaturatingArithmeticOperation(TranslatorVisitor& v, bool Q, Imm<2> size, Ve
         return v.ir.VectorUnsignedSaturatedSub(esize, operand1, operand2);
     }();
 
-    v.ir.OrQC(result.overflow);
-
-    v.V(datasize, Vd, result.result);
+    v.V(datasize, Vd, result);
     return true;
 }
 
diff --git a/src/frontend/ir/ir_emitter.cpp b/src/frontend/ir/ir_emitter.cpp
index dad61212..c243791d 100644
--- a/src/frontend/ir/ir_emitter.cpp
+++ b/src/frontend/ir/ir_emitter.cpp
@@ -635,83 +635,66 @@ ResultAndOverflow<U32> IREmitter::UnsignedSaturation(const U32& a, size_t bit_si
     return {result, overflow};
 }
 
-ResultAndOverflow<U128> IREmitter::VectorSignedSaturatedAdd(size_t esize, const U128& a, const U128& b) {
-    const auto result = [&]{
-        switch (esize) {
-        case 8:
-            return Inst<U128>(Opcode::VectorSignedSaturatedAdd8, a, b);
-        case 16:
-            return Inst<U128>(Opcode::VectorSignedSaturatedAdd16, a, b);
-        case 32:
-            return Inst<U128>(Opcode::VectorSignedSaturatedAdd32, a, b);
-        case 64:
-            return Inst<U128>(Opcode::VectorSignedSaturatedAdd64, a, b);
-        default:
-            UNREACHABLE();
-        }
-    }();
-    const auto overflow = Inst<U1>(Opcode::GetOverflowFromOp, result);
-    return {result, overflow};
+U128 IREmitter::VectorSignedSaturatedAdd(size_t esize, const U128& a, const U128& b) {
+    switch (esize) {
+    case 8:
+        return Inst<U128>(Opcode::VectorSignedSaturatedAdd8, a, b);
+    case 16:
+        return Inst<U128>(Opcode::VectorSignedSaturatedAdd16, a, b);
+    case 32:
+        return Inst<U128>(Opcode::VectorSignedSaturatedAdd32, a, b);
+    case 64:
+        return Inst<U128>(Opcode::VectorSignedSaturatedAdd64, a, b);
+    default:
+        UNREACHABLE();
+    }
 }
 
-ResultAndOverflow<U128> IREmitter::VectorSignedSaturatedSub(size_t esize, const U128& a, const U128& b) {
-    const auto result = [&]{
-        switch (esize) {
-        case 8:
-            return Inst<U128>(Opcode::VectorSignedSaturatedSub8, a, b);
-        case 16:
-            return Inst<U128>(Opcode::VectorSignedSaturatedSub16, a, b);
-        case 32:
-            return Inst<U128>(Opcode::VectorSignedSaturatedSub32, a, b);
-        case 64:
-            return Inst<U128>(Opcode::VectorSignedSaturatedSub64, a, b);
-        default:
-            UNREACHABLE();
-        }
-    }();
-    const auto overflow = Inst<U1>(Opcode::GetOverflowFromOp, result);
-    return {result, overflow};
+U128 IREmitter::VectorSignedSaturatedSub(size_t esize, const U128& a, const U128& b) {
+    switch (esize) {
+    case 8:
+        return Inst<U128>(Opcode::VectorSignedSaturatedSub8, a, b);
+    case 16:
+        return Inst<U128>(Opcode::VectorSignedSaturatedSub16, a, b);
+    case 32:
+        return Inst<U128>(Opcode::VectorSignedSaturatedSub32, a, b);
+    case 64:
+        return Inst<U128>(Opcode::VectorSignedSaturatedSub64, a, b);
+    default:
+        UNREACHABLE();
+    }
 }
 
-ResultAndOverflow<U128> IREmitter::VectorUnsignedSaturatedAdd(size_t esize, const U128& a, const U128& b) {
-    const auto result = [&]{
-        switch (esize) {
-        case 8:
-            return Inst<U128>(Opcode::VectorUnsignedSaturatedAdd8, a, b);
-        case 16:
-            return Inst<U128>(Opcode::VectorUnsignedSaturatedAdd16, a, b);
-        case 32:
-            return Inst<U128>(Opcode::VectorUnsignedSaturatedAdd32, a, b);
-        case 64:
-            return Inst<U128>(Opcode::VectorUnsignedSaturatedAdd64, a, b);
-        default:
-            UNREACHABLE();
-        }
-    }();
-    const auto overflow = Inst<U1>(Opcode::GetOverflowFromOp, result);
-    return {result, overflow};
+U128 IREmitter::VectorUnsignedSaturatedAdd(size_t esize, const U128& a, const U128& b) {
+    switch (esize) {
+    case 8:
+        return Inst<U128>(Opcode::VectorUnsignedSaturatedAdd8, a, b);
+    case 16:
+        return Inst<U128>(Opcode::VectorUnsignedSaturatedAdd16, a, b);
+    case 32:
+        return Inst<U128>(Opcode::VectorUnsignedSaturatedAdd32, a, b);
+    case 64:
+        return Inst<U128>(Opcode::VectorUnsignedSaturatedAdd64, a, b);
+    default:
+        UNREACHABLE();
+    }
 }
 
-ResultAndOverflow<U128> IREmitter::VectorUnsignedSaturatedSub(size_t esize, const U128& a, const U128& b) {
-    const auto result = [&]{
-        switch (esize) {
-        case 8:
-            return Inst<U128>(Opcode::VectorUnsignedSaturatedSub8, a, b);
-        case 16:
-            return Inst<U128>(Opcode::VectorUnsignedSaturatedSub16, a, b);
-        case 32:
-            return Inst<U128>(Opcode::VectorUnsignedSaturatedSub32, a, b);
-        case 64:
-            return Inst<U128>(Opcode::VectorUnsignedSaturatedSub64, a, b);
-        default:
-            UNREACHABLE();
-        }
-    }();
-    const auto overflow = Inst<U1>(Opcode::GetOverflowFromOp, result);
-    return {result, overflow};
+U128 IREmitter::VectorUnsignedSaturatedSub(size_t esize, const U128& a, const U128& b) {
+    switch (esize) {
+    case 8:
+        return Inst<U128>(Opcode::VectorUnsignedSaturatedSub8, a, b);
+    case 16:
+        return Inst<U128>(Opcode::VectorUnsignedSaturatedSub16, a, b);
+    case 32:
+        return Inst<U128>(Opcode::VectorUnsignedSaturatedSub32, a, b);
+    case 64:
+        return Inst<U128>(Opcode::VectorUnsignedSaturatedSub64, a, b);
+    default:
+        UNREACHABLE();
+    }
 }
-
 
 ResultAndGE<U32> IREmitter::PackedAddU8(const U32& a, const U32& b) {
     const auto result = Inst<U32>(Opcode::PackedAddU8, a, b);
     const auto ge = Inst<U32>(Opcode::GetGEFromOp, result);
diff --git a/src/frontend/ir/ir_emitter.h b/src/frontend/ir/ir_emitter.h
index 4b28b0d9..16403cf2 100644
--- a/src/frontend/ir/ir_emitter.h
+++ b/src/frontend/ir/ir_emitter.h
@@ -166,10 +166,10 @@ public:
     ResultAndOverflow<UAny> UnsignedSaturatedSub(const UAny& a, const UAny& b);
     ResultAndOverflow<U32> UnsignedSaturation(const U32& a, size_t bit_size_to_saturate_to);
 
-    ResultAndOverflow<U128> VectorSignedSaturatedAdd(size_t esize, const U128& a, const U128& b);
-    ResultAndOverflow<U128> VectorSignedSaturatedSub(size_t esize, const U128& a, const U128& b);
-    ResultAndOverflow<U128> VectorUnsignedSaturatedAdd(size_t esize, const U128& a, const U128& b);
-    ResultAndOverflow<U128> VectorUnsignedSaturatedSub(size_t esize, const U128& a, const U128& b);
+    U128 VectorSignedSaturatedAdd(size_t esize, const U128& a, const U128& b);
+    U128 VectorSignedSaturatedSub(size_t esize, const U128& a, const U128& b);
+    U128 VectorUnsignedSaturatedAdd(size_t esize, const U128& a, const U128& b);
+    U128 VectorUnsignedSaturatedSub(size_t esize, const U128& a, const U128& b);
 
     ResultAndGE<U32> PackedAddU8(const U32& a, const U32& b);
     ResultAndGE<U32> PackedAddS8(const U32& a, const U32& b);
diff --git a/src/frontend/ir/microinstruction.cpp b/src/frontend/ir/microinstruction.cpp
index 40ac6cc6..5d645b3c 100644
--- a/src/frontend/ir/microinstruction.cpp
+++ b/src/frontend/ir/microinstruction.cpp
@@ -421,6 +421,10 @@ bool Inst::WritesToFPSRCumulativeSaturationBit() const {
     case Opcode::VectorSignedSaturatedAccumulateUnsigned16:
     case Opcode::VectorSignedSaturatedAccumulateUnsigned32:
    case Opcode::VectorSignedSaturatedAccumulateUnsigned64:
+    case Opcode::VectorSignedSaturatedAdd8:
+    case Opcode::VectorSignedSaturatedAdd16:
+    case Opcode::VectorSignedSaturatedAdd32:
+    case Opcode::VectorSignedSaturatedAdd64:
     case Opcode::VectorSignedSaturatedDoublingMultiply16:
     case Opcode::VectorSignedSaturatedDoublingMultiply32:
     case Opcode::VectorSignedSaturatedDoublingMultiplyLong16:
@@ -443,10 +447,18 @@ bool Inst::WritesToFPSRCumulativeSaturationBit() const {
     case Opcode::VectorSignedSaturatedShiftLeftUnsigned16:
     case Opcode::VectorSignedSaturatedShiftLeftUnsigned32:
     case Opcode::VectorSignedSaturatedShiftLeftUnsigned64:
+    case Opcode::VectorSignedSaturatedSub8:
+    case Opcode::VectorSignedSaturatedSub16:
+    case Opcode::VectorSignedSaturatedSub32:
+    case Opcode::VectorSignedSaturatedSub64:
     case Opcode::VectorUnsignedSaturatedAccumulateSigned8:
     case Opcode::VectorUnsignedSaturatedAccumulateSigned16:
     case Opcode::VectorUnsignedSaturatedAccumulateSigned32:
     case Opcode::VectorUnsignedSaturatedAccumulateSigned64:
+    case Opcode::VectorUnsignedSaturatedAdd8:
+    case Opcode::VectorUnsignedSaturatedAdd16:
+    case Opcode::VectorUnsignedSaturatedAdd32:
+    case Opcode::VectorUnsignedSaturatedAdd64:
     case Opcode::VectorUnsignedSaturatedNarrow16:
     case Opcode::VectorUnsignedSaturatedNarrow32:
     case Opcode::VectorUnsignedSaturatedNarrow64:
@@ -454,6 +466,10 @@ bool Inst::WritesToFPSRCumulativeSaturationBit() const {
     case Opcode::VectorUnsignedSaturatedShiftLeft16:
     case Opcode::VectorUnsignedSaturatedShiftLeft32:
     case Opcode::VectorUnsignedSaturatedShiftLeft64:
+    case Opcode::VectorUnsignedSaturatedSub8:
+    case Opcode::VectorUnsignedSaturatedSub16:
+    case Opcode::VectorUnsignedSaturatedSub32:
+    case Opcode::VectorUnsignedSaturatedSub64:
         return true;
 
     default:
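
Reviewer note (not part of the patch): VQSUB performs an elementwise saturating subtraction and sets the sticky FPSCR.QC bit whenever any lane saturates, which is why the backend above ORs the per-lane overflow mask directly into offsetof_fpsr_qc instead of defining a separate GetOverflowFromOp value. The standalone C++ sketch below models that per-lane behaviour for 8-bit elements under those assumptions; the helper names and the qc variable are illustrative only and are not dynarmic code.

// Illustrative sketch only -- not dynarmic code. Models one lane of VQSUB
// plus the sticky QC (cumulative saturation) flag.
#include <algorithm>
#include <cstdint>
#include <cstdio>

static bool qc = false;  // stands in for FPSCR.QC

// Signed 8-bit saturating subtract: clamp the widened difference to [-128, 127].
static int8_t sat_sub_s8(int8_t a, int8_t b) {
    const int diff = static_cast<int>(a) - static_cast<int>(b);
    const int clamped = std::clamp(diff, -128, 127);
    qc = qc || (clamped != diff);  // QC is sticky: once set, it stays set
    return static_cast<int8_t>(clamped);
}

// Unsigned 8-bit saturating subtract: clamp the widened difference to [0, 255].
static uint8_t sat_sub_u8(uint8_t a, uint8_t b) {
    const int diff = static_cast<int>(a) - static_cast<int>(b);
    const int clamped = std::clamp(diff, 0, 255);
    qc = qc || (clamped != diff);
    return static_cast<uint8_t>(clamped);
}

int main() {
    std::printf("%d\n", sat_sub_s8(-100, 100));  // prints -128 (saturated)
    std::printf("%d\n", sat_sub_u8(10, 20));     // prints 0 (saturated)
    std::printf("qc=%d\n", qc ? 1 : 0);          // prints qc=1
}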