From 90193b0e3d6f718c65d3e37358354fde1109ce74 Mon Sep 17 00:00:00 2001
From: MerryMage
Date: Tue, 18 Sep 2018 20:36:37 +0100
Subject: [PATCH] IR: Add fbits argument to FixedToFP-related opcodes

---
 src/backend/x64/emit_x64_floating_point.cpp   | 136 ++++++++++++------
 .../A32/translate/translate_arm/vfp2.cpp      |  10 +-
 .../floating_point_conversion_integer.cpp     |  24 ++--
 .../impl/simd_scalar_two_register_misc.cpp    |  26 ++--
 src/frontend/ir/ir_emitter.cpp                |  48 +++----
 src/frontend/ir/ir_emitter.h                  |  12 +-
 src/frontend/ir/microinstruction.cpp          |  16 +--
 src/frontend/ir/opcodes.inc                   |  16 +--
 8 files changed, 154 insertions(+), 134 deletions(-)

diff --git a/src/backend/x64/emit_x64_floating_point.cpp b/src/backend/x64/emit_x64_floating_point.cpp
index 80716794..1ee8c629 100644
--- a/src/backend/x64/emit_x64_floating_point.cpp
+++ b/src/backend/x64/emit_x64_floating_point.cpp
@@ -1201,82 +1201,115 @@ void EmitX64::EmitFPSingleToFixedU64(EmitContext& ctx, IR::Inst* inst) {
     EmitFPToFixed(code, ctx, inst, 32, true, 64);
 }
 
-void EmitX64::EmitFPS32ToSingle(EmitContext& ctx, IR::Inst* inst) {
+void EmitX64::EmitFPFixedS32ToSingle(EmitContext& ctx, IR::Inst* inst) {
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-    Xbyak::Reg32 from = ctx.reg_alloc.UseGpr(args[0]).cvt32();
-    Xbyak::Xmm to = ctx.reg_alloc.ScratchXmm();
-    bool round_to_nearest = args[1].GetImmediateU1();
-    ASSERT_MSG(!round_to_nearest, "round_to_nearest unimplemented");
-    code.cvtsi2ss(to, from);
+    const Xbyak::Reg32 from = ctx.reg_alloc.UseGpr(args[0]).cvt32();
+    const Xbyak::Xmm result = ctx.reg_alloc.ScratchXmm();
+    const size_t fbits = args[1].GetImmediateU8();
+    const FP::RoundingMode rounding_mode = static_cast<FP::RoundingMode>(args[2].GetImmediateU8());
+    ASSERT(rounding_mode == ctx.FPSCR_RMode());
 
-    ctx.reg_alloc.DefineValue(inst, to);
+    code.cvtsi2ss(result, from);
+
+    if (fbits != 0) {
+        const u32 scale_factor = static_cast<u32>((127 - fbits) << 23);
+        code.mulss(result, code.MConst(xword, scale_factor));
+    }
+
+    ctx.reg_alloc.DefineValue(inst, result);
 }
 
-void EmitX64::EmitFPU32ToSingle(EmitContext& ctx, IR::Inst* inst) {
+void EmitX64::EmitFPFixedU32ToSingle(EmitContext& ctx, IR::Inst* inst) {
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-    const Xbyak::Xmm to = ctx.reg_alloc.ScratchXmm();
-    const bool round_to_nearest = args[1].GetImmediateU1();
-    ASSERT_MSG(!round_to_nearest, "round_to_nearest unimplemented");
+    const Xbyak::Xmm result = ctx.reg_alloc.ScratchXmm();
+    const size_t fbits = args[1].GetImmediateU8();
+    const FP::RoundingMode rounding_mode = static_cast<FP::RoundingMode>(args[2].GetImmediateU8());
+    ASSERT(rounding_mode == ctx.FPSCR_RMode());
 
     if (code.DoesCpuSupport(Xbyak::util::Cpu::tAVX512F)) {
         const Xbyak::Reg64 from = ctx.reg_alloc.UseGpr(args[0]);
-        code.vcvtusi2ss(to, to, from.cvt32());
+        code.vcvtusi2ss(result, result, from.cvt32());
     } else {
         // We are using a 64-bit GPR register to ensure we don't end up treating the input as signed
        const Xbyak::Reg64 from = ctx.reg_alloc.UseScratchGpr(args[0]);
         code.mov(from.cvt32(), from.cvt32()); // TODO: Verify if this is necessary
-        code.cvtsi2ss(to, from);
+        code.cvtsi2ss(result, from);
     }
 
-    ctx.reg_alloc.DefineValue(inst, to);
+    if (fbits != 0) {
+        const u32 scale_factor = static_cast<u32>((127 - fbits) << 23);
+        code.mulss(result, code.MConst(xword, scale_factor));
+    }
+
+    ctx.reg_alloc.DefineValue(inst, result);
 }
 
-void EmitX64::EmitFPS32ToDouble(EmitContext& ctx, IR::Inst* inst) {
-    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-    Xbyak::Reg32 from = ctx.reg_alloc.UseGpr(args[0]).cvt32();
-    Xbyak::Xmm to = ctx.reg_alloc.ScratchXmm();
-    bool round_to_nearest = args[1].GetImmediateU1();
-    ASSERT_MSG(!round_to_nearest, "round_to_nearest unimplemented");
-
-    code.cvtsi2sd(to, from);
-
-    ctx.reg_alloc.DefineValue(inst, to);
-}
-
-void EmitX64::EmitFPS64ToDouble(EmitContext& ctx, IR::Inst* inst) {
+void EmitX64::EmitFPFixedS32ToDouble(EmitContext& ctx, IR::Inst* inst) {
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-    const Xbyak::Reg64 from = ctx.reg_alloc.UseGpr(args[0]);
+    const Xbyak::Reg32 from = ctx.reg_alloc.UseGpr(args[0]).cvt32();
     const Xbyak::Xmm result = ctx.reg_alloc.ScratchXmm();
-    const bool round_to_nearest = args[1].GetImmediateU1();
-    ASSERT_MSG(!round_to_nearest, "round_to_nearest unimplemented");
+    const size_t fbits = args[1].GetImmediateU8();
+    const FP::RoundingMode rounding_mode = static_cast<FP::RoundingMode>(args[2].GetImmediateU8());
+    ASSERT(rounding_mode == ctx.FPSCR_RMode());
 
     code.cvtsi2sd(result, from);
 
+    if (fbits != 0) {
+        const u64 scale_factor = static_cast<u64>((1023 - fbits) << 52);
+        code.mulsd(result, code.MConst(xword, scale_factor));
+    }
+
     ctx.reg_alloc.DefineValue(inst, result);
 }
 
-void EmitX64::EmitFPS64ToSingle(EmitContext& ctx, IR::Inst* inst) {
+void EmitX64::EmitFPFixedS64ToDouble(EmitContext& ctx, IR::Inst* inst) {
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
     const Xbyak::Reg64 from = ctx.reg_alloc.UseGpr(args[0]);
     const Xbyak::Xmm result = ctx.reg_alloc.ScratchXmm();
-    const bool round_to_nearest = args[1].GetImmediateU1();
-    ASSERT_MSG(!round_to_nearest, "round_to_nearest unimplemented");
+    const size_t fbits = args[1].GetImmediateU8();
+    const FP::RoundingMode rounding_mode = static_cast<FP::RoundingMode>(args[2].GetImmediateU8());
+    ASSERT(rounding_mode == ctx.FPSCR_RMode());
 
-    code.cvtsi2ss(result, from);
+    code.cvtsi2sd(result, from);
+
+    if (fbits != 0) {
+        const u64 scale_factor = static_cast<u64>((1023 - fbits) << 52);
+        code.mulsd(result, code.MConst(xword, scale_factor));
+    }
 
     ctx.reg_alloc.DefineValue(inst, result);
 }
 
-void EmitX64::EmitFPU32ToDouble(EmitContext& ctx, IR::Inst* inst) {
+void EmitX64::EmitFPFixedS64ToSingle(EmitContext& ctx, IR::Inst* inst) {
+    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
+
+    const Xbyak::Reg64 from = ctx.reg_alloc.UseGpr(args[0]);
+    const Xbyak::Xmm result = ctx.reg_alloc.ScratchXmm();
+    const size_t fbits = args[1].GetImmediateU8();
+    const FP::RoundingMode rounding_mode = static_cast<FP::RoundingMode>(args[2].GetImmediateU8());
+    ASSERT(rounding_mode == ctx.FPSCR_RMode());
+
+    code.cvtsi2ss(result, from);
+
+    if (fbits != 0) {
+        const u32 scale_factor = static_cast<u32>((127 - fbits) << 23);
+        code.mulss(result, code.MConst(xword, scale_factor));
+    }
+
+    ctx.reg_alloc.DefineValue(inst, result);
+}
+
+void EmitX64::EmitFPFixedU32ToDouble(EmitContext& ctx, IR::Inst* inst) {
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
     const Xbyak::Xmm to = ctx.reg_alloc.ScratchXmm();
-    const bool round_to_nearest = args[1].GetImmediateU1();
-    ASSERT_MSG(!round_to_nearest, "round_to_nearest unimplemented");
+    const size_t fbits = args[1].GetImmediateU8();
+    const FP::RoundingMode rounding_mode = static_cast<FP::RoundingMode>(args[2].GetImmediateU8());
+    ASSERT(rounding_mode == ctx.FPSCR_RMode());
 
     if (code.DoesCpuSupport(Xbyak::util::Cpu::tAVX512F)) {
         const Xbyak::Reg64 from = ctx.reg_alloc.UseGpr(args[0]);
@@ -1288,16 +1321,22 @@ void EmitX64::EmitFPU32ToDouble(EmitContext& ctx, IR::Inst* inst) {
         code.cvtsi2sd(to, from);
     }
 
+    if (fbits != 0) {
+        const u64 scale_factor = static_cast<u64>((1023 - fbits) << 52);
+        code.mulsd(to, code.MConst(xword, scale_factor));
+    }
+
     ctx.reg_alloc.DefineValue(inst, to);
 }
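
A note on the scale factors used throughout the emitters above: a fixed-point
integer with fbits fractional bits is converted to its real value by
multiplying by 2^-fbits, and that constant is materialised directly as an
IEEE-754 bit pattern rather than computed at runtime. A power of two has a
zero significand, so for singles (bias 127, 23 significand bits) the encoding
of 2^-fbits is (127 - fbits) << 23, and for doubles (bias 1023, 52 significand
bits) it is (1023 - fbits) << 52. Since fbits never exceeds the integer width,
the biased exponent stays positive and the constant is a normal number; the
multiply by a power of two is exact for these ranges, so the only rounding
happens in cvtsi2ss/cvtsi2sd. A standalone check of the single-precision
pattern (illustrative only, not part of the patch):

    #include <cmath>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
        for (std::uint32_t fbits = 1; fbits <= 32; ++fbits) {
            // Sign = 0, significand = 0, biased exponent = 127 - fbits.
            const std::uint32_t bits = (127u - fbits) << 23;
            float scale;
            std::memcpy(&scale, &bits, sizeof(scale));  // bit pattern -> float
            std::printf("fbits=%2u  got=%g  want=%g\n",
                        static_cast<unsigned>(fbits), static_cast<double>(scale),
                        std::ldexp(1.0, -static_cast<int>(fbits)));
        }
        return 0;
    }
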
 
-void EmitX64::EmitFPU64ToDouble(EmitContext& ctx, IR::Inst* inst) {
+void EmitX64::EmitFPFixedU64ToDouble(EmitContext& ctx, IR::Inst* inst) {
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
     const Xbyak::Reg64 from = ctx.reg_alloc.UseGpr(args[0]);
     const Xbyak::Xmm result = ctx.reg_alloc.ScratchXmm();
-    const bool round_to_nearest = args[1].GetImmediateU1();
-    ASSERT_MSG(!round_to_nearest, "round_to_nearest unimplemented");
+    const size_t fbits = args[1].GetImmediateU8();
+    const FP::RoundingMode rounding_mode = static_cast<FP::RoundingMode>(args[2].GetImmediateU8());
+    ASSERT(rounding_mode == ctx.FPSCR_RMode());
 
     if (code.DoesCpuSupport(Xbyak::util::Cpu::tAVX512F)) {
         code.vcvtusi2sd(result, result, from);
@@ -1314,15 +1353,21 @@ void EmitX64::EmitFPU64ToDouble(EmitContext& ctx, IR::Inst* inst) {
         }
     }
 
+    if (fbits != 0) {
+        const u64 scale_factor = static_cast<u64>((1023 - fbits) << 52);
+        code.mulsd(result, code.MConst(xword, scale_factor));
+    }
+
     ctx.reg_alloc.DefineValue(inst, result);
 }
 
-void EmitX64::EmitFPU64ToSingle(EmitContext& ctx, IR::Inst* inst) {
+void EmitX64::EmitFPFixedU64ToSingle(EmitContext& ctx, IR::Inst* inst) {
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
     const Xbyak::Xmm result = ctx.reg_alloc.ScratchXmm();
-    const bool round_to_nearest = args[1].GetImmediateU1();
-    ASSERT_MSG(!round_to_nearest, "round_to_nearest unimplemented");
+    const size_t fbits = args[1].GetImmediateU8();
+    const FP::RoundingMode rounding_mode = static_cast<FP::RoundingMode>(args[2].GetImmediateU8());
+    ASSERT(rounding_mode == ctx.FPSCR_RMode());
 
     if (code.DoesCpuSupport(Xbyak::util::Cpu::tAVX512F)) {
         const Xbyak::Reg64 from = ctx.reg_alloc.UseGpr(args[0]);
@@ -1352,6 +1397,11 @@ void EmitX64::EmitFPU64ToSingle(EmitContext& ctx, IR::Inst* inst) {
         code.L(end);
     }
 
+    if (fbits != 0) {
+        const u32 scale_factor = static_cast<u32>((127 - fbits) << 23);
+        code.mulss(result, code.MConst(xword, scale_factor));
+    }
+
     ctx.reg_alloc.DefineValue(inst, result);
 }
 } // namespace Dynarmic::BackendX64
diff --git a/src/frontend/A32/translate/translate_arm/vfp2.cpp b/src/frontend/A32/translate/translate_arm/vfp2.cpp
index 69939ff5..5ad556d3 100644
--- a/src/frontend/A32/translate/translate_arm/vfp2.cpp
+++ b/src/frontend/A32/translate/translate_arm/vfp2.cpp
@@ -414,20 +414,20 @@ bool ArmTranslatorVisitor::vfp2_VCVT_f_to_f(Cond cond, bool D, size_t Vd, bool s
 bool ArmTranslatorVisitor::vfp2_VCVT_to_float(Cond cond, bool D, size_t Vd, bool sz, bool is_signed, bool M, size_t Vm) {
     ExtReg d = ToExtReg(sz, Vd, D);
     ExtReg m = ToExtReg(false, Vm, M);
-    bool round_to_nearest = false;
+    FP::RoundingMode rounding_mode = ir.current_location.FPSCR().RMode();
     // VCVT.F32.{S32,U32} <Sd>, <Sm>
     // VCVT.F64.{S32,U32} <Dd>, <Sm>
     if (ConditionPassed(cond)) {
         auto reg_m = ir.GetExtendedRegister(m);
         if (sz) {
             auto result = is_signed
-                          ? ir.FPS32ToDouble(reg_m, round_to_nearest, true)
-                          : ir.FPU32ToDouble(reg_m, round_to_nearest, true);
+                          ? ir.FPSignedFixedToDouble(reg_m, 0, rounding_mode)
+                          : ir.FPUnsignedFixedToDouble(reg_m, 0, rounding_mode);
             ir.SetExtendedRegister(d, result);
         } else {
             auto result = is_signed
-                          ? ir.FPS32ToSingle(reg_m, round_to_nearest, true)
-                          : ir.FPU32ToSingle(reg_m, round_to_nearest, true);
+                          ? ir.FPSignedFixedToSingle(reg_m, 0, rounding_mode)
+                          : ir.FPUnsignedFixedToSingle(reg_m, 0, rounding_mode);
             ir.SetExtendedRegister(d, result);
         }
     }
diff --git a/src/frontend/A64/translate/impl/floating_point_conversion_integer.cpp b/src/frontend/A64/translate/impl/floating_point_conversion_integer.cpp
index 97be8ca9..18d7dd7a 100644
--- a/src/frontend/A64/translate/impl/floating_point_conversion_integer.cpp
+++ b/src/frontend/A64/translate/impl/floating_point_conversion_integer.cpp
@@ -21,14 +21,10 @@ bool TranslatorVisitor::SCVTF_float_int(bool sf, Imm<2> type, Reg Rn, Vec Vd) {
     const IR::U32U64 intval = X(intsize, Rn);
     IR::U32U64 fltval;
 
-    if (intsize == 32 && *fltsize == 32) {
-        fltval = ir.FPS32ToSingle(intval, false, true);
-    } else if (intsize == 32 && *fltsize == 64) {
-        fltval = ir.FPS32ToDouble(intval, false, true);
-    } else if (intsize == 64 && *fltsize == 32) {
-        fltval = ir.FPS64ToSingle(intval, false, true);
-    } else if (intsize == 64 && *fltsize == 64) {
-        fltval = ir.FPS64ToDouble(intval, false, true);
+    if (*fltsize == 32) {
+        fltval = ir.FPSignedFixedToSingle(intval, 0, ir.current_location->FPCR().RMode());
+    } else if (*fltsize == 64) {
+        fltval = ir.FPSignedFixedToDouble(intval, 0, ir.current_location->FPCR().RMode());
     } else {
         UNREACHABLE();
     }
@@ -48,14 +44,10 @@ bool TranslatorVisitor::UCVTF_float_int(bool sf, Imm<2> type, Reg Rn, Vec Vd) {
     const IR::U32U64 intval = X(intsize, Rn);
     IR::U32U64 fltval;
 
-    if (intsize == 32 && *fltsize == 32) {
-        fltval = ir.FPU32ToSingle(intval, false, true);
-    } else if (intsize == 32 && *fltsize == 64) {
-        fltval = ir.FPU32ToDouble(intval, false, true);
-    } else if (intsize == 64 && *fltsize == 32) {
-        fltval = ir.FPU64ToSingle(intval, false, true);
-    } else if (intsize == 64 && *fltsize == 64) {
-        fltval = ir.FPU64ToDouble(intval, false, true);
+    if (*fltsize == 32) {
+        fltval = ir.FPUnsignedFixedToSingle(intval, 0, ir.current_location->FPCR().RMode());
+    } else if (*fltsize == 64) {
+        fltval = ir.FPUnsignedFixedToDouble(intval, 0, ir.current_location->FPCR().RMode());
     } else {
         UNREACHABLE();
     }
diff --git a/src/frontend/A64/translate/impl/simd_scalar_two_register_misc.cpp b/src/frontend/A64/translate/impl/simd_scalar_two_register_misc.cpp
index cf27a9de..ebb621c1 100644
--- a/src/frontend/A64/translate/impl/simd_scalar_two_register_misc.cpp
+++ b/src/frontend/A64/translate/impl/simd_scalar_two_register_misc.cpp
@@ -195,13 +195,12 @@ bool TranslatorVisitor::NEG_1(Imm<2> size, Vec Vn, Vec Vd) {
 bool TranslatorVisitor::SCVTF_int_2(bool sz, Vec Vn, Vec Vd) {
     const auto esize = sz ? 64 : 32;
 
-    IR::U32U64 element = V_scalar(esize, Vn);
-    if (esize == 32) {
-        element = ir.FPS32ToSingle(element, false, true);
-    } else {
-        element = ir.FPS64ToDouble(element, false, true);
-    }
-    V_scalar(esize, Vd, element);
+    const IR::U32U64 element = V_scalar(esize, Vn);
+    const IR::U32U64 result = esize == 32
+                            ? IR::U32U64(ir.FPSignedFixedToSingle(element, 0, ir.current_location->FPCR().RMode()))
+                            : IR::U32U64(ir.FPSignedFixedToDouble(element, 0, ir.current_location->FPCR().RMode()));
+
+    V_scalar(esize, Vd, result);
     return true;
 }
 
@@ -248,13 +247,12 @@ bool TranslatorVisitor::SUQADD_1(Imm<2> size, Vec Vn, Vec Vd) {
 bool TranslatorVisitor::UCVTF_int_2(bool sz, Vec Vn, Vec Vd) {
     const auto esize = sz ? 64 : 32;
 
-    IR::U32U64 element = V_scalar(esize, Vn);
-    if (esize == 32) {
-        element = ir.FPU32ToSingle(element, false, true);
-    } else {
-        element = ir.FPU64ToDouble(element, false, true);
-    }
-    V_scalar(esize, Vd, element);
+    const IR::U32U64 element = V_scalar(esize, Vn);
+    const IR::U32U64 result = esize == 32
+                            ? IR::U32U64(ir.FPUnsignedFixedToSingle(element, 0, ir.current_location->FPCR().RMode()))
+                            : IR::U32U64(ir.FPUnsignedFixedToDouble(element, 0, ir.current_location->FPCR().RMode()));
+
+    V_scalar(esize, Vd, result);
     return true;
 }
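
All of the translator changes in this patch pass fbits = 0, i.e. plain
integer-to-float conversion; the only behavioural change on the frontend side
is that the guest's FPSCR/FPCR rounding mode is threaded through instead of a
round_to_nearest flag. The point of the new argument is the genuinely
fixed-point encodings (A32 VCVT with fractional bits, A64 fixed-point
SCVTF/UCVTF), which this interface can now express. A hypothetical translator
along those lines; the function name and decoding are invented for
illustration, and only the IREmitter call comes from this patch:

    // Hypothetical sketch, not part of the patch: a scalar fixed-point
    // SCVTF-style translation with a non-zero fbits.
    bool TranslatorVisitor::SCVTF_fixed_sketch(size_t fbits, Reg Rn, Vec Vd) {
        const IR::U32U64 intval = X(64, Rn);
        // Vd = signed(intval) * 2^-fbits, rounded per the guest's FPCR.
        const IR::U64 fltval = ir.FPSignedFixedToDouble(intval, fbits, ir.current_location->FPCR().RMode());
        V_scalar(64, Vd, fltval);
        return true;
    }
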
diff --git a/src/frontend/ir/ir_emitter.cpp b/src/frontend/ir/ir_emitter.cpp
index b4bd3617..968a93e3 100644
--- a/src/frontend/ir/ir_emitter.cpp
+++ b/src/frontend/ir/ir_emitter.cpp
@@ -1960,44 +1960,28 @@ U64 IREmitter::FPToFixedU64(const U32U64& a, size_t fbits, FP::RoundingMode roun
     return Inst<U64>(opcode, a, Imm8(static_cast<u8>(fbits)), Imm8(static_cast<u8>(rounding)));
 }
 
-U32 IREmitter::FPS32ToSingle(const U32& a, bool round_to_nearest, bool fpcr_controlled) {
-    ASSERT(fpcr_controlled);
-    return Inst<U32>(Opcode::FPS32ToSingle, a, Imm1(round_to_nearest));
+U32 IREmitter::FPSignedFixedToSingle(const U32U64& a, size_t fbits, FP::RoundingMode rounding) {
+    ASSERT(fbits <= (a.GetType() == Type::U32 ? 32 : 64));
+    const Opcode opcode = a.GetType() == Type::U32 ? Opcode::FPFixedS32ToSingle : Opcode::FPFixedS64ToSingle;
+    return Inst<U32>(opcode, a, Imm8(static_cast<u8>(fbits)), Imm8(static_cast<u8>(rounding)));
 }
 
-U64 IREmitter::FPS64ToDouble(const U64& a, bool round_to_nearest, bool fpcr_controlled) {
-    ASSERT(fpcr_controlled);
-    return Inst<U64>(Opcode::FPS64ToDouble, a, Imm1(round_to_nearest));
+U32 IREmitter::FPUnsignedFixedToSingle(const U32U64& a, size_t fbits, FP::RoundingMode rounding) {
+    ASSERT(fbits <= (a.GetType() == Type::U32 ? 32 : 64));
+    const Opcode opcode = a.GetType() == Type::U32 ? Opcode::FPFixedU32ToSingle : Opcode::FPFixedU64ToSingle;
+    return Inst<U32>(opcode, a, Imm8(static_cast<u8>(fbits)), Imm8(static_cast<u8>(rounding)));
 }
 
-U32 IREmitter::FPS64ToSingle(const U64& a, bool round_to_nearest, bool fpcr_controlled) {
-    ASSERT(fpcr_controlled);
-    return Inst<U32>(Opcode::FPS64ToSingle, a, Imm1(round_to_nearest));
+U64 IREmitter::FPSignedFixedToDouble(const U32U64& a, size_t fbits, FP::RoundingMode rounding) {
+    ASSERT(fbits <= (a.GetType() == Type::U32 ? 32 : 64));
+    const Opcode opcode = a.GetType() == Type::U32 ? Opcode::FPFixedS32ToDouble : Opcode::FPFixedS64ToDouble;
+    return Inst<U64>(opcode, a, Imm8(static_cast<u8>(fbits)), Imm8(static_cast<u8>(rounding)));
 }
 
-U32 IREmitter::FPU32ToSingle(const U32& a, bool round_to_nearest, bool fpcr_controlled) {
-    ASSERT(fpcr_controlled);
-    return Inst<U32>(Opcode::FPU32ToSingle, a, Imm1(round_to_nearest));
-}
-
-U64 IREmitter::FPS32ToDouble(const U32& a, bool round_to_nearest, bool fpcr_controlled) {
-    ASSERT(fpcr_controlled);
-    return Inst<U64>(Opcode::FPS32ToDouble, a, Imm1(round_to_nearest));
-}
-
-U64 IREmitter::FPU32ToDouble(const U32& a, bool round_to_nearest, bool fpcr_controlled) {
-    ASSERT(fpcr_controlled);
-    return Inst<U64>(Opcode::FPU32ToDouble, a, Imm1(round_to_nearest));
-}
-
-U64 IREmitter::FPU64ToDouble(const U64& a, bool round_to_nearest, bool fpcr_controlled) {
-    ASSERT(fpcr_controlled);
-    return Inst<U64>(Opcode::FPU64ToDouble, a, Imm1(round_to_nearest));
-}
-
-U32 IREmitter::FPU64ToSingle(const U64& a, bool round_to_nearest, bool fpcr_controlled) {
-    ASSERT(fpcr_controlled);
-    return Inst<U32>(Opcode::FPU64ToSingle, a, Imm1(round_to_nearest));
+U64 IREmitter::FPUnsignedFixedToDouble(const U32U64& a, size_t fbits, FP::RoundingMode rounding) {
+    ASSERT(fbits <= (a.GetType() == Type::U32 ? 32 : 64));
+    const Opcode opcode = a.GetType() == Type::U32 ? Opcode::FPFixedU32ToDouble : Opcode::FPFixedU64ToDouble;
+    return Inst<U64>(opcode, a, Imm8(static_cast<u8>(fbits)), Imm8(static_cast<u8>(rounding)));
 }
 
 U128 IREmitter::FPVectorAbs(size_t esize, const U128& a) {
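
The eight width-specific emitter entry points collapse into four: each helper
picks the 32-bit or 64-bit opcode from a.GetType() and encodes both fbits and
the rounding mode as U8 immediates, which is why the ASSERT bounds fbits by
the operand width. Semantically each opcode computes value * 2^-fbits rounded
once; a scalar reference sketch of one of them (an illustration under that
reading, not dynarmic code):

    #include <cmath>
    #include <cstdint>

    // Reference semantics of FPFixedS32ToSingle(x, fbits, round-to-nearest).
    float FPFixedS32ToSingleRef(std::int32_t x, int fbits) {
        // The int -> float conversion performs the single rounding step
        // (round-to-nearest in the default environment); scaling by a power
        // of two afterwards is exact, mirroring the backend's cvtsi2ss
        // followed by one mulss.
        return std::ldexp(static_cast<float>(x), -fbits);
    }
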
diff --git a/src/frontend/ir/ir_emitter.h b/src/frontend/ir/ir_emitter.h
index a6523d23..fcef89d1 100644
--- a/src/frontend/ir/ir_emitter.h
+++ b/src/frontend/ir/ir_emitter.h
@@ -316,14 +316,10 @@ public:
     U64 FPToFixedS64(const U32U64& a, size_t fbits, FP::RoundingMode rounding);
     U32 FPToFixedU32(const U32U64& a, size_t fbits, FP::RoundingMode rounding);
     U64 FPToFixedU64(const U32U64& a, size_t fbits, FP::RoundingMode rounding);
-    U32 FPS32ToSingle(const U32& a, bool round_to_nearest, bool fpcr_controlled);
-    U32 FPU32ToSingle(const U32& a, bool round_to_nearest, bool fpcr_controlled);
-    U64 FPS32ToDouble(const U32& a, bool round_to_nearest, bool fpcr_controlled);
-    U64 FPS64ToDouble(const U64& a, bool round_to_nearest, bool fpcr_controlled);
-    U32 FPS64ToSingle(const U64& a, bool round_to_nearest, bool fpcr_controlled);
-    U64 FPU32ToDouble(const U32& a, bool round_to_nearest, bool fpcr_controlled);
-    U64 FPU64ToDouble(const U64& a, bool round_to_nearest, bool fpcr_controlled);
-    U32 FPU64ToSingle(const U64& a, bool round_to_nearest, bool fpcr_controlled);
+    U32 FPSignedFixedToSingle(const U32U64& a, size_t fbits, FP::RoundingMode rounding);
+    U32 FPUnsignedFixedToSingle(const U32U64& a, size_t fbits, FP::RoundingMode rounding);
+    U64 FPSignedFixedToDouble(const U32U64& a, size_t fbits, FP::RoundingMode rounding);
+    U64 FPUnsignedFixedToDouble(const U32U64& a, size_t fbits, FP::RoundingMode rounding);
 
     U128 FPVectorAbs(size_t esize, const U128& a);
     U128 FPVectorAdd(size_t esize, const U128& a, const U128& b);
diff --git a/src/frontend/ir/microinstruction.cpp b/src/frontend/ir/microinstruction.cpp
index 9994d920..f82058fe 100644
--- a/src/frontend/ir/microinstruction.cpp
+++ b/src/frontend/ir/microinstruction.cpp
@@ -293,14 +293,14 @@ bool Inst::ReadsFromAndWritesToFPSRCumulativeExceptionBits() const {
     case Opcode::FPSingleToFixedS64:
     case Opcode::FPSingleToFixedU32:
     case Opcode::FPSingleToFixedU64:
-    case Opcode::FPU32ToSingle:
-    case Opcode::FPS32ToSingle:
-    case Opcode::FPU32ToDouble:
-    case Opcode::FPU64ToDouble:
-    case Opcode::FPU64ToSingle:
-    case Opcode::FPS32ToDouble:
-    case Opcode::FPS64ToDouble:
-    case Opcode::FPS64ToSingle:
+    case Opcode::FPFixedU32ToSingle:
+    case Opcode::FPFixedS32ToSingle:
+    case Opcode::FPFixedU32ToDouble:
+    case Opcode::FPFixedU64ToDouble:
+    case Opcode::FPFixedU64ToSingle:
+    case Opcode::FPFixedS32ToDouble:
+    case Opcode::FPFixedS64ToDouble:
+    case Opcode::FPFixedS64ToSingle:
     case Opcode::FPVectorAdd32:
     case Opcode::FPVectorAdd64:
     case Opcode::FPVectorDiv32:
diff --git a/src/frontend/ir/opcodes.inc b/src/frontend/ir/opcodes.inc
index 29cee190..b75a47b5 100644
--- a/src/frontend/ir/opcodes.inc
+++ b/src/frontend/ir/opcodes.inc
@@ -504,14 +504,14 @@ OPCODE(FPSingleToFixedS32,                                  U32,            U32,
 OPCODE(FPSingleToFixedS64,                                  U64,            U32,            U8,             U8              )
 OPCODE(FPSingleToFixedU32,                                  U32,            U32,            U8,             U8              )
 OPCODE(FPSingleToFixedU64,                                  U64,            U32,            U8,             U8              )
-OPCODE(FPU32ToSingle,                                       U32,            U32,            U1                              )
-OPCODE(FPS32ToSingle,                                       U32,            U32,            U1                              )
-OPCODE(FPU32ToDouble,                                       U64,            U32,            U1                              )
-OPCODE(FPU64ToDouble,                                       U64,            U64,            U1                              )
-OPCODE(FPU64ToSingle,                                       U32,            U64,            U1                              )
-OPCODE(FPS32ToDouble,                                       U64,            U32,            U1                              )
-OPCODE(FPS64ToDouble,                                       U64,            U64,            U1                              )
-OPCODE(FPS64ToSingle,                                       U32,            U64,            U1                              )
+OPCODE(FPFixedU32ToSingle,                                  U32,            U32,            U8,             U8              )
+OPCODE(FPFixedS32ToSingle,                                  U32,            U32,            U8,             U8              )
+OPCODE(FPFixedU32ToDouble,                                  U64,            U32,            U8,             U8              )
+OPCODE(FPFixedU64ToDouble,                                  U64,            U64,            U8,             U8              )
+OPCODE(FPFixedU64ToSingle,                                  U32,            U64,            U8,             U8              )
+OPCODE(FPFixedS32ToDouble,                                  U64,            U32,            U8,             U8              )
+OPCODE(FPFixedS64ToDouble,                                  U64,            U64,            U8,             U8              )
+OPCODE(FPFixedS64ToSingle,                                  U32,            U64,            U8,             U8              )
 
 // Floating-point vector instructions
 OPCODE(FPVectorAbs16,                                       U128,           U128                                            )
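
With the table above, every FixedToFP opcode now carries (operand, fbits : U8,
rounding mode : U8), matching the FPToFixed* signatures a few rows up. A
worked example of the new backend path: FPFixedS32ToSingle with input 3 and
fbits = 1 emits cvtsi2ss, which yields 3.0f exactly since |3| < 2^24, then
mulss with the constant (127 - 1) << 23 = 0x3F000000 = 0.5f, giving
1.5f = 3 * 2^-1. Note that the backend still handles only the dynamic case:
each emitter asserts rounding_mode == ctx.FPSCR_RMode(), presumably because
the host rounding state is configured from the guest FPSCR/FPCR, and every
call site in this patch satisfies the assertion by construction, since it
passes exactly that rounding mode.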