diff --git a/src/backend/x64/emit_x64_vector_floating_point.cpp b/src/backend/x64/emit_x64_vector_floating_point.cpp
index aa2cd25d..b0478635 100644
--- a/src/backend/x64/emit_x64_vector_floating_point.cpp
+++ b/src/backend/x64/emit_x64_vector_floating_point.cpp
@@ -1335,6 +1335,7 @@ static void EmitRSqrtStepFused(BlockOfCode& code, EmitContext& ctx, IR::Inst* in
     if constexpr (fsize != 16) {
         if (code.HasFMA() && code.HasAVX()) {
             auto args = ctx.reg_alloc.GetArgumentInfo(inst);
+            const bool fpcr_controlled = args[2].GetImmediateU1();
 
             const Xbyak::Xmm result = ctx.reg_alloc.ScratchXmm();
             const Xbyak::Xmm operand1 = ctx.reg_alloc.UseXmm(args[0]);
@@ -1344,28 +1345,30 @@ static void EmitRSqrtStepFused(BlockOfCode& code, EmitContext& ctx, IR::Inst* in
 
             Xbyak::Label end, fallback;
 
-            code.vmovaps(result, GetVectorOf<fsize, false, 0, 3>(code));
-            FCODE(vfnmadd231p)(result, operand1, operand2);
+            MaybeStandardFPSCRValue(code, ctx, fpcr_controlled, [&]{
+                code.vmovaps(result, GetVectorOf<fsize, false, 0, 3>(code));
+                FCODE(vfnmadd231p)(result, operand1, operand2);
 
-            // An explanation for this is given in EmitFPRSqrtStepFused.
-            code.vmovaps(mask, GetVectorOf<fsize, fsize == 32 ? 0x7f000000 : 0x7fe0000000000000>(code));
-            FCODE(vandp)(tmp, result, mask);
-            if constexpr (fsize == 32) {
-                code.vpcmpeqd(tmp, tmp, mask);
-            } else {
-                code.vpcmpeqq(tmp, tmp, mask);
-            }
-            code.ptest(tmp, tmp);
-            code.jnz(fallback, code.T_NEAR);
+                // An explanation for this is given in EmitFPRSqrtStepFused.
+                code.vmovaps(mask, GetVectorOf<fsize, fsize == 32 ? 0x7f000000 : 0x7fe0000000000000>(code));
+                FCODE(vandp)(tmp, result, mask);
+                if constexpr (fsize == 32) {
+                    code.vpcmpeqd(tmp, tmp, mask);
+                } else {
+                    code.vpcmpeqq(tmp, tmp, mask);
+                }
+                code.ptest(tmp, tmp);
+                code.jnz(fallback, code.T_NEAR);
 
-            FCODE(vmulp)(result, result, GetVectorOf<fsize, false, -1, 1>(code));
-            code.L(end);
+                FCODE(vmulp)(result, result, GetVectorOf<fsize, false, -1, 1>(code));
+                code.L(end);
+            });
 
             code.SwitchToFarCode();
             code.L(fallback);
             code.sub(rsp, 8);
             ABI_PushCallerSaveRegistersAndAdjustStackExcept(code, HostLocXmmIdx(result.getIdx()));
-            EmitThreeOpFallbackWithoutRegAlloc(code, ctx, result, operand1, operand2, fallback_fn);
+            EmitThreeOpFallbackWithoutRegAlloc(code, ctx, result, operand1, operand2, fallback_fn, fpcr_controlled);
             ABI_PopCallerSaveRegistersAndAdjustStackExcept(code, HostLocXmmIdx(result.getIdx()));
             code.add(rsp, 8);
             code.jmp(end, code.T_NEAR);
@@ -1376,7 +1379,7 @@ static void EmitRSqrtStepFused(BlockOfCode& code, EmitContext& ctx, IR::Inst* in
         }
     }
 
-    EmitThreeOpFallback(code, ctx, inst, fallback_fn);
+    EmitThreeOpFallback(code, ctx, inst, fallback_fn);
 }
 
 void EmitX64::EmitFPVectorRSqrtStepFused16(EmitContext& ctx, IR::Inst* inst) {
diff --git a/src/frontend/A32/decoder/asimd.inc b/src/frontend/A32/decoder/asimd.inc
index 7ab66c1a..6671581f 100644
--- a/src/frontend/A32/decoder/asimd.inc
+++ b/src/frontend/A32/decoder/asimd.inc
@@ -47,7 +47,7 @@ INST(asimd_VMUL_float,       "VMUL (floating-point)",    "111100110D0znnnndddd110
 INST(asimd_VMAX_float,       "VMAX (floating-point)",    "111100100D0znnnndddd1111NQM0mmmm") // ASIMD
 INST(asimd_VMIN_float,       "VMIN (floating-point)",    "111100100D1znnnndddd1111NQM0mmmm") // ASIMD
 INST(asimd_VRECPS,           "VRECPS",                   "111100100D0znnnndddd1111NQM1mmmm") // ASIMD
-//INST(asimd_VRSQRTS,        "VRSQRTS",                  "111100100-1C--------1111---1----") // ASIMD
+INST(asimd_VRSQRTS,          "VRSQRTS",                  "111100100D1znnnndddd1111NQM1mmmm") // ASIMD
 
 // Two registers and a scalar
 //INST(asimd_VMLA_scalar,    "VMLA (scalar)",            "1111001U1-BB--------0x0x-1-0----") // ASIMD
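Note on the fast path above: the architectural FPRSqrtStepFused operation computes (3.0 - op1 * op2) / 2.0 with the multiply and subtract fused into a single rounding, which is what the vfnmadd231p/vmulp pair emits; the constant vectors hold 3.0 and 0.5. A minimal scalar sketch of that per-lane arithmetic, leaving out the special-value handling that the far-code fallback performs (the function name is invented for illustration):

    #include <cmath>

    float RSqrtStepScalar(float op1, float op2) {
        // vfnmadd231p: 3.0 - op1 * op2 with one rounding (fused multiply-add).
        const float t = std::fma(-op1, op2, 3.0f);
        // vmulp against the 0.5 constant: halve the fused result.
        return t * 0.5f;
    }
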
diff --git a/src/frontend/A32/translate/impl/asimd_three_same.cpp b/src/frontend/A32/translate/impl/asimd_three_same.cpp
index 8d45594d..8404e303 100644
--- a/src/frontend/A32/translate/impl/asimd_three_same.cpp
+++ b/src/frontend/A32/translate/impl/asimd_three_same.cpp
@@ -419,4 +419,10 @@ bool ArmTranslatorVisitor::asimd_VRECPS(bool D, bool sz, size_t Vn, size_t Vd, b
     });
 }
 
+bool ArmTranslatorVisitor::asimd_VRSQRTS(bool D, bool sz, size_t Vn, size_t Vd, bool N, bool Q, bool M, size_t Vm) {
+    return FloatingPointInstruction(*this, D, sz, Vn, Vd, N, Q, M, Vm, [this](const auto&, const auto& reg_n, const auto& reg_m) {
+        return ir.FPVectorRSqrtStepFused(32, reg_n, reg_m, false);
+    });
+}
+
 } // namespace Dynarmic::A32
diff --git a/src/frontend/A32/translate/impl/translate_arm.h b/src/frontend/A32/translate/impl/translate_arm.h
index 14c085b7..ca916b32 100644
--- a/src/frontend/A32/translate/impl/translate_arm.h
+++ b/src/frontend/A32/translate/impl/translate_arm.h
@@ -472,6 +472,7 @@ struct ArmTranslatorVisitor final {
     bool asimd_VMAX_float(bool D, bool sz, size_t Vn, size_t Vd, bool N, bool Q, bool M, size_t Vm);
     bool asimd_VMIN_float(bool D, bool sz, size_t Vn, size_t Vd, bool N, bool Q, bool M, size_t Vm);
     bool asimd_VRECPS(bool D, bool sz, size_t Vn, size_t Vd, bool N, bool Q, bool M, size_t Vm);
+    bool asimd_VRSQRTS(bool D, bool sz, size_t Vn, size_t Vd, bool N, bool Q, bool M, size_t Vm);
 
     // Two registers and a shift amount
     bool asimd_SHR(bool U, bool D, size_t imm6, size_t Vd, bool L, bool Q, bool M, size_t Vm);
diff --git a/src/frontend/ir/ir_emitter.cpp b/src/frontend/ir/ir_emitter.cpp
index 42c2ebc8..35e66a8f 100644
--- a/src/frontend/ir/ir_emitter.cpp
+++ b/src/frontend/ir/ir_emitter.cpp
@@ -2491,14 +2491,14 @@ U128 IREmitter::FPVectorRSqrtEstimate(size_t esize, const U128& a) {
     UNREACHABLE();
 }
 
-U128 IREmitter::FPVectorRSqrtStepFused(size_t esize, const U128& a, const U128& b) {
+U128 IREmitter::FPVectorRSqrtStepFused(size_t esize, const U128& a, const U128& b, bool fpcr_controlled) {
     switch (esize) {
     case 16:
-        return Inst<U128>(Opcode::FPVectorRSqrtStepFused16, a, b);
+        return Inst<U128>(Opcode::FPVectorRSqrtStepFused16, a, b, Imm1(fpcr_controlled));
     case 32:
-        return Inst<U128>(Opcode::FPVectorRSqrtStepFused32, a, b);
+        return Inst<U128>(Opcode::FPVectorRSqrtStepFused32, a, b, Imm1(fpcr_controlled));
     case 64:
-        return Inst<U128>(Opcode::FPVectorRSqrtStepFused64, a, b);
+        return Inst<U128>(Opcode::FPVectorRSqrtStepFused64, a, b, Imm1(fpcr_controlled));
     }
     UNREACHABLE();
 }
diff --git a/src/frontend/ir/ir_emitter.h b/src/frontend/ir/ir_emitter.h
index ec4312cb..6852295f 100644
--- a/src/frontend/ir/ir_emitter.h
+++ b/src/frontend/ir/ir_emitter.h
@@ -364,7 +364,7 @@ public:
     U128 FPVectorRecipStepFused(size_t esize, const U128& a, const U128& b, bool fpcr_controlled = true);
     U128 FPVectorRoundInt(size_t esize, const U128& operand, FP::RoundingMode rounding, bool exact);
     U128 FPVectorRSqrtEstimate(size_t esize, const U128& a);
-    U128 FPVectorRSqrtStepFused(size_t esize, const U128& a, const U128& b);
+    U128 FPVectorRSqrtStepFused(size_t esize, const U128& a, const U128& b, bool fpcr_controlled = true);
     U128 FPVectorSqrt(size_t esize, const U128& a);
     U128 FPVectorSub(size_t esize, const U128& a, const U128& b, bool fpcr_controlled = true);
     U128 FPVectorToSignedFixed(size_t esize, const U128& a, size_t fbits, FP::RoundingMode rounding);
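Note on the API change above: defaulting fpcr_controlled to true keeps every existing caller (the A64 frontend) source-compatible, while the new asimd_VRSQRTS translator passes false, since A32 ASIMD arithmetic always runs under the standard FPSCR value rather than the thread's FPCR. A sketch of the two call patterns (the wrapper names are hypothetical, not from the repository):

    // Behaviour follows the runtime FPCR (default argument), as on the A64 path.
    U128 StepFpcrControlled(IREmitter& ir, const U128& a, const U128& b) {
        return ir.FPVectorRSqrtStepFused(32, a, b);
    }

    // A32 ASIMD path: force the standard FPSCR value, as asimd_VRSQRTS does above.
    U128 StepStandardFpscr(IREmitter& ir, const U128& a, const U128& b) {
        return ir.FPVectorRSqrtStepFused(32, a, b, false);
    }
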
diff --git a/src/frontend/ir/opcodes.inc b/src/frontend/ir/opcodes.inc
index f1d2bb72..655d624c 100644
--- a/src/frontend/ir/opcodes.inc
+++ b/src/frontend/ir/opcodes.inc
@@ -625,9 +625,9 @@ OPCODE(FPVectorRoundInt64, U128, U128
 OPCODE(FPVectorRSqrtEstimate16,                             U128,           U128                                            )
 OPCODE(FPVectorRSqrtEstimate32,                             U128,           U128                                            )
 OPCODE(FPVectorRSqrtEstimate64,                             U128,           U128                                            )
-OPCODE(FPVectorRSqrtStepFused16,                            U128,           U128,           U128                            )
-OPCODE(FPVectorRSqrtStepFused32,                            U128,           U128,           U128                            )
-OPCODE(FPVectorRSqrtStepFused64,                            U128,           U128,           U128                            )
+OPCODE(FPVectorRSqrtStepFused16,                            U128,           U128,           U128,           U1              )
+OPCODE(FPVectorRSqrtStepFused32,                            U128,           U128,           U128,           U1              )
+OPCODE(FPVectorRSqrtStepFused64,                            U128,           U128,           U128,           U1              )
 OPCODE(FPVectorSqrt32,                                      U128,           U128                                            )
 OPCODE(FPVectorSqrt64,                                      U128,           U128                                            )
 OPCODE(FPVectorSub32,                                       U128,           U128,           U128,           U1              )
diff --git a/tests/A32/fuzz_arm.cpp b/tests/A32/fuzz_arm.cpp
index 8517e808..60998fa9 100644
--- a/tests/A32/fuzz_arm.cpp
+++ b/tests/A32/fuzz_arm.cpp
@@ -112,6 +112,8 @@ u32 GenRandomInst(u32 pc, bool is_last_inst) {
         "vfp_VMRS", // Unimplemented in Unicorn
         "asimd_VPADD_float",
 
+        // Incorrect Unicorn implementations
+        "asimd_VRSQRTS", // Unicorn does not fuse the multiply and subtraction, resulting in being off by 1ULP.
     };
 
     for (const auto& [fn, bitstring] : list) {
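Why the fuzz exclusion is needed: an implementation that rounds the product before subtracting can land a ULP away from the fused result the architecture specifies. A self-contained demonstration, with inputs picked so the two roundings visibly disagree (illustrative only, not taken from the test suite; build with contraction disabled, e.g. -ffp-contract=off, or the compiler may fuse the "unfused" line as well):

    #include <cmath>
    #include <cstdio>

    int main() {
        const float a = 1.0f + 0x1p-23f;
        const float b = 2.0f - 0x1p-23f;
        const float fused = std::fma(-a, b, 3.0f); // single rounding -> 0.99999988f
        const float unfused = 3.0f - a * b;        // a*b rounds to 2.0f first -> 1.0f
        std::printf("fused=%.9g unfused=%.9g\n", fused, unfused);
        return 0;
    }
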