diff --git a/src/backend/x64/emit_x64_floating_point.cpp b/src/backend/x64/emit_x64_floating_point.cpp
index 8543bc94..8386338d 100644
--- a/src/backend/x64/emit_x64_floating_point.cpp
+++ b/src/backend/x64/emit_x64_floating_point.cpp
@@ -946,52 +946,54 @@ template<size_t fsize>
 static void EmitFPRSqrtStepFused(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
     using FPT = mp::unsigned_integer_of_size<fsize>;
 
-    if (code.DoesCpuSupport(Xbyak::util::Cpu::tFMA) && code.DoesCpuSupport(Xbyak::util::Cpu::tAVX)) {
-        auto args = ctx.reg_alloc.GetArgumentInfo(inst);
+    if constexpr (fsize != 16) {
+        if (code.DoesCpuSupport(Xbyak::util::Cpu::tFMA) && code.DoesCpuSupport(Xbyak::util::Cpu::tAVX)) {
+            auto args = ctx.reg_alloc.GetArgumentInfo(inst);
 
-        Xbyak::Label end, fallback;
+            Xbyak::Label end, fallback;
 
-        const Xbyak::Xmm operand1 = ctx.reg_alloc.UseXmm(args[0]);
-        const Xbyak::Xmm operand2 = ctx.reg_alloc.UseXmm(args[1]);
-        const Xbyak::Xmm result = ctx.reg_alloc.ScratchXmm();
+            const Xbyak::Xmm operand1 = ctx.reg_alloc.UseXmm(args[0]);
+            const Xbyak::Xmm operand2 = ctx.reg_alloc.UseXmm(args[1]);
+            const Xbyak::Xmm result = ctx.reg_alloc.ScratchXmm();
 
-        code.vmovaps(result, code.MConst(xword, FP::FPValue<FPT, false, 0, 3>()));
-        FCODE(vfnmadd231s)(result, operand1, operand2);
+            code.vmovaps(result, code.MConst(xword, FP::FPValue<FPT, false, 0, 3>()));
+            FCODE(vfnmadd231s)(result, operand1, operand2);
 
-        // Detect if the intermediate result is infinity or NaN or nearly an infinity.
-        // Why do we need to care about infinities? This is because x86 doesn't allow us
-        // to fuse the divide-by-two with the rest of the FMA operation. Therefore the
-        // intermediate value may overflow and we would like to handle this case.
-        const Xbyak::Reg32 tmp = ctx.reg_alloc.ScratchGpr().cvt32();
-        code.vpextrw(tmp, result, fsize == 32 ? 1 : 3);
-        code.and_(tmp.cvt16(), fsize == 32 ? 0x7f80 : 0x7ff0);
-        code.cmp(tmp.cvt16(), fsize == 32 ? 0x7f00 : 0x7fe0);
-        ctx.reg_alloc.Release(tmp);
+            // Detect if the intermediate result is infinity or NaN or nearly an infinity.
+            // Why do we need to care about infinities? This is because x86 doesn't allow us
+            // to fuse the divide-by-two with the rest of the FMA operation. Therefore the
+            // intermediate value may overflow and we would like to handle this case.
+            const Xbyak::Reg32 tmp = ctx.reg_alloc.ScratchGpr().cvt32();
+            code.vpextrw(tmp, result, fsize == 32 ? 1 : 3);
+            code.and_(tmp.cvt16(), fsize == 32 ? 0x7f80 : 0x7ff0);
+            code.cmp(tmp.cvt16(), fsize == 32 ? 0x7f00 : 0x7fe0);
+            ctx.reg_alloc.Release(tmp);
 
-        code.jae(fallback, code.T_NEAR);
+            code.jae(fallback, code.T_NEAR);
 
-        FCODE(vmuls)(result, result, code.MConst(xword, FP::FPValue<FPT, false, -1, 1>()));
-        code.L(end);
+            FCODE(vmuls)(result, result, code.MConst(xword, FP::FPValue<FPT, false, -1, 1>()));
+            code.L(end);
 
-        code.SwitchToFarCode();
-        code.L(fallback);
+            code.SwitchToFarCode();
+            code.L(fallback);
 
-        code.sub(rsp, 8);
-        ABI_PushCallerSaveRegistersAndAdjustStackExcept(code, HostLocXmmIdx(result.getIdx()));
-        code.movq(code.ABI_PARAM1, operand1);
-        code.movq(code.ABI_PARAM2, operand2);
-        code.mov(code.ABI_PARAM3.cvt32(), ctx.FPCR().Value());
-        code.lea(code.ABI_PARAM4, code.ptr[code.r15 + code.GetJitStateInfo().offsetof_fpsr_exc]);
-        code.CallFunction(&FP::FPRSqrtStepFused<FPT>);
-        code.movq(result, code.ABI_RETURN);
-        ABI_PopCallerSaveRegistersAndAdjustStackExcept(code, HostLocXmmIdx(result.getIdx()));
-        code.add(rsp, 8);
+            code.sub(rsp, 8);
+            ABI_PushCallerSaveRegistersAndAdjustStackExcept(code, HostLocXmmIdx(result.getIdx()));
+            code.movq(code.ABI_PARAM1, operand1);
+            code.movq(code.ABI_PARAM2, operand2);
+            code.mov(code.ABI_PARAM3.cvt32(), ctx.FPCR().Value());
+            code.lea(code.ABI_PARAM4, code.ptr[code.r15 + code.GetJitStateInfo().offsetof_fpsr_exc]);
+            code.CallFunction(&FP::FPRSqrtStepFused<FPT>);
+            code.movq(result, code.ABI_RETURN);
+            ABI_PopCallerSaveRegistersAndAdjustStackExcept(code, HostLocXmmIdx(result.getIdx()));
+            code.add(rsp, 8);
 
-        code.jmp(end, code.T_NEAR);
-        code.SwitchToNearCode();
+            code.jmp(end, code.T_NEAR);
+            code.SwitchToNearCode();
 
-        ctx.reg_alloc.DefineValue(inst, result);
-        return;
+            ctx.reg_alloc.DefineValue(inst, result);
+            return;
+        }
     }
 
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
@@ -1001,6 +1003,10 @@ static void EmitFPRSqrtStepFused(BlockOfCode& code, EmitContext& ctx, IR::Inst*
     code.CallFunction(&FP::FPRSqrtStepFused<FPT>);
 }
 
+void EmitX64::EmitFPRSqrtStepFused16(EmitContext& ctx, IR::Inst* inst) {
+    EmitFPRSqrtStepFused<16>(code, ctx, inst);
+}
+
 void EmitX64::EmitFPRSqrtStepFused32(EmitContext& ctx, IR::Inst* inst) {
     EmitFPRSqrtStepFused<32>(code, ctx, inst);
 }
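For context on the fast path above: the two constants loaded via MConst are 3.0 and one half, so the sequence computes the ARM FRSQRTS step value, (3 - a*b) / 2. Because x64 FMA cannot fold the final halving into the fused multiply-add, the intermediate 3 - a*b is materialised at full magnitude and can overflow where the architecturally fused result would not; the vpextrw/and/cmp exponent check catches that case (and NaNs) and diverts to the FP::FPRSqrtStepFused<FPT> software routine. A minimal scalar sketch of the shape of this path, assuming double precision and ignoring FPCR modes, denormals and exception accumulation (illustrative only, not dynarmic code):

    #include <cmath>

    // Sketch of what the FMA fast path computes for one scalar value.
    double RSqrtStepFastPathSketch(double a, double b) {
        const double t = std::fma(-a, b, 3.0);  // vfnmadd231s: t = 3 - a*b, one fused operation
        // The emitted code inspects t's exponent here; if t is NaN, infinite or
        // nearly infinite it branches to FP::FPRSqrtStepFused<FPT>, because the
        // halving below cannot recover from an overflow that the architecturally
        // fused (3 - a*b) / 2 would have avoided.
        return t * 0.5;                         // vmuls by the 0.5 constant
    }

Note that the if constexpr (fsize != 16) guard means half-precision operands never take this path and always go through the software helper at the bottom of the function.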
diff --git a/src/frontend/ir/ir_emitter.cpp b/src/frontend/ir/ir_emitter.cpp
index e9c655ab..4381b3df 100644
--- a/src/frontend/ir/ir_emitter.cpp
+++ b/src/frontend/ir/ir_emitter.cpp
@@ -1997,11 +1997,20 @@ U16U32U64 IREmitter::FPRSqrtEstimate(const U16U32U64& a) {
     }
 }
 
-U32U64 IREmitter::FPRSqrtStepFused(const U32U64& a, const U32U64& b) {
-    if (a.GetType() == Type::U32) {
+U16U32U64 IREmitter::FPRSqrtStepFused(const U16U32U64& a, const U16U32U64& b) {
+    ASSERT(a.GetType() == b.GetType());
+
+    switch (a.GetType()) {
+    case Type::U16:
+        return Inst<U16>(Opcode::FPRSqrtStepFused16, a, b);
+    case Type::U32:
         return Inst<U32>(Opcode::FPRSqrtStepFused32, a, b);
+    case Type::U64:
+        return Inst<U64>(Opcode::FPRSqrtStepFused64, a, b);
+    default:
+        UNREACHABLE();
+        return U16U32U64{};
     }
-    return Inst<U64>(Opcode::FPRSqrtStepFused64, a, b);
 }
 
 U32U64 IREmitter::FPSqrt(const U32U64& a) {
diff --git a/src/frontend/ir/ir_emitter.h b/src/frontend/ir/ir_emitter.h
index 6fe44dab..0b80d924 100644
--- a/src/frontend/ir/ir_emitter.h
+++ b/src/frontend/ir/ir_emitter.h
@@ -310,7 +310,7 @@ public:
     U16U32U64 FPRecipStepFused(const U16U32U64& a, const U16U32U64& b);
     U16U32U64 FPRoundInt(const U16U32U64& a, FP::RoundingMode rounding, bool exact);
     U16U32U64 FPRSqrtEstimate(const U16U32U64& a);
-    U32U64 FPRSqrtStepFused(const U32U64& a, const U32U64& b);
+    U16U32U64 FPRSqrtStepFused(const U16U32U64& a, const U16U32U64& b);
     U32U64 FPSqrt(const U32U64& a);
     U32U64 FPSub(const U32U64& a, const U32U64& b, bool fpcr_controlled);
     U16 FPDoubleToHalf(const U64& a, FP::RoundingMode rounding);
diff --git a/src/frontend/ir/microinstruction.cpp b/src/frontend/ir/microinstruction.cpp
index 3577fee7..212f0169 100644
--- a/src/frontend/ir/microinstruction.cpp
+++ b/src/frontend/ir/microinstruction.cpp
@@ -287,6 +287,7 @@ bool Inst::ReadsFromAndWritesToFPSRCumulativeExceptionBits() const {
     case Opcode::FPRSqrtEstimate16:
     case Opcode::FPRSqrtEstimate32:
     case Opcode::FPRSqrtEstimate64:
+    case Opcode::FPRSqrtStepFused16:
     case Opcode::FPRSqrtStepFused32:
     case Opcode::FPRSqrtStepFused64:
     case Opcode::FPSqrt32:
diff --git a/src/frontend/ir/opcodes.inc b/src/frontend/ir/opcodes.inc
index 0adab017..ea43f4d0 100644
--- a/src/frontend/ir/opcodes.inc
+++ b/src/frontend/ir/opcodes.inc
@@ -506,6 +506,7 @@ OPCODE(FPRoundInt64,                                        U64,            U64,
 OPCODE(FPRSqrtEstimate16,                                   U16,            U16                                             )
 OPCODE(FPRSqrtEstimate32,                                   U32,            U32                                             )
 OPCODE(FPRSqrtEstimate64,                                   U64,            U64                                             )
+OPCODE(FPRSqrtStepFused16,                                  U16,            U16,            U16                             )
 OPCODE(FPRSqrtStepFused32,                                  U32,            U32,            U32                             )
 OPCODE(FPRSqrtStepFused64,                                  U64,            U64,            U64                             )
 OPCODE(FPSqrt32,                                            U32,            U32                                             )
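The new FPRSqrtStepFused16 opcode mirrors the 32- and 64-bit forms and, like FPRSqrtEstimate16, is listed among the instructions that read and write the cumulative FPSR exception bits. As background (not part of this patch), the estimate/step pair exists so guest code can refine a reciprocal square-root approximation by Newton-Raphson: one refinement of x that approximates 1/sqrt(d) is x' = x * (3 - (d*x)*x) / 2, where the second factor is exactly the value the step opcode produces for operands d*x and x. An illustrative sketch in plain doubles, with no special-case handling:

    #include <cmath>

    // How FPRSqrtEstimate / FPRSqrtStepFused (FRSQRTE / FRSQRTS) are
    // conventionally combined by guest code; illustrative only.
    double RefineRSqrtOnce(double d, double x) {
        const double step = (3.0 - (d * x) * x) / 2.0;  // FPRSqrtStepFused(d*x, x)
        return x * step;                                // improved 1/sqrt(d) estimate
    }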