frontend/ir_emitter: Add half-precision opcode variant of FPRSqrtStepFused

Authored by Lioncash on 2019-04-14 20:55:25 -04:00; committed by MerryMage
parent e3b2eb57b5
commit 824c551ba2
5 changed files with 57 additions and 40 deletions


@@ -946,52 +946,54 @@ template&lt;size_t fsize>
 static void EmitFPRSqrtStepFused(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
     using FPT = mp::unsigned_integer_of_size<fsize>;
 
+    if constexpr (fsize != 16) {
         if (code.DoesCpuSupport(Xbyak::util::Cpu::tFMA) && code.DoesCpuSupport(Xbyak::util::Cpu::tAVX)) {
             auto args = ctx.reg_alloc.GetArgumentInfo(inst);
 
             Xbyak::Label end, fallback;
 
             const Xbyak::Xmm operand1 = ctx.reg_alloc.UseXmm(args[0]);
             const Xbyak::Xmm operand2 = ctx.reg_alloc.UseXmm(args[1]);
             const Xbyak::Xmm result = ctx.reg_alloc.ScratchXmm();
 
             code.vmovaps(result, code.MConst(xword, FP::FPValue<FPT, false, 0, 3>()));
             FCODE(vfnmadd231s)(result, operand1, operand2);
 
             // Detect if the intermediate result is infinity or NaN or nearly an infinity.
             // Why do we need to care about infinities? This is because x86 doesn't allow us
             // to fuse the divide-by-two with the rest of the FMA operation. Therefore the
             // intermediate value may overflow and we would like to handle this case.
             const Xbyak::Reg32 tmp = ctx.reg_alloc.ScratchGpr().cvt32();
             code.vpextrw(tmp, result, fsize == 32 ? 1 : 3);
             code.and_(tmp.cvt16(), fsize == 32 ? 0x7f80 : 0x7ff0);
             code.cmp(tmp.cvt16(), fsize == 32 ? 0x7f00 : 0x7fe0);
             ctx.reg_alloc.Release(tmp);
 
             code.jae(fallback, code.T_NEAR);
 
             FCODE(vmuls)(result, result, code.MConst(xword, FP::FPValue<FPT, false, -1, 1>()));
             code.L(end);
 
             code.SwitchToFarCode();
             code.L(fallback);
             code.sub(rsp, 8);
             ABI_PushCallerSaveRegistersAndAdjustStackExcept(code, HostLocXmmIdx(result.getIdx()));
             code.movq(code.ABI_PARAM1, operand1);
             code.movq(code.ABI_PARAM2, operand2);
             code.mov(code.ABI_PARAM3.cvt32(), ctx.FPCR().Value());
             code.lea(code.ABI_PARAM4, code.ptr[code.r15 + code.GetJitStateInfo().offsetof_fpsr_exc]);
             code.CallFunction(&FP::FPRSqrtStepFused<FPT>);
             code.movq(result, code.ABI_RETURN);
             ABI_PopCallerSaveRegistersAndAdjustStackExcept(code, HostLocXmmIdx(result.getIdx()));
             code.add(rsp, 8);
             code.jmp(end, code.T_NEAR);
             code.SwitchToNearCode();
 
             ctx.reg_alloc.DefineValue(inst, result);
             return;
         }
+    }
 
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);

@@ -1001,6 +1003,10 @@ static void EmitFPRSqrtStepFused(BlockOfCode& code, EmitContext& ctx, IR::Inst*
     code.CallFunction(&FP::FPRSqrtStepFused<FPT>);
 }
 
+void EmitX64::EmitFPRSqrtStepFused16(EmitContext& ctx, IR::Inst* inst) {
+    EmitFPRSqrtStepFused<16>(code, ctx, inst);
+}
+
 void EmitX64::EmitFPRSqrtStepFused32(EmitContext& ctx, IR::Inst* inst) {
     EmitFPRSqrtStepFused<32>(code, ctx, inst);
 }

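The comment about infinities in the hunk above exists because the FRSQRTS step computes (3 - op1*op2) / 2, and the x86 FMA instruction can only fuse the multiply-add, not the final halving, so the unhalved intermediate may overflow to infinity even when the true result is representable. A minimal reference sketch of the step's arithmetic (illustrative only; the helper name is made up, and the real FP::FPRSqrtStepFused<FPT> operates on raw FPT bit patterns under FPCR control):

    #include <cmath>

    // Illustrative model of one reciprocal-square-root step, in double precision.
    double RSqrtStepRef(double op1, double op2) {
        const double fused = std::fma(-op1, op2, 3.0); // 3 - op1*op2 with a single rounding
        return fused / 2.0;                            // the divide-by-two x86 cannot fuse into the FMA
    }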

@@ -1997,11 +1997,20 @@ U16U32U64 IREmitter::FPRSqrtEstimate(const U16U32U64& a) {
     }
 }
 
-U32U64 IREmitter::FPRSqrtStepFused(const U32U64& a, const U32U64& b) {
-    if (a.GetType() == Type::U32) {
+U16U32U64 IREmitter::FPRSqrtStepFused(const U16U32U64& a, const U16U32U64& b) {
+    ASSERT(a.GetType() == b.GetType());
+
+    switch (a.GetType()) {
+    case Type::U16:
+        return Inst<U16>(Opcode::FPRSqrtStepFused16, a, b);
+    case Type::U32:
         return Inst<U32>(Opcode::FPRSqrtStepFused32, a, b);
+    case Type::U64:
+        return Inst<U64>(Opcode::FPRSqrtStepFused64, a, b);
+    default:
+        UNREACHABLE();
+        return U16U32U64{};
     }
-    return Inst<U64>(Opcode::FPRSqrtStepFused64, a, b);
 }
 
 U32U64 IREmitter::FPSqrt(const U32U64& a) {

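With FPRSqrtStepFused now accepting U16 values, a frontend translator can request the half-precision step the same way it already does for single and double precision; the switch above dispatches to the matching opcode. A hedged sketch of such a call site (the translator context, esize, and the V_scalar helper are assumptions for illustration, not part of this diff):

    // Hypothetical scalar FRSQRTS translation; esize may be 16, 32 or 64.
    const IR::U16U32U64 operand1 = V_scalar(esize, Vn);
    const IR::U16U32U64 operand2 = V_scalar(esize, Vm);
    // Dispatches to FPRSqrtStepFused16/32/64 based on the operand type.
    const IR::U16U32U64 result = ir.FPRSqrtStepFused(operand1, operand2);
    V_scalar(esize, Vd, result);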

@@ -310,7 +310,7 @@ public:
     U16U32U64 FPRecipStepFused(const U16U32U64& a, const U16U32U64& b);
     U16U32U64 FPRoundInt(const U16U32U64& a, FP::RoundingMode rounding, bool exact);
     U16U32U64 FPRSqrtEstimate(const U16U32U64& a);
-    U32U64 FPRSqrtStepFused(const U32U64& a, const U32U64& b);
+    U16U32U64 FPRSqrtStepFused(const U16U32U64& a, const U16U32U64& b);
     U32U64 FPSqrt(const U32U64& a);
     U32U64 FPSub(const U32U64& a, const U32U64& b, bool fpcr_controlled);
     U16 FPDoubleToHalf(const U64& a, FP::RoundingMode rounding);


@@ -287,6 +287,7 @@ bool Inst::ReadsFromAndWritesToFPSRCumulativeExceptionBits() const {
     case Opcode::FPRSqrtEstimate16:
     case Opcode::FPRSqrtEstimate32:
     case Opcode::FPRSqrtEstimate64:
+    case Opcode::FPRSqrtStepFused16:
     case Opcode::FPRSqrtStepFused32:
     case Opcode::FPRSqrtStepFused64:
     case Opcode::FPSqrt32:


@@ -506,6 +506,7 @@ OPCODE(FPRoundInt64, U64, U64,
 OPCODE(FPRSqrtEstimate16,      U16,      U16                )
 OPCODE(FPRSqrtEstimate32,      U32,      U32                )
 OPCODE(FPRSqrtEstimate64,      U64,      U64                )
+OPCODE(FPRSqrtStepFused16,     U16,      U16,      U16      )
 OPCODE(FPRSqrtStepFused32,     U32,      U32,      U32      )
 OPCODE(FPRSqrtStepFused64,     U64,      U64,      U64      )
 OPCODE(FPSqrt32,               U32,      U32                )