frontend/ir_emitter: Add half-precision opcode for FPVectorRecipStepFused

Authored by Lioncash on 2019-04-13 18:54:53 -04:00; committed by MerryMage
parent 6da0411111
commit 5d5c9f149f
4 changed files with 34 additions and 24 deletions


@@ -1110,41 +1110,47 @@ static void EmitRecipStepFused(BlockOfCode& code, EmitContext& ctx, IR::Inst* in
        }
    };

    if constexpr (fsize != 16) {
        if (code.DoesCpuSupport(Xbyak::util::Cpu::tFMA) && code.DoesCpuSupport(Xbyak::util::Cpu::tAVX)) {
            auto args = ctx.reg_alloc.GetArgumentInfo(inst);

            const Xbyak::Xmm result = ctx.reg_alloc.ScratchXmm();
            const Xbyak::Xmm operand1 = ctx.reg_alloc.UseXmm(args[0]);
            const Xbyak::Xmm operand2 = ctx.reg_alloc.UseXmm(args[1]);
            const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();

            Xbyak::Label end, fallback;

            code.movaps(result, GetVectorOf<fsize, false, 0, 2>(code));
            FCODE(vfnmadd231p)(result, operand1, operand2);

            FCODE(vcmpunordp)(tmp, result, result);
            code.vptest(tmp, tmp);
            code.jnz(fallback, code.T_NEAR);
            code.L(end);

            code.SwitchToFarCode();
            code.L(fallback);
            code.sub(rsp, 8);
            ABI_PushCallerSaveRegistersAndAdjustStackExcept(code, HostLocXmmIdx(result.getIdx()));
            EmitThreeOpFallbackWithoutRegAlloc(code, ctx, result, operand1, operand2, fallback_fn);
            ABI_PopCallerSaveRegistersAndAdjustStackExcept(code, HostLocXmmIdx(result.getIdx()));
            code.add(rsp, 8);
            code.jmp(end, code.T_NEAR);
            code.SwitchToNearCode();

            ctx.reg_alloc.DefineValue(inst, result);
            return;
        }
    }

    EmitThreeOpFallback(code, ctx, inst, fallback_fn);
}

void EmitX64::EmitFPVectorRecipStepFused16(EmitContext& ctx, IR::Inst* inst) {
    EmitRecipStepFused<16>(code, ctx, inst);
}

void EmitX64::EmitFPVectorRecipStepFused32(EmitContext& ctx, IR::Inst* inst) {
    EmitRecipStepFused<32>(code, ctx, inst);
}

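For reference, the operation being emitted is the ARM reciprocal-step refinement: each lane computes 2 - a*b with a single rounding, which is what the vfnmadd231p against the constant-2.0 vector does on the fast path above. Because x64 has no packed half-precision arithmetic, the new if constexpr (fsize != 16) guard keeps the 16-bit case off that path, so it always goes through EmitThreeOpFallback. A minimal standalone sketch of one lane, assuming plain C++ and std::fma (the real soft-float fallback additionally honours FPCR rounding and flush-to-zero settings and accumulates FPSR exception bits):

#include <cmath>
#include <limits>

// Illustrative sketch only, not dynarmic's implementation: one lane of the
// recip-step-fused operation, 2 - a*b, evaluated with a single rounding.
inline double RecipStepFusedLane(double a, double b) {
    if (std::isnan(a) || std::isnan(b)) {
        return std::numeric_limits<double>::quiet_NaN();  // simplified NaN propagation
    }
    if ((a == 0.0 && std::isinf(b)) || (std::isinf(a) && b == 0.0)) {
        return 2.0;  // 0 * inf is treated as 0 here, so the step yields exactly 2.0
    }
    return std::fma(-a, b, 2.0);  // single rounding, mirrors vfnmadd231p on the fast path
}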

@@ -2277,6 +2277,8 @@ U128 IREmitter::FPVectorRecipEstimate(size_t esize, const U128& a) {
U128 IREmitter::FPVectorRecipStepFused(size_t esize, const U128& a, const U128& b) {
    switch (esize) {
    case 16:
        return Inst<U128>(Opcode::FPVectorRecipStepFused16, a, b);
    case 32:
        return Inst<U128>(Opcode::FPVectorRecipStepFused32, a, b);
    case 64:
        return Inst<U128>(Opcode::FPVectorRecipStepFused64, a, b);

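With the 16-bit case added to the switch above, a frontend translator can request esize == 16 alongside 32 and 64. A hedged usage sketch follows; the visitor name and the GetQ/SetQ register helpers are assumptions for illustration, not part of this change:

// Hypothetical A64 handler sketch: lowers a half-precision vector
// reciprocal-step instruction through the IR helper shown above.
bool TranslatorVisitor::FRECPS_half_vector(Vec Vm, Vec Vn, Vec Vd) {
    const IR::U128 operand1 = ir.GetQ(Vn);
    const IR::U128 operand2 = ir.GetQ(Vm);
    const IR::U128 result = ir.FPVectorRecipStepFused(16, operand1, operand2);  // new esize
    ir.SetQ(Vd, result);
    return true;
}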

@@ -338,6 +338,7 @@ bool Inst::ReadsFromAndWritesToFPSRCumulativeExceptionBits() const {
    case Opcode::FPVectorPairedAdd64:
    case Opcode::FPVectorRecipEstimate32:
    case Opcode::FPVectorRecipEstimate64:
    case Opcode::FPVectorRecipStepFused16:
    case Opcode::FPVectorRecipStepFused32:
    case Opcode::FPVectorRecipStepFused64:
    case Opcode::FPVectorRoundInt16:


@@ -573,6 +573,7 @@ OPCODE(FPVectorPairedAddLower32, U128, U128
OPCODE(FPVectorPairedAddLower64, U128, U128, U128 )
OPCODE(FPVectorRecipEstimate32, U128, U128 )
OPCODE(FPVectorRecipEstimate64, U128, U128 )
OPCODE(FPVectorRecipStepFused16, U128, U128, U128 )
OPCODE(FPVectorRecipStepFused32, U128, U128, U128 )
OPCODE(FPVectorRecipStepFused64, U128, U128, U128 )
OPCODE(FPVectorRoundInt16, U128, U128, U8, U1 )
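
The opcode table above is consumed X-macro style: the .inc file is included more than once with different definitions of OPCODE to stamp out the opcode enum and the per-opcode return/argument type metadata. A compilable mini-illustration of that pattern, with a small inlined table standing in for the real opcodes.inc:

// X-macro sketch (not dynarmic's actual table code): the same row list is
// expanded once into an enum and once into a name table.
#define RECIP_STEP_OPCODES(X)                      \
    X(FPVectorRecipStepFused16, U128, U128, U128)  \
    X(FPVectorRecipStepFused32, U128, U128, U128)  \
    X(FPVectorRecipStepFused64, U128, U128, U128)

enum class Opcode {
#define X(name, ...) name,
    RECIP_STEP_OPCODES(X)
#undef X
};

constexpr const char* opcode_names[] = {
#define X(name, ...) #name,
    RECIP_STEP_OPCODES(X)
#undef X
};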