ir/frontend: Add half-precision opcode for FPVectorMulAdd
parent 5f74d25bf7
commit ec6b3ae084

4 changed files with 37 additions and 27 deletions
@@ -908,44 +908,50 @@ void EmitFPVectorMulAdd(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
         }
     };
 
-    if (code.DoesCpuSupport(Xbyak::util::Cpu::tFMA) && code.DoesCpuSupport(Xbyak::util::Cpu::tAVX)) {
-        auto args = ctx.reg_alloc.GetArgumentInfo(inst);
+    if constexpr (fsize != 16) {
+        if (code.DoesCpuSupport(Xbyak::util::Cpu::tFMA) && code.DoesCpuSupport(Xbyak::util::Cpu::tAVX)) {
+            auto args = ctx.reg_alloc.GetArgumentInfo(inst);
 
-        const Xbyak::Xmm result = ctx.reg_alloc.ScratchXmm();
-        const Xbyak::Xmm xmm_a = ctx.reg_alloc.UseXmm(args[0]);
-        const Xbyak::Xmm xmm_b = ctx.reg_alloc.UseXmm(args[1]);
-        const Xbyak::Xmm xmm_c = ctx.reg_alloc.UseXmm(args[2]);
-        const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
+            const Xbyak::Xmm result = ctx.reg_alloc.ScratchXmm();
+            const Xbyak::Xmm xmm_a = ctx.reg_alloc.UseXmm(args[0]);
+            const Xbyak::Xmm xmm_b = ctx.reg_alloc.UseXmm(args[1]);
+            const Xbyak::Xmm xmm_c = ctx.reg_alloc.UseXmm(args[2]);
+            const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
 
-        Xbyak::Label end, fallback;
+            Xbyak::Label end, fallback;
 
-        code.movaps(result, xmm_a);
-        FCODE(vfmadd231p)(result, xmm_b, xmm_c);
+            code.movaps(result, xmm_a);
+            FCODE(vfmadd231p)(result, xmm_b, xmm_c);
 
-        code.movaps(tmp, GetNegativeZeroVector<fsize>(code));
-        code.andnps(tmp, result);
-        FCODE(vcmpeq_uqp)(tmp, tmp, GetSmallestNormalVector<fsize>(code));
-        code.vptest(tmp, tmp);
-        code.jnz(fallback, code.T_NEAR);
-        code.L(end);
+            code.movaps(tmp, GetNegativeZeroVector<fsize>(code));
+            code.andnps(tmp, result);
+            FCODE(vcmpeq_uqp)(tmp, tmp, GetSmallestNormalVector<fsize>(code));
+            code.vptest(tmp, tmp);
+            code.jnz(fallback, code.T_NEAR);
+            code.L(end);
 
-        code.SwitchToFarCode();
-        code.L(fallback);
-        code.sub(rsp, 8);
-        ABI_PushCallerSaveRegistersAndAdjustStackExcept(code, HostLocXmmIdx(result.getIdx()));
-        EmitFourOpFallbackWithoutRegAlloc(code, ctx, result, xmm_a, xmm_b, xmm_c, fallback_fn);
-        ABI_PopCallerSaveRegistersAndAdjustStackExcept(code, HostLocXmmIdx(result.getIdx()));
-        code.add(rsp, 8);
-        code.jmp(end, code.T_NEAR);
-        code.SwitchToNearCode();
+            code.SwitchToFarCode();
+            code.L(fallback);
+            code.sub(rsp, 8);
+            ABI_PushCallerSaveRegistersAndAdjustStackExcept(code, HostLocXmmIdx(result.getIdx()));
+            EmitFourOpFallbackWithoutRegAlloc(code, ctx, result, xmm_a, xmm_b, xmm_c, fallback_fn);
+            ABI_PopCallerSaveRegistersAndAdjustStackExcept(code, HostLocXmmIdx(result.getIdx()));
+            code.add(rsp, 8);
+            code.jmp(end, code.T_NEAR);
+            code.SwitchToNearCode();
 
-        ctx.reg_alloc.DefineValue(inst, result);
-        return;
+            ctx.reg_alloc.DefineValue(inst, result);
+            return;
+        }
     }
 
     EmitFourOpFallback(code, ctx, inst, fallback_fn);
 }
 
+void EmitX64::EmitFPVectorMulAdd16(EmitContext& ctx, IR::Inst* inst) {
+    EmitFPVectorMulAdd<16>(code, ctx, inst);
+}
+
 void EmitX64::EmitFPVectorMulAdd32(EmitContext& ctx, IR::Inst* inst) {
     EmitFPVectorMulAdd<32>(code, ctx, inst);
 }
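Note: the new `if constexpr (fsize != 16)` guard keeps half precision off the FMA fast path (the emitted vfmadd231 forms only cover single and double precision), so `FPVectorMulAdd16` always reaches `EmitFourOpFallback`. The snippet below is only a minimal, self-contained sketch of the per-lane semantics that fallback computes, result[i] = addend[i] + op1[i] * op2[i] with a single rounding; it is not dynarmic's `fallback_fn`, which works on `VectorArray<FPT>` through soft-float `FP::FPMulAdd` with FPCR/FPSR state.

// Minimal sketch of the per-lane fused multiply-add the fallback performs.
// std::fma on float lanes is used for illustration only; the real fallback
// uses dynarmic's soft-float routines and tracks FPCR/FPSR.
#include <array>
#include <cmath>
#include <cstddef>
#include <cstdio>

template <typename T, std::size_t N>
std::array<T, N> VectorMulAdd(const std::array<T, N>& addend,
                              const std::array<T, N>& op1,
                              const std::array<T, N>& op2) {
    std::array<T, N> result{};
    for (std::size_t i = 0; i < N; ++i) {
        // Single rounding: multiply and add are fused.
        result[i] = std::fma(op1[i], op2[i], addend[i]);
    }
    return result;
}

int main() {
    const std::array<float, 4> a{1.0f, 2.0f, 3.0f, 4.0f};
    const std::array<float, 4> b{0.5f, 0.5f, 0.5f, 0.5f};
    const std::array<float, 4> c{2.0f, 2.0f, 2.0f, 2.0f};
    const auto r = VectorMulAdd(a, b, c);  // {2.0, 3.0, 4.0, 5.0}
    std::printf("%f %f %f %f\n", r[0], r[1], r[2], r[3]);
}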
@@ -2173,6 +2173,8 @@ U128 IREmitter::FPVectorMul(size_t esize, const U128& a, const U128& b) {
 
 U128 IREmitter::FPVectorMulAdd(size_t esize, const U128& a, const U128& b, const U128& c) {
     switch (esize) {
+    case 16:
+        return Inst<U128>(Opcode::FPVectorMulAdd16, a, b, c);
     case 32:
         return Inst<U128>(Opcode::FPVectorMulAdd32, a, b, c);
     case 64:
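Note: with `case 16` in place, `IREmitter::FPVectorMulAdd` routes half-precision element sizes to the new opcode alongside the existing 32- and 64-bit cases. The mock below sketches that esize-to-opcode dispatch in isolation; the `U128` struct, the emitter type, and the returned handle are stand-ins, not dynarmic's real types.

// Self-contained mock of the esize dispatch added in the hunk above.
#include <cstddef>
#include <cstdio>
#include <stdexcept>

enum class Opcode { FPVectorMulAdd16, FPVectorMulAdd32, FPVectorMulAdd64 };

struct U128 { Opcode produced_by; };  // stand-in for an IR value handle

struct MockIREmitter {
    U128 FPVectorMulAdd(std::size_t esize, U128, U128, U128) {
        switch (esize) {
        case 16: return U128{Opcode::FPVectorMulAdd16};  // newly reachable
        case 32: return U128{Opcode::FPVectorMulAdd32};
        case 64: return U128{Opcode::FPVectorMulAdd64};
        default: throw std::logic_error("invalid esize for FPVectorMulAdd");
        }
    }
};

int main() {
    MockIREmitter ir;
    const U128 result = ir.FPVectorMulAdd(16, U128{}, U128{}, U128{});
    std::printf("opcode index: %d\n", static_cast<int>(result.produced_by));  // 0
}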
@@ -327,6 +327,7 @@ bool Inst::ReadsFromAndWritesToFPSRCumulativeExceptionBits() const {
     case Opcode::FPVectorGreaterEqual64:
     case Opcode::FPVectorMul32:
     case Opcode::FPVectorMul64:
+    case Opcode::FPVectorMulAdd16:
     case Opcode::FPVectorMulAdd32:
     case Opcode::FPVectorMulAdd64:
     case Opcode::FPVectorPairedAddLower32:
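Note: listing `FPVectorMulAdd16` in `ReadsFromAndWritesToFPSRCumulativeExceptionBits` marks the instruction as one that both depends on and updates the accumulated FPSR exception flags. The sketch below is a hypothetical consumer of such a predicate, not dynarmic's actual optimizer; it only illustrates why an instruction with this property cannot be dropped merely because its vector result goes unused.

// Hypothetical dead-code check; opcodes and helpers are illustrative only.
#include <cstdio>

enum class Opcode { FPVectorMulAdd16, VectorAdd16 /* illustrative */ };

// Reduced mirror of the predicate extended in the hunk above.
bool ReadsFromAndWritesToFPSRCumulativeExceptionBits(Opcode op) {
    switch (op) {
    case Opcode::FPVectorMulAdd16:
        return true;
    default:
        return false;
    }
}

bool SafeToRemoveIfResultUnused(Opcode op) {
    // Removing an FPSR-accumulating instruction would lose exception flags,
    // even when its vector result is never read.
    return !ReadsFromAndWritesToFPSRCumulativeExceptionBits(op);
}

int main() {
    std::printf("%d %d\n",
                SafeToRemoveIfResultUnused(Opcode::FPVectorMulAdd16),  // 0
                SafeToRemoveIfResultUnused(Opcode::VectorAdd16));      // 1
}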
@@ -553,6 +553,7 @@ OPCODE(FPVectorMin32, U128, U128, U128 )
 OPCODE(FPVectorMin64, U128, U128, U128 )
 OPCODE(FPVectorMul32, U128, U128, U128 )
 OPCODE(FPVectorMul64, U128, U128, U128 )
+OPCODE(FPVectorMulAdd16, U128, U128, U128, U128 )
 OPCODE(FPVectorMulAdd32, U128, U128, U128, U128 )
 OPCODE(FPVectorMulAdd64, U128, U128, U128, U128 )
 OPCODE(FPVectorMulX32, U128, U128, U128 )
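Note: the new row declares the opcode's signature, a U128 result produced from three U128 operands. A table like this is typically consumed as an x-macro, expanded once per definition of OPCODE; the sketch below shows that pattern with a three-row stand-in list instead of the real include, so every name besides the three opcodes is illustrative.

// Minimal sketch of the x-macro pattern an OPCODE table is consumed with.
#include <cstdio>

#define OPCODE_LIST(X)                          \
    X(FPVectorMulAdd16, U128, U128, U128, U128) \
    X(FPVectorMulAdd32, U128, U128, U128, U128) \
    X(FPVectorMulAdd64, U128, U128, U128, U128)

// First expansion: build an enum of opcode names.
enum class Opcode {
#define OPCODE(name, ...) name,
    OPCODE_LIST(OPCODE)
#undef OPCODE
};

// Second expansion: build a parallel table of printable names.
constexpr const char* opcode_names[] = {
#define OPCODE(name, ...) #name,
    OPCODE_LIST(OPCODE)
#undef OPCODE
};

int main() {
    std::printf("%s\n", opcode_names[static_cast<int>(Opcode::FPVectorMulAdd16)]);
}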