frontend/ir_emitter: Add half-precision variant of FPRoundInt
parent 61cec94a19
commit ad0c698f89
5 changed files with 25 additions and 7 deletions
@@ -843,7 +843,7 @@ static void EmitFPRound(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst, size_t fsize) {
     const bool exact = inst->GetArg(2).GetU1();
     const auto round_imm = ConvertRoundingModeToX64Immediate(rounding_mode);
 
-    if (code.DoesCpuSupport(Xbyak::util::Cpu::tSSE41) && round_imm && !exact) {
+    if (fsize != 16 && code.DoesCpuSupport(Xbyak::util::Cpu::tSSE41) && round_imm && !exact) {
         if (fsize == 64) {
             FPTwoOp<64>(code, ctx, inst, [&](Xbyak::Xmm result) {
                 code.roundsd(result, result, *round_imm);
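Note: the new `fsize != 16` guard exists because SSE4.1's ROUNDSS/ROUNDSD instructions only operate on 32- and 64-bit floats, so a half-precision operand has no single-instruction fast path and must fall through to the lookup-table fallback below. A minimal standalone sketch of what that fast path amounts to for the supported widths (illustrative only; compile with SSE4.1 enabled, e.g. -msse4.1):

// Illustrative only: the SSE4.1 fast path for the two supported widths.
// There is no scalar rounding instruction for binary16, which is why
// fsize == 16 is excluded above.
#include <smmintrin.h>

float RoundTowardsZero32(float x) {
    __m128 v = _mm_set_ss(x);
    v = _mm_round_ss(v, v, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);  // roundss
    return _mm_cvtss_f32(v);
}

double RoundTowardsZero64(double x) {
    __m128d v = _mm_set_sd(x);
    v = _mm_round_sd(v, v, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);  // roundsd
    return _mm_cvtsd_f64(v);
}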
@@ -857,7 +857,9 @@ static void EmitFPRound(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst, size_t fsize) {
         return;
     }
 
-    using fsize_list = mp::list<mp::vlift<size_t(32)>, mp::vlift<size_t(64)>>;
+    using fsize_list = mp::list<mp::vlift<size_t(16)>,
+                                mp::vlift<size_t(32)>,
+                                mp::vlift<size_t(64)>>;
     using rounding_list = mp::list<
         std::integral_constant<FP::RoundingMode, FP::RoundingMode::ToNearest_TieEven>,
         std::integral_constant<FP::RoundingMode, FP::RoundingMode::TowardsPlusInfinity>,
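Adding size_t(16) to fsize_list extends the fallback lookup table so that half-precision instantiations are generated alongside the 32- and 64-bit ones. A simplified, self-contained sketch of that dispatch idea follows; the names, the std::map, and the stub body are stand-ins for dynarmic's mp-based machinery and soft-float routines, not the actual implementation:

// Simplified stand-in for the fallback dispatch: one routine is instantiated
// per (fsize, rounding_mode, exact) combination, and the JIT picks the right
// one at emit time.
#include <cstddef>
#include <cstdint>
#include <map>
#include <tuple>

enum class RoundingMode { ToNearest_TieEven, TowardsPlusInfinity, TowardsMinusInfinity, TowardsZero };

template<std::size_t fsize, RoundingMode mode, bool exact>
std::uint64_t RoundToIntegral(std::uint64_t op) {
    // A real implementation would round the low fsize bits of op as an
    // IEEE-754 value under `mode`, raising Inexact when `exact` requires it.
    return op;
}

using Key = std::tuple<std::size_t, RoundingMode, bool>;
using Fn  = std::uint64_t (*)(std::uint64_t);

// With size_t(16) in the width list, half-precision entries now appear
// alongside the 32- and 64-bit ones.
const std::map<Key, Fn> lut{
    {Key{16, RoundingMode::ToNearest_TieEven, false}, &RoundToIntegral<16, RoundingMode::ToNearest_TieEven, false>},
    {Key{32, RoundingMode::ToNearest_TieEven, false}, &RoundToIntegral<32, RoundingMode::ToNearest_TieEven, false>},
    {Key{64, RoundingMode::ToNearest_TieEven, false}, &RoundToIntegral<64, RoundingMode::ToNearest_TieEven, false>},
    // ... remaining (fsize, rounding_mode, exact) combinations ...
};

// At emit time: lut.at(Key{fsize, rounding_mode, exact}) yields the routine to call.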
@@ -897,6 +899,10 @@ static void EmitFPRound(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst, size_t fsize) {
     code.CallFunction(lut.at(std::make_tuple(fsize, rounding_mode, exact)));
 }
 
+void EmitX64::EmitFPRoundInt16(EmitContext& ctx, IR::Inst* inst) {
+    EmitFPRound(code, ctx, inst, 16);
+}
+
 void EmitX64::EmitFPRoundInt32(EmitContext& ctx, IR::Inst* inst) {
     EmitFPRound(code, ctx, inst, 32);
 }
@@ -1950,11 +1950,21 @@ U32U64 IREmitter::FPRecipStepFused(const U32U64& a, const U32U64& b) {
     return Inst<U64>(Opcode::FPRecipStepFused64, a, b);
 }
 
-U32U64 IREmitter::FPRoundInt(const U32U64& a, FP::RoundingMode rounding, bool exact) {
-    if (a.GetType() == Type::U32) {
-        return Inst<U32>(Opcode::FPRoundInt32, a, static_cast<u8>(rounding), Imm1(exact));
+U16U32U64 IREmitter::FPRoundInt(const U16U32U64& a, FP::RoundingMode rounding, bool exact) {
+    const u8 rounding_value = static_cast<u8>(rounding);
+    const IR::U1 exact_imm = Imm1(exact);
+
+    switch (a.GetType()) {
+    case Type::U16:
+        return Inst<U16>(Opcode::FPRoundInt16, a, rounding_value, exact_imm);
+    case Type::U32:
+        return Inst<U32>(Opcode::FPRoundInt32, a, rounding_value, exact_imm);
+    case Type::U64:
+        return Inst<U64>(Opcode::FPRoundInt64, a, rounding_value, exact_imm);
+    default:
+        UNREACHABLE();
+        return U16U32U64{};
     }
-    return Inst<U64>(Opcode::FPRoundInt64, a, static_cast<u8>(rounding), Imm1(exact));
 }
 
 U32U64 IREmitter::FPRSqrtEstimate(const U32U64& a) {
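With the emitter now accepting U16 operands, a half-precision translator can route through the same helper as the wider widths. A hypothetical caller sketch follows; the function name and surrounding context are assumptions for illustration, not part of this commit:

// Hypothetical caller, for illustration only.
void TranslateFRINTN_Half(IR::IREmitter& ir, const IR::U16& operand) {
    // A Type::U16 operand now selects Opcode::FPRoundInt16 inside FPRoundInt.
    const IR::U16U32U64 result = ir.FPRoundInt(operand, FP::RoundingMode::ToNearest_TieEven, false);
    // ... write `result` back to the destination register ...
}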
@@ -308,7 +308,7 @@ public:
     U32U64 FPRecipEstimate(const U32U64& a);
     U16U32U64 FPRecipExponent(const U16U32U64& a);
     U32U64 FPRecipStepFused(const U32U64& a, const U32U64& b);
-    U32U64 FPRoundInt(const U32U64& a, FP::RoundingMode rounding, bool exact);
+    U16U32U64 FPRoundInt(const U16U32U64& a, FP::RoundingMode rounding, bool exact);
     U32U64 FPRSqrtEstimate(const U32U64& a);
     U32U64 FPRSqrtStepFused(const U32U64& a, const U32U64& b);
     U32U64 FPSqrt(const U32U64& a);
@@ -279,6 +279,7 @@ bool Inst::ReadsFromAndWritesToFPSRCumulativeExceptionBits() const {
     case Opcode::FPRecipExponent64:
     case Opcode::FPRecipStepFused32:
     case Opcode::FPRecipStepFused64:
+    case Opcode::FPRoundInt16:
     case Opcode::FPRoundInt32:
     case Opcode::FPRoundInt64:
     case Opcode::FPRSqrtEstimate32:
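Listing FPRoundInt16 here marks it as an instruction that both reads and updates the FPSR cumulative exception bits (round-to-integral can raise Inexact, particularly when the exact flag is set), so passes must treat it as having side effects beyond its result value. A toy, self-contained illustration of how such a predicate is typically consumed; the Inst struct below is a stand-in, not dynarmic's:

// Toy dead-code-elimination guard: an instruction with FPSR side effects must
// be kept even if nothing reads its result.
struct Inst {
    bool result_is_used;
    bool reads_and_writes_fpsr_exception_bits;
};

bool CanBeEliminated(const Inst& inst) {
    if (inst.reads_and_writes_fpsr_exception_bits) {
        return false;  // architectural side effect beyond the result value
    }
    return !inst.result_is_used;
}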
@@ -498,6 +498,7 @@ OPCODE(FPRecipExponent32, U32, U32 )
 OPCODE(FPRecipExponent64,  U64, U64         )
 OPCODE(FPRecipStepFused32, U32, U32, U32    )
 OPCODE(FPRecipStepFused64, U64, U64, U64    )
+OPCODE(FPRoundInt16,       U16, U16, U8, U1 )
 OPCODE(FPRoundInt32,       U32, U32, U8, U1 )
 OPCODE(FPRoundInt64,       U64, U64, U8, U1 )
 OPCODE(FPRSqrtEstimate32,  U32, U32         )
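For context, each OPCODE entry names the opcode, its result type, then its argument types; FPRoundInt16 therefore maps a U16 operand to a U16 result given a U8 rounding-mode immediate and a U1 exact flag. A generic, self-contained illustration of the X-macro pattern such a table is typically expanded with (the three-entry list below is a stand-in, not the project's real opcode table, which lives in an .inc file that is re-included with different OPCODE definitions):

// Generic X-macro sketch: expand the same opcode list twice, once to build an
// enum of names and once to build a parallel table of result types.
enum class Type { U1, U8, U16, U32, U64 };

#define OPCODE_LIST(X)                     \
    X(FPRoundInt16, U16, U16, U8, U1)      \
    X(FPRoundInt32, U32, U32, U8, U1)      \
    X(FPRoundInt64, U64, U64, U8, U1)

// Expansion 1: an enum of opcode names.
enum class Opcode {
#define MAKE_ENUM(name, ret, ...) name,
    OPCODE_LIST(MAKE_ENUM)
#undef MAKE_ENUM
};

// Expansion 2: a parallel table of result types, indexed by opcode.
constexpr Type result_type[] = {
#define MAKE_RESULT(name, ret, ...) Type::ret,
    OPCODE_LIST(MAKE_RESULT)
#undef MAKE_RESULT
};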