frontend/ir/ir_emitter: Alter parameters of FPDoubleToSingle() and FPSingleToDouble() to pass along desired rounding mode
This will be necessary to special-case the non-IEEE "Von Neumann" round-to-odd rounding mode.
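Round-to-odd ("Von Neumann rounding") is not one of the four IEEE 754 rounding directions, so the SSE conversion instructions cannot produce it directly; the backend change below therefore falls back to the FP::FPConvert helper for that mode. As a rough illustration of what the mode computes, here is a minimal sketch assuming the usual definition (truncate toward zero, then force the low mantissa bit to 1 whenever the conversion was inexact). The names are illustrative, not dynarmic code, and it assumes static_cast honours the dynamic rounding mode (FENV_ACCESS semantics).

    // Sketch only: double -> single conversion with round-to-odd.
    #include <cfenv>
    #include <cmath>
    #include <cstdint>
    #include <cstring>

    float ConvertDoubleToSingleToOdd(double value) {
        // Step 1: narrow with round-toward-zero so we never round away from zero.
        const int saved_mode = std::fegetround();
        std::fesetround(FE_TOWARDZERO);
        float narrowed = static_cast<float>(value);
        std::fesetround(saved_mode);

        // Step 2: if the narrowing was inexact (widening back does not reproduce
        // the input), "stick" the lost bits into the low mantissa bit so the
        // result's significand becomes odd.
        if (std::isfinite(narrowed) && static_cast<double>(narrowed) != value) {
            std::uint32_t bits;
            std::memcpy(&bits, &narrowed, sizeof(bits));
            bits |= 1u;
            std::memcpy(&narrowed, &bits, sizeof(bits));
        }
        return narrowed;
    }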
parent 95af9dafbe
commit 7c81a58ed3

7 changed files with 52 additions and 29 deletions
@@ -1034,26 +1034,46 @@ void EmitX64::EmitFPCompare64(EmitContext& ctx, IR::Inst* inst) {
 
 void EmitX64::EmitFPSingleToDouble(EmitContext& ctx, IR::Inst* inst) {
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-    const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
+    const auto rounding_mode = static_cast<FP::RoundingMode>(args[1].GetImmediateU8());
 
-    code.cvtss2sd(result, result);
-    if (ctx.FPSCR_DN()) {
-        ForceToDefaultNaN<64>(code, result);
-    }
+    // We special-case the non-IEEE-defined ToOdd rounding mode.
+    if (rounding_mode == ctx.FPSCR_RMode() && rounding_mode != FP::RoundingMode::ToOdd) {
+        const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
 
-    ctx.reg_alloc.DefineValue(inst, result);
+        code.cvtss2sd(result, result);
+        if (ctx.FPSCR_DN()) {
+            ForceToDefaultNaN<64>(code, result);
+        }
+        ctx.reg_alloc.DefineValue(inst, result);
+    } else {
+        ctx.reg_alloc.HostCall(inst, args[0]);
+        code.mov(code.ABI_PARAM2.cvt32(), ctx.FPCR());
+        code.mov(code.ABI_PARAM3.cvt32(), static_cast<u32>(rounding_mode));
+        code.lea(code.ABI_PARAM4, code.ptr[code.r15 + code.GetJitStateInfo().offsetof_fpsr_exc]);
+        code.CallFunction(&FP::FPConvert<u64, u32>);
+    }
 }
 
 void EmitX64::EmitFPDoubleToSingle(EmitContext& ctx, IR::Inst* inst) {
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-    const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
+    const auto rounding_mode = static_cast<FP::RoundingMode>(args[1].GetImmediateU8());
 
-    code.cvtsd2ss(result, result);
-    if (ctx.FPSCR_DN()) {
-        ForceToDefaultNaN<32>(code, result);
-    }
+    // We special-case the non-IEEE-defined ToOdd rounding mode.
+    if (rounding_mode == ctx.FPSCR_RMode() && rounding_mode != FP::RoundingMode::ToOdd) {
+        const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
 
-    ctx.reg_alloc.DefineValue(inst, result);
+        code.cvtsd2ss(result, result);
+        if (ctx.FPSCR_DN()) {
+            ForceToDefaultNaN<32>(code, result);
+        }
+        ctx.reg_alloc.DefineValue(inst, result);
+    } else {
+        ctx.reg_alloc.HostCall(inst, args[0]);
+        code.mov(code.ABI_PARAM2.cvt32(), ctx.FPCR());
+        code.mov(code.ABI_PARAM3.cvt32(), static_cast<u32>(rounding_mode));
+        code.lea(code.ABI_PARAM4, code.ptr[code.r15 + code.GetJitStateInfo().offsetof_fpsr_exc]);
+        code.CallFunction(&FP::FPConvert<u32, u64>);
+    }
 }
 
 template<size_t fsize, bool unsigned_, size_t isize>
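Why the fast path above excludes FP::RoundingMode::ToOdd: cvtss2sd and cvtsd2ss round according to the MXCSR rounding-control field, which can only encode the four IEEE 754 directions, so a ToOdd request (or any mode that differs from the guest FPSCR, which MXCSR is programmed from) has to take the HostCall path into FP::FPConvert. For reference, a sketch of the MXCSR encodings; the enum name is illustrative, not dynarmic code.

    // MXCSR rounding-control field (RC, bits 13-14). Round-to-odd has no
    // encoding here, hence the software fallback in the diff above.
    enum class MxcsrRoundingControl : unsigned {
        RoundToNearestEven = 0b00,
        RoundDown          = 0b01,  // toward negative infinity
        RoundUp            = 0b10,  // toward positive infinity
        RoundTowardZero    = 0b11,
    };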
@@ -479,12 +479,13 @@ bool ArmTranslatorVisitor::vfp2_VCVT_f_to_f(Cond cond, bool D, size_t Vd, bool s
     const auto d = ToExtReg(!sz, Vd, D); // Destination is of opposite size to source
     const auto m = ToExtReg(sz, Vm, M);
     const auto reg_m = ir.GetExtendedRegister(m);
+    const auto rounding_mode = ir.current_location.FPSCR().RMode();
 
     if (sz) {
-        const auto result = ir.FPDoubleToSingle(reg_m, true);
+        const auto result = ir.FPDoubleToSingle(reg_m, rounding_mode);
         ir.SetExtendedRegister(d, result);
     } else {
-        const auto result = ir.FPSingleToDouble(reg_m, true);
+        const auto result = ir.FPSingleToDouble(reg_m, rounding_mode);
         ir.SetExtendedRegister(d, result);
     }
 
@@ -104,7 +104,9 @@ bool TranslatorVisitor::FCVT_float(Imm<2> type, Imm<2> opc, Vec Vn, Vec Vd) {
         return UnallocatedEncoding();
     }
 
-    IR::UAny operand = V_scalar(*srcsize, Vn);
+    const IR::UAny operand = V_scalar(*srcsize, Vn);
+    const auto rounding_mode = ir.current_location->FPCR().RMode();
+
     IR::UAny result;
     switch (*srcsize) {
     case 16:
@@ -120,7 +122,7 @@ bool TranslatorVisitor::FCVT_float(Imm<2> type, Imm<2> opc, Vec Vn, Vec Vd) {
     case 16:
         return InterpretThisInstruction();
     case 64:
-        result = ir.FPSingleToDouble(operand, true);
+        result = ir.FPSingleToDouble(operand, rounding_mode);
         break;
     }
     break;
@@ -129,7 +131,7 @@ bool TranslatorVisitor::FCVT_float(Imm<2> type, Imm<2> opc, Vec Vn, Vec Vd) {
     case 16:
         return InterpretThisInstruction();
     case 32:
-        result = ir.FPDoubleToSingle(operand, true);
+        result = ir.FPDoubleToSingle(operand, rounding_mode);
         break;
     }
     break;
@@ -348,10 +348,11 @@ bool TranslatorVisitor::FCVTL(bool Q, bool sz, Vec Vn, Vec Vd) {
     }
 
     const IR::U128 part = Vpart(64, Vn, Q);
+    const auto rounding_mode = ir.current_location->FPCR().RMode();
     IR::U128 result = ir.ZeroVector();
 
     for (size_t i = 0; i < 2; i++) {
-        const IR::U64 element = ir.FPSingleToDouble(ir.VectorGetElement(32, part, i), true);
+        const IR::U64 element = ir.FPSingleToDouble(ir.VectorGetElement(32, part, i), rounding_mode);
 
         result = ir.VectorSetElement(64, result, i, element);
     }
@@ -367,10 +368,11 @@ bool TranslatorVisitor::FCVTN(bool Q, bool sz, Vec Vn, Vec Vd) {
     }
 
     const IR::U128 operand = V(128, Vn);
+    const auto rounding_mode = ir.current_location->FPCR().RMode();
     IR::U128 result = ir.ZeroVector();
 
     for (size_t i = 0; i < 2; i++) {
-        const IR::U32 element = ir.FPDoubleToSingle(ir.VectorGetElement(64, operand, i), true);
+        const IR::U32 element = ir.FPDoubleToSingle(ir.VectorGetElement(64, operand, i), rounding_mode);
 
         result = ir.VectorSetElement(32, result, i, element);
     }
@@ -1948,14 +1948,12 @@ U32U64 IREmitter::FPSub(const U32U64& a, const U32U64& b, bool fpcr_controlled)
     }
 }
 
-U32 IREmitter::FPDoubleToSingle(const U64& a, bool fpcr_controlled) {
-    ASSERT(fpcr_controlled);
-    return Inst<U32>(Opcode::FPDoubleToSingle, a);
+U32 IREmitter::FPDoubleToSingle(const U64& a, FP::RoundingMode rounding) {
+    return Inst<U32>(Opcode::FPDoubleToSingle, a, Imm8(static_cast<u8>(rounding)));
 }
 
-U64 IREmitter::FPSingleToDouble(const U32& a, bool fpcr_controlled) {
-    ASSERT(fpcr_controlled);
-    return Inst<U64>(Opcode::FPSingleToDouble, a);
+U64 IREmitter::FPSingleToDouble(const U32& a, FP::RoundingMode rounding) {
+    return Inst<U64>(Opcode::FPSingleToDouble, a, Imm8(static_cast<u8>(rounding)));
 }
 
 U32 IREmitter::FPToFixedS32(const U32U64& a, size_t fbits, FP::RoundingMode rounding) {
@@ -312,8 +312,8 @@ public:
     U32U64 FPRSqrtStepFused(const U32U64& a, const U32U64& b);
     U32U64 FPSqrt(const U32U64& a);
     U32U64 FPSub(const U32U64& a, const U32U64& b, bool fpcr_controlled);
-    U32 FPDoubleToSingle(const U64& a, bool fpcr_controlled);
-    U64 FPSingleToDouble(const U32& a, bool fpcr_controlled);
+    U32 FPDoubleToSingle(const U64& a, FP::RoundingMode rounding);
+    U64 FPSingleToDouble(const U32& a, FP::RoundingMode rounding);
     U32 FPToFixedS32(const U32U64& a, size_t fbits, FP::RoundingMode rounding);
     U64 FPToFixedS64(const U32U64& a, size_t fbits, FP::RoundingMode rounding);
     U32 FPToFixedU32(const U32U64& a, size_t fbits, FP::RoundingMode rounding);
@@ -500,8 +500,8 @@ OPCODE(FPSub32, U32, U32,
 OPCODE(FPSub64, U64, U64, U64)
 
 // Floating-point conversions
-OPCODE(FPSingleToDouble, U64, U32)
-OPCODE(FPDoubleToSingle, U32, U64)
+OPCODE(FPSingleToDouble, U64, U32, U8)
+OPCODE(FPDoubleToSingle, U32, U64, U8)
 OPCODE(FPDoubleToFixedS32, U32, U64, U8, U8)
 OPCODE(FPDoubleToFixedS64, U64, U64, U8, U8)
 OPCODE(FPDoubleToFixedU32, U32, U64, U8, U8)
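With the opcodes now carrying the rounding mode as an explicit U8 immediate, a frontend is no longer limited to the mode held in the guest FPCR/FPSCR. A hypothetical later caller (not part of this commit), such as an A64 instruction that always narrows with round-to-odd like FCVTXN, could then emit:

    // Hypothetical future use, not in this commit: request round-to-odd directly,
    // independent of the FPCR rounding mode. 'operand64' stands for some IR::U64 value.
    const IR::U32 narrowed = ir.FPDoubleToSingle(operand64, FP::RoundingMode::ToOdd);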