emit_x64_floating_point: Implement accurate fallback for FPMulAdd{32,64}

MerryMage 2018-07-23 16:23:05 +01:00
parent e199887fbc
commit 0ce11b7b15
3 changed files with 56 additions and 14 deletions


@@ -766,6 +766,23 @@ void EmitX64::EmitFPMul64(EmitContext& ctx, IR::Inst* inst) {
     FPThreeOp64(code, ctx, inst, &Xbyak::CodeGenerator::mulsd);
 }
 
+template<typename FPT>
+static void EmitFPMulAddFallback(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
+    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
+    ctx.reg_alloc.HostCall(inst, args[0], args[1], args[2]);
+    code.mov(code.ABI_PARAM4.cvt32(), ctx.FPCR());
+#ifdef _WIN32
+    code.sub(rsp, 16 + ABI_SHADOW_SPACE);
+    code.lea(rax, code.ptr[code.r15 + code.GetJitStateInfo().offsetof_fpsr_exc]);
+    code.mov(qword[rsp + ABI_SHADOW_SPACE], rax);
+    code.CallFunction(&FP::FPMulAdd<FPT>);
+    code.add(rsp, 16 + ABI_SHADOW_SPACE);
+#else
+    code.lea(code.ABI_PARAM5, code.ptr[code.r15 + code.GetJitStateInfo().offsetof_fpsr_exc]);
+    code.CallFunction(&FP::FPMulAdd<FPT>);
+#endif
+}
+
 void EmitX64::EmitFPMulAdd32(EmitContext& ctx, IR::Inst* inst) {
     if (code.DoesCpuSupport(Xbyak::util::Cpu::tFMA)) {
         FPFourOp32(code, ctx, inst, [&](Xbyak::Xmm result, Xbyak::Xmm operand2, Xbyak::Xmm operand3) {
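
Note on the new fallback: HostCall places the instruction's three operands in the first three integer argument registers, the guest FPCR goes in the fourth, and a pointer to the JIT state's FPSR exception flags becomes a fifth argument. The #ifdef exists because the Microsoft x64 ABI passes only four integer arguments in registers, so the fifth has to be stored on the stack above the 32-byte shadow space; that is what the sub/lea/mov/add sequence does (the extra 16 bytes presumably also keep rsp 16-byte aligned across the call). As a rough, hypothetical stand-in for what the called FP::FPMulAdd<u64> computes (the real soft-float routine also honours FPCR rounding and flush-to-zero modes and records exception flags; the parameter order here is an assumption):

    #include <cmath>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Hypothetical stand-in for FP::FPMulAdd<u64>: interpret the raw bit
    // patterns as doubles and fuse the multiply-add into a single rounding.
    // The real routine also applies FPCR controls and accumulates exception
    // flags through the fpsr_exc pointer, both omitted here.
    std::uint64_t SoftFPMulAdd64(std::uint64_t addend, std::uint64_t op1, std::uint64_t op2) {
        double a, x, y;
        std::memcpy(&a, &addend, sizeof a);
        std::memcpy(&x, &op1, sizeof x);
        std::memcpy(&y, &op2, sizeof y);
        const double result = std::fma(x, y, a);  // one rounding, as FMA semantics require
        std::uint64_t bits;
        std::memcpy(&bits, &result, sizeof bits);
        return bits;
    }

    int main() {
        // 1.0 * 1.0 + 1.0 == 2.0 -> 4000000000000000
        std::printf("%016llx\n", static_cast<unsigned long long>(
            SoftFPMulAdd64(0x3ff0000000000000, 0x3ff0000000000000, 0x3ff0000000000000)));
    }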
@@ -774,11 +791,7 @@ void EmitX64::EmitFPMulAdd32(EmitContext& ctx, IR::Inst* inst) {
         return;
     }
 
-    // TODO: Improve accuracy.
-    FPFourOp32(code, ctx, inst, [&](Xbyak::Xmm result, Xbyak::Xmm operand2, Xbyak::Xmm operand3) {
-        code.mulss(operand2, operand3);
-        code.addss(result, operand2);
-    });
+    EmitFPMulAddFallback<u32>(code, ctx, inst);
 }
 
 void EmitX64::EmitFPMulAdd64(EmitContext& ctx, IR::Inst* inst) {
@@ -789,11 +802,7 @@ void EmitX64::EmitFPMulAdd64(EmitContext& ctx, IR::Inst* inst) {
         return;
     }
 
-    // TODO: Improve accuracy.
-    FPFourOp64(code, ctx, inst, [&](Xbyak::Xmm result, Xbyak::Xmm operand2, Xbyak::Xmm operand3) {
-        code.mulsd(operand2, operand3);
-        code.addsd(result, operand2);
-    });
+    EmitFPMulAddFallback<u64>(code, ctx, inst);
 }
 
 static void EmitFPRound(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst, size_t fsize) {
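
For context on the removed TODO: the old fallback computed the multiply and the add as two separate SSE operations, rounding twice, whereas AArch64's FMADD-family instructions must round exactly once on the infinitely precise product-plus-addend. A small standalone demonstration of the difference (not part of the commit):

    #include <cmath>
    #include <cstdio>

    int main() {
        const double a = 1.0 + 0x1p-30;
        const double b = 1.0 + 0x1p-30;
        const double c = -1.0;

        const double two_step = a * b + c;       // product rounded to double, then added
        const double fused = std::fma(a, b, c);  // single rounding of the exact a*b + c

        // two_step drops the 2^-60 term of the exact product; fused keeps it.
        std::printf("two-step: %a\nfused:    %a\n", two_step, fused);
    }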


@@ -334,3 +334,40 @@ TEST_CASE("A64: CNTPCT_EL0", "[a64]") {
     REQUIRE(jit.GetRegister(3) == 7);
 }
+
+TEST_CASE("A64: FNMSUB 1", "[a64]") {
+    TestEnv env;
+    Dynarmic::A64::Jit jit{Dynarmic::A64::UserConfig{&env}};
+
+    env.code_mem[0] = 0x1f618a9c; // FNMSUB D28, D20, D1, D2
+    env.code_mem[1] = 0x14000000; // B .
+
+    jit.SetPC(0);
+    jit.SetVector(20, {0xe73a51346164bd6c, 0x8080000000002b94});
+    jit.SetVector(1, {0xbf8000007fffffff, 0xffffffff00002b94});
+    jit.SetVector(2, {0x0000000000000000, 0xc79b271e3f000000});
+
+    env.ticks_left = 2;
+    jit.Run();
+
+    REQUIRE(jit.GetVector(28) == Vector{0x66ca513533ee6076, 0x0000000000000000});
+}
+
+TEST_CASE("A64: FNMSUB 2", "[a64]") {
+    TestEnv env;
+    Dynarmic::A64::Jit jit{Dynarmic::A64::UserConfig{&env}};
+
+    env.code_mem[0] = 0x1f2ab88e; // FNMSUB S14, S4, S10, S14
+    env.code_mem[1] = 0x14000000; // B .
+
+    jit.SetPC(0);
+    jit.SetVector(4, {0x3c9623b101398437, 0x7ff0abcd0ba98d27});
+    jit.SetVector(10, {0xffbfffff3eaaaaab, 0x3f0000003f8147ae});
+    jit.SetVector(14, {0x80000000007fffff, 0xe73a513400000000});
+    jit.SetFpcr(0x00400000);
+
+    env.ticks_left = 2;
+    jit.Run();
+
+    REQUIRE(jit.GetVector(14) == Vector{0x0000000080045284, 0x0000000000000000});
+}
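
A note on the new tests (my reading, not from the commit message): FNMSUB Vd, Vn, Vm, Va computes Vn*Vm - Va with a single rounding, and SetFpcr(0x00400000) in the second test sets FPCR.RMode (bits 23:22) to 0b01, round towards plus infinity, so it also exercises a non-default rounding mode. A rough host-side cross-check of the first test might look like the following; it ignores FPCR modes and ARM-specific NaN propagation, so treat it as a sanity check rather than a reference:

    #include <cmath>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Bit-pattern helpers for reinterpreting the test vectors as doubles.
    double FromBits(std::uint64_t b) { double d; std::memcpy(&d, &b, sizeof d); return d; }
    std::uint64_t ToBits(double d) { std::uint64_t b; std::memcpy(&b, &d, sizeof b); return b; }

    int main() {
        const double n = FromBits(0xe73a51346164bd6c);  // element 0 of D20
        const double m = FromBits(0xbf8000007fffffff);  // element 0 of D1
        const double a = FromBits(0x0000000000000000);  // element 0 of D2
        // FNMSUB D28, D20, D1, D2: the test expects bits 0x66ca513533ee6076.
        std::printf("%016llx\n", static_cast<unsigned long long>(ToBits(std::fma(n, m, -a))));
    }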


@@ -77,8 +77,6 @@ static u32 GenRandomInst(u64 pc, bool is_last_inst) {
         "LDLAR",
         // Dynarmic and QEMU currently differ on how the exclusive monitor's address range works.
         "STXR", "STLXR", "STXP", "STLXP", "LDXR", "LDAXR", "LDXP", "LDAXP",
-        // Approximation. Produces inaccurate results.
-        "FMADD_float", "FMSUB_float", "FNMADD_float", "FNMSUB_float",
     };
 
     for (const auto& [fn, bitstring] : list) {
@@ -116,8 +114,6 @@ static u32 GenFloatInst(u64 pc, bool is_last_inst) {
     const std::vector<std::string> do_not_test {
         // QEMU's implementation of FCVT is incorrect
         "FCVT_float",
-        // Approximation. Produces incorrect results.
-        "FMADD_float", "FMSUB_float", "FNMADD_float", "FNMSUB_float",
         // Requires investigation (temporarily disabled).
         "FDIV_1", "FDIV_2",
     };
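
The removed entries are the payoff of the new fallback: FPMulAdd is now computed by an accurate soft-float routine rather than a two-rounding approximation, so the FMADD/FMSUB/FNMADD/FNMSUB encodings no longer need to be excluded from fuzzing against the reference implementation.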