A32: Implement ASIMD VRECPE

commit 6f59c2cd8e
parent d3dc50d718

8 changed files with 47 additions and 18 deletions
@@ -378,11 +378,12 @@ void EmitThreeOpVectorOperation(BlockOfCode& code, EmitContext& ctx, IR::Inst* i
     ctx.reg_alloc.DefineValue(inst, result);
 }
 
-template<typename Lambda>
+template<FpcrControlledArgument fcarg = FpcrControlledArgument::Absent, typename Lambda>
 void EmitTwoOpFallback(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst, Lambda lambda) {
     const auto fn = static_cast<mp::equivalent_function_type<Lambda>*>(lambda);
 
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
+    const bool fpcr_controlled = fcarg == FpcrControlledArgument::Absent || args[1].GetImmediateU1();
     const Xbyak::Xmm arg1 = ctx.reg_alloc.UseXmm(args[0]);
     const Xbyak::Xmm result = ctx.reg_alloc.ScratchXmm();
     ctx.reg_alloc.EndOfAllocScope();
@@ -392,7 +393,7 @@ void EmitTwoOpFallback(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst, Lamb
     code.sub(rsp, stack_space + ABI_SHADOW_SPACE);
     code.lea(code.ABI_PARAM1, ptr[rsp + ABI_SHADOW_SPACE + 0 * 16]);
     code.lea(code.ABI_PARAM2, ptr[rsp + ABI_SHADOW_SPACE + 1 * 16]);
-    code.mov(code.ABI_PARAM3.cvt32(), ctx.FPCR().Value());
+    code.mov(code.ABI_PARAM3.cvt32(), ctx.FPCR(fpcr_controlled).Value());
    code.lea(code.ABI_PARAM4, code.ptr[code.r15 + code.GetJitStateInfo().offsetof_fpsr_exc]);

     code.movaps(xword[code.ABI_PARAM2], arg1);
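
The FpcrControlledArgument template parameter introduced above lets emitters whose IR instruction carries a trailing fpcr_controlled immediate share the same fallback path. A minimal sketch of the assumed tag, inferred from its use with args[1].GetImmediateU1() (the real definition lives elsewhere in the emitter source):

// Sketch only: when Present, the IR instruction's second argument (args[1])
// is a U1 immediate selecting whether FPCR or the standard FPSCR value
// controls the operation; Absent keeps the pre-existing behaviour.
enum class FpcrControlledArgument {
    Absent,
    Present,
};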
@@ -1144,7 +1145,7 @@ void EmitX64::EmitFPVectorPairedAddLower64(EmitContext& ctx, IR::Inst* inst) {
 
 template<typename FPT>
 static void EmitRecipEstimate(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
-    EmitTwoOpFallback(code, ctx, inst, [](VectorArray<FPT>& result, const VectorArray<FPT>& operand, FP::FPCR fpcr, FP::FPSR& fpsr) {
+    EmitTwoOpFallback<FpcrControlledArgument::Present>(code, ctx, inst, [](VectorArray<FPT>& result, const VectorArray<FPT>& operand, FP::FPCR fpcr, FP::FPSR& fpsr) {
         for (size_t i = 0; i < result.size(); i++) {
             result[i] = FP::FPRecipEstimate<FPT>(operand[i], fpcr, fpsr);
         }
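
For orientation, the lambda above is what the EmitTwoOpFallback thunk ends up calling: the emitter spills the source vector to the stack, points the first two ABI parameters at the result and operand slots, passes the (possibly standard) FPCR value, and hands over the address of the accumulated FPSR exception flags. A comment-only sketch of that mapping, inferred from the parameter setup in the hunk further above (illustrative, not repository code):

// Conceptual call made by the generated thunk:
//   fn(result,   // ABI_PARAM1 -> stack slot 0, presumably read back into the destination afterwards
//      operand,  // ABI_PARAM2 -> stack slot 1, arg1 is movaps'd here first
//      fpcr,     // ABI_PARAM3 -> ctx.FPCR(fpcr_controlled).Value()
//      fpsr);    // ABI_PARAM4 -> address of the JIT state's FPSR exception bits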
@@ -1188,12 +1189,12 @@ static void EmitRecipStepFused(BlockOfCode& code, EmitContext& ctx, IR::Inst* in
         MaybeStandardFPSCRValue(code, ctx, fpcr_controlled, [&]{
             code.movaps(result, GetVectorOf<fsize, false, 0, 2>(code));
             FCODE(vfnmadd231p)(result, operand1, operand2);
-        });
 
             FCODE(vcmpunordp)(tmp, result, result);
             code.vptest(tmp, tmp);
             code.jnz(fallback, code.T_NEAR);
             code.L(end);
+        });
 
         code.SwitchToFarCode();
         code.L(fallback);
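
The inline path in EmitRecipStepFused computes the fused reciprocal step, 2.0 - op1*op2, and bails out to the far-code fallback whenever a NaN lane appears. As a rough host-side illustration only (this is not the emitter's output, and 2.0 is what GetVectorOf<fsize, false, 0, 2> is taken to produce here), the 32-bit case corresponds to:

#include <immintrin.h>

// Assumes FMA and SSE4.1; mirrors the vfnmadd231ps + vcmpunordps + vptest sequence above.
__m128 recip_step_fast_path(__m128 op1, __m128 op2, bool& needs_fallback) {
    const __m128 two = _mm_set1_ps(2.0f);
    const __m128 result = _mm_fnmadd_ps(op1, op2, two);           // 2.0 - op1*op2
    const __m128 unord = _mm_cmpunord_ps(result, result);         // NaN lanes become all-ones
    needs_fallback = !_mm_testz_si128(_mm_castps_si128(unord),    // any set bit means the
                                      _mm_castps_si128(unord));   // slow path must run instead
    return result;
}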
@@ -103,7 +103,7 @@ INST(asimd_VSWP, "VSWP", "111100111D110010dddd000
 //INST(asimd_VQMOVN,        "VQMOVN",                  "111100111-11--10----00101x-0----") // ASIMD
 //INST(asimd_VSHLL_max,     "VSHLL_max",               "111100111-11--10----001100-0----") // ASIMD
 //INST(asimd_VCVT_half,     "VCVT (half-precision)",   "111100111-11--10----011x00-0----") // ASIMD
-//INST(asimd_VRECPE,        "VRECPE",                  "111100111-11--11----010x0x-0----") // ASIMD
+INST(asimd_VRECPE,          "VRECPE",                  "111100111D11zz11dddd010F0QM0mmmm") // ASIMD
 //INST(asimd_VRSQRTE,       "VRSQRTE",                 "111100111-11--11----010x1x-0----") // ASIMD
 //INST(asimd_VCVT_integer,  "VCVT (integer)",          "111100111-11--11----011xxx-0----") // ASIMD
 
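
The newly enabled decoder entry spells out every field that was wildcarded in the commented-out template: D, the two size bits zz, the destination index dddd, the F (floating-point vs. integer) bit, Q, M and the source index mmmm. A standalone sketch of that bit layout, useful for sanity-checking encodings by hand (not dynarmic's decoder):

#include <cstddef>
#include <cstdint>

// Field positions taken from "111100111D11zz11dddd010F0QM0mmmm" (bit 31 on the left).
struct VrecpeFields {
    bool D;          // bit 22
    std::size_t sz;  // bits 19:18 ("zz")
    std::size_t Vd;  // bits 15:12 ("dddd")
    bool F;          // bit 8
    bool Q;          // bit 6
    bool M;          // bit 5
    std::size_t Vm;  // bits 3:0 ("mmmm")
};

constexpr VrecpeFields ExtractVrecpeFields(std::uint32_t inst) {
    return VrecpeFields{
        ((inst >> 22) & 1) != 0,
        static_cast<std::size_t>((inst >> 18) & 0b11),
        static_cast<std::size_t>((inst >> 12) & 0b1111),
        ((inst >> 8) & 1) != 0,
        ((inst >> 6) & 1) != 0,
        ((inst >> 5) & 1) != 0,
        static_cast<std::size_t>(inst & 0b1111),
    };
}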
@@ -352,4 +352,31 @@ bool ArmTranslatorVisitor::asimd_VSWP(bool D, size_t Vd, bool Q, bool M, size_t
     return true;
 }
 
+bool ArmTranslatorVisitor::asimd_VRECPE(bool D, size_t sz, size_t Vd, bool F, bool Q, bool M, size_t Vm) {
+    if (Q && (Common::Bit<0>(Vd) || Common::Bit<0>(Vm))) {
+        return UndefinedInstruction();
+    }
+
+    if (sz == 0b00 || sz == 0b11) {
+        return UndefinedInstruction();
+    }
+
+    if (!F && sz == 0b01) {
+        // TODO: Implement 16-bit VectorUnsignedRecipEstimate
+        return UndefinedInstruction();
+    }
+
+    const size_t esize = 8U << sz;
+
+    const auto d = ToVector(Q, Vd, D);
+    const auto m = ToVector(Q, Vm, M);
+    const auto reg_m = ir.GetVector(m);
+    const auto result = F ? ir.FPVectorRecipEstimate(esize, reg_m, false)
+                          : ir.VectorUnsignedRecipEstimate(reg_m);
+
+    ir.SetVector(d, result);
+    return true;
+}
+
 } // namespace Dynarmic::A32
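
The guards in asimd_VRECPE mirror the encoding's size restrictions: esize = 8 << sz only yields the 16- and 32-bit forms, the 8- and 64-bit patterns are UNDEFINED, and the unsigned 16-bit variant is rejected until VectorUnsignedRecipEstimate grows a 16-bit form. A tiny standalone mirror of that decision logic, assuming the same field meanings (illustrative, not repository code):

#include <cstddef>

enum class VrecpeKind { Undefined, FloatEstimate, UnsignedEstimate };

// F = 1, sz = 0b10 -> 32-bit floating-point estimate (VRECPE.F32)
// F = 0, sz = 0b10 -> 32-bit unsigned estimate (VRECPE.U32)
// F = 0, sz = 0b01 -> rejected for now (16-bit unsigned estimate is a TODO)
// sz = 0b00 / 0b11 -> UNDEFINED
constexpr VrecpeKind ClassifyVrecpe(bool F, std::size_t sz) {
    if (sz == 0b00 || sz == 0b11) {
        return VrecpeKind::Undefined;
    }
    if (!F && sz == 0b01) {
        return VrecpeKind::Undefined;
    }
    return F ? VrecpeKind::FloatEstimate : VrecpeKind::UnsignedEstimate;
}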
@@ -499,6 +499,7 @@ struct ArmTranslatorVisitor final {
     bool asimd_VABS(bool D, size_t sz, size_t Vd, bool F, bool Q, bool M, size_t Vm);
     bool asimd_VNEG(bool D, size_t sz, size_t Vd, bool F, bool Q, bool M, size_t Vm);
     bool asimd_VSWP(bool D, size_t Vd, bool Q, bool M, size_t Vm);
+    bool asimd_VRECPE(bool D, size_t sz, size_t Vd, bool F, bool Q, bool M, size_t Vm);
 
     // Advanced SIMD load/store structures
     bool v8_VST_multiple(bool D, Reg n, size_t Vd, Imm<4> type, size_t sz, size_t align, Reg m);
@@ -2440,14 +2440,14 @@ U128 IREmitter::FPVectorPairedAddLower(size_t esize, const U128& a, const U128&
     UNREACHABLE();
 }
 
-U128 IREmitter::FPVectorRecipEstimate(size_t esize, const U128& a) {
+U128 IREmitter::FPVectorRecipEstimate(size_t esize, const U128& a, bool fpcr_controlled) {
     switch (esize) {
     case 16:
-        return Inst<U128>(Opcode::FPVectorRecipEstimate16, a);
+        return Inst<U128>(Opcode::FPVectorRecipEstimate16, a, Imm1(fpcr_controlled));
     case 32:
-        return Inst<U128>(Opcode::FPVectorRecipEstimate32, a);
+        return Inst<U128>(Opcode::FPVectorRecipEstimate32, a, Imm1(fpcr_controlled));
     case 64:
-        return Inst<U128>(Opcode::FPVectorRecipEstimate64, a);
+        return Inst<U128>(Opcode::FPVectorRecipEstimate64, a, Imm1(fpcr_controlled));
     }
     UNREACHABLE();
 }
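
With the extra parameter defaulting to true in the header hunk below, existing callers keep their behaviour, while the flag is now materialised as the trailing U1/Imm1 operand on the FPVectorRecipEstimate opcodes. Hypothetical call sites illustrating both uses (only the second line is quoted from this commit's A32 translator):

// Default: FPCR-controlled behaviour, unchanged for existing callers.
//     const IR::U128 estimate = ir.FPVectorRecipEstimate(esize, operand);
//
// A32 ASIMD translator: pass false, which the x64 fallback above reads via
// args[1].GetImmediateU1() and feeds into ctx.FPCR(fpcr_controlled).
//     const IR::U128 estimate = ir.FPVectorRecipEstimate(esize, reg_m, false);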
@@ -360,7 +360,7 @@
     U128 FPVectorNeg(size_t esize, const U128& a);
     U128 FPVectorPairedAdd(size_t esize, const U128& a, const U128& b, bool fpcr_controlled = true);
     U128 FPVectorPairedAddLower(size_t esize, const U128& a, const U128& b, bool fpcr_controlled = true);
-    U128 FPVectorRecipEstimate(size_t esize, const U128& a);
+    U128 FPVectorRecipEstimate(size_t esize, const U128& a, bool fpcr_controlled = true);
     U128 FPVectorRecipStepFused(size_t esize, const U128& a, const U128& b, bool fpcr_controlled = true);
     U128 FPVectorRoundInt(size_t esize, const U128& operand, FP::RoundingMode rounding, bool exact);
     U128 FPVectorRSqrtEstimate(size_t esize, const U128& a);
@@ -613,9 +613,9 @@ OPCODE(FPVectorPairedAdd32, U128, U128
 OPCODE(FPVectorPairedAdd64,         U128, U128, U128, U1 )
 OPCODE(FPVectorPairedAddLower32,    U128, U128, U128, U1 )
 OPCODE(FPVectorPairedAddLower64,    U128, U128, U128, U1 )
-OPCODE(FPVectorRecipEstimate16,     U128, U128           )
-OPCODE(FPVectorRecipEstimate32,     U128, U128           )
-OPCODE(FPVectorRecipEstimate64,     U128, U128           )
+OPCODE(FPVectorRecipEstimate16,     U128, U128, U1       )
+OPCODE(FPVectorRecipEstimate32,     U128, U128, U1       )
+OPCODE(FPVectorRecipEstimate64,     U128, U128, U1       )
 OPCODE(FPVectorRecipStepFused16,    U128, U128, U128, U1 )
 OPCODE(FPVectorRecipStepFused32,    U128, U128, U128, U1 )
 OPCODE(FPVectorRecipStepFused64,    U128, U128, U128, U1 )
@@ -111,7 +111,7 @@ u32 GenRandomInst(u32 pc, bool is_last_inst) {
         // FPSCR is inaccurate
         "vfp_VMRS",
         // Unimplemented in Unicorn
-        "asimd_VPADD_float",
+        "asimd_VPADD_float", "asimd_VRECPE",
         // Incorrect Unicorn implementations
         "asimd_VRECPS", // Unicorn does not fuse the multiply and subtraction, resulting in being off by 1ULP.
     };