emit_x64_vector: Vectorize fallback case in EmitVectorMultiply64()

Gets rid of the need to perform a fallback: on CPUs without SSE4.1, the 64-bit vector multiply is now emitted directly as an SSE2 instruction sequence instead of calling out to EmitTwoArgumentFallback.
Lioncash 2018-05-23 16:28:29 -04:00 committed by MerryMage
parent 954deff2d4
commit cf188448d4
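
For context on the diff below: SSE2 has no packed 64-bit multiply, so the new non-SSE4.1 path builds each 64-bit lane out of 32-bit partial products using pmuludq. A minimal scalar model of the per-lane arithmetic, assuming nothing beyond standard C++ (the function and variable names are illustrative, not from the commit):

    #include <cstdint>

    // Per-lane model of the new SSE2 path: a 64x64 -> 64-bit multiply built
    // from the 32x32 -> 64-bit unsigned multiplies that pmuludq provides.
    // The a_hi * b_hi partial product is omitted: it only contributes to
    // bits 64 and above, which a truncating 64-bit multiply discards anyway.
    // Wraparound in the cross sum is harmless, since only its low 32 bits
    // survive the final shift.
    uint64_t mul64_from_32(uint64_t a, uint64_t b) {
        const uint64_t a_lo = a & 0xFFFFFFFFu, a_hi = a >> 32;
        const uint64_t b_lo = b & 0xFFFFFFFFu, b_hi = b >> 32;
        const uint64_t lo_lo = a_lo * b_lo;               // pmuludq(tmp2, b)
        const uint64_t cross = a_lo * b_hi + a_hi * b_lo; // the other two pmuludqs
        return lo_lo + (cross << 32);                     // psllq + paddq
    }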


@@ -1464,8 +1464,9 @@ void EmitX64::EmitVectorMultiply64(EmitContext& ctx, IR::Inst* inst) {
         return;
     }
 
-    if (code.DoesCpuSupport(Xbyak::util::Cpu::tSSE41)) {
-        auto args = ctx.reg_alloc.GetArgumentInfo(inst);
+    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
+
+    if (code.DoesCpuSupport(Xbyak::util::Cpu::tSSE41)) {
         Xbyak::Xmm a = ctx.reg_alloc.UseScratchXmm(args[0]);
         Xbyak::Xmm b = ctx.reg_alloc.UseXmm(args[1]);
         Xbyak::Reg64 tmp1 = ctx.reg_alloc.ScratchGpr();
@@ -1484,9 +1485,28 @@ void EmitX64::EmitVectorMultiply64(EmitContext& ctx, IR::Inst* inst) {
         return;
     }
 
-    EmitTwoArgumentFallback(code, ctx, inst, [](std::array<u64, 2>& result, const std::array<u64, 2>& a, const std::array<u64, 2>& b) {
-        std::transform(a.begin(), a.end(), b.begin(), result.begin(), std::multiplies<>());
-    });
+    const Xbyak::Xmm a = ctx.reg_alloc.UseXmm(args[0]);
+    const Xbyak::Xmm b = ctx.reg_alloc.UseScratchXmm(args[1]);
+    const Xbyak::Xmm tmp1 = ctx.reg_alloc.ScratchXmm();
+    const Xbyak::Xmm tmp2 = ctx.reg_alloc.ScratchXmm();
+    const Xbyak::Xmm tmp3 = ctx.reg_alloc.ScratchXmm();
+
+    code.movdqa(tmp1, a);
+    code.movdqa(tmp2, a);
+    code.movdqa(tmp3, b);
+
+    code.psrlq(tmp1, 32);
+    code.psrlq(tmp3, 32);
+
+    code.pmuludq(tmp2, b);
+    code.pmuludq(tmp3, a);
+    code.pmuludq(b, tmp1);
+
+    code.paddq(b, tmp3);
+    code.psllq(b, 32);
+    code.paddq(tmp2, b);
+
+    ctx.reg_alloc.DefineValue(inst, tmp2);
 }
 
 void EmitX64::EmitVectorNarrow16(EmitContext& ctx, IR::Inst* inst) {
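
For reference, the emitted sequence maps one-to-one onto SSE2 intrinsics. A sketch of that equivalent (an illustrative helper, not part of dynarmic) that can be compiled and checked lane-by-lane against plain scalar multiplication:

    #include <emmintrin.h> // SSE2

    // Same instruction sequence as the emitted code, one intrinsic per
    // instruction; comments name the corresponding emitted instruction.
    __m128i mul64x2_sse2(__m128i a, __m128i b) {
        const __m128i a_hi  = _mm_srli_epi64(a, 32);   // psrlq tmp1, 32
        const __m128i b_hi  = _mm_srli_epi64(b, 32);   // psrlq tmp3, 32
        const __m128i lo_lo = _mm_mul_epu32(a, b);     // pmuludq tmp2, b
        const __m128i lo_hi = _mm_mul_epu32(a, b_hi);  // pmuludq tmp3, a
        const __m128i hi_lo = _mm_mul_epu32(a_hi, b);  // pmuludq b, tmp1
        __m128i cross = _mm_add_epi64(lo_hi, hi_lo);   // paddq b, tmp3
        cross = _mm_slli_epi64(cross, 32);             // psllq b, 32
        return _mm_add_epi64(lo_lo, cross);            // paddq tmp2, b
    }

Note that the emitted code takes b via UseScratchXmm, so the cross terms can accumulate destructively in b before the final paddq into tmp2; only three pmuludq multiplies are needed per vector because the hi(a) * hi(b) product cannot affect the low 64 bits of either lane.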