diff --git a/src/backend/x64/emit_x64_vector.cpp b/src/backend/x64/emit_x64_vector.cpp
index e65a58f2..9aab41ef 100644
--- a/src/backend/x64/emit_x64_vector.cpp
+++ b/src/backend/x64/emit_x64_vector.cpp
@@ -1408,6 +1408,31 @@ void EmitX64::EmitVectorLogicalVShiftS32(EmitContext& ctx, IR::Inst* inst) {
 }
 
 void EmitX64::EmitVectorLogicalVShiftS64(EmitContext& ctx, IR::Inst* inst) {
+    if (code.DoesCpuSupport(Xbyak::util::Cpu::tAVX512F) && code.DoesCpuSupport(Xbyak::util::Cpu::tAVX512VL)) {
+        auto args = ctx.reg_alloc.GetArgumentInfo(inst);
+
+        const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
+        const Xbyak::Xmm left_shift = ctx.reg_alloc.UseScratchXmm(args[1]);
+        const Xbyak::Xmm right_shift = ctx.reg_alloc.ScratchXmm();
+        const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
+
+        code.vmovdqa(tmp, code.MConst(xword, 0x00000000000000FF, 0x00000000000000FF)); // Low-byte mask for each 64-bit element
+        code.vpxor(right_shift, right_shift, right_shift);
+        code.vpsubq(right_shift, right_shift, left_shift); // right_shift = -left_shift
+
+        code.vpsllq(xmm0, left_shift, 56); // Move each shift byte's sign bit into bit 63 for blendvpd
+
+        code.vpand(right_shift, right_shift, tmp); // Keep only the low byte of each shift amount
+        code.vpand(left_shift, left_shift, tmp);
+
+        code.vpsravq(tmp, result, right_shift); // Arithmetic right shift for negative shift amounts
+        code.vpsllvq(result, result, left_shift); // Logical left shift for non-negative shift amounts
+        code.blendvpd(result, tmp); // Select per element on the sign bit held in xmm0
+
+        ctx.reg_alloc.DefineValue(inst, result);
+        return;
+    }
+
     EmitTwoArgumentFallback(code, ctx, inst, [](VectorArray<s64>& result, const VectorArray<s64>& a, const VectorArray<s64>& b) {
         std::transform(a.begin(), a.end(), b.begin(), result.begin(), LogicalVShift<s64>);
     });
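
For reference, the AVX-512 path above has to reproduce the element-wise behaviour of the scalar fallback: each 64-bit lane of the first operand is shifted by the signed low byte of the corresponding lane of the second operand, left for non-negative amounts and arithmetically right for negative ones, with out-of-range amounts saturating the way VPSLLVQ (to zero) and VPSRAVQ (to the sign bit) do. The sketch below is a minimal scalar model of that behaviour under those assumptions; it is not dynarmic code, and signed_vshift64 / signed_vshift64_lanes are hypothetical names introduced only for illustration.

// Minimal scalar model (illustrative only, not part of this patch) of the
// per-lane behaviour the AVX-512 path and the LogicalVShift<s64> fallback
// are both expected to implement.
#include <array>
#include <cstddef>
#include <cstdint>

using s64 = std::int64_t;

inline s64 signed_vshift64(s64 value, s64 shift_lane) {
    // The shift amount is the signed low byte of the shift operand's lane.
    const auto amount = static_cast<std::int8_t>(static_cast<std::uint8_t>(shift_lane));
    if (amount >= 0) {
        // Left shift; amounts of 64 or more produce zero, as VPSLLVQ does.
        return amount >= 64 ? 0 : static_cast<s64>(static_cast<std::uint64_t>(value) << amount);
    }
    // Negative amount: arithmetic right shift; amounts of -64 or less fill
    // with the sign bit, matching VPSRAVQ's saturating shift count.
    const int right = -static_cast<int>(amount);
    return right >= 64 ? (value < 0 ? -1 : 0) : (value >> right);
}

// Lane-wise application over the two 64-bit lanes of a 128-bit vector,
// mirroring the EmitTwoArgumentFallback lambda above.
inline void signed_vshift64_lanes(std::array<s64, 2>& result,
                                  const std::array<s64, 2>& a,
                                  const std::array<s64, 2>& b) {
    for (std::size_t i = 0; i < result.size(); ++i) {
        result[i] = signed_vshift64(a[i], b[i]);
    }
}

The vector code reaches the same result by computing both candidates (vpsllvq for the left shift, vpsravq by the negated amounts for the right shift) and then using each shift byte's sign bit, moved into bit 63 of xmm0 by the vpsllq, to pick between them per element with blendvpd.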