emit_x64_vector_saturation: AVX implementation of EmitVectorUnsignedSaturatedSub32

Merry 2021-05-28 14:42:42 +01:00
parent 0a232a6fbf
commit d087ef42b9


@@ -341,38 +341,42 @@ void EmitX64::EmitVectorUnsignedSaturatedSub16(EmitContext& ctx, IR::Inst* inst)
 void EmitX64::EmitVectorUnsignedSaturatedSub32(EmitContext& ctx, IR::Inst* inst) {
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
 
-    const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
-    const Xbyak::Xmm subtrahend = ctx.reg_alloc.UseXmm(args[1]);
-
-    const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
-    const Xbyak::Reg8 overflow = ctx.reg_alloc.ScratchGpr().cvt8();
-
-    code.movaps(tmp, result);
-
-    // TODO AVX2
     if (code.HasHostFeature(HostFeature::AVX512_Ortho | HostFeature::AVX512DQ)) {
-        // Do a regular unsigned subtraction
-        code.vpsubd(result, result, subtrahend);
-
-        // Test if an underflow happened
-        code.vpcmpud(k1, result, tmp, CmpInt::GreaterThan);
-
-        // Write 0 where underflows have happened
+        const Xbyak::Xmm operand1 = ctx.reg_alloc.UseXmm(args[0]);
+        const Xbyak::Xmm operand2 = ctx.reg_alloc.UseXmm(args[1]);
+        const Xbyak::Xmm result = ctx.reg_alloc.ScratchXmm();
+        const Xbyak::Reg8 overflow = ctx.reg_alloc.ScratchGpr().cvt8();
+
+        code.vpsubd(result, operand1, operand2);
+        code.vpcmpud(k1, result, operand1, CmpInt::GreaterThan);
         code.vpxord(result | k1, result, result);
-
-        // Set ZF if an underflow happened
         code.ktestb(k1, k1);
         code.setnz(overflow);
         code.or_(code.byte[code.r15 + code.GetJitStateInfo().offsetof_fpsr_qc], overflow);
 
         ctx.reg_alloc.DefineValue(inst, result);
         return;
     }
 
-    code.movaps(xmm0, subtrahend);
+    const Xbyak::Xmm operand1 = code.HasHostFeature(HostFeature::AVX) ? ctx.reg_alloc.UseXmm(args[0]) : ctx.reg_alloc.UseScratchXmm(args[0]);
+    const Xbyak::Xmm operand2 = ctx.reg_alloc.UseXmm(args[1]);
+    const Xbyak::Xmm result = code.HasHostFeature(HostFeature::AVX) ? ctx.reg_alloc.ScratchXmm() : operand1;
+    const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
+    const Xbyak::Reg8 overflow = ctx.reg_alloc.ScratchGpr().cvt8();
 
-    code.pxor(tmp, subtrahend);
-    code.psubd(result, subtrahend);
-    code.pand(xmm0, tmp);
+    if (code.HasHostFeature(HostFeature::AVX)) {
+        code.vpxor(tmp, operand1, operand2);
+        code.vpsubd(result, operand1, operand2);
+        code.vpand(xmm0, operand2, tmp);
+    } else {
+        code.movaps(tmp, operand1);
+        code.movaps(xmm0, operand2);
+
+        code.pxor(tmp, operand2);
+        code.psubd(result, operand2);
+        code.pand(xmm0, tmp);
+    }
 
     code.psrld(tmp, 1);
     code.psubd(tmp, xmm0);
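
For reference, a minimal scalar sketch of the per-lane behaviour the emitted code targets: each 32-bit lane computes an unsigned a - b clamped at 0, and any lane that underflows sets the sticky FPSR.QC saturation flag (written through offsetof_fpsr_qc above). The function and variable names below are illustrative only and are not part of the commit.

    #include <array>
    #include <cstdint>

    // Illustrative model of unsigned saturating subtraction over four 32-bit lanes.
    // "qc" stands in for the sticky FPSR.QC saturation flag.
    std::array<uint32_t, 4> usub32_saturate(const std::array<uint32_t, 4>& a,
                                            const std::array<uint32_t, 4>& b,
                                            bool& qc) {
        std::array<uint32_t, 4> result{};
        for (std::size_t i = 0; i < 4; ++i) {
            const uint32_t diff = a[i] - b[i];   // wraps on underflow
            const bool underflow = diff > a[i];  // same test as the vpcmpud in the AVX-512 path
            result[i] = underflow ? 0 : diff;    // saturate underflowing lanes to 0
            qc |= underflow;                     // any saturation sets the sticky flag
        }
        return result;
    }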