diff --git a/src/backend_x64/emit_x64_vector.cpp b/src/backend_x64/emit_x64_vector.cpp
index ac3db0c7..65ce3043 100644
--- a/src/backend_x64/emit_x64_vector.cpp
+++ b/src/backend_x64/emit_x64_vector.cpp
@@ -254,6 +254,72 @@ void EmitX64::EmitVectorSetElement64(EmitContext& ctx, IR::Inst* inst) {
     }
 }
 
+static void EmitVectorAbs(size_t esize, EmitContext& ctx, IR::Inst* inst, BlockOfCode& code) {
+    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
+
+    const Xbyak::Xmm data = ctx.reg_alloc.UseScratchXmm(args[0]);
+
+    switch (esize) {
+    case 8:
+        if (code.DoesCpuSupport(Xbyak::util::Cpu::tSSSE3)) {
+            code.pabsb(data, data);
+        } else {
+            const Xbyak::Xmm temp = ctx.reg_alloc.ScratchXmm();
+            code.pxor(temp, temp);
+            code.psubb(temp, data);
+            code.pminub(data, temp);
+        }
+        break;
+    case 16:
+        if (code.DoesCpuSupport(Xbyak::util::Cpu::tSSSE3)) {
+            code.pabsw(data, data);
+        } else {
+            const Xbyak::Xmm temp = ctx.reg_alloc.ScratchXmm();
+            code.pxor(temp, temp);
+            code.psubw(temp, data);
+            code.pmaxsw(data, temp);
+        }
+        break;
+    case 32:
+        if (code.DoesCpuSupport(Xbyak::util::Cpu::tSSSE3)) {
+            code.pabsd(data, data);
+        } else {
+            const Xbyak::Xmm temp = ctx.reg_alloc.ScratchXmm();
+            code.movdqa(temp, data);
+            code.psrad(temp, 31);
+            code.pxor(data, temp);
+            code.psubd(data, temp);
+        }
+        break;
+    case 64: {
+        const Xbyak::Xmm temp = ctx.reg_alloc.ScratchXmm();
+        code.pshufd(temp, data, 0b11110101);
+        code.psrad(temp, 31);
+        code.pxor(data, temp);
+        code.psubq(data, temp);
+        break;
+    }
+    }
+
+    ctx.reg_alloc.DefineValue(inst, data);
+}
+
+void EmitX64::EmitVectorAbs8(EmitContext& ctx, IR::Inst* inst) {
+    EmitVectorAbs(8, ctx, inst, code);
+}
+
+void EmitX64::EmitVectorAbs16(EmitContext& ctx, IR::Inst* inst) {
+    EmitVectorAbs(16, ctx, inst, code);
+}
+
+void EmitX64::EmitVectorAbs32(EmitContext& ctx, IR::Inst* inst) {
+    EmitVectorAbs(32, ctx, inst, code);
+}
+
+void EmitX64::EmitVectorAbs64(EmitContext& ctx, IR::Inst* inst) {
+    EmitVectorAbs(64, ctx, inst, code);
+}
+
 void EmitX64::EmitVectorAdd8(EmitContext& ctx, IR::Inst* inst) {
     EmitVectorOperation(code, ctx, inst, &Xbyak::CodeGenerator::paddb);
 }
diff --git a/src/frontend/ir/ir_emitter.cpp b/src/frontend/ir/ir_emitter.cpp
index 748bc557..13cef6a8 100644
--- a/src/frontend/ir/ir_emitter.cpp
+++ b/src/frontend/ir/ir_emitter.cpp
@@ -789,6 +789,21 @@ U128 IREmitter::VectorSetElement(size_t esize, const U128& a, size_t index, cons
     }
 }
 
+U128 IREmitter::VectorAbs(size_t esize, const U128& a) {
+    switch (esize) {
+    case 8:
+        return Inst<U128>(Opcode::VectorAbs8, a);
+    case 16:
+        return Inst<U128>(Opcode::VectorAbs16, a);
+    case 32:
+        return Inst<U128>(Opcode::VectorAbs32, a);
+    case 64:
+        return Inst<U128>(Opcode::VectorAbs64, a);
+    }
+    UNREACHABLE();
+    return {};
+}
+
 U128 IREmitter::VectorAdd(size_t esize, const U128& a, const U128& b) {
     switch (esize) {
     case 8:
diff --git a/src/frontend/ir/ir_emitter.h b/src/frontend/ir/ir_emitter.h
index e965bee8..19623624 100644
--- a/src/frontend/ir/ir_emitter.h
+++ b/src/frontend/ir/ir_emitter.h
@@ -208,6 +208,7 @@ public:
 
     UAny VectorGetElement(size_t esize, const U128& a, size_t index);
     U128 VectorSetElement(size_t esize, const U128& a, size_t index, const UAny& elem);
+    U128 VectorAbs(size_t esize, const U128& a);
     U128 VectorAdd(size_t esize, const U128& a, const U128& b);
     U128 VectorAnd(const U128& a, const U128& b);
    U128 VectorArithmeticShiftRight(size_t esize, const U128& a, u8 shift_amount);
diff --git a/src/frontend/ir/opcodes.inc b/src/frontend/ir/opcodes.inc
index 30bfb532..123b6571 100644
--- a/src/frontend/ir/opcodes.inc
+++ b/src/frontend/ir/opcodes.inc
@@ -211,6 +211,10 @@ OPCODE(VectorSetElement8,                   T::U128,        T::U128,        T::U
 OPCODE(VectorSetElement16,                  T::U128,        T::U128,        T::U8,          T::U16          )
 OPCODE(VectorSetElement32,                  T::U128,        T::U128,        T::U8,          T::U32          )
 OPCODE(VectorSetElement64,                  T::U128,        T::U128,        T::U8,          T::U64          )
+OPCODE(VectorAbs8,                          T::U128,        T::U128                                         )
+OPCODE(VectorAbs16,                         T::U128,        T::U128                                         )
+OPCODE(VectorAbs32,                         T::U128,        T::U128                                         )
+OPCODE(VectorAbs64,                         T::U128,        T::U128                                         )
 OPCODE(VectorAdd8,                          T::U128,        T::U128,        T::U128                         )
 OPCODE(VectorAdd16,                         T::U128,        T::U128,        T::U128                         )
 OPCODE(VectorAdd32,                         T::U128,        T::U128,        T::U128                         )
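Note on the non-SSSE3 fallbacks in EmitVectorAbs: the 32-bit and 64-bit paths build a per-lane sign mask with an arithmetic shift (psrad by 31) and then apply the identity |x| = (x ^ mask) - mask. For 64-bit lanes, where SSE2 has no arithmetic quadword shift, the pshufd with 0b11110101 first copies the high dword of each qword into both halves of that qword so that psrad produces a full-width mask. The scalar sketch below is a hypothetical check of that identity for one 32-bit lane, not part of the patch; it assumes an arithmetic right shift for negative values (as psrad performs) and does the arithmetic in uint32_t so INT32_MIN wraps to itself, matching pabsd and the vector fallback.

// Hypothetical scalar check of the sign-mask identity used by the fallback; not dynarmic code.
#include <cinttypes>
#include <cstdint>
#include <cstdio>

// |x| = (x ^ mask) - mask, where mask = x >> 31 (arithmetic shift: 0 or all ones).
// Computed in uint32_t so INT32_MIN wraps to itself, as the modular psubd lane does.
static std::int32_t abs_via_sign_mask(std::int32_t x) {
    const std::uint32_t ux = static_cast<std::uint32_t>(x);
    const std::uint32_t mask = static_cast<std::uint32_t>(x >> 31); // like psrad xmm, 31
    return static_cast<std::int32_t>((ux ^ mask) - mask);           // like pxor + psubd
}

int main() {
    const std::int32_t tests[] = {0, 1, -1, 123456, -123456, INT32_MIN};
    for (const std::int32_t x : tests) {
        std::printf("%" PRId32 " -> %" PRId32 "\n", x, abs_via_sign_mask(x));
    }
    return 0;
}

The 8- and 16-bit fallbacks express the same result as max(x, -x): pmaxsw takes the signed maximum of x and 0 - x directly, while pminub takes the unsigned minimum of the two, which selects the non-negative operand for every 8-bit value (with -128 mapping to itself, as pabsb also does).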