backend_x64/ir: Amend generic LogicalVShift() template to also handle signed variants

Also adds IR opcodes to dispatch said variants
This commit is contained in:
Lioncash 2018-04-30 10:01:57 -04:00 committed by MerryMage
parent 9fc89f0a0e
commit 21974ee57e
6 changed files with 75 additions and 17 deletions

View file

@@ -6,6 +6,7 @@
#include <algorithm>
#include <functional>
#include <type_traits>
#include "backend_x64/abi.h"
#include "backend_x64/block_of_code.h"
@@ -923,10 +924,23 @@ static constexpr T LogicalVShift(T x, T y) {
const s8 shift_amount = static_cast<s8>(static_cast<u8>(y));
const s64 bit_size = static_cast<s64>(Common::BitSize<T>());
if (shift_amount <= -bit_size || shift_amount >= bit_size) {
if constexpr (std::is_signed_v<T>) {
if (shift_amount >= bit_size) {
return 0;
}
} else if (shift_amount <= -bit_size || shift_amount >= bit_size) {
return 0;
}
if constexpr (std::is_signed_v<T>) {
if (shift_amount <= -bit_size) {
// Parentheses necessary, as MSVC doesn't appear to consider cast parentheses
// as a grouping in terms of precedence, causing warning C4554 to fire. See:
// https://developercommunity.visualstudio.com/content/problem/144783/msvc-2017-does-not-understand-that-static-cast-cou.html
return x >> (T(bit_size - 1));
}
}
if (shift_amount < 0) {
return x >> T(-shift_amount);
}
@@ -934,25 +948,49 @@ static constexpr T LogicalVShift(T x, T y) {
return x << T(shift_amount);
}
void EmitX64::EmitVectorLogicalVShift8(EmitContext& ctx, IR::Inst* inst) {
void EmitX64::EmitVectorLogicalVShiftS8(EmitContext& ctx, IR::Inst* inst) {
    // Software fallback: shift each signed 8-bit lane of `a` left/right by the
    // (possibly negative) amount in the corresponding lane of `b`.
    EmitTwoArgumentFallback(code, ctx, inst, [](std::array<s8, 16>& result, const std::array<s8, 16>& a, const std::array<s8, 16>& b) {
        auto out = result.begin();
        auto shift = b.begin();
        for (const s8 lane : a) {
            *out++ = LogicalVShift<s8>(lane, *shift++);
        }
    });
}
void EmitX64::EmitVectorLogicalVShiftS16(EmitContext& ctx, IR::Inst* inst) {
    // Software fallback: per-lane variable shift of signed 16-bit elements.
    EmitTwoArgumentFallback(code, ctx, inst, [](std::array<s16, 8>& result, const std::array<s16, 8>& a, const std::array<s16, 8>& b) {
        auto out = result.begin();
        auto shift = b.begin();
        for (const s16 lane : a) {
            *out++ = LogicalVShift<s16>(lane, *shift++);
        }
    });
}
void EmitX64::EmitVectorLogicalVShiftS32(EmitContext& ctx, IR::Inst* inst) {
    // Software fallback: per-lane variable shift of signed 32-bit elements.
    EmitTwoArgumentFallback(code, ctx, inst, [](std::array<s32, 4>& result, const std::array<s32, 4>& a, const std::array<s32, 4>& b) {
        auto out = result.begin();
        auto shift = b.begin();
        for (const s32 lane : a) {
            *out++ = LogicalVShift<s32>(lane, *shift++);
        }
    });
}
void EmitX64::EmitVectorLogicalVShiftS64(EmitContext& ctx, IR::Inst* inst) {
    // Software fallback: per-lane variable shift of signed 64-bit elements.
    EmitTwoArgumentFallback(code, ctx, inst, [](std::array<s64, 2>& result, const std::array<s64, 2>& a, const std::array<s64, 2>& b) {
        auto out = result.begin();
        auto shift = b.begin();
        for (const s64 lane : a) {
            *out++ = LogicalVShift<s64>(lane, *shift++);
        }
    });
}
void EmitX64::EmitVectorLogicalVShiftU8(EmitContext& ctx, IR::Inst* inst) {
    // Software fallback: per-lane variable shift of unsigned 8-bit elements.
    EmitTwoArgumentFallback(code, ctx, inst, [](std::array<u8, 16>& result, const std::array<u8, 16>& a, const std::array<u8, 16>& b) {
        auto out = result.begin();
        auto shift = b.begin();
        for (const u8 lane : a) {
            *out++ = LogicalVShift<u8>(lane, *shift++);
        }
    });
}
void EmitX64::EmitVectorLogicalVShift16(EmitContext& ctx, IR::Inst* inst) {
void EmitX64::EmitVectorLogicalVShiftU16(EmitContext& ctx, IR::Inst* inst) {
    // Software fallback: per-lane variable shift of unsigned 16-bit elements.
    EmitTwoArgumentFallback(code, ctx, inst, [](std::array<u16, 8>& result, const std::array<u16, 8>& a, const std::array<u16, 8>& b) {
        auto out = result.begin();
        auto shift = b.begin();
        for (const u16 lane : a) {
            *out++ = LogicalVShift<u16>(lane, *shift++);
        }
    });
}
void EmitX64::EmitVectorLogicalVShift32(EmitContext& ctx, IR::Inst* inst) {
void EmitX64::EmitVectorLogicalVShiftU32(EmitContext& ctx, IR::Inst* inst) {
    // Software fallback: per-lane variable shift of unsigned 32-bit elements.
    EmitTwoArgumentFallback(code, ctx, inst, [](std::array<u32, 4>& result, const std::array<u32, 4>& a, const std::array<u32, 4>& b) {
        auto out = result.begin();
        auto shift = b.begin();
        for (const u32 lane : a) {
            *out++ = LogicalVShift<u32>(lane, *shift++);
        }
    });
}
void EmitX64::EmitVectorLogicalVShift64(EmitContext& ctx, IR::Inst* inst) {
// Software fallback: per-lane variable shift of unsigned 64-bit elements.
// Each result lane is LogicalVShift<u64> of the matching lanes of a and b.
// (Function body continues past this hunk; closing brace not shown here.)
void EmitX64::EmitVectorLogicalVShiftU64(EmitContext& ctx, IR::Inst* inst) {
EmitTwoArgumentFallback(code, ctx, inst, [](std::array<u64, 2>& result, const std::array<u64, 2>& a, const std::array<u64, 2>& b){
std::transform(a.begin(), a.end(), b.begin(), result.begin(), LogicalVShift<u64>);
});

View file

@@ -151,7 +151,7 @@ bool TranslatorVisitor::USHL_1(Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
const IR::U128 operand1 = V(64, Vn);
const IR::U128 operand2 = V(64, Vm);
const IR::U128 result = ir.VectorLogicalVShift(64, operand1, operand2);
const IR::U128 result = ir.VectorLogicalVShiftUnsigned(64, operand1, operand2);
V(64, Vd, result);
return true;

View file

@@ -299,7 +299,7 @@ bool TranslatorVisitor::USHL_2(bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd) {
const IR::U128 operand1 = V(datasize, Vn);
const IR::U128 operand2 = V(datasize, Vm);
const IR::U128 result = ir.VectorLogicalVShift(esize, operand1, operand2);
const IR::U128 result = ir.VectorLogicalVShiftUnsigned(esize, operand1, operand2);
V(datasize, Vd, result);
return true;
}

View file

@@ -958,16 +958,31 @@ U128 IREmitter::VectorLogicalShiftRight(size_t esize, const U128& a, u8 shift_am
return {};
}
U128 IREmitter::VectorLogicalVShift(size_t esize, const U128& a, const U128& b) {
U128 IREmitter::VectorLogicalVShiftSigned(size_t esize, const U128& a, const U128& b) {
switch (esize) {
case 8:
return Inst<U128>(Opcode::VectorLogicalVShift8, a, b);
return Inst<U128>(Opcode::VectorLogicalVShiftS8, a, b);
case 16:
return Inst<U128>(Opcode::VectorLogicalVShift16, a, b);
return Inst<U128>(Opcode::VectorLogicalVShiftS16, a, b);
case 32:
return Inst<U128>(Opcode::VectorLogicalVShift32, a, b);
return Inst<U128>(Opcode::VectorLogicalVShiftS32, a, b);
case 64:
return Inst<U128>(Opcode::VectorLogicalVShift64, a, b);
return Inst<U128>(Opcode::VectorLogicalVShiftS64, a, b);
}
UNREACHABLE();
return {};
}
// Emits the element-size-appropriate unsigned variable-vector-shift IR opcode.
// esize selects the lane width (8/16/32/64 bits); a holds the values to shift,
// b holds the per-lane signed shift amounts. Any other esize is unreachable.
// (Function body continues past this hunk; closing brace not shown here.)
U128 IREmitter::VectorLogicalVShiftUnsigned(size_t esize, const U128& a, const U128& b) {
switch (esize) {
case 8:
return Inst<U128>(Opcode::VectorLogicalVShiftU8, a, b);
case 16:
return Inst<U128>(Opcode::VectorLogicalVShiftU16, a, b);
case 32:
return Inst<U128>(Opcode::VectorLogicalVShiftU32, a, b);
case 64:
return Inst<U128>(Opcode::VectorLogicalVShiftU64, a, b);
}
UNREACHABLE();
return {};

View file

@@ -214,7 +214,8 @@ public:
U128 VectorLessUnsigned(size_t esize, const U128& a, const U128& b);
U128 VectorLogicalShiftLeft(size_t esize, const U128& a, u8 shift_amount);
U128 VectorLogicalShiftRight(size_t esize, const U128& a, u8 shift_amount);
U128 VectorLogicalVShift(size_t esize, const U128& a, const U128& b);
U128 VectorLogicalVShiftSigned(size_t esize, const U128& a, const U128& b);
U128 VectorLogicalVShiftUnsigned(size_t esize, const U128& a, const U128& b);
U128 VectorMaxSigned(size_t esize, const U128& a, const U128& b);
U128 VectorMaxUnsigned(size_t esize, const U128& a, const U128& b);
U128 VectorMinSigned(size_t esize, const U128& a, const U128& b);

View file

@@ -267,10 +267,14 @@ OPCODE(VectorLogicalShiftRight8, T::U128, T::U128, T::U
OPCODE(VectorLogicalShiftRight16, T::U128, T::U128, T::U8 )
OPCODE(VectorLogicalShiftRight32, T::U128, T::U128, T::U8 )
OPCODE(VectorLogicalShiftRight64, T::U128, T::U128, T::U8 )
OPCODE(VectorLogicalVShift8, T::U128, T::U128, T::U128 )
OPCODE(VectorLogicalVShift16, T::U128, T::U128, T::U128 )
OPCODE(VectorLogicalVShift32, T::U128, T::U128, T::U128 )
OPCODE(VectorLogicalVShift64, T::U128, T::U128, T::U128 )
OPCODE(VectorLogicalVShiftS8, T::U128, T::U128, T::U128 )
OPCODE(VectorLogicalVShiftS16, T::U128, T::U128, T::U128 )
OPCODE(VectorLogicalVShiftS32, T::U128, T::U128, T::U128 )
OPCODE(VectorLogicalVShiftS64, T::U128, T::U128, T::U128 )
OPCODE(VectorLogicalVShiftU8, T::U128, T::U128, T::U128 )
OPCODE(VectorLogicalVShiftU16, T::U128, T::U128, T::U128 )
OPCODE(VectorLogicalVShiftU32, T::U128, T::U128, T::U128 )
OPCODE(VectorLogicalVShiftU64, T::U128, T::U128, T::U128 )
OPCODE(VectorMaxS8, T::U128, T::U128, T::U128 )
OPCODE(VectorMaxS16, T::U128, T::U128, T::U128 )
OPCODE(VectorMaxS32, T::U128, T::U128, T::U128 )