diff --git a/src/frontend/A64/decoder/a64.inc b/src/frontend/A64/decoder/a64.inc
index 88d66bc2..576c112a 100644
--- a/src/frontend/A64/decoder/a64.inc
+++ b/src/frontend/A64/decoder/a64.inc
@@ -807,7 +807,7 @@ INST(USRA_2, "USRA", "0Q101
 INST(URSHR_2, "URSHR", "0Q1011110IIIIiii001001nnnnnddddd")
 INST(URSRA_2, "URSRA", "0Q1011110IIIIiii001101nnnnnddddd")
 //INST(SRI_2, "SRI", "0Q1011110IIIIiii010001nnnnnddddd")
-//INST(SLI_2, "SLI", "0Q1011110IIIIiii010101nnnnnddddd")
+INST(SLI_2, "SLI", "0Q1011110IIIIiii010101nnnnnddddd")
 //INST(SQSHLU_2, "SQSHLU", "0Q1011110IIIIiii011001nnnnnddddd")
 //INST(UQSHL_imm_2, "UQSHL (immediate)", "0Q1011110IIIIiii011101nnnnnddddd")
 //INST(SQSHRUN_2, "SQSHRUN, SQSHRUN2", "0Q1011110IIIIiii100001nnnnnddddd")
diff --git a/src/frontend/A64/translate/impl/simd_shift_by_immediate.cpp b/src/frontend/A64/translate/impl/simd_shift_by_immediate.cpp
index 48ead849..8a1d7332 100644
--- a/src/frontend/A64/translate/impl/simd_shift_by_immediate.cpp
+++ b/src/frontend/A64/translate/impl/simd_shift_by_immediate.cpp
@@ -304,4 +304,30 @@ bool TranslatorVisitor::USHLL(bool Q, Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd)
     return true;
 }
 
+bool TranslatorVisitor::SLI_2(bool Q, Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd) {
+    if (immh == 0b0000) {
+        return DecodeError();
+    }
+
+    if (!Q && immh.Bit<3>()) {
+        return ReservedValue();
+    }
+
+    const size_t esize = 8 << Common::HighestSetBit(immh.ZeroExtend());
+    const size_t datasize = Q ? 128 : 64;
+
+    const u8 shift_amount = concatenate(immh, immb).ZeroExtend<u8>() - static_cast<u8>(esize);
+    const u64 mask = Common::Ones<u64>(esize) << shift_amount;
+
+    const IR::U128 operand1 = V(datasize, Vn);
+    const IR::U128 operand2 = V(datasize, Vd);
+
+    const IR::U128 shifted = ir.VectorLogicalShiftLeft(esize, operand1, shift_amount);
+    const IR::U128 mask_vec = ir.VectorBroadcast(esize, I(esize, mask));
+    const IR::U128 result = ir.VectorOr(ir.VectorAnd(operand2, ir.VectorNot(mask_vec)), shifted);
+
+    V(datasize, Vd, result);
+    return true;
+}
+
 } // namespace Dynarmic::A64
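
For reference, a minimal standalone sketch (not part of the patch; sli_element and the sample values are hypothetical) of the per-element behaviour the mask / VectorNot / VectorOr sequence above encodes: SLI shifts each Vn element left by shift_amount and inserts it into the corresponding Vd element, preserving only the low shift_amount bits of Vd.

// Illustrative scalar model of one SLI element, assuming 0 <= shift < esize <= 64.
#include <cstdint>
#include <cstdio>

std::uint64_t sli_element(std::uint64_t vd, std::uint64_t vn, unsigned esize, unsigned shift) {
    const std::uint64_t ones = esize == 64 ? ~std::uint64_t{0} : (std::uint64_t{1} << esize) - 1;  // all element bits set
    const std::uint64_t mask = (ones << shift) & ones;      // bits that the shifted Vn element will occupy
    return ((vd & ~mask) | ((vn << shift) & ones)) & ones;  // keep untouched Vd bits, OR in the shifted Vn bits
}

int main() {
    // 8-bit element, shift by 4: high nibble comes from Vn, low nibble is preserved from Vd.
    std::printf("%02llx\n", static_cast<unsigned long long>(sli_element(0xAB, 0xCD, 8, 4)));  // prints "db"
    return 0;
}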