diff --git a/src/backend_x64/emit_x64_data_processing.cpp b/src/backend_x64/emit_x64_data_processing.cpp
index 8a14f718..d0252206 100644
--- a/src/backend_x64/emit_x64_data_processing.cpp
+++ b/src/backend_x64/emit_x64_data_processing.cpp
@@ -176,6 +176,29 @@ void EmitX64::EmitConditionalSelect64(EmitContext& ctx, IR::Inst* inst) {
     EmitConditionalSelect(code, ctx, inst, 64);
 }
 
+// Emits ExtractRegister{32,64}: result = ((operand:result) >> lsb) truncated to
+// bit_size. x64's SHRD performs exactly this double-width funnel shift.
+static void EmitExtractRegister(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst, int bit_size) {
+    auto args = ctx.reg_alloc.GetArgumentInfo(inst);
+
+    const Xbyak::Reg result = ctx.reg_alloc.UseScratchGpr(args[0]).changeBit(bit_size);
+    // SHRD only reads its source operand, so a non-scratch register suffices.
+    const Xbyak::Reg operand = ctx.reg_alloc.UseGpr(args[1]).changeBit(bit_size);
+    const u8 lsb = args[2].GetImmediateU8();
+
+    code.shrd(result, operand, lsb);
+
+    ctx.reg_alloc.DefineValue(inst, result);
+}
+
+void EmitX64::EmitExtractRegister32(EmitContext& ctx, IR::Inst* inst) {
+    EmitExtractRegister(*code, ctx, inst, 32);
+}
+
+void EmitX64::EmitExtractRegister64(EmitContext& ctx, IR::Inst* inst) {
+    EmitExtractRegister(*code, ctx, inst, 64);
+}
+
 void EmitX64::EmitLogicalShiftLeft32(EmitContext& ctx, IR::Inst* inst) {
     auto carry_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetCarryFromOp);
 
diff --git a/src/frontend/A64/decoder/a64.inc b/src/frontend/A64/decoder/a64.inc
index 37a4ab68..8ab56709 100644
--- a/src/frontend/A64/decoder/a64.inc
+++ b/src/frontend/A64/decoder/a64.inc
@@ -25,7 +25,7 @@ INST(BFM, "BFM", "z01100110Nrrrrrrssssssnnnnnddddd")
 INST(UBFM, "UBFM", "z10100110Nrrrrrrssssssnnnnnddddd")
 
 // Data processing - Immediate - Extract
-//INST(EXTR, "EXTR", "z00100111N0mmmmmssssssnnnnnddddd")
+INST(EXTR, "EXTR", "z00100111N0mmmmmssssssnnnnnddddd")
 
 // Conditional branch
 INST(B_cond, "B.cond", "01010100iiiiiiiiiiiiiiiiiii0cccc")
diff --git a/src/frontend/A64/translate/impl/data_processing_bitfield.cpp b/src/frontend/A64/translate/impl/data_processing_bitfield.cpp
index 10085dc7..9ca50e9b 100644
--- a/src/frontend/A64/translate/impl/data_processing_bitfield.cpp
+++ b/src/frontend/A64/translate/impl/data_processing_bitfield.cpp
@@ -76,4 +76,24 @@ bool TranslatorVisitor::UBFM(bool sf, bool N, Imm<6> immr, Imm<6> imms, Reg Rn, Reg Rd) {
     return true;
 }
 
+// EXTR: Rd = (Rn:Rm)[lsb + datasize - 1 : lsb], where lsb = imms.
+bool TranslatorVisitor::EXTR(bool sf, bool N, Reg Rm, Imm<6> imms, Reg Rn, Reg Rd) {
+    if (N != sf) {
+        return UnallocatedEncoding();
+    }
+
+    if (!sf && imms.Bit<5>()) {
+        return ReservedValue();
+    }
+
+    const size_t datasize = sf ? 64 : 32;
+
+    const IR::U32U64 m = X(datasize, Rm);
+    const IR::U32U64 n = X(datasize, Rn);
+    const IR::U32U64 result = ir.ExtractRegister(m, n, ir.Imm8(imms.ZeroExtend()));
+
+    X(datasize, Rd, result);
+    return true;
+}
+
 } // namespace Dynarmic::A64
diff --git a/src/frontend/ir/ir_emitter.cpp b/src/frontend/ir/ir_emitter.cpp
index 3feabbda..0b6b9eb3 100644
--- a/src/frontend/ir/ir_emitter.cpp
+++ b/src/frontend/ir/ir_emitter.cpp
@@ -481,6 +481,22 @@ U32U64 IREmitter::CountLeadingZeros(const U32U64& a) {
     return Inst(Opcode::CountLeadingZeros64, a);
 }
 
+U32 IREmitter::ExtractRegister(const U32& a, const U32& b, const U8& lsb) {
+    return Inst(Opcode::ExtractRegister32, a, b, lsb);
+}
+
+U64 IREmitter::ExtractRegister(const U64& a, const U64& b, const U8& lsb) {
+    return Inst(Opcode::ExtractRegister64, a, b, lsb);
+}
+
+U32U64 IREmitter::ExtractRegister(const U32U64& a, const U32U64& b, const U8& lsb) {
+    if (a.GetType() == IR::Type::U32) {
+        return Inst(Opcode::ExtractRegister32, a, b, lsb);
+    }
+
+    return Inst(Opcode::ExtractRegister64, a, b, lsb);
+}
+
 ResultAndOverflow IREmitter::SignedSaturatedAdd(const U32& a, const U32& b) {
     auto result = Inst(Opcode::SignedSaturatedAdd, a, b);
     auto overflow = Inst(Opcode::GetOverflowFromOp, result);
diff --git a/src/frontend/ir/ir_emitter.h b/src/frontend/ir/ir_emitter.h
index 36e47c6f..61b1edee 100644
--- a/src/frontend/ir/ir_emitter.h
+++ b/src/frontend/ir/ir_emitter.h
@@ -142,6 +142,9 @@ public:
     U32 CountLeadingZeros(const U32& a);
     U64 CountLeadingZeros(const U64& a);
     U32U64 CountLeadingZeros(const U32U64& a);
+    U32 ExtractRegister(const U32& a, const U32& b, const U8& lsb);
+    U64 ExtractRegister(const U64& a, const U64& b, const U8& lsb);
+    U32U64 ExtractRegister(const U32U64& a, const U32U64& b, const U8& lsb);
 
     ResultAndOverflow SignedSaturatedAdd(const U32& a, const U32& b);
     ResultAndOverflow SignedSaturatedSub(const U32& a, const U32& b);
diff --git a/src/frontend/ir/opcodes.inc b/src/frontend/ir/opcodes.inc
index 71540285..3e8af4a6 100644
--- a/src/frontend/ir/opcodes.inc
+++ b/src/frontend/ir/opcodes.inc
@@ -122,6 +122,8 @@ OPCODE(ByteReverseHalf, T::U16, T::U16 )
 OPCODE(ByteReverseDual, T::U64, T::U64 )
 OPCODE(CountLeadingZeros32, T::U32, T::U32 )
 OPCODE(CountLeadingZeros64, T::U64, T::U64 )
+OPCODE(ExtractRegister32, T::U32, T::U32, T::U32, T::U8 )
+OPCODE(ExtractRegister64, T::U64, T::U64, T::U64, T::U8 )
 
 // Saturated instructions
 OPCODE(SignedSaturatedAdd, T::U32, T::U32, T::U32 )