// dynarmic/src/backend_x64/emit_x64.cpp

/* This file is part of the dynarmic project.
* Copyright (c) 2016 MerryMage
* This software may be used and distributed according to the terms of the GNU
* General Public License version 2 or any later version.
*/
#include <algorithm>
#include <cstddef>
#include <limits>
#include <map>
#include <unordered_map>
#include "backend_x64/emit_x64.h"
#include "common/x64/abi.h"
#include "common/x64/emitter.h"
#include "frontend/arm_types.h"
// TODO: More optimal use of immediates.
// TODO: Have ARM flags in host flags and not have them use up GPR registers unless necessary.
// TODO: Actually implement that proper instruction selector you've always wanted to sweetheart.
using namespace Gen;
namespace Dynarmic {
namespace BackendX64 {
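// Helpers that form x64 memory operands addressing fields of the JitState structure.
// R15 is assumed to hold a pointer to the current JitState for the lifetime of emitted code.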
static OpArg MJitStateReg(Arm::Reg reg) {
return MDisp(R15, offsetof(JitState, Reg) + sizeof(u32) * static_cast<size_t>(reg));
}
static OpArg MJitStateCpsr() {
return MDisp(R15, offsetof(JitState, Cpsr));
}
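// Returns the unique use of `inst` with the given opcode (e.g. a GetCarryFromOp or
// GetOverflowFromOp pseudo-instruction), or nullptr if no such use exists.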
static IR::Inst* FindUseWithOpcode(IR::Inst* inst, IR::Opcode opcode) {
// Gets first found use.
auto uses = inst->GetUses();
auto iter = std::find_if(uses.begin(), uses.end(), [opcode](const auto& use){ return use->GetOpcode() == opcode; });
ASSERT(std::count_if(uses.begin(), uses.end(), [opcode](const auto& use){ return use->GetOpcode() == opcode; }) <= 1);
return iter == uses.end() ? nullptr : reinterpret_cast<IR::Inst*>(iter->get());
}
CodePtr EmitX64::Emit(Arm::LocationDescriptor descriptor, Dynarmic::IR::Block block) {
inhibit_emission.clear();
reg_alloc.Reset();
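// An INT3 is emitted in front of every block, presumably as a debugging guard; the entry
// point handed back to the caller is taken immediately after it.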
code->INT3();
CodePtr code_ptr = code->GetCodePtr();
for (const auto& value : block.instructions) {
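// Pseudo-instructions (such as GetCarryFromOp) that have already been folded into the
// instruction that produced them are recorded in inhibit_emission and skipped here.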
if (inhibit_emission.count(value.get()) != 0)
continue;
// Call the relevant Emit* member function.
switch (value->GetOpcode()) {
#define OPCODE(name, type, ...) \
case IR::Opcode::name: \
EmitX64::Emit##name(value.get()); \
break;
#include "frontend/ir/opcodes.inc"
#undef OPCODE
default:
ASSERT_MSG(false, "Invalid opcode %zu", static_cast<size_t>(value->GetOpcode()));
break;
}
reg_alloc.EndOfAllocScope();
}
EmitAddCycles(block.cycle_count);
EmitTerminal(block.terminal, block.location);
return code_ptr;
}
void EmitX64::EmitImmU1(IR::Value* value_) {
auto value = reinterpret_cast<IR::ImmU1*>(value_);
X64Reg result = reg_alloc.DefRegister(value);
code->MOV(32, R(result), Imm32(value->value));
}
void EmitX64::EmitImmU8(IR::Value* value_) {
auto value = reinterpret_cast<IR::ImmU8*>(value_);
X64Reg result = reg_alloc.DefRegister(value);
code->MOV(32, R(result), Imm32(value->value));
}
void EmitX64::EmitImmU32(IR::Value* value_) {
auto value = reinterpret_cast<IR::ImmU32*>(value_);
X64Reg result = reg_alloc.DefRegister(value);
code->MOV(32, R(result), Imm32(value->value));
}
void EmitX64::EmitImmRegRef(IR::Value*) {
return; // No need to do anything.
}
void EmitX64::EmitGetRegister(IR::Value* value_) {
auto value = reinterpret_cast<IR::Inst*>(value_);
auto regref = reinterpret_cast<IR::ImmRegRef*>(value->GetArg(0).get());
X64Reg result = reg_alloc.DefRegister(value);
code->MOV(32, R(result), MJitStateReg(regref->value));
}
void EmitX64::EmitSetRegister(IR::Value* value_) {
auto value = reinterpret_cast<IR::Inst*>(value_);
auto regref = reinterpret_cast<IR::ImmRegRef*>(value->GetArg(0).get());
X64Reg to_store = reg_alloc.UseRegister(value->GetArg(1).get());
code->MOV(32, MJitStateReg(regref->value), R(to_store));
}
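// ARM CPSR flag bit positions: N = bit 31, Z = bit 30, C = bit 29, V = bit 28.
// The getters/setters below operate directly on the corresponding bit of JitState::Cpsr.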
void EmitX64::EmitGetNFlag(IR::Value* value_) {
auto value = reinterpret_cast<IR::Inst*>(value_);
X64Reg result = reg_alloc.DefRegister(value);
// TODO: Flag optimization
code->MOV(32, R(result), MJitStateCpsr());
code->SHR(32, R(result), Imm8(31));
}
void EmitX64::EmitSetNFlag(IR::Value* value_) {
auto value = reinterpret_cast<IR::Inst*>(value_);
X64Reg to_store = reg_alloc.UseRegister(value->GetArg(0).get());
// TODO: Flag optimization
code->SHL(32, R(to_store), Imm8(31));
code->AND(32, MJitStateCpsr(), Imm32(~static_cast<u32>(1u << 31)));
code->OR(32, MJitStateCpsr(), R(to_store));
}
void EmitX64::EmitGetZFlag(IR::Value* value_) {
auto value = reinterpret_cast<IR::Inst*>(value_);
X64Reg result = reg_alloc.DefRegister(value);
// TODO: Flag optimization
code->MOV(32, R(result), MJitStateCpsr());
code->SHR(32, R(result), Imm8(30));
code->AND(32, R(result), Imm32(1));
}
void EmitX64::EmitSetZFlag(IR::Value* value_) {
auto value = reinterpret_cast<IR::Inst*>(value_);
X64Reg to_store = reg_alloc.UseRegister(value->GetArg(0).get());
// TODO: Flag optimization
code->SHL(32, R(to_store), Imm8(30));
code->AND(32, MJitStateCpsr(), Imm32(~static_cast<u32>(1 << 30)));
code->OR(32, MJitStateCpsr(), R(to_store));
}
void EmitX64::EmitGetCFlag(IR::Value* value_) {
auto value = reinterpret_cast<IR::Inst*>(value_);
X64Reg result = reg_alloc.DefRegister(value);
// TODO: Flag optimization
code->MOV(32, R(result), MJitStateCpsr());
code->SHR(32, R(result), Imm8(29));
code->AND(32, R(result), Imm32(1));
}
void EmitX64::EmitSetCFlag(IR::Value* value_) {
auto value = reinterpret_cast<IR::Inst*>(value_);
X64Reg to_store = reg_alloc.UseRegister(value->GetArg(0).get());
// TODO: Flag optimization
code->SHL(32, R(to_store), Imm8(29));
code->AND(32, MJitStateCpsr(), Imm32(~static_cast<u32>(1 << 29)));
code->OR(32, MJitStateCpsr(), R(to_store));
}
void EmitX64::EmitGetVFlag(IR::Value* value_) {
auto value = reinterpret_cast<IR::Inst*>(value_);
X64Reg result = reg_alloc.DefRegister(value);
// TODO: Flag optimization
code->MOV(32, R(result), MJitStateCpsr());
code->SHR(32, R(result), Imm8(28));
code->AND(32, R(result), Imm32(1));
}
void EmitX64::EmitSetVFlag(IR::Value* value_) {
auto value = reinterpret_cast<IR::Inst*>(value_);
X64Reg to_store = reg_alloc.UseRegister(value->GetArg(0).get());
// TODO: Flag optimization
code->SHL(32, R(to_store), Imm8(28));
code->AND(32, MJitStateCpsr(), Imm32(~static_cast<u32>(1 << 28)));
code->OR(32, MJitStateCpsr(), R(to_store));
}
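// GetCarryFromOp and GetOverflowFromOp are pseudo-instructions that are always emitted as
// part of the instruction producing them (via inhibit_emission), so reaching these
// emitters indicates a bug.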
void EmitX64::EmitGetCarryFromOp(IR::Value*) {
ASSERT_MSG(0, "should never happen");
}
void EmitX64::EmitGetOverflowFromOp(IR::Value*) {
ASSERT_MSG(0, "should never happen");
}
void EmitX64::EmitLeastSignificantByte(IR::Value* value_) {
auto value = reinterpret_cast<IR::Inst*>(value_);
// TODO: Flag optimization
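// Note: no masking is emitted here; consumers in this backend appear to read only the low
// byte of the value (8-bit CMP/TEST or the CL shift count), so an explicit AND with 0xFF
// is presumably unnecessary.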
reg_alloc.UseDefRegister(value->GetArg(0).get(), value);
}
void EmitX64::EmitMostSignificantBit(IR::Value* value_) {
auto value = reinterpret_cast<IR::Inst*>(value_);
X64Reg result = reg_alloc.UseDefRegister(value->GetArg(0).get(), value);
// TODO: Flag optimization
code->SHR(32, R(result), Imm8(31));
}
void EmitX64::EmitIsZero(IR::Value* value_) {
auto value = reinterpret_cast<IR::Inst*>(value_);
X64Reg result = reg_alloc.UseDefRegister(value->GetArg(0).get(), value);
// TODO: Flag optimization
code->TEST(32, R(result), R(result));
code->SETcc(CCFlags::CC_E, R(result));
code->MOVZX(32, 8, result, R(result));
}
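// ARM LSL by register: the shift amount is the bottom byte of Rs.
//   Rs & 0xFF == 0       -> result and carry unchanged
//   0 < (Rs & 0xFF) < 32 -> normal shift, carry = last bit shifted out
//   Rs & 0xFF == 32      -> result = 0, carry = bit 0 of the operand
//   Rs & 0xFF > 32       -> result = 0, carry = 0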
void EmitX64::EmitLogicalShiftLeft(IR::Value* value_) {
auto value = reinterpret_cast<IR::Inst*>(value_);
auto carry_inst = FindUseWithOpcode(value, IR::Opcode::GetCarryFromOp);
// TODO: Consider using BMI2 instructions like SHLX when arm-in-host flags is implemented.
if (!carry_inst) {
X64Reg shift = reg_alloc.UseRegister(value->GetArg(1).get(), {HostLoc::RCX});
X64Reg result = reg_alloc.UseDefRegister(value->GetArg(0).get(), value);
X64Reg zero = reg_alloc.ScratchRegister();
// The 32-bit x64 SHL instruction masks the shift count by 0x1F before performing the shift.
// ARM differs: it does not mask the count, so shift amounts of 32 or more must produce zero.
code->SHL(32, R(result), R(shift));
code->XOR(32, R(zero), R(zero));
code->CMP(8, R(shift), Imm8(32));
code->CMOVcc(32, result, R(zero), CC_NB);
} else {
inhibit_emission.insert(carry_inst);
X64Reg shift = reg_alloc.UseRegister(value->GetArg(1).get(), {HostLoc::RCX});
X64Reg result = reg_alloc.UseDefRegister(value->GetArg(0).get(), value);
X64Reg carry = reg_alloc.UseDefRegister(value->GetArg(2).get(), carry_inst);
// TODO: Optimize this.
code->CMP(8, R(shift), Imm8(32));
auto Rs_gt32 = code->J_CC(CC_A);
auto Rs_eq32 = code->J_CC(CC_E);
// if (Rs & 0xFF < 32) {
// Load the old carry into CF: SHL with a zero count leaves the flags untouched, so the
// SETcc below correctly re-stores the old carry when Rs & 0xFF == 0.
code->BT(32, R(carry), Imm8(0));
code->SHL(32, R(result), R(shift));
code->SETcc(CC_C, R(carry));
auto jmp_to_end_1 = code->J();
// } else if (Rs & 0xFF > 32) {
code->SetJumpTarget(Rs_gt32);
code->XOR(32, R(result), R(result));
code->XOR(32, R(carry), R(carry));
auto jmp_to_end_2 = code->J();
// } else if (Rs & 0xFF == 32) {
code->SetJumpTarget(Rs_eq32);
code->MOV(32, R(carry), R(result));
code->AND(32, R(carry), Imm8(1));
code->XOR(32, R(result), R(result));
// }
code->SetJumpTarget(jmp_to_end_1);
code->SetJumpTarget(jmp_to_end_2);
}
}
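// ARM LSR by register: the shift amount is the bottom byte of Rs.
//   Rs & 0xFF == 0       -> result and carry unchanged
//   0 < (Rs & 0xFF) < 32 -> normal shift, carry = last bit shifted out
//   Rs & 0xFF == 32      -> result = 0, carry = bit 31 of the operand
//   Rs & 0xFF > 32       -> result = 0, carry = 0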
void EmitX64::EmitLogicalShiftRight(IR::Value* value_) {
auto value = reinterpret_cast<IR::Inst*>(value_);
auto carry_inst = FindUseWithOpcode(value, IR::Opcode::GetCarryFromOp);
if (!carry_inst) {
X64Reg shift = reg_alloc.UseRegister(value->GetArg(1).get(), {HostLoc::RCX});
X64Reg result = reg_alloc.UseDefRegister(value->GetArg(0).get(), value);
X64Reg zero = reg_alloc.ScratchRegister();
// The 32-bit x64 SHR instruction masks the shift count by 0x1F before performing the shift.
// ARM differs: it does not mask the count, so shift amounts of 32 or more must produce zero.
code->SHR(32, R(result), R(shift));
code->XOR(32, R(zero), R(zero));
code->CMP(8, R(shift), Imm8(32));
code->CMOVcc(32, result, R(zero), CC_NB);
} else {
inhibit_emission.insert(carry_inst);
X64Reg shift = reg_alloc.UseRegister(value->GetArg(1).get(), {HostLoc::RCX});
X64Reg result = reg_alloc.UseDefRegister(value->GetArg(0).get(), value);
X64Reg carry = reg_alloc.UseDefRegister(value->GetArg(2).get(), carry_inst);
// TODO: Optimize this.
code->CMP(8, R(shift), Imm8(32));
auto Rs_gt32 = code->J_CC(CC_A);
auto Rs_eq32 = code->J_CC(CC_E);
// if (Rs & 0xFF == 0) goto end;
code->TEST(8, R(shift), R(shift));
auto Rs_zero = code->J_CC(CC_Z);
// if (Rs & 0xFF < 32) {
code->SHR(32, R(result), R(shift));
code->SETcc(CC_C, R(carry));
auto jmp_to_end_1 = code->J();
// } else if (Rs & 0xFF > 32) {
code->SetJumpTarget(Rs_gt32);
code->MOV(32, R(result), Imm32(0));
code->MOV(8, R(carry), Imm8(0));
auto jmp_to_end_2 = code->J();
// } else if (Rs & 0xFF == 32) {
code->SetJumpTarget(Rs_eq32);
code->BT(32, R(result), Imm8(31));
code->SETcc(CC_C, R(carry));
code->MOV(32, R(result), Imm32(0));
// }
code->SetJumpTarget(jmp_to_end_1);
code->SetJumpTarget(jmp_to_end_2);
code->SetJumpTarget(Rs_zero);
}
}
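// ARM ASR by register: the shift amount is the bottom byte of Rs.
//   Rs & 0xFF == 0       -> result and carry unchanged
//   0 < (Rs & 0xFF) < 32 -> normal arithmetic shift, carry = last bit shifted out
//   Rs & 0xFF >= 32      -> result filled with the sign bit, carry = bit 31 of the operand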
void EmitX64::EmitArithmeticShiftRight(IR::Value* value_) {
auto value = reinterpret_cast<IR::Inst*>(value_);
auto carry_inst = FindUseWithOpcode(value, IR::Opcode::GetCarryFromOp);
if (!carry_inst) {
X64Reg shift = reg_alloc.UseRegister(value->GetArg(1).get(), {HostLoc::RCX});
X64Reg result = reg_alloc.UseDefRegister(value->GetArg(0).get(), value);
// The 32-bit x64 SAR instruction masks the shift count by 0x1F before performing the shift.
// ARM does not mask the count; for ASR, counts of 32 or more fill the result with the sign bit.
// TODO: Optimize this.
code->CMP(8, R(shift), Imm8(31));
auto Rs_gt31 = code->J_CC(CC_A);
// if (Rs & 0xFF <= 31) {
code->SAR(32, R(result), R(shift));
auto jmp_to_end = code->J();
// } else {
code->SetJumpTarget(Rs_gt31);
code->SAR(32, R(result), Imm8(31)); // Verified.
// }
code->SetJumpTarget(jmp_to_end);
} else {
inhibit_emission.insert(carry_inst);
X64Reg shift = reg_alloc.UseRegister(value->GetArg(1).get(), {HostLoc::RCX});
X64Reg result = reg_alloc.UseDefRegister(value->GetArg(0).get(), value);
X64Reg carry = reg_alloc.UseDefRegister(value->GetArg(2).get(), carry_inst);
// TODO: Optimize this.
code->CMP(8, R(shift), Imm8(31));
auto Rs_gt31 = code->J_CC(CC_A);
// if (Rs & 0xFF == 0) goto end;
code->TEST(8, R(shift), R(shift));
auto Rs_zero = code->J_CC(CC_Z);
// if (Rs & 0xFF <= 31) {
code->SAR(32, R(result), R(CL));
code->SETcc(CC_C, R(carry));
auto jmp_to_end = code->J();
// } else if (Rs & 0xFF > 31) {
code->SetJumpTarget(Rs_gt31);
code->SAR(32, R(result), Imm8(31)); // Verified.
code->BT(32, R(result), Imm8(31));
code->SETcc(CC_C, R(carry));
// }
code->SetJumpTarget(jmp_to_end);
code->SetJumpTarget(Rs_zero);
}
}
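// The incoming ARM carry is loaded into the host carry flag with BT, the addition is done
// with ADC, and SETcc captures the resulting carry/overflow for any GetCarryFromOp or
// GetOverflowFromOp uses, which are then inhibited from separate emission.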
void EmitX64::EmitAddWithCarry(IR::Value* value_) {
auto value = reinterpret_cast<IR::Inst*>(value_);
auto carry_inst = FindUseWithOpcode(value, IR::Opcode::GetCarryFromOp);
auto overflow_inst = FindUseWithOpcode(value, IR::Opcode::GetOverflowFromOp);
X64Reg addend = reg_alloc.UseRegister(value->GetArg(1).get());
X64Reg result = reg_alloc.UseDefRegister(value->GetArg(0).get(), value);
X64Reg carry = carry_inst
? reg_alloc.UseDefRegister(value->GetArg(2).get(), carry_inst)
: reg_alloc.UseRegister(value->GetArg(2).get());
X64Reg overflow = overflow_inst
? reg_alloc.DefRegister(overflow_inst)
: X64Reg::INVALID_REG;
// TODO: Consider using LEA.
code->BT(32, R(carry), Imm8(0)); // Sets x64 CF appropriately.
code->ADC(32, R(result), R(addend));
if (carry_inst) {
inhibit_emission.insert(carry_inst);
code->SETcc(Gen::CC_C, R(carry));
}
if (overflow_inst) {
inhibit_emission.insert(overflow_inst);
code->SETcc(Gen::CC_O, R(overflow));
}
}
void EmitX64::EmitAnd(IR::Value* value_) {
auto value = reinterpret_cast<IR::Inst*>(value_);
X64Reg andend = reg_alloc.UseRegister(value->GetArg(1).get());
X64Reg result = reg_alloc.UseDefRegister(value->GetArg(0).get(), value);
code->AND(32, R(result), R(andend));
}
void EmitX64::EmitEor(IR::Value* value_) {
auto value = reinterpret_cast<IR::Inst*>(value_);
X64Reg eorend = reg_alloc.UseRegister(value->GetArg(1).get());
X64Reg result = reg_alloc.UseDefRegister(value->GetArg(0).get(), value);
code->XOR(32, R(result), R(eorend));
}
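// Charges the block's cycle count against JitState::cycles_remaining; the run loop is
// presumably responsible for stopping once this value is exhausted.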
void EmitX64::EmitAddCycles(size_t cycles) {
ASSERT(cycles < std::numeric_limits<u32>::max());
code->SUB(64, MDisp(R15, offsetof(JitState, cycles_remaining)), Imm32(static_cast<u32>(cycles)));
}
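// Dispatches on the boost::variant index of the terminal. The case numbers are assumed to
// follow the declaration order of IR::Terminal's alternatives, with index 0 reserved for
// the invalid/empty alternative (hence the default assert).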
void EmitX64::EmitTerminal(IR::Terminal terminal, Arm::LocationDescriptor initial_location) {
switch (terminal.which()) {
case 1:
EmitTerminalInterpret(boost::get<IR::Term::Interpret>(terminal), initial_location);
return;
case 2:
EmitTerminalReturnToDispatch(boost::get<IR::Term::ReturnToDispatch>(terminal), initial_location);
return;
case 3:
EmitTerminalLinkBlock(boost::get<IR::Term::LinkBlock>(terminal), initial_location);
return;
case 4:
EmitTerminalLinkBlockFast(boost::get<IR::Term::LinkBlockFast>(terminal), initial_location);
return;
case 5:
EmitTerminalPopRSBHint(boost::get<IR::Term::PopRSBHint>(terminal), initial_location);
return;
case 6:
EmitTerminalIf(boost::get<IR::Term::If>(terminal), initial_location);
return;
default:
ASSERT_MSG(0, "Invalid Terminal. Bad programmer.");
return;
}
}
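// Interpreter fallback: the next PC and the JIT interface pointer are passed in the first
// two ABI argument registers, PC is written back to the JitState, the host RSP is restored,
// and control returns to the dispatcher after the callback.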
void EmitX64::EmitTerminalInterpret(IR::Term::Interpret terminal, Arm::LocationDescriptor initial_location) {
ASSERT_MSG(terminal.next.TFlag == initial_location.TFlag, "Unimplemented");
ASSERT_MSG(terminal.next.EFlag == initial_location.EFlag, "Unimplemented");
code->MOV(64, R(ABI_PARAM1), Imm64(terminal.next.arm_pc));
code->MOV(64, R(ABI_PARAM2), Imm64(reinterpret_cast<u64>(jit_interface)));
code->MOV(32, MJitStateReg(Arm::Reg::PC), R(ABI_PARAM1));
code->MOV(64, R(RSP), MDisp(R15, offsetof(JitState, save_host_RSP)));
code->CALL(reinterpret_cast<void*>(cb.InterpreterFallback));
code->JMP(routines->RunCodeReturnAddress(), true); // TODO: Check cycles
}
void EmitX64::EmitTerminalReturnToDispatch(IR::Term::ReturnToDispatch, Arm::LocationDescriptor initial_location) {
code->JMP(routines->RunCodeReturnAddress(), true);
}
void EmitX64::EmitTerminalLinkBlock(IR::Term::LinkBlock terminal, Arm::LocationDescriptor initial_location) {
ASSERT_MSG(terminal.next.TFlag == initial_location.TFlag, "Unimplemented");
ASSERT_MSG(terminal.next.EFlag == initial_location.EFlag, "Unimplemented");
code->MOV(32, MJitStateReg(Arm::Reg::PC), Imm32(terminal.next.arm_pc));
code->JMP(routines->RunCodeReturnAddress(), true); // TODO: Check cycles, Properly do a link
}
void EmitX64::EmitTerminalLinkBlockFast(IR::Term::LinkBlockFast terminal, Arm::LocationDescriptor initial_location) {
EmitTerminalLinkBlock(IR::Term::LinkBlock{terminal.next}, initial_location); // TODO: Implement
}
void EmitX64::EmitTerminalPopRSBHint(IR::Term::PopRSBHint, Arm::LocationDescriptor initial_location) {
EmitTerminalReturnToDispatch({}, initial_location); // TODO: Implement RSB
}
void EmitX64::EmitTerminalIf(IR::Term::If terminal, Arm::LocationDescriptor initial_location) {
ASSERT_MSG(0, "Unimplemented");
}
void EmitX64::ClearCache() {
basic_blocks.clear();
}
} // namespace BackendX64
} // namespace Dynarmic