Rename JitState to A32JitState
parent 63bd1ece23
commit 7bf421dd38

11 changed files with 124 additions and 123 deletions
CMakeLists.txt

@@ -77,6 +77,8 @@ if (ARCHITECTURE_x86_64)
     target_sources(dynarmic PRIVATE
         backend_x64/a32_emit_x64.cpp
         backend_x64/a32_emit_x64.h
+        backend_x64/a32_jitstate.cpp
+        backend_x64/a32_jitstate.h
         backend_x64/abi.cpp
         backend_x64/abi.h
         backend_x64/block_of_code.cpp
@@ -88,8 +90,6 @@ if (ARCHITECTURE_x86_64)
         backend_x64/hostloc.cpp
         backend_x64/hostloc.h
         backend_x64/interface_x64.cpp
-        backend_x64/jitstate.cpp
-        backend_x64/jitstate.h
         backend_x64/oparg.h
         backend_x64/reg_alloc.cpp
         backend_x64/reg_alloc.h
backend_x64/a32_emit_x64.cpp

@@ -10,10 +10,10 @@
 #include <dynarmic/coprocessor.h>

 #include "backend_x64/a32_emit_x64.h"
+#include "backend_x64/a32_jitstate.h"
 #include "backend_x64/abi.h"
 #include "backend_x64/block_of_code.h"
 #include "backend_x64/emit_x64.h"
-#include "backend_x64/jitstate.h"
 #include "common/address_range.h"
 #include "common/assert.h"
 #include "common/bit_util.h"
@@ -34,17 +34,17 @@ namespace BackendX64 {
 using namespace Xbyak::util;

 static Xbyak::Address MJitStateReg(A32::Reg reg) {
-    return dword[r15 + offsetof(JitState, Reg) + sizeof(u32) * static_cast<size_t>(reg)];
+    return dword[r15 + offsetof(A32JitState, Reg) + sizeof(u32) * static_cast<size_t>(reg)];
 }

 static Xbyak::Address MJitStateExtReg(A32::ExtReg reg) {
     if (A32::IsSingleExtReg(reg)) {
         size_t index = static_cast<size_t>(reg) - static_cast<size_t>(A32::ExtReg::S0);
-        return dword[r15 + offsetof(JitState, ExtReg) + sizeof(u32) * index];
+        return dword[r15 + offsetof(A32JitState, ExtReg) + sizeof(u32) * index];
     }
     if (A32::IsDoubleExtReg(reg)) {
         size_t index = static_cast<size_t>(reg) - static_cast<size_t>(A32::ExtReg::D0);
-        return qword[r15 + offsetof(JitState, ExtReg) + sizeof(u64) * index];
+        return qword[r15 + offsetof(A32JitState, ExtReg) + sizeof(u64) * index];
     }
     ASSERT_MSG(false, "Should never happen.");
 }
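For context on the two helpers above: they address guest state purely as displacements from r15, which the generated code keeps pointing at the live A32JitState, so offsetof plus a scaled index selects a field. A minimal host-side sketch of the same arithmetic follows; it is an illustration only, and the struct here is a cut-down stand-in rather than the real A32JitState.

#include <array>
#include <cstddef>
#include <cstdint>

namespace {
// Cut-down stand-in for A32JitState with just the fields used here.
struct StateSketch {
    std::array<std::uint32_t, 16> Reg{};
    std::array<std::uint32_t, 64> ExtReg{};
};

// Same computation as MJitStateReg: base + offsetof(Reg) + 4 * index.
std::uint32_t* RegAddr(StateSketch* base, std::size_t index) {
    auto* bytes = reinterpret_cast<std::uint8_t*>(base);
    return reinterpret_cast<std::uint32_t*>(bytes + offsetof(StateSketch, Reg) + sizeof(std::uint32_t) * index);
}
} // namespace

int main() {
    StateSketch js{};
    *RegAddr(&js, 15) = 0x1234; // write the guest PC slot through the computed address
    return js.Reg[15] == 0x1234 ? 0 : 1;
}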
@@ -174,7 +174,7 @@ void A32EmitX64::EmitA32SetExtendedRegister64(RegAlloc& reg_alloc, IR::Block&, I
     }
 }

-static u32 GetCpsrImpl(JitState* jit_state) {
+static u32 GetCpsrImpl(A32JitState* jit_state) {
     return jit_state->Cpsr();
 }

@@ -184,11 +184,11 @@ void A32EmitX64::EmitA32GetCpsr(RegAlloc& reg_alloc, IR::Block&, IR::Inst* inst)
     Xbyak::Reg32 b = reg_alloc.ScratchGpr().cvt32();
     Xbyak::Reg32 c = reg_alloc.ScratchGpr().cvt32();

-    code->mov(c, dword[r15 + offsetof(JitState, CPSR_ge)]);
+    code->mov(c, dword[r15 + offsetof(A32JitState, CPSR_ge)]);
     // Here we observe that CPSR_q and CPSR_nzcv are right next to each other in memory,
     // so we load them both at the same time with one 64-bit read. This allows us to
     // extract all of their bits together at once with one pext.
-    code->mov(result.cvt64(), qword[r15 + offsetof(JitState, CPSR_q)]);
+    code->mov(result.cvt64(), qword[r15 + offsetof(A32JitState, CPSR_q)]);
     code->mov(b.cvt64(), 0xF000000000000001ull);
     code->pext(result.cvt64(), result.cvt64(), b.cvt64());
     code->mov(b, 0x80808080);
@@ -197,9 +197,9 @@ void A32EmitX64::EmitA32GetCpsr(RegAlloc& reg_alloc, IR::Block&, IR::Inst* inst)
     code->shl(c, 16);
     code->or_(result, c);
     code->mov(b, 0x00000220);
-    code->mov(c, dword[r15 + offsetof(JitState, CPSR_et)]);
+    code->mov(c, dword[r15 + offsetof(A32JitState, CPSR_et)]);
     code->pdep(c.cvt64(), c.cvt64(), b.cvt64());
-    code->or_(result, dword[r15 + offsetof(JitState, CPSR_jaifm)]);
+    code->or_(result, dword[r15 + offsetof(A32JitState, CPSR_jaifm)]);
     code->or_(result, c);

     reg_alloc.DefineValue(inst, result);
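The comment in the hunk above relies on CPSR_q and CPSR_nzcv being adjacent 32-bit fields: one 64-bit load at offsetof(A32JitState, CPSR_q) yields Q in the low dword and NZCV in the top bits of the high dword, and pext with mask 0xF000000000000001 gathers those five bits into the low bits of the result. A host-side sketch of that extraction, assuming the field layout implied by the code above and a BMI2-capable CPU (little-endian x64):

#include <cstdint>
#include <cstring>
#include <immintrin.h> // _pext_u64 (BMI2; compile with -mbmi2 or equivalent)

int main() {
    // Adjacent fields as assumed by the emitter: Q flag, then packed NZCV.
    struct { std::uint32_t CPSR_q; std::uint32_t CPSR_nzcv; } state{1, 0xB0000000}; // Q=1, N=1, Z=0, C=1, V=1
    std::uint64_t both;
    std::memcpy(&both, &state, sizeof(both)); // the single 64-bit read
    // Bit 0 is Q, bits 60..63 are NZCV; pext packs them into bits 0..4 of the result.
    std::uint64_t packed = _pext_u64(both, 0xF000000000000001ull);
    // packed == 0b10111: Q in bit 0, then V, C, Z, N in bits 1..4.
    return packed == 0x17 ? 0 : 1;
}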
@@ -210,7 +210,7 @@ void A32EmitX64::EmitA32GetCpsr(RegAlloc& reg_alloc, IR::Block&, IR::Inst* inst)
     }
 }

-static void SetCpsrImpl(u32 value, JitState* jit_state) {
+static void SetCpsrImpl(u32 value, A32JitState* jit_state) {
     jit_state->SetCpsr(value);
 }

@@ -226,12 +226,12 @@ void A32EmitX64::EmitA32SetCpsrNZCV(RegAlloc& reg_alloc, IR::Block&, IR::Inst* i
     if (args[0].IsImmediate()) {
         u32 imm = args[0].GetImmediateU32();

-        code->mov(dword[r15 + offsetof(JitState, CPSR_nzcv)], u32(imm & 0xF0000000));
+        code->mov(dword[r15 + offsetof(A32JitState, CPSR_nzcv)], u32(imm & 0xF0000000));
     } else {
         Xbyak::Reg32 a = reg_alloc.UseScratchGpr(args[0]).cvt32();

         code->and_(a, 0xF0000000);
-        code->mov(dword[r15 + offsetof(JitState, CPSR_nzcv)], a);
+        code->mov(dword[r15 + offsetof(A32JitState, CPSR_nzcv)], a);
     }
 }

@@ -240,21 +240,21 @@ void A32EmitX64::EmitA32SetCpsrNZCVQ(RegAlloc& reg_alloc, IR::Block&, IR::Inst*
     if (args[0].IsImmediate()) {
         u32 imm = args[0].GetImmediateU32();

-        code->mov(dword[r15 + offsetof(JitState, CPSR_nzcv)], u32(imm & 0xF0000000));
-        code->mov(code->byte[r15 + offsetof(JitState, CPSR_q)], u8((imm & 0x08000000) != 0 ? 1 : 0));
+        code->mov(dword[r15 + offsetof(A32JitState, CPSR_nzcv)], u32(imm & 0xF0000000));
+        code->mov(code->byte[r15 + offsetof(A32JitState, CPSR_q)], u8((imm & 0x08000000) != 0 ? 1 : 0));
     } else {
         Xbyak::Reg32 a = reg_alloc.UseScratchGpr(args[0]).cvt32();

         code->bt(a, 27);
-        code->setc(code->byte[r15 + offsetof(JitState, CPSR_q)]);
+        code->setc(code->byte[r15 + offsetof(A32JitState, CPSR_q)]);
         code->and_(a, 0xF0000000);
-        code->mov(dword[r15 + offsetof(JitState, CPSR_nzcv)], a);
+        code->mov(dword[r15 + offsetof(A32JitState, CPSR_nzcv)], a);
     }
 }

 void A32EmitX64::EmitA32GetNFlag(RegAlloc& reg_alloc, IR::Block&, IR::Inst* inst) {
     Xbyak::Reg32 result = reg_alloc.ScratchGpr().cvt32();
-    code->mov(result, dword[r15 + offsetof(JitState, CPSR_nzcv)]);
+    code->mov(result, dword[r15 + offsetof(A32JitState, CPSR_nzcv)]);
     code->shr(result, 31);
     reg_alloc.DefineValue(inst, result);
 }
@@ -265,22 +265,22 @@ void A32EmitX64::EmitA32SetNFlag(RegAlloc& reg_alloc, IR::Block&, IR::Inst* inst
     auto args = reg_alloc.GetArgumentInfo(inst);
     if (args[0].IsImmediate()) {
         if (args[0].GetImmediateU1()) {
-            code->or_(dword[r15 + offsetof(JitState, CPSR_nzcv)], flag_mask);
+            code->or_(dword[r15 + offsetof(A32JitState, CPSR_nzcv)], flag_mask);
         } else {
-            code->and_(dword[r15 + offsetof(JitState, CPSR_nzcv)], ~flag_mask);
+            code->and_(dword[r15 + offsetof(A32JitState, CPSR_nzcv)], ~flag_mask);
         }
     } else {
         Xbyak::Reg32 to_store = reg_alloc.UseScratchGpr(args[0]).cvt32();

         code->shl(to_store, flag_bit);
-        code->and_(dword[r15 + offsetof(JitState, CPSR_nzcv)], ~flag_mask);
-        code->or_(dword[r15 + offsetof(JitState, CPSR_nzcv)], to_store);
+        code->and_(dword[r15 + offsetof(A32JitState, CPSR_nzcv)], ~flag_mask);
+        code->or_(dword[r15 + offsetof(A32JitState, CPSR_nzcv)], to_store);
     }
 }

 void A32EmitX64::EmitA32GetZFlag(RegAlloc& reg_alloc, IR::Block&, IR::Inst* inst) {
     Xbyak::Reg32 result = reg_alloc.ScratchGpr().cvt32();
-    code->mov(result, dword[r15 + offsetof(JitState, CPSR_nzcv)]);
+    code->mov(result, dword[r15 + offsetof(A32JitState, CPSR_nzcv)]);
     code->shr(result, 30);
     code->and_(result, 1);
     reg_alloc.DefineValue(inst, result);
@@ -292,22 +292,22 @@ void A32EmitX64::EmitA32SetZFlag(RegAlloc& reg_alloc, IR::Block&, IR::Inst* inst
     auto args = reg_alloc.GetArgumentInfo(inst);
     if (args[0].IsImmediate()) {
         if (args[0].GetImmediateU1()) {
-            code->or_(dword[r15 + offsetof(JitState, CPSR_nzcv)], flag_mask);
+            code->or_(dword[r15 + offsetof(A32JitState, CPSR_nzcv)], flag_mask);
         } else {
-            code->and_(dword[r15 + offsetof(JitState, CPSR_nzcv)], ~flag_mask);
+            code->and_(dword[r15 + offsetof(A32JitState, CPSR_nzcv)], ~flag_mask);
         }
     } else {
         Xbyak::Reg32 to_store = reg_alloc.UseScratchGpr(args[0]).cvt32();

         code->shl(to_store, flag_bit);
-        code->and_(dword[r15 + offsetof(JitState, CPSR_nzcv)], ~flag_mask);
-        code->or_(dword[r15 + offsetof(JitState, CPSR_nzcv)], to_store);
+        code->and_(dword[r15 + offsetof(A32JitState, CPSR_nzcv)], ~flag_mask);
+        code->or_(dword[r15 + offsetof(A32JitState, CPSR_nzcv)], to_store);
     }
 }

 void A32EmitX64::EmitA32GetCFlag(RegAlloc& reg_alloc, IR::Block&, IR::Inst* inst) {
     Xbyak::Reg32 result = reg_alloc.ScratchGpr().cvt32();
-    code->mov(result, dword[r15 + offsetof(JitState, CPSR_nzcv)]);
+    code->mov(result, dword[r15 + offsetof(A32JitState, CPSR_nzcv)]);
     code->shr(result, 29);
     code->and_(result, 1);
     reg_alloc.DefineValue(inst, result);
@@ -319,22 +319,22 @@ void A32EmitX64::EmitA32SetCFlag(RegAlloc& reg_alloc, IR::Block&, IR::Inst* inst
     auto args = reg_alloc.GetArgumentInfo(inst);
     if (args[0].IsImmediate()) {
         if (args[0].GetImmediateU1()) {
-            code->or_(dword[r15 + offsetof(JitState, CPSR_nzcv)], flag_mask);
+            code->or_(dword[r15 + offsetof(A32JitState, CPSR_nzcv)], flag_mask);
         } else {
-            code->and_(dword[r15 + offsetof(JitState, CPSR_nzcv)], ~flag_mask);
+            code->and_(dword[r15 + offsetof(A32JitState, CPSR_nzcv)], ~flag_mask);
         }
     } else {
         Xbyak::Reg32 to_store = reg_alloc.UseScratchGpr(args[0]).cvt32();

         code->shl(to_store, flag_bit);
-        code->and_(dword[r15 + offsetof(JitState, CPSR_nzcv)], ~flag_mask);
-        code->or_(dword[r15 + offsetof(JitState, CPSR_nzcv)], to_store);
+        code->and_(dword[r15 + offsetof(A32JitState, CPSR_nzcv)], ~flag_mask);
+        code->or_(dword[r15 + offsetof(A32JitState, CPSR_nzcv)], to_store);
     }
 }

 void A32EmitX64::EmitA32GetVFlag(RegAlloc& reg_alloc, IR::Block&, IR::Inst* inst) {
     Xbyak::Reg32 result = reg_alloc.ScratchGpr().cvt32();
-    code->mov(result, dword[r15 + offsetof(JitState, CPSR_nzcv)]);
+    code->mov(result, dword[r15 + offsetof(A32JitState, CPSR_nzcv)]);
     code->shr(result, 28);
     code->and_(result, 1);
     reg_alloc.DefineValue(inst, result);
@@ -346,16 +346,16 @@ void A32EmitX64::EmitA32SetVFlag(RegAlloc& reg_alloc, IR::Block&, IR::Inst* inst
     auto args = reg_alloc.GetArgumentInfo(inst);
     if (args[0].IsImmediate()) {
         if (args[0].GetImmediateU1()) {
-            code->or_(dword[r15 + offsetof(JitState, CPSR_nzcv)], flag_mask);
+            code->or_(dword[r15 + offsetof(A32JitState, CPSR_nzcv)], flag_mask);
         } else {
-            code->and_(dword[r15 + offsetof(JitState, CPSR_nzcv)], ~flag_mask);
+            code->and_(dword[r15 + offsetof(A32JitState, CPSR_nzcv)], ~flag_mask);
         }
     } else {
         Xbyak::Reg32 to_store = reg_alloc.UseScratchGpr(args[0]).cvt32();

         code->shl(to_store, flag_bit);
-        code->and_(dword[r15 + offsetof(JitState, CPSR_nzcv)], ~flag_mask);
-        code->or_(dword[r15 + offsetof(JitState, CPSR_nzcv)], to_store);
+        code->and_(dword[r15 + offsetof(A32JitState, CPSR_nzcv)], ~flag_mask);
+        code->or_(dword[r15 + offsetof(A32JitState, CPSR_nzcv)], to_store);
     }
 }

@@ -363,17 +363,17 @@ void A32EmitX64::EmitA32OrQFlag(RegAlloc& reg_alloc, IR::Block&, IR::Inst* inst)
     auto args = reg_alloc.GetArgumentInfo(inst);
     if (args[0].IsImmediate()) {
         if (args[0].GetImmediateU1())
-            code->mov(dword[r15 + offsetof(JitState, CPSR_q)], 1);
+            code->mov(dword[r15 + offsetof(A32JitState, CPSR_q)], 1);
     } else {
         Xbyak::Reg8 to_store = reg_alloc.UseGpr(args[0]).cvt8();

-        code->or_(code->byte[r15 + offsetof(JitState, CPSR_q)], to_store);
+        code->or_(code->byte[r15 + offsetof(A32JitState, CPSR_q)], to_store);
     }
 }

 void A32EmitX64::EmitA32GetGEFlags(RegAlloc& reg_alloc, IR::Block&, IR::Inst* inst) {
     Xbyak::Xmm result = reg_alloc.ScratchXmm();
-    code->movd(result, dword[r15 + offsetof(JitState, CPSR_ge)]);
+    code->movd(result, dword[r15 + offsetof(A32JitState, CPSR_ge)]);
     reg_alloc.DefineValue(inst, result);
 }

@@ -383,10 +383,10 @@ void A32EmitX64::EmitA32SetGEFlags(RegAlloc& reg_alloc, IR::Block&, IR::Inst* in

     if (args[0].IsInXmm()) {
         Xbyak::Xmm to_store = reg_alloc.UseXmm(args[0]);
-        code->movd(dword[r15 + offsetof(JitState, CPSR_ge)], to_store);
+        code->movd(dword[r15 + offsetof(A32JitState, CPSR_ge)], to_store);
     } else {
         Xbyak::Reg32 to_store = reg_alloc.UseGpr(args[0]).cvt32();
-        code->mov(dword[r15 + offsetof(JitState, CPSR_ge)], to_store);
+        code->mov(dword[r15 + offsetof(A32JitState, CPSR_ge)], to_store);
     }
 }

@@ -400,7 +400,7 @@ void A32EmitX64::EmitA32SetGEFlagsCompressed(RegAlloc& reg_alloc, IR::Block&, IR
         ge |= Common::Bit<17>(imm) ? 0x0000FF00 : 0;
         ge |= Common::Bit<16>(imm) ? 0x000000FF : 0;

-        code->mov(dword[r15 + offsetof(JitState, CPSR_ge)], ge);
+        code->mov(dword[r15 + offsetof(A32JitState, CPSR_ge)], ge);
     } else if (code->DoesCpuSupport(Xbyak::util::Cpu::tBMI2)) {
         Xbyak::Reg32 a = reg_alloc.UseScratchGpr(args[0]).cvt32();
         Xbyak::Reg32 b = reg_alloc.ScratchGpr().cvt32();
@@ -409,7 +409,7 @@ void A32EmitX64::EmitA32SetGEFlagsCompressed(RegAlloc& reg_alloc, IR::Block&, IR
         code->shr(a, 16);
         code->pdep(a, a, b);
         code->imul(a, a, 0xFF);
-        code->mov(dword[r15 + offsetof(JitState, CPSR_ge)], a);
+        code->mov(dword[r15 + offsetof(A32JitState, CPSR_ge)], a);
     } else {
         Xbyak::Reg32 a = reg_alloc.UseScratchGpr(args[0]).cvt32();

@@ -418,7 +418,7 @@ void A32EmitX64::EmitA32SetGEFlagsCompressed(RegAlloc& reg_alloc, IR::Block&, IR
         code->imul(a, a, 0x00204081);
         code->and_(a, 0x01010101);
         code->imul(a, a, 0xFF);
-        code->mov(dword[r15 + offsetof(JitState, CPSR_ge)], a);
+        code->mov(dword[r15 + offsetof(A32JitState, CPSR_ge)], a);
     }
 }

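In the non-BMI2 fallback above, the multiply-mask-multiply sequence spreads the four packed GE bits into four byte-sized masks: multiplying by 0x00204081 places copies of the 4-bit value at bit offsets 0, 7, 14 and 21, the AND with 0x01010101 keeps exactly one GE bit per byte, and the final multiply by 0xFF widens each surviving 1 into a full 0xFF byte. A small check of that arithmetic, assuming (as the hidden preceding instructions suggest) that the four GE bits have already been shifted down into the low nibble:

#include <cstdint>

// Expand 4 packed flag bits (bit i of `ge`) into 4 bytes of 0x00 or 0xFF.
std::uint32_t ExpandGE(std::uint32_t ge) { // ge assumed to be in [0, 16)
    std::uint32_t a = ge * 0x00204081; // copies of ge at bit offsets 0, 7, 14, 21
    a &= 0x01010101;                   // keep one GE bit per byte
    a *= 0xFF;                         // each 0x01 byte becomes 0xFF, 0x00 stays 0x00
    return a;
}

int main() {
    // GE = 0b1010 should become 0xFF00FF00 (byte i is 0xFF iff GE bit i is set).
    return ExpandGE(0b1010u) == 0xFF00FF00u ? 0 : 1;
}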
@@ -444,7 +444,7 @@ void A32EmitX64::EmitA32BXWritePC(RegAlloc& reg_alloc, IR::Block& block, IR::Ins
         et |= Common::Bit<0>(new_pc) ? 1 : 0;

         code->mov(MJitStateReg(A32::Reg::PC), new_pc & mask);
-        code->mov(dword[r15 + offsetof(JitState, CPSR_et)], et);
+        code->mov(dword[r15 + offsetof(A32JitState, CPSR_et)], et);
     } else {
         if (A32::LocationDescriptor{block.Location()}.EFlag()) {
             Xbyak::Reg32 new_pc = reg_alloc.UseScratchGpr(arg).cvt32();
@@ -454,7 +454,7 @@ void A32EmitX64::EmitA32BXWritePC(RegAlloc& reg_alloc, IR::Block& block, IR::Ins
             code->mov(mask, new_pc);
             code->and_(mask, 1);
             code->lea(et, ptr[mask.cvt64() + 2]);
-            code->mov(dword[r15 + offsetof(JitState, CPSR_et)], et);
+            code->mov(dword[r15 + offsetof(A32JitState, CPSR_et)], et);
             code->lea(mask, ptr[mask.cvt64() + mask.cvt64() * 1 - 4]); // mask = pc & 1 ? 0xFFFFFFFE : 0xFFFFFFFC
             code->and_(new_pc, mask);
             code->mov(MJitStateReg(A32::Reg::PC), new_pc);
@@ -464,7 +464,7 @@ void A32EmitX64::EmitA32BXWritePC(RegAlloc& reg_alloc, IR::Block& block, IR::Ins

             code->mov(mask, new_pc);
             code->and_(mask, 1);
-            code->mov(dword[r15 + offsetof(JitState, CPSR_et)], mask);
+            code->mov(dword[r15 + offsetof(A32JitState, CPSR_et)], mask);
             code->lea(mask, ptr[mask.cvt64() + mask.cvt64() * 1 - 4]); // mask = pc & 1 ? 0xFFFFFFFE : 0xFFFFFFFC
             code->and_(new_pc, mask);
             code->mov(MJitStateReg(A32::Reg::PC), new_pc);
|
||||||
reg_alloc.HostCall(nullptr);
|
reg_alloc.HostCall(nullptr);
|
||||||
|
|
||||||
code->SwitchMxcsrOnExit();
|
code->SwitchMxcsrOnExit();
|
||||||
code->mov(code->ABI_PARAM1, qword[r15 + offsetof(JitState, cycles_to_run)]);
|
code->mov(code->ABI_PARAM1, qword[r15 + offsetof(A32JitState, cycles_to_run)]);
|
||||||
code->sub(code->ABI_PARAM1, qword[r15 + offsetof(JitState, cycles_remaining)]);
|
code->sub(code->ABI_PARAM1, qword[r15 + offsetof(A32JitState, cycles_remaining)]);
|
||||||
code->CallFunction(cb.AddTicks);
|
code->CallFunction(cb.AddTicks);
|
||||||
reg_alloc.EndOfAllocScope();
|
reg_alloc.EndOfAllocScope();
|
||||||
auto args = reg_alloc.GetArgumentInfo(inst);
|
auto args = reg_alloc.GetArgumentInfo(inst);
|
||||||
reg_alloc.HostCall(nullptr, args[0]);
|
reg_alloc.HostCall(nullptr, args[0]);
|
||||||
code->CallFunction(cb.CallSVC);
|
code->CallFunction(cb.CallSVC);
|
||||||
code->CallFunction(cb.GetTicksRemaining);
|
code->CallFunction(cb.GetTicksRemaining);
|
||||||
code->mov(qword[r15 + offsetof(JitState, cycles_to_run)], code->ABI_RETURN);
|
code->mov(qword[r15 + offsetof(A32JitState, cycles_to_run)], code->ABI_RETURN);
|
||||||
code->mov(qword[r15 + offsetof(JitState, cycles_remaining)], code->ABI_RETURN);
|
code->mov(qword[r15 + offsetof(A32JitState, cycles_remaining)], code->ABI_RETURN);
|
||||||
code->SwitchMxcsrOnEntry();
|
code->SwitchMxcsrOnEntry();
|
||||||
}
|
}
|
||||||
|
|
||||||
static u32 GetFpscrImpl(JitState* jit_state) {
|
static u32 GetFpscrImpl(A32JitState* jit_state) {
|
||||||
return jit_state->Fpscr();
|
return jit_state->Fpscr();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -497,11 +497,11 @@ void A32EmitX64::EmitA32GetFpscr(RegAlloc& reg_alloc, IR::Block&, IR::Inst* inst
     reg_alloc.HostCall(inst);
     code->mov(code->ABI_PARAM1, code->r15);

-    code->stmxcsr(code->dword[code->r15 + offsetof(JitState, guest_MXCSR)]);
+    code->stmxcsr(code->dword[code->r15 + offsetof(A32JitState, guest_MXCSR)]);
     code->CallFunction(&GetFpscrImpl);
 }

-static void SetFpscrImpl(u32 value, JitState* jit_state) {
+static void SetFpscrImpl(u32 value, A32JitState* jit_state) {
     jit_state->SetFpscr(value);
 }

@@ -511,12 +511,12 @@ void A32EmitX64::EmitA32SetFpscr(RegAlloc& reg_alloc, IR::Block&, IR::Inst* inst
     code->mov(code->ABI_PARAM2, code->r15);

     code->CallFunction(&SetFpscrImpl);
-    code->ldmxcsr(code->dword[code->r15 + offsetof(JitState, guest_MXCSR)]);
+    code->ldmxcsr(code->dword[code->r15 + offsetof(A32JitState, guest_MXCSR)]);
 }

 void A32EmitX64::EmitA32GetFpscrNZCV(RegAlloc& reg_alloc, IR::Block&, IR::Inst* inst) {
     Xbyak::Reg32 result = reg_alloc.ScratchGpr().cvt32();
-    code->mov(result, dword[r15 + offsetof(JitState, FPSCR_nzcv)]);
+    code->mov(result, dword[r15 + offsetof(A32JitState, FPSCR_nzcv)]);
     reg_alloc.DefineValue(inst, result);
 }

@@ -524,11 +524,11 @@ void A32EmitX64::EmitA32SetFpscrNZCV(RegAlloc& reg_alloc, IR::Block&, IR::Inst*
     auto args = reg_alloc.GetArgumentInfo(inst);
     Xbyak::Reg32 value = reg_alloc.UseGpr(args[0]).cvt32();

-    code->mov(dword[r15 + offsetof(JitState, FPSCR_nzcv)], value);
+    code->mov(dword[r15 + offsetof(A32JitState, FPSCR_nzcv)], value);
 }

 void A32EmitX64::EmitA32ClearExclusive(RegAlloc&, IR::Block&, IR::Inst*) {
-    code->mov(code->byte[r15 + offsetof(JitState, exclusive_state)], u8(0));
+    code->mov(code->byte[r15 + offsetof(A32JitState, exclusive_state)], u8(0));
 }

 void A32EmitX64::EmitA32SetExclusive(RegAlloc& reg_alloc, IR::Block&, IR::Inst* inst) {
@@ -536,8 +536,8 @@ void A32EmitX64::EmitA32SetExclusive(RegAlloc& reg_alloc, IR::Block&, IR::Inst*
     ASSERT(args[1].IsImmediate());
     Xbyak::Reg32 address = reg_alloc.UseGpr(args[0]).cvt32();

-    code->mov(code->byte[r15 + offsetof(JitState, exclusive_state)], u8(1));
-    code->mov(dword[r15 + offsetof(JitState, exclusive_address)], address);
+    code->mov(code->byte[r15 + offsetof(A32JitState, exclusive_state)], u8(1));
+    code->mov(dword[r15 + offsetof(A32JitState, exclusive_address)], address);
 }

 template <typename FunctionPointer>
@@ -690,13 +690,13 @@ static void ExclusiveWrite(BlockOfCode* code, RegAlloc& reg_alloc, IR::Inst* ins
     Xbyak::Label end;

     code->mov(passed, u32(1));
-    code->cmp(code->byte[r15 + offsetof(JitState, exclusive_state)], u8(0));
+    code->cmp(code->byte[r15 + offsetof(A32JitState, exclusive_state)], u8(0));
     code->je(end);
     code->mov(tmp, code->ABI_PARAM1);
-    code->xor_(tmp, dword[r15 + offsetof(JitState, exclusive_address)]);
-    code->test(tmp, JitState::RESERVATION_GRANULE_MASK);
+    code->xor_(tmp, dword[r15 + offsetof(A32JitState, exclusive_address)]);
+    code->test(tmp, A32JitState::RESERVATION_GRANULE_MASK);
     code->jne(end);
-    code->mov(code->byte[r15 + offsetof(JitState, exclusive_state)], u8(0));
+    code->mov(code->byte[r15 + offsetof(A32JitState, exclusive_state)], u8(0));
     if (prepend_high_word) {
         code->mov(code->ABI_PARAM2.cvt32(), code->ABI_PARAM2.cvt32()); // zero extend to 64-bits
         code->shl(code->ABI_PARAM3, 32);
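ExclusiveWrite above only lets the store go ahead when the exclusive state is still set and the store address matches the recorded exclusive_address within the reservation granule; the xor/test pair checks that no bits covered by RESERVATION_GRANULE_MASK differ. A plain C++ sketch of the same predicate; the mask value below is assumed for illustration, the real constant lives in A32JitState:

#include <cstdint>

struct ExclusiveSketch { // stand-in for the exclusive-monitor fields
    static constexpr std::uint32_t RESERVATION_GRANULE_MASK = 0xFFFFFFF8; // assumed 8-byte granule
    bool exclusive_state = false;
    std::uint32_t exclusive_address = 0;
};

// Returns true (and clears the monitor) if the exclusive store may proceed.
bool TryExclusiveWrite(ExclusiveSketch& js, std::uint32_t address) {
    if (!js.exclusive_state)
        return false;                                 // monitor not armed
    if ((address ^ js.exclusive_address) & ExclusiveSketch::RESERVATION_GRANULE_MASK)
        return false;                                 // different reservation granule
    js.exclusive_state = false;                       // consume the reservation
    return true;
}

int main() {
    ExclusiveSketch js{};
    js.exclusive_state = true;
    js.exclusive_address = 0x1000;
    return (TryExclusiveWrite(js, 0x1004) && !TryExclusiveWrite(js, 0x1004)) ? 0 : 1;
}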
@@ -1014,10 +1014,10 @@ static u32 CalculateCpsr_et(const A32::LocationDescriptor& desc) {

 void A32EmitX64::EmitTerminalImpl(IR::Term::LinkBlock terminal, IR::LocationDescriptor initial_location) {
     if (CalculateCpsr_et(terminal.next) != CalculateCpsr_et(initial_location)) {
-        code->mov(dword[r15 + offsetof(JitState, CPSR_et)], CalculateCpsr_et(terminal.next));
+        code->mov(dword[r15 + offsetof(A32JitState, CPSR_et)], CalculateCpsr_et(terminal.next));
     }

-    code->cmp(qword[r15 + offsetof(JitState, cycles_remaining)], 0);
+    code->cmp(qword[r15 + offsetof(A32JitState, cycles_remaining)], 0);

     patch_information[terminal.next].jg.emplace_back(code->getCurr());
     if (auto next_bb = GetBasicBlock(terminal.next)) {
@@ -1039,7 +1039,7 @@ void A32EmitX64::EmitTerminalImpl(IR::Term::LinkBlock terminal, IR::LocationDesc

 void A32EmitX64::EmitTerminalImpl(IR::Term::LinkBlockFast terminal, IR::LocationDescriptor initial_location) {
     if (CalculateCpsr_et(terminal.next) != CalculateCpsr_et(initial_location)) {
-        code->mov(dword[r15 + offsetof(JitState, CPSR_et)], CalculateCpsr_et(terminal.next));
+        code->mov(dword[r15 + offsetof(A32JitState, CPSR_et)], CalculateCpsr_et(terminal.next));
     }

     patch_information[terminal.next].jmp.emplace_back(code->getCurr());
@@ -1055,17 +1055,17 @@ void A32EmitX64::EmitTerminalImpl(IR::Term::PopRSBHint, IR::LocationDescriptor)
     // TODO: Optimization is available here based on known state of FPSCR_mode and CPSR_et.
     code->mov(ecx, MJitStateReg(A32::Reg::PC));
     code->shl(rcx, 32);
-    code->mov(ebx, dword[r15 + offsetof(JitState, FPSCR_mode)]);
-    code->or_(ebx, dword[r15 + offsetof(JitState, CPSR_et)]);
+    code->mov(ebx, dword[r15 + offsetof(A32JitState, FPSCR_mode)]);
+    code->or_(ebx, dword[r15 + offsetof(A32JitState, CPSR_et)]);
     code->or_(rbx, rcx);

-    code->mov(eax, dword[r15 + offsetof(JitState, rsb_ptr)]);
+    code->mov(eax, dword[r15 + offsetof(A32JitState, rsb_ptr)]);
     code->sub(eax, 1);
-    code->and_(eax, u32(JitState::RSBPtrMask));
-    code->mov(dword[r15 + offsetof(JitState, rsb_ptr)], eax);
-    code->cmp(rbx, qword[r15 + offsetof(JitState, rsb_location_descriptors) + rax * sizeof(u64)]);
+    code->and_(eax, u32(A32JitState::RSBPtrMask));
+    code->mov(dword[r15 + offsetof(A32JitState, rsb_ptr)], eax);
+    code->cmp(rbx, qword[r15 + offsetof(A32JitState, rsb_location_descriptors) + rax * sizeof(u64)]);
     code->jne(code->GetReturnFromRunCodeAddress());
-    code->mov(rax, qword[r15 + offsetof(JitState, rsb_codeptrs) + rax * sizeof(u64)]);
+    code->mov(rax, qword[r15 + offsetof(A32JitState, rsb_codeptrs) + rax * sizeof(u64)]);
     code->jmp(rax);
 }

@@ -1077,7 +1077,7 @@ void A32EmitX64::EmitTerminalImpl(IR::Term::If terminal, IR::LocationDescriptor
 }

 void A32EmitX64::EmitTerminalImpl(IR::Term::CheckHalt terminal, IR::LocationDescriptor initial_location) {
-    code->cmp(code->byte[r15 + offsetof(JitState, halt_requested)], u8(0));
+    code->cmp(code->byte[r15 + offsetof(A32JitState, halt_requested)], u8(0));
     code->jne(code->GetForceReturnFromRunCodeAddress());
     EmitTerminal(terminal.else_, initial_location);
 }
backend_x64/jitstate.cpp → backend_x64/a32_jitstate.cpp

@@ -4,8 +4,8 @@
  * General Public License version 2 or any later version.
  */

+#include "backend_x64/a32_jitstate.h"
 #include "backend_x64/block_of_code.h"
-#include "backend_x64/jitstate.h"
 #include "common/assert.h"
 #include "common/bit_util.h"
 #include "common/common_types.h"
@@ -44,7 +44,7 @@ namespace BackendX64 {
  * OF bit 0 Overflow flag
  */

-u32 JitState::Cpsr() const {
+u32 A32JitState::Cpsr() const {
     ASSERT((CPSR_nzcv & ~0xF0000000) == 0);
     ASSERT((CPSR_q & ~1) == 0);
     ASSERT((CPSR_et & ~3) == 0);
@@ -70,7 +70,7 @@ u32 JitState::Cpsr() const {
     return cpsr;
 }

-void JitState::SetCpsr(u32 cpsr) {
+void A32JitState::SetCpsr(u32 cpsr) {
     // NZCV flags
     CPSR_nzcv = cpsr & 0xF0000000;
     // Q flag
@@ -89,7 +89,7 @@ void JitState::SetCpsr(u32 cpsr) {
     CPSR_jaifm = cpsr & 0x07F0FDDF;
 }

-void JitState::ResetRSB() {
+void A32JitState::ResetRSB() {
     rsb_location_descriptors.fill(0xFFFFFFFFFFFFFFFFull);
     rsb_codeptrs.fill(0);
 }
@@ -153,7 +153,7 @@ void JitState::ResetRSB() {
 constexpr u32 FPSCR_MODE_MASK = A32::LocationDescriptor::FPSCR_MODE_MASK;
 constexpr u32 FPSCR_NZCV_MASK = 0xF0000000;

-u32 JitState::Fpscr() const {
+u32 A32JitState::Fpscr() const {
     ASSERT((FPSCR_mode & ~FPSCR_MODE_MASK) == 0);
     ASSERT((FPSCR_nzcv & ~FPSCR_NZCV_MASK) == 0);
     ASSERT((FPSCR_IDC & ~(1 << 7)) == 0);
@@ -168,7 +168,7 @@ u32 JitState::Fpscr() const {
     return FPSCR;
 }

-void JitState::SetFpscr(u32 FPSCR) {
+void A32JitState::SetFpscr(u32 FPSCR) {
     old_FPSCR = FPSCR;
     FPSCR_mode = FPSCR & FPSCR_MODE_MASK;
     FPSCR_nzcv = FPSCR & FPSCR_NZCV_MASK;
@@ -199,7 +199,7 @@ void JitState::SetFpscr(u32 FPSCR) {
     }
 }

-u64 JitState::GetUniqueHash() const {
+u64 A32JitState::GetUniqueHash() const {
     return CPSR_et | FPSCR_mode | (static_cast<u64>(Reg[15]) << 32);
 }

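GetUniqueHash above is the key the return-stack-buffer entries and block lookups are compared against: the low 32 bits combine CPSR_et and FPSCR_mode (which the plain OR suggests occupy disjoint bit positions), and the high 32 bits carry the guest PC from Reg[15]. A small sketch of composing and decomposing that key; the field widths are assumed from the asserts above:

#include <cstdint>

// Build the 64-bit block key the same way A32JitState::GetUniqueHash does.
std::uint64_t MakeUniqueHash(std::uint32_t cpsr_et, std::uint32_t fpscr_mode, std::uint32_t pc) {
    return static_cast<std::uint64_t>(cpsr_et | fpscr_mode) | (static_cast<std::uint64_t>(pc) << 32);
}

int main() {
    std::uint64_t h = MakeUniqueHash(/*cpsr_et=*/1, /*fpscr_mode=*/0, /*pc=*/0x00008000);
    // High half recovers the PC, low half recovers the mode bits.
    return (static_cast<std::uint32_t>(h >> 32) == 0x00008000 && (h & 0xFFFFFFFF) == 1) ? 0 : 1;
}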
backend_x64/jitstate.h → backend_x64/a32_jitstate.h

@@ -22,8 +22,8 @@ constexpr size_t SpillCount = 64;
 #pragma warning(disable:4324) // Structure was padded due to alignment specifier
 #endif

-struct JitState {
-    JitState() { ResetRSB(); }
+struct A32JitState {
+    A32JitState() { ResetRSB(); }

     std::array<u32, 16> Reg{}; // Current register file.
     // TODO: Mode-specific register sets unimplemented.
backend_x64/block_of_code.cpp

@@ -9,9 +9,9 @@

 #include <xbyak.h>

+#include "backend_x64/a32_jitstate.h"
 #include "backend_x64/abi.h"
 #include "backend_x64/block_of_code.h"
-#include "backend_x64/jitstate.h"
 #include "common/assert.h"
 #include "dynarmic/callbacks.h"

@@ -76,14 +76,14 @@ size_t BlockOfCode::SpaceRemaining() const {
     return std::min(TOTAL_CODE_SIZE - far_code_offset, FAR_CODE_OFFSET - near_code_offset);
 }

-void BlockOfCode::RunCode(JitState* jit_state, size_t cycles_to_run) const {
+void BlockOfCode::RunCode(A32JitState* jit_state, size_t cycles_to_run) const {
     constexpr size_t max_cycles_to_run = static_cast<size_t>(std::numeric_limits<decltype(jit_state->cycles_remaining)>::max());
     ASSERT(cycles_to_run <= max_cycles_to_run);

     jit_state->cycles_to_run = cycles_to_run;
     jit_state->cycles_remaining = cycles_to_run;

-    u32 new_rsb_ptr = (jit_state->rsb_ptr - 1) & JitState::RSBPtrMask;
+    u32 new_rsb_ptr = (jit_state->rsb_ptr - 1) & A32JitState::RSBPtrMask;
     if (jit_state->GetUniqueHash() == jit_state->rsb_location_descriptors[new_rsb_ptr]) {
         jit_state->rsb_ptr = new_rsb_ptr;
         run_code_from(jit_state, jit_state->rsb_codeptrs[new_rsb_ptr]);
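RunCode above peeks at the most recent return-stack-buffer entry before falling back to the regular dispatcher: it steps rsb_ptr back by one around the ring (via RSBPtrMask), and if the stored location descriptor matches the current GetUniqueHash it enters the cached code pointer directly. A sketch of that ring-buffer lookup; the size and field names below are a simplified stand-in for the real A32JitState members:

#include <array>
#include <cstdint>

struct RsbSketch { // stand-in for the RSB fields of A32JitState
    static constexpr std::uint32_t RSBSize = 8;          // assumed size; must be a power of two
    static constexpr std::uint32_t RSBPtrMask = RSBSize - 1;
    std::uint32_t rsb_ptr = 0;
    std::array<std::uint64_t, RSBSize> rsb_location_descriptors{};
    std::array<std::uint64_t, RSBSize> rsb_codeptrs{};
};

// Returns the cached code pointer for `unique_hash`, or 0 to use the slow dispatcher.
std::uint64_t PopRsb(RsbSketch& js, std::uint64_t unique_hash) {
    std::uint32_t new_rsb_ptr = (js.rsb_ptr - 1) & RsbSketch::RSBPtrMask; // step back around the ring
    if (js.rsb_location_descriptors[new_rsb_ptr] != unique_hash)
        return 0;
    js.rsb_ptr = new_rsb_ptr;
    return js.rsb_codeptrs[new_rsb_ptr];
}

int main() {
    RsbSketch js{};
    js.rsb_ptr = 1;
    js.rsb_location_descriptors[0] = 0xABCD;
    js.rsb_codeptrs[0] = 0x1234;
    return PopRsb(js, 0xABCD) == 0x1234 ? 0 : 1;
}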
@@ -139,7 +139,7 @@ void BlockOfCode::GenRunCode() {
     // Return from run code variants
     const auto emit_return_from_run_code = [this, &loop, &enter_mxcsr_then_loop](bool mxcsr_already_exited, bool force_return){
         if (!force_return) {
-            cmp(qword[r15 + offsetof(JitState, cycles_remaining)], 0);
+            cmp(qword[r15 + offsetof(A32JitState, cycles_remaining)], 0);
             jg(mxcsr_already_exited ? enter_mxcsr_then_loop : loop);
         }

@@ -147,8 +147,8 @@ void BlockOfCode::GenRunCode() {
             SwitchMxcsrOnExit();
         }

-        mov(ABI_PARAM1, qword[r15 + offsetof(JitState, cycles_to_run)]);
-        sub(ABI_PARAM1, qword[r15 + offsetof(JitState, cycles_remaining)]);
+        mov(ABI_PARAM1, qword[r15 + offsetof(A32JitState, cycles_to_run)]);
+        sub(ABI_PARAM1, qword[r15 + offsetof(A32JitState, cycles_remaining)]);
         CallFunction(cb.AddTicks);

         ABI_PopCalleeSaveRegistersAndAdjustStack(this);
@@ -231,13 +231,13 @@ void BlockOfCode::GenMemoryAccessors() {
 }

 void BlockOfCode::SwitchMxcsrOnEntry() {
-    stmxcsr(dword[r15 + offsetof(JitState, save_host_MXCSR)]);
-    ldmxcsr(dword[r15 + offsetof(JitState, guest_MXCSR)]);
+    stmxcsr(dword[r15 + offsetof(A32JitState, save_host_MXCSR)]);
+    ldmxcsr(dword[r15 + offsetof(A32JitState, guest_MXCSR)]);
 }

 void BlockOfCode::SwitchMxcsrOnExit() {
-    stmxcsr(dword[r15 + offsetof(JitState, guest_MXCSR)]);
-    ldmxcsr(dword[r15 + offsetof(JitState, save_host_MXCSR)]);
+    stmxcsr(dword[r15 + offsetof(A32JitState, guest_MXCSR)]);
+    ldmxcsr(dword[r15 + offsetof(A32JitState, save_host_MXCSR)]);
 }

 Xbyak::Address BlockOfCode::MConst(u64 constant) {
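SwitchMxcsrOnEntry/SwitchMxcsrOnExit above swap the host and guest SSE control/status word so that guest rounding and flush-to-zero settings apply only while emulated code runs. The intrinsic equivalent of one stmxcsr/ldmxcsr pair looks roughly like the sketch below; this is an illustration with a stand-in struct, not the emitter's actual code path:

#include <cstdint>
#include <xmmintrin.h> // _mm_getcsr / _mm_setcsr

struct MxcsrSketch { // stand-in for the two MXCSR slots in A32JitState
    std::uint32_t guest_MXCSR = 0x1F80;  // default MXCSR used here as an assumed guest value
    std::uint32_t save_host_MXCSR = 0;
};

void SwitchMxcsrOnEntrySketch(MxcsrSketch& js) {
    js.save_host_MXCSR = _mm_getcsr(); // stmxcsr [save_host_MXCSR]
    _mm_setcsr(js.guest_MXCSR);        // ldmxcsr [guest_MXCSR]
}

void SwitchMxcsrOnExitSketch(MxcsrSketch& js) {
    js.guest_MXCSR = _mm_getcsr();     // stmxcsr [guest_MXCSR]
    _mm_setcsr(js.save_host_MXCSR);    // ldmxcsr [save_host_MXCSR]
}

int main() {
    MxcsrSketch js{};
    SwitchMxcsrOnEntrySketch(js);
    SwitchMxcsrOnExitSketch(js);
    return 0;
}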
backend_x64/block_of_code.h

@@ -12,8 +12,8 @@
 #include <xbyak.h>
 #include <xbyak_util.h>

+#include "backend_x64/a32_jitstate.h"
 #include "backend_x64/constant_pool.h"
-#include "backend_x64/jitstate.h"
 #include "common/common_types.h"
 #include "dynarmic/callbacks.h"

@@ -32,7 +32,7 @@ public:
     size_t SpaceRemaining() const;

     /// Runs emulated code for approximately `cycles_to_run` cycles.
-    void RunCode(JitState* jit_state, size_t cycles_to_run) const;
+    void RunCode(A32JitState* jit_state, size_t cycles_to_run) const;
     /// Code emitter: Returns to dispatcher
     void ReturnFromRunCode(bool mxcsr_already_exited = false);
     /// Code emitter: Returns to dispatcher, forces return to host
@@ -137,8 +137,8 @@ private:
     CodePtr near_code_ptr;
     CodePtr far_code_ptr;

-    using RunCodeFuncType = void(*)(JitState*);
-    using RunCodeFromFuncType = void(*)(JitState*, u64);
+    using RunCodeFuncType = void(*)(A32JitState*);
+    using RunCodeFromFuncType = void(*)(A32JitState*, u64);
     RunCodeFuncType run_code = nullptr;
     RunCodeFromFuncType run_code_from = nullptr;
     static constexpr size_t MXCSR_ALREADY_EXITED = 1 << 0;
backend_x64/emit_x64.cpp

@@ -11,7 +11,6 @@
 #include "backend_x64/abi.h"
 #include "backend_x64/block_of_code.h"
 #include "backend_x64/emit_x64.h"
-#include "backend_x64/jitstate.h"
 #include "common/address_range.h"
 #include "common/assert.h"
 #include "common/bit_util.h"
@@ -89,19 +88,19 @@ void EmitX64<PCT>::PushRSBHelper(Xbyak::Reg64 loc_desc_reg, Xbyak::Reg64 index_r
                               ? iter->second.entrypoint
                               : code->GetReturnFromRunCodeAddress();

-    code->mov(index_reg.cvt32(), dword[r15 + offsetof(JitState, rsb_ptr)]);
+    code->mov(index_reg.cvt32(), dword[r15 + offsetof(A32JitState, rsb_ptr)]);

     code->mov(loc_desc_reg, target.Value());

     patch_information[target].mov_rcx.emplace_back(code->getCurr());
     EmitPatchMovRcx(target_code_ptr);

-    code->mov(qword[r15 + index_reg * 8 + offsetof(JitState, rsb_location_descriptors)], loc_desc_reg);
-    code->mov(qword[r15 + index_reg * 8 + offsetof(JitState, rsb_codeptrs)], rcx);
+    code->mov(qword[r15 + index_reg * 8 + offsetof(A32JitState, rsb_location_descriptors)], loc_desc_reg);
+    code->mov(qword[r15 + index_reg * 8 + offsetof(A32JitState, rsb_codeptrs)], rcx);

     code->add(index_reg.cvt32(), 1);
-    code->and_(index_reg.cvt32(), u32(JitState::RSBPtrMask));
-    code->mov(dword[r15 + offsetof(JitState, rsb_ptr)], index_reg.cvt32());
+    code->and_(index_reg.cvt32(), u32(A32JitState::RSBPtrMask));
+    code->mov(dword[r15 + offsetof(A32JitState, rsb_ptr)], index_reg.cvt32());
 }

 template <typename PCT>
@@ -1839,7 +1838,7 @@ static void DenormalsAreZero32(BlockOfCode* code, Xbyak::Xmm xmm_value, Xbyak::R
     code->cmp(gpr_scratch, u32(0x007FFFFE));
     code->ja(end);
     code->pxor(xmm_value, xmm_value);
-    code->mov(dword[r15 + offsetof(JitState, FPSCR_IDC)], u32(1 << 7));
+    code->mov(dword[r15 + offsetof(A32JitState, FPSCR_IDC)], u32(1 << 7));
     code->L(end);
 }

@@ -1857,7 +1856,7 @@ static void DenormalsAreZero64(BlockOfCode* code, Xbyak::Xmm xmm_value, Xbyak::R
     code->cmp(gpr_scratch, penult_denormal);
     code->ja(end);
     code->pxor(xmm_value, xmm_value);
-    code->mov(dword[r15 + offsetof(JitState, FPSCR_IDC)], u32(1 << 7));
+    code->mov(dword[r15 + offsetof(A32JitState, FPSCR_IDC)], u32(1 << 7));
     code->L(end);
 }

@@ -1870,7 +1869,7 @@ static void FlushToZero32(BlockOfCode* code, Xbyak::Xmm xmm_value, Xbyak::Reg32
     code->cmp(gpr_scratch, u32(0x007FFFFE));
     code->ja(end);
     code->pxor(xmm_value, xmm_value);
-    code->mov(dword[r15 + offsetof(JitState, FPSCR_UFC)], u32(1 << 3));
+    code->mov(dword[r15 + offsetof(A32JitState, FPSCR_UFC)], u32(1 << 3));
     code->L(end);
 }

@@ -1888,7 +1887,7 @@ static void FlushToZero64(BlockOfCode* code, Xbyak::Xmm xmm_value, Xbyak::Reg64
     code->cmp(gpr_scratch, penult_denormal);
     code->ja(end);
     code->pxor(xmm_value, xmm_value);
-    code->mov(dword[r15 + offsetof(JitState, FPSCR_UFC)], u32(1 << 3));
+    code->mov(dword[r15 + offsetof(A32JitState, FPSCR_UFC)], u32(1 << 3));
     code->L(end);
 }

@@ -2138,7 +2137,7 @@ static void SetFpscrNzcvFromFlags(BlockOfCode* code, RegAlloc& reg_alloc) {
     code->rcl(cl, 3);
     code->shl(nzcv, cl);
     code->and_(nzcv, 0xF0000000);
-    code->mov(dword[r15 + offsetof(JitState, FPSCR_nzcv)], nzcv);
+    code->mov(dword[r15 + offsetof(A32JitState, FPSCR_nzcv)], nzcv);
 }

 template <typename PCT>
@@ -2463,7 +2462,7 @@ void EmitX64<PCT>::EmitFPU32ToDouble(RegAlloc& reg_alloc, IR::Block&, IR::Inst*
 template <typename PCT>
 void EmitX64<PCT>::EmitAddCycles(size_t cycles) {
     ASSERT(cycles < std::numeric_limits<u32>::max());
-    code->sub(qword[r15 + offsetof(JitState, cycles_remaining)], static_cast<u32>(cycles));
+    code->sub(qword[r15 + offsetof(A32JitState, cycles_remaining)], static_cast<u32>(cycles));
 }

 template <typename PCT>
@@ -2471,7 +2470,7 @@ Xbyak::Label EmitX64<PCT>::EmitCond(IR::Cond cond) {
     Xbyak::Label label;

     const Xbyak::Reg32 cpsr = eax;
-    code->mov(cpsr, dword[r15 + offsetof(JitState, CPSR_nzcv)]);
+    code->mov(cpsr, dword[r15 + offsetof(A32JitState, CPSR_nzcv)]);

     constexpr size_t n_shift = 31;
     constexpr size_t z_shift = 30;
backend_x64/hostloc.cpp

@@ -4,6 +4,9 @@
  * General Public License version 2 or any later version.
  */

+#include <xbyak.h>
+
+#include "backend_x64/a32_jitstate.h"
 #include "backend_x64/hostloc.h"

 namespace Dynarmic {
@@ -22,11 +25,11 @@ Xbyak::Xmm HostLocToXmm(HostLoc loc) {
 Xbyak::Address SpillToOpArg(HostLoc loc) {
     using namespace Xbyak::util;

-    static_assert(std::is_same<decltype(JitState::Spill[0]), u64&>::value, "Spill must be u64");
+    static_assert(std::is_same<decltype(A32JitState::Spill[0]), u64&>::value, "Spill must be u64");
     ASSERT(HostLocIsSpill(loc));

     size_t i = static_cast<size_t>(loc) - static_cast<size_t>(HostLoc::FirstSpill);
-    return qword[r15 + offsetof(JitState, Spill) + i * sizeof(u64)];
+    return qword[r15 + offsetof(A32JitState, Spill) + i * sizeof(u64)];
 }

 } // namespace BackendX64
backend_x64/hostloc.h

@@ -7,7 +7,7 @@

 #include <xbyak.h>

-#include "backend_x64/jitstate.h"
+#include "backend_x64/a32_jitstate.h"
 #include "common/assert.h"
 #include "common/common_types.h"

@@ -92,7 +92,7 @@ const HostLocList any_xmm = {

 Xbyak::Reg64 HostLocToReg64(HostLoc loc);
 Xbyak::Xmm HostLocToXmm(HostLoc loc);
-Xbyak::Address SpillToOpArg(HostLoc loc);
+Xbyak::Address SpillToOpArg(HostLoc loc); ///< TODO: Remove from this file

 } // namespace BackendX64
 } // namespace Dynarmic
backend_x64/interface_x64.cpp

@@ -15,8 +15,8 @@
 #endif

 #include "backend_x64/a32_emit_x64.h"
+#include "backend_x64/a32_jitstate.h"
 #include "backend_x64/block_of_code.h"
-#include "backend_x64/jitstate.h"
 #include "common/assert.h"
 #include "common/common_types.h"
 #include "common/scope_exit.h"
@@ -41,7 +41,7 @@ struct Jit::Impl {
     {}

     BlockOfCode block_of_code;
-    JitState jit_state;
+    A32JitState jit_state;
     A32EmitX64 emitter;
     const UserCallbacks callbacks;

@@ -128,7 +128,7 @@ private:

     static CodePtr GetCurrentBlock(void *this_voidptr) {
         Jit::Impl& this_ = *reinterpret_cast<Jit::Impl*>(this_voidptr);
-        JitState& jit_state = this_.jit_state;
+        A32JitState& jit_state = this_.jit_state;

         u32 pc = jit_state.Reg[15];
         A32::PSR cpsr{jit_state.Cpsr()};
@@ -232,7 +232,7 @@ Context Jit::SaveContext() const {
 }

 struct Context::Impl {
-    JitState jit_state;
+    A32JitState jit_state;
     size_t invalid_cache_generation;
 };

@@ -278,7 +278,7 @@ void Context::SetFpscr(std::uint32_t value) {
     return impl->jit_state.SetFpscr(value);
 }

-void TransferJitState(JitState& dest, const JitState& src, bool reset_rsb) {
+void TransferJitState(A32JitState& dest, const A32JitState& src, bool reset_rsb) {
     dest.CPSR_ge = src.CPSR_ge;
     dest.CPSR_et = src.CPSR_et;
     dest.CPSR_q = src.CPSR_q;
backend_x64/reg_alloc.cpp

@@ -10,7 +10,6 @@
 #include <xbyak.h>

 #include "backend_x64/abi.h"
-#include "backend_x64/jitstate.h"
 #include "backend_x64/reg_alloc.h"
 #include "common/assert.h"
