A64: Implement ISB

Since the point of an ISB is to ensure that all subsequent instructions are
fetched again, we can treat an ISB instruction as a full code cache flush.
Author:  Lioncash, 2018-08-17 20:20:42 -04:00 (committed by MerryMage)
Parent:  be53e356a2
Commit:  f3f60cd179
9 changed files with 45 additions and 20 deletions
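
The idea, in a rough sketch (illustrative only — `ClearCache()` is the public `A64::Jit` method that the emitted code calls in the diff below; the free function and its name are invented for this note):

#include "dynarmic/A64/a64.h"

// Hypothetical helper, not part of this commit: executing an ISB now behaves
// as if the guest requested a full flush of translated code, so the next
// dispatch re-decodes instructions straight from guest memory.
static void OnGuestISB(Dynarmic::A64::Jit& jit) {
    jit.ClearCache();  // discard every translated block
}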


@@ -60,8 +60,8 @@ bool A64EmitContext::AccurateNaN() const {
     return conf.floating_point_nan_accuracy == A64::UserConfig::NaNAccuracy::Accurate;
 }
 
-A64EmitX64::A64EmitX64(BlockOfCode& code, A64::UserConfig conf)
-    : EmitX64(code), conf(conf)
+A64EmitX64::A64EmitX64(BlockOfCode& code, A64::UserConfig conf, A64::Jit* jit_interface)
+    : EmitX64(code), conf(conf), jit_interface{jit_interface}
 {
     GenMemory128Accessors();
     GenFastmemFallbacks();
@@ -538,6 +538,15 @@ void A64EmitX64::EmitA64DataMemoryBarrier(A64EmitContext&, IR::Inst*) {
     code.lfence();
 }
 
+void A64EmitX64::EmitA64InstructionSynchronizationBarrier(A64EmitContext& ctx, IR::Inst* ) {
+    ctx.reg_alloc.HostCall(nullptr);
+
+    code.mov(code.ABI_PARAM1, reinterpret_cast<u64>(jit_interface));
+    code.CallFunction(static_cast<void(*)(A64::Jit*)>([](A64::Jit* jit) {
+        jit->ClearCache();
+    }));
+}
+
 void A64EmitX64::EmitA64GetCNTFRQ(A64EmitContext& ctx, IR::Inst* inst) {
     Xbyak::Reg32 result = ctx.reg_alloc.ScratchGpr().cvt32();
     code.mov(result, conf.cntfrq_el0);
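
A side note on the host call emitted above: `ctx.reg_alloc.HostCall(nullptr)` sets up an ABI call with no result to write back, the `Jit*` is baked in as an immediate, and the callee is a capture-less lambda, which is what lets it convert to the plain function pointer `CallFunction` expects. A standalone sketch of just that C++ conversion rule (the `Jit` struct here is a stand-in, not dynarmic's class):

#include <cstdio>

// Stand-in type for illustration only.
struct Jit {
    void ClearCache() { std::puts("cache cleared"); }
};

int main() {
    Jit jit;
    // A capture-less lambda converts to an ordinary function pointer, so its
    // address can be embedded in generated code and invoked with whatever
    // Jit* was loaded into the first argument register.
    void (*thunk)(Jit*) = [](Jit* j) { j->ClearCache(); };
    thunk(&jit);
    return 0;
}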


@@ -12,6 +12,7 @@
 #include "backend_x64/a64_jitstate.h"
 #include "backend_x64/block_range_information.h"
 #include "backend_x64/emit_x64.h"
+#include "dynarmic/A64/a64.h"
 #include "dynarmic/A64/config.h"
 #include "frontend/A64/location_descriptor.h"
 #include "frontend/ir/terminal.h"
@@ -34,7 +35,7 @@ struct A64EmitContext final : public EmitContext {
 
 class A64EmitX64 final : public EmitX64 {
 public:
-    A64EmitX64(BlockOfCode& code, A64::UserConfig conf);
+    A64EmitX64(BlockOfCode& code, A64::UserConfig conf, A64::Jit* jit_interface);
     ~A64EmitX64() override;
 
     /**
@@ -49,6 +50,7 @@ public:
 protected:
     const A64::UserConfig conf;
+    A64::Jit* jit_interface;
     BlockRangeInformation<u64> block_ranges;
 
     void (*memory_read_128)();


@@ -36,10 +36,10 @@ static RunCodeCallbacks GenRunCodeCallbacks(A64::UserCallbacks* cb, CodePtr (*Lo
 struct Jit::Impl final {
 public:
-    explicit Impl(UserConfig conf)
+    Impl(Jit* jit, UserConfig conf)
         : conf(conf)
         , block_of_code(GenRunCodeCallbacks(conf.callbacks, &GetCurrentBlockThunk, this), JitStateInfo{jit_state})
-        , emitter(block_of_code, conf)
+        , emitter(block_of_code, conf, jit)
     {
         ASSERT(conf.page_table_address_space_bits >= 12 && conf.page_table_address_space_bits <= 64);
     }
@@ -247,7 +247,7 @@ private:
 };
 
 Jit::Jit(UserConfig conf)
-    : impl(std::make_unique<Jit::Impl>(conf)) {}
+    : impl(std::make_unique<Jit::Impl>(this, conf)) {}
 
 Jit::~Jit() = default;
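
The plumbing above simply threads the owning `Jit*` down into the emitter so that the generated ISB handler has something to call `ClearCache()` on. The same back-pointer pattern in isolation (stand-in types, not dynarmic's):

#include <memory>

struct Jit;

// The component that eventually needs to call back into the public interface
// just stores the pointer it was handed.
struct Emitter {
    explicit Emitter(Jit* jit_interface) : jit_interface(jit_interface) {}
    Jit* jit_interface;
};

struct Impl {
    explicit Impl(Jit* jit) : emitter(jit) {}
    Emitter emitter;
};

struct Jit {
    Jit() : impl(std::make_unique<Impl>(this)) {}  // safe: only the pointer is stored
    std::unique_ptr<Impl> impl;
};

int main() {
    Jit jit;  // jit.impl->emitter.jit_interface points back at `jit`
    return 0;
}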


@@ -64,7 +64,7 @@ INST(SEVL, "SEVL", "11010
 INST(CLREX, "CLREX", "11010101000000110011MMMM01011111")
 INST(DSB, "DSB", "11010101000000110011MMMM10011111")
 INST(DMB, "DMB", "11010101000000110011MMMM10111111")
-//INST(ISB, "ISB", "11010101000000110011MMMM11011111")
+INST(ISB, "ISB", "11010101000000110011MMMM11011111")
 //INST(SYS, "SYS", "1101010100001oooNNNNMMMMooottttt")
 INST(MSR_reg, "MSR (register)", "110101010001poooNNNNMMMMooottttt")
 //INST(SYSL, "SYSL", "1101010100101oooNNNNMMMMooottttt")


@@ -57,6 +57,10 @@ void IREmitter::DataMemoryBarrier() {
     Inst(Opcode::A64DataMemoryBarrier);
 }
 
+void IREmitter::InstructionSynchronizationBarrier() {
+    Inst(Opcode::A64InstructionSynchronizationBarrier);
+}
+
 IR::U32 IREmitter::GetCNTFRQ() {
     return Inst<IR::U32>(Opcode::A64GetCNTFRQ);
 }


@@ -45,6 +45,7 @@ public:
     void DataCacheOperationRaised(DataCacheOperation op, const IR::U64& value);
     void DataSynchronizationBarrier();
     void DataMemoryBarrier();
+    void InstructionSynchronizationBarrier();
     IR::U32 GetCNTFRQ();
     IR::U64 GetCNTPCT(); // TODO: Ensure sub-basic-block cycle counts are updated before this.
     IR::U32 GetCTR();


@@ -71,6 +71,13 @@ bool TranslatorVisitor::DMB(Imm<4> /*CRm*/) {
     return true;
 }
 
+bool TranslatorVisitor::ISB(Imm<4> /*CRm*/) {
+    ir.InstructionSynchronizationBarrier();
+    ir.SetPC(ir.Imm64(ir.current_location->PC() + 4));
+    ir.SetTerm(IR::Term::ReturnToDispatch{});
+    return false;
+}
+
 bool TranslatorVisitor::MSR_reg(Imm<1> o0, Imm<3> op1, Imm<4> CRn, Imm<4> CRm, Imm<3> op2, Reg Rt) {
     const auto sys_reg = concatenate(Imm<1>{1}, o0, op1, CRn, CRm, op2).ZeroExtend<SystemRegisterEncoding>();
     switch (sys_reg) {
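
Note how the translator handles ISB above: it emits the barrier, points the PC at the following instruction, and ends the block with a `ReturnToDispatch` terminal (`return false` stops translation), so no further guest instructions are appended to a block the barrier may be invalidating. A toy sketch of that "end the block here" shape, with stand-in types that merely echo the calls used above (not dynarmic's real IR emitter):

#include <cstdint>
#include <cstdio>

struct ToyIR {
    std::uint64_t pc;
    void InstructionSynchronizationBarrier() { std::puts("emit barrier opcode"); }
    void SetPC(std::uint64_t value) {
        std::printf("resume at 0x%llx\n", static_cast<unsigned long long>(value));
    }
    void SetTermReturnToDispatch() { std::puts("terminal: back to the dispatcher"); }
};

// Returning false tells the translation loop that the block ends here, so
// execution goes back through the dispatcher and consults the block cache
// that the barrier has just flushed.
bool TranslateISB(ToyIR& ir) {
    ir.InstructionSynchronizationBarrier();
    ir.SetPC(ir.pc + 4);
    ir.SetTermReturnToDispatch();
    return false;
}

int main() {
    ToyIR ir{0x1000};
    return TranslateISB(ir) ? 1 : 0;
}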


@@ -395,19 +395,20 @@ bool Inst::IsCoprocessorInstruction() const {
 }
 
 bool Inst::MayHaveSideEffects() const {
     return op == Opcode::PushRSB ||
            op == Opcode::A64SetCheckBit ||
            op == Opcode::A64DataCacheOperationRaised ||
            op == Opcode::A64DataSynchronizationBarrier ||
            op == Opcode::A64DataMemoryBarrier ||
+           op == Opcode::A64InstructionSynchronizationBarrier ||
            CausesCPUException() ||
            WritesToCoreRegister() ||
            WritesToSystemRegister() ||
            WritesToCPSR() ||
            WritesToFPCR() ||
            WritesToFPSR() ||
            AltersExclusiveState() ||
            IsMemoryWrite() ||
            IsCoprocessorInstruction();
 }
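
The `MayHaveSideEffects()` addition above is what protects the new opcode from dead-code elimination: the barrier produces no IR value and so never has users, leaving the side-effect flag as the only thing that keeps it alive. A toy illustration of that interaction (not dynarmic's actual optimization pass):

#include <vector>

// Toy model: an instruction with no users is erased unless it is flagged as
// possibly having side effects. A barrier never has users, so the flag is the
// only thing keeping it in the block.
struct ToyInst {
    bool has_users;
    bool may_have_side_effects;
    bool erased;
};

void ToyDeadCodeElimination(std::vector<ToyInst>& block) {
    for (auto& inst : block) {
        if (!inst.has_users && !inst.may_have_side_effects) {
            inst.erased = true;  // an unlisted barrier would vanish here
        }
    }
}

int main() {
    std::vector<ToyInst> block{{false, true, false}};  // the barrier: no users, has side effects
    ToyDeadCodeElimination(block);
    return block[0].erased ? 1 : 0;  // stays in the block, so this returns 0
}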


@@ -66,6 +66,7 @@ A64OPC(ExceptionRaised, T::Void, T::U64,
 A64OPC(DataCacheOperationRaised, T::Void, T::U64, T::U64 )
 A64OPC(DataSynchronizationBarrier, T::Void, )
 A64OPC(DataMemoryBarrier, T::Void, )
+A64OPC(InstructionSynchronizationBarrier, T::Void, )
 A64OPC(GetCNTFRQ, T::U32, )
 A64OPC(GetCNTPCT, T::U64, )
 A64OPC(GetCTR, T::U32, )