// dynarmic/src/backend_x64/interface_x64.cpp

/* This file is part of the dynarmic project.
* Copyright (c) 2016 MerryMage
* This software may be used and distributed according to the terms of the GNU
* General Public License version 2 or any later version.
*/
#include <memory>
#include <queue>
#include <fmt/format.h>
#ifdef DYNARMIC_USE_LLVM
#include <llvm-c/Disassembler.h>
#include <llvm-c/Target.h>
#endif
#include "backend_x64/block_of_code.h"
#include "backend_x64/emit_x64.h"
#include "backend_x64/jitstate.h"
#include "common/assert.h"
#include "common/common_types.h"
#include "common/scope_exit.h"
#include "dynarmic/dynarmic.h"
#include "frontend/ir/basic_block.h"
#include "frontend/ir/location_descriptor.h"
#include "frontend/translate/translate.h"
#include "ir_opt/passes.h"
namespace Dynarmic {
using namespace BackendX64;
/// Private implementation of Jit (pimpl idiom). Owns the emitted-code cache,
/// the guest CPU state, and the x86-64 emitter.
struct Jit::Impl {
    Impl(Jit* jit, UserCallbacks callbacks)
            : block_of_code(callbacks)
            , jit_state()
            , emitter(&block_of_code, callbacks, jit)
            , callbacks(callbacks)
            , jit_interface(jit)
    {}

    BlockOfCode block_of_code;      ///< Backing store and runner for emitted x86-64 code.
    JitState jit_state;             ///< Guest CPU state (registers, CPSR, FPSCR, ...).
    EmitX64 emitter;                ///< Translates IR basic blocks into x86-64.
    const UserCallbacks callbacks;  ///< User-supplied memory/coprocessor callbacks.

    // Requests made during execution to invalidate the cache are queued up here.
    std::queue<Common::AddressRange> invalid_cache_ranges;

    /// Executes guest code starting at the current PC/CPSR/FPSCR for up to
    /// cycle_count cycles. Returns the number of cycles actually spent.
    size_t Execute(size_t cycle_count) {
        u32 pc = jit_state.Reg[15];
        IR::LocationDescriptor descriptor{pc, Arm::PSR{jit_state.Cpsr}, Arm::FPSCR{jit_state.FPSCR_mode}};

        CodePtr entrypoint = GetBasicBlock(descriptor).entrypoint;
        return block_of_code.RunCode(&jit_state, entrypoint, cycle_count);
    }

    /// Returns a human-readable disassembly of the host code emitted for
    /// descriptor. Produces instruction text only when built with
    /// DYNARMIC_USE_LLVM; otherwise only the address/size header.
    std::string Disassemble(const IR::LocationDescriptor& descriptor) {
        auto block = GetBasicBlock(descriptor);
        std::string result = fmt::format("address: {}\nsize: {} bytes\n", block.entrypoint, block.size);
#ifdef DYNARMIC_USE_LLVM
        LLVMInitializeX86TargetInfo();
        LLVMInitializeX86TargetMC();
        LLVMInitializeX86Disassembler();
        LLVMDisasmContextRef llvm_ctx = LLVMCreateDisasm("x86_64", nullptr, 0, nullptr, nullptr);
        LLVMSetDisasmOptions(llvm_ctx, LLVMDisassembler_Option_AsmPrinterVariant);

        const u8* pos = static_cast<const u8*>(block.entrypoint);
        const u8* end = pos + block.size;
        size_t remaining = block.size;
        while (pos < end) {
            char buffer[80];
            size_t inst_size = LLVMDisasmInstruction(llvm_ctx, const_cast<u8*>(pos), remaining, (u64)pos, buffer, sizeof(buffer));
            ASSERT(inst_size);
            // Hex dump of the instruction bytes; each byte occupies 3 columns ("xx ").
            for (const u8* i = pos; i < pos + inst_size; i++)
                result += fmt::format("{:02x} ", *i);
            // Pad short instructions out to 10 byte-columns so mnemonics line up.
            for (size_t i = inst_size; i < 10; i++)
                result += "   ";
            result += buffer;
            result += '\n';
            pos += inst_size;
            remaining -= inst_size;
        }
        LLVMDisasmDispose(llvm_ctx);
#else
        result.append("(recompile with DYNARMIC_USE_LLVM=ON to disassemble the generated x86_64 code)\n");
#endif
        return result;
    }

    /// Applies every queued invalidation request: resets the return-stack
    /// buffer, clears the whole code cache, then tells the emitter which
    /// guest address ranges are stale. No-op when nothing is queued.
    void PerformCacheInvalidation() {
        if (invalid_cache_ranges.empty()) {
            return;
        }

        jit_state.ResetRSB();
        block_of_code.ClearCache();
        while (!invalid_cache_ranges.empty()) {
            emitter.InvalidateCacheRange(invalid_cache_ranges.front());
            invalid_cache_ranges.pop();
        }
    }

    /// Called after queueing a new invalidation request. If guest code is
    /// currently executing we cannot patch the cache from under it, so we
    /// request a halt and defer the invalidation; otherwise apply it now.
    void HandleNewCacheRange() {
        if (jit_interface->is_executing) {
            jit_state.halt_requested = true;
            return;
        }

        PerformCacheInvalidation();
    }

private:
    Jit* jit_interface;  // Back-pointer to the public interface; non-owning.

    /// Returns the compiled block for descriptor, translating, optimizing and
    /// emitting it first if it is not already in the cache.
    EmitX64::BlockDescriptor GetBasicBlock(IR::LocationDescriptor descriptor) {
        auto block = emitter.GetBasicBlock(descriptor);
        if (block)
            return *block;

        IR::Block ir_block = Arm::Translate(descriptor, callbacks.memory.ReadCode);
        Optimization::GetSetElimination(ir_block);
        Optimization::DeadCodeElimination(ir_block);
        Optimization::ConstantPropagation(ir_block, callbacks.memory);
        Optimization::DeadCodeElimination(ir_block);
        Optimization::VerificationPass(ir_block);
        return emitter.Emit(ir_block);
    }
};
// Constructs the JIT; all state lives in the pimpl so this header-visible
// class stays ABI-stable.
Jit::Jit(UserCallbacks callbacks) : impl(std::make_unique<Impl>(this, callbacks)) {}
// Out-of-line destructor: required so unique_ptr<Impl> can see Impl's full definition.
Jit::~Jit() {}
size_t Jit::Run(size_t cycle_count) {
ASSERT(!is_executing);
is_executing = true;
SCOPE_EXIT({ this->is_executing = false; });
2016-08-15 15:02:08 +01:00
impl->jit_state.halt_requested = false;
size_t cycles_executed = 0;
2016-08-15 15:02:08 +01:00
while (cycles_executed < cycle_count && !impl->jit_state.halt_requested) {
cycles_executed += impl->Execute(cycle_count - cycles_executed);
}
2017-02-16 18:18:29 +00:00
impl->PerformCacheInvalidation();
return cycles_executed;
}
void Jit::ClearCache() {
2017-02-16 18:18:29 +00:00
impl->invalid_cache_ranges.push(Common::FullAddressRange{});
impl->HandleNewCacheRange();
}
// Queues invalidation of compiled code overlapping
// [start_address, start_address + length). Deferred if currently executing.
void Jit::InvalidateCacheRange(std::uint32_t start_address, std::size_t length) {
impl->invalid_cache_ranges.push(Common::AddressInterval{start_address, length});
impl->HandleNewCacheRange();
}
// Resets all guest CPU state to its default-initialized values.
// Must not be called while guest code is executing.
void Jit::Reset() {
ASSERT(!is_executing);
impl->jit_state = {};
}
void Jit::HaltExecution() {
ASSERT(is_executing);
2016-08-15 15:02:08 +01:00
impl->jit_state.halt_requested = true;
// TODO: Uh do other stuff to JitState pls.
}
// --- Guest state accessors: thin views into the pimpl's JitState. ---

// General-purpose registers R0-R15 (R15 is the PC).
std::array<u32, 16>& Jit::Regs() {
return impl->jit_state.Reg;
}
const std::array<u32, 16>& Jit::Regs() const {
return impl->jit_state.Reg;
}
// VFP extension registers.
std::array<u32, 64>& Jit::ExtRegs() {
return impl->jit_state.ExtReg;
}
const std::array<u32, 64>& Jit::ExtRegs() const {
return impl->jit_state.ExtReg;
}
// Current Program Status Register.
u32& Jit::Cpsr() {
return impl->jit_state.Cpsr;
}
u32 Jit::Cpsr() const {
return impl->jit_state.Cpsr;
}
// Floating-Point Status and Control Register; JitState mediates get/set
// because parts of FPSCR are mirrored into its own mode state.
u32 Jit::Fpscr() const {
return impl->jit_state.Fpscr();
}
void Jit::SetFpscr(u32 value) const {
return impl->jit_state.SetFpscr(value);
}
// Returns a disassembly of the host x86-64 code emitted for descriptor;
// delegates to Impl::Disassemble (instruction text requires DYNARMIC_USE_LLVM).
std::string Jit::Disassemble(const IR::LocationDescriptor& descriptor) {
return impl->Disassemble(descriptor);
}
} // namespace Dynarmic