A64/config.h: Split fastmem and page_table options.
We might want to allocate different sizes for each of them, e.g. for the unsafe fastmem approach without bounds checking, or for using the full 48-bit address range (with mirrors) by allocating our real arena as close to 1<<47 as possible.
parent: 828959caed
commit: 0c12614d1a
3 changed files with 20 additions and 10 deletions
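To show what the split buys, here is a hedged configuration sketch (not taken from this commit): the page table keeps its 36-bit default while the fastmem arena is sized for the full 48-bit guest range and mirrored, as the commit message suggests. The UserConfig fields are the ones touched by the diff below; the header paths, the MakeConfig helper, and the env/page_table/backing_memory arguments are illustrative assumptions.

    #include <dynarmic/A64/a64.h>
    #include <dynarmic/A64/config.h>

    // Hedged sketch, not part of this commit: size the page table and the fastmem
    // arena independently. `env`, `page_table` and `backing_memory` are assumed to
    // be set up by the caller, much like in the existing fastmem test further below.
    Dynarmic::A64::UserConfig MakeConfig(Dynarmic::A64::UserCallbacks& env,
                                         void** page_table, void* backing_memory) {
        Dynarmic::A64::UserConfig config{&env};

        config.page_table = page_table;             // user-allocated table, kept at 36 bits
        config.page_table_address_space_bits = 36;

        config.fastmem_pointer = backing_memory;    // host arena for direct loads/stores
        config.fastmem_address_space_bits = 48;     // cover the full 48-bit guest range
        config.silently_mirror_fastmem = true;      // wrap out-of-range accesses onto the arena

        return config;
    }

A frontend would then construct the JIT as in the existing test, e.g. Dynarmic::A64::Jit jit{MakeConfig(env, page_table, backing_memory)};.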
@@ -881,11 +881,11 @@ Xbyak::RegExp EmitVAddrLookup(BlockOfCode& code, A64EmitContext& ctx, size_t bit
 }
 
 Xbyak::RegExp EmitFastmemVAddr(BlockOfCode& code, A64EmitContext& ctx, Xbyak::Label& abort, Xbyak::Reg64 vaddr, bool& require_abort_handling) {
-    const size_t unused_top_bits = 64 - ctx.conf.page_table_address_space_bits;
+    const size_t unused_top_bits = 64 - ctx.conf.fastmem_address_space_bits;
 
     if (unused_top_bits == 0) {
         return r13 + vaddr;
-    } else if (ctx.conf.silently_mirror_page_table) {
+    } else if (ctx.conf.silently_mirror_fastmem) {
         Xbyak::Reg64 tmp = ctx.reg_alloc.ScratchGpr();
         if (unused_top_bits < 32) {
             code.mov(tmp, vaddr);
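The mirroring branch above masks the guest address down to the low fastmem_address_space_bits before adding the arena base held in r13, so out-of-range guest addresses wrap back onto the arena. A host-side C++ paraphrase of that computation (a sketch for clarity, not the JIT output; the function name is invented):

    #include <cstddef>
    #include <cstdint>

    // Host-side paraphrase of the mirroring path in EmitFastmemVAddr: keep only the
    // low fastmem_address_space_bits of the guest address, then offset into the arena.
    std::uint8_t* MirroredHostAddr(std::uint8_t* fastmem_base, std::uint64_t vaddr,
                                   std::size_t fastmem_address_space_bits) {
        const std::size_t unused_top_bits = 64 - fastmem_address_space_bits;
        if (unused_top_bits == 0) {
            return fastmem_base + vaddr;    // full 64-bit space: nothing to mask
        }
        // Same effect as the shift/and_ sequences emitted above.
        const std::uint64_t masked = (vaddr << unused_top_bits) >> unused_top_bits;
        return fastmem_base + masked;
    }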
@@ -895,19 +895,19 @@ Xbyak::RegExp EmitFastmemVAddr(BlockOfCode& code, A64EmitContext& ctx, Xbyak::La
             code.mov(tmp.cvt32(), vaddr.cvt32());
         } else {
             code.mov(tmp.cvt32(), vaddr.cvt32());
-            code.and_(tmp, u32((1 << ctx.conf.page_table_address_space_bits) - 1));
+            code.and_(tmp, u32((1 << ctx.conf.fastmem_address_space_bits) - 1));
         }
         return r13 + tmp;
     } else {
-        if (ctx.conf.page_table_address_space_bits < 32) {
-            code.test(vaddr, u32(-(1 << ctx.conf.page_table_address_space_bits)));
+        if (ctx.conf.fastmem_address_space_bits < 32) {
+            code.test(vaddr, u32(-(1 << ctx.conf.fastmem_address_space_bits)));
             code.jnz(abort, code.T_NEAR);
             require_abort_handling = true;
         } else {
             // TODO: Consider having TEST as above but coalesce 64-bit constant in register allocator
             Xbyak::Reg64 tmp = ctx.reg_alloc.ScratchGpr();
             code.mov(tmp, vaddr);
-            code.shr(tmp, int(ctx.conf.page_table_address_space_bits));
+            code.shr(tmp, int(ctx.conf.fastmem_address_space_bits));
             code.jnz(abort, code.T_NEAR);
             require_abort_handling = true;
         }
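In the non-mirroring branch, the emitted TEST/SHR plus JNZ sequences abort to the slow path (the memory callbacks) whenever any address bit at or above fastmem_address_space_bits is set. The same predicate written host-side, again as an illustrative sketch with an invented function name:

    #include <cstddef>
    #include <cstdint>

    // Host-side paraphrase of the bounds check: the generated code above jumps to
    // the abort label exactly when this returns false.
    bool InFastmemBounds(std::uint64_t vaddr, std::size_t fastmem_address_space_bits) {
        if (fastmem_address_space_bits >= 64) {
            return true;                                    // every 64-bit address is in range
        }
        return (vaddr >> fastmem_address_space_bits) == 0;  // no bits set above the arena size
    }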
@@ -202,7 +202,7 @@ struct UserConfig {
     void** page_table = nullptr;
     /// Declares how many valid address bits are there in virtual addresses.
     /// Determines the size of page_table. Valid values are between 12 and 64 inclusive.
-    /// This is only used if page_table or fastmem_pointer is not nullptr.
+    /// This is only used if page_table is not nullptr.
     size_t page_table_address_space_bits = 36;
     /// Masks out the first N bits in host pointers from the page table.
     /// The intention behind this is to allow users of Dynarmic to pack attributes in the
@@ -213,7 +213,7 @@ struct UserConfig {
     /// page table. If true, Dynarmic will silently mirror page_table's address space. If
     /// false, accessing memory outside of page_table bounds will result in a call to the
     /// relevant memory callback.
-    /// This is only used if page_table or fastmem_pointer is not nullptr.
+    /// This is only used if page_table is not nullptr.
     bool silently_mirror_page_table = true;
     /// Determines if the pointer in the page_table shall be offseted locally or globally.
     /// 'false' will access page_table[addr >> bits][addr & mask]
@@ -243,6 +243,16 @@ struct UserConfig {
     /// Recompiled code will use the page_table if this is available, otherwise memory
     /// accesses will hit the memory callbacks.
     bool recompile_on_fastmem_failure = true;
+    /// Declares how many valid address bits are there in virtual addresses.
+    /// Determines the size of fastmem arena. Valid values are between 12 and 64 inclusive.
+    /// This is only used if fastmem_pointer is not nullptr.
+    size_t fastmem_address_space_bits = 36;
+    /// Determines what happens if the guest accesses an entry that is off the end of the
+    /// fastmem arena. If true, Dynarmic will silently mirror fastmem's address space. If
+    /// false, accessing memory outside of fastmem bounds will result in a call to the
+    /// relevant memory callback.
+    /// This is only used if fastmem_pointer is not nullptr.
+    bool silently_mirror_fastmem = true;
 
     /// This option relates to translation. Generally when we run into an unpredictable
     /// instruction the ExceptionRaised callback is called. If this is true, we define
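Per the new doc comments, fastmem_address_space_bits alone now determines the fastmem arena size, so the host reservation backing fastmem_pointer should span 1 << fastmem_address_space_bits bytes. Below is a hedged, Linux-only sketch of reserving such an arena with mmap, hinting the placement near 1<<47 as the commit message suggests; the 32-bit example size and the whole allocation strategy are assumptions, not something this commit prescribes.

    #include <sys/mman.h>
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    int main() {
        // Example: a 32-bit guest address space, so the arena spans 1 << 32 bytes,
        // matching fastmem_address_space_bits = 32.
        const std::size_t fastmem_address_space_bits = 32;
        const std::size_t arena_size = std::size_t{1} << fastmem_address_space_bits;

        // Hint the mapping as close to 1 << 47 as possible; the kernel is free to
        // ignore a non-MAP_FIXED hint and place the arena elsewhere.
        void* const hint = reinterpret_cast<void*>((std::uintptr_t{1} << 47) - arena_size);
        void* const arena = mmap(hint, arena_size, PROT_READ | PROT_WRITE,
                                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
        if (arena == MAP_FAILED) {
            std::perror("mmap");
            return 1;
        }
        std::printf("fastmem arena at %p (%zu bytes)\n", arena, arena_size);
        // `arena` is what a frontend would pass as config.fastmem_pointer.
        munmap(arena, arena_size);
        return 0;
    }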
@@ -852,9 +852,9 @@ TEST_CASE("A64: Memory access (fastmem)", "[a64]") {
     A64FastmemTestEnv env{backing_memory};
     Dynarmic::A64::UserConfig config{&env};
     config.fastmem_pointer = backing_memory;
-    config.page_table_address_space_bits = address_width;
+    config.fastmem_address_space_bits = address_width;
     config.recompile_on_fastmem_failure = false;
-    config.silently_mirror_page_table = true;
+    config.silently_mirror_fastmem = true;
     config.processor_id = 0;
 
     Dynarmic::A64::Jit jit{config};