diff --git a/include/dynarmic/A32/config.h b/include/dynarmic/A32/config.h index a0091841..d7af62c6 100644 --- a/include/dynarmic/A32/config.h +++ b/include/dynarmic/A32/config.h @@ -137,6 +137,11 @@ struct UserConfig { /// So there might be wrongly faulted pages which maps to nullptr. /// This can be avoided by carefully allocating the memory region. bool absolute_offset_page_table = false; + /// Masks out the first N bits in host pointers from the page table. + /// The intention behind this is to allow users of Dynarmic to pack attributes in the + /// same integer and update the pointer attribute pair atomically. + /// If the configured value is 3, all pointers will be forcefully aligned to 8 bytes. + int page_table_pointer_mask_bits = 0; /// Determines if we should detect memory accesses via page_table that straddle are /// misaligned. Accesses that straddle page boundaries will fallback to the relevant /// memory callback. diff --git a/include/dynarmic/A64/config.h b/include/dynarmic/A64/config.h index 6c607717..e6f48ef6 100644 --- a/include/dynarmic/A64/config.h +++ b/include/dynarmic/A64/config.h @@ -188,6 +188,11 @@ struct UserConfig { /// Determines the size of page_table. Valid values are between 12 and 64 inclusive. /// This is only used if page_table is not nullptr. size_t page_table_address_space_bits = 36; + /// Masks out the first N bits in host pointers from the page table. + /// The intention behind this is to allow users of Dynarmic to pack attributes in the + /// same integer and update the pointer attribute pair atomically. + /// If the configured value is 3, all pointers will be forcefully aligned to 8 bytes. + int page_table_pointer_mask_bits = 0; /// Determines what happens if the guest accesses an entry that is off the end of the /// page table. If true, Dynarmic will silently mirror page_table's address space. 
If /// false, accessing memory outside of page_table bounds will result in a call to the diff --git a/src/backend/x64/a32_emit_x64.cpp b/src/backend/x64/a32_emit_x64.cpp index a532ccb1..099dfdd0 100644 --- a/src/backend/x64/a32_emit_x64.cpp +++ b/src/backend/x64/a32_emit_x64.cpp @@ -935,7 +935,11 @@ Xbyak::RegExp EmitVAddrLookup(BlockOfCode& code, A32EmitContext& ctx, size_t bit code.mov(tmp, vaddr.cvt32()); code.shr(tmp, static_cast<int>(page_bits)); code.mov(page, qword[r14 + tmp.cvt64() * sizeof(void*)]); - code.test(page, page); + if (ctx.conf.page_table_pointer_mask_bits == 0) { + code.test(page, page); + } else { + code.and_(page, ~u32(0) << ctx.conf.page_table_pointer_mask_bits); + } code.jz(abort, code.T_NEAR); if (ctx.conf.absolute_offset_page_table) { return page + vaddr; diff --git a/src/backend/x64/a64_emit_x64.cpp b/src/backend/x64/a64_emit_x64.cpp index ade6711b..c8be6fe1 100644 --- a/src/backend/x64/a64_emit_x64.cpp +++ b/src/backend/x64/a64_emit_x64.cpp @@ -815,7 +815,11 @@ Xbyak::RegExp EmitVAddrLookup(BlockOfCode& code, A64EmitContext& ctx, size_t bit code.jnz(abort, code.T_NEAR); } code.mov(page, qword[r14 + tmp * sizeof(void*)]); - code.test(page, page); + if (ctx.conf.page_table_pointer_mask_bits == 0) { + code.test(page, page); + } else { + code.and_(page, ~u32(0) << ctx.conf.page_table_pointer_mask_bits); + } code.jz(abort, code.T_NEAR); if (ctx.conf.absolute_offset_page_table) { return page + vaddr;