backend/{a32,a64}_emit_x64: Add config entry to mask page table pointers

Add config entry to mask out the lower bits in page table pointers.
This is intended to allow users of Dynarmic to pack small integers
inside pointers and update the pair atomically without locks.
These lower bits can be masked out due to the expected alignment in
pointers inside the page table.

For the given usage, ANDing the pointer with the mask sets the zero flag
exactly as the TEST instruction did, so the subsequent conditional jump
still catches null page-table entries. That said, when the mask value is
zero, TEST is still emitted to keep the previous behavior unchanged.
This commit is contained in:
ReinUsesLisp 2020-12-28 02:18:31 -03:00 committed by merry
parent 42059edca4
commit 4a9a0d07f7
4 changed files with 20 additions and 2 deletions

View file

@@ -137,6 +137,11 @@ struct UserConfig {
/// So there might be wrongly faulted pages which maps to nullptr.
/// This can be avoided by carefully allocating the memory region.
bool absolute_offset_page_table = false;
/// Masks out the first N bits in host pointers from the page table.
/// The intention behind this is to allow users of Dynarmic to pack attributes in the
/// same integer and update the pointer attribute pair atomically.
/// If the configured value is 3, all pointers will be forcefully aligned to 8 bytes.
int page_table_pointer_mask_bits = 0;
/// Determines if we should detect memory accesses via page_table that straddle page
/// boundaries or are misaligned. Accesses that straddle page boundaries will fall back
/// to the relevant memory callback.

View file

@@ -188,6 +188,11 @@ struct UserConfig {
/// Determines the size of page_table. Valid values are between 12 and 64 inclusive.
/// This is only used if page_table is not nullptr.
size_t page_table_address_space_bits = 36;
/// Masks out the first N bits in host pointers from the page table.
/// The intention behind this is to allow users of Dynarmic to pack attributes in the
/// same integer and update the pointer attribute pair atomically.
/// If the configured value is 3, all pointers will be forcefully aligned to 8 bytes.
int page_table_pointer_mask_bits = 0;
/// Determines what happens if the guest accesses an entry that is off the end of the
/// page table. If true, Dynarmic will silently mirror page_table's address space. If
/// false, accessing memory outside of page_table bounds will result in a call to the

View file

@@ -935,7 +935,11 @@ Xbyak::RegExp EmitVAddrLookup(BlockOfCode& code, A32EmitContext& ctx, size_t bit
code.mov(tmp, vaddr.cvt32());
code.shr(tmp, static_cast<int>(page_bits));
code.mov(page, qword[r14 + tmp.cvt64() * sizeof(void*)]);
code.test(page, page);
if (ctx.conf.page_table_pointer_mask_bits == 0) {
code.test(page, page);
} else {
code.and_(page, ~u32(0) << ctx.conf.page_table_pointer_mask_bits);
}
code.jz(abort, code.T_NEAR);
if (ctx.conf.absolute_offset_page_table) {
return page + vaddr;

View file

@@ -815,7 +815,11 @@ Xbyak::RegExp EmitVAddrLookup(BlockOfCode& code, A64EmitContext& ctx, size_t bit
code.jnz(abort, code.T_NEAR);
}
code.mov(page, qword[r14 + tmp * sizeof(void*)]);
code.test(page, page);
if (ctx.conf.page_table_pointer_mask_bits == 0) {
code.test(page, page);
} else {
code.and_(page, ~u32(0) << ctx.conf.page_table_pointer_mask_bits);
}
code.jz(abort, code.T_NEAR);
if (ctx.conf.absolute_offset_page_table) {
return page + vaddr;