diff --git a/src/backend_x64/a64_emit_x64.cpp b/src/backend_x64/a64_emit_x64.cpp
index e6b4241d..3b7275d4 100644
--- a/src/backend_x64/a64_emit_x64.cpp
+++ b/src/backend_x64/a64_emit_x64.cpp
@@ -492,10 +492,10 @@ void A64EmitX64::EmitA64ClearExclusive(A64EmitContext&, IR::Inst*) {
 void A64EmitX64::EmitA64SetExclusive(A64EmitContext& ctx, IR::Inst* inst) {
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
     ASSERT(args[1].IsImmediate());
-    Xbyak::Reg32 address = ctx.reg_alloc.UseGpr(args[0]).cvt32();
+    Xbyak::Reg64 address = ctx.reg_alloc.UseGpr(args[0]);
 
     code.mov(code.byte[r15 + offsetof(A64JitState, exclusive_state)], u8(1));
-    code.mov(dword[r15 + offsetof(A64JitState, exclusive_address)], address);
+    code.mov(qword[r15 + offsetof(A64JitState, exclusive_address)], address);
 }
 
 static Xbyak::RegExp EmitVAddrLookup(const A64::UserConfig& conf, BlockOfCode& code, A64EmitContext& ctx, Xbyak::Label& abort, Xbyak::Reg64 vaddr, boost::optional<Xbyak::Reg64> arg_scratch = {}) {
@@ -743,14 +743,14 @@ void A64EmitX64::EmitA64WriteMemory128(A64EmitContext& ctx, IR::Inst* inst) {
 void A64EmitX64::EmitExclusiveWrite(A64EmitContext& ctx, IR::Inst* inst, size_t bitsize, Xbyak::Reg64 vaddr, int value_idx) {
     Xbyak::Label end;
     Xbyak::Reg32 passed = ctx.reg_alloc.ScratchGpr().cvt32();
-    Xbyak::Reg32 tmp = ctx.reg_alloc.ScratchGpr().cvt32();
+    Xbyak::Reg64 tmp = ctx.reg_alloc.ScratchGpr();
 
     code.mov(passed, u32(1));
     code.cmp(code.byte[r15 + offsetof(A64JitState, exclusive_state)], u8(0));
     code.je(end);
     code.mov(tmp, vaddr);
-    code.xor_(tmp, dword[r15 + offsetof(A64JitState, exclusive_address)]);
-    code.test(tmp, A64JitState::RESERVATION_GRANULE_MASK);
+    code.xor_(tmp, qword[r15 + offsetof(A64JitState, exclusive_address)]);
+    code.test(tmp, static_cast<u32>(A64JitState::RESERVATION_GRANULE_MASK & 0xFFFF'FFFF));
     code.jne(end);
     code.mov(code.byte[r15 + offsetof(A64JitState, exclusive_state)], u8(0));
     code.call(write_fallbacks[std::make_tuple(bitsize, vaddr.getIdx(), value_idx)]);
diff --git a/src/backend_x64/a64_jitstate.h b/src/backend_x64/a64_jitstate.h
index 84b79e19..8f08a0bd 100644
--- a/src/backend_x64/a64_jitstate.h
+++ b/src/backend_x64/a64_jitstate.h
@@ -57,9 +57,9 @@ struct A64JitState {
     bool check_bit = false;
 
     // Exclusive state
-    static constexpr u32 RESERVATION_GRANULE_MASK = 0xFFFFFFF8;
-    u32 exclusive_state = 0;
-    u32 exclusive_address = 0;
+    static constexpr u64 RESERVATION_GRANULE_MASK = 0xFFFF'FFFF'FFFF'FFF0ull;
+    u8 exclusive_state = 0;
+    u64 exclusive_address = 0;
 
     static constexpr size_t RSBSize = 8; // MUST be a power of 2.
     static constexpr size_t RSBPtrMask = RSBSize - 1;
diff --git a/src/frontend/A64/translate/impl/load_store_exclusive.cpp b/src/frontend/A64/translate/impl/load_store_exclusive.cpp
index 78316fac..bab0fb37 100644
--- a/src/frontend/A64/translate/impl/load_store_exclusive.cpp
+++ b/src/frontend/A64/translate/impl/load_store_exclusive.cpp
@@ -47,7 +47,7 @@ static bool ExclusiveSharedDecodeAndOperation(TranslatorVisitor& tv, IREmitter&
     } else if (pair && elsize == 32) {
         data = ir.Pack2x32To1x64(tv.X(32, Rt), tv.X(32, *Rt2));
     } else {
-        data = tv.X(datasize, Rt);
+        data = tv.X(elsize, Rt);
     }
     IR::U32 status = tv.ExclusiveMem(address, dbytes, acctype, data);
     tv.X(32, *Rs, status);
diff --git a/tests/A64/fuzz_with_unicorn.cpp b/tests/A64/fuzz_with_unicorn.cpp
index 49c9d4c9..eedf1b71 100644
--- a/tests/A64/fuzz_with_unicorn.cpp
+++ b/tests/A64/fuzz_with_unicorn.cpp
@@ -53,6 +53,8 @@ static u32 GenRandomInst(u64 pc, bool is_last_inst) {
         "STLLR",
         // Unimplemented in QEMU
         "LDLAR",
+        // Dynarmic and QEMU currently differ on how the exclusive monitor's address range works.
+        "STXR", "STLXR", "STXP", "STLXP", "LDXR", "LDAXR", "LDXP", "LDAXP",
     };
 
     for (const auto& [fn, bitstring] : list) {
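For clarity, here is a minimal plain-C++ sketch of the monitor check that the patched EmitExclusiveWrite emits. MonitorState and ExclusiveWritePasses are hypothetical names used only for illustration, not dynarmic's actual API; the fields and the mask mirror the new A64JitState members above.

// Models the store-exclusive check emitted by EmitExclusiveWrite:
// the write may proceed only when the monitor is armed and the store
// address lies in the same 16-byte reservation granule as the address
// recorded by EmitA64SetExclusive. (Hypothetical illustration, not
// dynarmic's actual API.)
#include <cstdint>

struct MonitorState {
    // Bits 3..0 cleared -> a 16-byte granule, matching the widened mask.
    static constexpr std::uint64_t RESERVATION_GRANULE_MASK = 0xFFFF'FFFF'FFFF'FFF0ull;
    std::uint8_t exclusive_state = 0;    // non-zero while the monitor is armed
    std::uint64_t exclusive_address = 0; // now a full 64-bit virtual address
};

bool ExclusiveWritePasses(MonitorState& m, std::uint64_t vaddr) {
    if (m.exclusive_state == 0)
        return false; // monitor not armed: the store-exclusive fails
    // Equivalent of the emitted "mov tmp, vaddr; xor tmp, exclusive_address;
    // test tmp, mask": any differing bit above the granule is a mismatch.
    if ((vaddr ^ m.exclusive_address) & MonitorState::RESERVATION_GRANULE_MASK)
        return false;
    m.exclusive_state = 0; // check passed: disarm the monitor...
    return true;           // ...and let the caller perform the actual store
}

A note on the odd-looking cast in the emitted code: x64 has no test r64, imm64 encoding, so the 64-bit mask is truncated to an imm32, which the test instruction sign-extends at runtime. 0xFFFF'FFF0 thus becomes 0xFFFF'FFFF'FFFF'FFF0 again, recovering the full 64-bit mask without a scratch register.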