Implement global exclusive monitor

MerryMage 2018-06-05 12:27:37 +01:00
parent 85234338d3
commit 57f7c7e1b0
5 changed files with 228 additions and 0 deletions

File: include/dynarmic/A64/config.h

@@ -106,9 +106,14 @@ struct UserCallbacks {
     virtual std::uint64_t GetCNTPCT() = 0;
 };
 
+class ExclusiveMonitor;
+
 struct UserConfig {
     UserCallbacks* callbacks;
 
+    size_t processor_id = 0;
+    ExclusiveMonitor* global_monitor = nullptr;
+
     /// When set to true, UserCallbacks::DataCacheOperationRaised will be called when any
     /// data cache instruction is executed. Notably DC ZVA will not implicitly do anything.
     /// When set to false, UserCallbacks::DataCacheOperationRaised will never be called.
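For orientation (not part of the commit): a minimal sketch of how a frontend might wire the new fields up for two emulated cores sharing one monitor. MyCallbacks stands in for a user-provided UserCallbacks implementation and is hypothetical.

// Hypothetical setup: one global monitor shared by two cores.
Dynarmic::A64::ExclusiveMonitor monitor{2};   // processor_count = 2

MyCallbacks callbacks0, callbacks1;           // assumed UserCallbacks subclasses

Dynarmic::A64::UserConfig config0;
config0.callbacks = &callbacks0;
config0.processor_id = 0;                     // must be unique per core
config0.global_monitor = &monitor;

Dynarmic::A64::UserConfig config1;
config1.callbacks = &callbacks1;
config1.processor_id = 1;
config1.global_monitor = &monitor;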

File: include/dynarmic/A64/exclusive_monitor.h (new file)

@@ -0,0 +1,64 @@
+/* This file is part of the dynarmic project.
+ * Copyright (c) 2018 MerryMage
+ * This software may be used and distributed according to the terms of the GNU
+ * General Public License version 2 or any later version.
+ */
+
+#pragma once
+
+#include <atomic>
+#include <cstddef>
+#include <cstdint>
+#include <vector>
+
+namespace Dynarmic {
+namespace A64 {
+
+using VAddr = std::uint64_t;
+
+class ExclusiveMonitor {
+public:
+    /// @param processor_count Maximum number of processors using this global
+    ///                        exclusive monitor. Each processor must have a
+    ///                        unique id.
+    explicit ExclusiveMonitor(size_t processor_count);
+
+    size_t GetProcessorCount() const;
+
+    /// Marks a region containing [address, address+size) to be exclusive to
+    /// processor processor_id.
+    void Mark(size_t processor_id, VAddr address, size_t size);
+
+    /// Checks to see if processor processor_id has exclusive access to the
+    /// specified region. If it does, executes the operation then clears
+    /// the exclusive state for processors if their exclusive region(s)
+    /// contain [address, address+size).
+    template <typename Function>
+    bool DoExclusiveOperation(size_t processor_id, VAddr address, size_t size, Function op) {
+        if (!CheckAndClear(processor_id, address, size)) {
+            return false;
+        }
+
+        op();
+
+        Unlock();
+        return true;
+    }
+
+    /// Unmark everything.
+    void Clear();
+
+private:
+    bool CheckAndClear(size_t processor_id, VAddr address, size_t size);
+    void Lock();
+    void Unlock();
+
+    static constexpr VAddr RESERVATION_GRANULE_MASK = 0xFFFF'FFFF'FFFF'FFF0ull;
+    static constexpr VAddr INVALID_EXCLUSIVE_ADDRESS = 0xDEAD'DEAD'DEAD'DEADull;
+    std::atomic_flag is_locked;
+    std::vector<VAddr> exclusive_addresses;
+};
+
+} // namespace A64
+} // namespace Dynarmic
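As an illustration of the intended call pattern (ours, not the commit's): an exclusive load/store pair maps onto Mark and DoExclusiveOperation roughly as follows; the address and size are arbitrary example values.

Dynarmic::A64::ExclusiveMonitor monitor{1};

// LDXR side: reserve the granule containing the address for processor 0.
monitor.Mark(/*processor_id=*/0, /*address=*/0x1000, /*size=*/8);

// STXR side: the store callback runs only if the reservation still holds;
// on success every reservation covering the granule is cleared.
const bool success = monitor.DoExclusiveOperation(/*processor_id=*/0, 0x1000, 8, [&] {
    // perform the 8-byte store here
});
// success == false means another processor claimed the granule first,
// so a guest would loop back and retry its LDXR/STXR sequence.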

File: src/CMakeLists.txt

@@ -6,6 +6,7 @@ add_library(dynarmic
     ../include/dynarmic/A32/disassembler.h
     ../include/dynarmic/A64/a64.h
     ../include/dynarmic/A64/config.h
+    ../include/dynarmic/A64/exclusive_monitor.h
     common/address_range.h
     common/aes.cpp
     common/aes.h
@@ -164,6 +165,7 @@ if (ARCHITECTURE_x86_64)
     backend_x64/a32_jitstate.h
     backend_x64/a64_emit_x64.cpp
     backend_x64/a64_emit_x64.h
+    backend_x64/a64_exclusive_monitor.cpp
     backend_x64/a64_interface.cpp
     backend_x64/a64_jitstate.cpp
     backend_x64/a64_jitstate.h

File: src/backend_x64/a64_emit_x64.cpp

@@ -6,6 +6,7 @@
 #include <initializer_list>
 
+#include <dynarmic/A64/exclusive_monitor.h>
 #include <fmt/ostream.h>
 
 #include "backend_x64/a64_emit_x64.h"
@@ -567,6 +568,21 @@ void A64EmitX64::EmitA64ClearExclusive(A64EmitContext&, IR::Inst*) {
 }
 
 void A64EmitX64::EmitA64SetExclusive(A64EmitContext& ctx, IR::Inst* inst) {
+    if (conf.global_monitor) {
+        auto args = ctx.reg_alloc.GetArgumentInfo(inst);
+        ctx.reg_alloc.HostCall(nullptr, {}, args[0], args[1]);
+
+        code.mov(code.byte[r15 + offsetof(A64JitState, exclusive_state)], u8(1));
+        code.mov(code.ABI_PARAM1, reinterpret_cast<u64>(&conf));
+        code.CallFunction(static_cast<void(*)(A64::UserConfig&, u64, u8)>(
+            [](A64::UserConfig& conf, u64 vaddr, u8 size) {
+                conf.global_monitor->Mark(conf.processor_id, vaddr, size);
+            }
+        ));
+
+        return;
+    }
+
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
     ASSERT(args[1].IsImmediate());
     Xbyak::Reg64 address = ctx.reg_alloc.UseGpr(args[0]);
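Read as plain C++, the global-monitor path above is roughly equivalent to the following sketch; the free-function form and its name are ours, not the commit's.

// Illustrative runtime effect of the emitted SetExclusive sequence.
void SetExclusive(A64::UserConfig& conf, A64JitState& jit_state, u64 vaddr, u8 size) {
    jit_state.exclusive_state = 1;                              // arm the local monitor
    conf.global_monitor->Mark(conf.processor_id, vaddr, size);  // claim the granule globally
}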
@@ -818,6 +834,82 @@ void A64EmitX64::EmitA64WriteMemory128(A64EmitContext& ctx, IR::Inst* inst) {
 }
 
 void A64EmitX64::EmitExclusiveWrite(A64EmitContext& ctx, IR::Inst* inst, size_t bitsize) {
+    if (conf.global_monitor) {
+        auto args = ctx.reg_alloc.GetArgumentInfo(inst);
+        if (bitsize != 128) {
+            ctx.reg_alloc.HostCall(inst, {}, args[0], args[1]);
+        } else {
+            ctx.reg_alloc.Use(args[0], ABI_PARAM2);
+            ctx.reg_alloc.Use(args[1], HostLoc::XMM0);
+            ctx.reg_alloc.EndOfAllocScope();
+            ctx.reg_alloc.HostCall(inst);
+        }
+
+        Xbyak::Label end;
+
+        code.mov(code.ABI_RETURN, u32(1));
+        code.cmp(code.byte[r15 + offsetof(A64JitState, exclusive_state)], u8(0));
+        code.je(end);
+        code.mov(code.ABI_PARAM1, reinterpret_cast<u64>(&conf));
+        switch (bitsize) {
+        case 8:
+            code.CallFunction(static_cast<u32(*)(A64::UserConfig&, u64, u8)>(
+                [](A64::UserConfig& conf, u64 vaddr, u8 value) -> u32 {
+                    return conf.global_monitor->DoExclusiveOperation(conf.processor_id, vaddr, 1, [&]{
+                        conf.callbacks->MemoryWrite8(vaddr, value);
+                    }) ? 0 : 1;
+                }
+            ));
+            break;
+        case 16:
+            code.CallFunction(static_cast<u32(*)(A64::UserConfig&, u64, u16)>(
+                [](A64::UserConfig& conf, u64 vaddr, u16 value) -> u32 {
+                    return conf.global_monitor->DoExclusiveOperation(conf.processor_id, vaddr, 2, [&]{
+                        conf.callbacks->MemoryWrite16(vaddr, value);
+                    }) ? 0 : 1;
+                }
+            ));
+            break;
+        case 32:
+            code.CallFunction(static_cast<u32(*)(A64::UserConfig&, u64, u32)>(
+                [](A64::UserConfig& conf, u64 vaddr, u32 value) -> u32 {
+                    return conf.global_monitor->DoExclusiveOperation(conf.processor_id, vaddr, 4, [&]{
+                        conf.callbacks->MemoryWrite32(vaddr, value);
+                    }) ? 0 : 1;
+                }
+            ));
+            break;
+        case 64:
+            code.CallFunction(static_cast<u32(*)(A64::UserConfig&, u64, u64)>(
+                [](A64::UserConfig& conf, u64 vaddr, u64 value) -> u32 {
+                    return conf.global_monitor->DoExclusiveOperation(conf.processor_id, vaddr, 8, [&]{
+                        conf.callbacks->MemoryWrite64(vaddr, value);
+                    }) ? 0 : 1;
+                }
+            ));
+            break;
+        case 128:
+            code.sub(rsp, 8 + 16 + ABI_SHADOW_SPACE);
+            code.lea(code.ABI_PARAM3, ptr[rsp + ABI_SHADOW_SPACE]);
+            code.movaps(xword[code.ABI_PARAM3], xmm0);
+            code.CallFunction(static_cast<u32(*)(A64::UserConfig&, u64, A64::Vector&)>(
+                [](A64::UserConfig& conf, u64 vaddr, A64::Vector& value) -> u32 {
+                    return conf.global_monitor->DoExclusiveOperation(conf.processor_id, vaddr, 16, [&]{
+                        conf.callbacks->MemoryWrite128(vaddr, value);
+                    }) ? 0 : 1;
+                }
+            ));
+            code.add(rsp, 8 + 16 + ABI_SHADOW_SPACE);
+            break;
+        default:
+            UNREACHABLE();
+        }
+        code.L(end);
+
+        return;
+    }
+
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
     Xbyak::Reg64 vaddr = ctx.reg_alloc.UseGpr(args[0]);
     int value_idx = bitsize != 128
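For readability, here is a hedged C++ rendering of what the emitted exclusive-write path amounts to at runtime for the 32-bit case; the standalone function and its name are illustrative only, not part of the commit.

// Returns 0 on success and 1 on failure, matching the STXR status value.
u32 ExclusiveWrite32(A64::UserConfig& conf, A64JitState& jit_state, u64 vaddr, u32 value) {
    if (jit_state.exclusive_state == 0) {
        return 1; // no preceding SetExclusive: the store fails outright
    }
    const bool ok = conf.global_monitor->DoExclusiveOperation(conf.processor_id, vaddr, 4, [&] {
        conf.callbacks->MemoryWrite32(vaddr, value);
    });
    return ok ? 0 : 1;
}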

File: src/backend_x64/a64_exclusive_monitor.cpp (new file)

@@ -0,0 +1,65 @@
+/* This file is part of the dynarmic project.
+ * Copyright (c) 2018 MerryMage
+ * This software may be used and distributed according to the terms of the GNU
+ * General Public License version 2 or any later version.
+ */
+
+#include <algorithm>
+
+#include <dynarmic/A64/exclusive_monitor.h>
+#include "common/assert.h"
+
+namespace Dynarmic {
+namespace A64 {
+
+ExclusiveMonitor::ExclusiveMonitor(size_t processor_count) : exclusive_addresses(processor_count, INVALID_EXCLUSIVE_ADDRESS) {
+    Unlock();
+}
+
+size_t ExclusiveMonitor::GetProcessorCount() const {
+    return exclusive_addresses.size();
+}
+
+void ExclusiveMonitor::Mark(size_t processor_id, VAddr address, size_t size) {
+    ASSERT(size <= 16);
+    const VAddr masked_address = address & RESERVATION_GRANULE_MASK;
+
+    Lock();
+    exclusive_addresses[processor_id] = masked_address;
+    Unlock();
+}
+
+void ExclusiveMonitor::Lock() {
+    while (is_locked.test_and_set()) {}
+}
+
+void ExclusiveMonitor::Unlock() {
+    is_locked.clear();
+}
+
+bool ExclusiveMonitor::CheckAndClear(size_t processor_id, VAddr address, size_t size) {
+    ASSERT(size <= 16);
+    const VAddr masked_address = address & RESERVATION_GRANULE_MASK;
+
+    Lock();
+    if (exclusive_addresses[processor_id] != masked_address) {
+        Unlock();
+        return false;
+    }
+
+    for (VAddr& other_address : exclusive_addresses) {
+        if (other_address == masked_address) {
+            other_address = INVALID_EXCLUSIVE_ADDRESS;
+        }
+    }
+
+    return true;
+}
+
+void ExclusiveMonitor::Clear() {
+    Lock();
+    std::fill(exclusive_addresses.begin(), exclusive_addresses.end(), INVALID_EXCLUSIVE_ADDRESS);
+    Unlock();
+}
+
+} // namespace A64
+} // namespace Dynarmic
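One consequence of RESERVATION_GRANULE_MASK worth spelling out (our example, not from the commit): reservations are tracked at 16-byte granularity, so distinct addresses inside the same granule alias to one reservation.

// The mask clears the low 4 bits, i.e. rounds down to a 16-byte boundary.
static_assert((0x1008ull & 0xFFFF'FFFF'FFFF'FFF0ull) == 0x1000ull);
static_assert((0x100Cull & 0xFFFF'FFFF'FFFF'FFF0ull) == 0x1000ull);
// Hence a successful exclusive store to 0x1008 also invalidates a
// reservation another processor holds on 0x100C: both marks stored 0x1000.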