diff --git a/src/backend/x64/a32_emit_x64.cpp b/src/backend/x64/a32_emit_x64.cpp
index 6548fd00..b0b66b1d 100644
--- a/src/backend/x64/a32_emit_x64.cpp
+++ b/src/backend/x64/a32_emit_x64.cpp
@@ -34,7 +34,7 @@
 // TODO: Have ARM flags in host flags and not have them use up GPR registers unless necessary.
 // TODO: Actually implement that proper instruction selector you've always wanted to sweetheart.
 
-namespace Dynarmic::BackendX64 {
+namespace Dynarmic::Backend::X64 {
 
 using namespace Xbyak::util;
 
@@ -1380,4 +1380,4 @@ void A32EmitX64::EmitPatchMovRcx(CodePtr target_code_ptr) {
     code.EnsurePatchLocationSize(patch_location, 10);
 }
 
-} // namespace Dynarmic::BackendX64
+} // namespace Dynarmic::Backend::X64
diff --git a/src/backend/x64/a32_emit_x64.h b/src/backend/x64/a32_emit_x64.h
index 24f62cd9..119b0517 100644
--- a/src/backend/x64/a32_emit_x64.h
+++ b/src/backend/x64/a32_emit_x64.h
@@ -17,7 +17,7 @@
 #include "frontend/A32/location_descriptor.h"
 #include "frontend/ir/terminal.h"
 
-namespace Dynarmic::BackendX64 {
+namespace Dynarmic::Backend::X64 {
 
 class RegAlloc;
 
@@ -101,4 +101,4 @@ protected:
     void EmitPatchMovRcx(CodePtr target_code_ptr = nullptr) override;
 };
 
-} // namespace Dynarmic::BackendX64
+} // namespace Dynarmic::Backend::X64
diff --git a/src/backend/x64/a32_interface.cpp b/src/backend/x64/a32_interface.cpp
index 62ca2f0b..9c00f486 100644
--- a/src/backend/x64/a32_interface.cpp
+++ b/src/backend/x64/a32_interface.cpp
@@ -29,7 +29,7 @@
 
 namespace Dynarmic::A32 {
 
-using namespace BackendX64;
+using namespace Backend::X64;
 
 static RunCodeCallbacks GenRunCodeCallbacks(A32::UserCallbacks* cb, CodePtr (*LookupBlock)(void* lookup_block_arg), void* arg) {
     return RunCodeCallbacks{
diff --git a/src/backend/x64/a32_jitstate.cpp b/src/backend/x64/a32_jitstate.cpp
index bf088473..730ba8fb 100644
--- a/src/backend/x64/a32_jitstate.cpp
+++ b/src/backend/x64/a32_jitstate.cpp
@@ -11,7 +11,7 @@
 #include "common/common_types.h"
 #include "frontend/A32/location_descriptor.h"
 
-namespace Dynarmic::BackendX64 {
+namespace Dynarmic::Backend::X64 {
 
 /**
  * CPSR Bits
@@ -202,4 +202,4 @@ void A32JitState::SetFpscr(u32 FPSCR) {
     }
 }
 
-} // namespace Dynarmic::BackendX64
+} // namespace Dynarmic::Backend::X64
diff --git a/src/backend/x64/a32_jitstate.h b/src/backend/x64/a32_jitstate.h
index ad15fbd4..495bf86a 100644
--- a/src/backend/x64/a32_jitstate.h
+++ b/src/backend/x64/a32_jitstate.h
@@ -12,7 +12,7 @@
 
 #include "common/common_types.h"
 
-namespace Dynarmic::BackendX64 {
+namespace Dynarmic::Backend::X64 {
 
 class BlockOfCode;
 
@@ -109,4 +109,4 @@ struct A32JitState {
 
 using CodePtr = const void*;
 
-} // namespace Dynarmic::BackendX64
+} // namespace Dynarmic::Backend::X64
diff --git a/src/backend/x64/a64_emit_x64.cpp b/src/backend/x64/a64_emit_x64.cpp
index 879780dc..e0c06cb2 100644
--- a/src/backend/x64/a64_emit_x64.cpp
+++ b/src/backend/x64/a64_emit_x64.cpp
@@ -30,7 +30,7 @@
 // TODO: Have ARM flags in host flags and not have them use up GPR registers unless necessary.
 // TODO: Actually implement that proper instruction selector you've always wanted to sweetheart.
 
-namespace Dynarmic::BackendX64 {
+namespace Dynarmic::Backend::X64 {
 
 using namespace Xbyak::util;
 
@@ -1244,4 +1244,4 @@ void A64EmitX64::EmitPatchMovRcx(CodePtr target_code_ptr) {
     code.EnsurePatchLocationSize(patch_location, 10);
 }
 
-} // namespace Dynarmic::BackendX64
+} // namespace Dynarmic::Backend::X64
diff --git a/src/backend/x64/a64_emit_x64.h b/src/backend/x64/a64_emit_x64.h
index a0f02dac..cb5a526d 100644
--- a/src/backend/x64/a64_emit_x64.h
+++ b/src/backend/x64/a64_emit_x64.h
@@ -18,7 +18,7 @@
 #include "frontend/A64/location_descriptor.h"
 #include "frontend/ir/terminal.h"
 
-namespace Dynarmic::BackendX64 {
+namespace Dynarmic::Backend::X64 {
 
 class RegAlloc;
 
@@ -106,4 +106,4 @@ protected:
     void EmitPatchMovRcx(CodePtr target_code_ptr = nullptr) override;
 };
 
-} // namespace Dynarmic::BackendX64
+} // namespace Dynarmic::Backend::X64
diff --git a/src/backend/x64/a64_interface.cpp b/src/backend/x64/a64_interface.cpp
index 90e031d8..31d221d8 100644
--- a/src/backend/x64/a64_interface.cpp
+++ b/src/backend/x64/a64_interface.cpp
@@ -24,7 +24,7 @@
 
 namespace Dynarmic::A64 {
 
-using namespace BackendX64;
+using namespace Backend::X64;
 
 static RunCodeCallbacks GenRunCodeCallbacks(A64::UserCallbacks* cb, CodePtr (*LookupBlock)(void* lookup_block_arg), void* arg) {
     return RunCodeCallbacks{
diff --git a/src/backend/x64/a64_jitstate.cpp b/src/backend/x64/a64_jitstate.cpp
index 054c9fce..fdc4169e 100644
--- a/src/backend/x64/a64_jitstate.cpp
+++ b/src/backend/x64/a64_jitstate.cpp
@@ -8,7 +8,7 @@
 #include "common/bit_util.h"
 #include "frontend/A64/location_descriptor.h"
 
-namespace Dynarmic::BackendX64 {
+namespace Dynarmic::Backend::X64 {
 
 /**
  * Comparing MXCSR and FPCR
@@ -108,4 +108,4 @@ void A64JitState::SetFpsr(u32 value) {
     fpsr_exc = value & 0x9F;
 }
 
-} // namespace Dynarmic::BackendX64
+} // namespace Dynarmic::Backend::X64
diff --git a/src/backend/x64/a64_jitstate.h b/src/backend/x64/a64_jitstate.h
index f926d0d7..3fb1c228 100644
--- a/src/backend/x64/a64_jitstate.h
+++ b/src/backend/x64/a64_jitstate.h
@@ -13,7 +13,7 @@
 #include "common/common_types.h"
 #include "frontend/A64/location_descriptor.h"
 
-namespace Dynarmic::BackendX64 {
+namespace Dynarmic::Backend::X64 {
 
 class BlockOfCode;
 
@@ -93,4 +93,4 @@ struct A64JitState {
 
 using CodePtr = const void*;
 
-} // namespace Dynarmic::BackendX64
+} // namespace Dynarmic::Backend::X64
diff --git a/src/backend/x64/abi.cpp b/src/backend/x64/abi.cpp
index cf0cd8a2..2dcbce3e 100644
--- a/src/backend/x64/abi.cpp
+++ b/src/backend/x64/abi.cpp
@@ -24,7 +24,7 @@
 #include "common/common_types.h"
 #include "common/iterator_util.h"
 
-namespace Dynarmic::BackendX64 {
+namespace Dynarmic::Backend::X64 {
 
 constexpr size_t GPR_SIZE = 8;
 constexpr size_t XMM_SIZE = 16;
@@ -150,4 +150,4 @@ void ABI_PopCallerSaveRegistersAndAdjustStackExcept(BlockOfCode& code, HostLoc exception) {
     ABI_PopRegistersAndAdjustStack(code, 0, regs);
 }
 
-} // namespace Dynarmic::BackendX64
+} // namespace Dynarmic::Backend::X64
diff --git a/src/backend/x64/abi.h b/src/backend/x64/abi.h
index d53d8419..f99687d6 100644
--- a/src/backend/x64/abi.h
+++ b/src/backend/x64/abi.h
@@ -9,7 +9,7 @@
 
 #include "backend/x64/hostloc.h"
 
-namespace Dynarmic::BackendX64 {
+namespace Dynarmic::Backend::X64 {
 
 class BlockOfCode;
 
@@ -121,4 +121,4 @@ void ABI_PopCallerSaveRegistersAndAdjustStack(BlockOfCode& code, size_t frame_size);
 void ABI_PushCallerSaveRegistersAndAdjustStackExcept(BlockOfCode& code, HostLoc exception);
 void ABI_PopCallerSaveRegistersAndAdjustStackExcept(BlockOfCode& code, HostLoc exception);
 
-} // namespace Dynarmic::BackendX64
+} // namespace Dynarmic::Backend::X64
diff --git a/src/backend/x64/block_of_code.cpp b/src/backend/x64/block_of_code.cpp
index 46d1c066..103d8168 100644
--- a/src/backend/x64/block_of_code.cpp
+++ b/src/backend/x64/block_of_code.cpp
@@ -21,7 +21,7 @@
 #include <sys/mman.h>
 #endif
 
-namespace Dynarmic::BackendX64 {
+namespace Dynarmic::Backend::X64 {
 
 #ifdef _WIN32
 const Xbyak::Reg64 BlockOfCode::ABI_RETURN = Xbyak::util::rax;
@@ -317,4 +317,4 @@ bool BlockOfCode::DoesCpuSupport([[maybe_unused]] Xbyak::util::Cpu::Type type) const {
 #endif
 }
 
-} // namespace Dynarmic::BackendX64
+} // namespace Dynarmic::Backend::X64
diff --git a/src/backend/x64/block_of_code.h b/src/backend/x64/block_of_code.h
index b17086e2..ee405bbe 100644
--- a/src/backend/x64/block_of_code.h
+++ b/src/backend/x64/block_of_code.h
@@ -19,7 +19,7 @@
 #include "common/cast_util.h"
 #include "common/common_types.h"
 
-namespace Dynarmic::BackendX64 {
+namespace Dynarmic::Backend::X64 {
 
 using CodePtr = const void*;
 
@@ -178,4 +178,4 @@ private:
     Xbyak::util::Cpu cpu_info;
 };
 
-} // namespace Dynarmic::BackendX64
+} // namespace Dynarmic::Backend::X64
diff --git a/src/backend/x64/block_range_information.cpp b/src/backend/x64/block_range_information.cpp
index 5f05c27d..0bfe8c26 100644
--- a/src/backend/x64/block_range_information.cpp
+++ b/src/backend/x64/block_range_information.cpp
@@ -12,7 +12,7 @@
 #include "backend/x64/block_range_information.h"
 #include "common/common_types.h"
 
-namespace Dynarmic::BackendX64 {
+namespace Dynarmic::Backend::X64 {
 
 template <typename ProgramCounterType>
 void BlockRangeInformation<ProgramCounterType>::AddRange(boost::icl::discrete_interval<ProgramCounterType> range, IR::LocationDescriptor location) {
@@ -42,4 +42,4 @@ std::unordered_set<IR::LocationDescriptor> BlockRangeInformation<ProgramCounterType>::InvalidateRanges(const boost::icl::interval_set<ProgramCounterType>& ranges) {
 template class BlockRangeInformation<u32>;
 template class BlockRangeInformation<u64>;
 
-} // namespace Dynarmic::BackendX64
+} // namespace Dynarmic::Backend::X64
diff --git a/src/backend/x64/block_range_information.h b/src/backend/x64/block_range_information.h
index 25d88785..48b5b1c1 100644
--- a/src/backend/x64/block_range_information.h
+++ b/src/backend/x64/block_range_information.h
@@ -14,7 +14,7 @@
 
 #include "frontend/ir/location_descriptor.h"
 
-namespace Dynarmic::BackendX64 {
+namespace Dynarmic::Backend::X64 {
 
 template <typename ProgramCounterType>
 class BlockRangeInformation {
@@ -27,4 +27,4 @@ private:
     boost::icl::interval_map<ProgramCounterType, std::set<IR::LocationDescriptor>> block_ranges;
 };
 
-} // namespace Dynarmic::BackendX64
+} // namespace Dynarmic::Backend::X64
diff --git a/src/backend/x64/callback.cpp b/src/backend/x64/callback.cpp
index 081f4817..3e0e87ad 100644
--- a/src/backend/x64/callback.cpp
+++ b/src/backend/x64/callback.cpp
@@ -7,7 +7,7 @@
 #include "backend/x64/callback.h"
 #include "backend/x64/block_of_code.h"
 
-namespace Dynarmic::BackendX64 {
+namespace Dynarmic::Backend::X64 {
 
 Callback::~Callback() = default;
 
@@ -38,4 +38,4 @@ void ArgCallback::EmitCallWithReturnPointer(BlockOfCode& code, std::function<void(Xbyak::Reg64, RegList)> fn) const {
 #endif
 }
 
-} // namespace Dynarmic::BackendX64
+} // namespace Dynarmic::Backend::X64
diff --git a/src/backend/x64/callback.h b/src/backend/x64/callback.h
--- a/src/backend/x64/callback.h
+++ b/src/backend/x64/callback.h
@@ -52,4 +52,4 @@ private:
     u64 arg;
 };
 
-} // namespace Dynarmic::BackendX64
+} // namespace Dynarmic::Backend::X64
diff --git a/src/backend/x64/constant_pool.cpp b/src/backend/x64/constant_pool.cpp
index ee94f55d..63134a03 100644
--- a/src/backend/x64/constant_pool.cpp
+++ b/src/backend/x64/constant_pool.cpp
@@ -10,7 +10,7 @@
 #include "backend/x64/constant_pool.h"
 #include "common/assert.h"
 
-namespace Dynarmic::BackendX64 {
+namespace Dynarmic::Backend::X64 {
 
 ConstantPool::ConstantPool(BlockOfCode& code, size_t size) : code(code), pool_size(size) {
     code.int3();
@@ -32,4 +32,4 @@ Xbyak::Address ConstantPool::GetConstant(const Xbyak::AddressFrame& frame, u64 lower, u64 upper) {
     return frame[code.rip + iter->second];
 }
 
-} // namespace Dynarmic::BackendX64
+} // namespace Dynarmic::Backend::X64
diff --git a/src/backend/x64/constant_pool.h b/src/backend/x64/constant_pool.h
index 2bb29c0f..65ebaaf1 100644
--- a/src/backend/x64/constant_pool.h
+++ b/src/backend/x64/constant_pool.h
@@ -13,7 +13,7 @@
 
 #include "common/common_types.h"
 
-namespace Dynarmic::BackendX64 {
+namespace Dynarmic::Backend::X64 {
 
 class BlockOfCode;
 
@@ -38,4 +38,4 @@ private:
     u8* current_pool_ptr;
 };
 
-} // namespace Dynarmic::BackendX64
+} // namespace Dynarmic::Backend::X64
diff --git a/src/backend/x64/devirtualize.h b/src/backend/x64/devirtualize.h
index 70ab12af..fd9d153d 100644
--- a/src/backend/x64/devirtualize.h
+++ b/src/backend/x64/devirtualize.h
@@ -16,7 +16,7 @@
 #include "common/common_types.h"
 
 namespace Dynarmic {
-namespace BackendX64 {
+namespace Backend::X64 {
 
 namespace impl {
 
@@ -78,5 +78,5 @@ ArgCallback Devirtualize(mp::class_type<decltype(mfp)>* this_) {
 #endif
 }
 
-} // namespace BackendX64
+} // namespace Backend::X64
 } // namespace Dynarmic
diff --git a/src/backend/x64/emit_x64.cpp b/src/backend/x64/emit_x64.cpp
index abfff99d..1a50677b 100644
--- a/src/backend/x64/emit_x64.cpp
+++ b/src/backend/x64/emit_x64.cpp
@@ -21,7 +21,7 @@
 // TODO: Have ARM flags in host flags and not have them use up GPR registers unless necessary.
 // TODO: Actually implement that proper instruction selector you've always wanted to sweetheart.
 
-namespace Dynarmic::BackendX64 {
+namespace Dynarmic::Backend::X64 {
 
 using namespace Xbyak::util;
 
@@ -365,4 +365,4 @@ void EmitX64::InvalidateBasicBlocks(const std::unordered_set<IR::LocationDescriptor>& locations) {
     }
 }
 
-} // namespace Dynarmic::BackendX64
+} // namespace Dynarmic::Backend::X64
diff --git a/src/backend/x64/emit_x64.h b/src/backend/x64/emit_x64.h
--- a/src/backend/x64/emit_x64.h
+++ b/src/backend/x64/emit_x64.h
@@ -118,4 +118,4 @@ protected:
     std::unordered_map<IR::LocationDescriptor, PatchInformation> patch_information;
 };
 
-} // namespace Dynarmic::BackendX64
+} // namespace Dynarmic::Backend::X64
diff --git a/src/backend/x64/emit_x64_aes.cpp b/src/backend/x64/emit_x64_aes.cpp
index ee611de9..b37e3160 100644
--- a/src/backend/x64/emit_x64_aes.cpp
+++ b/src/backend/x64/emit_x64_aes.cpp
@@ -11,7 +11,7 @@
 #include "common/crypto/aes.h"
 #include "frontend/ir/microinstruction.h"
 
-namespace Dynarmic::BackendX64 {
+namespace Dynarmic::Backend::X64 {
 
 using namespace Xbyak::util;
 namespace AES = Common::Crypto::AES;
@@ -73,4 +73,4 @@ void EmitX64::EmitAESMixColumns(EmitContext& ctx, IR::Inst* inst) {
     EmitAESFunction(args, ctx, code, inst, AES::MixColumns);
 }
 
-} // namespace Dynarmic::BackendX64
+} // namespace Dynarmic::Backend::X64
diff --git a/src/backend/x64/emit_x64_crc32.cpp b/src/backend/x64/emit_x64_crc32.cpp
index 1e30359b..2c839e5d 100644
--- a/src/backend/x64/emit_x64_crc32.cpp
+++ b/src/backend/x64/emit_x64_crc32.cpp
@@ -12,7 +12,7 @@
 #include "common/crypto/crc32.h"
 #include "frontend/ir/microinstruction.h"
 
-namespace Dynarmic::BackendX64 {
+namespace Dynarmic::Backend::X64 {
 
 using namespace Xbyak::util;
 namespace CRC32 = Common::Crypto::CRC32;
@@ -72,4 +72,4 @@ void EmitX64::EmitCRC32ISO64(EmitContext& ctx, IR::Inst* inst) {
     EmitCRC32ISO(code, ctx, inst, 64);
 }
 
-} // namespace Dynarmic::BackendX64
+} // namespace Dynarmic::Backend::X64
diff --git a/src/backend/x64/emit_x64_data_processing.cpp b/src/backend/x64/emit_x64_data_processing.cpp
index 5a291758..64bc3c45 100644
--- a/src/backend/x64/emit_x64_data_processing.cpp
+++ b/src/backend/x64/emit_x64_data_processing.cpp
@@ -15,7 +15,7 @@
 #include "frontend/ir/microinstruction.h"
 #include "frontend/ir/opcodes.h"
 
-namespace Dynarmic::BackendX64 {
+namespace Dynarmic::Backend::X64 {
 
 using namespace Xbyak::util;
 
@@ -214,11 +214,11 @@ static void EmitExtractRegister(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst, int bit_size) {
     ctx.reg_alloc.DefineValue(inst, result);
 }
 
-void EmitX64::EmitExtractRegister32(Dynarmic::BackendX64::EmitContext& ctx, IR::Inst* inst) {
+void EmitX64::EmitExtractRegister32(Dynarmic::Backend::X64::EmitContext& ctx, IR::Inst* inst) {
     EmitExtractRegister(code, ctx, inst, 32);
 }
 
-void EmitX64::EmitExtractRegister64(Dynarmic::BackendX64::EmitContext& ctx, IR::Inst* inst) {
+void EmitX64::EmitExtractRegister64(Dynarmic::Backend::X64::EmitContext& ctx, IR::Inst* inst) {
     EmitExtractRegister(code, ctx, inst, 64);
 }
 
@@ -1520,4 +1520,4 @@ void EmitX64::EmitMinUnsigned64(EmitContext& ctx, IR::Inst* inst) {
     ctx.reg_alloc.DefineValue(inst, y);
 }
 
-} // namespace Dynarmic::BackendX64
+} // namespace Dynarmic::Backend::X64
diff --git a/src/backend/x64/emit_x64_floating_point.cpp b/src/backend/x64/emit_x64_floating_point.cpp
index 45aee0ca..8edbad21 100644
--- a/src/backend/x64/emit_x64_floating_point.cpp
+++ b/src/backend/x64/emit_x64_floating_point.cpp
@@ -30,7 +30,7 @@
 #include "frontend/ir/basic_block.h"
 #include "frontend/ir/microinstruction.h"
 
-namespace Dynarmic::BackendX64 {
+namespace Dynarmic::Backend::X64 {
 
 using namespace Xbyak::util;
 
@@ -1562,4 +1562,4 @@ void EmitX64::EmitFPFixedU64ToSingle(EmitContext& ctx, IR::Inst* inst) {
     ctx.reg_alloc.DefineValue(inst, result);
 }
 
-} // namespace Dynarmic::BackendX64
+} // namespace Dynarmic::Backend::X64
diff --git a/src/backend/x64/emit_x64_packed.cpp b/src/backend/x64/emit_x64_packed.cpp
index 32847496..e6e6c1db 100644
--- a/src/backend/x64/emit_x64_packed.cpp
+++ b/src/backend/x64/emit_x64_packed.cpp
@@ -9,7 +9,7 @@
 #include "frontend/ir/microinstruction.h"
 #include "frontend/ir/opcodes.h"
 
-namespace Dynarmic::BackendX64 {
+namespace Dynarmic::Backend::X64 {
 
 using namespace Xbyak::util;
 
@@ -698,4 +698,4 @@ void EmitX64::EmitPackedSelect(EmitContext& ctx, IR::Inst* inst) {
     }
 }
 
-} // namespace Dynarmic::BackendX64
+} // namespace Dynarmic::Backend::X64
diff --git a/src/backend/x64/emit_x64_saturation.cpp b/src/backend/x64/emit_x64_saturation.cpp
index 0658b763..c1cc141a 100644
--- a/src/backend/x64/emit_x64_saturation.cpp
+++ b/src/backend/x64/emit_x64_saturation.cpp
@@ -17,7 +17,7 @@
 #include "frontend/ir/microinstruction.h"
 #include "frontend/ir/opcodes.h"
 
-namespace Dynarmic::BackendX64 {
+namespace Dynarmic::Backend::X64 {
 
 using namespace Xbyak::util;
 
@@ -321,4 +321,4 @@ void EmitX64::EmitUnsignedSaturation(EmitContext& ctx, IR::Inst* inst) {
     ctx.reg_alloc.DefineValue(inst, result);
 }
 
-} // namespace Dynarmic::BackendX64
+} // namespace Dynarmic::Backend::X64
diff --git a/src/backend/x64/emit_x64_sm4.cpp b/src/backend/x64/emit_x64_sm4.cpp
index e5ed798d..e990e3e7 100644
--- a/src/backend/x64/emit_x64_sm4.cpp
+++ b/src/backend/x64/emit_x64_sm4.cpp
@@ -9,7 +9,7 @@
 #include "common/crypto/sm4.h"
 #include "frontend/ir/microinstruction.h"
 
-namespace Dynarmic::BackendX64 {
+namespace Dynarmic::Backend::X64 {
 
 void EmitX64::EmitSM4AccessSubstitutionBox(EmitContext& ctx, IR::Inst* inst) {
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
@@ -18,4 +18,4 @@ void EmitX64::EmitSM4AccessSubstitutionBox(EmitContext& ctx, IR::Inst* inst) {
     code.CallFunction(&Common::Crypto::SM4::AccessSubstitutionBox);
 }
 
-} // namespace Dynarmic::BackendX64
+} // namespace Dynarmic::Backend::X64
diff --git a/src/backend/x64/emit_x64_vector.cpp b/src/backend/x64/emit_x64_vector.cpp
index 0f50eadb..a0e4b38d 100644
--- a/src/backend/x64/emit_x64_vector.cpp
+++ b/src/backend/x64/emit_x64_vector.cpp
@@ -22,7 +22,7 @@
 #include "frontend/ir/microinstruction.h"
 #include "frontend/ir/opcodes.h"
 
-namespace Dynarmic::BackendX64 {
+namespace Dynarmic::Backend::X64 {
 
 using namespace Xbyak::util;
 
@@ -4496,4 +4496,4 @@ void EmitX64::EmitZeroVector(EmitContext& ctx, IR::Inst* inst) {
     ctx.reg_alloc.DefineValue(inst, a);
 }
 
-} // namespace Dynarmic::BackendX64
+} // namespace Dynarmic::Backend::X64
diff --git a/src/backend/x64/emit_x64_vector_floating_point.cpp b/src/backend/x64/emit_x64_vector_floating_point.cpp
index 286227fb..fddb9611 100644
--- a/src/backend/x64/emit_x64_vector_floating_point.cpp
+++ b/src/backend/x64/emit_x64_vector_floating_point.cpp
@@ -30,7 +30,7 @@
 #include "frontend/ir/basic_block.h"
 #include "frontend/ir/microinstruction.h"
 
-namespace Dynarmic::BackendX64 {
+namespace Dynarmic::Backend::X64 {
 
 using namespace Xbyak::util;
 
@@ -1519,4 +1519,4 @@ void EmitX64::EmitFPVectorToUnsignedFixed64(EmitContext& ctx, IR::Inst* inst) {
     EmitFPVectorToFixed<64, true>(code, ctx, inst);
 }
 
-} // namespace Dynarmic::BackendX64
+} // namespace Dynarmic::Backend::X64
diff --git a/src/backend/x64/exception_handler_generic.cpp b/src/backend/x64/exception_handler_generic.cpp
index 5009ae70..e801cf45 100644
--- a/src/backend/x64/exception_handler_generic.cpp
+++ b/src/backend/x64/exception_handler_generic.cpp
@@ -6,7 +6,7 @@
 
 #include "backend/x64/block_of_code.h"
 
-namespace Dynarmic::BackendX64 {
+namespace Dynarmic::Backend::X64 {
 
 struct BlockOfCode::ExceptionHandler::Impl final {
 };
@@ -18,4 +18,4 @@ void BlockOfCode::ExceptionHandler::Register(BlockOfCode&) {
     // Do nothing
 }
 
-} // namespace Dynarmic::BackendX64
+} // namespace Dynarmic::Backend::X64
diff --git a/src/backend/x64/exception_handler_windows.cpp b/src/backend/x64/exception_handler_windows.cpp
index 47a6f73a..15b1a087 100644
--- a/src/backend/x64/exception_handler_windows.cpp
+++ b/src/backend/x64/exception_handler_windows.cpp
@@ -68,7 +68,7 @@ struct UNWIND_INFO {
     // With Flags == 0 there are no additional fields.
 };
 
-namespace Dynarmic::BackendX64 {
+namespace Dynarmic::Backend::X64 {
 
 struct PrologueInformation {
     std::vector<UNWIND_CODE> unwind_code;
@@ -197,4 +197,4 @@ void BlockOfCode::ExceptionHandler::Register(BlockOfCode& code) {
     impl = std::make_unique<Impl>(rfuncs, code.getCode());
 }
 
-} // namespace Dynarmic::BackendX64
+} // namespace Dynarmic::Backend::X64
diff --git a/src/backend/x64/hostloc.cpp b/src/backend/x64/hostloc.cpp
index fb090bd2..8e3a2f8d 100644
--- a/src/backend/x64/hostloc.cpp
+++ b/src/backend/x64/hostloc.cpp
@@ -8,7 +8,7 @@
 
 #include "backend/x64/hostloc.h"
 
-namespace Dynarmic::BackendX64 {
+namespace Dynarmic::Backend::X64 {
 
 Xbyak::Reg64 HostLocToReg64(HostLoc loc) {
     ASSERT(HostLocIsGPR(loc));
@@ -20,4 +20,4 @@ Xbyak::Xmm HostLocToXmm(HostLoc loc) {
     return Xbyak::Xmm(static_cast<int>(loc) - static_cast<int>(HostLoc::XMM0));
 }
 
-} // namespace Dynarmic::BackendX64
+} // namespace Dynarmic::Backend::X64
diff --git a/src/backend/x64/hostloc.h b/src/backend/x64/hostloc.h
index 177e71c6..f05dcc40 100644
--- a/src/backend/x64/hostloc.h
+++ b/src/backend/x64/hostloc.h
@@ -10,7 +10,7 @@
 #include "common/assert.h"
 #include "common/common_types.h"
 
-namespace Dynarmic::BackendX64 {
+namespace Dynarmic::Backend::X64 {
 
 enum class HostLoc {
     // Ordering of the registers is intentional. See also: HostLocToX64.
@@ -122,4 +122,4 @@ Xbyak::Address SpillToOpArg(HostLoc loc) {
     return JitStateType::GetSpillLocationFromIndex(i);
 }
 
-} // namespace Dynarmic::BackendX64
+} // namespace Dynarmic::Backend::X64
diff --git a/src/backend/x64/jitstate_info.h b/src/backend/x64/jitstate_info.h
index 95f0d290..0cc475c8 100644
--- a/src/backend/x64/jitstate_info.h
+++ b/src/backend/x64/jitstate_info.h
@@ -8,7 +8,7 @@
 
 #include <cstddef>
 
-namespace Dynarmic::BackendX64 {
+namespace Dynarmic::Backend::X64 {
 
 struct JitStateInfo {
     template <typename JitStateType>
@@ -39,4 +39,4 @@ struct JitStateInfo {
     const size_t offsetof_fpsr_qc;
 };
 
-} // namespace Dynarmic::BackendX64
+} // namespace Dynarmic::Backend::X64
diff --git a/src/backend/x64/oparg.h b/src/backend/x64/oparg.h
index ac2df5f8..266c1f36 100644
--- a/src/backend/x64/oparg.h
+++ b/src/backend/x64/oparg.h
@@ -10,7 +10,7 @@
 
 #include "common/assert.h"
 
-namespace Dynarmic::BackendX64 {
+namespace Dynarmic::Backend::X64 {
 
 struct OpArg {
     OpArg() : type(Type::Operand), inner_operand() {}
@@ -75,4 +75,4 @@ private:
     };
 };
 
-} // namespace Dynarmic::BackendX64
+} // namespace Dynarmic::Backend::X64
diff --git a/src/backend/x64/perf_map.cpp b/src/backend/x64/perf_map.cpp
index fee125de..4c494774 100644
--- a/src/backend/x64/perf_map.cpp
+++ b/src/backend/x64/perf_map.cpp
@@ -19,7 +19,7 @@
 
 #include "common/common_types.h"
 
-namespace Dynarmic::BackendX64 {
+namespace Dynarmic::Backend::X64 {
 
 namespace {
 std::mutex mutex;
@@ -72,11 +72,11 @@ void PerfMapClear() {
     OpenFile();
 }
 
-} // namespace Dynarmic::BackendX64
+} // namespace Dynarmic::Backend::X64
 
 #else
 
-namespace Dynarmic::BackendX64 {
+namespace Dynarmic::Backend::X64 {
 
 namespace detail {
 void PerfMapRegister(const void*, const void*, std::string_view) {}
@@ -84,6 +84,6 @@ void PerfMapRegister(const void*, const void*, std::string_view) {}
 
 void PerfMapClear() {}
 
-} // namespace Dynarmic::BackendX64
+} // namespace Dynarmic::Backend::X64
 
 #endif
diff --git a/src/backend/x64/perf_map.h b/src/backend/x64/perf_map.h
index 9caaf5e3..aafacb09 100644
--- a/src/backend/x64/perf_map.h
+++ b/src/backend/x64/perf_map.h
@@ -10,7 +10,7 @@
 
 #include "common/cast_util.h"
 
-namespace Dynarmic::BackendX64 {
+namespace Dynarmic::Backend::X64 {
 
 namespace detail {
 void PerfMapRegister(const void* start, const void* end, std::string_view friendly_name);
@@ -23,4 +23,4 @@ void PerfMapRegister(T start, const void* end, std::string_view friendly_name) {
 
 void PerfMapClear();
 
-} // namespace Dynarmic::BackendX64
+} // namespace Dynarmic::Backend::X64
diff --git a/src/backend/x64/reg_alloc.cpp b/src/backend/x64/reg_alloc.cpp
index 611e4689..3c042f5a 100644
--- a/src/backend/x64/reg_alloc.cpp
+++ b/src/backend/x64/reg_alloc.cpp
@@ -15,7 +15,7 @@
 #include "backend/x64/reg_alloc.h"
 #include "common/assert.h"
 
-namespace Dynarmic::BackendX64 {
+namespace Dynarmic::Backend::X64 {
 
 #define MAYBE_AVX(OPCODE, ...) \
     [&] { \
@@ -681,4 +681,4 @@ void RegAlloc::EmitExchange(HostLoc a, HostLoc b) {
     }
 }
 
-} // namespace Dynarmic::BackendX64
+} // namespace Dynarmic::Backend::X64
diff --git a/src/backend/x64/reg_alloc.h b/src/backend/x64/reg_alloc.h
index 982bc7d2..3edfa263 100644
--- a/src/backend/x64/reg_alloc.h
+++ b/src/backend/x64/reg_alloc.h
@@ -22,7 +22,7 @@
 #include "frontend/ir/microinstruction.h"
 #include "frontend/ir/value.h"
 
-namespace Dynarmic::BackendX64 {
+namespace Dynarmic::Backend::X64 {
 
 class RegAlloc;
 
@@ -162,4 +162,4 @@ private:
     void EmitExchange(HostLoc a, HostLoc b);
 };
 
-} // namespace Dynarmic::BackendX64
+} // namespace Dynarmic::Backend::X64