Migrate to mcl

This commit is contained in:
Merry 2022-04-19 15:36:26 +01:00
parent ed9955891f
commit 78b4ba10c9
181 changed files with 987 additions and 1807 deletions

View file

@ -1,9 +1,5 @@
add_library(dynarmic
common/assert.cpp
common/assert.h
common/bit_util.h
common/cast_util.h
common/common_types.h
common/crypto/aes.cpp
common/crypto/aes.h
common/crypto/crc32.cpp
@ -46,18 +42,14 @@ add_library(dynarmic
common/fp/unpacked.cpp
common/fp/unpacked.h
common/fp/util.h
common/intrusive_list.h
common/iterator_util.h
common/llvm_disassemble.cpp
common/llvm_disassemble.h
common/lut_from_list.h
common/macro_util.h
common/math_util.cpp
common/math_util.h
common/memory_pool.cpp
common/memory_pool.h
common/safe_ops.h
common/scope_exit.h
common/spin_lock.h
common/string_util.h
common/u128.cpp

View file

@ -11,6 +11,10 @@
#include <fmt/format.h>
#include <fmt/ostream.h>
#include <mcl/assert.hpp>
#include <mcl/bit/bit_field.hpp>
#include <mcl/scope_exit.hpp>
#include <mcl/stdint.hpp>
#include "dynarmic/backend/x64/a32_jitstate.h"
#include "dynarmic/backend/x64/abi.h"
@ -20,10 +24,6 @@
#include "dynarmic/backend/x64/nzcv_util.h"
#include "dynarmic/backend/x64/perf_map.h"
#include "dynarmic/backend/x64/stack_layout.h"
#include "dynarmic/common/assert.h"
#include "dynarmic/common/bit_util.h"
#include "dynarmic/common/common_types.h"
#include "dynarmic/common/scope_exit.h"
#include "dynarmic/common/variant_util.h"
#include "dynarmic/frontend/A32/a32_location_descriptor.h"
#include "dynarmic/frontend/A32/a32_types.h"
@ -626,10 +626,10 @@ void A32EmitX64::EmitA32SetGEFlagsCompressed(A32EmitContext& ctx, IR::Inst* inst
if (args[0].IsImmediate()) {
const u32 imm = args[0].GetImmediateU32();
u32 ge = 0;
ge |= Common::Bit<19>(imm) ? 0xFF000000 : 0;
ge |= Common::Bit<18>(imm) ? 0x00FF0000 : 0;
ge |= Common::Bit<17>(imm) ? 0x0000FF00 : 0;
ge |= Common::Bit<16>(imm) ? 0x000000FF : 0;
ge |= mcl::bit::get_bit<19>(imm) ? 0xFF000000 : 0;
ge |= mcl::bit::get_bit<18>(imm) ? 0x00FF0000 : 0;
ge |= mcl::bit::get_bit<17>(imm) ? 0x0000FF00 : 0;
ge |= mcl::bit::get_bit<16>(imm) ? 0x000000FF : 0;
code.mov(dword[r15 + offsetof(A32JitState, cpsr_ge)], ge);
} else if (code.HasHostFeature(HostFeature::FastBMI2)) {
@ -689,8 +689,8 @@ void A32EmitX64::EmitA32BXWritePC(A32EmitContext& ctx, IR::Inst* inst) {
if (arg.IsImmediate()) {
const u32 new_pc = arg.GetImmediateU32();
const u32 mask = Common::Bit<0>(new_pc) ? 0xFFFFFFFE : 0xFFFFFFFC;
const u32 new_upper = upper_without_t | (Common::Bit<0>(new_pc) ? 1 : 0);
const u32 mask = mcl::bit::get_bit<0>(new_pc) ? 0xFFFFFFFE : 0xFFFFFFFC;
const u32 new_upper = upper_without_t | (mcl::bit::get_bit<0>(new_pc) ? 1 : 0);
code.mov(MJitStateReg(A32::Reg::PC), new_pc & mask);
code.mov(dword[r15 + offsetof(A32JitState, upper_location_descriptor)], new_upper);

View file

@ -8,6 +8,10 @@
#include <boost/icl/interval_set.hpp>
#include <fmt/format.h>
#include <mcl/assert.hpp>
#include <mcl/bit_cast.hpp>
#include <mcl/scope_exit.hpp>
#include <mcl/stdint.hpp>
#include "dynarmic/backend/x64/a32_emit_x64.h"
#include "dynarmic/backend/x64/a32_jitstate.h"
@ -15,11 +19,7 @@
#include "dynarmic/backend/x64/callback.h"
#include "dynarmic/backend/x64/devirtualize.h"
#include "dynarmic/backend/x64/jitstate_info.h"
#include "dynarmic/common/assert.h"
#include "dynarmic/common/atomic.h"
#include "dynarmic/common/cast_util.h"
#include "dynarmic/common/common_types.h"
#include "dynarmic/common/scope_exit.h"
#include "dynarmic/common/x64_disassemble.h"
#include "dynarmic/frontend/A32/translate/a32_translate.h"
#include "dynarmic/interface/A32/a32.h"
@ -44,10 +44,10 @@ static RunCodeCallbacks GenRunCodeCallbacks(A32::UserCallbacks* cb, CodePtr (*Lo
static std::function<void(BlockOfCode&)> GenRCP(const A32::UserConfig& conf) {
return [conf](BlockOfCode& code) {
if (conf.page_table) {
code.mov(code.r14, Common::BitCast<u64>(conf.page_table));
code.mov(code.r14, mcl::bit_cast<u64>(conf.page_table));
}
if (conf.fastmem_pointer) {
code.mov(code.r13, Common::BitCast<u64>(conf.fastmem_pointer));
code.mov(code.r13, mcl::bit_cast<u64>(conf.fastmem_pointer));
}
};
}

View file

@ -5,11 +5,12 @@
#include "dynarmic/backend/x64/a32_jitstate.h"
#include <mcl/assert.hpp>
#include <mcl/bit/bit_field.hpp>
#include <mcl/stdint.hpp>
#include "dynarmic/backend/x64/block_of_code.h"
#include "dynarmic/backend/x64/nzcv_util.h"
#include "dynarmic/common/assert.h"
#include "dynarmic/common/bit_util.h"
#include "dynarmic/common/common_types.h"
#include "dynarmic/frontend/A32/a32_location_descriptor.h"
namespace Dynarmic::Backend::X64 {
@ -57,13 +58,13 @@ u32 A32JitState::Cpsr() const {
// Q flag
cpsr |= cpsr_q ? 1 << 27 : 0;
// GE flags
cpsr |= Common::Bit<31>(cpsr_ge) ? 1 << 19 : 0;
cpsr |= Common::Bit<23>(cpsr_ge) ? 1 << 18 : 0;
cpsr |= Common::Bit<15>(cpsr_ge) ? 1 << 17 : 0;
cpsr |= Common::Bit<7>(cpsr_ge) ? 1 << 16 : 0;
cpsr |= mcl::bit::get_bit<31>(cpsr_ge) ? 1 << 19 : 0;
cpsr |= mcl::bit::get_bit<23>(cpsr_ge) ? 1 << 18 : 0;
cpsr |= mcl::bit::get_bit<15>(cpsr_ge) ? 1 << 17 : 0;
cpsr |= mcl::bit::get_bit<7>(cpsr_ge) ? 1 << 16 : 0;
// E flag, T flag
cpsr |= Common::Bit<1>(upper_location_descriptor) ? 1 << 9 : 0;
cpsr |= Common::Bit<0>(upper_location_descriptor) ? 1 << 5 : 0;
cpsr |= mcl::bit::get_bit<1>(upper_location_descriptor) ? 1 << 9 : 0;
cpsr |= mcl::bit::get_bit<0>(upper_location_descriptor) ? 1 << 5 : 0;
// IT state
cpsr |= static_cast<u32>(upper_location_descriptor & 0b11111100'00000000);
cpsr |= static_cast<u32>(upper_location_descriptor & 0b00000011'00000000) << 17;
@ -77,18 +78,18 @@ void A32JitState::SetCpsr(u32 cpsr) {
// NZCV flags
cpsr_nzcv = NZCV::ToX64(cpsr);
// Q flag
cpsr_q = Common::Bit<27>(cpsr) ? 1 : 0;
cpsr_q = mcl::bit::get_bit<27>(cpsr) ? 1 : 0;
// GE flags
cpsr_ge = 0;
cpsr_ge |= Common::Bit<19>(cpsr) ? 0xFF000000 : 0;
cpsr_ge |= Common::Bit<18>(cpsr) ? 0x00FF0000 : 0;
cpsr_ge |= Common::Bit<17>(cpsr) ? 0x0000FF00 : 0;
cpsr_ge |= Common::Bit<16>(cpsr) ? 0x000000FF : 0;
cpsr_ge |= mcl::bit::get_bit<19>(cpsr) ? 0xFF000000 : 0;
cpsr_ge |= mcl::bit::get_bit<18>(cpsr) ? 0x00FF0000 : 0;
cpsr_ge |= mcl::bit::get_bit<17>(cpsr) ? 0x0000FF00 : 0;
cpsr_ge |= mcl::bit::get_bit<16>(cpsr) ? 0x000000FF : 0;
upper_location_descriptor &= 0xFFFF0000;
// E flag, T flag
upper_location_descriptor |= Common::Bit<9>(cpsr) ? 2 : 0;
upper_location_descriptor |= Common::Bit<5>(cpsr) ? 1 : 0;
upper_location_descriptor |= mcl::bit::get_bit<9>(cpsr) ? 2 : 0;
upper_location_descriptor |= mcl::bit::get_bit<5>(cpsr) ? 1 : 0;
// IT state
upper_location_descriptor |= (cpsr >> 0) & 0b11111100'00000000;
upper_location_descriptor |= (cpsr >> 17) & 0b00000011'00000000;
@ -197,7 +198,7 @@ void A32JitState::SetFpscr(u32 FPSCR) {
// Cumulative flags IDC, IOC, IXC, UFC, OFC, DZC
fpsr_exc = FPSCR & 0x9F;
if (Common::Bit<24>(FPSCR)) {
if (mcl::bit::get_bit<24>(FPSCR)) {
// VFP Flush to Zero
guest_MXCSR |= (1 << 15); // SSE Flush to Zero
guest_MXCSR |= (1 << 6); // SSE Denormals are Zero

View file

@ -7,7 +7,7 @@
#include <array>
#include "dynarmic/common/common_types.h"
#include <mcl/stdint.hpp>
namespace Dynarmic::Backend::X64 {

View file

@ -7,6 +7,9 @@
#include <fmt/format.h>
#include <fmt/ostream.h>
#include <mcl/assert.hpp>
#include <mcl/scope_exit.hpp>
#include <mcl/stdint.hpp>
#include <mcl/type_traits/integer_of_size.hpp>
#include "dynarmic/backend/x64/a64_jitstate.h"
@ -17,10 +20,6 @@
#include "dynarmic/backend/x64/nzcv_util.h"
#include "dynarmic/backend/x64/perf_map.h"
#include "dynarmic/backend/x64/stack_layout.h"
#include "dynarmic/common/assert.h"
#include "dynarmic/common/bit_util.h"
#include "dynarmic/common/common_types.h"
#include "dynarmic/common/scope_exit.h"
#include "dynarmic/frontend/A64/a64_location_descriptor.h"
#include "dynarmic/frontend/A64/a64_types.h"
#include "dynarmic/ir/basic_block.h"

View file

@ -8,15 +8,16 @@
#include <mutex>
#include <boost/icl/interval_set.hpp>
#include <mcl/assert.hpp>
#include <mcl/bit_cast.hpp>
#include <mcl/scope_exit.hpp>
#include "dynarmic/backend/x64/a64_emit_x64.h"
#include "dynarmic/backend/x64/a64_jitstate.h"
#include "dynarmic/backend/x64/block_of_code.h"
#include "dynarmic/backend/x64/devirtualize.h"
#include "dynarmic/backend/x64/jitstate_info.h"
#include "dynarmic/common/assert.h"
#include "dynarmic/common/atomic.h"
#include "dynarmic/common/scope_exit.h"
#include "dynarmic/common/x64_disassemble.h"
#include "dynarmic/frontend/A64/translate/a64_translate.h"
#include "dynarmic/interface/A64/a64.h"
@ -39,10 +40,10 @@ static RunCodeCallbacks GenRunCodeCallbacks(A64::UserCallbacks* cb, CodePtr (*Lo
static std::function<void(BlockOfCode&)> GenRCP(const A64::UserConfig& conf) {
return [conf](BlockOfCode& code) {
if (conf.page_table) {
code.mov(code.r14, Common::BitCast<u64>(conf.page_table));
code.mov(code.r14, mcl::bit_cast<u64>(conf.page_table));
}
if (conf.fastmem_pointer) {
code.mov(code.r13, Common::BitCast<u64>(conf.fastmem_pointer));
code.mov(code.r13, mcl::bit_cast<u64>(conf.fastmem_pointer));
}
};
}

View file

@ -5,7 +5,8 @@
#include "dynarmic/backend/x64/a64_jitstate.h"
#include "dynarmic/common/bit_util.h"
#include <mcl/bit/bit_field.hpp>
#include "dynarmic/frontend/A64/a64_location_descriptor.h"
namespace Dynarmic::Backend::X64 {
@ -65,7 +66,7 @@ void A64JitState::SetFpcr(u32 value) {
const std::array<u32, 4> MXCSR_RMode{0x0, 0x4000, 0x2000, 0x6000};
guest_MXCSR |= MXCSR_RMode[(value >> 22) & 0x3];
if (Common::Bit<24>(value)) {
if (mcl::bit::get_bit<24>(value)) {
guest_MXCSR |= (1 << 15); // SSE Flush to Zero
guest_MXCSR |= (1 << 6); // SSE Denormals are Zero
}

View file

@ -7,8 +7,9 @@
#include <array>
#include <mcl/stdint.hpp>
#include "dynarmic/backend/x64/nzcv_util.h"
#include "dynarmic/common/common_types.h"
#include "dynarmic/frontend/A64/a64_location_descriptor.h"
namespace Dynarmic::Backend::X64 {

View file

@ -8,11 +8,11 @@
#include <algorithm>
#include <vector>
#include <mcl/iterator/reverse.hpp>
#include <mcl/stdint.hpp>
#include <xbyak/xbyak.h>
#include "dynarmic/backend/x64/block_of_code.h"
#include "dynarmic/common/common_types.h"
#include "dynarmic/common/iterator_util.h"
namespace Dynarmic::Backend::X64 {
@ -97,7 +97,7 @@ void ABI_PopRegistersAndAdjustStack(BlockOfCode& code, size_t frame_size, const
code.add(rsp, u32(frame_info.stack_subtraction));
}
for (HostLoc gpr : Common::Reverse(regs)) {
for (HostLoc gpr : mcl::iterator::reverse(regs)) {
if (HostLocIsGPR(gpr)) {
code.pop(HostLocToReg64(gpr));
}

View file

@ -6,8 +6,9 @@
#include <array>
#include <mcl/stdint.hpp>
#include "dynarmic/backend/x64/hostloc.h"
#include "dynarmic/common/common_types.h"
namespace Dynarmic::Backend::X64 {

View file

@ -15,6 +15,8 @@
#include <array>
#include <cstring>
#include <mcl/assert.hpp>
#include <mcl/bit/bit_field.hpp>
#include <xbyak/xbyak.h>
#include "dynarmic/backend/x64/a32_jitstate.h"
@ -22,8 +24,6 @@
#include "dynarmic/backend/x64/hostloc.h"
#include "dynarmic/backend/x64/perf_map.h"
#include "dynarmic/backend/x64/stack_layout.h"
#include "dynarmic/common/assert.h"
#include "dynarmic/common/bit_util.h"
namespace Dynarmic::Backend::X64 {
@ -134,8 +134,8 @@ HostFeature GetHostFeatures() {
if (cpu_info.has(Cpu::tAMD)) {
std::array<u32, 4> data{};
cpu_info.getCpuid(1, data.data());
const u32 family_base = Common::Bits<8, 11>(data[0]);
const u32 family_extended = Common::Bits<20, 27>(data[0]);
const u32 family_base = mcl::bit::get_bits<8, 11>(data[0]);
const u32 family_extended = mcl::bit::get_bits<20, 27>(data[0]);
const u32 family = family_base + family_extended;
if (family >= 0x19)
features |= HostFeature::FastBMI2;

View file

@ -10,6 +10,7 @@
#include <memory>
#include <type_traits>
#include <mcl/stdint.hpp>
#include <xbyak/xbyak.h>
#include <xbyak/xbyak_util.h>
@ -19,7 +20,6 @@
#include "dynarmic/backend/x64/host_feature.h"
#include "dynarmic/backend/x64/jitstate_info.h"
#include "dynarmic/common/cast_util.h"
#include "dynarmic/common/common_types.h"
#include "dynarmic/interface/halt_reason.h"
namespace Dynarmic::Backend::X64 {

View file

@ -7,10 +7,9 @@
#include <boost/icl/interval_map.hpp>
#include <boost/icl/interval_set.hpp>
#include <mcl/stdint.hpp>
#include <tsl/robin_set.h>
#include "dynarmic/common/common_types.h"
namespace Dynarmic::Backend::X64 {
template<typename ProgramCounterType>

View file

@ -8,10 +8,9 @@
#include <functional>
#include <vector>
#include <mcl/stdint.hpp>
#include <xbyak/xbyak.h>
#include "dynarmic/common/common_types.h"
namespace Dynarmic::Backend::X64 {
using RegList = std::vector<Xbyak::Reg64>;

View file

@ -7,8 +7,9 @@
#include <cstring>
#include <mcl/assert.hpp>
#include "dynarmic/backend/x64/block_of_code.h"
#include "dynarmic/common/assert.h"
namespace Dynarmic::Backend::X64 {

View file

@ -8,11 +8,10 @@
#include <bit>
#include <utility>
#include <mcl/stdint.hpp>
#include <tsl/robin_map.h>
#include <xbyak/xbyak.h>
#include "dynarmic/common/common_types.h"
namespace Dynarmic::Backend::X64 {
class BlockOfCode;

View file

@ -7,8 +7,9 @@
#include <optional>
#include "dynarmic/common/bit_util.h"
#include "dynarmic/common/common_types.h"
#include <mcl/bit/bit_field.hpp>
#include <mcl/stdint.hpp>
#include "dynarmic/common/fp/rounding_mode.h"
namespace Dynarmic::Backend::X64 {
@ -124,14 +125,14 @@ constexpr u32 FixupLUT(FpFixup src_qnan = FpFixup::A,
FpFixup src_pos = FpFixup::A,
FpFixup src_neg = FpFixup::A) {
u32 fixup_lut = 0;
fixup_lut = Common::ModifyBits<0, 3, u32>(fixup_lut, static_cast<u32>(src_qnan));
fixup_lut = Common::ModifyBits<4, 7, u32>(fixup_lut, static_cast<u32>(src_snan));
fixup_lut = Common::ModifyBits<8, 11, u32>(fixup_lut, static_cast<u32>(src_zero));
fixup_lut = Common::ModifyBits<12, 15, u32>(fixup_lut, static_cast<u32>(src_posone));
fixup_lut = Common::ModifyBits<16, 19, u32>(fixup_lut, static_cast<u32>(src_neginf));
fixup_lut = Common::ModifyBits<20, 23, u32>(fixup_lut, static_cast<u32>(src_posinf));
fixup_lut = Common::ModifyBits<24, 27, u32>(fixup_lut, static_cast<u32>(src_pos));
fixup_lut = Common::ModifyBits<28, 31, u32>(fixup_lut, static_cast<u32>(src_neg));
fixup_lut = mcl::bit::set_bits<0, 3, u32>(fixup_lut, static_cast<u32>(src_qnan));
fixup_lut = mcl::bit::set_bits<4, 7, u32>(fixup_lut, static_cast<u32>(src_snan));
fixup_lut = mcl::bit::set_bits<8, 11, u32>(fixup_lut, static_cast<u32>(src_zero));
fixup_lut = mcl::bit::set_bits<12, 15, u32>(fixup_lut, static_cast<u32>(src_posone));
fixup_lut = mcl::bit::set_bits<16, 19, u32>(fixup_lut, static_cast<u32>(src_neginf));
fixup_lut = mcl::bit::set_bits<20, 23, u32>(fixup_lut, static_cast<u32>(src_posinf));
fixup_lut = mcl::bit::set_bits<24, 27, u32>(fixup_lut, static_cast<u32>(src_pos));
fixup_lut = mcl::bit::set_bits<28, 31, u32>(fixup_lut, static_cast<u32>(src_neg));
return fixup_lut;
}
@ -153,8 +154,8 @@ enum class FpRangeSign : u8 {
// Generates 8-bit immediate LUT for vrange instruction
constexpr u8 FpRangeLUT(FpRangeSelect range_select, FpRangeSign range_sign) {
u8 range_lut = 0;
range_lut = Common::ModifyBits<0, 1, u8>(range_lut, static_cast<u8>(range_select));
range_lut = Common::ModifyBits<2, 3, u8>(range_lut, static_cast<u8>(range_sign));
range_lut = mcl::bit::set_bits<0, 1, u8>(range_lut, static_cast<u8>(range_select));
range_lut = mcl::bit::set_bits<2, 3, u8>(range_lut, static_cast<u8>(range_sign));
return range_lut;
}

View file

@ -8,11 +8,11 @@
#include <cstring>
#include <utility>
#include <mcl/bit_cast.hpp>
#include <mcl/stdint.hpp>
#include <mcl/type_traits/function_info.hpp>
#include "dynarmic/backend/x64/callback.h"
#include "dynarmic/common/cast_util.h"
#include "dynarmic/common/common_types.h"
namespace Dynarmic {
namespace Backend::X64 {
@ -39,7 +39,7 @@ ArgCallback DevirtualizeGeneric(mcl::class_type<decltype(mfp)>* this_) {
template<auto mfp>
ArgCallback DevirtualizeWindows(mcl::class_type<decltype(mfp)>* this_) {
static_assert(sizeof(mfp) == 8);
return ArgCallback{Common::BitCast<u64>(mfp), reinterpret_cast<u64>(this_)};
return ArgCallback{mcl::bit_cast<u64>(mfp), reinterpret_cast<u64>(this_)};
}
template<auto mfp>
@ -50,7 +50,7 @@ ArgCallback DevirtualizeItanium(mcl::class_type<decltype(mfp)>* this_) {
u64 ptr;
/// The required adjustment to `this`, prior to the call.
u64 adj;
} mfp_struct = Common::BitCast<MemberFunctionPointer>(mfp);
} mfp_struct = mcl::bit_cast<MemberFunctionPointer>(mfp);
static_assert(sizeof(MemberFunctionPointer) == 16);
static_assert(sizeof(MemberFunctionPointer) == sizeof(mfp));
@ -58,8 +58,8 @@ ArgCallback DevirtualizeItanium(mcl::class_type<decltype(mfp)>* this_) {
u64 fn_ptr = mfp_struct.ptr;
u64 this_ptr = reinterpret_cast<u64>(this_) + mfp_struct.adj;
if (mfp_struct.ptr & 1) {
u64 vtable = Common::BitCastPointee<u64>(this_ptr);
fn_ptr = Common::BitCastPointee<u64>(vtable + fn_ptr - 1);
u64 vtable = mcl::bit_cast_pointee<u64>(this_ptr);
fn_ptr = mcl::bit_cast_pointee<u64>(vtable + fn_ptr - 1);
}
return ArgCallback{fn_ptr, this_ptr};
}

View file

@ -7,16 +7,16 @@
#include <iterator>
#include <mcl/assert.hpp>
#include <mcl/bit/bit_field.hpp>
#include <mcl/scope_exit.hpp>
#include <mcl/stdint.hpp>
#include <tsl/robin_set.h>
#include "dynarmic/backend/x64/block_of_code.h"
#include "dynarmic/backend/x64/nzcv_util.h"
#include "dynarmic/backend/x64/perf_map.h"
#include "dynarmic/backend/x64/stack_layout.h"
#include "dynarmic/common/assert.h"
#include "dynarmic/common/bit_util.h"
#include "dynarmic/common/common_types.h"
#include "dynarmic/common/scope_exit.h"
#include "dynarmic/common/variant_util.h"
#include "dynarmic/ir/basic_block.h"
#include "dynarmic/ir/microinstruction.h"
@ -164,10 +164,10 @@ void EmitX64::EmitNZCVFromPackedFlags(EmitContext& ctx, IR::Inst* inst) {
if (args[0].IsImmediate()) {
const Xbyak::Reg32 nzcv = ctx.reg_alloc.ScratchGpr().cvt32();
u32 value = 0;
value |= Common::Bit<31>(args[0].GetImmediateU32()) ? (1 << 15) : 0;
value |= Common::Bit<30>(args[0].GetImmediateU32()) ? (1 << 14) : 0;
value |= Common::Bit<29>(args[0].GetImmediateU32()) ? (1 << 8) : 0;
value |= Common::Bit<28>(args[0].GetImmediateU32()) ? (1 << 0) : 0;
value |= mcl::bit::get_bit<31>(args[0].GetImmediateU32()) ? (1 << 15) : 0;
value |= mcl::bit::get_bit<30>(args[0].GetImmediateU32()) ? (1 << 14) : 0;
value |= mcl::bit::get_bit<29>(args[0].GetImmediateU32()) ? (1 << 8) : 0;
value |= mcl::bit::get_bit<28>(args[0].GetImmediateU32()) ? (1 << 0) : 0;
code.mov(nzcv, value);
ctx.reg_alloc.DefineValue(inst, nzcv);
} else if (code.HasHostFeature(HostFeature::FastBMI2)) {

View file

@ -11,13 +11,13 @@
#include <type_traits>
#include <vector>
#include <mcl/bitsizeof.hpp>
#include <tsl/robin_map.h>
#include <tsl/robin_set.h>
#include <xbyak/xbyak_util.h>
#include "dynarmic/backend/x64/exception_handler.h"
#include "dynarmic/backend/x64/reg_alloc.h"
#include "dynarmic/common/bit_util.h"
#include "dynarmic/common/fp/fpcr.h"
#include "dynarmic/ir/location_descriptor.h"
#include "dynarmic/ir/terminal.h"
@ -41,10 +41,10 @@ using A64FullVectorWidth = std::integral_constant<size_t, 128>;
// relative to the size of a vector register. e.g. T = u32 would result
// in a std::array<u32, 4>.
template<typename T>
using VectorArray = std::array<T, A64FullVectorWidth::value / Common::BitSize<T>()>;
using VectorArray = std::array<T, A64FullVectorWidth::value / mcl::bitsizeof<T>>;
template<typename T>
using HalfVectorArray = std::array<T, A64FullVectorWidth::value / Common::BitSize<T>() / 2>;
using HalfVectorArray = std::array<T, A64FullVectorWidth::value / mcl::bitsizeof<T> / 2>;
struct EmitContext {
EmitContext(RegAlloc& reg_alloc, IR::Block& block);

View file

@ -3,10 +3,11 @@
* SPDX-License-Identifier: 0BSD
*/
#include <mcl/stdint.hpp>
#include "dynarmic/backend/x64/abi.h"
#include "dynarmic/backend/x64/block_of_code.h"
#include "dynarmic/backend/x64/emit_x64.h"
#include "dynarmic/common/common_types.h"
#include "dynarmic/common/crypto/aes.h"
#include "dynarmic/ir/microinstruction.h"

View file

@ -6,10 +6,11 @@
#include <cstddef>
#include <type_traits>
#include <mcl/assert.hpp>
#include <mcl/stdint.hpp>
#include "dynarmic/backend/x64/block_of_code.h"
#include "dynarmic/backend/x64/emit_x64.h"
#include "dynarmic/common/assert.h"
#include "dynarmic/common/common_types.h"
#include "dynarmic/ir/basic_block.h"
#include "dynarmic/ir/microinstruction.h"
#include "dynarmic/ir/opcodes.h"

View file

@ -7,20 +7,20 @@
#include <type_traits>
#include <utility>
#include <mcl/assert.hpp>
#include <mcl/mp/metavalue/lift_value.hpp>
#include <mcl/mp/typelist/cartesian_product.hpp>
#include <mcl/mp/typelist/lift_sequence.hpp>
#include <mcl/mp/typelist/list.hpp>
#include <mcl/mp/typelist/lower_to_tuple.hpp>
#include <mcl/stdint.hpp>
#include <mcl/type_traits/integer_of_size.hpp>
#include "dynarmic/backend/x64/abi.h"
#include "dynarmic/backend/x64/block_of_code.h"
#include "dynarmic/backend/x64/constants.h"
#include "dynarmic/backend/x64/emit_x64.h"
#include "dynarmic/common/assert.h"
#include "dynarmic/common/cast_util.h"
#include "dynarmic/common/common_types.h"
#include "dynarmic/common/fp/fpcr.h"
#include "dynarmic/common/fp/fpsr.h"
#include "dynarmic/common/fp/info.h"

View file

@ -3,7 +3,7 @@
* SPDX-License-Identifier: 0BSD
*/
#include "dynarmic/common/macro_util.h"
#include <mcl/macro/concatenate_tokens.hpp>
#define AxxEmitX64 CONCATENATE_TOKENS(Axx, EmitX64)
#define AxxEmitContext CONCATENATE_TOKENS(Axx, EmitContext)
@ -98,10 +98,10 @@ void AxxEmitX64::EmitMemoryRead(AxxEmitContext& ctx, IR::Inst* inst) {
const auto location = EmitReadMemoryMov<bitsize>(code, value_idx, src_ptr, ordered);
fastmem_patch_info.emplace(
Common::BitCast<u64>(location),
mcl::bit_cast<u64>(location),
FastmemPatchInfo{
Common::BitCast<u64>(code.getCurr()),
Common::BitCast<u64>(wrapped_fn),
mcl::bit_cast<u64>(code.getCurr()),
mcl::bit_cast<u64>(wrapped_fn),
*fastmem_marker,
conf.recompile_on_fastmem_failure,
});
@ -178,10 +178,10 @@ void AxxEmitX64::EmitMemoryWrite(AxxEmitContext& ctx, IR::Inst* inst) {
const auto location = EmitWriteMemoryMov<bitsize>(code, dest_ptr, value_idx, ordered);
fastmem_patch_info.emplace(
Common::BitCast<u64>(location),
mcl::bit_cast<u64>(location),
FastmemPatchInfo{
Common::BitCast<u64>(code.getCurr()),
Common::BitCast<u64>(wrapped_fn),
mcl::bit_cast<u64>(code.getCurr()),
mcl::bit_cast<u64>(wrapped_fn),
*fastmem_marker,
conf.recompile_on_fastmem_failure,
});
@ -339,7 +339,7 @@ void AxxEmitX64::EmitExclusiveReadMemoryInline(AxxEmitContext& ctx, IR::Inst* in
EmitExclusiveLock(code, conf, tmp, tmp2.cvt32());
code.mov(code.byte[r15 + offsetof(AxxJitState, exclusive_state)], u8(1));
code.mov(tmp, Common::BitCast<u64>(GetExclusiveMonitorAddressPointer(conf.global_monitor, conf.processor_id)));
code.mov(tmp, mcl::bit_cast<u64>(GetExclusiveMonitorAddressPointer(conf.global_monitor, conf.processor_id)));
code.mov(qword[tmp], vaddr);
const auto fastmem_marker = ShouldFastmem(ctx, inst);
@ -352,10 +352,10 @@ void AxxEmitX64::EmitExclusiveReadMemoryInline(AxxEmitContext& ctx, IR::Inst* in
const auto location = EmitReadMemoryMov<bitsize>(code, value_idx, src_ptr, ordered);
fastmem_patch_info.emplace(
Common::BitCast<u64>(location),
mcl::bit_cast<u64>(location),
FastmemPatchInfo{
Common::BitCast<u64>(code.getCurr()),
Common::BitCast<u64>(wrapped_fn),
mcl::bit_cast<u64>(code.getCurr()),
mcl::bit_cast<u64>(wrapped_fn),
*fastmem_marker,
conf.recompile_on_exclusive_fastmem_failure,
});
@ -373,7 +373,7 @@ void AxxEmitX64::EmitExclusiveReadMemoryInline(AxxEmitContext& ctx, IR::Inst* in
code.call(wrapped_fn);
}
code.mov(tmp, Common::BitCast<u64>(GetExclusiveMonitorValuePointer(conf.global_monitor, conf.processor_id)));
code.mov(tmp, mcl::bit_cast<u64>(GetExclusiveMonitorValuePointer(conf.global_monitor, conf.processor_id)));
EmitWriteMemoryMov<bitsize>(code, tmp, value_idx, false);
EmitExclusiveUnlock(code, conf, tmp, tmp2.cvt32());
@ -418,7 +418,7 @@ void AxxEmitX64::EmitExclusiveWriteMemoryInline(AxxEmitContext& ctx, IR::Inst* i
Xbyak::Label end;
code.mov(tmp, Common::BitCast<u64>(GetExclusiveMonitorAddressPointer(conf.global_monitor, conf.processor_id)));
code.mov(tmp, mcl::bit_cast<u64>(GetExclusiveMonitorAddressPointer(conf.global_monitor, conf.processor_id)));
code.mov(status, u32(1));
code.cmp(code.byte[r15 + offsetof(AxxJitState, exclusive_state)], u8(0));
code.je(end, code.T_NEAR);
@ -428,7 +428,7 @@ void AxxEmitX64::EmitExclusiveWriteMemoryInline(AxxEmitContext& ctx, IR::Inst* i
EmitExclusiveTestAndClear(code, conf, vaddr, tmp, rax);
code.mov(code.byte[r15 + offsetof(AxxJitState, exclusive_state)], u8(0));
code.mov(tmp, Common::BitCast<u64>(GetExclusiveMonitorValuePointer(conf.global_monitor, conf.processor_id)));
code.mov(tmp, mcl::bit_cast<u64>(GetExclusiveMonitorValuePointer(conf.global_monitor, conf.processor_id)));
if constexpr (bitsize == 128) {
code.mov(rax, qword[tmp + 0]);
@ -488,10 +488,10 @@ void AxxEmitX64::EmitExclusiveWriteMemoryInline(AxxEmitContext& ctx, IR::Inst* i
code.call(wrapped_fn);
fastmem_patch_info.emplace(
Common::BitCast<u64>(location),
mcl::bit_cast<u64>(location),
FastmemPatchInfo{
Common::BitCast<u64>(code.getCurr()),
Common::BitCast<u64>(wrapped_fn),
mcl::bit_cast<u64>(code.getCurr()),
mcl::bit_cast<u64>(wrapped_fn),
*fastmem_marker,
conf.recompile_on_exclusive_fastmem_failure,
});

View file

@ -3,6 +3,7 @@
* SPDX-License-Identifier: 0BSD
*/
#include <mcl/bit_cast.hpp>
#include <xbyak/xbyak.h>
#include "dynarmic/backend/x64/a32_emit_x64.h"
@ -343,7 +344,7 @@ void EmitExclusiveLock(BlockOfCode& code, const UserConfig& conf, Xbyak::Reg64 p
return;
}
code.mov(pointer, Common::BitCast<u64>(GetExclusiveMonitorLockPointer(conf.global_monitor)));
code.mov(pointer, mcl::bit_cast<u64>(GetExclusiveMonitorLockPointer(conf.global_monitor)));
EmitSpinLockLock(code, pointer, tmp);
}
@ -353,7 +354,7 @@ void EmitExclusiveUnlock(BlockOfCode& code, const UserConfig& conf, Xbyak::Reg64
return;
}
code.mov(pointer, Common::BitCast<u64>(GetExclusiveMonitorLockPointer(conf.global_monitor)));
code.mov(pointer, mcl::bit_cast<u64>(GetExclusiveMonitorLockPointer(conf.global_monitor)));
EmitSpinLockUnlock(code, pointer, tmp);
}
@ -370,7 +371,7 @@ void EmitExclusiveTestAndClear(BlockOfCode& code, const UserConfig& conf, Xbyak:
continue;
}
Xbyak::Label ok;
code.mov(pointer, Common::BitCast<u64>(GetExclusiveMonitorAddressPointer(conf.global_monitor, processor_index)));
code.mov(pointer, mcl::bit_cast<u64>(GetExclusiveMonitorAddressPointer(conf.global_monitor, processor_index)));
code.cmp(qword[pointer], vaddr);
code.jne(ok);
code.mov(qword[pointer], tmp);

View file

@ -5,13 +5,13 @@
#include <limits>
#include <mcl/assert.hpp>
#include <mcl/bit/bit_field.hpp>
#include <mcl/stdint.hpp>
#include <mcl/type_traits/integer_of_size.hpp>
#include "dynarmic/backend/x64/block_of_code.h"
#include "dynarmic/backend/x64/emit_x64.h"
#include "dynarmic/common/assert.h"
#include "dynarmic/common/bit_util.h"
#include "dynarmic/common/common_types.h"
#include "dynarmic/ir/basic_block.h"
#include "dynarmic/ir/microinstruction.h"
#include "dynarmic/ir/opcodes.h"

View file

@ -8,6 +8,11 @@
#include <cstdlib>
#include <type_traits>
#include <mcl/assert.hpp>
#include <mcl/bit/bit_count.hpp>
#include <mcl/bit/bit_field.hpp>
#include <mcl/bitsizeof.hpp>
#include <mcl/stdint.hpp>
#include <mcl/type_traits/function_info.hpp>
#include <xbyak/xbyak.h>
@ -15,9 +20,6 @@
#include "dynarmic/backend/x64/block_of_code.h"
#include "dynarmic/backend/x64/constants.h"
#include "dynarmic/backend/x64/emit_x64.h"
#include "dynarmic/common/assert.h"
#include "dynarmic/common/bit_util.h"
#include "dynarmic/common/common_types.h"
#include "dynarmic/common/math_util.h"
#include "dynarmic/ir/basic_block.h"
#include "dynarmic/ir/microinstruction.h"
@ -529,7 +531,7 @@ void EmitX64::EmitVectorArithmeticShiftRight64(EmitContext& ctx, IR::Inst* inst)
template<typename T>
static constexpr T VShift(T x, T y) {
const s8 shift_amount = static_cast<s8>(static_cast<u8>(y));
const s64 bit_size = static_cast<s64>(Common::BitSize<T>());
const s64 bit_size = static_cast<s64>(mcl::bitsizeof<T>);
if constexpr (std::is_signed_v<T>) {
if (shift_amount >= bit_size) {
@ -859,10 +861,10 @@ void EmitX64::EmitVectorBroadcastElement16(EmitContext& ctx, IR::Inst* inst) {
}
if (index < 4) {
code.pshuflw(a, a, Common::Replicate<u8>(index, 2));
code.pshuflw(a, a, mcl::bit::replicate_element<2, u8>(index));
code.punpcklqdq(a, a);
} else {
code.pshufhw(a, a, Common::Replicate<u8>(u8(index - 4), 2));
code.pshufhw(a, a, mcl::bit::replicate_element<2, u8>(u8(index - 4)));
code.punpckhqdq(a, a);
}
@ -876,7 +878,7 @@ void EmitX64::EmitVectorBroadcastElement32(EmitContext& ctx, IR::Inst* inst) {
const u8 index = args[1].GetImmediateU8();
ASSERT(index < 4);
code.pshufd(a, a, Common::Replicate<u8>(index, 2));
code.pshufd(a, a, mcl::bit::replicate_element<2, u8>(index));
ctx.reg_alloc.DefineValue(inst, a);
}
@ -889,7 +891,7 @@ void EmitX64::EmitVectorBroadcastElement64(EmitContext& ctx, IR::Inst* inst) {
ASSERT(index < 2);
if (code.HasHostFeature(HostFeature::AVX)) {
code.vpermilpd(a, a, Common::Replicate<u8>(index, 1));
code.vpermilpd(a, a, mcl::bit::replicate_element<1, u8>(index));
} else {
if (index == 0) {
code.punpcklqdq(a, a);
@ -905,7 +907,7 @@ static void EmitVectorCountLeadingZeros(VectorArray<T>& result, const VectorArra
for (size_t i = 0; i < result.size(); i++) {
T element = data[i];
size_t count = Common::BitSize<T>();
size_t count = mcl::bitsizeof<T>;
while (element != 0) {
element >>= 1;
--count;
@ -1636,7 +1638,7 @@ void EmitX64::EmitVectorLogicalShiftLeft8(EmitContext& ctx, IR::Inst* inst) {
code.gf2p8affineqb(result, code.MConst(xword, shift_matrix, shift_matrix), 0);
} else {
const u64 replicand = (0xFFULL << shift_amount) & 0xFF;
const u64 mask = Common::Replicate(replicand, Common::BitSize<u8>());
const u64 mask = mcl::bit::replicate_element<u8, u64>(replicand);
code.psllw(result, shift_amount);
code.pand(result, code.MConst(xword, mask, mask));
@ -1693,7 +1695,7 @@ void EmitX64::EmitVectorLogicalShiftRight8(EmitContext& ctx, IR::Inst* inst) {
code.gf2p8affineqb(result, code.MConst(xword, shift_matrix, shift_matrix), 0);
} else {
const u64 replicand = 0xFEULL >> shift_amount;
const u64 mask = Common::Replicate(replicand, Common::BitSize<u8>());
const u64 mask = mcl::bit::replicate_element<u8, u64>(replicand);
code.psrlw(result, shift_amount);
code.pand(result, code.MConst(xword, mask, mask));
@ -2775,7 +2777,7 @@ void EmitX64::EmitVectorPairedMinU32(EmitContext& ctx, IR::Inst* inst) {
template<typename D, typename T>
static D PolynomialMultiply(T lhs, T rhs) {
constexpr size_t bit_size = Common::BitSize<T>();
constexpr size_t bit_size = mcl::bitsizeof<T>;
const std::bitset<bit_size> operand(lhs);
D res = 0;
@ -2890,11 +2892,11 @@ void EmitX64::EmitVectorPolynomialMultiplyLong64(EmitContext& ctx, IR::Inst* ins
EmitTwoArgumentFallback(code, ctx, inst, [](VectorArray<u64>& result, const VectorArray<u64>& a, const VectorArray<u64>& b) {
const auto handle_high_bits = [](u64 lhs, u64 rhs) {
constexpr size_t bit_size = Common::BitSize<u64>();
constexpr size_t bit_size = mcl::bitsizeof<u64>;
u64 result = 0;
for (size_t i = 1; i < bit_size; i++) {
if (Common::Bit(i, lhs)) {
if (mcl::bit::get_bit(i, lhs)) {
result ^= rhs >> (bit_size - i);
}
}
@ -2945,7 +2947,7 @@ void EmitX64::EmitVectorPopulationCount(EmitContext& ctx, IR::Inst* inst) {
EmitOneArgumentFallback(code, ctx, inst, [](VectorArray<u8>& result, const VectorArray<u8>& a) {
std::transform(a.begin(), a.end(), result.begin(), [](u8 val) {
return static_cast<u8>(Common::BitCount(val));
return static_cast<u8>(mcl::bit::count_ones(val));
});
});
}
@ -3194,10 +3196,10 @@ static void RoundingShiftLeft(VectorArray<T>& out, const VectorArray<T>& lhs, co
using signed_type = std::make_signed_t<T>;
using unsigned_type = std::make_unsigned_t<T>;
constexpr auto bit_size = static_cast<s64>(Common::BitSize<T>());
constexpr auto bit_size = static_cast<s64>(mcl::bitsizeof<T>);
for (size_t i = 0; i < out.size(); i++) {
const s64 extended_shift = Common::SignExtend<8>(rhs[i] & 0xFF);
const s64 extended_shift = static_cast<s64>(mcl::bit::sign_extend<8, u64>(rhs[i] & 0xFF));
if (extended_shift >= 0) {
if (extended_shift >= bit_size) {
@ -4290,7 +4292,7 @@ static bool VectorSignedSaturatedShiftLeft(VectorArray<T>& dst, const VectorArra
bool qc_flag = false;
constexpr size_t bit_size_minus_one = Common::BitSize<T>() - 1;
constexpr size_t bit_size_minus_one = mcl::bitsizeof<T> - 1;
const auto saturate = [bit_size_minus_one](T value) {
return static_cast<T>((static_cast<U>(value) >> bit_size_minus_one) + (U{1} << bit_size_minus_one) - 1);
@ -4298,7 +4300,7 @@ static bool VectorSignedSaturatedShiftLeft(VectorArray<T>& dst, const VectorArra
for (size_t i = 0; i < dst.size(); i++) {
const T element = data[i];
const T shift = std::clamp<T>(static_cast<T>(Common::SignExtend<8>(shift_values[i] & 0xFF)),
const T shift = std::clamp<T>(static_cast<T>(mcl::bit::sign_extend<8>(static_cast<U>(shift_values[i] & 0xFF))),
-static_cast<T>(bit_size_minus_one), std::numeric_limits<T>::max());
if (element == 0) {
@ -4346,12 +4348,12 @@ template<typename T, typename U = std::make_unsigned_t<T>>
static bool VectorSignedSaturatedShiftLeftUnsigned(VectorArray<T>& dst, const VectorArray<T>& data, const VectorArray<T>& shift_values) {
static_assert(std::is_signed_v<T>, "T must be signed.");
constexpr size_t bit_size_minus_one = Common::BitSize<T>() - 1;
constexpr size_t bit_size_minus_one = mcl::bitsizeof<T> - 1;
bool qc_flag = false;
for (size_t i = 0; i < dst.size(); i++) {
const T element = data[i];
const T shift = std::clamp<T>(static_cast<T>(Common::SignExtend<8>(shift_values[i] & 0xFF)),
const T shift = std::clamp<T>(static_cast<T>(mcl::bit::sign_extend<8>(static_cast<U>(shift_values[i] & 0xFF))),
-static_cast<T>(bit_size_minus_one), std::numeric_limits<T>::max());
if (element == 0) {
@ -4709,7 +4711,7 @@ void EmitX64::EmitVectorTableLookup128(EmitContext& ctx, IR::Inst* inst) {
for (size_t i = 0; i < table_size; ++i) {
const Xbyak::Xmm xmm_table = ctx.reg_alloc.UseScratchXmm(table[i]);
const Xbyak::Opmask table_mask = k1;
const u64 table_index = Common::Replicate<u64>(i * 16, 8);
const u64 table_index = mcl::bit::replicate_element<u8, u64>(i * 16);
code.vpcmpeqb(table_mask, masked, code.MConst(xword, table_index, table_index));
@ -4737,7 +4739,7 @@ void EmitX64::EmitVectorTableLookup128(EmitContext& ctx, IR::Inst* inst) {
for (size_t i = 0; i < table_size; ++i) {
const Xbyak::Xmm xmm_table = ctx.reg_alloc.UseScratchXmm(table[i]);
const u64 table_index = Common::Replicate<u64>(i * 16, 8);
const u64 table_index = mcl::bit::replicate_element<u8, u64>(i * 16);
if (table_index == 0) {
code.pxor(xmm0, xmm0);
@ -5044,7 +5046,7 @@ void EmitX64::EmitVectorUnsignedRecipEstimate(EmitContext& ctx, IR::Inst* inst)
continue;
}
const u32 input = Common::Bits<23, 31>(a[i]);
const u32 input = mcl::bit::get_bits<23, 31>(a[i]);
const u32 estimate = Common::RecipEstimate(input);
result[i] = (0b100000000 | estimate) << 23;
@ -5060,7 +5062,7 @@ void EmitX64::EmitVectorUnsignedRecipSqrtEstimate(EmitContext& ctx, IR::Inst* in
continue;
}
const u32 input = Common::Bits<23, 31>(a[i]);
const u32 input = mcl::bit::get_bits<23, 31>(a[i]);
const u32 estimate = Common::RecipSqrtEstimate(input);
result[i] = (0b100000000 | estimate) << 23;
@ -5073,7 +5075,7 @@ void EmitX64::EmitVectorUnsignedRecipSqrtEstimate(EmitContext& ctx, IR::Inst* in
template<typename T, typename U = std::make_unsigned_t<T>>
static bool EmitVectorUnsignedSaturatedAccumulateSigned(VectorArray<U>& result, const VectorArray<T>& lhs, const VectorArray<T>& rhs) {
static_assert(std::is_signed_v<T>, "T must be signed.");
static_assert(Common::BitSize<T>() < 64, "T must be less than 64 bits in size.");
static_assert(mcl::bitsizeof<T> < 64, "T must be less than 64 bits in size.");
bool qc_flag = false;
@ -5177,12 +5179,12 @@ static bool VectorUnsignedSaturatedShiftLeft(VectorArray<T>& dst, const VectorAr
bool qc_flag = false;
constexpr size_t bit_size = Common::BitSize<T>();
constexpr size_t bit_size = mcl::bitsizeof<T>;
constexpr S negative_bit_size = -static_cast<S>(bit_size);
for (size_t i = 0; i < dst.size(); i++) {
const T element = data[i];
const S shift = std::clamp(static_cast<S>(Common::SignExtend<8>(shift_values[i] & 0xFF)),
const S shift = std::clamp(static_cast<S>(mcl::bit::sign_extend<8>(static_cast<T>(shift_values[i] & 0xFF))),
negative_bit_size, std::numeric_limits<S>::max());
if (element == 0 || shift <= negative_bit_size) {

View file

@ -9,6 +9,7 @@
#include <type_traits>
#include <utility>
#include <mcl/assert.hpp>
#include <mcl/mp/metavalue/lift_value.hpp>
#include <mcl/mp/typelist/cartesian_product.hpp>
#include <mcl/mp/typelist/lift_sequence.hpp>
@ -21,7 +22,6 @@
#include "dynarmic/backend/x64/block_of_code.h"
#include "dynarmic/backend/x64/constants.h"
#include "dynarmic/backend/x64/emit_x64.h"
#include "dynarmic/common/assert.h"
#include "dynarmic/common/fp/fpcr.h"
#include "dynarmic/common/fp/info.h"
#include "dynarmic/common/fp/op.h"
@ -562,7 +562,7 @@ template<size_t fsize>
void FPVectorAbs(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
using FPT = mcl::unsigned_integer_of_size<fsize>;
constexpr FPT non_sign_mask = FP::FPInfo<FPT>::sign_mask - FPT(1u);
constexpr u64 non_sign_mask64 = Common::Replicate<u64>(non_sign_mask, fsize);
constexpr u64 non_sign_mask64 = mcl::bit::replicate_element<fsize, u64>(non_sign_mask);
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
@ -1229,7 +1229,7 @@ template<size_t fsize>
void FPVectorNeg(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
using FPT = mcl::unsigned_integer_of_size<fsize>;
constexpr FPT sign_mask = FP::FPInfo<FPT>::sign_mask;
constexpr u64 sign_mask64 = Common::Replicate<u64>(sign_mask, fsize);
constexpr u64 sign_mask64 = mcl::bit::replicate_element<fsize, u64>(sign_mask);
auto args = ctx.reg_alloc.GetArgumentInfo(inst);

View file

@ -3,10 +3,11 @@
* SPDX-License-Identifier: 0BSD
*/
#include <mcl/stdint.hpp>
#include "dynarmic/backend/x64/block_of_code.h"
#include "dynarmic/backend/x64/constants.h"
#include "dynarmic/backend/x64/emit_x64.h"
#include "dynarmic/common/common_types.h"
#include "dynarmic/ir/microinstruction.h"
#include "dynarmic/ir/opcodes.h"

View file

@ -8,7 +8,7 @@
#include <functional>
#include <memory>
#include "dynarmic/common/common_types.h"
#include <mcl/stdint.hpp>
namespace Dynarmic::Backend::X64 {

View file

@ -14,12 +14,12 @@
#include <vector>
#include <fmt/format.h>
#include <mcl/assert.hpp>
#include <mcl/bit_cast.hpp>
#include <mcl/stdint.hpp>
#include "dynarmic/backend/x64/block_of_code.h"
#include "dynarmic/backend/x64/exception_handler.h"
#include "dynarmic/common/assert.h"
#include "dynarmic/common/cast_util.h"
#include "dynarmic/common/common_types.h"
#define mig_external extern "C"
#include "dynarmic/backend/x64/mig/mach_exc_server.h"
@ -120,7 +120,7 @@ kern_return_t MachHandler::HandleRequest(x86_thread_state64_t* ts) {
FakeCall fc = iter->cb(ts->__rip);
ts->__rsp -= sizeof(u64);
*Common::BitCast<u64*>(ts->__rsp) = fc.ret_rip;
*mcl::bit_cast<u64*>(ts->__rsp) = fc.ret_rip;
ts->__rip = fc.call_rip;
return KERN_SUCCESS;
@ -189,7 +189,7 @@ mig_external kern_return_t catch_mach_exception_raise_state(
struct ExceptionHandler::Impl final {
Impl(BlockOfCode& code)
: code_begin(Common::BitCast<u64>(code.getCode()))
: code_begin(mcl::bit_cast<u64>(code.getCode()))
, code_end(code_begin + code.GetTotalCodeSize()) {}
void SetCallback(std::function<FakeCall(u64)> cb) {

View file

@ -19,10 +19,11 @@
#include <mutex>
#include <vector>
#include <mcl/assert.hpp>
#include <mcl/bit_cast.hpp>
#include <mcl/stdint.hpp>
#include "dynarmic/backend/x64/block_of_code.h"
#include "dynarmic/common/assert.h"
#include "dynarmic/common/cast_util.h"
#include "dynarmic/common/common_types.h"
namespace Dynarmic::Backend::X64 {
@ -142,7 +143,7 @@ void SigHandler::SigAction(int sig, siginfo_t* info, void* raw_context) {
FakeCall fc = iter->cb(CTX_RIP);
CTX_RSP -= sizeof(u64);
*Common::BitCast<u64*>(CTX_RSP) = fc.ret_rip;
*mcl::bit_cast<u64*>(CTX_RSP) = fc.ret_rip;
CTX_RIP = fc.call_rip;
return;
@ -170,7 +171,7 @@ void SigHandler::SigAction(int sig, siginfo_t* info, void* raw_context) {
struct ExceptionHandler::Impl final {
Impl(BlockOfCode& code)
: code_begin(Common::BitCast<u64>(code.getCode()))
: code_begin(mcl::bit_cast<u64>(code.getCode()))
, code_end(code_begin + code.GetTotalCodeSize()) {}
void SetCallback(std::function<FakeCall(u64)> cb) {

View file

@ -9,11 +9,12 @@
#include <cstring>
#include <vector>
#include <mcl/assert.hpp>
#include <mcl/bit_cast.hpp>
#include <mcl/stdint.hpp>
#include "dynarmic/backend/x64/block_of_code.h"
#include "dynarmic/backend/x64/exception_handler.h"
#include "dynarmic/common/assert.h"
#include "dynarmic/common/cast_util.h"
#include "dynarmic/common/common_types.h"
#include "dynarmic/common/safe_ops.h"
using UBYTE = u8;
@ -178,20 +179,20 @@ struct ExceptionHandler::Impl final {
// Our 3rd argument is a PCONTEXT.
// If not within our codeblock, ignore this exception.
code.mov(code.rax, Safe::Negate(Common::BitCast<u64>(code.getCode())));
code.mov(code.rax, Safe::Negate(mcl::bit_cast<u64>(code.getCode())));
code.add(code.rax, code.qword[code.ABI_PARAM3 + Xbyak::RegExp(offsetof(CONTEXT, Rip))]);
code.cmp(code.rax, static_cast<u32>(code.GetTotalCodeSize()));
code.ja(exception_handler_without_cb);
code.sub(code.rsp, 8);
code.mov(code.ABI_PARAM1, Common::BitCast<u64>(&cb));
code.mov(code.ABI_PARAM1, mcl::bit_cast<u64>(&cb));
code.mov(code.ABI_PARAM2, code.ABI_PARAM3);
code.CallLambda(
[](const std::function<FakeCall(u64)>& cb_, PCONTEXT ctx) {
FakeCall fc = cb_(ctx->Rip);
ctx->Rsp -= sizeof(u64);
*Common::BitCast<u64*>(ctx->Rsp) = fc.ret_rip;
*mcl::bit_cast<u64*>(ctx->Rsp) = fc.ret_rip;
ctx->Rip = fc.call_rip;
});
code.add(code.rsp, 8);

View file

@ -7,7 +7,7 @@
#include <algorithm>
#include "dynarmic/common/assert.h"
#include <mcl/assert.hpp>
namespace Dynarmic {

View file

@ -5,7 +5,7 @@
#pragma once
#include "dynarmic/common/common_types.h"
#include <mcl/stdint.hpp>
namespace Dynarmic::Backend::X64 {

View file

@ -4,11 +4,10 @@
*/
#pragma once
#include <mcl/assert.hpp>
#include <mcl/stdint.hpp>
#include <xbyak/xbyak.h>
#include "dynarmic/common/assert.h"
#include "dynarmic/common/common_types.h"
namespace Dynarmic::Backend::X64 {
enum class HostLoc {

View file

@ -5,8 +5,7 @@
#pragma once
#include "dynarmic/common/bit_util.h"
#include "dynarmic/common/common_types.h"
#include <mcl/stdint.hpp>
namespace Dynarmic::Backend::X64::NZCV {
@ -29,10 +28,10 @@ constexpr u32 from_x64_multiplier = 0x1021'0000;
inline u32 ToX64(u32 nzcv) {
/* Naive implementation:
u32 x64_flags = 0;
x64_flags |= Common::Bit<31>(cpsr) ? 1 << 15 : 0;
x64_flags |= Common::Bit<30>(cpsr) ? 1 << 14 : 0;
x64_flags |= Common::Bit<29>(cpsr) ? 1 << 8 : 0;
x64_flags |= Common::Bit<28>(cpsr) ? 1 : 0;
x64_flags |= mcl::bit::get_bit<31>(cpsr) ? 1 << 15 : 0;
x64_flags |= mcl::bit::get_bit<30>(cpsr) ? 1 << 14 : 0;
x64_flags |= mcl::bit::get_bit<29>(cpsr) ? 1 << 8 : 0;
x64_flags |= mcl::bit::get_bit<28>(cpsr) ? 1 : 0;
return x64_flags;
*/
return ((nzcv >> 28) * to_x64_multiplier) & x64_mask;
@ -41,10 +40,10 @@ inline u32 ToX64(u32 nzcv) {
inline u32 FromX64(u32 x64_flags) {
/* Naive implementation:
u32 nzcv = 0;
nzcv |= Common::Bit<15>(x64_flags) ? 1 << 31 : 0;
nzcv |= Common::Bit<14>(x64_flags) ? 1 << 30 : 0;
nzcv |= Common::Bit<8>(x64_flags) ? 1 << 29 : 0;
nzcv |= Common::Bit<0>(x64_flags) ? 1 << 28 : 0;
nzcv |= mcl::bit::get_bit<15>(x64_flags) ? 1 << 31 : 0;
nzcv |= mcl::bit::get_bit<14>(x64_flags) ? 1 << 30 : 0;
nzcv |= mcl::bit::get_bit<8>(x64_flags) ? 1 << 29 : 0;
nzcv |= mcl::bit::get_bit<0>(x64_flags) ? 1 << 28 : 0;
return nzcv;
*/
return ((x64_flags & x64_mask) * from_x64_multiplier) & arm_mask;

View file

@ -5,10 +5,9 @@
#pragma once
#include <mcl/assert.hpp>
#include <xbyak/xbyak.h>
#include "dynarmic/common/assert.h"
namespace Dynarmic::Backend::X64 {
struct OpArg {

View file

@ -15,11 +15,10 @@
# include <mutex>
# include <fmt/format.h>
# include <mcl/stdint.hpp>
# include <sys/types.h>
# include <unistd.h>
# include "dynarmic/common/common_types.h"
namespace Dynarmic::Backend::X64 {
namespace {

View file

@ -7,7 +7,7 @@
#include <string_view>
#include "dynarmic/common/cast_util.h"
#include <mcl/bit_cast.hpp>
namespace Dynarmic::Backend::X64 {
@ -17,7 +17,7 @@ void PerfMapRegister(const void* start, const void* end, std::string_view friend
template<typename T>
void PerfMapRegister(T start, const void* end, std::string_view friendly_name) {
detail::PerfMapRegister(Common::BitCast<const void*>(start), end, friendly_name);
detail::PerfMapRegister(mcl::bit_cast<const void*>(start), end, friendly_name);
}
void PerfMapClear();

View file

@ -10,11 +10,11 @@
#include <utility>
#include <fmt/ostream.h>
#include <mcl/assert.hpp>
#include <xbyak/xbyak.h>
#include "dynarmic/backend/x64/abi.h"
#include "dynarmic/backend/x64/stack_layout.h"
#include "dynarmic/common/assert.h"
namespace Dynarmic::Backend::X64 {

View file

@ -11,12 +11,12 @@
#include <utility>
#include <vector>
#include <mcl/stdint.hpp>
#include <xbyak/xbyak.h>
#include "dynarmic/backend/x64/block_of_code.h"
#include "dynarmic/backend/x64/hostloc.h"
#include "dynarmic/backend/x64/oparg.h"
#include "dynarmic/common/common_types.h"
#include "dynarmic/ir/cond.h"
#include "dynarmic/ir/microinstruction.h"
#include "dynarmic/ir/value.h"

View file

@ -7,7 +7,7 @@
#include <array>
#include "dynarmic/common/common_types.h"
#include <mcl/stdint.hpp>
namespace Dynarmic::Backend::X64 {

View file

@ -1,21 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2020 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#include "dynarmic/common/assert.h"
#include <cstdio>
#include <exception>
#include <fmt/format.h>
namespace Dynarmic::Common {
[[noreturn]] void Terminate(fmt::string_view msg, fmt::format_args args) {
fmt::print(stderr, "dynarmic assertion failed: ");
fmt::vprint(stderr, msg, args);
std::terminate();
}
} // namespace Dynarmic::Common

View file

@ -1,71 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2020 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <fmt/format.h>
#include "dynarmic/common/unlikely.h"
namespace Dynarmic::Common {
[[noreturn]] void Terminate(fmt::string_view msg, fmt::format_args args);
namespace detail {
template<typename... Ts>
[[noreturn]] void TerminateHelper(fmt::string_view msg, Ts... args) {
Terminate(msg, fmt::make_format_args(args...));
}
} // namespace detail
} // namespace Dynarmic::Common
#if defined(__clang) || defined(__GNUC__)
# define ASSUME(expr) [&] { if (!(expr)) __builtin_unreachable(); }()
#elif defined(_MSC_VER)
# define ASSUME(expr) __assume(expr)
#else
# define ASSUME(expr)
#endif
#ifdef DYNARMIC_IGNORE_ASSERTS
# if defined(__clang) || defined(__GNUC__)
# define UNREACHABLE() __builtin_unreachable()
# elif defined(_MSC_VER)
# define UNREACHABLE() __assume(0)
# else
# define UNREACHABLE()
# endif
# define ASSERT(expr) ASSUME(expr)
# define ASSERT_MSG(expr, ...) ASSUME(expr)
# define ASSERT_FALSE(...) UNREACHABLE()
#else
# define UNREACHABLE() ASSERT_FALSE("Unreachable code!")
# define ASSERT(expr) \
[&] { \
if (UNLIKELY(!(expr))) { \
::Dynarmic::Common::detail::TerminateHelper(#expr); \
} \
}()
# define ASSERT_MSG(expr, ...) \
[&] { \
if (UNLIKELY(!(expr))) { \
::Dynarmic::Common::detail::TerminateHelper(#expr "\nMessage: " __VA_ARGS__); \
} \
}()
# define ASSERT_FALSE(...) ::Dynarmic::Common::detail::TerminateHelper("false\nMessage: " __VA_ARGS__)
#endif
#if defined(NDEBUG) || defined(DYNARMIC_IGNORE_ASSERTS)
# define DEBUG_ASSERT(expr) ASSUME(expr)
# define DEBUG_ASSERT_MSG(expr, ...) ASSUME(expr)
#else
# define DEBUG_ASSERT(expr) ASSERT(expr)
# define DEBUG_ASSERT_MSG(expr, ...) ASSERT_MSG(expr, __VA_ARGS__)
#endif

View file

@ -5,7 +5,7 @@
#pragma once
#include "dynarmic/common/common_types.h"
#include <mcl/stdint.hpp>
namespace Dynarmic::Atomic {

View file

@ -1,248 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2016 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <bitset>
#include <climits>
#include <cstddef>
#include <type_traits>
#include "dynarmic/common/assert.h"
#include "dynarmic/common/common_types.h"
namespace Dynarmic::Common {
/// The size of a type in terms of bits
template<typename T>
constexpr size_t BitSize() {
return sizeof(T) * CHAR_BIT;
}
template<typename T>
constexpr T Ones(size_t count) {
ASSERT_MSG(count <= BitSize<T>(), "count larger than bitsize of T");
if (count == BitSize<T>())
return static_cast<T>(~static_cast<T>(0));
return ~(static_cast<T>(~static_cast<T>(0)) << count);
}
/// Extract bits [begin_bit, end_bit] inclusive from value of type T.
template<typename T>
constexpr T Bits(const size_t begin_bit, const size_t end_bit, const T value) {
ASSERT_MSG(begin_bit <= end_bit, "invalid bit range (position of beginning bit cannot be greater than that of end bit)");
ASSERT_MSG(begin_bit < BitSize<T>(), "begin_bit must be smaller than size of T");
ASSERT_MSG(end_bit < BitSize<T>(), "end_bit must be smaller than size of T");
return (value >> begin_bit) & Ones<T>(end_bit - begin_bit + 1);
}
/// Extract bits [begin_bit, end_bit] inclusive from value of type T.
template<size_t begin_bit, size_t end_bit, typename T>
constexpr T Bits(const T value) {
static_assert(begin_bit <= end_bit, "invalid bit range (position of beginning bit cannot be greater than that of end bit)");
static_assert(begin_bit < BitSize<T>(), "begin_bit must be smaller than size of T");
static_assert(end_bit < BitSize<T>(), "end_bit must be smaller than size of T");
return (value >> begin_bit) & Ones<T>(end_bit - begin_bit + 1);
}
/// Create a mask of type T for bits [begin_bit, end_bit] inclusive.
template<size_t begin_bit, size_t end_bit, typename T>
constexpr T Mask() {
static_assert(begin_bit <= end_bit, "invalid bit range (position of beginning bit cannot be greater than that of end bit)");
static_assert(begin_bit < BitSize<T>(), "begin_bit must be smaller than size of T");
static_assert(end_bit < BitSize<T>(), "end_bit must be smaller than size of T");
return Ones<T>(end_bit - begin_bit + 1) << begin_bit;
}
/// Clears bits [begin_bit, end_bit] inclusive of value of type T.
template<size_t begin_bit, size_t end_bit, typename T>
constexpr T ClearBits(const T value) {
return value & ~Mask<begin_bit, end_bit, T>();
}
/// Modifies bits [begin_bit, end_bit] inclusive of value of type T.
template<size_t begin_bit, size_t end_bit, typename T>
constexpr T ModifyBits(const T value, const T new_bits) {
return ClearBits<begin_bit, end_bit, T>(value) | ((new_bits << begin_bit) & Mask<begin_bit, end_bit, T>());
}
#ifdef _MSC_VER
# pragma warning(push)
# pragma warning(disable : 4554)
#endif
/// Extracts a single bit at bit_position from value of type T.
template<typename T>
inline bool Bit(size_t bit_position, const T value) {
ASSERT_MSG(bit_position < BitSize<T>(), "bit_position must be smaller than size of T");
return ((value >> bit_position) & 1) != 0;
}
/// Extracts a single bit at bit_position from value of type T.
template<size_t bit_position, typename T>
constexpr bool Bit(const T value) {
static_assert(bit_position < BitSize<T>(), "bit_position must be smaller than size of T");
return Bit<T>(bit_position, value);
}
/// Clears a single bit at bit_position from value of type T.
template<typename T>
inline T ClearBit(size_t bit_position, const T value) {
ASSERT_MSG(bit_position < BitSize<T>(), "bit_position must be smaller than size of T");
return value & ~(static_cast<T>(1) << bit_position);
}
/// Clears a single bit at bit_position from value of type T.
template<size_t bit_position, typename T>
constexpr T ClearBit(const T value) {
static_assert(bit_position < BitSize<T>(), "bit_position must be smaller than size of T");
return ClearBit<T>(bit_position, value);
}
/// Modifies a single bit at bit_position from value of type T.
template<typename T>
inline T ModifyBit(size_t bit_position, const T value, bool new_bit) {
ASSERT_MSG(bit_position < BitSize<T>(), "bit_position must be smaller than size of T");
return ClearBit<T>(bit_position, value) | (static_cast<T>(new_bit) << bit_position);
}
/// Modifies a single bit at bit_position from value of type T.
template<size_t bit_position, typename T>
constexpr T ModifyBit(const T value, bool new_bit) {
static_assert(bit_position < BitSize<T>(), "bit_position must be smaller than size of T");
return ModifyBit<T>(bit_position, value, new_bit);
}
#ifdef _MSC_VER
# pragma warning(pop)
#endif
/// Sign-extends a value that has bit_count bits to the full bitwidth of type T.
template<size_t bit_count, typename T>
constexpr T SignExtend(const T value) {
static_assert(bit_count <= BitSize<T>(), "bit_count larger than bitsize of T");
constexpr T mask = static_cast<T>(1ULL << bit_count) - 1;
const bool signbit = Bit<bit_count - 1, T>(value);
if (signbit) {
return value | ~mask;
}
return value;
}
/// Sign-extends a value that has bit_count bits to the full bitwidth of type T.
template<typename T>
inline T SignExtend(const size_t bit_count, const T value) {
ASSERT_MSG(bit_count <= BitSize<T>(), "bit_count larger than bitsize of T");
const T mask = static_cast<T>(1ULL << bit_count) - 1;
const bool signbit = Bit<T>(bit_count - 1, value);
if (signbit) {
return value | ~mask;
}
return value;
}
template<typename Integral>
inline size_t BitCount(Integral value) {
return std::bitset<BitSize<Integral>()>(value).count();
}
template<typename T>
constexpr size_t CountLeadingZeros(T value) {
auto x = static_cast<std::make_unsigned_t<T>>(value);
size_t result = BitSize<T>();
while (x != 0) {
x >>= 1;
result--;
}
return result;
}
template<typename T>
constexpr int HighestSetBit(T value) {
auto x = static_cast<std::make_unsigned_t<T>>(value);
int result = -1;
while (x != 0) {
x >>= 1;
result++;
}
return result;
}
template<typename T>
constexpr size_t LowestSetBit(T value) {
auto x = static_cast<std::make_unsigned_t<T>>(value);
if (x == 0)
return BitSize<T>();
size_t result = 0;
while ((x & 1) == 0) {
x >>= 1;
result++;
}
return result;
}
template<typename T>
constexpr bool MostSignificantBit(T value) {
return Bit<BitSize<T>() - 1, T>(value);
}
template<typename T>
constexpr T Replicate(T value, size_t element_size) {
ASSERT_MSG(BitSize<T>() % element_size == 0, "bitsize of T not divisible by element_size");
if (element_size == BitSize<T>())
return value;
return Replicate<T>(T(value | value << element_size), element_size * 2);
}
template<typename T>
constexpr T RotateRight(T value, size_t amount) {
amount %= BitSize<T>();
if (amount == 0) {
return value;
}
auto x = static_cast<std::make_unsigned_t<T>>(value);
return static_cast<T>((x >> amount) | (x << (BitSize<T>() - amount)));
}
constexpr u32 SwapHalves32(u32 value) {
return ((value & 0xFFFF0000U) >> 16)
| ((value & 0x0000FFFFU) << 16);
}
constexpr u16 SwapBytes16(u16 value) {
return static_cast<u16>(u32{value} >> 8 | u32{value} << 8);
}
constexpr u32 SwapBytes32(u32 value) {
return ((value & 0xFF000000U) >> 24)
| ((value & 0x00FF0000U) >> 8)
| ((value & 0x0000FF00U) << 8)
| ((value & 0x000000FFU) << 24);
}
constexpr u64 SwapBytes64(u64 value) {
return ((value & 0xFF00000000000000ULL) >> 56)
| ((value & 0x00FF000000000000ULL) >> 40)
| ((value & 0x0000FF0000000000ULL) >> 24)
| ((value & 0x000000FF00000000ULL) >> 8)
| ((value & 0x00000000FF000000ULL) << 8)
| ((value & 0x0000000000FF0000ULL) << 24)
| ((value & 0x000000000000FF00ULL) << 40)
| ((value & 0x00000000000000FFULL) << 56);
}
} // namespace Dynarmic::Common

View file

@ -5,37 +5,10 @@
#pragma once
#include <cstring>
#include <type_traits>
#include <mcl/type_traits/function_info.hpp>
namespace Dynarmic::Common {
/// Reinterpret objects of one type as another by bit-casting between object representations.
template<class Dest, class Source>
inline Dest BitCast(const Source& source) noexcept {
static_assert(sizeof(Dest) == sizeof(Source), "size of destination and source objects must be equal");
static_assert(std::is_trivially_copyable_v<Dest>, "destination type must be trivially copyable.");
static_assert(std::is_trivially_copyable_v<Source>, "source type must be trivially copyable");
std::aligned_storage_t<sizeof(Dest), alignof(Dest)> dest;
std::memcpy(&dest, &source, sizeof(dest));
return reinterpret_cast<Dest&>(dest);
}
/// Reinterpret objects of any arbitrary type as another type by bit-casting between object representations.
/// Note that here we do not verify if source has enough bytes to read from.
template<class Dest, class SourcePtr>
inline Dest BitCastPointee(const SourcePtr source) noexcept {
static_assert(sizeof(SourcePtr) == sizeof(void*), "source pointer must have size of a pointer");
static_assert(std::is_trivially_copyable_v<Dest>, "destination type must be trivially copyable.");
std::aligned_storage_t<sizeof(Dest), alignof(Dest)> dest;
std::memcpy(&dest, BitCast<void*>(source), sizeof(dest));
return reinterpret_cast<Dest&>(dest);
}
/// Cast a lambda into an equivalent function pointer.
template<class Function>
inline auto FptrCast(Function f) noexcept {

View file

@ -1,28 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2016 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <cstddef>
#include <cstdint>
using u8 = std::uint8_t;
using u16 = std::uint16_t;
using u32 = std::uint32_t;
using u64 = std::uint64_t;
using uptr = std::uintptr_t;
using s8 = std::int8_t;
using s16 = std::int16_t;
using s32 = std::int32_t;
using s64 = std::int64_t;
using sptr = std::intptr_t;
using size_t = std::size_t;
using f32 = float;
using f64 = double;
static_assert(sizeof(f32) == sizeof(u32), "f32 must be 32 bits wide");
static_assert(sizeof(f64) == sizeof(u64), "f64 must be 64 bits wide");

View file

@ -7,7 +7,7 @@
#include <array>
#include "dynarmic/common/common_types.h"
#include <mcl/stdint.hpp>
namespace Dynarmic::Common::Crypto::AES {

View file

@ -7,7 +7,7 @@
#include <array>
#include "dynarmic/common/common_types.h"
#include <mcl/stdint.hpp>
namespace Dynarmic::Common::Crypto::AES {

View file

@ -7,7 +7,7 @@
#include <array>
#include "dynarmic/common/common_types.h"
#include <mcl/stdint.hpp>
namespace Dynarmic::Common::Crypto::CRC32 {

View file

@ -5,7 +5,7 @@
#pragma once
#include "dynarmic/common/common_types.h"
#include <mcl/stdint.hpp>
namespace Dynarmic::Common::Crypto::CRC32 {

View file

@ -7,7 +7,7 @@
#include <array>
#include "dynarmic/common/common_types.h"
#include <mcl/stdint.hpp>
namespace Dynarmic::Common::Crypto::SM4 {

View file

@ -5,7 +5,7 @@
#pragma once
#include "dynarmic/common/common_types.h"
#include <mcl/stdint.hpp>
namespace Dynarmic::Common::Crypto::SM4 {

View file

@ -7,9 +7,10 @@
#include <optional>
#include "dynarmic/common/assert.h"
#include "dynarmic/common/bit_util.h"
#include "dynarmic/common/common_types.h"
#include <mcl/assert.hpp>
#include <mcl/bit/bit_field.hpp>
#include <mcl/stdint.hpp>
#include "dynarmic/common/fp/rounding_mode.h"
namespace Dynarmic::FP {
@ -34,49 +35,49 @@ public:
/// Get alternate half-precision control flag.
bool AHP() const {
return Common::Bit<26>(value);
return mcl::bit::get_bit<26>(value);
}
/// Set alternate half-precision control flag.
void AHP(bool ahp) {
value = Common::ModifyBit<26>(value, ahp);
value = mcl::bit::set_bit<26>(value, ahp);
}
/// Get default NaN mode control bit.
bool DN() const {
return Common::Bit<25>(value);
return mcl::bit::get_bit<25>(value);
}
/// Set default NaN mode control bit.
void DN(bool dn) {
value = Common::ModifyBit<25>(value, dn);
value = mcl::bit::set_bit<25>(value, dn);
}
/// Get flush-to-zero mode control bit.
bool FZ() const {
return Common::Bit<24>(value);
return mcl::bit::get_bit<24>(value);
}
/// Set flush-to-zero mode control bit.
void FZ(bool fz) {
value = Common::ModifyBit<24>(value, fz);
value = mcl::bit::set_bit<24>(value, fz);
}
/// Get rounding mode control field.
FP::RoundingMode RMode() const {
return static_cast<FP::RoundingMode>(Common::Bits<22, 23>(value));
return static_cast<FP::RoundingMode>(mcl::bit::get_bits<22, 23>(value));
}
/// Set rounding mode control field.
void RMode(FP::RoundingMode rounding_mode) {
ASSERT_MSG(static_cast<u32>(rounding_mode) <= 0b11, "FPCR: Invalid rounding mode");
value = Common::ModifyBits<22, 23>(value, static_cast<u32>(rounding_mode));
value = mcl::bit::set_bits<22, 23>(value, static_cast<u32>(rounding_mode));
}
/// Get the stride of a vector when executing AArch32 VFP instructions.
/// This field has no function in AArch64 state.
std::optional<size_t> Stride() const {
switch (Common::Bits<20, 21>(value)) {
switch (mcl::bit::get_bits<20, 21>(value)) {
case 0b00:
return 1;
case 0b11:
@ -90,90 +91,90 @@ public:
/// This field has no function in AArch64 state.
void Stride(size_t stride) {
ASSERT_MSG(stride >= 1 && stride <= 2, "FPCR: Invalid stride");
value = Common::ModifyBits<20, 21>(value, stride == 1 ? 0b00u : 0b11u);
value = mcl::bit::set_bits<20, 21>(value, stride == 1 ? 0b00u : 0b11u);
}
/// Get flush-to-zero (half-precision specific) mode control bit.
bool FZ16() const {
return Common::Bit<19>(value);
return mcl::bit::get_bit<19>(value);
}
/// Set flush-to-zero (half-precision specific) mode control bit.
void FZ16(bool fz16) {
value = Common::ModifyBit<19>(value, fz16);
value = mcl::bit::set_bit<19>(value, fz16);
}
/// Gets the length of a vector when executing AArch32 VFP instructions.
/// This field has no function in AArch64 state.
size_t Len() const {
return Common::Bits<16, 18>(value) + 1;
return mcl::bit::get_bits<16, 18>(value) + 1;
}
/// Sets the length of a vector when executing AArch32 VFP instructions.
/// This field has no function in AArch64 state.
void Len(size_t len) {
ASSERT_MSG(len >= 1 && len <= 8, "FPCR: Invalid len");
value = Common::ModifyBits<16, 18>(value, static_cast<u32>(len - 1));
value = mcl::bit::set_bits<16, 18>(value, static_cast<u32>(len - 1));
}
/// Get input denormal exception trap enable flag.
bool IDE() const {
return Common::Bit<15>(value);
return mcl::bit::get_bit<15>(value);
}
/// Set input denormal exception trap enable flag.
void IDE(bool ide) {
value = Common::ModifyBit<15>(value, ide);
value = mcl::bit::set_bit<15>(value, ide);
}
/// Get inexact exception trap enable flag.
bool IXE() const {
return Common::Bit<12>(value);
return mcl::bit::get_bit<12>(value);
}
/// Set inexact exception trap enable flag.
void IXE(bool ixe) {
value = Common::ModifyBit<12>(value, ixe);
value = mcl::bit::set_bit<12>(value, ixe);
}
/// Get underflow exception trap enable flag.
bool UFE() const {
return Common::Bit<11>(value);
return mcl::bit::get_bit<11>(value);
}
/// Set underflow exception trap enable flag.
void UFE(bool ufe) {
value = Common::ModifyBit<11>(value, ufe);
value = mcl::bit::set_bit<11>(value, ufe);
}
/// Get overflow exception trap enable flag.
bool OFE() const {
return Common::Bit<10>(value);
return mcl::bit::get_bit<10>(value);
}
/// Set overflow exception trap enable flag.
void OFE(bool ofe) {
value = Common::ModifyBit<10>(value, ofe);
value = mcl::bit::set_bit<10>(value, ofe);
}
/// Get division by zero exception trap enable flag.
bool DZE() const {
return Common::Bit<9>(value);
return mcl::bit::get_bit<9>(value);
}
/// Set division by zero exception trap enable flag.
void DZE(bool dze) {
value = Common::ModifyBit<9>(value, dze);
value = mcl::bit::set_bit<9>(value, dze);
}
/// Get invalid operation exception trap enable flag.
bool IOE() const {
return Common::Bit<8>(value);
return mcl::bit::get_bit<8>(value);
}
/// Set invalid operation exception trap enable flag.
void IOE(bool ioe) {
value = Common::ModifyBit<8>(value, ioe);
value = mcl::bit::set_bit<8>(value, ioe);
}
/// Gets the underlying raw value within the FPCR.

View file

@ -5,8 +5,8 @@
#pragma once
#include "dynarmic/common/bit_util.h"
#include "dynarmic/common/common_types.h"
#include <mcl/bit/bit_field.hpp>
#include <mcl/stdint.hpp>
namespace Dynarmic::FP {
@ -30,112 +30,112 @@ public:
/// Get negative condition flag (bit 31 of the raw value)
bool N() const {
    return mcl::bit::get_bit<31>(value);
}
/// Set negative condition flag (bit 31 of the raw value)
void N(bool N_) {
    value = mcl::bit::set_bit<31>(value, N_);
}
/// Get zero condition flag (bit 30 of the raw value)
bool Z() const {
    return mcl::bit::get_bit<30>(value);
}
/// Set zero condition flag (bit 30 of the raw value)
void Z(bool Z_) {
    value = mcl::bit::set_bit<30>(value, Z_);
}
/// Get carry condition flag (bit 29 of the raw value)
bool C() const {
    return mcl::bit::get_bit<29>(value);
}
/// Set carry condition flag (bit 29 of the raw value)
void C(bool C_) {
    value = mcl::bit::set_bit<29>(value, C_);
}
/// Get overflow condition flag (bit 28 of the raw value)
bool V() const {
    return mcl::bit::get_bit<28>(value);
}
/// Set overflow condition flag (bit 28 of the raw value)
void V(bool V_) {
    value = mcl::bit::set_bit<28>(value, V_);
}
/// Get cumulative saturation bit (bit 27 of the raw value)
bool QC() const {
    return mcl::bit::get_bit<27>(value);
}
/// Set cumulative saturation bit (bit 27 of the raw value)
void QC(bool QC_) {
    value = mcl::bit::set_bit<27>(value, QC_);
}
/// Get input denormal floating-point exception bit (bit 7 of the raw value)
bool IDC() const {
    return mcl::bit::get_bit<7>(value);
}
/// Set input denormal floating-point exception bit (bit 7 of the raw value)
void IDC(bool IDC_) {
    value = mcl::bit::set_bit<7>(value, IDC_);
}
/// Get inexact cumulative floating-point exception bit (bit 4 of the raw value)
bool IXC() const {
    return mcl::bit::get_bit<4>(value);
}
/// Set inexact cumulative floating-point exception bit (bit 4 of the raw value)
void IXC(bool IXC_) {
    value = mcl::bit::set_bit<4>(value, IXC_);
}
/// Get underflow cumulative floating-point exception bit (bit 3 of the raw value)
bool UFC() const {
    return mcl::bit::get_bit<3>(value);
}
/// Set underflow cumulative floating-point exception bit (bit 3 of the raw value)
void UFC(bool UFC_) {
    value = mcl::bit::set_bit<3>(value, UFC_);
}
/// Get overflow cumulative floating-point exception bit (bit 2 of the raw value)
bool OFC() const {
    return mcl::bit::get_bit<2>(value);
}
/// Set overflow cumulative floating-point exception bit (bit 2 of the raw value)
void OFC(bool OFC_) {
    value = mcl::bit::set_bit<2>(value, OFC_);
}
/// Get divide by zero cumulative floating-point exception bit (bit 1 of the raw value)
bool DZC() const {
    return mcl::bit::get_bit<1>(value);
}
/// Set divide by zero cumulative floating-point exception bit (bit 1 of the raw value)
void DZC(bool DZC_) {
    value = mcl::bit::set_bit<1>(value, DZC_);
}
/// Get invalid operation cumulative floating-point exception bit (bit 0 of the raw value)
bool IOC() const {
    return mcl::bit::get_bit<0>(value);
}
/// Set invalid operation cumulative floating-point exception bit (bit 0 of the raw value)
void IOC(bool IOC_) {
    value = mcl::bit::set_bit<0>(value, IOC_);
}
/// Gets the underlying raw value within the FPSR.

View file

@ -5,6 +5,8 @@
#include "dynarmic/common/fp/fused.h"
#include <mcl/bit/bit_count.hpp>
#include "dynarmic/common/fp/mantissa_util.h"
#include "dynarmic/common/fp/unpacked.h"
#include "dynarmic/common/u128.h"
@ -81,7 +83,7 @@ FPUnpacked FusedMulAdd(FPUnpacked addend, FPUnpacked op1, FPUnpacked op2) {
return FPUnpacked{result_sign, result_exponent, result.lower};
}
const int required_shift = normalized_point_position - Common::HighestSetBit(result.upper);
const int required_shift = normalized_point_position - mcl::bit::highest_set_bit(result.upper);
result = result << required_shift;
result_exponent -= required_shift;
return ReduceMantissa(result_sign, result_exponent, result);

View file

@ -5,8 +5,8 @@
#pragma once
#include "dynarmic/common/bit_util.h"
#include "dynarmic/common/common_types.h"
#include <mcl/bit/bit_count.hpp>
#include <mcl/stdint.hpp>
namespace Dynarmic::FP {
@ -124,7 +124,7 @@ constexpr FPT FPValue() {
}
constexpr int point_position = static_cast<int>(FPInfo<FPT>::explicit_mantissa_width);
constexpr int highest_bit = Common::HighestSetBit(value);
constexpr int highest_bit = mcl::bit::highest_set_bit(value);
constexpr int offset = point_position - highest_bit;
constexpr int normalized_exponent = exponent - offset + point_position;
static_assert(offset >= 0);

View file

@ -5,8 +5,9 @@
#pragma once
#include "dynarmic/common/bit_util.h"
#include "dynarmic/common/common_types.h"
#include <mcl/bit/bit_field.hpp>
#include <mcl/bitsizeof.hpp>
#include <mcl/stdint.hpp>
namespace Dynarmic::FP {
@ -22,13 +23,13 @@ inline ResidualError ResidualErrorOnRightShift(u64 mantissa, int shift_amount) {
return ResidualError::Zero;
}
if (shift_amount > static_cast<int>(Common::BitSize<u64>())) {
return Common::MostSignificantBit(mantissa) ? ResidualError::GreaterThanHalf : ResidualError::LessThanHalf;
if (shift_amount > static_cast<int>(mcl::bitsizeof<u64>)) {
return mcl::bit::most_significant_bit(mantissa) ? ResidualError::GreaterThanHalf : ResidualError::LessThanHalf;
}
const size_t half_bit_position = static_cast<size_t>(shift_amount - 1);
const u64 half = static_cast<u64>(1) << half_bit_position;
const u64 error_mask = Common::Ones<u64>(static_cast<size_t>(shift_amount));
const u64 error_mask = mcl::bit::ones<u64>(static_cast<size_t>(shift_amount));
const u64 error = mantissa & error_mask;
if (error == 0) {

View file

@ -5,7 +5,10 @@
#include "dynarmic/common/fp/op/FPConvert.h"
#include "dynarmic/common/common_types.h"
#include <mcl/bit/bit_field.hpp>
#include <mcl/bitsizeof.hpp>
#include <mcl/stdint.hpp>
#include "dynarmic/common/fp/fpcr.h"
#include "dynarmic/common/fp/fpsr.h"
#include "dynarmic/common/fp/info.h"
@ -16,27 +19,27 @@ namespace Dynarmic::FP {
namespace {
/// Converts a NaN from one floating-point width to another, preserving the
/// payload where possible. The source payload is first aligned into a common
/// u64 frame (top fraction bit at bit 50), then the destination sign and
/// all-ones exponent are rebuilt around the re-extracted payload.
template<typename FPT_TO, typename FPT_FROM>
FPT_TO FPConvertNaN(FPT_FROM op) {
    const bool sign = mcl::bit::get_bit<mcl::bitsizeof<FPT_FROM> - 1>(op);
    // Align the source fraction: f64 payload used as-is, f32 shifted up by 29,
    // f16 shifted up by 42, so all widths share the same u64 layout.
    const u64 frac = [op] {
        if constexpr (sizeof(FPT_FROM) == sizeof(u64)) {
            return mcl::bit::get_bits<0, 50>(op);
        } else if constexpr (sizeof(FPT_FROM) == sizeof(u32)) {
            return u64{mcl::bit::get_bits<0, 21>(op)} << 29;
        } else {
            return u64{mcl::bit::get_bits<0, 8>(op)} << 42;
        }
    }();
    const size_t dest_bit_size = mcl::bitsizeof<FPT_TO>;
    const u64 shifted_sign = u64{sign} << (dest_bit_size - 1);
    // All-ones exponent field: the NaN encoding for the destination type.
    const u64 exponent = mcl::bit::ones<u64>(dest_bit_size - FPInfo<FPT_TO>::explicit_mantissa_width);
    if constexpr (sizeof(FPT_TO) == sizeof(u64)) {
        return FPT_TO(shifted_sign | exponent << 51 | frac);
    } else if constexpr (sizeof(FPT_TO) == sizeof(u32)) {
        return FPT_TO(shifted_sign | exponent << 22 | mcl::bit::get_bits<29, 50>(frac));
    } else {
        return FPT_TO(shifted_sign | exponent << 9 | mcl::bit::get_bits<42, 50>(frac));
    }
}
} // Anonymous namespace
@ -44,7 +47,7 @@ FPT_TO FPConvertNaN(FPT_FROM op) {
template<typename FPT_TO, typename FPT_FROM>
FPT_TO FPConvert(FPT_FROM op, FPCR fpcr, RoundingMode rounding_mode, FPSR& fpsr) {
const auto [type, sign, value] = FPUnpackCV<FPT_FROM>(op, fpcr, fpsr);
const bool is_althp = Common::BitSize<FPT_TO>() == 16 && fpcr.AHP();
const bool is_althp = mcl::bitsizeof<FPT_TO> == 16 && fpcr.AHP();
if (type == FPType::SNaN || type == FPType::QNaN) {
std::uintmax_t result{};

View file

@ -5,7 +5,8 @@
#include "dynarmic/common/fp/op/FPMulAdd.h"
#include "dynarmic/common/common_types.h"
#include <mcl/stdint.hpp>
#include "dynarmic/common/fp/fpcr.h"
#include "dynarmic/common/fp/fpsr.h"
#include "dynarmic/common/fp/fused.h"

View file

@ -5,7 +5,8 @@
#include "dynarmic/common/fp/op/FPRSqrtEstimate.h"
#include "dynarmic/common/common_types.h"
#include <mcl/stdint.hpp>
#include "dynarmic/common/fp/fpcr.h"
#include "dynarmic/common/fp/fpsr.h"
#include "dynarmic/common/fp/info.h"

View file

@ -7,8 +7,9 @@
#include <tuple>
#include "dynarmic/common/assert.h"
#include "dynarmic/common/common_types.h"
#include <mcl/assert.hpp>
#include <mcl/stdint.hpp>
#include "dynarmic/common/fp/fpcr.h"
#include "dynarmic/common/fp/fpsr.h"
#include "dynarmic/common/fp/info.h"

View file

@ -5,8 +5,9 @@
#include "dynarmic/common/fp/op/FPRecipExponent.h"
#include "dynarmic/common/bit_util.h"
#include "dynarmic/common/common_types.h"
#include <mcl/bit/bit_field.hpp>
#include <mcl/stdint.hpp>
#include "dynarmic/common/fp/fpcr.h"
#include "dynarmic/common/fp/fpsr.h"
#include "dynarmic/common/fp/info.h"
@ -18,11 +19,11 @@ namespace {
/// Extracts the raw (biased) exponent field from the raw bits of a
/// floating-point value: bits [23,30] for f32, [52,62] for f64, [10,14] for f16.
template<typename FPT>
FPT DetermineExponentValue(size_t value) {
    if constexpr (sizeof(FPT) == sizeof(u32)) {
        return static_cast<FPT>(mcl::bit::get_bits<23, 30>(value));
    } else if constexpr (sizeof(FPT) == sizeof(u64)) {
        return static_cast<FPT>(mcl::bit::get_bits<52, 62>(value));
    } else {
        return static_cast<FPT>(mcl::bit::get_bits<10, 14>(value));
    }
}
} // Anonymous namespace
@ -41,7 +42,7 @@ FPT FPRecipExponent(FPT op, FPCR fpcr, FPSR& fpsr) {
// Zero and denormals
if (exponent == 0) {
const FPT max_exponent = Common::Ones<FPT>(FPInfo<FPT>::exponent_width) - 1;
const FPT max_exponent = mcl::bit::ones<FPT>(FPInfo<FPT>::exponent_width) - 1;
return FPT(sign_bits | (max_exponent << FPInfo<FPT>::explicit_mantissa_width));
}

View file

@ -5,9 +5,10 @@
#include "dynarmic/common/fp/op/FPRoundInt.h"
#include "dynarmic/common/assert.h"
#include "dynarmic/common/bit_util.h"
#include "dynarmic/common/common_types.h"
#include <mcl/assert.hpp>
#include <mcl/bit/bit_field.hpp>
#include <mcl/stdint.hpp>
#include "dynarmic/common/fp/fpcr.h"
#include "dynarmic/common/fp/fpsr.h"
#include "dynarmic/common/fp/info.h"
@ -53,7 +54,7 @@ u64 FPRoundInt(FPT op, FPCR fpcr, RoundingMode rounding, bool exact, FPSR& fpsr)
bool round_up = false;
switch (rounding) {
case RoundingMode::ToNearest_TieEven:
round_up = error > ResidualError::Half || (error == ResidualError::Half && Common::Bit<0>(int_result));
round_up = error > ResidualError::Half || (error == ResidualError::Half && mcl::bit::get_bit<0>(int_result));
break;
case RoundingMode::TowardsPlusInfinity:
round_up = error != ResidualError::Zero;
@ -62,10 +63,10 @@ u64 FPRoundInt(FPT op, FPCR fpcr, RoundingMode rounding, bool exact, FPSR& fpsr)
round_up = false;
break;
case RoundingMode::TowardsZero:
round_up = error != ResidualError::Zero && Common::MostSignificantBit(int_result);
round_up = error != ResidualError::Zero && mcl::bit::most_significant_bit(int_result);
break;
case RoundingMode::ToNearest_TieAwayFromZero:
round_up = error > ResidualError::Half || (error == ResidualError::Half && !Common::MostSignificantBit(int_result));
round_up = error > ResidualError::Half || (error == ResidualError::Half && !mcl::bit::most_significant_bit(int_result));
break;
case RoundingMode::ToOdd:
UNREACHABLE();
@ -75,7 +76,7 @@ u64 FPRoundInt(FPT op, FPCR fpcr, RoundingMode rounding, bool exact, FPSR& fpsr)
int_result++;
}
const bool new_sign = Common::MostSignificantBit(int_result);
const bool new_sign = mcl::bit::most_significant_bit(int_result);
const u64 abs_int_result = new_sign ? Safe::Negate<u64>(int_result) : static_cast<u64>(int_result);
const FPT result = int_result == 0

View file

@ -5,7 +5,7 @@
#pragma once
#include "dynarmic/common/common_types.h"
#include <mcl/stdint.hpp>
namespace Dynarmic::FP {

View file

@ -5,9 +5,11 @@
#include "dynarmic/common/fp/op/FPToFixed.h"
#include "dynarmic/common/assert.h"
#include "dynarmic/common/bit_util.h"
#include "dynarmic/common/common_types.h"
#include <mcl/assert.hpp>
#include <mcl/bit/bit_count.hpp>
#include <mcl/bit/bit_field.hpp>
#include <mcl/stdint.hpp>
#include "dynarmic/common/fp/fpcr.h"
#include "dynarmic/common/fp/fpsr.h"
#include "dynarmic/common/fp/mantissa_util.h"
@ -50,7 +52,7 @@ u64 FPToFixed(size_t ibits, FPT op, size_t fbits, bool unsigned_, FPCR fpcr, Rou
bool round_up = false;
switch (rounding) {
case RoundingMode::ToNearest_TieEven:
round_up = error > ResidualError::Half || (error == ResidualError::Half && Common::Bit<0>(int_result));
round_up = error > ResidualError::Half || (error == ResidualError::Half && mcl::bit::get_bit<0>(int_result));
break;
case RoundingMode::TowardsPlusInfinity:
round_up = error != ResidualError::Zero;
@ -59,10 +61,10 @@ u64 FPToFixed(size_t ibits, FPT op, size_t fbits, bool unsigned_, FPCR fpcr, Rou
round_up = false;
break;
case RoundingMode::TowardsZero:
round_up = error != ResidualError::Zero && Common::MostSignificantBit(int_result);
round_up = error != ResidualError::Zero && mcl::bit::most_significant_bit(int_result);
break;
case RoundingMode::ToNearest_TieAwayFromZero:
round_up = error > ResidualError::Half || (error == ResidualError::Half && !Common::MostSignificantBit(int_result));
round_up = error > ResidualError::Half || (error == ResidualError::Half && !mcl::bit::most_significant_bit(int_result));
break;
case RoundingMode::ToOdd:
UNREACHABLE();
@ -73,12 +75,12 @@ u64 FPToFixed(size_t ibits, FPT op, size_t fbits, bool unsigned_, FPCR fpcr, Rou
}
// Detect Overflow
const int min_exponent_for_overflow = static_cast<int>(ibits) - static_cast<int>(Common::HighestSetBit(value.mantissa + (round_up ? 1 : 0))) - (unsigned_ ? 0 : 1);
const int min_exponent_for_overflow = static_cast<int>(ibits) - static_cast<int>(mcl::bit::highest_set_bit(value.mantissa + (round_up ? 1 : 0))) - (unsigned_ ? 0 : 1);
if (exponent >= min_exponent_for_overflow) {
// Positive overflow
if (unsigned_ || !sign) {
FPProcessException(FPExc::InvalidOp, fpcr, fpsr);
return Common::Ones<u64>(ibits - (unsigned_ ? 0 : 1));
return mcl::bit::ones<u64>(ibits - (unsigned_ ? 0 : 1));
}
// Negative overflow
@ -92,7 +94,7 @@ u64 FPToFixed(size_t ibits, FPT op, size_t fbits, bool unsigned_, FPCR fpcr, Rou
if (error != ResidualError::Zero) {
FPProcessException(FPExc::Inexact, fpcr, fpsr);
}
return int_result & Common::Ones<u64>(ibits);
return int_result & mcl::bit::ones<u64>(ibits);
}
template u64 FPToFixed<u16>(size_t ibits, u16 op, size_t fbits, bool unsigned_, FPCR fpcr, RoundingMode rounding, FPSR& fpsr);

View file

@ -5,7 +5,7 @@
#pragma once
#include "dynarmic/common/common_types.h"
#include <mcl/stdint.hpp>
namespace Dynarmic::FP {

View file

@ -5,7 +5,8 @@
#include "dynarmic/common/fp/process_exception.h"
#include "dynarmic/common/assert.h"
#include <mcl/assert.hpp>
#include "dynarmic/common/fp/fpcr.h"
#include "dynarmic/common/fp/fpsr.h"

View file

@ -7,8 +7,9 @@
#include <optional>
#include "dynarmic/common/assert.h"
#include "dynarmic/common/bit_util.h"
#include <mcl/assert.hpp>
#include <mcl/bit/bit_field.hpp>
#include "dynarmic/common/fp/fpcr.h"
#include "dynarmic/common/fp/fpsr.h"
#include "dynarmic/common/fp/info.h"
@ -26,7 +27,7 @@ FPT FPProcessNaN(FPType type, FPT op, FPCR fpcr, FPSR& fpsr) {
FPT result = op;
if (type == FPType::SNaN) {
result = Common::ModifyBit<topfrac>(op, true);
result = mcl::bit::set_bit<topfrac>(op, true);
FPProcessException(FPExc::InvalidOp, fpcr, fpsr);
}

View file

@ -7,6 +7,9 @@
#include <algorithm>
#include <mcl/bit/bit_count.hpp>
#include <mcl/bit/bit_field.hpp>
#include "dynarmic/common/fp/fpsr.h"
#include "dynarmic/common/fp/info.h"
#include "dynarmic/common/fp/mantissa_util.h"
@ -26,9 +29,9 @@ std::tuple<FPType, bool, FPUnpacked> FPUnpackBase(FPT op, FPCR fpcr, [[maybe_unu
constexpr int denormal_exponent = FPInfo<FPT>::exponent_min - int(FPInfo<FPT>::explicit_mantissa_width);
constexpr bool is_half_precision = std::is_same_v<FPT, u16>;
const bool sign = Common::Bit<sign_bit>(op);
const FPT exp_raw = Common::Bits<exponent_low_bit, exponent_high_bit>(op);
const FPT frac_raw = Common::Bits<mantissa_low_bit, mantissa_high_bit>(op);
const bool sign = mcl::bit::get_bit<sign_bit>(op);
const FPT exp_raw = mcl::bit::get_bits<exponent_low_bit, exponent_high_bit>(op);
const FPT frac_raw = mcl::bit::get_bits<mantissa_low_bit, mantissa_high_bit>(op);
if (exp_raw == 0) {
if constexpr (is_half_precision) {
@ -48,14 +51,14 @@ std::tuple<FPType, bool, FPUnpacked> FPUnpackBase(FPT op, FPCR fpcr, [[maybe_unu
}
}
const bool exp_all_ones = exp_raw == Common::Ones<FPT>(FPInfo<FPT>::exponent_width);
const bool exp_all_ones = exp_raw == mcl::bit::ones<FPT>(FPInfo<FPT>::exponent_width);
const bool ahp_disabled = is_half_precision && !fpcr.AHP();
if ((exp_all_ones && !is_half_precision) || (exp_all_ones && ahp_disabled)) {
if (frac_raw == 0) {
return {FPType::Infinity, sign, ToNormalized(sign, 1000000, 1)};
}
const bool is_quiet = Common::Bit<mantissa_high_bit>(frac_raw);
const bool is_quiet = mcl::bit::get_bit<mantissa_high_bit>(frac_raw);
return {is_quiet ? FPType::QNaN : FPType::SNaN, sign, {sign, 0, 0}};
}
@ -70,7 +73,7 @@ template std::tuple<FPType, bool, FPUnpacked> FPUnpackBase<u64>(u64 op, FPCR fpc
template<size_t F>
std::tuple<bool, int, u64, ResidualError> Normalize(FPUnpacked op, int extra_right_shift = 0) {
const int highest_set_bit = Common::HighestSetBit(op.mantissa);
const int highest_set_bit = mcl::bit::highest_set_bit(op.mantissa);
const int shift_amount = highest_set_bit - static_cast<int>(F) + extra_right_shift;
const u64 mantissa = Safe::LogicalShiftRight(op.mantissa, shift_amount);
const ResidualError error = ResidualErrorOnRightShift(op.mantissa, shift_amount);
@ -107,7 +110,7 @@ FPT FPRoundBase(FPUnpacked op, FPCR fpcr, RoundingMode rounding, FPSR& fpsr) {
bool round_up = false, overflow_to_inf = false;
switch (rounding) {
case RoundingMode::ToNearest_TieEven: {
round_up = (error > ResidualError::Half) || (error == ResidualError::Half && Common::Bit<0>(mantissa));
round_up = (error > ResidualError::Half) || (error == ResidualError::Half && mcl::bit::get_bit<0>(mantissa));
overflow_to_inf = true;
break;
}
@ -141,7 +144,7 @@ FPT FPRoundBase(FPUnpacked op, FPCR fpcr, RoundingMode rounding, FPSR& fpsr) {
}
if (error != ResidualError::Zero && rounding == RoundingMode::ToOdd) {
mantissa = Common::ModifyBit<0>(mantissa, true);
mantissa = mcl::bit::set_bit<0>(mantissa, true);
}
FPT result = 0;

View file

@ -7,7 +7,9 @@
#include <tuple>
#include "dynarmic/common/common_types.h"
#include <mcl/bit/bit_count.hpp>
#include <mcl/stdint.hpp>
#include "dynarmic/common/fp/fpcr.h"
namespace Dynarmic::FP {
@ -43,7 +45,7 @@ constexpr FPUnpacked ToNormalized(bool sign, int exponent, u64 value) {
return {sign, 0, 0};
}
const int highest_bit = Common::HighestSetBit(value);
const int highest_bit = mcl::bit::highest_set_bit(value);
const int offset = static_cast<int>(normalized_point_position) - highest_bit;
value <<= offset;
exponent -= offset - static_cast<int>(normalized_point_position);

View file

@ -1,379 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2016 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <cstddef>
#include <iterator>
#include <memory>
#include <type_traits>
#include "dynarmic/common/assert.h"
namespace Dynarmic::Common {
template<typename T>
class IntrusiveList;
template<typename T>
class IntrusiveListIterator;
// Base class that embeds the doubly-linked-list links directly inside the
// element type: a type T participates in IntrusiveList<T> by deriving from
// IntrusiveListNode<T>. The list never owns or allocates its elements.
template<typename T>
class IntrusiveListNode {
public:
// True only for the list-owned sentinel node, never for user data.
bool IsSentinel() const {
return is_sentinel;
}
protected:
// Neighbouring nodes; nullptr until this node is linked into a list.
IntrusiveListNode* next = nullptr;
IntrusiveListNode* prev = nullptr;
bool is_sentinel = false;
// The list and its iterators splice next/prev directly.
friend class IntrusiveList<T>;
friend class IntrusiveListIterator<T>;
friend class IntrusiveListIterator<const T>;
};
// The list's dummy head/tail node. It links to itself (an empty circular
// list) and marks itself as the sentinel so that dereferencing end() can be
// caught by DEBUG_ASSERT in the iterator.
template<typename T>
class IntrusiveListSentinel final : public IntrusiveListNode<T> {
using IntrusiveListNode<T>::next;
using IntrusiveListNode<T>::prev;
using IntrusiveListNode<T>::is_sentinel;
public:
IntrusiveListSentinel() {
next = this;
prev = this;
is_sentinel = true;
}
};
// Bidirectional iterator over an IntrusiveList<T>. It is a thin wrapper
// around a node pointer; the sentinel node acts as the past-the-end position.
template<typename T>
class IntrusiveListIterator {
public:
using iterator_category = std::bidirectional_iterator_tag;
using difference_type = std::ptrdiff_t;
using value_type = T;
using pointer = value_type*;
using const_pointer = const value_type*;
using reference = value_type&;
using const_reference = const value_type&;
// If value_type is const, we want "const IntrusiveListNode<value_type>", not "const IntrusiveListNode<const value_type>"
using node_type = std::conditional_t<std::is_const<value_type>::value,
const IntrusiveListNode<std::remove_const_t<value_type>>,
IntrusiveListNode<value_type>>;
using node_pointer = node_type*;
using node_reference = node_type&;
IntrusiveListIterator() = default;
IntrusiveListIterator(const IntrusiveListIterator& other) = default;
IntrusiveListIterator& operator=(const IntrusiveListIterator& other) = default;
// Construct from a raw node pointer (may be the sentinel, i.e. end()).
explicit IntrusiveListIterator(node_pointer list_node)
: node(list_node) {
}
// Construct from an element pointer/reference; valid because elements
// derive from IntrusiveListNode<T>.
explicit IntrusiveListIterator(pointer data)
: node(data) {
}
explicit IntrusiveListIterator(reference data)
: node(&data) {
}
IntrusiveListIterator& operator++() {
node = node->next;
return *this;
}
IntrusiveListIterator& operator--() {
node = node->prev;
return *this;
}
IntrusiveListIterator operator++(int) {
IntrusiveListIterator it(*this);
++*this;
return it;
}
IntrusiveListIterator operator--(int) {
IntrusiveListIterator it(*this);
--*this;
return it;
}
bool operator==(const IntrusiveListIterator& other) const {
return node == other.node;
}
bool operator!=(const IntrusiveListIterator& other) const {
return !operator==(other);
}
// Downcast from node to element; asserts the iterator is dereferenceable
// (i.e. not pointing at the sentinel/end()).
reference operator*() const {
DEBUG_ASSERT(!node->IsSentinel());
return static_cast<reference>(*node);
}
pointer operator->() const {
return std::addressof(operator*());
}
// Escape hatch used by IntrusiveList to splice around this position.
node_pointer AsNodePointer() const {
return node;
}
private:
friend class IntrusiveList<T>;
node_pointer node = nullptr;
};
// Non-owning doubly-linked list whose links live inside the elements
// themselves (elements derive from IntrusiveListNode<T>). Insertion and
// removal are O(1) and allocation-free; size() is O(n). The list does not
// destroy elements on removal — callers manage element lifetime.
template<typename T>
class IntrusiveList {
public:
using difference_type = std::ptrdiff_t;
using size_type = std::size_t;
using value_type = T;
using pointer = value_type*;
using const_pointer = const value_type*;
using reference = value_type&;
using const_reference = const value_type&;
using iterator = IntrusiveListIterator<value_type>;
using const_iterator = IntrusiveListIterator<const value_type>;
using reverse_iterator = std::reverse_iterator<iterator>;
using const_reverse_iterator = std::reverse_iterator<const_iterator>;
/**
* Inserts a node at the given location indicated by an iterator.
*
* @param location The location to insert the node.
* @param new_node The node to add.
*/
iterator insert(iterator location, pointer new_node) {
return insert_before(location, new_node);
}
/**
* Inserts a node at the given location, moving the previous
* node occupant ahead of the one inserted.
*
* @param location The location to insert the new node.
* @param new_node The node to insert into the list.
*/
iterator insert_before(iterator location, pointer new_node) {
auto existing_node = location.AsNodePointer();
// Splice new_node in between existing_node->prev and existing_node.
new_node->next = existing_node;
new_node->prev = existing_node->prev;
existing_node->prev->next = new_node;
existing_node->prev = new_node;
return iterator(new_node);
}
/**
* Inserts a new node into the list ahead of the position indicated.
*
* @param position Location to insert the node in front of.
* @param new_node The node to be inserted into the list.
*/
iterator insert_after(iterator position, pointer new_node) {
if (empty())
return insert(begin(), new_node);
return insert(++position, new_node);
}
/**
* Add an entry to the start of the list.
* @param node Node to add to the list.
*/
void push_front(pointer node) {
insert(begin(), node);
}
/**
* Add an entry to the end of the list
* @param node Node to add to the list.
*/
void push_back(pointer node) {
insert(end(), node);
}
/**
* Erases the node at the front of the list.
* @note Must not be called on an empty list.
*/
void pop_front() {
DEBUG_ASSERT(!empty());
erase(begin());
}
/**
* Erases the node at the back of the list.
* @note Must not be called on an empty list.
*/
void pop_back() {
DEBUG_ASSERT(!empty());
erase(--end());
}
/**
* Removes a node from this list
* @param it An iterator that points to the node to remove from list.
*/
pointer remove(iterator& it) {
DEBUG_ASSERT(it != end());
// Advance the caller's iterator past the node before unlinking it.
pointer node = &*it++;
node->prev->next = node->next;
node->next->prev = node->prev;
// Poison the links in debug builds to catch use-after-remove.
#if !defined(NDEBUG)
node->next = nullptr;
node->prev = nullptr;
#endif
return node;
}
/**
* Removes a node from this list
* @param it A constant iterator that points to the node to remove from list.
*/
pointer remove(const iterator& it) {
iterator copy = it;
return remove(copy);
}
/**
* Removes a node from this list.
* @param node A pointer to the node to remove.
*/
pointer remove(pointer node) {
return remove(iterator(node));
}
/**
* Removes a node from this list.
* @param node A reference to the node to remove.
*/
pointer remove(reference node) {
return remove(iterator(node));
}
/**
* Is this list empty?
* @returns true if there are no nodes in this list.
*/
bool empty() const {
return root->next == root.get();
}
/**
* Gets the total number of elements within this list.
* @return the number of elements in this list.
* @note O(n): walks the whole list.
*/
size_type size() const {
return static_cast<size_type>(std::distance(begin(), end()));
}
/**
* Retrieves a reference to the node at the front of the list.
* @note Must not be called on an empty list.
*/
reference front() {
DEBUG_ASSERT(!empty());
return *begin();
}
/**
* Retrieves a constant reference to the node at the front of the list.
* @note Must not be called on an empty list.
*/
const_reference front() const {
DEBUG_ASSERT(!empty());
return *begin();
}
/**
* Retrieves a reference to the node at the back of the list.
* @note Must not be called on an empty list.
*/
reference back() {
DEBUG_ASSERT(!empty());
return *--end();
}
/**
* Retrieves a constant reference to the node at the back of the list.
* @note Must not be called on an empty list.
*/
const_reference back() const {
DEBUG_ASSERT(!empty());
return *--end();
}
// Iterator interface
iterator begin() { return iterator(root->next); }
const_iterator begin() const { return const_iterator(root->next); }
const_iterator cbegin() const { return begin(); }
iterator end() { return iterator(root.get()); }
const_iterator end() const { return const_iterator(root.get()); }
const_iterator cend() const { return end(); }
reverse_iterator rbegin() { return reverse_iterator(end()); }
const_reverse_iterator rbegin() const { return const_reverse_iterator(end()); }
const_reverse_iterator crbegin() const { return rbegin(); }
reverse_iterator rend() { return reverse_iterator(begin()); }
const_reverse_iterator rend() const { return const_reverse_iterator(begin()); }
const_reverse_iterator crend() const { return rend(); }
/**
* Erases a node from the list, indicated by an iterator.
* @param it The iterator that points to the node to erase.
*/
iterator erase(iterator it) {
remove(it);
return it;
}
/**
* Erases a node from this list.
* @param node A pointer to the node to erase from this list.
*/
iterator erase(pointer node) {
return erase(iterator(node));
}
/**
* Erases a node from this list.
* @param node A reference to the node to erase from this list.
*/
iterator erase(reference node) {
return erase(iterator(node));
}
/**
* Exchanges contents of this list with another list instance.
* @param other The other list to swap with.
*/
void swap(IntrusiveList& other) noexcept {
root.swap(other.root);
}
private:
// Heap-allocated sentinel so swap() is a cheap pointer swap and iterators
// to end() remain stable across swaps.
std::shared_ptr<IntrusiveListNode<T>> root = std::make_shared<IntrusiveListSentinel<T>>();
};
/**
* Exchanges contents of an intrusive list with another intrusive list.
* @tparam T The type of data being kept track of by the lists.
* @param lhs The first list.
* @param rhs The second list.
*/
// Free-function form so generic code using ADL swap picks this up.
template<typename T>
void swap(IntrusiveList<T>& lhs, IntrusiveList<T>& rhs) noexcept {
lhs.swap(rhs);
}
} // namespace Dynarmic::Common

View file

@ -1,35 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2016 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <iterator>
namespace Dynarmic::Common {
namespace detail {
// Lightweight view exposing a container's reverse iterators as begin()/end(),
// so `for (auto& x : Reverse(c))` iterates back-to-front. Holds only a
// reference; the adapted container must outlive the adapter.
template<typename T>
struct ReverseAdapter {
    T& iterable;

    constexpr auto begin() {
        using std::rbegin;
        return rbegin(iterable);
    }

    constexpr auto end() {
        using std::rend;
        return rend(iterable);
    }
};
} // namespace detail
// Wraps any iterable in a reverse view for range-for loops.
// NOTE(review): binding an rvalue container here leaves the adapter holding
// a dangling reference once the full expression ends — callers appear
// expected to pass lvalues; confirm at call sites.
template<typename T>
constexpr detail::ReverseAdapter<T> Reverse(T&& iterable) {
return detail::ReverseAdapter<T>{iterable};
}
} // namespace Dynarmic::Common

View file

@ -12,9 +12,10 @@
# include <llvm-c/Target.h>
#endif
#include "dynarmic/common/assert.h"
#include "dynarmic/common/cast_util.h"
#include "dynarmic/common/common_types.h"
#include <mcl/assert.hpp>
#include <mcl/bit_cast.hpp>
#include <mcl/stdint.hpp>
#include "dynarmic/common/llvm_disassemble.h"
namespace Dynarmic::Common {
@ -49,7 +50,7 @@ std::string DisassembleX64(const void* begin, const void* end) {
LLVMDisasmDispose(llvm_ctx);
#else
result += fmt::format("(recompile with DYNARMIC_USE_LLVM=ON to disassemble the generated x86_64 code)\n");
result += fmt::format("start: {:016x}, end: {:016x}\n", BitCast<u64>(begin), BitCast<u64>(end));
result += fmt::format("start: {:016x}, end: {:016x}\n", mcl::bit_cast<u64>(begin), mcl::bit_cast<u64>(end));
#endif
return result;

View file

@ -7,7 +7,7 @@
#include <string>
#include "dynarmic/common/common_types.h"
#include <mcl/stdint.hpp>
namespace Dynarmic::Common {

View file

@ -1,15 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2018 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
// Two-level paste so that macro arguments (e.g. __COUNTER__) are expanded
// before token concatenation.
#define CONCATENATE_TOKENS(x, y) CONCATENATE_TOKENS_IMPL(x, y)
#define CONCATENATE_TOKENS_IMPL(x, y) x##y
// Generates a unique identifier with the given prefix. Prefers __COUNTER__
// (unique per expansion) and falls back to __LINE__ where unsupported.
#ifdef __COUNTER__
# define ANONYMOUS_VARIABLE(str) CONCATENATE_TOKENS(str, __COUNTER__)
#else
# define ANONYMOUS_VARIABLE(str) CONCATENATE_TOKENS(str, __LINE__)
#endif

View file

@ -7,7 +7,7 @@
#include <utility>
#include "dynarmic/common/common_types.h"
#include <mcl/stdint.hpp>
namespace Dynarmic::Common {

View file

@ -7,8 +7,9 @@
#include <type_traits>
#include "dynarmic/common/bit_util.h"
#include "dynarmic/common/common_types.h"
#include <mcl/bitsizeof.hpp>
#include <mcl/stdint.hpp>
#include "dynarmic/common/u128.h"
namespace Dynarmic::Safe {
@ -26,7 +27,7 @@ template<typename T>
T LogicalShiftLeft(T value, int shift_amount) {
static_assert(std::is_integral_v<T>);
if (shift_amount >= static_cast<int>(Common::BitSize<T>())) {
if (shift_amount >= static_cast<int>(mcl::bitsizeof<T>)) {
return 0;
}
@ -47,7 +48,7 @@ template<typename T>
T LogicalShiftRight(T value, int shift_amount) {
static_assert(std::is_integral_v<T>);
if (shift_amount >= static_cast<int>(Common::BitSize<T>())) {
if (shift_amount >= static_cast<int>(mcl::bitsizeof<T>)) {
return 0;
}
@ -66,14 +67,14 @@ inline u128 LogicalShiftRight(u128 value, int shift_amount) {
/// Logical right shift of the double-width value formed by (top:bottom),
/// returning the low word: top's low bits shift into bottom's high bits.
template<typename T>
T LogicalShiftRightDouble(T top, T bottom, int shift_amount) {
    return LogicalShiftLeft(top, int(mcl::bitsizeof<T>) - shift_amount) | LogicalShiftRight(bottom, shift_amount);
}
template<typename T>
T ArithmeticShiftLeft(T value, int shift_amount) {
static_assert(std::is_integral_v<T>);
if (shift_amount >= static_cast<int>(Common::BitSize<T>())) {
if (shift_amount >= static_cast<int>(mcl::bitsizeof<T>)) {
return 0;
}
@ -89,8 +90,8 @@ template<typename T>
T ArithmeticShiftRight(T value, int shift_amount) {
static_assert(std::is_integral_v<T>);
if (shift_amount >= static_cast<int>(Common::BitSize<T>())) {
return Common::MostSignificantBit(value) ? ~static_cast<T>(0) : 0;
if (shift_amount >= static_cast<int>(mcl::bitsizeof<T>)) {
return mcl::bit::most_significant_bit(value) ? ~static_cast<T>(0) : 0;
}
if (shift_amount < 0) {
@ -103,7 +104,7 @@ T ArithmeticShiftRight(T value, int shift_amount) {
template<typename T>
T ArithmeticShiftRightDouble(T top, T bottom, int shift_amount) {
return ArithmeticShiftLeft(top, int(Common::BitSize<T>()) - shift_amount) | LogicalShiftRight(bottom, shift_amount);
return ArithmeticShiftLeft(top, int(mcl::bitsizeof<T>) - shift_amount) | LogicalShiftRight(bottom, shift_amount);
}
template<typename T>

View file

@ -1,86 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2018 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#include <exception>
#include <type_traits>
#include <utility>
#include "dynarmic/common/macro_util.h"
namespace Dynarmic::detail {
struct ScopeExitTag {};
struct ScopeFailTag {};
struct ScopeSuccessTag {};
template<typename Function>
class ScopeExit final {
public:
explicit ScopeExit(Function&& fn)
: function(std::move(fn)) {}
~ScopeExit() noexcept {
function();
}
private:
Function function;
};
template<typename Function>
class ScopeFail final {
public:
explicit ScopeFail(Function&& fn)
: function(std::move(fn)), exception_count(std::uncaught_exceptions()) {}
~ScopeFail() noexcept {
if (std::uncaught_exceptions() > exception_count) {
function();
}
}
private:
Function function;
int exception_count;
};
template<typename Function>
class ScopeSuccess final {
public:
explicit ScopeSuccess(Function&& fn)
: function(std::move(fn)), exception_count(std::uncaught_exceptions()) {}
~ScopeSuccess() {
if (std::uncaught_exceptions() <= exception_count) {
function();
}
}
private:
Function function;
int exception_count;
};
// We use ->* here as it has the highest precedence of the operators we can use.
template<typename Function>
auto operator->*(ScopeExitTag, Function&& function) {
return ScopeExit<std::decay_t<Function>>{std::forward<Function>(function)};
}
template<typename Function>
auto operator->*(ScopeFailTag, Function&& function) {
return ScopeFail<std::decay_t<Function>>{std::forward<Function>(function)};
}
template<typename Function>
auto operator->*(ScopeSuccessTag, Function&& function) {
return ScopeSuccess<std::decay_t<Function>>{std::forward<Function>(function)};
}
} // namespace Dynarmic::detail
#define SCOPE_EXIT auto ANONYMOUS_VARIABLE(_SCOPE_EXIT_) = ::Dynarmic::detail::ScopeExitTag{}->*[&]() noexcept
#define SCOPE_FAIL auto ANONYMOUS_VARIABLE(_SCOPE_FAIL_) = ::Dynarmic::detail::ScopeFailTag{}->*[&]() noexcept
#define SCOPE_SUCCESS auto ANONYMOUS_VARIABLE(_SCOPE_FAIL_) = ::Dynarmic::detail::ScopeSuccessTag{}->*[&]()

View file

@ -5,7 +5,8 @@
#include "dynarmic/common/u128.h"
#include "dynarmic/common/common_types.h"
#include <mcl/bitsizeof.hpp>
#include <mcl/stdint.hpp>
namespace Dynarmic {

View file

@ -8,8 +8,9 @@
#include <tuple>
#include <type_traits>
#include "dynarmic/common/bit_util.h"
#include "dynarmic/common/common_types.h"
#include <mcl/bit/bit_field.hpp>
#include <mcl/bitsizeof.hpp>
#include <mcl/stdint.hpp>
namespace Dynarmic {
@ -27,7 +28,7 @@ struct u128 {
/* implicit */ u128(T value)
: lower(value), upper(0) {
static_assert(std::is_integral_v<T>);
static_assert(Common::BitSize<T>() <= Common::BitSize<u64>());
static_assert(mcl::bitsizeof<T> <= mcl::bitsizeof<u64>);
}
u64 lower = 0;
@ -37,14 +38,14 @@ struct u128 {
bool Bit() const {
static_assert(bit_position < 128);
if constexpr (bit_position < 64) {
return Common::Bit<bit_position>(lower);
return mcl::bit::get_bit<bit_position>(lower);
} else {
return Common::Bit<bit_position - 64>(upper);
return mcl::bit::get_bit<bit_position - 64>(upper);
}
}
};
static_assert(Common::BitSize<u128>() == 128);
static_assert(mcl::bitsizeof<u128> == 128);
static_assert(std::is_standard_layout_v<u128>);
static_assert(std::is_trivially_copyable_v<u128>);

View file

@ -1,12 +0,0 @@
/* This file is part of the dynarmic project.
* Copyright (c) 2020 MerryMage
* SPDX-License-Identifier: 0BSD
*/
#pragma once
#if defined(__clang__) || defined(__GNUC__)
# define UNLIKELY(x) __builtin_expect(!!(x), 0)
#else
# define UNLIKELY(x) !!(x)
#endif

View file

@ -7,8 +7,7 @@
#include <Zydis/Zydis.h>
#include <fmt/printf.h>
#include "dynarmic/common/common_types.h"
#include <mcl/stdint.hpp>
namespace Dynarmic::Common {

View file

@ -8,7 +8,7 @@
#include <string>
#include <vector>
#include "dynarmic/common/common_types.h"
#include <mcl/stdint.hpp>
namespace Dynarmic::Common {

View file

@ -7,8 +7,9 @@
#include <optional>
#include "dynarmic/common/bit_util.h"
#include "dynarmic/common/common_types.h"
#include <mcl/bit/bit_field.hpp>
#include <mcl/stdint.hpp>
#include "dynarmic/common/fp/rounding_mode.h"
namespace Dynarmic::A32 {
@ -33,52 +34,52 @@ public:
/// Negative condition flag.
bool N() const {
return Common::Bit<31>(value);
return mcl::bit::get_bit<31>(value);
}
/// Zero condition flag.
bool Z() const {
return Common::Bit<30>(value);
return mcl::bit::get_bit<30>(value);
}
/// Carry condition flag.
bool C() const {
return Common::Bit<29>(value);
return mcl::bit::get_bit<29>(value);
}
/// Overflow condition flag.
bool V() const {
return Common::Bit<28>(value);
return mcl::bit::get_bit<28>(value);
}
/// Cumulative saturation flag.
bool QC() const {
return Common::Bit<27>(value);
return mcl::bit::get_bit<27>(value);
}
/// Alternate half-precision control flag.
bool AHP() const {
return Common::Bit<26>(value);
return mcl::bit::get_bit<26>(value);
}
/// Default NaN mode control bit.
bool DN() const {
return Common::Bit<25>(value);
return mcl::bit::get_bit<25>(value);
}
/// Flush-to-zero mode control bit.
bool FTZ() const {
return Common::Bit<24>(value);
return mcl::bit::get_bit<24>(value);
}
/// Rounding mode control field.
FP::RoundingMode RMode() const {
return static_cast<FP::RoundingMode>(Common::Bits<22, 23>(value));
return static_cast<FP::RoundingMode>(mcl::bit::get_bits<22, 23>(value));
}
/// Indicates the stride of a vector.
std::optional<size_t> Stride() const {
switch (Common::Bits<20, 21>(value)) {
switch (mcl::bit::get_bits<20, 21>(value)) {
case 0b00:
return 1;
case 0b11:
@ -90,67 +91,67 @@ public:
/// Indicates the length of a vector.
size_t Len() const {
return Common::Bits<16, 18>(value) + 1;
return mcl::bit::get_bits<16, 18>(value) + 1;
}
/// Input denormal exception trap enable flag.
bool IDE() const {
return Common::Bit<15>(value);
return mcl::bit::get_bit<15>(value);
}
/// Inexact exception trap enable flag.
bool IXE() const {
return Common::Bit<12>(value);
return mcl::bit::get_bit<12>(value);
}
/// Underflow exception trap enable flag.
bool UFE() const {
return Common::Bit<11>(value);
return mcl::bit::get_bit<11>(value);
}
/// Overflow exception trap enable flag.
bool OFE() const {
return Common::Bit<10>(value);
return mcl::bit::get_bit<10>(value);
}
/// Division by zero exception trap enable flag.
bool DZE() const {
return Common::Bit<9>(value);
return mcl::bit::get_bit<9>(value);
}
/// Invalid operation exception trap enable flag.
bool IOE() const {
return Common::Bit<8>(value);
return mcl::bit::get_bit<8>(value);
}
/// Input denormal cumulative exception bit.
bool IDC() const {
return Common::Bit<7>(value);
return mcl::bit::get_bit<7>(value);
}
/// Inexact cumulative exception bit.
bool IXC() const {
return Common::Bit<4>(value);
return mcl::bit::get_bit<4>(value);
}
/// Underflow cumulative exception bit.
bool UFC() const {
return Common::Bit<3>(value);
return mcl::bit::get_bit<3>(value);
}
/// Overflow cumulative exception bit.
bool OFC() const {
return Common::Bit<2>(value);
return mcl::bit::get_bit<2>(value);
}
/// Division by zero cumulative exception bit.
bool DZC() const {
return Common::Bit<1>(value);
return mcl::bit::get_bit<1>(value);
}
/// Invalid operation cumulative exception bit.
bool IOC() const {
return Common::Bit<0>(value);
return mcl::bit::get_bit<0>(value);
}
/**

View file

@ -5,8 +5,9 @@
#pragma once
#include "dynarmic/common/bit_util.h"
#include "dynarmic/common/common_types.h"
#include <mcl/bit/bit_field.hpp>
#include <mcl/stdint.hpp>
#include "dynarmic/ir/cond.h"
namespace Dynarmic::A32 {
@ -26,22 +27,22 @@ public:
if (value == 0b00000000) {
return IR::Cond::AL;
}
return static_cast<IR::Cond>(Common::Bits<4, 7>(value));
return static_cast<IR::Cond>(mcl::bit::get_bits<4, 7>(value));
}
bool IsInITBlock() const {
return Common::Bits<0, 3>(value) != 0b0000;
return mcl::bit::get_bits<0, 3>(value) != 0b0000;
}
bool IsLastInITBlock() const {
return Common::Bits<0, 3>(value) == 0b1000;
return mcl::bit::get_bits<0, 3>(value) == 0b1000;
}
ITState Advance() const {
if (Common::Bits<0, 2>(value) == 0b000) {
if (mcl::bit::get_bits<0, 2>(value) == 0b000) {
return ITState{0b00000000};
}
return ITState{Common::ModifyBits<0, 4>(value, static_cast<u8>(Common::Bits<0, 4>(value) << 1))};
return ITState{mcl::bit::set_bits<0, 4>(value, static_cast<u8>(mcl::bit::get_bits<0, 4>(value) << 1))};
}
u8 Value() const {

View file

@ -5,8 +5,9 @@
#pragma once
#include "dynarmic/common/bit_util.h"
#include "dynarmic/common/common_types.h"
#include <mcl/bit/bit_field.hpp>
#include <mcl/stdint.hpp>
#include "dynarmic/frontend/A32/ITState.h"
namespace Dynarmic::A32 {
@ -65,52 +66,52 @@ public:
}
bool N() const {
return Common::Bit<31>(value);
return mcl::bit::get_bit<31>(value);
}
void N(bool set) {
value = Common::ModifyBit<31>(value, set);
value = mcl::bit::set_bit<31>(value, set);
}
bool Z() const {
return Common::Bit<30>(value);
return mcl::bit::get_bit<30>(value);
}
void Z(bool set) {
value = Common::ModifyBit<30>(value, set);
value = mcl::bit::set_bit<30>(value, set);
}
bool C() const {
return Common::Bit<29>(value);
return mcl::bit::get_bit<29>(value);
}
void C(bool set) {
value = Common::ModifyBit<29>(value, set);
value = mcl::bit::set_bit<29>(value, set);
}
bool V() const {
return Common::Bit<28>(value);
return mcl::bit::get_bit<28>(value);
}
void V(bool set) {
value = Common::ModifyBit<28>(value, set);
value = mcl::bit::set_bit<28>(value, set);
}
bool Q() const {
return Common::Bit<27>(value);
return mcl::bit::get_bit<27>(value);
}
void Q(bool set) {
value = Common::ModifyBit<27>(value, set);
value = mcl::bit::set_bit<27>(value, set);
}
bool J() const {
return Common::Bit<24>(value);
return mcl::bit::get_bit<24>(value);
}
void J(bool set) {
value = Common::ModifyBit<24>(value, set);
value = mcl::bit::set_bit<24>(value, set);
}
u32 GE() const {
return Common::Bits<16, 19>(value);
return mcl::bit::get_bits<16, 19>(value);
}
void GE(u32 data) {
value = Common::ModifyBits<16, 19>(value, data);
value = mcl::bit::set_bits<16, 19>(value, data);
}
ITState IT() const {
@ -123,45 +124,45 @@ public:
}
bool E() const {
return Common::Bit<9>(value);
return mcl::bit::get_bit<9>(value);
}
void E(bool set) {
value = Common::ModifyBit<9>(value, set);
value = mcl::bit::set_bit<9>(value, set);
}
bool A() const {
return Common::Bit<8>(value);
return mcl::bit::get_bit<8>(value);
}
void A(bool set) {
value = Common::ModifyBit<8>(value, set);
value = mcl::bit::set_bit<8>(value, set);
}
bool I() const {
return Common::Bit<7>(value);
return mcl::bit::get_bit<7>(value);
}
void I(bool set) {
value = Common::ModifyBit<7>(value, set);
value = mcl::bit::set_bit<7>(value, set);
}
bool F() const {
return Common::Bit<6>(value);
return mcl::bit::get_bit<6>(value);
}
void F(bool set) {
value = Common::ModifyBit<6>(value, set);
value = mcl::bit::set_bit<6>(value, set);
}
bool T() const {
return Common::Bit<5>(value);
return mcl::bit::get_bit<5>(value);
}
void T(bool set) {
value = Common::ModifyBit<5>(value, set);
value = mcl::bit::set_bit<5>(value, set);
}
Mode M() const {
return static_cast<Mode>(Common::Bits<0, 4>(value));
return static_cast<Mode>(mcl::bit::get_bits<0, 4>(value));
}
void M(Mode mode) {
value = Common::ModifyBits<0, 4>(value, static_cast<u32>(mode));
value = mcl::bit::set_bits<0, 4>(value, static_cast<u32>(mode));
}
u32 Value() const {

View file

@ -5,7 +5,8 @@
#include "dynarmic/frontend/A32/a32_ir_emitter.h"
#include "dynarmic/common/assert.h"
#include <mcl/assert.hpp>
#include "dynarmic/frontend/A32/a32_types.h"
#include "dynarmic/interface/A32/arch_version.h"
#include "dynarmic/ir/opcodes.h"

View file

@ -7,7 +7,8 @@
#include <utility>
#include "dynarmic/common/common_types.h"
#include <mcl/stdint.hpp>
#include "dynarmic/frontend/A32/a32_location_descriptor.h"
#include "dynarmic/ir/ir_emitter.h"
#include "dynarmic/ir/value.h"

View file

@ -9,7 +9,8 @@
#include <iosfwd>
#include <tuple>
#include "dynarmic/common/common_types.h"
#include <mcl/stdint.hpp>
#include "dynarmic/frontend/A32/FPSCR.h"
#include "dynarmic/frontend/A32/ITState.h"
#include "dynarmic/frontend/A32/PSR.h"

View file

@ -8,7 +8,7 @@
#include <array>
#include <ostream>
#include "dynarmic/common/bit_util.h"
#include <mcl/bit/bit_field.hpp>
namespace Dynarmic::A32 {
@ -46,7 +46,7 @@ std::string RegListToString(RegList reg_list) {
std::string ret;
bool first_reg = true;
for (size_t i = 0; i < 16; i++) {
if (Common::Bit(i, reg_list)) {
if (mcl::bit::get_bit(i, reg_list)) {
if (!first_reg) {
ret += ", ";
}

View file

@ -9,8 +9,9 @@
#include <string>
#include <utility>
#include "dynarmic/common/assert.h"
#include "dynarmic/common/common_types.h"
#include <mcl/assert.hpp>
#include <mcl/stdint.hpp>
#include "dynarmic/interface/A32/coprocessor_util.h"
#include "dynarmic/ir/cond.h"

View file

@ -12,8 +12,9 @@
#include <optional>
#include <vector>
#include "dynarmic/common/bit_util.h"
#include "dynarmic/common/common_types.h"
#include <mcl/bit/bit_count.hpp>
#include <mcl/stdint.hpp>
#include "dynarmic/frontend/decoder/decoder_detail.h"
#include "dynarmic/frontend/decoder/matcher.h"
@ -34,7 +35,7 @@ std::vector<ArmMatcher<V>> GetArmDecodeTable() {
// If a matcher has more bits in its mask it is more specific, so it should come first.
std::stable_sort(table.begin(), table.end(), [](const auto& matcher1, const auto& matcher2) {
return Common::BitCount(matcher1.GetMask()) > Common::BitCount(matcher2.GetMask());
return mcl::bit::count_ones(matcher1.GetMask()) > mcl::bit::count_ones(matcher2.GetMask());
});
return table;

View file

@ -11,8 +11,9 @@
#include <set>
#include <vector>
#include "dynarmic/common/bit_util.h"
#include "dynarmic/common/common_types.h"
#include <mcl/bit/bit_count.hpp>
#include <mcl/stdint.hpp>
#include "dynarmic/frontend/decoder/decoder_detail.h"
#include "dynarmic/frontend/decoder/matcher.h"
@ -58,7 +59,7 @@ std::vector<ASIMDMatcher<V>> GetASIMDDecodeTable() {
// If a matcher has more bits in its mask it is more specific, so it should come first.
std::stable_sort(sort_begin, sort_end, [](const auto& matcher1, const auto& matcher2) {
return Common::BitCount(matcher1.GetMask()) > Common::BitCount(matcher2.GetMask());
return mcl::bit::count_ones(matcher1.GetMask()) > mcl::bit::count_ones(matcher2.GetMask());
});
return table;

View file

@ -10,7 +10,8 @@
#include <optional>
#include <vector>
#include "dynarmic/common/common_types.h"
#include <mcl/stdint.hpp>
#include "dynarmic/frontend/decoder/decoder_detail.h"
#include "dynarmic/frontend/decoder/matcher.h"

View file

@ -9,7 +9,8 @@
#include <optional>
#include <vector>
#include "dynarmic/common/common_types.h"
#include <mcl/stdint.hpp>
#include "dynarmic/frontend/decoder/decoder_detail.h"
#include "dynarmic/frontend/decoder/matcher.h"

View file

@ -10,7 +10,8 @@
#include <optional>
#include <vector>
#include "dynarmic/common/common_types.h"
#include <mcl/stdint.hpp>
#include "dynarmic/frontend/decoder/decoder_detail.h"
#include "dynarmic/frontend/decoder/matcher.h"

Some files were not shown because too many files have changed in this diff Show more