cast_util: Add FptrCast

Reduce unnecessary type duplication when casting a lambda to a function pointer.
MerryMage 2020-04-04 10:57:41 +01:00
parent fe583aa076
commit bd88286b21
7 changed files with 38 additions and 26 deletions
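The change is mechanical at each call site: previously the lambda's parameter list had to be spelled out twice, once in the static_cast target type and once in the lambda itself, while FptrCast deduces the function-pointer type from the lambda. Taken from the first hunk below, the before/after shape of every call site in this commit:

    // Before: the parameter types appear twice.
    code.CallFunction(static_cast<void(*)(A32::Jit*)>([](A32::Jit* jit) {
        jit->ClearCache();
    }));

    // After: CallLambda/FptrCast deduce the function-pointer type.
    code.CallLambda([](A32::Jit* jit) { jit->ClearCache(); });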

src/backend/x64/a32_emit_x64.cpp

@@ -647,9 +647,7 @@ void A32EmitX64::EmitA32InstructionSynchronizationBarrier(A32EmitContext& ctx, IR::Inst* inst) {
 
     ctx.reg_alloc.HostCall(nullptr);
     code.mov(code.ABI_PARAM1, reinterpret_cast<u64>(jit_interface));
-    code.CallFunction(static_cast<void(*)(A32::Jit*)>([](A32::Jit* jit) {
-        jit->ClearCache();
-    }));
+    code.CallLambda([](A32::Jit* jit) { jit->ClearCache(); });
 }
 
 void A32EmitX64::EmitA32BXWritePC(A32EmitContext& ctx, IR::Inst* inst) {

src/backend/x64/a64_emit_x64.cpp

@@ -619,9 +619,7 @@ void A64EmitX64::EmitA64InstructionSynchronizationBarrier(A64EmitContext& ctx, IR::Inst* inst) {
 
     ctx.reg_alloc.HostCall(nullptr);
    code.mov(code.ABI_PARAM1, reinterpret_cast<u64>(jit_interface));
-    code.CallFunction(static_cast<void(*)(A64::Jit*)>([](A64::Jit* jit) {
-        jit->ClearCache();
-    }));
+    code.CallLambda([](A64::Jit* jit) { jit->ClearCache(); });
 }
 
 void A64EmitX64::EmitA64GetCNTFRQ(A64EmitContext& ctx, IR::Inst* inst) {
@@ -691,11 +689,11 @@ void A64EmitX64::EmitA64SetExclusive(A64EmitContext& ctx, IR::Inst* inst) {
 
         code.mov(code.byte[r15 + offsetof(A64JitState, exclusive_state)], u8(1));
         code.mov(code.ABI_PARAM1, reinterpret_cast<u64>(&conf));
-        code.CallFunction(static_cast<void(*)(A64::UserConfig&, u64, u8)>(
+        code.CallLambda(
             [](A64::UserConfig& conf, u64 vaddr, u8 size) {
                 conf.global_monitor->Mark(conf.processor_id, vaddr, size);
             }
-        ));
+        );
 
         return;
     }
@@ -1025,52 +1023,52 @@ void A64EmitX64::EmitExclusiveWrite(A64EmitContext& ctx, IR::Inst* inst, size_t bitsize) {
         code.mov(code.ABI_PARAM1, reinterpret_cast<u64>(&conf));
         switch (bitsize) {
         case 8:
-            code.CallFunction(static_cast<u32(*)(A64::UserConfig&, u64, u8)>(
+            code.CallLambda(
                 [](A64::UserConfig& conf, u64 vaddr, u8 value) -> u32 {
                     return conf.global_monitor->DoExclusiveOperation(conf.processor_id, vaddr, 1, [&]{
                         conf.callbacks->MemoryWrite8(vaddr, value);
                     }) ? 0 : 1;
                 }
-            ));
+            );
             break;
         case 16:
-            code.CallFunction(static_cast<u32(*)(A64::UserConfig&, u64, u16)>(
+            code.CallLambda(
                 [](A64::UserConfig& conf, u64 vaddr, u16 value) -> u32 {
                     return conf.global_monitor->DoExclusiveOperation(conf.processor_id, vaddr, 2, [&]{
                         conf.callbacks->MemoryWrite16(vaddr, value);
                     }) ? 0 : 1;
                 }
-            ));
+            );
             break;
         case 32:
-            code.CallFunction(static_cast<u32(*)(A64::UserConfig&, u64, u32)>(
+            code.CallLambda(
                 [](A64::UserConfig& conf, u64 vaddr, u32 value) -> u32 {
                     return conf.global_monitor->DoExclusiveOperation(conf.processor_id, vaddr, 4, [&]{
                         conf.callbacks->MemoryWrite32(vaddr, value);
                     }) ? 0 : 1;
                 }
-            ));
+            );
             break;
         case 64:
-            code.CallFunction(static_cast<u32(*)(A64::UserConfig&, u64, u64)>(
+            code.CallLambda(
                 [](A64::UserConfig& conf, u64 vaddr, u64 value) -> u32 {
                     return conf.global_monitor->DoExclusiveOperation(conf.processor_id, vaddr, 8, [&]{
                         conf.callbacks->MemoryWrite64(vaddr, value);
                     }) ? 0 : 1;
                 }
-            ));
+            );
             break;
         case 128:
             code.sub(rsp, 16 + ABI_SHADOW_SPACE);
             code.lea(code.ABI_PARAM3, ptr[rsp + ABI_SHADOW_SPACE]);
             code.movaps(xword[code.ABI_PARAM3], xmm1);
-            code.CallFunction(static_cast<u32(*)(A64::UserConfig&, u64, A64::Vector&)>(
+            code.CallLambda(
                 [](A64::UserConfig& conf, u64 vaddr, A64::Vector& value) -> u32 {
                     return conf.global_monitor->DoExclusiveOperation(conf.processor_id, vaddr, 16, [&]{
                         conf.callbacks->MemoryWrite128(vaddr, value);
                     }) ? 0 : 1;
                 }
-            ));
+            );
             code.add(rsp, 16 + ABI_SHADOW_SPACE);
             break;
         default:

src/backend/x64/block_of_code.h

@@ -16,6 +16,7 @@
 #include "backend/x64/callback.h"
 #include "backend/x64/constant_pool.h"
 #include "backend/x64/jitstate_info.h"
+#include "common/cast_util.h"
 #include "common/common_types.h"
 
 namespace Dynarmic::BackendX64 {
@@ -83,6 +84,12 @@ public:
         }
     }
 
+    /// Code emitter: Calls the lambda. Lambda must not have any captures.
+    template <typename Lambda>
+    void CallLambda(Lambda l) {
+        CallFunction(Common::FptrCast(l));
+    }
+
     Xbyak::Address MConst(const Xbyak::AddressFrame& frame, u64 lower, u64 upper = 0);
 
     /// Far code sits far away from the near code. Execution remains primarily in near code.
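A note on the capture restriction documented above: CallLambda works because C++ gives a capture-less lambda an implicit conversion to the matching function pointer, and that pointer is what the emitter bakes into the generated call. A minimal standalone sketch of the language rule (not part of this commit):

    #include <cassert>

    int main() {
        // Capture-less: converts to a plain function pointer, as FptrCast requires.
        void (*fp)(int&) = [](int& x) { x += 1; };
        int v = 41;
        fp(v);
        assert(v == 42);

        // A capturing lambda carries state and has no such conversion:
        //     int y = 1;
        //     void (*bad)(int&) = [y](int& x) { x += y; };  // does not compile
        return 0;
    }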

src/backend/x64/emit_x64_floating_point.cpp

@@ -20,6 +20,7 @@
 #include "backend/x64/emit_x64.h"
 #include "common/assert.h"
 #include "common/common_types.h"
+#include "common/cast_util.h"
 #include "common/fp/fpcr.h"
 #include "common/fp/fpsr.h"
 #include "common/fp/info.h"
@@ -861,7 +862,7 @@ static void EmitFPRound(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst, size_t fsize) {
         [](auto args) {
             return std::pair{
                 mp::lower_to_tuple_v<decltype(args)>,
-                static_cast<u64(*)(u64, FP::FPSR&, FP::FPCR)>(
+                Common::FptrCast(
                     [](u64 input, FP::FPSR& fpsr, FP::FPCR fpcr) {
                         constexpr auto t = mp::lower_to_tuple_v<decltype(args)>;
                         constexpr size_t fsize = std::get<0>(t);
@@ -1289,7 +1290,7 @@ static void EmitFPToFixed(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
         [](auto args) {
             return std::pair{
                 mp::lower_to_tuple_v<decltype(args)>,
-                static_cast<u64(*)(u64, FP::FPSR&, FP::FPCR)>(
+                Common::FptrCast(
                     [](u64 input, FP::FPSR& fpsr, FP::FPCR fpcr) {
                         constexpr auto t = mp::lower_to_tuple_v<decltype(args)>;
                         constexpr size_t fbits = std::get<0>(t);

src/backend/x64/emit_x64_vector.cpp

@@ -4050,7 +4050,7 @@ void EmitX64::EmitVectorTableLookup(EmitContext& ctx, IR::Inst* inst) {
 
     code.movaps(xword[code.ABI_PARAM2], defaults);
     code.movaps(xword[code.ABI_PARAM3], indicies);
-    code.CallFunction(static_cast<void(*)(const VectorArray<u8>*, VectorArray<u8>&, const VectorArray<u8>&, size_t)>(
+    code.CallLambda(
         [](const VectorArray<u8>* table, VectorArray<u8>& result, const VectorArray<u8>& indicies, size_t table_size) {
             for (size_t i = 0; i < result.size(); ++i) {
                 const size_t index = indicies[i] / table[0].size();
@@ -4060,7 +4060,7 @@ void EmitX64::EmitVectorTableLookup(EmitContext& ctx, IR::Inst* inst) {
                 }
             }
         }
-    ));
+    );
 
     code.movaps(result, xword[rsp + ABI_SHADOW_SPACE + (table_size + 0) * 16]);
     code.add(rsp, stack_space + ABI_SHADOW_SPACE);

src/backend/x64/emit_x64_vector_floating_point.cpp

@@ -1005,7 +1005,7 @@ static void EmitFPVectorMulX(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
     FCODE(mulp)(result, xmm_b);
     FCODE(cmpunordp)(nan_mask, result);
 
-    const auto nan_handler = static_cast<void(*)(std::array<VectorArray<FPT>, 3>&, FP::FPCR)>(
+    const auto nan_handler = Common::FptrCast(
         [](std::array<VectorArray<FPT>, 3>& values, FP::FPCR fpcr) {
             VectorArray<FPT>& result = values[0];
             for (size_t elementi = 0; elementi < result.size(); ++elementi) {
@@ -1217,7 +1217,7 @@ void EmitFPVectorRoundInt(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
         [](auto arg) {
             return std::pair{
                 mp::lower_to_tuple_v<decltype(arg)>,
-                static_cast<void(*)(VectorArray<FPT>&, const VectorArray<FPT>&, FP::FPCR, FP::FPSR&)>(
+                Common::FptrCast(
                     [](VectorArray<FPT>& output, const VectorArray<FPT>& input, FP::FPCR fpcr, FP::FPSR& fpsr) {
                         constexpr auto t = mp::lower_to_tuple_v<decltype(arg)>;
                         constexpr FP::RoundingMode rounding_mode = std::get<0>(t);
@@ -1476,7 +1476,7 @@ void EmitFPVectorToFixed(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
         [](auto arg) {
             return std::pair{
                 mp::lower_to_tuple_v<decltype(arg)>,
-                static_cast<void(*)(VectorArray<FPT>&, const VectorArray<FPT>&, FP::FPCR, FP::FPSR&)>(
+                Common::FptrCast(
                     [](VectorArray<FPT>& output, const VectorArray<FPT>& input, FP::FPCR fpcr, FP::FPSR& fpsr) {
                         constexpr auto t = mp::lower_to_tuple_v<decltype(arg)>;
                         constexpr size_t fbits = std::get<0>(t);

src/common/cast_util.h

@@ -9,6 +9,8 @@
 #include <cstring>
 #include <type_traits>
 
+#include <mp/traits/function_info.h>
+
 namespace Dynarmic::Common {
 
 /// Reinterpret objects of one type as another by bit-casting between object representations.
@@ -35,4 +37,10 @@ inline Dest BitCastPointee(const SourcePtr source) noexcept {
     return reinterpret_cast<Dest&>(dest);
 }
 
+/// Cast a lambda into an equivalent function pointer.
+template <class Function>
+inline auto FptrCast(Function f) noexcept {
+    return static_cast<mp::equivalent_function_type<Function>*>(f);
+}
+
 } // namespace Dynarmic::Common
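For reference, mp::equivalent_function_type comes from the mp library's <mp/traits/function_info.h>: given a callable type, it names the plain function type of that callable's operator(). A rough sketch of how such a trait can be written, using hypothetical names (this is an illustration of the technique, not mp's implementation):

    // Illustrative trait: recover R(Args...) from a capture-less lambda's
    // const call operator. All *_sketch names below are hypothetical.
    template <class F>
    struct function_info_sketch : function_info_sketch<decltype(&F::operator())> {};

    template <class C, class R, class... Args>
    struct function_info_sketch<R (C::*)(Args...) const> {
        using equivalent_function_type = R(Args...);
    };

    template <class Function>
    using equivalent_function_type_sketch =
        typename function_info_sketch<Function>::equivalent_function_type;

    template <class Function>
    inline auto FptrCastSketch(Function f) noexcept {
        // The static_cast invokes the lambda's conversion to function pointer.
        return static_cast<equivalent_function_type_sketch<Function>*>(f);
    }

    int main() {
        auto* add = FptrCastSketch([](int a, int b) { return a + b; });  // int (*)(int, int)
        return add(40, 2) == 42 ? 0 : 1;
    }

Writing the trait once and deducing the pointer type at each call site is exactly what removes the duplicated signatures seen throughout the hunks above.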