From eba3a06d8063a1a0c33172409ef321176763ff16 Mon Sep 17 00:00:00 2001
From: Lioncash
Date: Wed, 24 Aug 2016 08:58:32 -0400
Subject: [PATCH] frontend: Introduce FPSCR register helper class

Encapsulates all of the FPSCR state.
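
A minimal usage sketch (illustrative only; 0x03000000 is an arbitrary
example value that sets just the DN and FTZ bits):

    Arm::FPSCR fpscr = 0x03000000;               // implicit conversion from u32
    bool ftz = fpscr.FTZ();                      // true: bit 24 is set
    bool dn = fpscr.DN();                        // true: bit 25 is set
    Arm::FPSCR::RoundingMode rm = fpscr.RMode(); // RoundingMode::ToNearest
    bool runfast = fpscr.InRunFastMode();        // true: DN+FTZ set, traps clear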
---
 src/backend_x64/emit_x64.cpp                  |  52 ++---
 src/frontend/arm/FPSCR.h                      | 188 ++++++++++++++++++
 src/frontend/arm_types.h                      |  27 +--
 src/frontend/ir/basic_block.cpp               |   2 +-
 src/frontend/translate/translate_arm/vfp2.cpp |  26 +--
 5 files changed, 236 insertions(+), 59 deletions(-)
 create mode 100644 src/frontend/arm/FPSCR.h

diff --git a/src/backend_x64/emit_x64.cpp b/src/backend_x64/emit_x64.cpp
index cf9598a0..f2479b91 100644
--- a/src/backend_x64/emit_x64.cpp
+++ b/src/backend_x64/emit_x64.cpp
@@ -1226,15 +1226,15 @@ static void FPThreeOp32(BlockOfCode* code, RegAlloc& reg_alloc, IR::Block& block
     X64Reg operand = reg_alloc.UseRegister(b, any_xmm);
     X64Reg gpr_scratch = reg_alloc.ScratchRegister(any_gpr);
 
-    if (block.location.FPSCR_FTZ()) {
+    if (block.location.FPSCR().FTZ()) {
         DenormalsAreZero32(code, result, gpr_scratch);
         DenormalsAreZero32(code, operand, gpr_scratch);
     }
     (code->*fn)(result, R(operand));
-    if (block.location.FPSCR_FTZ()) {
+    if (block.location.FPSCR().FTZ()) {
         FlushToZero32(code, result, gpr_scratch);
     }
-    if (block.location.FPSCR_DN()) {
+    if (block.location.FPSCR().DN()) {
         DefaultNaN32(code, result);
     }
 }
@@ -1247,15 +1247,15 @@ static void FPThreeOp64(BlockOfCode* code, RegAlloc& reg_alloc, IR::Block& block
     X64Reg operand = reg_alloc.UseRegister(b, any_xmm);
     X64Reg gpr_scratch = reg_alloc.ScratchRegister(any_gpr);
 
-    if (block.location.FPSCR_FTZ()) {
+    if (block.location.FPSCR().FTZ()) {
         DenormalsAreZero64(code, result, gpr_scratch);
         DenormalsAreZero64(code, operand, gpr_scratch);
     }
     (code->*fn)(result, R(operand));
-    if (block.location.FPSCR_FTZ()) {
+    if (block.location.FPSCR().FTZ()) {
         FlushToZero64(code, result, gpr_scratch);
     }
-    if (block.location.FPSCR_DN()) {
+    if (block.location.FPSCR().DN()) {
         DefaultNaN64(code, result);
     }
 }
@@ -1266,14 +1266,14 @@ static void FPTwoOp32(BlockOfCode* code, RegAlloc& reg_alloc, IR::Block& block,
     X64Reg result = reg_alloc.UseDefRegister(a, inst, any_xmm);
     X64Reg gpr_scratch = reg_alloc.ScratchRegister(any_gpr);
 
-    if (block.location.FPSCR_FTZ()) {
+    if (block.location.FPSCR().FTZ()) {
         DenormalsAreZero32(code, result, gpr_scratch);
     }
     (code->*fn)(result, R(result));
-    if (block.location.FPSCR_FTZ()) {
+    if (block.location.FPSCR().FTZ()) {
         FlushToZero32(code, result, gpr_scratch);
     }
-    if (block.location.FPSCR_DN()) {
+    if (block.location.FPSCR().DN()) {
         DefaultNaN32(code, result);
     }
 }
@@ -1284,14 +1284,14 @@ static void FPTwoOp64(BlockOfCode* code, RegAlloc& reg_alloc, IR::Block& block,
     X64Reg result = reg_alloc.UseDefRegister(a, inst, any_xmm);
     X64Reg gpr_scratch = reg_alloc.ScratchRegister(any_gpr);
 
-    if (block.location.FPSCR_FTZ()) {
+    if (block.location.FPSCR().FTZ()) {
         DenormalsAreZero64(code, result, gpr_scratch);
     }
     (code->*fn)(result, R(result));
-    if (block.location.FPSCR_FTZ()) {
+    if (block.location.FPSCR().FTZ()) {
         FlushToZero64(code, result, gpr_scratch);
     }
-    if (block.location.FPSCR_DN()) {
+    if (block.location.FPSCR().DN()) {
         DefaultNaN64(code, result);
     }
 }
@@ -1402,14 +1402,14 @@ void EmitX64::EmitFPSingleToDouble(IR::Block& block, IR::Inst* inst) {
     X64Reg result = reg_alloc.UseDefRegister(a, inst, any_xmm);
     X64Reg gpr_scratch = reg_alloc.ScratchRegister(any_gpr);
 
-    if (block.location.FPSCR_FTZ()) {
+    if (block.location.FPSCR().FTZ()) {
         DenormalsAreZero32(code, result, gpr_scratch);
     }
     code->CVTSS2SD(result, R(result));
-    if (block.location.FPSCR_FTZ()) {
+    if (block.location.FPSCR().FTZ()) {
         FlushToZero64(code, result, gpr_scratch);
     }
-    if (block.location.FPSCR_DN()) {
+    if (block.location.FPSCR().DN()) {
         DefaultNaN64(code, result);
     }
 }
@@ -1420,14 +1420,14 @@ void EmitX64::EmitFPDoubleToSingle(IR::Block& block, IR::Inst* inst) {
     X64Reg result = reg_alloc.UseDefRegister(a, inst, any_xmm);
     X64Reg gpr_scratch = reg_alloc.ScratchRegister(any_gpr);
 
-    if (block.location.FPSCR_FTZ()) {
+    if (block.location.FPSCR().FTZ()) {
         DenormalsAreZero64(code, result, gpr_scratch);
     }
     code->CVTSD2SS(result, R(result));
-    if (block.location.FPSCR_FTZ()) {
+    if (block.location.FPSCR().FTZ()) {
         FlushToZero32(code, result, gpr_scratch);
     }
-    if (block.location.FPSCR_DN()) {
+    if (block.location.FPSCR().DN()) {
         DefaultNaN32(code, result);
     }
 }
@@ -1443,7 +1443,7 @@ void EmitX64::EmitFPSingleToS32(IR::Block& block, IR::Inst* inst) {
     // ARM saturates on conversion; this differs from x64 which returns a sentinel value.
     // Conversion to double is lossless, and allows for clamping.
 
-    if (block.location.FPSCR_FTZ()) {
+    if (block.location.FPSCR().FTZ()) {
         DenormalsAreZero32(code, from, gpr_scratch);
     }
     code->CVTSS2SD(from, R(from));
@@ -1481,8 +1481,8 @@ void EmitX64::EmitFPSingleToU32(IR::Block& block, IR::Inst* inst) {
     //
     // FIXME: Inexact exception not correctly signalled with the below code
 
-    if (block.location.FPSCR_RMode() != Arm::FPRoundingMode::RoundTowardsZero && !round_towards_zero) {
-        if (block.location.FPSCR_FTZ()) {
+    if (block.location.FPSCR().RMode() != Arm::FPSCR::RoundingMode::TowardsZero && !round_towards_zero) {
+        if (block.location.FPSCR().FTZ()) {
             DenormalsAreZero32(code, from, gpr_scratch);
         }
         code->CVTSS2SD(from, R(from));
@@ -1503,7 +1503,7 @@
         X64Reg xmm_mask = reg_alloc.ScratchRegister(any_xmm);
         X64Reg gpr_mask = reg_alloc.ScratchRegister(any_gpr);
 
-        if (block.location.FPSCR_FTZ()) {
+        if (block.location.FPSCR().FTZ()) {
            DenormalsAreZero32(code, from, gpr_scratch);
         }
         code->CVTSS2SD(from, R(from));
@@ -1539,7 +1539,7 @@ void EmitX64::EmitFPDoubleToS32(IR::Block& block, IR::Inst* inst) {
 
     // ARM saturates on conversion; this differs from x64 which returns a sentinel value.
 
-    if (block.location.FPSCR_FTZ()) {
+    if (block.location.FPSCR().FTZ()) {
         DenormalsAreZero64(code, from, gpr_scratch);
     }
     // First time is to set flags
@@ -1574,8 +1574,8 @@ void EmitX64::EmitFPDoubleToU32(IR::Block& block, IR::Inst* inst) {
     // TODO: Use VCVTPD2UDQ when AVX512VL is available.
     // FIXME: Inexact exception not correctly signalled with the below code
 
-    if (block.location.FPSCR_RMode() != Arm::FPRoundingMode::RoundTowardsZero && !round_towards_zero) {
-        if (block.location.FPSCR_FTZ()) {
+    if (block.location.FPSCR().RMode() != Arm::FPSCR::RoundingMode::TowardsZero && !round_towards_zero) {
+        if (block.location.FPSCR().FTZ()) {
            DenormalsAreZero64(code, from, gpr_scratch);
         }
         ZeroIfNaN64(code, from);
@@ -1595,7 +1595,7 @@
         X64Reg xmm_mask = reg_alloc.ScratchRegister(any_xmm);
         X64Reg gpr_mask = reg_alloc.ScratchRegister(any_gpr);
 
-        if (block.location.FPSCR_FTZ()) {
+        if (block.location.FPSCR().FTZ()) {
            DenormalsAreZero64(code, from, gpr_scratch);
         }
         ZeroIfNaN64(code, from);
diff --git a/src/frontend/arm/FPSCR.h b/src/frontend/arm/FPSCR.h
new file mode 100644
index 00000000..0468bb25
--- /dev/null
+++ b/src/frontend/arm/FPSCR.h
@@ -0,0 +1,188 @@
+/* This file is part of the dynarmic project.
+ * Copyright (c) 2016 MerryMage
+ * This software may be used and distributed according to the terms of the GNU
+ * General Public License version 2 or any later version.
+ */
+
+#pragma once
+
+#include "common/bit_util.h"
+#include "common/common_types.h"
+
+namespace Dynarmic {
+namespace Arm {
+
+/**
+ * Representation of the Floating-Point Status and Control Register.
+ */
+class FPSCR final
+{
+public:
+    enum class RoundingMode {
+        ToNearest,
+        TowardsPlusInfinity,
+        TowardsMinusInfinity,
+        TowardsZero
+    };
+
+    FPSCR() = default;
+    FPSCR(const FPSCR&) = default;
+    FPSCR(FPSCR&&) = default;
+    /* implicit */ FPSCR(u32 data) : value{data} {}
+
+    FPSCR& operator=(const FPSCR&) = default;
+    FPSCR& operator=(FPSCR&&) = default;
+    FPSCR& operator=(u32 data) {
+        value = data;
+        return *this;
+    }
+
+    /// Negative condition flag.
+    bool N() const {
+        return Common::Bit<31>(value);
+    }
+
+    /// Zero condition flag.
+    bool Z() const {
+        return Common::Bit<30>(value);
+    }
+
+    /// Carry condition flag.
+    bool C() const {
+        return Common::Bit<29>(value);
+    }
+
+    /// Overflow condition flag.
+    bool V() const {
+        return Common::Bit<28>(value);
+    }
+
+    /// Cumulative saturation flag.
+    bool QC() const {
+        return Common::Bit<27>(value);
+    }
+
+    /// Alternate half-precision control flag.
+    bool AHP() const {
+        return Common::Bit<26>(value);
+    }
+
+    /// Default NaN mode control bit.
+    bool DN() const {
+        return Common::Bit<25>(value);
+    }
+
+    /// Flush-to-zero mode control bit.
+    bool FTZ() const {
+        return Common::Bit<24>(value);
+    }
+
+    /// Rounding mode control field.
+    RoundingMode RMode() const {
+        return static_cast<RoundingMode>(Common::Bits<22, 23>(value));
+    }
+
+    /// Indicates the stride of a vector.
+    u32 Stride() const {
+        return Common::Bits<20, 21>(value) + 1;
+    }
+
+    /// Indicates the length of a vector.
+    u32 Len() const {
+        return Common::Bits<16, 18>(value) + 1;
+    }
+
+    /// Input denormal exception trap enable flag.
+    bool IDE() const {
+        return Common::Bit<15>(value);
+    }
+
+    /// Inexact exception trap enable flag.
+    bool IXE() const {
+        return Common::Bit<12>(value);
+    }
+
+    /// Underflow exception trap enable flag.
+    bool UFE() const {
+        return Common::Bit<11>(value);
+    }
+
+    /// Overflow exception trap enable flag.
+    bool OFE() const {
+        return Common::Bit<10>(value);
+    }
+
+    /// Division by zero exception trap enable flag.
+    bool DZE() const {
+        return Common::Bit<9>(value);
+    }
+
+    /// Invalid operation exception trap enable flag.
+    bool IOE() const {
+        return Common::Bit<8>(value);
+    }
+
+    /// Input denormal cumulative exception bit.
+    bool IDC() const {
+        return Common::Bit<7>(value);
+    }
+
+    /// Inexact cumulative exception bit.
+    bool IXC() const {
+        return Common::Bit<4>(value);
+    }
+
+    /// Underflow cumulative exception bit.
+    bool UFC() const {
+        return Common::Bit<3>(value);
+    }
+
+    /// Overflow cumulative exception bit.
+    bool OFC() const {
+        return Common::Bit<2>(value);
+    }
+
+    /// Division by zero cumulative exception bit.
+    bool DZC() const {
+        return Common::Bit<1>(value);
+    }
+
+    /// Invalid operation cumulative exception bit.
+    bool IOC() const {
+        return Common::Bit<0>(value);
+    }
+
+    /**
+     * Whether or not the FPSCR indicates RunFast mode.
+     *
+     * RunFast mode is enabled when:
+     *   - Flush-to-zero is enabled.
+     *   - Default NaNs are enabled.
+     *   - All exception enable bits are cleared.
+     */
+    bool InRunFastMode() const {
+        constexpr u32 mask = 0x03001F00;
+        constexpr u32 expected = 0x03000000;
+
+        return (value & mask) == expected;
+    }
+
+    /// Gets the underlying raw value within the FPSCR.
+    u32 Value() const {
+        return value;
+    }
+
+private:
+    u32 value = 0;
+};
+
+inline bool operator==(FPSCR lhs, FPSCR rhs) {
+    return lhs.Value() == rhs.Value();
+}
+
+inline bool operator!=(FPSCR lhs, FPSCR rhs) {
+    return !operator==(lhs, rhs);
+}
+
+} // namespace Arm
+} // namespace Dynarmic
diff --git a/src/frontend/arm_types.h b/src/frontend/arm_types.h
index 8f7207f9..32da1c33 100644
--- a/src/frontend/arm_types.h
+++ b/src/frontend/arm_types.h
@@ -13,6 +13,7 @@
 #include "common/assert.h"
 #include "common/bit_util.h"
 #include "common/common_types.h"
+#include "frontend/arm/FPSCR.h"
 
 namespace Dynarmic {
 namespace Arm {
@@ -64,13 +65,6 @@ enum class SignExtendRotation {
     ROR_24  ///< ROR #24
 };
 
-enum class FPRoundingMode {
-    RoundToNearest,
-    RoundTowardsPositiveInfinity,
-    RoundTowardsNegativeInfinity,
-    RoundTowardsZero,
-};
-
 /**
  * LocationDescriptor describes the location of a basic block.
  * The location is not solely based on the PC because other flags influence the way
@@ -80,18 +74,13 @@ enum class FPRoundingMode {
 struct LocationDescriptor {
     static constexpr u32 FPSCR_MODE_MASK = 0x03F79F00;
 
-    LocationDescriptor(u32 arm_pc, bool tflag, bool eflag, u32 fpscr)
-        : arm_pc(arm_pc), tflag(tflag), eflag(eflag), fpscr(fpscr & FPSCR_MODE_MASK) {}
+    LocationDescriptor(u32 arm_pc, bool tflag, bool eflag, FPSCR fpscr)
+        : arm_pc(arm_pc), tflag(tflag), eflag(eflag), fpscr(fpscr.Value() & FPSCR_MODE_MASK) {}
 
     u32 PC() const { return arm_pc; }
     bool TFlag() const { return tflag; }
     bool EFlag() const { return eflag; }
-    u32 FPSCR() const { return fpscr; }
-    bool FPSCR_FTZ() const { return Common::Bit<24>(fpscr); }
-    bool FPSCR_DN() const { return Common::Bit<25>(fpscr); }
-    u32 FPSCR_Len() const { return Common::Bits<16, 18>(fpscr) + 1; }
-    u32 FPSCR_Stride() const { return Common::Bits<20, 21>(fpscr) + 1; }
-    FPRoundingMode FPSCR_RMode() const { return static_cast<FPRoundingMode>(Common::Bits<22, 23>(fpscr)); }
+    Arm::FPSCR FPSCR() const { return fpscr; }
 
     bool operator == (const LocationDescriptor& o) const {
         return std::tie(arm_pc, tflag, eflag, fpscr) == std::tie(o.arm_pc, o.tflag, o.eflag, o.fpscr);
@@ -121,7 +110,7 @@
         // This value MUST BE UNIQUE.
         // This calculation has to match up with EmitX64::EmitTerminalPopRSBHint
         u64 pc_u64 = u64(arm_pc);
-        u64 fpscr_u64 = u64(fpscr) << 32;
+        u64 fpscr_u64 = u64(fpscr.Value()) << 32;
        u64 t_u64 = tflag ? (1ull << 35) : 0;
        u64 e_u64 = eflag ? (1ull << 39) : 0;
        return pc_u64 | fpscr_u64 | t_u64 | e_u64;
@@ -129,9 +118,9 @@
 
 private:
     u32 arm_pc;
-    bool tflag; ///< Thumb / ARM
-    bool eflag; ///< Big / Little Endian
-    u32 fpscr; ///< Floating point status control register
+    bool tflag;       ///< Thumb / ARM
+    bool eflag;       ///< Big / Little Endian
+    Arm::FPSCR fpscr; ///< Floating point status control register
 };
 
 struct LocationDescriptorHash {
diff --git a/src/frontend/ir/basic_block.cpp b/src/frontend/ir/basic_block.cpp
index 383d95fb..4c227aeb 100644
--- a/src/frontend/ir/basic_block.cpp
+++ b/src/frontend/ir/basic_block.cpp
@@ -21,7 +21,7 @@ std::string DumpBlock(const IR::Block& block) {
                                        loc.PC(),
                                        loc.TFlag() ? "T" : "!T",
                                        loc.EFlag() ? "E" : "!E",
"E" : "!E", - loc.FPSCR()); + loc.FPSCR().Value()); }; ret += Common::StringFromFormat("Block: location=%s\n", loc_to_string(block.location).c_str()); diff --git a/src/frontend/translate/translate_arm/vfp2.cpp b/src/frontend/translate/translate_arm/vfp2.cpp index 43ca3d13..b7cee0d6 100644 --- a/src/frontend/translate/translate_arm/vfp2.cpp +++ b/src/frontend/translate/translate_arm/vfp2.cpp @@ -18,7 +18,7 @@ static ExtReg ToExtReg(bool sz, size_t base, bool bit) { } bool ArmTranslatorVisitor::vfp2_VADD(Cond cond, bool D, size_t Vn, size_t Vd, bool sz, bool N, bool M, size_t Vm) { - if (ir.current_location.FPSCR_Len() != 1 || ir.current_location.FPSCR_Stride() != 1) + if (ir.current_location.FPSCR().Len() != 1 || ir.current_location.FPSCR().Stride() != 1) return InterpretThisInstruction(); // TODO: Vectorised floating point instructions ExtReg d = ToExtReg(sz, Vd, D); @@ -37,7 +37,7 @@ bool ArmTranslatorVisitor::vfp2_VADD(Cond cond, bool D, size_t Vn, size_t Vd, bo } bool ArmTranslatorVisitor::vfp2_VSUB(Cond cond, bool D, size_t Vn, size_t Vd, bool sz, bool N, bool M, size_t Vm) { - if (ir.current_location.FPSCR_Len() != 1 || ir.current_location.FPSCR_Stride() != 1) + if (ir.current_location.FPSCR().Len() != 1 || ir.current_location.FPSCR().Stride() != 1) return InterpretThisInstruction(); // TODO: Vectorised floating point instructions ExtReg d = ToExtReg(sz, Vd, D); @@ -56,7 +56,7 @@ bool ArmTranslatorVisitor::vfp2_VSUB(Cond cond, bool D, size_t Vn, size_t Vd, bo } bool ArmTranslatorVisitor::vfp2_VMUL(Cond cond, bool D, size_t Vn, size_t Vd, bool sz, bool N, bool M, size_t Vm) { - if (ir.current_location.FPSCR_Len() != 1 || ir.current_location.FPSCR_Stride() != 1) + if (ir.current_location.FPSCR().Len() != 1 || ir.current_location.FPSCR().Stride() != 1) return InterpretThisInstruction(); // TODO: Vectorised floating point instructions ExtReg d = ToExtReg(sz, Vd, D); @@ -75,7 +75,7 @@ bool ArmTranslatorVisitor::vfp2_VMUL(Cond cond, bool D, size_t Vn, size_t Vd, bo } bool ArmTranslatorVisitor::vfp2_VMLA(Cond cond, bool D, size_t Vn, size_t Vd, bool sz, bool N, bool M, size_t Vm) { - if (ir.current_location.FPSCR_Len() != 1 || ir.current_location.FPSCR_Stride() != 1) + if (ir.current_location.FPSCR().Len() != 1 || ir.current_location.FPSCR().Stride() != 1) return InterpretThisInstruction(); // TODO: Vectorised floating point instructions ExtReg d = ToExtReg(sz, Vd, D); @@ -95,7 +95,7 @@ bool ArmTranslatorVisitor::vfp2_VMLA(Cond cond, bool D, size_t Vn, size_t Vd, bo } bool ArmTranslatorVisitor::vfp2_VMLS(Cond cond, bool D, size_t Vn, size_t Vd, bool sz, bool N, bool M, size_t Vm) { - if (ir.current_location.FPSCR_Len() != 1 || ir.current_location.FPSCR_Stride() != 1) + if (ir.current_location.FPSCR().Len() != 1 || ir.current_location.FPSCR().Stride() != 1) return InterpretThisInstruction(); // TODO: Vectorised floating point instructions ExtReg d = ToExtReg(sz, Vd, D); @@ -115,7 +115,7 @@ bool ArmTranslatorVisitor::vfp2_VMLS(Cond cond, bool D, size_t Vn, size_t Vd, bo } bool ArmTranslatorVisitor::vfp2_VNMUL(Cond cond, bool D, size_t Vn, size_t Vd, bool sz, bool N, bool M, size_t Vm) { - if (ir.current_location.FPSCR_Len() != 1 || ir.current_location.FPSCR_Stride() != 1) + if (ir.current_location.FPSCR().Len() != 1 || ir.current_location.FPSCR().Stride() != 1) return InterpretThisInstruction(); // TODO: Vectorised floating point instructions ExtReg d = ToExtReg(sz, Vd, D); @@ -134,7 +134,7 @@ bool ArmTranslatorVisitor::vfp2_VNMUL(Cond cond, bool D, size_t Vn, size_t Vd, b } bool 
-    if (ir.current_location.FPSCR_Len() != 1 || ir.current_location.FPSCR_Stride() != 1)
+    if (ir.current_location.FPSCR().Len() != 1 || ir.current_location.FPSCR().Stride() != 1)
         return InterpretThisInstruction(); // TODO: Vectorised floating point instructions
 
     ExtReg d = ToExtReg(sz, Vd, D);
@@ -154,7 +154,7 @@ bool ArmTranslatorVisitor::vfp2_VNMLA(Cond cond, bool D, size_t Vn, size_t Vd, b
 }
 
 bool ArmTranslatorVisitor::vfp2_VNMLS(Cond cond, bool D, size_t Vn, size_t Vd, bool sz, bool N, bool M, size_t Vm) {
-    if (ir.current_location.FPSCR_Len() != 1 || ir.current_location.FPSCR_Stride() != 1)
+    if (ir.current_location.FPSCR().Len() != 1 || ir.current_location.FPSCR().Stride() != 1)
         return InterpretThisInstruction(); // TODO: Vectorised floating point instructions
 
     ExtReg d = ToExtReg(sz, Vd, D);
@@ -174,7 +174,7 @@ bool ArmTranslatorVisitor::vfp2_VNMLS(Cond cond, bool D, size_t Vn, size_t Vd, b
 }
 
 bool ArmTranslatorVisitor::vfp2_VDIV(Cond cond, bool D, size_t Vn, size_t Vd, bool sz, bool N, bool M, size_t Vm) {
-    if (ir.current_location.FPSCR_Len() != 1 || ir.current_location.FPSCR_Stride() != 1)
+    if (ir.current_location.FPSCR().Len() != 1 || ir.current_location.FPSCR().Stride() != 1)
         return InterpretThisInstruction(); // TODO: Vectorised floating point instructions
 
     ExtReg d = ToExtReg(sz, Vd, D);
@@ -297,7 +297,7 @@ bool ArmTranslatorVisitor::vfp2_VMOV_f64_2u32(Cond cond, Reg t2, Reg t, bool M,
 }
 
 bool ArmTranslatorVisitor::vfp2_VMOV_reg(Cond cond, bool D, size_t Vd, bool sz, bool M, size_t Vm) {
-    if (ir.current_location.FPSCR_Len() != 1 || ir.current_location.FPSCR_Stride() != 1)
+    if (ir.current_location.FPSCR().Len() != 1 || ir.current_location.FPSCR().Stride() != 1)
         return InterpretThisInstruction(); // TODO: Vectorised floating point instructions
 
     ExtReg d = ToExtReg(sz, Vd, D);
@@ -310,7 +310,7 @@ bool ArmTranslatorVisitor::vfp2_VMOV_reg(Cond cond, bool D, size_t Vd, bool sz,
 }
 
 bool ArmTranslatorVisitor::vfp2_VABS(Cond cond, bool D, size_t Vd, bool sz, bool M, size_t Vm) {
-    if (ir.current_location.FPSCR_Len() != 1 || ir.current_location.FPSCR_Stride() != 1)
+    if (ir.current_location.FPSCR().Len() != 1 || ir.current_location.FPSCR().Stride() != 1)
         return InterpretThisInstruction(); // TODO: Vectorised floating point instructions
 
     ExtReg d = ToExtReg(sz, Vd, D);
@@ -327,7 +327,7 @@ bool ArmTranslatorVisitor::vfp2_VABS(Cond cond, bool D, size_t Vd, bool sz, bool
 }
 
 bool ArmTranslatorVisitor::vfp2_VNEG(Cond cond, bool D, size_t Vd, bool sz, bool M, size_t Vm) {
-    if (ir.current_location.FPSCR_Len() != 1 || ir.current_location.FPSCR_Stride() != 1)
+    if (ir.current_location.FPSCR().Len() != 1 || ir.current_location.FPSCR().Stride() != 1)
         return InterpretThisInstruction(); // TODO: Vectorised floating point instructions
 
     ExtReg d = ToExtReg(sz, Vd, D);
@@ -344,7 +344,7 @@ bool ArmTranslatorVisitor::vfp2_VNEG(Cond cond, bool D, size_t Vd, bool sz, bool
 }
 
 bool ArmTranslatorVisitor::vfp2_VSQRT(Cond cond, bool D, size_t Vd, bool sz, bool M, size_t Vm) {
-    if (ir.current_location.FPSCR_Len() != 1 || ir.current_location.FPSCR_Stride() != 1)
+    if (ir.current_location.FPSCR().Len() != 1 || ir.current_location.FPSCR().Stride() != 1)
         return InterpretThisInstruction(); // TODO: Vectorised floating point instructions
 
     ExtReg d = ToExtReg(sz, Vd, D);