diff --git a/src/backend/x64/a64_interface.cpp b/src/backend/x64/a64_interface.cpp
index e3cbe882..3065ea69 100644
--- a/src/backend/x64/a64_interface.cpp
+++ b/src/backend/x64/a64_interface.cpp
@@ -37,7 +37,7 @@ static RunCodeCallbacks GenRunCodeCallbacks(A64::UserCallbacks* cb, CodePtr (*Lo
 
 struct Jit::Impl final {
 public:
     Impl(Jit* jit, UserConfig conf)
-        : conf(conf) 
+        : conf(conf)
         , block_of_code(GenRunCodeCallbacks(conf.callbacks, &GetCurrentBlockThunk, this), JitStateInfo{jit_state})
         , emitter(block_of_code, conf, jit) {
diff --git a/src/backend/x64/emit_x64_vector.cpp b/src/backend/x64/emit_x64_vector.cpp
index e19d424b..d9eac183 100644
--- a/src/backend/x64/emit_x64_vector.cpp
+++ b/src/backend/x64/emit_x64_vector.cpp
@@ -3399,7 +3399,7 @@ void EmitX64::EmitVectorSignedSaturatedDoublingMultiply32(EmitContext& ctx, IR::
 
     if (upper_inst) {
         const Xbyak::Xmm upper_result = ctx.reg_alloc.ScratchXmm();
-        
+
         code.vpsrlq(upper_result, odds, 32);
         code.vblendps(upper_result, upper_result, even, 0b1010);
 
@@ -3420,14 +3420,14 @@ void EmitX64::EmitVectorSignedSaturatedDoublingMultiply32(EmitContext& ctx, IR::
 
     if (lower_inst) {
         const Xbyak::Xmm lower_result = ctx.reg_alloc.ScratchXmm();
-        
+
         code.vpsllq(lower_result, even, 32);
         code.vblendps(lower_result, lower_result, odds, 0b0101);
 
         ctx.reg_alloc.DefineValue(lower_inst, lower_result);
         ctx.EraseInstruction(lower_inst);
     }
-    
+
     return;
 }
 
diff --git a/src/common/cast_util.h b/src/common/cast_util.h
index 1787cbd4..0bed4b65 100644
--- a/src/common/cast_util.h
+++ b/src/common/cast_util.h
@@ -32,7 +32,7 @@ inline Dest BitCastPointee(const SourcePtr source) {
     std::aligned_storage_t<sizeof(Dest), alignof(Dest)> dest;
     std::memcpy(&dest, BitCast<void*>(source), sizeof(dest));
-    return reinterpret_cast<Dest&>(dest); 
+    return reinterpret_cast<Dest&>(dest);
 }
 
 } // namespace Dynarmic::Common
diff --git a/src/common/fp/info.h b/src/common/fp/info.h
index c245fd36..d60c4a31 100644
--- a/src/common/fp/info.h
+++ b/src/common/fp/info.h
@@ -114,4 +114,4 @@ constexpr FPT FPValue() {
     return FPT(FPInfo<FPT>::Zero(sign) | mantissa | (biased_exponent << FPInfo<FPT>::explicit_mantissa_width));
 }
 
-} // namespace Dynarmic::FP 
+} // namespace Dynarmic::FP
diff --git a/src/common/fp/mantissa_util.h b/src/common/fp/mantissa_util.h
index 7f2c3110..5b7f38d3 100644
--- a/src/common/fp/mantissa_util.h
+++ b/src/common/fp/mantissa_util.h
@@ -44,4 +44,4 @@ inline ResidualError ResidualErrorOnRightShift(u64 mantissa, int shift_amount) {
     return ResidualError::GreaterThanHalf;
 }
 
-} // namespace Dynarmic::FP 
+} // namespace Dynarmic::FP
diff --git a/src/common/fp/op/FPMulAdd.cpp b/src/common/fp/op/FPMulAdd.cpp
index edc3d054..d53ba11e 100644
--- a/src/common/fp/op/FPMulAdd.cpp
+++ b/src/common/fp/op/FPMulAdd.cpp
@@ -77,4 +77,4 @@ template u16 FPMulAdd<u16>(u16 addend, u16 op1, u16 op2, FPCR fpcr, FPSR& fpsr);
 template u32 FPMulAdd<u32>(u32 addend, u32 op1, u32 op2, FPCR fpcr, FPSR& fpsr);
 template u64 FPMulAdd<u64>(u64 addend, u64 op1, u64 op2, FPCR fpcr, FPSR& fpsr);
 
-} // namespace Dynarmic::FP 
+} // namespace Dynarmic::FP
diff --git a/src/common/fp/op/FPMulAdd.h b/src/common/fp/op/FPMulAdd.h
index 7b1556e4..a9873fee 100644
--- a/src/common/fp/op/FPMulAdd.h
+++ b/src/common/fp/op/FPMulAdd.h
@@ -14,4 +14,4 @@ class FPSR;
 template<typename FPT>
 FPT FPMulAdd(FPT addend, FPT op1, FPT op2, FPCR fpcr, FPSR& fpsr);
 
-} // namespace Dynarmic::FP 
+} // namespace Dynarmic::FP
diff --git a/src/common/fp/op/FPNeg.h b/src/common/fp/op/FPNeg.h
index 402cca21..c138bc3f 100644
--- a/src/common/fp/op/FPNeg.h
+++ b/src/common/fp/op/FPNeg.h
@@ -15,4 +15,4 @@ inline FPT FPNeg(FPT op) {
     return op ^ FPInfo<FPT>::sign_mask;
 }
 
-} // namespace Dynarmic::FP 
+} // namespace Dynarmic::FP
diff --git a/src/common/fp/op/FPRSqrtEstimate.cpp b/src/common/fp/op/FPRSqrtEstimate.cpp
index c9605fa8..e54bdc50 100644
--- a/src/common/fp/op/FPRSqrtEstimate.cpp
+++ b/src/common/fp/op/FPRSqrtEstimate.cpp
@@ -54,4 +54,4 @@ template u16 FPRSqrtEstimate<u16>(u16 op, FPCR fpcr, FPSR& fpsr);
 template u32 FPRSqrtEstimate<u32>(u32 op, FPCR fpcr, FPSR& fpsr);
 template u64 FPRSqrtEstimate<u64>(u64 op, FPCR fpcr, FPSR& fpsr);
 
-} // namespace Dynarmic::FP 
+} // namespace Dynarmic::FP
diff --git a/src/common/fp/op/FPRSqrtEstimate.h b/src/common/fp/op/FPRSqrtEstimate.h
index e41bc4fd..8ab2aed5 100644
--- a/src/common/fp/op/FPRSqrtEstimate.h
+++ b/src/common/fp/op/FPRSqrtEstimate.h
@@ -14,4 +14,4 @@ class FPSR;
 template<typename FPT>
 FPT FPRSqrtEstimate(FPT op, FPCR fpcr, FPSR& fpsr);
 
-} // namespace Dynarmic::FP 
+} // namespace Dynarmic::FP
diff --git a/src/common/fp/op/FPRSqrtStepFused.cpp b/src/common/fp/op/FPRSqrtStepFused.cpp
index 29788010..51542075 100644
--- a/src/common/fp/op/FPRSqrtStepFused.cpp
+++ b/src/common/fp/op/FPRSqrtStepFused.cpp
@@ -54,4 +54,4 @@ template u16 FPRSqrtStepFused<u16>(u16 op1, u16 op2, FPCR fpcr, FPSR& fpsr);
 template u32 FPRSqrtStepFused<u32>(u32 op1, u32 op2, FPCR fpcr, FPSR& fpsr);
 template u64 FPRSqrtStepFused<u64>(u64 op1, u64 op2, FPCR fpcr, FPSR& fpsr);
 
-} // namespace Dynarmic::FP 
+} // namespace Dynarmic::FP
diff --git a/src/common/fp/op/FPRSqrtStepFused.h b/src/common/fp/op/FPRSqrtStepFused.h
index 4847809c..0db7777d 100644
--- a/src/common/fp/op/FPRSqrtStepFused.h
+++ b/src/common/fp/op/FPRSqrtStepFused.h
@@ -14,4 +14,4 @@ class FPSR;
 template<typename FPT>
 FPT FPRSqrtStepFused(FPT op1, FPT op2, FPCR fpcr, FPSR& fpsr);
 
-} // namespace Dynarmic::FP 
+} // namespace Dynarmic::FP
diff --git a/src/common/fp/op/FPRecipEstimate.cpp b/src/common/fp/op/FPRecipEstimate.cpp
index 5e13767d..cd152fdb 100644
--- a/src/common/fp/op/FPRecipEstimate.cpp
+++ b/src/common/fp/op/FPRecipEstimate.cpp
@@ -97,4 +97,4 @@ template u16 FPRecipEstimate<u16>(u16 op, FPCR fpcr, FPSR& fpsr);
 template u32 FPRecipEstimate<u32>(u32 op, FPCR fpcr, FPSR& fpsr);
 template u64 FPRecipEstimate<u64>(u64 op, FPCR fpcr, FPSR& fpsr);
 
-} // namespace Dynarmic::FP 
+} // namespace Dynarmic::FP
diff --git a/src/common/fp/op/FPRecipEstimate.h b/src/common/fp/op/FPRecipEstimate.h
index 61bcecfd..1a2dc9c0 100644
--- a/src/common/fp/op/FPRecipEstimate.h
+++ b/src/common/fp/op/FPRecipEstimate.h
@@ -14,4 +14,4 @@ class FPSR;
 template<typename FPT>
 FPT FPRecipEstimate(FPT op, FPCR fpcr, FPSR& fpsr);
 
-} // namespace Dynarmic::FP 
+} // namespace Dynarmic::FP
diff --git a/src/common/fp/op/FPRecipStepFused.cpp b/src/common/fp/op/FPRecipStepFused.cpp
index f82fea8e..3f0287e7 100644
--- a/src/common/fp/op/FPRecipStepFused.cpp
+++ b/src/common/fp/op/FPRecipStepFused.cpp
@@ -53,4 +53,4 @@ template u16 FPRecipStepFused<u16>(u16 op1, u16 op2, FPCR fpcr, FPSR& fpsr);
 template u32 FPRecipStepFused<u32>(u32 op1, u32 op2, FPCR fpcr, FPSR& fpsr);
 template u64 FPRecipStepFused<u64>(u64 op1, u64 op2, FPCR fpcr, FPSR& fpsr);
 
-} // namespace Dynarmic::FP 
+} // namespace Dynarmic::FP
diff --git a/src/common/fp/op/FPRecipStepFused.h b/src/common/fp/op/FPRecipStepFused.h
index 09222638..0dff4ef5 100644
--- a/src/common/fp/op/FPRecipStepFused.h
+++ b/src/common/fp/op/FPRecipStepFused.h
@@ -14,4 +14,4 @@ class FPSR;
 template<typename FPT>
 FPT FPRecipStepFused(FPT op1, FPT op2, FPCR fpcr, FPSR& fpsr);
 
-} // namespace Dynarmic::FP 
+} // namespace Dynarmic::FP
diff --git a/src/common/fp/op/FPRoundInt.cpp b/src/common/fp/op/FPRoundInt.cpp
index 2eff07fe..2e2be26e 100644
--- a/src/common/fp/op/FPRoundInt.cpp
+++ b/src/common/fp/op/FPRoundInt.cpp
@@ -33,7 +33,7 @@ u64 FPRoundInt(FPT op, FPCR fpcr, RoundingMode rounding, bool exact, FPSR& fpsr)
     if (type == FPType::Infinity) {
         return FPInfo<FPT>::Infinity(sign);
     }
-    
+
     if (type == FPType::Zero) {
         return FPInfo<FPT>::Zero(sign);
     }
@@ -93,4 +93,4 @@ template u64 FPRoundInt<u16>(u16 op, FPCR fpcr, RoundingMode rounding, bool exac
 template u64 FPRoundInt<u32>(u32 op, FPCR fpcr, RoundingMode rounding, bool exact, FPSR& fpsr);
 template u64 FPRoundInt<u64>(u64 op, FPCR fpcr, RoundingMode rounding, bool exact, FPSR& fpsr);
 
-} // namespace Dynarmic::FP 
+} // namespace Dynarmic::FP
diff --git a/src/common/fp/op/FPRoundInt.h b/src/common/fp/op/FPRoundInt.h
index e0a60b29..5b6e711a 100644
--- a/src/common/fp/op/FPRoundInt.h
+++ b/src/common/fp/op/FPRoundInt.h
@@ -17,4 +17,4 @@ enum class RoundingMode;
 template<typename FPT>
 u64 FPRoundInt(FPT op, FPCR fpcr, RoundingMode rounding, bool exact, FPSR& fpsr);
 
-} // namespace Dynarmic::FP 
+} // namespace Dynarmic::FP
diff --git a/src/common/fp/op/FPToFixed.cpp b/src/common/fp/op/FPToFixed.cpp
index 9240d5e5..458c3b4c 100644
--- a/src/common/fp/op/FPToFixed.cpp
+++ b/src/common/fp/op/FPToFixed.cpp
@@ -99,4 +99,4 @@ template u64 FPToFixed<u16>(size_t ibits, u16 op, size_t fbits, bool unsigned_,
 template u64 FPToFixed<u32>(size_t ibits, u32 op, size_t fbits, bool unsigned_, FPCR fpcr, RoundingMode rounding, FPSR& fpsr);
 template u64 FPToFixed<u64>(size_t ibits, u64 op, size_t fbits, bool unsigned_, FPCR fpcr, RoundingMode rounding, FPSR& fpsr);
 
-} // namespace Dynarmic::FP 
+} // namespace Dynarmic::FP
diff --git a/src/common/fp/op/FPToFixed.h b/src/common/fp/op/FPToFixed.h
index d4febe0b..c1a2172e 100644
--- a/src/common/fp/op/FPToFixed.h
+++ b/src/common/fp/op/FPToFixed.h
@@ -17,4 +17,4 @@ enum class RoundingMode;
 template<typename FPT>
 u64 FPToFixed(size_t ibits, FPT op, size_t fbits, bool unsigned_, FPCR fpcr, RoundingMode rounding, FPSR& fpsr);
 
-} // namespace Dynarmic::FP 
+} // namespace Dynarmic::FP
diff --git a/src/common/fp/process_exception.cpp b/src/common/fp/process_exception.cpp
index 231f1508..468b9ec0 100644
--- a/src/common/fp/process_exception.cpp
+++ b/src/common/fp/process_exception.cpp
@@ -55,4 +55,4 @@ void FPProcessException(FPExc exception, FPCR fpcr, FPSR& fpsr) {
     }
 }
 
-} // namespace Dynarmic::FP 
+} // namespace Dynarmic::FP
diff --git a/src/common/fp/process_exception.h b/src/common/fp/process_exception.h
index 6115a97f..7d36957a 100644
--- a/src/common/fp/process_exception.h
+++ b/src/common/fp/process_exception.h
@@ -22,4 +22,4 @@ enum class FPExc {
 
 void FPProcessException(FPExc exception, FPCR fpcr, FPSR& fpsr);
 
-} // namespace Dynarmic::FP 
+} // namespace Dynarmic::FP
diff --git a/src/common/fp/process_nan.cpp b/src/common/fp/process_nan.cpp
index 5013ba33..0164c50b 100644
--- a/src/common/fp/process_nan.cpp
+++ b/src/common/fp/process_nan.cpp
@@ -89,4 +89,4 @@ template std::optional<u16> FPProcessNaNs3<u16>(FPType type1, FPType type2, FPTy
 template std::optional<u32> FPProcessNaNs3<u32>(FPType type1, FPType type2, FPType type3, u32 op1, u32 op2, u32 op3, FPCR fpcr, FPSR& fpsr);
 template std::optional<u64> FPProcessNaNs3<u64>(FPType type1, FPType type2, FPType type3, u64 op1, u64 op2, u64 op3, FPCR fpcr, FPSR& fpsr);
 
-} // namespace Dynarmic::FP 
+} // namespace Dynarmic::FP
diff --git a/src/common/fp/process_nan.h b/src/common/fp/process_nan.h
index 208be016..1e4bda57 100644
--- a/src/common/fp/process_nan.h
+++ b/src/common/fp/process_nan.h
@@ -23,4 +23,4 @@ std::optional<FPT> FPProcessNaNs(FPType type1, FPType type2, FPT op1, FPT op2, F
 template<typename FPT>
 std::optional<FPT> FPProcessNaNs3(FPType type1, FPType type2, FPType type3, FPT op1, FPT op2, FPT op3, FPCR fpcr, FPSR& fpsr);
 
-} // namespace Dynarmic::FP 
+} // namespace Dynarmic::FP
diff --git a/src/common/mp/cartesian_product.h b/src/common/mp/cartesian_product.h
index 919c7eef..2ff42db1 100644
--- a/src/common/mp/cartesian_product.h
+++ b/src/common/mp/cartesian_product.h
@@ -43,7 +43,7 @@ struct cartesian_product_impl {
 } // namespace detail
 
 /// Produces the cartesian product of a set of lists
-/// For example: 
+/// For example:
 /// cartesian_product<list<A, B>, list<D, E>> == list<list<A, D>, list<A, E>, list<B, D>, list<B, E>>
 template<typename... Ls>
 using cartesian_product = typename detail::cartesian_product_impl<list<list<>>, Ls...>::type;
diff --git a/src/frontend/A32/translate/translate_arm/vfp.cpp b/src/frontend/A32/translate/translate_arm/vfp.cpp
index 847443dd..e4375aeb 100644
--- a/src/frontend/A32/translate/translate_arm/vfp.cpp
+++ b/src/frontend/A32/translate/translate_arm/vfp.cpp
@@ -765,7 +765,7 @@ bool ArmTranslatorVisitor::vfp_VSTM_a1(Cond cond, bool p, bool u, bool D, bool w
     if (!p && !u && !w) {
         ASSERT_MSG(false, "Decode error");
     }
-    
+
     if (p && !w) {
         ASSERT_MSG(false, "Decode error");
     }
diff --git a/src/frontend/A64/translate/impl/data_processing_register.cpp b/src/frontend/A64/translate/impl/data_processing_register.cpp
index 6ba72231..939b1645 100644
--- a/src/frontend/A64/translate/impl/data_processing_register.cpp
+++ b/src/frontend/A64/translate/impl/data_processing_register.cpp
@@ -34,17 +34,17 @@ bool TranslatorVisitor::RBIT_int(bool sf, Reg Rn, Reg Rd) {
     const IR::U32 first_lsl = ir.LogicalShiftLeft(ir.And(operand, ir.Imm32(0x55555555)), ir.Imm8(1));
     const IR::U32 first_lsr = ir.And(ir.LogicalShiftRight(operand, ir.Imm8(1)), ir.Imm32(0x55555555));
     const IR::U32 first = ir.Or(first_lsl, first_lsr);
-    
+
     // x = (x & 0x33333333) << 2 | ((x >> 2) & 0x33333333);
     const IR::U32 second_lsl = ir.LogicalShiftLeft(ir.And(first, ir.Imm32(0x33333333)), ir.Imm8(2));
     const IR::U32 second_lsr = ir.And(ir.LogicalShiftRight(first, ir.Imm8(2)), ir.Imm32(0x33333333));
     const IR::U32 second = ir.Or(second_lsl, second_lsr);
-    
+
     // x = (x & 0x0F0F0F0F) << 4 | ((x >> 4) & 0x0F0F0F0F);
     const IR::U32 third_lsl = ir.LogicalShiftLeft(ir.And(second, ir.Imm32(0x0F0F0F0F)), ir.Imm8(4));
     const IR::U32 third_lsr = ir.And(ir.LogicalShiftRight(second, ir.Imm8(4)), ir.Imm32(0x0F0F0F0F));
     const IR::U32 third = ir.Or(third_lsl, third_lsr);
-    
+
     // x = (x << 24) | ((x & 0xFF00) << 8) | ((x >> 8) & 0xFF00) | (x >> 24);
     const IR::U32 fourth_lsl = ir.Or(ir.LogicalShiftLeft(third, ir.Imm8(24)),
                                      ir.LogicalShiftLeft(ir.And(third, ir.Imm32(0xFF00)), ir.Imm8(8)));
diff --git a/src/frontend/A64/translate/impl/load_store_register_register_offset.cpp b/src/frontend/A64/translate/impl/load_store_register_register_offset.cpp
index c5424b0c..6eed91e4 100644
--- a/src/frontend/A64/translate/impl/load_store_register_register_offset.cpp
+++ b/src/frontend/A64/translate/impl/load_store_register_register_offset.cpp
@@ -67,7 +67,7 @@ static bool RegSharedDecodeAndOperation(TranslatorVisitor& v, size_t scale, u8 s
     case MemOp::PREFETCH:
         // TODO: Prefetch
         break;
-    default: 
+    default:
         UNREACHABLE();
     }
diff --git a/src/frontend/A64/translate/impl/simd_across_lanes.cpp b/src/frontend/A64/translate/impl/simd_across_lanes.cpp
index 0e8b4d71..aebc0c1f 100644
--- a/src/frontend/A64/translate/impl/simd_across_lanes.cpp
+++ b/src/frontend/A64/translate/impl/simd_across_lanes.cpp
@@ -89,7 +89,7 @@ bool FPMinMax(TranslatorVisitor& v, bool Q, bool sz, Vec Vn, Vec Vd, MinMaxOpera
 
     for (size_t i = start + 1; i < end; i++) {
         const IR::U32U64 element = v.ir.VectorGetElement(esize, operand, i);
-        
+
         result = op(result, element);
     }
@@ -128,7 +128,7 @@ bool ScalarMinMax(TranslatorVisitor& v, bool Q, Imm<2> size, Vec Vn, Vec Vd,
         return v.ir.ZeroExtendToWord(vec_element);
     };
-    
+
     const auto op_func = [&](const auto& a, const auto& b) {
         switch (operation) {
         case ScalarMinMaxOperation::Max:
diff --git a/src/frontend/A64/translate/impl/simd_sha512.cpp b/src/frontend/A64/translate/impl/simd_sha512.cpp
index 30c48752..d7893bb1 100644
--- a/src/frontend/A64/translate/impl/simd_sha512.cpp
+++ b/src/frontend/A64/translate/impl/simd_sha512.cpp
@@ -124,7 +124,7 @@ IR::U32 SM4Rotation(IREmitter& ir, IR::U32 intval, IR::U32 round_result_low_word
 IR::U128 SM4Hash(IREmitter& ir, Vec Vn, Vec Vd, SM4RotationType type) {
     const IR::U128 n = ir.GetQ(Vn);
     IR::U128 roundresult = ir.GetQ(Vd);
-    
+
     for (size_t i = 0; i < 4; i++) {
         const IR::U32 round_key = ir.VectorGetElement(32, n, i);
diff --git a/src/frontend/A64/translate/impl/simd_three_different.cpp b/src/frontend/A64/translate/impl/simd_three_different.cpp
index 433b813c..a6635d92 100644
--- a/src/frontend/A64/translate/impl/simd_three_different.cpp
+++ b/src/frontend/A64/translate/impl/simd_three_different.cpp
@@ -90,7 +90,7 @@ enum class LongOperationBehavior {
     Addition,
     Subtraction
 };
-
+
 bool LongOperation(TranslatorVisitor& v, bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd,
                    LongOperationBehavior behavior, Signedness sign) {
     if (size == 0b11) {
diff --git a/src/frontend/A64/translate/impl/simd_two_register_misc.cpp b/src/frontend/A64/translate/impl/simd_two_register_misc.cpp
index c31028eb..e8726c56 100644
--- a/src/frontend/A64/translate/impl/simd_two_register_misc.cpp
+++ b/src/frontend/A64/translate/impl/simd_two_register_misc.cpp
@@ -99,7 +99,7 @@ bool IntegerConvertToFloat(TranslatorVisitor& v, bool Q, bool sz, Vec Vn, Vec Vd
     const IR::U128 operand = v.V(datasize, Vn);
     const IR::U128 result = signedness == Signedness::Signed
-                                ? v.ir.FPVectorFromSignedFixed(esize, operand, 0, rounding_mode) 
+                                ? v.ir.FPVectorFromSignedFixed(esize, operand, 0, rounding_mode)
                                 : v.ir.FPVectorFromUnsignedFixed(esize, operand, 0, rounding_mode);
 
     v.V(datasize, Vd, result);
diff --git a/src/frontend/A64/translate/impl/simd_vector_x_indexed_element.cpp b/src/frontend/A64/translate/impl/simd_vector_x_indexed_element.cpp
index a05ad7d5..7ccd0a9b 100644
--- a/src/frontend/A64/translate/impl/simd_vector_x_indexed_element.cpp
+++ b/src/frontend/A64/translate/impl/simd_vector_x_indexed_element.cpp
@@ -96,7 +96,7 @@ bool FPMultiplyByElementHalfPrecision(TranslatorVisitor& v, bool Q, Imm<1> L, Im
     const Vec Vm = Vmlo.ZeroExtend<Vec>();
     const size_t esize = 16;
     const size_t datasize = Q ? 128 : 64;
-    
+
     const IR::UAny element2 = v.ir.VectorGetElement(esize, v.V(idxdsize, Vm), index);
     const IR::U128 operand1 = v.V(datasize, Vn);
     const IR::U128 operand2 = Q ? v.ir.VectorBroadcast(esize, element2) : v.ir.VectorBroadcastLower(esize, element2);
diff --git a/src/frontend/A64/types.h b/src/frontend/A64/types.h
index 17f111aa..8cf23929 100644
--- a/src/frontend/A64/types.h
+++ b/src/frontend/A64/types.h
@@ -20,7 +20,7 @@ using Cond = IR::Cond;
 
 enum class Reg {
     R0, R1, R2, R3, R4, R5, R6, R7,
-    R8, R9, R10, R11, R12, R13, R14, R15, 
+    R8, R9, R10, R11, R12, R13, R14, R15,
     R16, R17, R18, R19, R20, R21, R22, R23,
     R24, R25, R26, R27, R28, R29, R30, R31,
     LR = R30,
@@ -29,7 +29,7 @@ enum class Reg {
 
 enum class Vec {
     V0, V1, V2, V3, V4, V5, V6, V7,
-    V8, V9, V10, V11, V12, V13, V14, V15, 
+    V8, V9, V10, V11, V12, V13, V14, V15,
     V16, V17, V18, V19, V20, V21, V22, V23,
     V24, V25, V26, V27, V28, V29, V30, V31,
 };
diff --git a/src/ir_opt/constant_propagation_pass.cpp b/src/ir_opt/constant_propagation_pass.cpp
index 8dc94c0d..a86a3a16 100644
--- a/src/ir_opt/constant_propagation_pass.cpp
+++ b/src/ir_opt/constant_propagation_pass.cpp
@@ -126,7 +126,7 @@ void FoldLeastSignificantByte(IR::Inst& inst) {
     if (!inst.AreAllArgsImmediates()) {
        return;
     }
-    
+
     const auto operand = inst.GetArg(0);
     inst.ReplaceUsesWith(IR::Value{static_cast<u8>(operand.GetImmediateAsU64())});
 }