diff --git a/src/backend/x64/emit_x64_data_processing.cpp b/src/backend/x64/emit_x64_data_processing.cpp
index f7cbd0ab..6d847670 100644
--- a/src/backend/x64/emit_x64_data_processing.cpp
+++ b/src/backend/x64/emit_x64_data_processing.cpp
@@ -38,7 +38,7 @@ void EmitX64::EmitPack2x64To1x128(EmitContext& ctx, IR::Inst* inst) {
         code.movq(result, lo);
         code.pinsrq(result, hi, 1);
     } else {
-        Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
+        const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
         code.movq(result, lo);
         code.movq(tmp, hi);
         code.punpcklqdq(result, tmp);
@@ -53,7 +53,7 @@ void EmitX64::EmitLeastSignificantWord(EmitContext& ctx, IR::Inst* inst) {
 }
 
 void EmitX64::EmitMostSignificantWord(EmitContext& ctx, IR::Inst* inst) {
-    auto carry_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetCarryFromOp);
+    const auto carry_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetCarryFromOp);
 
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
     const Xbyak::Reg64 result = ctx.reg_alloc.UseScratchGpr(args[0]);
@@ -220,7 +220,7 @@ void EmitX64::EmitExtractRegister64(Dynarmic::BackendX64::EmitContext& ctx, IR::
 }
 
 void EmitX64::EmitLogicalShiftLeft32(EmitContext& ctx, IR::Inst* inst) {
-    auto carry_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetCarryFromOp);
+    const auto carry_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetCarryFromOp);
 
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
     auto& operand_arg = args[0];
@@ -353,7 +353,7 @@ void EmitX64::EmitLogicalShiftLeft64(EmitContext& ctx, IR::Inst* inst) {
 }
 
 void EmitX64::EmitLogicalShiftRight32(EmitContext& ctx, IR::Inst* inst) {
-    auto carry_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetCarryFromOp);
+    const auto carry_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetCarryFromOp);
 
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
     auto& operand_arg = args[0];
@@ -485,7 +485,7 @@ void EmitX64::EmitLogicalShiftRight64(EmitContext& ctx, IR::Inst* inst) {
 }
 
 void EmitX64::EmitArithmeticShiftRight32(EmitContext& ctx, IR::Inst* inst) {
-    auto carry_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetCarryFromOp);
+    const auto carry_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetCarryFromOp);
 
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
     auto& operand_arg = args[0];
@@ -604,7 +604,7 @@ void EmitX64::EmitArithmeticShiftRight64(EmitContext& ctx, IR::Inst* inst) {
 }
 
 void EmitX64::EmitRotateRight32(EmitContext& ctx, IR::Inst* inst) {
-    auto carry_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetCarryFromOp);
+    const auto carry_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetCarryFromOp);
 
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
     auto& operand_arg = args[0];
@@ -706,7 +706,7 @@ void EmitX64::EmitRotateRight64(EmitContext& ctx, IR::Inst* inst) {
 }
 
 void EmitX64::EmitRotateRightExtended(EmitContext& ctx, IR::Inst* inst) {
-    auto carry_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetCarryFromOp);
+    const auto carry_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetCarryFromOp);
 
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
     const Xbyak::Reg32 result = ctx.reg_alloc.UseScratchGpr(args[0]).cvt32();
diff --git a/src/backend/x64/emit_x64_floating_point.cpp b/src/backend/x64/emit_x64_floating_point.cpp
index 5dbe29e6..9496f9a3 100644
--- a/src/backend/x64/emit_x64_floating_point.cpp
+++ b/src/backend/x64/emit_x64_floating_point.cpp
@@ -1013,7 +1013,7 @@ void EmitX64::EmitFPSub64(EmitContext& ctx, IR::Inst* inst) {
 
 static Xbyak::Reg64 SetFpscrNzcvFromFlags(BlockOfCode& code, EmitContext& ctx) {
     ctx.reg_alloc.ScratchGpr({HostLoc::RCX}); // shifting requires use of cl
-    Xbyak::Reg64 nzcv = ctx.reg_alloc.ScratchGpr();
+    const Xbyak::Reg64 nzcv = ctx.reg_alloc.ScratchGpr();
 
     // x64 flags    ARM flags
     // ZF  PF  CF   NZCV
@@ -1040,9 +1040,9 @@ static Xbyak::Reg64 SetFpscrNzcvFromFlags(BlockOfCode& code, EmitContext& ctx) {
 
 void EmitX64::EmitFPCompare32(EmitContext& ctx, IR::Inst* inst) {
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-    Xbyak::Xmm reg_a = ctx.reg_alloc.UseXmm(args[0]);
-    Xbyak::Xmm reg_b = ctx.reg_alloc.UseXmm(args[1]);
-    bool exc_on_qnan = args[2].GetImmediateU1();
+    const Xbyak::Xmm reg_a = ctx.reg_alloc.UseXmm(args[0]);
+    const Xbyak::Xmm reg_b = ctx.reg_alloc.UseXmm(args[1]);
+    const bool exc_on_qnan = args[2].GetImmediateU1();
 
     if (exc_on_qnan) {
         code.comiss(reg_a, reg_b);
@@ -1050,15 +1050,15 @@ void EmitX64::EmitFPCompare32(EmitContext& ctx, IR::Inst* inst) {
         code.ucomiss(reg_a, reg_b);
     }
 
-    Xbyak::Reg64 nzcv = SetFpscrNzcvFromFlags(code, ctx);
+    const Xbyak::Reg64 nzcv = SetFpscrNzcvFromFlags(code, ctx);
     ctx.reg_alloc.DefineValue(inst, nzcv);
 }
 
 void EmitX64::EmitFPCompare64(EmitContext& ctx, IR::Inst* inst) {
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-    Xbyak::Xmm reg_a = ctx.reg_alloc.UseXmm(args[0]);
-    Xbyak::Xmm reg_b = ctx.reg_alloc.UseXmm(args[1]);
-    bool exc_on_qnan = args[2].GetImmediateU1();
+    const Xbyak::Xmm reg_a = ctx.reg_alloc.UseXmm(args[0]);
+    const Xbyak::Xmm reg_b = ctx.reg_alloc.UseXmm(args[1]);
+    const bool exc_on_qnan = args[2].GetImmediateU1();
 
     if (exc_on_qnan) {
         code.comisd(reg_a, reg_b);
@@ -1066,7 +1066,7 @@ void EmitX64::EmitFPCompare64(EmitContext& ctx, IR::Inst* inst) {
         code.ucomisd(reg_a, reg_b);
     }
 
-    Xbyak::Reg64 nzcv = SetFpscrNzcvFromFlags(code, ctx);
+    const Xbyak::Reg64 nzcv = SetFpscrNzcvFromFlags(code, ctx);
     ctx.reg_alloc.DefineValue(inst, nzcv);
 }
 
diff --git a/src/backend/x64/emit_x64_packed.cpp b/src/backend/x64/emit_x64_packed.cpp
index e2928bf8..74ff1ef5 100644
--- a/src/backend/x64/emit_x64_packed.cpp
+++ b/src/backend/x64/emit_x64_packed.cpp
@@ -18,7 +18,7 @@ using namespace Xbyak::util;
 
 void EmitX64::EmitPackedAddU8(EmitContext& ctx, IR::Inst* inst) {
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-    auto ge_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetGEFromOp);
+    const auto ge_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetGEFromOp);
 
     const Xbyak::Xmm xmm_a = ctx.reg_alloc.UseScratchXmm(args[0]);
     const Xbyak::Xmm xmm_b = ctx.reg_alloc.UseXmm(args[1]);
@@ -45,7 +45,7 @@ void EmitX64::EmitPackedAddU8(EmitContext& ctx, IR::Inst* inst) {
 
 void EmitX64::EmitPackedAddS8(EmitContext& ctx, IR::Inst* inst) {
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-    auto ge_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetGEFromOp);
+    const auto ge_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetGEFromOp);
 
     const Xbyak::Xmm xmm_a = ctx.reg_alloc.UseScratchXmm(args[0]);
     const Xbyak::Xmm xmm_b = ctx.reg_alloc.UseXmm(args[1]);
@@ -72,7 +72,7 @@ void EmitX64::EmitPackedAddS8(EmitContext& ctx, IR::Inst* inst) {
 
 void EmitX64::EmitPackedAddU16(EmitContext& ctx, IR::Inst* inst) {
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-    auto ge_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetGEFromOp);
+    const auto ge_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetGEFromOp);
 
     const Xbyak::Xmm xmm_a = ctx.reg_alloc.UseScratchXmm(args[0]);
     const Xbyak::Xmm xmm_b = ctx.reg_alloc.UseXmm(args[1]);
@@ -114,7 +114,7 @@ void EmitX64::EmitPackedAddU16(EmitContext& ctx, IR::Inst* inst) {
 
 void EmitX64::EmitPackedAddS16(EmitContext& ctx, IR::Inst* inst) {
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-    auto ge_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetGEFromOp);
+    const auto ge_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetGEFromOp);
 
     const Xbyak::Xmm xmm_a = ctx.reg_alloc.UseScratchXmm(args[0]);
     const Xbyak::Xmm xmm_b = ctx.reg_alloc.UseXmm(args[1]);
@@ -141,7 +141,7 @@ void EmitX64::EmitPackedAddS16(EmitContext& ctx, IR::Inst* inst) {
 
 void EmitX64::EmitPackedSubU8(EmitContext& ctx, IR::Inst* inst) {
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-    auto ge_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetGEFromOp);
+    const auto ge_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetGEFromOp);
 
     const Xbyak::Xmm xmm_a = ctx.reg_alloc.UseScratchXmm(args[0]);
     const Xbyak::Xmm xmm_b = ctx.reg_alloc.UseXmm(args[1]);
@@ -164,7 +164,7 @@ void EmitX64::EmitPackedSubU8(EmitContext& ctx, IR::Inst* inst) {
 
 void EmitX64::EmitPackedSubS8(EmitContext& ctx, IR::Inst* inst) {
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-    auto ge_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetGEFromOp);
+    const auto ge_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetGEFromOp);
 
     const Xbyak::Xmm xmm_a = ctx.reg_alloc.UseScratchXmm(args[0]);
     const Xbyak::Xmm xmm_b = ctx.reg_alloc.UseXmm(args[1]);
@@ -191,7 +191,7 @@ void EmitX64::EmitPackedSubS8(EmitContext& ctx, IR::Inst* inst) {
 
 void EmitX64::EmitPackedSubU16(EmitContext& ctx, IR::Inst* inst) {
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-    auto ge_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetGEFromOp);
+    const auto ge_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetGEFromOp);
 
     if (!ge_inst) {
         const Xbyak::Xmm xmm_a = ctx.reg_alloc.UseScratchXmm(args[0]);
@@ -242,7 +242,7 @@ void EmitX64::EmitPackedSubU16(EmitContext& ctx, IR::Inst* inst) {
 
 void EmitX64::EmitPackedSubS16(EmitContext& ctx, IR::Inst* inst) {
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-    auto ge_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetGEFromOp);
+    const auto ge_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetGEFromOp);
 
     const Xbyak::Xmm xmm_a = ctx.reg_alloc.UseScratchXmm(args[0]);
     const Xbyak::Xmm xmm_b = ctx.reg_alloc.UseXmm(args[1]);
@@ -512,7 +512,7 @@ void EmitX64::EmitPackedHalvingSubS16(EmitContext& ctx, IR::Inst* inst) {
 
 void EmitPackedSubAdd(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst, bool hi_is_sum, bool is_signed, bool is_halving) {
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-    auto ge_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetGEFromOp);
+    const auto ge_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetGEFromOp);
 
     const Xbyak::Reg32 reg_a_hi = ctx.reg_alloc.UseScratchGpr(args[0]).cvt32();
     const Xbyak::Reg32 reg_b_hi = ctx.reg_alloc.UseScratchGpr(args[1]).cvt32();
diff --git a/src/backend/x64/emit_x64_saturation.cpp b/src/backend/x64/emit_x64_saturation.cpp
index 8d917bc1..1293da99 100644
--- a/src/backend/x64/emit_x64_saturation.cpp
+++ b/src/backend/x64/emit_x64_saturation.cpp
@@ -30,7 +30,7 @@ enum class Op {
 
 template<Op op, size_t size>
 void EmitSignedSaturatedOp(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
-    auto overflow_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp);
+    const auto overflow_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp);
 
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
 
@@ -79,7 +79,7 @@ void EmitSignedSaturatedOp(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst)
 
 template<Op op, size_t size>
 void EmitUnsignedSaturatedOp(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
-    auto overflow_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp);
+    const auto overflow_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp);
 
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
 
@@ -135,7 +135,7 @@ void EmitX64::EmitSignedSaturatedAdd64(EmitContext& ctx, IR::Inst* inst) {
 }
 
 void EmitX64::EmitSignedSaturatedDoublingMultiplyReturnHigh16(EmitContext& ctx, IR::Inst* inst) {
-    auto overflow_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp);
+    const auto overflow_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp);
 
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
 
@@ -165,7 +165,7 @@ void EmitX64::EmitSignedSaturatedDoublingMultiplyReturnHigh16(EmitContext& ctx,
 }
 
 void EmitX64::EmitSignedSaturatedDoublingMultiplyReturnHigh32(EmitContext& ctx, IR::Inst* inst) {
-    auto overflow_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp);
+    const auto overflow_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp);
 
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
 
@@ -211,7 +211,7 @@ void EmitX64::EmitSignedSaturatedSub64(EmitContext& ctx, IR::Inst* inst) {
 }
 
 void EmitX64::EmitSignedSaturation(EmitContext& ctx, IR::Inst* inst) {
-    auto overflow_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp);
+    const auto overflow_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp);
 
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
     const size_t N = args[1].GetImmediateU8();
@@ -292,7 +292,7 @@ void EmitX64::EmitUnsignedSaturatedSub64(EmitContext& ctx, IR::Inst* inst) {
 }
 
 void EmitX64::EmitUnsignedSaturation(EmitContext& ctx, IR::Inst* inst) {
-    auto overflow_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp);
+    const auto overflow_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp);
 
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
     const size_t N = args[1].GetImmediateU8();
diff --git a/src/backend/x64/emit_x64_vector.cpp b/src/backend/x64/emit_x64_vector.cpp
index d9eac183..c52635ed 100644
--- a/src/backend/x64/emit_x64_vector.cpp
+++ b/src/backend/x64/emit_x64_vector.cpp
@@ -467,7 +467,7 @@ void EmitX64::EmitVectorArithmeticShiftRight8(EmitContext& ctx, IR::Inst* inst)
 
 void EmitX64::EmitVectorArithmeticShiftRight16(EmitContext& ctx, IR::Inst* inst) {
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-    Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
+    const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
     const u8 shift_amount = args[1].GetImmediateU8();
 
     code.psraw(result, shift_amount);
@@ -478,7 +478,7 @@ void EmitX64::EmitVectorArithmeticShiftRight16(EmitContext& ctx, IR::Inst* inst)
 
 void EmitX64::EmitVectorArithmeticShiftRight32(EmitContext& ctx, IR::Inst* inst) {
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-    Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
+    const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
     const u8 shift_amount = args[1].GetImmediateU8();
 
     code.psrad(result, shift_amount);
@@ -1001,9 +1001,9 @@ void EmitX64::EmitVectorEqual64(EmitContext& ctx, IR::Inst* inst) {
 
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
 
-    Xbyak::Xmm xmm_a = ctx.reg_alloc.UseScratchXmm(args[0]);
-    Xbyak::Xmm xmm_b = ctx.reg_alloc.UseXmm(args[1]);
-    Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
+    const Xbyak::Xmm xmm_a = ctx.reg_alloc.UseScratchXmm(args[0]);
+    const Xbyak::Xmm xmm_b = ctx.reg_alloc.UseXmm(args[1]);
+    const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
 
     code.pcmpeqd(xmm_a, xmm_b);
     code.pshufd(tmp, xmm_a, 0b10110001);