general: Remove trailing spaces

General code-related cleanup. Gets rid of trailing spaces in the
codebase.
Lioncash 2019-05-04 21:03:30 -04:00 committed by MerryMage
parent fdbafbc1ae
commit 87083af733
35 changed files with 42 additions and 42 deletions

@@ -37,7 +37,7 @@ static RunCodeCallbacks GenRunCodeCallbacks(A64::UserCallbacks* cb, CodePtr (*Lo
struct Jit::Impl final {
public:
    Impl(Jit* jit, UserConfig conf)
        : conf(conf)
        , block_of_code(GenRunCodeCallbacks(conf.callbacks, &GetCurrentBlockThunk, this), JitStateInfo{jit_state})
        , emitter(block_of_code, conf, jit)
    {

@@ -3399,7 +3399,7 @@ void EmitX64::EmitVectorSignedSaturatedDoublingMultiply32(EmitContext& ctx, IR::
    if (upper_inst) {
        const Xbyak::Xmm upper_result = ctx.reg_alloc.ScratchXmm();
        code.vpsrlq(upper_result, odds, 32);
        code.vblendps(upper_result, upper_result, even, 0b1010);
@@ -3420,14 +3420,14 @@ void EmitX64::EmitVectorSignedSaturatedDoublingMultiply32(EmitContext& ctx, IR::
    if (lower_inst) {
        const Xbyak::Xmm lower_result = ctx.reg_alloc.ScratchXmm();
        code.vpsllq(lower_result, even, 32);
        code.vblendps(lower_result, lower_result, odds, 0b0101);
        ctx.reg_alloc.DefineValue(lower_inst, lower_result);
        ctx.EraseInstruction(lower_inst);
    }
    return;
}
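
The two hunks above split doubled 64-bit products into upper and lower 32-bit lanes with vpsrlq/vpsllq and re-interleave them via vblendps. As a reference, a scalar sketch of the per-lane semantics, assuming ARM-style signed saturating doubling multiply; sqdmull_lane and the qc handling are illustrative, not dynarmic code:

    #include <cstdint>
    #include <utility>

    // Returns {upper half, lower half} of 2*a*b with signed saturation.
    std::pair<std::int32_t, std::int32_t> sqdmull_lane(std::int32_t a, std::int32_t b, bool& qc) {
        if (a == INT32_MIN && b == INT32_MIN) {
            qc = true;               // sticky saturation flag (QC)
            return {INT32_MAX, -1};  // doubled product saturates to 0x7FFFFFFFFFFFFFFF
        }
        const std::int64_t doubled = std::int64_t{a} * b * 2;
        return {static_cast<std::int32_t>(doubled >> 32),  // what upper_result holds
                static_cast<std::int32_t>(doubled)};       // what lower_result holds
    }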

@@ -32,7 +32,7 @@ inline Dest BitCastPointee(const SourcePtr source) {
    std::aligned_storage_t<sizeof(Dest), alignof(Dest)> dest;
    std::memcpy(&dest, BitCast<void*>(source), sizeof(dest));
    return reinterpret_cast<Dest&>(dest);
}
} // namespace Dynarmic::Common
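
BitCastPointee reads an object's bytes through memcpy to avoid strict-aliasing violations. A standalone instance of the same idiom (C++17; the function name is illustrative):

    #include <cstdint>
    #include <cstring>

    std::uint32_t BitsOfFloat(float f) {
        std::uint32_t u;
        static_assert(sizeof u == sizeof f, "sizes must match for a bit cast");
        std::memcpy(&u, &f, sizeof u);  // well-defined, unlike reinterpret_cast on the pointer
        return u;
    }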

@@ -114,4 +114,4 @@ constexpr FPT FPValue() {
    return FPT(FPInfo<FPT>::Zero(sign) | mantissa | (biased_exponent << FPInfo<FPT>::explicit_mantissa_width));
}
} // namespace Dynarmic::FP
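
FPValue() assembles a constant from sign, biased exponent, and mantissa fields. A concrete binary32 instance of the same packing, assuming the IEEE-754 single-precision layout (illustrative helper, not dynarmic's API):

    #include <cstdint>

    constexpr std::uint32_t MakeF32(bool sign, std::uint32_t biased_exponent, std::uint32_t mantissa) {
        return (std::uint32_t{sign} << 31) | (biased_exponent << 23) | mantissa;  // 23 = explicit mantissa width
    }
    static_assert(MakeF32(false, 127, 0) == 0x3F800000);  // 1.0f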

@@ -44,4 +44,4 @@ inline ResidualError ResidualErrorOnRightShift(u64 mantissa, int shift_amount) {
    return ResidualError::GreaterThanHalf;
}
} // namespace Dynarmic::FP
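
ResidualErrorOnRightShift classifies the bits a right shift discards against the half-ULP boundary, which later drives rounding decisions. A guess at the general technique for 0 < shift_amount < 64 (the enumerator in the hunk above is real; the body is a sketch, not dynarmic's exact implementation, which also handles oversized shifts):

    #include <cstdint>

    enum class ResidualError { Zero, LessThanHalf, Half, GreaterThanHalf };

    ResidualError Classify(std::uint64_t mantissa, int shift_amount) {
        const std::uint64_t lost = mantissa & ((std::uint64_t{1} << shift_amount) - 1);
        const std::uint64_t half = std::uint64_t{1} << (shift_amount - 1);
        if (lost == 0)    return ResidualError::Zero;
        if (lost < half)  return ResidualError::LessThanHalf;
        if (lost == half) return ResidualError::Half;
        return ResidualError::GreaterThanHalf;
    }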

@@ -77,4 +77,4 @@ template u16 FPMulAdd<u16>(u16 addend, u16 op1, u16 op2, FPCR fpcr, FPSR& fpsr);
template u32 FPMulAdd<u32>(u32 addend, u32 op1, u32 op2, FPCR fpcr, FPSR& fpsr);
template u64 FPMulAdd<u64>(u64 addend, u64 op1, u64 op2, FPCR fpcr, FPSR& fpsr);
} // namespace Dynarmic::FP

@@ -14,4 +14,4 @@ class FPSR;
template<typename FPT>
FPT FPMulAdd(FPT addend, FPT op1, FPT op2, FPCR fpcr, FPSR& fpsr);
} // namespace Dynarmic::FP

@@ -15,4 +15,4 @@ inline FPT FPNeg(FPT op) {
    return op ^ FPInfo<FPT>::sign_mask;
}
} // namespace Dynarmic::FP
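
FPNeg is a pure sign-bit flip. The binary32 equivalent outside the FPInfo machinery (illustrative):

    #include <cstdint>

    std::uint32_t NegF32Bits(std::uint32_t op) {
        return op ^ 0x80000000u;  // flips the sign of any encoding, NaNs included
    }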

@@ -54,4 +54,4 @@ template u16 FPRSqrtEstimate<u16>(u16 op, FPCR fpcr, FPSR& fpsr);
template u32 FPRSqrtEstimate<u32>(u32 op, FPCR fpcr, FPSR& fpsr);
template u64 FPRSqrtEstimate<u64>(u64 op, FPCR fpcr, FPSR& fpsr);
} // namespace Dynarmic::FP

@@ -14,4 +14,4 @@ class FPSR;
template<typename FPT>
FPT FPRSqrtEstimate(FPT op, FPCR fpcr, FPSR& fpsr);
} // namespace Dynarmic::FP

@@ -54,4 +54,4 @@ template u16 FPRSqrtStepFused<u16>(u16 op1, u16 op2, FPCR fpcr, FPSR& fpsr);
template u32 FPRSqrtStepFused<u32>(u32 op1, u32 op2, FPCR fpcr, FPSR& fpsr);
template u64 FPRSqrtStepFused<u64>(u64 op1, u64 op2, FPCR fpcr, FPSR& fpsr);
} // namespace Dynarmic::FP

@@ -14,4 +14,4 @@ class FPSR;
template<typename FPT>
FPT FPRSqrtStepFused(FPT op1, FPT op2, FPCR fpcr, FPSR& fpsr);
} // namespace Dynarmic::FP

@@ -97,4 +97,4 @@ template u16 FPRecipEstimate<u16>(u16 op, FPCR fpcr, FPSR& fpsr);
template u32 FPRecipEstimate<u32>(u32 op, FPCR fpcr, FPSR& fpsr);
template u64 FPRecipEstimate<u64>(u64 op, FPCR fpcr, FPSR& fpsr);
} // namespace Dynarmic::FP

@@ -14,4 +14,4 @@ class FPSR;
template<typename FPT>
FPT FPRecipEstimate(FPT op, FPCR fpcr, FPSR& fpsr);
} // namespace Dynarmic::FP

@@ -53,4 +53,4 @@ template u16 FPRecipStepFused<u16>(u16 op1, u16 op2, FPCR fpcr, FPSR& fpsr);
template u32 FPRecipStepFused<u32>(u32 op1, u32 op2, FPCR fpcr, FPSR& fpsr);
template u64 FPRecipStepFused<u64>(u64 op1, u64 op2, FPCR fpcr, FPSR& fpsr);
} // namespace Dynarmic::FP

@@ -14,4 +14,4 @@ class FPSR;
template<typename FPT>
FPT FPRecipStepFused(FPT op1, FPT op2, FPCR fpcr, FPSR& fpsr);
} // namespace Dynarmic::FP

@@ -33,7 +33,7 @@ u64 FPRoundInt(FPT op, FPCR fpcr, RoundingMode rounding, bool exact, FPSR& fpsr)
    if (type == FPType::Infinity) {
        return FPInfo<FPT>::Infinity(sign);
    }
    if (type == FPType::Zero) {
        return FPInfo<FPT>::Zero(sign);
    }
@@ -93,4 +93,4 @@ template u64 FPRoundInt<u16>(u16 op, FPCR fpcr, RoundingMode rounding, bool exac
template u64 FPRoundInt<u32>(u32 op, FPCR fpcr, RoundingMode rounding, bool exact, FPSR& fpsr);
template u64 FPRoundInt<u64>(u64 op, FPCR fpcr, RoundingMode rounding, bool exact, FPSR& fpsr);
} // namespace Dynarmic::FP
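
The first hunk forwards infinities and zeros unchanged, sign preserved, before any rounding work happens. The same guard pattern over hardware doubles (a sketch using <cmath>, not dynarmic's unpacked-FP path, which also handles NaNs and the exact flag):

    #include <cmath>

    double RoundIntGuarded(double op) {
        if (std::isinf(op) || op == 0.0) {
            return op;              // +/-inf and +/-0 round to themselves
        }
        return std::nearbyint(op);  // round using the current rounding mode
    }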

@@ -17,4 +17,4 @@ enum class RoundingMode;
template<typename FPT>
u64 FPRoundInt(FPT op, FPCR fpcr, RoundingMode rounding, bool exact, FPSR& fpsr);
} // namespace Dynarmic::FP

@@ -99,4 +99,4 @@ template u64 FPToFixed<u16>(size_t ibits, u16 op, size_t fbits, bool unsigned_,
template u64 FPToFixed<u32>(size_t ibits, u32 op, size_t fbits, bool unsigned_, FPCR fpcr, RoundingMode rounding, FPSR& fpsr);
template u64 FPToFixed<u64>(size_t ibits, u64 op, size_t fbits, bool unsigned_, FPCR fpcr, RoundingMode rounding, FPSR& fpsr);
} // namespace Dynarmic::FP

@@ -17,4 +17,4 @@ enum class RoundingMode;
template<typename FPT>
u64 FPToFixed(size_t ibits, FPT op, size_t fbits, bool unsigned_, FPCR fpcr, RoundingMode rounding, FPSR& fpsr);
} // namespace Dynarmic::FP

@@ -55,4 +55,4 @@ void FPProcessException(FPExc exception, FPCR fpcr, FPSR& fpsr) {
    }
}
} // namespace Dynarmic::FP

@@ -22,4 +22,4 @@ enum class FPExc {
void FPProcessException(FPExc exception, FPCR fpcr, FPSR& fpsr);
} // namespace Dynarmic::FP

@@ -89,4 +89,4 @@ template std::optional<u16> FPProcessNaNs3<u16>(FPType type1, FPType type2, FPTy
template std::optional<u32> FPProcessNaNs3<u32>(FPType type1, FPType type2, FPType type3, u32 op1, u32 op2, u32 op3, FPCR fpcr, FPSR& fpsr);
template std::optional<u64> FPProcessNaNs3<u64>(FPType type1, FPType type2, FPType type3, u64 op1, u64 op2, u64 op3, FPCR fpcr, FPSR& fpsr);
} // namespace Dynarmic::FP

@@ -23,4 +23,4 @@ std::optional<FPT> FPProcessNaNs(FPType type1, FPType type2, FPT op1, FPT op2, F
template<typename FPT>
std::optional<FPT> FPProcessNaNs3(FPType type1, FPType type2, FPType type3, FPT op1, FPT op2, FPT op3, FPCR fpcr, FPSR& fpsr);
} // namespace Dynarmic::FP

@@ -43,7 +43,7 @@ struct cartesian_product_impl<RL, L1, L2, Ls...> {
} // namespace detail
/// Produces the cartesian product of a set of lists
/// For example:
/// cartesian_product<list<A, B>, list<D, E>> == list<list<A, D>, list<A, E>, list<B, D>, list<B, E>
template<typename L1, typename... Ls>
using cartesian_product = typename detail::cartesian_product_impl<fmap<list, L1>, Ls...>::type;
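
A compile-time check of what the alias produces (illustrative; assumes list and cartesian_product from this header are in scope):

    #include <type_traits>

    struct A {}; struct B {}; struct D {}; struct E {};

    using product = cartesian_product<list<A, B>, list<D, E>>;
    static_assert(std::is_same_v<product, list<list<A, D>, list<A, E>, list<B, D>, list<B, E>>>);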

@@ -765,7 +765,7 @@ bool ArmTranslatorVisitor::vfp_VSTM_a1(Cond cond, bool p, bool u, bool D, bool w
    if (!p && !u && !w) {
        ASSERT_MSG(false, "Decode error");
    }
    if (p && !w) {
        ASSERT_MSG(false, "Decode error");
    }

@@ -34,17 +34,17 @@ bool TranslatorVisitor::RBIT_int(bool sf, Reg Rn, Reg Rd) {
    const IR::U32 first_lsl = ir.LogicalShiftLeft(ir.And(operand, ir.Imm32(0x55555555)), ir.Imm8(1));
    const IR::U32 first_lsr = ir.And(ir.LogicalShiftRight(operand, ir.Imm8(1)), ir.Imm32(0x55555555));
    const IR::U32 first = ir.Or(first_lsl, first_lsr);
    // x = (x & 0x33333333) << 2 | ((x >> 2) & 0x33333333);
    const IR::U32 second_lsl = ir.LogicalShiftLeft(ir.And(first, ir.Imm32(0x33333333)), ir.Imm8(2));
    const IR::U32 second_lsr = ir.And(ir.LogicalShiftRight(first, ir.Imm8(2)), ir.Imm32(0x33333333));
    const IR::U32 second = ir.Or(second_lsl, second_lsr);
    // x = (x & 0x0F0F0F0F) << 4 | ((x >> 4) & 0x0F0F0F0F);
    const IR::U32 third_lsl = ir.LogicalShiftLeft(ir.And(second, ir.Imm32(0x0F0F0F0F)), ir.Imm8(4));
    const IR::U32 third_lsr = ir.And(ir.LogicalShiftRight(second, ir.Imm8(4)), ir.Imm32(0x0F0F0F0F));
    const IR::U32 third = ir.Or(third_lsl, third_lsr);
    // x = (x << 24) | ((x & 0xFF00) << 8) | ((x >> 8) & 0xFF00) | (x >> 24);
    const IR::U32 fourth_lsl = ir.Or(ir.LogicalShiftLeft(third, ir.Imm8(24)),
                                     ir.LogicalShiftLeft(ir.And(third, ir.Imm32(0xFF00)), ir.Imm8(8)));
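
The IR above is the classic mask-and-shift bit reversal; the comments in the hunk give the recurrence directly. The same algorithm over a plain u32, as a standalone reference:

    #include <cstdint>

    std::uint32_t ReverseBits32(std::uint32_t x) {
        x = ((x & 0x55555555) << 1) | ((x >> 1) & 0x55555555);  // swap adjacent bits
        x = ((x & 0x33333333) << 2) | ((x >> 2) & 0x33333333);  // swap 2-bit pairs
        x = ((x & 0x0F0F0F0F) << 4) | ((x >> 4) & 0x0F0F0F0F);  // swap nibbles
        return (x << 24) | ((x & 0xFF00) << 8) | ((x >> 8) & 0xFF00) | (x >> 24);  // swap bytes
    }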

@@ -67,7 +67,7 @@ static bool RegSharedDecodeAndOperation(TranslatorVisitor& v, size_t scale, u8 s
    case MemOp::PREFETCH:
        // TODO: Prefetch
        break;
    default:
        UNREACHABLE();
    }

@@ -89,7 +89,7 @@ bool FPMinMax(TranslatorVisitor& v, bool Q, bool sz, Vec Vn, Vec Vd, MinMaxOpera
    for (size_t i = start + 1; i < end; i++) {
        const IR::U32U64 element = v.ir.VectorGetElement(esize, operand, i);
        result = op(result, element);
    }
@@ -128,7 +128,7 @@ bool ScalarMinMax(TranslatorVisitor& v, bool Q, Imm<2> size, Vec Vn, Vec Vd,
        return v.ir.ZeroExtendToWord(vec_element);
    };
    const auto op_func = [&](const auto& a, const auto& b) {
        switch (operation) {
        case ScalarMinMaxOperation::Max:

@@ -124,7 +124,7 @@ IR::U32 SM4Rotation(IREmitter& ir, IR::U32 intval, IR::U32 round_result_low_word
IR::U128 SM4Hash(IREmitter& ir, Vec Vn, Vec Vd, SM4RotationType type) {
    const IR::U128 n = ir.GetQ(Vn);
    IR::U128 roundresult = ir.GetQ(Vd);
    for (size_t i = 0; i < 4; i++) {
        const IR::U32 round_key = ir.VectorGetElement(32, n, i);

@@ -90,7 +90,7 @@ enum class LongOperationBehavior {
    Addition,
    Subtraction
};
bool LongOperation(TranslatorVisitor& v, bool Q, Imm<2> size, Vec Vm, Vec Vn, Vec Vd,
                   LongOperationBehavior behavior, Signedness sign) {
    if (size == 0b11) {

@@ -99,7 +99,7 @@ bool IntegerConvertToFloat(TranslatorVisitor& v, bool Q, bool sz, Vec Vn, Vec Vd
    const IR::U128 operand = v.V(datasize, Vn);
    const IR::U128 result = signedness == Signedness::Signed
                                ? v.ir.FPVectorFromSignedFixed(esize, operand, 0, rounding_mode)
                                : v.ir.FPVectorFromUnsignedFixed(esize, operand, 0, rounding_mode);
    v.V(datasize, Vd, result);
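
The conditional selects the signed or unsigned fixed-to-float IR op per element, here with zero fraction bits, i.e. a plain integer convert. A scalar equivalent for one 32-bit lane (illustrative helper, not dynarmic's API):

    #include <cstdint>

    double FromInt32Lane(std::uint32_t bits, bool is_signed) {
        return is_signed ? static_cast<double>(static_cast<std::int32_t>(bits))
                         : static_cast<double>(bits);
    }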

@@ -96,7 +96,7 @@ bool FPMultiplyByElementHalfPrecision(TranslatorVisitor& v, bool Q, Imm<1> L, Im
    const Vec Vm = Vmlo.ZeroExtend<Vec>();
    const size_t esize = 16;
    const size_t datasize = Q ? 128 : 64;
    const IR::UAny element2 = v.ir.VectorGetElement(esize, v.V(idxdsize, Vm), index);
    const IR::U128 operand1 = v.V(datasize, Vn);
    const IR::U128 operand2 = Q ? v.ir.VectorBroadcast(esize, element2) : v.ir.VectorBroadcastLower(esize, element2);

@@ -20,7 +20,7 @@ using Cond = IR::Cond;
enum class Reg {
    R0, R1, R2, R3, R4, R5, R6, R7,
    R8, R9, R10, R11, R12, R13, R14, R15,
    R16, R17, R18, R19, R20, R21, R22, R23,
    R24, R25, R26, R27, R28, R29, R30, R31,
    LR = R30,
@@ -29,7 +29,7 @@ enum class Reg {
enum class Vec {
    V0, V1, V2, V3, V4, V5, V6, V7,
    V8, V9, V10, V11, V12, V13, V14, V15,
    V16, V17, V18, V19, V20, V21, V22, V23,
    V24, V25, V26, V27, V28, V29, V30, V31,
};

@@ -126,7 +126,7 @@ void FoldLeastSignificantByte(IR::Inst& inst) {
    if (!inst.AreAllArgsImmediates()) {
        return;
    }
    const auto operand = inst.GetArg(0);
    inst.ReplaceUsesWith(IR::Value{static_cast<u8>(operand.GetImmediateAsU64())});
}