backend/x64/emit_*: Apply const where applicable
parent dd315e89eb
commit cba9351b82

5 changed files with 36 additions and 36 deletions
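The pattern is the same in every hunk below: a local that is initialised once and never reassigned gains a const qualifier. As a minimal, self-contained C++ sketch of the idea (the names here are invented for illustration only and are not dynarmic APIs):

    #include <cstdio>
    #include <vector>

    int main() {
        const std::vector<int> args{4, 8, 15};   // bound once, read-only afterwards
        const auto first = args.front();         // const auto: deduced type, immutable binding
        const bool is_small = args.size() < 10;  // previously `bool is_small = ...`

        // first = 0;                            // with const, accidental writes no longer compile
        std::printf("%d %d\n", first, static_cast<int>(is_small));
        return 0;
    }

The diff itself changes no behaviour; const only documents intent and lets the compiler reject accidental reassignment of these emitter-local bindings.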
@@ -38,7 +38,7 @@ void EmitX64::EmitPack2x64To1x128(EmitContext& ctx, IR::Inst* inst) {
         code.movq(result, lo);
         code.pinsrq(result, hi, 1);
     } else {
-        Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
+        const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
         code.movq(result, lo);
         code.movq(tmp, hi);
         code.punpcklqdq(result, tmp);
@@ -53,7 +53,7 @@ void EmitX64::EmitLeastSignificantWord(EmitContext& ctx, IR::Inst* inst) {
 }
 
 void EmitX64::EmitMostSignificantWord(EmitContext& ctx, IR::Inst* inst) {
-    auto carry_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetCarryFromOp);
+    const auto carry_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetCarryFromOp);
 
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
     const Xbyak::Reg64 result = ctx.reg_alloc.UseScratchGpr(args[0]);
@@ -220,7 +220,7 @@ void EmitX64::EmitExtractRegister64(Dynarmic::BackendX64::EmitContext& ctx, IR::
 }
 
 void EmitX64::EmitLogicalShiftLeft32(EmitContext& ctx, IR::Inst* inst) {
-    auto carry_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetCarryFromOp);
+    const auto carry_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetCarryFromOp);
 
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
     auto& operand_arg = args[0];
@@ -353,7 +353,7 @@ void EmitX64::EmitLogicalShiftLeft64(EmitContext& ctx, IR::Inst* inst) {
 }
 
 void EmitX64::EmitLogicalShiftRight32(EmitContext& ctx, IR::Inst* inst) {
-    auto carry_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetCarryFromOp);
+    const auto carry_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetCarryFromOp);
 
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
     auto& operand_arg = args[0];
@@ -485,7 +485,7 @@ void EmitX64::EmitLogicalShiftRight64(EmitContext& ctx, IR::Inst* inst) {
 }
 
 void EmitX64::EmitArithmeticShiftRight32(EmitContext& ctx, IR::Inst* inst) {
-    auto carry_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetCarryFromOp);
+    const auto carry_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetCarryFromOp);
 
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
     auto& operand_arg = args[0];
@@ -604,7 +604,7 @@ void EmitX64::EmitArithmeticShiftRight64(EmitContext& ctx, IR::Inst* inst) {
 }
 
 void EmitX64::EmitRotateRight32(EmitContext& ctx, IR::Inst* inst) {
-    auto carry_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetCarryFromOp);
+    const auto carry_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetCarryFromOp);
 
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
     auto& operand_arg = args[0];
@@ -706,7 +706,7 @@ void EmitX64::EmitRotateRight64(EmitContext& ctx, IR::Inst* inst) {
 }
 
 void EmitX64::EmitRotateRightExtended(EmitContext& ctx, IR::Inst* inst) {
-    auto carry_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetCarryFromOp);
+    const auto carry_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetCarryFromOp);
 
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
     const Xbyak::Reg32 result = ctx.reg_alloc.UseScratchGpr(args[0]).cvt32();

@@ -1013,7 +1013,7 @@ void EmitX64::EmitFPSub64(EmitContext& ctx, IR::Inst* inst) {
 
 static Xbyak::Reg64 SetFpscrNzcvFromFlags(BlockOfCode& code, EmitContext& ctx) {
     ctx.reg_alloc.ScratchGpr({HostLoc::RCX}); // shifting requires use of cl
-    Xbyak::Reg64 nzcv = ctx.reg_alloc.ScratchGpr();
+    const Xbyak::Reg64 nzcv = ctx.reg_alloc.ScratchGpr();
 
     // x64 flags    ARM flags
     // ZF PF CF     NZCV
@@ -1040,9 +1040,9 @@ static Xbyak::Reg64 SetFpscrNzcvFromFlags(BlockOfCode& code, EmitContext& ctx) {
 
 void EmitX64::EmitFPCompare32(EmitContext& ctx, IR::Inst* inst) {
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-    Xbyak::Xmm reg_a = ctx.reg_alloc.UseXmm(args[0]);
-    Xbyak::Xmm reg_b = ctx.reg_alloc.UseXmm(args[1]);
-    bool exc_on_qnan = args[2].GetImmediateU1();
+    const Xbyak::Xmm reg_a = ctx.reg_alloc.UseXmm(args[0]);
+    const Xbyak::Xmm reg_b = ctx.reg_alloc.UseXmm(args[1]);
+    const bool exc_on_qnan = args[2].GetImmediateU1();
 
     if (exc_on_qnan) {
         code.comiss(reg_a, reg_b);
@@ -1050,15 +1050,15 @@ void EmitX64::EmitFPCompare32(EmitContext& ctx, IR::Inst* inst) {
         code.ucomiss(reg_a, reg_b);
     }
 
-    Xbyak::Reg64 nzcv = SetFpscrNzcvFromFlags(code, ctx);
+    const Xbyak::Reg64 nzcv = SetFpscrNzcvFromFlags(code, ctx);
     ctx.reg_alloc.DefineValue(inst, nzcv);
 }
 
 void EmitX64::EmitFPCompare64(EmitContext& ctx, IR::Inst* inst) {
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-    Xbyak::Xmm reg_a = ctx.reg_alloc.UseXmm(args[0]);
-    Xbyak::Xmm reg_b = ctx.reg_alloc.UseXmm(args[1]);
-    bool exc_on_qnan = args[2].GetImmediateU1();
+    const Xbyak::Xmm reg_a = ctx.reg_alloc.UseXmm(args[0]);
+    const Xbyak::Xmm reg_b = ctx.reg_alloc.UseXmm(args[1]);
+    const bool exc_on_qnan = args[2].GetImmediateU1();
 
     if (exc_on_qnan) {
         code.comisd(reg_a, reg_b);
@@ -1066,7 +1066,7 @@ void EmitX64::EmitFPCompare64(EmitContext& ctx, IR::Inst* inst) {
         code.ucomisd(reg_a, reg_b);
     }
 
-    Xbyak::Reg64 nzcv = SetFpscrNzcvFromFlags(code, ctx);
+    const Xbyak::Reg64 nzcv = SetFpscrNzcvFromFlags(code, ctx);
     ctx.reg_alloc.DefineValue(inst, nzcv);
 }
 

@@ -18,7 +18,7 @@ using namespace Xbyak::util;
 
 void EmitX64::EmitPackedAddU8(EmitContext& ctx, IR::Inst* inst) {
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-    auto ge_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetGEFromOp);
+    const auto ge_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetGEFromOp);
 
     const Xbyak::Xmm xmm_a = ctx.reg_alloc.UseScratchXmm(args[0]);
     const Xbyak::Xmm xmm_b = ctx.reg_alloc.UseXmm(args[1]);
@@ -45,7 +45,7 @@ void EmitX64::EmitPackedAddU8(EmitContext& ctx, IR::Inst* inst) {
 
 void EmitX64::EmitPackedAddS8(EmitContext& ctx, IR::Inst* inst) {
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-    auto ge_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetGEFromOp);
+    const auto ge_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetGEFromOp);
 
     const Xbyak::Xmm xmm_a = ctx.reg_alloc.UseScratchXmm(args[0]);
     const Xbyak::Xmm xmm_b = ctx.reg_alloc.UseXmm(args[1]);
@@ -72,7 +72,7 @@ void EmitX64::EmitPackedAddS8(EmitContext& ctx, IR::Inst* inst) {
 
 void EmitX64::EmitPackedAddU16(EmitContext& ctx, IR::Inst* inst) {
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-    auto ge_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetGEFromOp);
+    const auto ge_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetGEFromOp);
 
     const Xbyak::Xmm xmm_a = ctx.reg_alloc.UseScratchXmm(args[0]);
     const Xbyak::Xmm xmm_b = ctx.reg_alloc.UseXmm(args[1]);
@@ -114,7 +114,7 @@ void EmitX64::EmitPackedAddU16(EmitContext& ctx, IR::Inst* inst) {
 
 void EmitX64::EmitPackedAddS16(EmitContext& ctx, IR::Inst* inst) {
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-    auto ge_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetGEFromOp);
+    const auto ge_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetGEFromOp);
 
     const Xbyak::Xmm xmm_a = ctx.reg_alloc.UseScratchXmm(args[0]);
     const Xbyak::Xmm xmm_b = ctx.reg_alloc.UseXmm(args[1]);
@@ -141,7 +141,7 @@ void EmitX64::EmitPackedAddS16(EmitContext& ctx, IR::Inst* inst) {
 
 void EmitX64::EmitPackedSubU8(EmitContext& ctx, IR::Inst* inst) {
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-    auto ge_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetGEFromOp);
+    const auto ge_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetGEFromOp);
 
     const Xbyak::Xmm xmm_a = ctx.reg_alloc.UseScratchXmm(args[0]);
     const Xbyak::Xmm xmm_b = ctx.reg_alloc.UseXmm(args[1]);
@@ -164,7 +164,7 @@ void EmitX64::EmitPackedSubU8(EmitContext& ctx, IR::Inst* inst) {
 
 void EmitX64::EmitPackedSubS8(EmitContext& ctx, IR::Inst* inst) {
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-    auto ge_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetGEFromOp);
+    const auto ge_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetGEFromOp);
 
     const Xbyak::Xmm xmm_a = ctx.reg_alloc.UseScratchXmm(args[0]);
     const Xbyak::Xmm xmm_b = ctx.reg_alloc.UseXmm(args[1]);
@@ -191,7 +191,7 @@ void EmitX64::EmitPackedSubS8(EmitContext& ctx, IR::Inst* inst) {
 
 void EmitX64::EmitPackedSubU16(EmitContext& ctx, IR::Inst* inst) {
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-    auto ge_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetGEFromOp);
+    const auto ge_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetGEFromOp);
 
     if (!ge_inst) {
         const Xbyak::Xmm xmm_a = ctx.reg_alloc.UseScratchXmm(args[0]);
@@ -242,7 +242,7 @@ void EmitX64::EmitPackedSubU16(EmitContext& ctx, IR::Inst* inst) {
 
 void EmitX64::EmitPackedSubS16(EmitContext& ctx, IR::Inst* inst) {
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-    auto ge_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetGEFromOp);
+    const auto ge_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetGEFromOp);
 
     const Xbyak::Xmm xmm_a = ctx.reg_alloc.UseScratchXmm(args[0]);
     const Xbyak::Xmm xmm_b = ctx.reg_alloc.UseXmm(args[1]);
@@ -512,7 +512,7 @@ void EmitX64::EmitPackedHalvingSubS16(EmitContext& ctx, IR::Inst* inst) {
 
 void EmitPackedSubAdd(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst, bool hi_is_sum, bool is_signed, bool is_halving) {
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
-    auto ge_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetGEFromOp);
+    const auto ge_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetGEFromOp);
 
     const Xbyak::Reg32 reg_a_hi = ctx.reg_alloc.UseScratchGpr(args[0]).cvt32();
     const Xbyak::Reg32 reg_b_hi = ctx.reg_alloc.UseScratchGpr(args[1]).cvt32();

@@ -30,7 +30,7 @@ enum class Op {
 
 template<Op op, size_t size>
 void EmitSignedSaturatedOp(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
-    auto overflow_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp);
+    const auto overflow_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp);
 
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
 
@@ -79,7 +79,7 @@ void EmitSignedSaturatedOp(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst)
 
 template<Op op, size_t size>
 void EmitUnsignedSaturatedOp(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
-    auto overflow_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp);
+    const auto overflow_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp);
 
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
 
@@ -135,7 +135,7 @@ void EmitX64::EmitSignedSaturatedAdd64(EmitContext& ctx, IR::Inst* inst) {
 }
 
 void EmitX64::EmitSignedSaturatedDoublingMultiplyReturnHigh16(EmitContext& ctx, IR::Inst* inst) {
-    auto overflow_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp);
+    const auto overflow_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp);
 
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
 
@@ -165,7 +165,7 @@ void EmitX64::EmitSignedSaturatedDoublingMultiplyReturnHigh16(EmitContext& ctx,
 }
 
 void EmitX64::EmitSignedSaturatedDoublingMultiplyReturnHigh32(EmitContext& ctx, IR::Inst* inst) {
-    auto overflow_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp);
+    const auto overflow_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp);
 
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
 
@@ -211,7 +211,7 @@ void EmitX64::EmitSignedSaturatedSub64(EmitContext& ctx, IR::Inst* inst) {
 }
 
 void EmitX64::EmitSignedSaturation(EmitContext& ctx, IR::Inst* inst) {
-    auto overflow_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp);
+    const auto overflow_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp);
 
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
     const size_t N = args[1].GetImmediateU8();
@@ -292,7 +292,7 @@ void EmitX64::EmitUnsignedSaturatedSub64(EmitContext& ctx, IR::Inst* inst) {
 }
 
 void EmitX64::EmitUnsignedSaturation(EmitContext& ctx, IR::Inst* inst) {
-    auto overflow_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp);
+    const auto overflow_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp);
 
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
     const size_t N = args[1].GetImmediateU8();

@@ -467,7 +467,7 @@ void EmitX64::EmitVectorArithmeticShiftRight8(EmitContext& ctx, IR::Inst* inst)
 void EmitX64::EmitVectorArithmeticShiftRight16(EmitContext& ctx, IR::Inst* inst) {
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
 
-    Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
+    const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
     const u8 shift_amount = args[1].GetImmediateU8();
 
     code.psraw(result, shift_amount);
@@ -478,7 +478,7 @@ void EmitX64::EmitVectorArithmeticShiftRight16(EmitContext& ctx, IR::Inst* inst)
 void EmitX64::EmitVectorArithmeticShiftRight32(EmitContext& ctx, IR::Inst* inst) {
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
 
-    Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
+    const Xbyak::Xmm result = ctx.reg_alloc.UseScratchXmm(args[0]);
     const u8 shift_amount = args[1].GetImmediateU8();
 
     code.psrad(result, shift_amount);
@@ -1001,9 +1001,9 @@ void EmitX64::EmitVectorEqual64(EmitContext& ctx, IR::Inst* inst) {
 
     auto args = ctx.reg_alloc.GetArgumentInfo(inst);
 
-    Xbyak::Xmm xmm_a = ctx.reg_alloc.UseScratchXmm(args[0]);
-    Xbyak::Xmm xmm_b = ctx.reg_alloc.UseXmm(args[1]);
-    Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
+    const Xbyak::Xmm xmm_a = ctx.reg_alloc.UseScratchXmm(args[0]);
+    const Xbyak::Xmm xmm_b = ctx.reg_alloc.UseXmm(args[1]);
+    const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
 
     code.pcmpeqd(xmm_a, xmm_b);
     code.pshufd(tmp, xmm_a, 0b10110001);