diff --git a/src/backend_x64/emit_x64.cpp b/src/backend_x64/emit_x64.cpp
index ce1e25e5..ca1421f8 100644
--- a/src/backend_x64/emit_x64.cpp
+++ b/src/backend_x64/emit_x64.cpp
@@ -41,20 +41,6 @@ static Xbyak::Address MJitStateCpsr() {
     return dword[r15 + offsetof(JitState, Cpsr)];
 }
 
-static IR::Inst* FindUseWithOpcode(IR::Inst* inst, IR::Opcode opcode) {
-    switch (opcode) {
-    case IR::Opcode::GetCarryFromOp:
-        return inst->carry_inst;
-    case IR::Opcode::GetOverflowFromOp:
-        return inst->overflow_inst;
-    default:
-        break;
-    }
-
-    ASSERT_MSG(false, "unreachable");
-    return nullptr;
-}
-
 static void EraseInstruction(IR::Block& block, IR::Inst* inst) {
     block.Instructions().erase(block.Instructions().iterator_to(*inst));
 }
@@ -406,7 +392,7 @@ void EmitX64::EmitLeastSignificantWord(IR::Block&, IR::Inst* inst) {
 }
 
 void EmitX64::EmitMostSignificantWord(IR::Block& block, IR::Inst* inst) {
-    auto carry_inst = FindUseWithOpcode(inst, IR::Opcode::GetCarryFromOp);
+    auto carry_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetCarryFromOp);
 
     Xbyak::Reg64 result = reg_alloc.UseDefGpr(inst->GetArg(0), inst);
     code->shr(result, 32);
@@ -457,7 +443,7 @@ void EmitX64::EmitIsZero64(IR::Block&, IR::Inst* inst) {
 }
 
 void EmitX64::EmitLogicalShiftLeft(IR::Block& block, IR::Inst* inst) {
-    auto carry_inst = FindUseWithOpcode(inst, IR::Opcode::GetCarryFromOp);
+    auto carry_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetCarryFromOp);
 
     // TODO: Consider using BMI2 instructions like SHLX when arm-in-host flags is implemented.
 
@@ -552,7 +538,7 @@ void EmitX64::EmitLogicalShiftLeft(IR::Block& block, IR::Inst* inst) {
 }
 
 void EmitX64::EmitLogicalShiftRight(IR::Block& block, IR::Inst* inst) {
-    auto carry_inst = FindUseWithOpcode(inst, IR::Opcode::GetCarryFromOp);
+    auto carry_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetCarryFromOp);
 
     if (!carry_inst) {
         if (!inst->GetArg(2).IsImmediate()) {
@@ -657,7 +643,7 @@ void EmitX64::EmitLogicalShiftRight64(IR::Block& block, IR::Inst* inst) {
 }
 
 void EmitX64::EmitArithmeticShiftRight(IR::Block& block, IR::Inst* inst) {
-    auto carry_inst = FindUseWithOpcode(inst, IR::Opcode::GetCarryFromOp);
+    auto carry_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetCarryFromOp);
 
     if (!carry_inst) {
         if (!inst->GetArg(2).IsImmediate()) {
@@ -740,7 +726,7 @@ void EmitX64::EmitArithmeticShiftRight(IR::Block& block, IR::Inst* inst) {
 }
 
 void EmitX64::EmitRotateRight(IR::Block& block, IR::Inst* inst) {
-    auto carry_inst = FindUseWithOpcode(inst, IR::Opcode::GetCarryFromOp);
+    auto carry_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetCarryFromOp);
 
     if (!carry_inst) {
         if (!inst->GetArg(2).IsImmediate()) {
@@ -814,7 +800,7 @@ void EmitX64::EmitRotateRight(IR::Block& block, IR::Inst* inst) {
 }
 
 void EmitX64::EmitRotateRightExtended(IR::Block& block, IR::Inst* inst) {
-    auto carry_inst = FindUseWithOpcode(inst, IR::Opcode::GetCarryFromOp);
+    auto carry_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetCarryFromOp);
 
     Xbyak::Reg32 result = reg_alloc.UseDefGpr(inst->GetArg(0), inst).cvt32();
     Xbyak::Reg8 carry = carry_inst
@@ -842,8 +828,8 @@ static Xbyak::Reg8 DoCarry(RegAlloc& reg_alloc, const IR::Value& carry_in, IR::I
 }
 
 void EmitX64::EmitAddWithCarry(IR::Block& block, IR::Inst* inst) {
-    auto carry_inst = FindUseWithOpcode(inst, IR::Opcode::GetCarryFromOp);
-    auto overflow_inst = FindUseWithOpcode(inst, IR::Opcode::GetOverflowFromOp);
+    auto carry_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetCarryFromOp);
+    auto overflow_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp);
 
     IR::Value a = inst->GetArg(0);
     IR::Value b = inst->GetArg(1);
@@ -907,8 +893,8 @@ void EmitX64::EmitAdd64(IR::Block& block, IR::Inst* inst) {
 }
 
 void EmitX64::EmitSubWithCarry(IR::Block& block, IR::Inst* inst) {
-    auto carry_inst = FindUseWithOpcode(inst, IR::Opcode::GetCarryFromOp);
-    auto overflow_inst = FindUseWithOpcode(inst, IR::Opcode::GetOverflowFromOp);
+    auto carry_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetCarryFromOp);
+    auto overflow_inst = inst->GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp);
 
     IR::Value a = inst->GetArg(0);
     IR::Value b = inst->GetArg(1);
diff --git a/src/frontend/ir/microinstruction.cpp b/src/frontend/ir/microinstruction.cpp
index 90065974..557c0590 100644
--- a/src/frontend/ir/microinstruction.cpp
+++ b/src/frontend/ir/microinstruction.cpp
@@ -214,6 +214,21 @@ bool Inst::MayHaveSideEffects() const {
 }
 
+Inst* Inst::GetAssociatedPseudoOperation(Opcode opcode) {
+    // This is faster than doing a search through the block.
+    switch (opcode) {
+    case IR::Opcode::GetCarryFromOp:
+        return carry_inst;
+    case IR::Opcode::GetOverflowFromOp:
+        return overflow_inst;
+    default:
+        break;
+    }
+
+    ASSERT_MSG(false, "Not a valid pseudo-operation");
+    return nullptr;
+}
+
 Type Inst::GetType() const {
     if (op == Opcode::Identity)
         return args[0].GetType();
@@ -266,9 +281,11 @@ void Inst::Use(Value& value) {
 
     switch (op){
     case Opcode::GetCarryFromOp:
+        ASSERT_MSG(!value.GetInst()->carry_inst, "Only one of each type of pseudo-op allowed");
         value.GetInst()->carry_inst = this;
         break;
     case Opcode::GetOverflowFromOp:
+        ASSERT_MSG(!value.GetInst()->overflow_inst, "Only one of each type of pseudo-op allowed");
         value.GetInst()->overflow_inst = this;
         break;
     default:
diff --git a/src/frontend/ir/microinstruction.h b/src/frontend/ir/microinstruction.h
index c6a781a1..ed4e1e80 100644
--- a/src/frontend/ir/microinstruction.h
+++ b/src/frontend/ir/microinstruction.h
@@ -74,6 +74,8 @@ public:
     bool MayHaveSideEffects() const;
 
     bool HasUses() const { return use_count > 0; }
+    /// Gets a pseudo-operation associated with this instruction.
+    Inst* GetAssociatedPseudoOperation(Opcode opcode);
 
     /// Get the microop this microinstruction represents.
     Opcode GetOpcode() const { return op; }
@@ -90,8 +92,6 @@ public:
     void ReplaceUsesWith(Value& replacement);
 
     size_t use_count = 0;
-    Inst* carry_inst = nullptr;
-    Inst* overflow_inst = nullptr;
 
 private:
     void Use(Value& value);
@@ -99,6 +99,9 @@ private:
 
     Opcode op;
     std::array args;
+
+    Inst* carry_inst = nullptr;
+    Inst* overflow_inst = nullptr;
 };
 
 } // namespace IR
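For reference, below is a standalone sketch of the pattern this patch formalizes: each instruction keeps back-pointers to the pseudo-operations that use it, so GetAssociatedPseudoOperation is a constant-time pointer read rather than a scan over the block. The mock Opcode/Inst types and the main() driver are purely illustrative and are not dynarmic code; only the shape of the accessor and of Use() mirrors the patch above.

// Standalone illustration (not dynarmic source) of caching pseudo-op users on the instruction.
#include <cassert>
#include <cstdio>

enum class Opcode { AddWithCarry, GetCarryFromOp, GetOverflowFromOp };

struct Inst {
    explicit Inst(Opcode op) : op(op) {}

    // Mirrors the accessor added in microinstruction.cpp: a pointer read, no block search.
    Inst* GetAssociatedPseudoOperation(Opcode opcode) {
        switch (opcode) {
        case Opcode::GetCarryFromOp:
            return carry_inst;
        case Opcode::GetOverflowFromOp:
            return overflow_inst;
        default:
            assert(false && "Not a valid pseudo-operation");
            return nullptr;
        }
    }

    // Mirrors Inst::Use: a pseudo-op registers itself with the value it consumes.
    void Use(Inst* value) {
        switch (op) {
        case Opcode::GetCarryFromOp:
            assert(!value->carry_inst && "Only one of each type of pseudo-op allowed");
            value->carry_inst = this;
            break;
        case Opcode::GetOverflowFromOp:
            assert(!value->overflow_inst && "Only one of each type of pseudo-op allowed");
            value->overflow_inst = this;
            break;
        default:
            break;
        }
    }

    Opcode op;

private:
    Inst* carry_inst = nullptr;
    Inst* overflow_inst = nullptr;
};

int main() {
    Inst adc{Opcode::AddWithCarry};
    Inst get_carry{Opcode::GetCarryFromOp};
    get_carry.Use(&adc);

    // An emitter can now cheaply ask whether the carry/overflow results are live
    // (nullptr means the corresponding pseudo-op does not exist and the flag is dead).
    std::printf("carry used: %d, overflow used: %d\n",
                adc.GetAssociatedPseudoOperation(Opcode::GetCarryFromOp) != nullptr,
                adc.GetAssociatedPseudoOperation(Opcode::GetOverflowFromOp) != nullptr);
}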