constant_propagation_pass: Prepare for IR matchers

MerryMage 2020-04-20 20:05:32 +01:00
parent 0d7476d3ec
commit 4573511fe3
7 changed files with 203 additions and 73 deletions

src/CMakeLists.txt

@@ -96,6 +96,7 @@ add_library(dynarmic
     ir_opt/constant_propagation_pass.cpp
     ir_opt/dead_code_elimination_pass.cpp
     ir_opt/identity_removal_pass.cpp
+    ir_opt/ir_matcher.h
     ir_opt/passes.h
     ir_opt/verification_pass.cpp
 )

src/frontend/ir/ir_emitter.cpp

@@ -2607,12 +2607,4 @@ void IREmitter::SetTerm(const Terminal& terminal) {
     block.SetTerminal(terminal);
 }
 
-void IREmitter::SetInsertionPoint(IR::Inst* new_insertion_point) {
-    insertion_point = IR::Block::iterator{*new_insertion_point};
-}
-
-void IREmitter::SetInsertionPoint(IR::Block::iterator new_insertion_point) {
-    insertion_point = new_insertion_point;
-}
-
 } // namespace Dynarmic::IR

src/frontend/ir/ir_emitter.h

@@ -369,8 +369,13 @@ public:
     void SetTerm(const Terminal& terminal);
 
-    void SetInsertionPoint(IR::Inst* new_insertion_point);
-    void SetInsertionPoint(IR::Block::iterator new_insertion_point);
+    void SetInsertionPoint(IR::Inst* new_insertion_point) {
+        insertion_point = IR::Block::iterator{*new_insertion_point};
+    }
+
+    void SetInsertionPoint(IR::Block::iterator new_insertion_point) {
+        insertion_point = new_insertion_point;
+    }
 
 protected:
     IR::Block::iterator insertion_point;
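Defining SetInsertionPoint inline in the header makes it usable from optimization passes that want to emit replacement IR at a specific point in a block. A minimal sketch of such a pass follows; it is not part of this commit, the pass name is hypothetical, and it assumes IR::IREmitter can be constructed over an existing block:

#include "frontend/ir/basic_block.h"
#include "frontend/ir/ir_emitter.h"

namespace Dynarmic::Optimization {

void ExampleRewritePass(IR::Block& block) {
    IR::IREmitter ir{block};  // assumption: an emitter can wrap an existing block

    for (auto& inst : block) {
        // Instructions emitted through `ir` from here on are placed at the
        // insertion point (at `inst`) rather than appended to the block.
        ir.SetInsertionPoint(&inst);

        // ... emit replacement IR here, then rewrite uses of `inst` ...
    }
}

} // namespace Dynarmic::Optimization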

src/frontend/ir/value.h

@@ -156,6 +156,8 @@ public:
     explicit TypedValue(const Value& value) : Value(value) {
         ASSERT((value.GetType() & type_) != Type::Void);
     }
+
+    explicit TypedValue(Inst* inst) : TypedValue(Value(inst)) {}
 };
 
 using U1 = TypedValue<Type::U1>;
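The new constructor lets a raw IR::Inst* be wrapped directly in a typed value, which is convenient once a matcher has captured an instruction and a pass wants to hand it back to the emitter. A small illustrative sketch, not from this commit and with a hypothetical helper name:

// Wrap an instruction known to produce a 32-bit result into a typed value.
// The ASSERT in TypedValue(const Value&) still checks the type at runtime.
IR::U32 AsU32(IR::Inst* inst) {
    return IR::U32{inst};  // previously this required IR::U32{IR::Value{inst}}
}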

src/ir_opt/constant_propagation_pass.cpp

@@ -4,13 +4,22 @@
  * General Public License version 2 or any later version.
  */
 
+#include <optional>
+
+#include "common/assert.h"
 #include "common/bit_util.h"
 #include "common/common_types.h"
 #include "frontend/ir/basic_block.h"
+#include "frontend/ir/ir_emitter.h"
 #include "frontend/ir/opcodes.h"
+#include "ir_opt/ir_matcher.h"
 #include "ir_opt/passes.h"
 
 namespace Dynarmic::Optimization {
 
+using namespace IRMatcher;
+using Op = Dynarmic::IR::Opcode;
+
 namespace {
 
 // Tiny helper to avoid the need to store based off the opcode
@@ -89,17 +98,17 @@ void FoldAND(IR::Inst& inst, bool is_32_bit) {
 //
 // 1. imm -> swap(imm)
 //
-void FoldByteReverse(IR::Inst& inst, IR::Opcode op) {
+void FoldByteReverse(IR::Inst& inst, Op op) {
     const auto operand = inst.GetArg(0);
 
     if (!operand.IsImmediate()) {
         return;
     }
 
-    if (op == IR::Opcode::ByteReverseWord) {
+    if (op == Op::ByteReverseWord) {
         const u32 result = Common::Swap32(static_cast<u32>(operand.GetImmediateAsU64()));
         inst.ReplaceUsesWith(IR::Value{result});
-    } else if (op == IR::Opcode::ByteReverseHalf) {
+    } else if (op == Op::ByteReverseHalf) {
         const u16 result = Common::Swap16(static_cast<u16>(operand.GetImmediateAsU64()));
         inst.ReplaceUsesWith(IR::Value{result});
     } else {
@@ -188,7 +197,7 @@ void FoldMostSignificantBit(IR::Inst& inst) {
 }
 
 void FoldMostSignificantWord(IR::Inst& inst) {
-    IR::Inst* carry_inst = inst.GetAssociatedPseudoOperation(IR::Opcode::GetCarryFromOp);
+    IR::Inst* carry_inst = inst.GetAssociatedPseudoOperation(Op::GetCarryFromOp);
 
     if (!inst.AreAllArgsImmediates()) {
         return;
@@ -239,21 +248,16 @@ void FoldNOT(IR::Inst& inst, bool is_32_bit) {
 // 3. 0 | y -> y
 //
 void FoldOR(IR::Inst& inst, bool is_32_bit) {
-    const auto lhs = inst.GetArg(0);
-    const auto rhs = inst.GetArg(1);
-
-    if (lhs.IsImmediate() && rhs.IsImmediate()) {
-        const u64 result = lhs.GetImmediateAsU64() | rhs.GetImmediateAsU64();
-        ReplaceUsesWith(inst, is_32_bit, result);
-    } else if (lhs.IsZero()) {
-        inst.ReplaceUsesWith(rhs);
-    } else if (rhs.IsZero()) {
-        inst.ReplaceUsesWith(lhs);
+    if (FoldCommutative(inst, is_32_bit, [](u64 a, u64 b) { return a | b; })) {
+        const auto rhs = inst.GetArg(1);
+        if (rhs.IsZero()) {
+            inst.ReplaceUsesWith(inst.GetArg(0));
+        }
     }
 }
 
 void FoldShifts(IR::Inst& inst) {
-    IR::Inst* carry_inst = inst.GetAssociatedPseudoOperation(IR::Opcode::GetCarryFromOp);
+    IR::Inst* carry_inst = inst.GetAssociatedPseudoOperation(Op::GetCarryFromOp);
 
     // The 32-bit variants can contain 3 arguments, while the
     // 64-bit variants only contain 2.
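The rewritten FoldOR relies on the FoldCommutative helper, whose definition lies outside this hunk. Judging from how it is called here, it folds the operation when both operands are immediates and otherwise canonicalizes a lone immediate into the second operand, returning true when the caller should still apply its own identities. A plausible minimal version is sketched below under those assumptions; the real helper in the pass may differ (for example, it may also reassociate chained operations), and the name is suffixed to make clear this is not the committed code:

// Sketch only: fold `inst` when both operands are immediates; otherwise move a
// lone immediate into operand 1 so callers only need to inspect GetArg(1).
template <typename ImmFn>
bool FoldCommutativeSketch(IR::Inst& inst, bool is_32_bit, ImmFn imm_fn) {
    const auto lhs = inst.GetArg(0);
    const auto rhs = inst.GetArg(1);

    if (lhs.IsImmediate() && rhs.IsImmediate()) {
        const u64 result = imm_fn(lhs.GetImmediateAsU64(), rhs.GetImmediateAsU64());
        ReplaceUsesWith(inst, is_32_bit, result);  // helper already used elsewhere in this pass
        return false;  // fully folded; nothing left for the caller to do
    }

    if (lhs.IsImmediate() && !rhs.IsImmediate()) {
        // Canonicalize: keep the non-immediate operand first.
        inst.SetArg(0, rhs);
        inst.SetArg(1, lhs);
    }

    return true;  // caller may still apply identities such as `x | 0 -> x`
}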
@@ -314,80 +318,80 @@ void ConstantPropagation(IR::Block& block) {
         const auto opcode = inst.GetOpcode();
 
         switch (opcode) {
-        case IR::Opcode::LeastSignificantWord:
+        case Op::LeastSignificantWord:
             FoldLeastSignificantWord(inst);
             break;
-        case IR::Opcode::MostSignificantWord:
+        case Op::MostSignificantWord:
             FoldMostSignificantWord(inst);
             break;
-        case IR::Opcode::LeastSignificantHalf:
+        case Op::LeastSignificantHalf:
             FoldLeastSignificantHalf(inst);
             break;
-        case IR::Opcode::LeastSignificantByte:
+        case Op::LeastSignificantByte:
             FoldLeastSignificantByte(inst);
             break;
-        case IR::Opcode::MostSignificantBit:
+        case Op::MostSignificantBit:
             FoldMostSignificantBit(inst);
             break;
-        case IR::Opcode::LogicalShiftLeft32:
-        case IR::Opcode::LogicalShiftLeft64:
-        case IR::Opcode::LogicalShiftRight32:
-        case IR::Opcode::LogicalShiftRight64:
-        case IR::Opcode::ArithmeticShiftRight32:
-        case IR::Opcode::ArithmeticShiftRight64:
-        case IR::Opcode::RotateRight32:
-        case IR::Opcode::RotateRight64:
+        case Op::LogicalShiftLeft32:
+        case Op::LogicalShiftLeft64:
+        case Op::LogicalShiftRight32:
+        case Op::LogicalShiftRight64:
+        case Op::ArithmeticShiftRight32:
+        case Op::ArithmeticShiftRight64:
+        case Op::RotateRight32:
+        case Op::RotateRight64:
             FoldShifts(inst);
             break;
-        case IR::Opcode::Mul32:
-        case IR::Opcode::Mul64:
-            FoldMultiply(inst, opcode == IR::Opcode::Mul32);
+        case Op::Mul32:
+        case Op::Mul64:
+            FoldMultiply(inst, opcode == Op::Mul32);
             break;
-        case IR::Opcode::SignedDiv32:
-        case IR::Opcode::SignedDiv64:
-            FoldDivide(inst, opcode == IR::Opcode::SignedDiv32, true);
+        case Op::SignedDiv32:
+        case Op::SignedDiv64:
+            FoldDivide(inst, opcode == Op::SignedDiv32, true);
             break;
-        case IR::Opcode::UnsignedDiv32:
-        case IR::Opcode::UnsignedDiv64:
-            FoldDivide(inst, opcode == IR::Opcode::UnsignedDiv32, false);
+        case Op::UnsignedDiv32:
+        case Op::UnsignedDiv64:
+            FoldDivide(inst, opcode == Op::UnsignedDiv32, false);
             break;
-        case IR::Opcode::And32:
-        case IR::Opcode::And64:
-            FoldAND(inst, opcode == IR::Opcode::And32);
+        case Op::And32:
+        case Op::And64:
+            FoldAND(inst, opcode == Op::And32);
             break;
-        case IR::Opcode::Eor32:
-        case IR::Opcode::Eor64:
-            FoldEOR(inst, opcode == IR::Opcode::Eor32);
+        case Op::Eor32:
+        case Op::Eor64:
+            FoldEOR(inst, opcode == Op::Eor32);
             break;
-        case IR::Opcode::Or32:
-        case IR::Opcode::Or64:
-            FoldOR(inst, opcode == IR::Opcode::Or32);
+        case Op::Or32:
+        case Op::Or64:
+            FoldOR(inst, opcode == Op::Or32);
             break;
-        case IR::Opcode::Not32:
-        case IR::Opcode::Not64:
-            FoldNOT(inst, opcode == IR::Opcode::Not32);
+        case Op::Not32:
+        case Op::Not64:
+            FoldNOT(inst, opcode == Op::Not32);
             break;
-        case IR::Opcode::SignExtendByteToWord:
-        case IR::Opcode::SignExtendHalfToWord:
+        case Op::SignExtendByteToWord:
+        case Op::SignExtendHalfToWord:
             FoldSignExtendXToWord(inst);
             break;
-        case IR::Opcode::SignExtendByteToLong:
-        case IR::Opcode::SignExtendHalfToLong:
-        case IR::Opcode::SignExtendWordToLong:
+        case Op::SignExtendByteToLong:
+        case Op::SignExtendHalfToLong:
+        case Op::SignExtendWordToLong:
             FoldSignExtendXToLong(inst);
             break;
-        case IR::Opcode::ZeroExtendByteToWord:
-        case IR::Opcode::ZeroExtendHalfToWord:
+        case Op::ZeroExtendByteToWord:
+        case Op::ZeroExtendHalfToWord:
             FoldZeroExtendXToWord(inst);
             break;
-        case IR::Opcode::ZeroExtendByteToLong:
-        case IR::Opcode::ZeroExtendHalfToLong:
-        case IR::Opcode::ZeroExtendWordToLong:
+        case Op::ZeroExtendByteToLong:
+        case Op::ZeroExtendHalfToLong:
+        case Op::ZeroExtendWordToLong:
             FoldZeroExtendXToLong(inst);
             break;
-        case IR::Opcode::ByteReverseWord:
-        case IR::Opcode::ByteReverseHalf:
-        case IR::Opcode::ByteReverseDual:
+        case Op::ByteReverseWord:
+        case Op::ByteReverseHalf:
+        case Op::ByteReverseDual:
             FoldByteReverse(inst, opcode);
             break;
         default:

src/ir_opt/identity_removal_pass.cpp

@@ -30,7 +30,7 @@ void IdentityRemovalPass(IR::Block& block) {
             }
         }
 
-        if (inst.GetOpcode() == IR::Opcode::Identity) {
+        if (inst.GetOpcode() == IR::Opcode::Identity || inst.GetOpcode() == IR::Opcode::Void) {
             iter = block.Instructions().erase(inst);
             to_invalidate.push_back(&inst);
         } else {

src/ir_opt/ir_matcher.h (new file, 126 lines added)

@@ -0,0 +1,126 @@
/* This file is part of the dynarmic project.
 * Copyright (c) 2020 MerryMage
 * This software may be used and distributed according to the terms of the GNU
 * General Public License version 2 or any later version.
 */

#pragma once

#include <optional>
#include <tuple>

#include <mp/metafunction/apply.h>
#include <mp/typelist/concat.h>
#include <mp/typelist/drop.h>
#include <mp/typelist/get.h>
#include <mp/typelist/head.h>
#include <mp/typelist/list.h>
#include <mp/typelist/prepend.h>

#include "frontend/ir/microinstruction.h"
#include "frontend/ir/opcodes.h"
#include "frontend/ir/value.h"

namespace Dynarmic::Optimization::IRMatcher {

// Matches any operand and captures it as an IR::Value.
struct CaptureValue {
    using ReturnType = std::tuple<IR::Value>;

    static std::optional<ReturnType> Match(IR::Value value) {
        return std::tuple(value);
    }
};

// Matches any non-immediate operand and captures its defining instruction.
struct CaptureInst {
    using ReturnType = std::tuple<IR::Inst*>;

    static std::optional<ReturnType> Match(IR::Value value) {
        if (value.IsImmediate())
            return std::nullopt;
        return std::tuple(value.GetInstRecursive());
    }
};

// Captures an immediate operand as an unsigned 64-bit integer.
struct CaptureUImm {
    using ReturnType = std::tuple<u64>;

    static std::optional<ReturnType> Match(IR::Value value) {
        return std::tuple(value.GetImmediateAsU64());
    }
};

// Captures an immediate operand as a signed 64-bit integer.
struct CaptureSImm {
    using ReturnType = std::tuple<s64>;

    static std::optional<ReturnType> Match(IR::Value value) {
        return std::tuple(value.GetImmediateAsS64());
    }
};

// Matches only when the operand equals the given unsigned immediate; captures nothing.
template <u64 Value>
struct UImm {
    using ReturnType = std::tuple<>;

    static std::optional<std::tuple<>> Match(IR::Value value) {
        if (value.GetImmediateAsU64() == Value)
            return std::tuple();
        return std::nullopt;
    }
};

// Matches only when the operand equals the given signed immediate; captures nothing.
template <s64 Value>
struct SImm {
    using ReturnType = std::tuple<>;

    static std::optional<std::tuple<>> Match(IR::Value value) {
        if (value.GetImmediateAsS64() == Value)
            return std::tuple();
        return std::nullopt;
    }
};

// Matches an instruction with the given opcode (and no associated pseudo-operations)
// whose arguments match Args..., concatenating all of their captures into one tuple.
template <IR::Opcode Opcode, typename... Args>
struct Inst {
public:
    using ReturnType = mp::concat<std::tuple<>, typename Args::ReturnType...>;

    static std::optional<ReturnType> Match(const IR::Inst& inst) {
        if (inst.GetOpcode() != Opcode)
            return std::nullopt;
        if (inst.HasAssociatedPseudoOperation())
            return std::nullopt;
        return MatchArgs<0>(inst);
    }

    static std::optional<ReturnType> Match(IR::Value value) {
        if (value.IsImmediate())
            return std::nullopt;
        return Match(*value.GetInstRecursive());
    }

private:
    template <size_t I>
    static auto MatchArgs(const IR::Inst& inst) -> std::optional<mp::apply<mp::concat, mp::prepend<mp::drop<I, mp::list<typename Args::ReturnType...>>, std::tuple<>>>> {
        if constexpr (I >= sizeof...(Args)) {
            return std::tuple();
        } else {
            using Arg = mp::get<I, mp::list<Args...>>;

            if (const auto arg = Arg::Match(inst.GetArg(I))) {
                if (const auto rest = MatchArgs<I + 1>(inst)) {
                    return std::tuple_cat(*arg, *rest);
                }
            }

            return std::nullopt;
        }
    }
};

// Convenience predicates: true when all captured instruction pointers refer to the same instruction.
inline bool IsSameInst(std::tuple<IR::Inst*, IR::Inst*> t) {
    return std::get<0>(t) == std::get<1>(t);
}

inline bool IsSameInst(std::tuple<IR::Inst*, IR::Inst*, IR::Inst*> t) {
    return std::get<0>(t) == std::get<1>(t) && std::get<0>(t) == std::get<2>(t);
}

} // namespace Dynarmic::Optimization::IRMatcher
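To illustrate how these matchers are meant to compose (a usage sketch, not code from this commit): Inst patterns nest, and the captures of all arguments are concatenated into a single tuple. For example, a hypothetical fold that recognises ByteReverseWord applied twice could be written as follows, assuming it lives inside Dynarmic::Optimization:

// Hypothetical fold: ByteReverseWord(ByteReverseWord(x)) -> x.
// Uses only the types introduced in ir_matcher.h above.
using DoubleByteReverse =
    IRMatcher::Inst<IR::Opcode::ByteReverseWord,
                    IRMatcher::Inst<IR::Opcode::ByteReverseWord, IRMatcher::CaptureValue>>;

void TryFoldDoubleByteReverse(IR::Inst& inst) {
    if (const auto capture = DoubleByteReverse::Match(inst)) {
        const auto [x] = *capture;  // ReturnType here is std::tuple<IR::Value>
        inst.ReplaceUsesWith(x);
    }
}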