From af3614553b722d3b41921ea8733899f8f9ac1996 Mon Sep 17 00:00:00 2001
From: Lioncash
Date: Sun, 8 Mar 2020 03:34:20 -0400
Subject: [PATCH] A64/impl: Move AccType and MemOp enums into general IR
 emitter header

These will be used by both frontends in the future, so this performs the
migratory changes separately from the changes that will make use of them.
---
 src/frontend/A64/translate/impl/impl.cpp      |  6 +-
 src/frontend/A64/translate/impl/impl.h        | 14 +---
 .../translate/impl/load_store_exclusive.cpp   | 22 +++---
 .../impl/load_store_load_literal.cpp          |  6 +-
 .../impl/load_store_multiple_structures.cpp   | 22 +++---
 .../impl/load_store_register_immediate.cpp    | 42 ++++++------
 .../impl/load_store_register_pair.cpp         | 40 +++++------
 .../load_store_register_register_offset.cpp   | 24 +++----
 .../impl/load_store_register_unprivileged.cpp | 20 +++---
 .../impl/load_store_single_structure.cpp      | 68 +++++++++++--------
 src/frontend/ir/ir_emitter.h                  | 10 +++
 11 files changed, 142 insertions(+), 132 deletions(-)

diff --git a/src/frontend/A64/translate/impl/impl.cpp b/src/frontend/A64/translate/impl/impl.cpp
index 15545de7..ab489794 100644
--- a/src/frontend/A64/translate/impl/impl.cpp
+++ b/src/frontend/A64/translate/impl/impl.cpp
@@ -275,7 +275,7 @@ void TranslatorVisitor::Vpart_scalar(size_t bitsize, Vec vec, size_t part, IR::U
     }
 }

-IR::UAnyU128 TranslatorVisitor::Mem(IR::U64 address, size_t bytesize, AccType /*acctype*/) {
+IR::UAnyU128 TranslatorVisitor::Mem(IR::U64 address, size_t bytesize, IR::AccType /*acc_type*/) {
     switch (bytesize) {
     case 1:
         return ir.ReadMemory8(address);
@@ -293,7 +293,7 @@ IR::UAnyU128 TranslatorVisitor::Mem(IR::U64 address, size_t bytesize, AccType /*
     }
 }

-void TranslatorVisitor::Mem(IR::U64 address, size_t bytesize, AccType /*acctype*/, IR::UAnyU128 value) {
+void TranslatorVisitor::Mem(IR::U64 address, size_t bytesize, IR::AccType /*acc_type*/, IR::UAnyU128 value) {
     switch (bytesize) {
     case 1:
         ir.WriteMemory8(address, value);
@@ -316,7 +316,7 @@ void TranslatorVisitor::Mem(IR::U64 address, size_t bytesize, AccType /*acctype*
     }
 }

-IR::U32 TranslatorVisitor::ExclusiveMem(IR::U64 address, size_t bytesize, AccType /*acctype*/, IR::UAnyU128 value) {
+IR::U32 TranslatorVisitor::ExclusiveMem(IR::U64 address, size_t bytesize, IR::AccType /*acc_type*/, IR::UAnyU128 value) {
     switch (bytesize) {
     case 1:
         return ir.ExclusiveWriteMemory8(address, value);
diff --git a/src/frontend/A64/translate/impl/impl.h b/src/frontend/A64/translate/impl/impl.h
index 565cbb17..52039fc4 100644
--- a/src/frontend/A64/translate/impl/impl.h
+++ b/src/frontend/A64/translate/impl/impl.h
@@ -16,14 +16,6 @@

 namespace Dynarmic::A64 {

-enum class AccType {
-    NORMAL, VEC, STREAM, VECSTREAM, ATOMIC, ORDERED, ORDEREDRW, LIMITEDORDERED, UNPRIV, IFETCH, PTW, DC, IC, DCZVA, AT,
-};
-
-enum class MemOp {
-    LOAD, STORE, PREFETCH,
-};
-
 struct TranslatorVisitor final {
     using instruction_return_type = bool;

@@ -64,9 +56,9 @@ struct TranslatorVisitor final {
     IR::UAny Vpart_scalar(size_t bitsize, Vec vec, size_t part);
     void Vpart_scalar(size_t bitsize, Vec vec, size_t part, IR::UAny value);

-    IR::UAnyU128 Mem(IR::U64 address, size_t size, AccType acctype);
-    void Mem(IR::U64 address, size_t size, AccType acctype, IR::UAnyU128 value);
-    IR::U32 ExclusiveMem(IR::U64 address, size_t size, AccType acctype, IR::UAnyU128 value);
+    IR::UAnyU128 Mem(IR::U64 address, size_t size, IR::AccType acctype);
+    void Mem(IR::U64 address, size_t size, IR::AccType acctype, IR::UAnyU128 value);
+    IR::U32 ExclusiveMem(IR::U64 address, size_t size, IR::AccType acctype, IR::UAnyU128 value);

     IR::U32U64 SignExtend(IR::UAny value, size_t to_size);
     IR::U32U64 ZeroExtend(IR::UAny value, size_t to_size);
diff --git a/src/frontend/A64/translate/impl/load_store_exclusive.cpp b/src/frontend/A64/translate/impl/load_store_exclusive.cpp
index 08fa3d0c..31d8bd98 100644
--- a/src/frontend/A64/translate/impl/load_store_exclusive.cpp
+++ b/src/frontend/A64/translate/impl/load_store_exclusive.cpp
@@ -13,8 +13,8 @@ namespace Dynarmic::A64 {
 static bool ExclusiveSharedDecodeAndOperation(TranslatorVisitor& v, bool pair, size_t size, bool L, bool o0,
                                               std::optional<Reg> Rs, std::optional<Reg> Rt2, Reg Rn, Reg Rt) {
     // Shared Decode
-    const AccType acctype = o0 ? AccType::ORDERED : AccType::ATOMIC;
-    const MemOp memop = L ? MemOp::LOAD : MemOp::STORE;
+    const auto acctype = o0 ? IR::AccType::ORDERED : IR::AccType::ATOMIC;
+    const auto memop = L ? IR::MemOp::LOAD : IR::MemOp::STORE;
     const size_t elsize = 8 << size;
     const size_t regsize = elsize == 64 ? 64 : 32;
     const size_t datasize = pair ? elsize * 2 : elsize;
@@ -23,14 +23,14 @@ static bool ExclusiveSharedDecodeAndOperation(TranslatorVisitor& v, bool pair, s

     const size_t dbytes = datasize / 8;

-    if (memop == MemOp::LOAD && pair && Rt == *Rt2) {
+    if (memop == IR::MemOp::LOAD && pair && Rt == *Rt2) {
         return v.UnpredictableInstruction();
-    } else if (memop == MemOp::STORE && (*Rs == Rt || (pair && *Rs == *Rt2))) {
+    } else if (memop == IR::MemOp::STORE && (*Rs == Rt || (pair && *Rs == *Rt2))) {
         if (!v.options.define_unpredictable_behaviour) {
             return v.UnpredictableInstruction();
         }
         // UNPREDICTABLE: The Constraint_NONE case is executed.
-    } else if (memop == MemOp::STORE && *Rs == Rn && Rn != Reg::R31) {
+    } else if (memop == IR::MemOp::STORE && *Rs == Rn && Rn != Reg::R31) {
         return v.UnpredictableInstruction();
     }

@@ -43,7 +43,7 @@ static bool ExclusiveSharedDecodeAndOperation(TranslatorVisitor& v, bool pair, s
     }

     switch (memop) {
-    case MemOp::STORE: {
+    case IR::MemOp::STORE: {
         IR::UAnyU128 data;
         if (pair && elsize == 64) {
             data = v.ir.Pack2x64To1x128(v.X(64, Rt), v.X(64, *Rt2));
@@ -56,7 +56,7 @@ static bool ExclusiveSharedDecodeAndOperation(TranslatorVisitor& v, bool pair, s
         v.X(32, *Rs, status);
         break;
     }
-    case MemOp::LOAD: {
+    case IR::MemOp::LOAD: {
         v.ir.SetExclusive(address, dbytes);
         const IR::UAnyU128 data = v.Mem(address, dbytes, acctype);
         if (pair && elsize == 64) {
@@ -144,8 +144,8 @@ bool TranslatorVisitor::LDAXP(Imm<1> sz, Reg Rt2, Reg Rn, Reg Rt) {

 static bool OrderedSharedDecodeAndOperation(TranslatorVisitor& v, size_t size, bool L, bool o0, Reg Rn, Reg Rt) {
     // Shared Decode
-    const AccType acctype = !o0 ? AccType::LIMITEDORDERED : AccType::ORDERED;
-    const MemOp memop = L ? MemOp::LOAD : MemOp::STORE;
+    const auto acctype = !o0 ? IR::AccType::LIMITEDORDERED : IR::AccType::ORDERED;
+    const auto memop = L ? IR::MemOp::LOAD : IR::MemOp::STORE;
     const size_t elsize = 8 << size;
     const size_t regsize = elsize == 64 ? 64 : 32;
     const size_t datasize = elsize;
@@ -163,12 +163,12 @@ static bool OrderedSharedDecodeAndOperation(TranslatorVisitor& v, size_t size, b
     }

     switch (memop) {
-    case MemOp::STORE: {
+    case IR::MemOp::STORE: {
         const IR::UAny data = v.X(datasize, Rt);
         v.Mem(address, dbytes, acctype, data);
         break;
     }
-    case MemOp::LOAD: {
+    case IR::MemOp::LOAD: {
         const IR::UAny data = v.Mem(address, dbytes, acctype);
         v.X(regsize, Rt, v.ZeroExtend(data, regsize));
         break;
diff --git a/src/frontend/A64/translate/impl/load_store_load_literal.cpp b/src/frontend/A64/translate/impl/load_store_load_literal.cpp
index cf045b58..537bf471 100644
--- a/src/frontend/A64/translate/impl/load_store_load_literal.cpp
+++ b/src/frontend/A64/translate/impl/load_store_load_literal.cpp
@@ -13,7 +13,7 @@ bool TranslatorVisitor::LDR_lit_gen(bool opc_0, Imm<19> imm19, Reg Rt) {
     const s64 offset = concatenate(imm19, Imm<2>{0}).SignExtend();
     const u64 address = ir.PC() + offset;

-    const auto data = Mem(ir.Imm64(address), size, AccType::NORMAL);
+    const auto data = Mem(ir.Imm64(address), size, IR::AccType::NORMAL);

     X(8 * size, Rt, data);
     return true;
@@ -27,7 +27,7 @@ bool TranslatorVisitor::LDR_lit_fpsimd(Imm<2> opc, Imm<19> imm19, Vec Vt) {
     const u64 size = 4 << opc.ZeroExtend();
     const u64 offset = imm19.SignExtend() << 2;
     const IR::U64 address = ir.Imm64(ir.PC() + offset);
-    const IR::UAnyU128 data = Mem(address, size, AccType::VEC);
+    const IR::UAnyU128 data = Mem(address, size, IR::AccType::VEC);

     if (size == 16) {
         V(128, Vt, data);
@@ -40,7 +40,7 @@ bool TranslatorVisitor::LDR_lit_fpsimd(Imm<2> opc, Imm<19> imm19, Vec Vt) {
 bool TranslatorVisitor::LDRSW_lit(Imm<19> imm19, Reg Rt) {
     const s64 offset = concatenate(imm19, Imm<2>{0}).SignExtend();
     const u64 address = ir.PC() + offset;
-    const auto data = Mem(ir.Imm64(address), 4, AccType::NORMAL);
+    const auto data = Mem(ir.Imm64(address), 4, IR::AccType::NORMAL);

     X(64, Rt, ir.SignExtendWordToLong(data));
     return true;
diff --git a/src/frontend/A64/translate/impl/load_store_multiple_structures.cpp b/src/frontend/A64/translate/impl/load_store_multiple_structures.cpp
index fa77cf32..74cf36b2 100644
--- a/src/frontend/A64/translate/impl/load_store_multiple_structures.cpp
+++ b/src/frontend/A64/translate/impl/load_store_multiple_structures.cpp
@@ -10,7 +10,7 @@

 namespace Dynarmic::A64 {

-static bool SharedDecodeAndOperation(TranslatorVisitor& v, bool wback, MemOp memop, bool Q, std::optional<Reg> Rm, Imm<4> opcode, Imm<2> size, Reg Rn, Vec Vt) {
+static bool SharedDecodeAndOperation(TranslatorVisitor& v, bool wback, IR::MemOp memop, bool Q, std::optional<Reg> Rm, Imm<4> opcode, Imm<2> size, Reg Rn, Vec Vt) {
     const size_t datasize = Q ? 128 : 64;
     const size_t esize = 8 << size.ZeroExtend();
     const size_t elements = datasize / esize;
@@ -67,12 +67,12 @@ static bool SharedDecodeAndOperation(TranslatorVisitor& v, bool wback, MemOp mem
     if (selem == 1) {
         for (size_t r = 0; r < rpt; r++) {
             const Vec tt = static_cast<Vec>((VecNumber(Vt) + r) % 32);
-            if (memop == MemOp::LOAD) {
-                const IR::UAnyU128 vec = v.Mem(v.ir.Add(address, offs), ebytes * elements, AccType::VEC);
+            if (memop == IR::MemOp::LOAD) {
+                const IR::UAnyU128 vec = v.Mem(v.ir.Add(address, offs), ebytes * elements, IR::AccType::VEC);
                 v.V_scalar(datasize, tt, vec);
             } else {
                 const IR::UAnyU128 vec = v.V_scalar(datasize, tt);
-                v.Mem(v.ir.Add(address, offs), ebytes * elements, AccType::VEC, vec);
+                v.Mem(v.ir.Add(address, offs), ebytes * elements, IR::AccType::VEC, vec);
             }
             offs = v.ir.Add(offs, v.ir.Imm64(ebytes * elements));
         }
@@ -80,13 +80,13 @@ static bool SharedDecodeAndOperation(TranslatorVisitor& v, bool wback, MemOp mem
         for (size_t e = 0; e < elements; e++) {
             for (size_t s = 0; s < selem; s++) {
                 const Vec tt = static_cast<Vec>((VecNumber(Vt) + s) % 32);
-                if (memop == MemOp::LOAD) {
-                    const IR::UAny elem = v.Mem(v.ir.Add(address, offs), ebytes, AccType::VEC);
+                if (memop == IR::MemOp::LOAD) {
+                    const IR::UAny elem = v.Mem(v.ir.Add(address, offs), ebytes, IR::AccType::VEC);
                     const IR::U128 vec = v.ir.VectorSetElement(esize, v.V(datasize, tt), e, elem);
                     v.V(datasize, tt, vec);
                 } else {
                     const IR::UAny elem = v.ir.VectorGetElement(esize, v.V(datasize, tt), e);
-                    v.Mem(v.ir.Add(address, offs), ebytes, AccType::VEC, elem);
+                    v.Mem(v.ir.Add(address, offs), ebytes, IR::AccType::VEC, elem);
                 }
                 offs = v.ir.Add(offs, v.ir.Imm64(ebytes));
             }
@@ -110,25 +110,25 @@ static bool SharedDecodeAndOperation(TranslatorVisitor& v, bool wback, MemOp mem

 bool TranslatorVisitor::STx_mult_1(bool Q, Imm<4> opcode, Imm<2> size, Reg Rn, Vec Vt) {
     const bool wback = false;
-    const MemOp memop = MemOp::STORE;
+    const auto memop = IR::MemOp::STORE;
     return SharedDecodeAndOperation(*this, wback, memop, Q, {}, opcode, size, Rn, Vt);
 }

 bool TranslatorVisitor::STx_mult_2(bool Q, Reg Rm, Imm<4> opcode, Imm<2> size, Reg Rn, Vec Vt) {
     const bool wback = true;
-    const MemOp memop = MemOp::STORE;
+    const auto memop = IR::MemOp::STORE;
     return SharedDecodeAndOperation(*this, wback, memop, Q, Rm, opcode, size, Rn, Vt);
 }

 bool TranslatorVisitor::LDx_mult_1(bool Q, Imm<4> opcode, Imm<2> size, Reg Rn, Vec Vt) {
     const bool wback = false;
-    const MemOp memop = MemOp::LOAD;
+    const auto memop = IR::MemOp::LOAD;
     return SharedDecodeAndOperation(*this, wback, memop, Q, {}, opcode, size, Rn, Vt);
 }

 bool TranslatorVisitor::LDx_mult_2(bool Q, Reg Rm, Imm<4> opcode, Imm<2> size, Reg Rn, Vec Vt) {
     const bool wback = true;
-    const MemOp memop = MemOp::LOAD;
+    const auto memop = IR::MemOp::LOAD;
     return SharedDecodeAndOperation(*this, wback, memop, Q, Rm, opcode, size, Rn, Vt);
 }
diff --git a/src/frontend/A64/translate/impl/load_store_register_immediate.cpp b/src/frontend/A64/translate/impl/load_store_register_immediate.cpp
index 1f7a84a9..f7784aaf 100644
--- a/src/frontend/A64/translate/impl/load_store_register_immediate.cpp
+++ b/src/frontend/A64/translate/impl/load_store_register_immediate.cpp
@@ -9,28 +9,28 @@
 namespace Dynarmic::A64 {

 static bool LoadStoreRegisterImmediate(TranslatorVisitor& v, bool wback, bool postindex, size_t scale, u64 offset, Imm<2> size, Imm<2> opc, Reg Rn, Reg Rt) {
-    MemOp memop;
+    IR::MemOp memop;
     bool signed_ = false;
     size_t regsize = 0;

     if (opc.Bit<1>() == 0) {
-        memop = opc.Bit<0>() ? MemOp::LOAD : MemOp::STORE;
+        memop = opc.Bit<0>() ? IR::MemOp::LOAD : IR::MemOp::STORE;
         regsize = size == 0b11 ? 64 : 32;
         signed_ = false;
     } else if (size == 0b11) {
-        memop = MemOp::PREFETCH;
+        memop = IR::MemOp::PREFETCH;
         ASSERT(!opc.Bit<0>());
     } else {
-        memop = MemOp::LOAD;
+        memop = IR::MemOp::LOAD;
         ASSERT(!(size == 0b10 && opc.Bit<0>() == 1));
         regsize = opc.Bit<0>() ? 32 : 64;
         signed_ = true;
     }

-    if (memop == MemOp::LOAD && wback && Rn == Rt && Rn != Reg::R31) {
+    if (memop == IR::MemOp::LOAD && wback && Rn == Rt && Rn != Reg::R31) {
         return v.UnpredictableInstruction();
     }
-    if (memop == MemOp::STORE && wback && Rn == Rt && Rn != Reg::R31) {
+    if (memop == IR::MemOp::STORE && wback && Rn == Rt && Rn != Reg::R31) {
         return v.UnpredictableInstruction();
     }

@@ -42,13 +42,13 @@ static bool LoadStoreRegisterImmediate(TranslatorVisitor& v, bool wback, bool po
     const size_t datasize = 8 << scale;

     switch (memop) {
-    case MemOp::STORE: {
+    case IR::MemOp::STORE: {
         const auto data = v.X(datasize, Rt);
-        v.Mem(address, datasize / 8, AccType::NORMAL, data);
+        v.Mem(address, datasize / 8, IR::AccType::NORMAL, data);
         break;
     }
-    case MemOp::LOAD: {
-        const auto data = v.Mem(address, datasize / 8, AccType::NORMAL);
+    case IR::MemOp::LOAD: {
+        const auto data = v.Mem(address, datasize / 8, IR::AccType::NORMAL);
         if (signed_) {
             v.X(regsize, Rt, v.SignExtend(data, regsize));
         } else {
@@ -56,7 +56,7 @@ static bool LoadStoreRegisterImmediate(TranslatorVisitor& v, bool wback, bool po
         }
         break;
     }
-    case MemOp::PREFETCH:
+    case IR::MemOp::PREFETCH:
         // Prefetch(address, Rt)
         break;
     }
@@ -115,8 +115,8 @@ bool TranslatorVisitor::PRFM_unscaled_imm([[maybe_unused]] Imm<9> imm9, [[maybe_
     return true;
 }

-static bool LoadStoreSIMD(TranslatorVisitor& v, bool wback, bool postindex, size_t scale, u64 offset, MemOp memop, Reg Rn, Vec Vt) {
-    const AccType acctype = AccType::VEC;
+static bool LoadStoreSIMD(TranslatorVisitor& v, bool wback, bool postindex, size_t scale, u64 offset, IR::MemOp memop, Reg Rn, Vec Vt) {
+    const auto acctype = IR::AccType::VEC;
     const size_t datasize = 8 << scale;

     IR::U64 address;
@@ -132,7 +132,7 @@ static bool LoadStoreSIMD(TranslatorVisitor& v, bool wback, bool postindex, size
     }

     switch (memop) {
-    case MemOp::STORE:
+    case IR::MemOp::STORE:
         if (datasize == 128) {
             const IR::U128 data = v.V(128, Vt);
             v.Mem(address, 16, acctype, data);
@@ -141,7 +141,7 @@ static bool LoadStoreSIMD(TranslatorVisitor& v, bool wback, bool postindex, size
             v.Mem(address, datasize / 8, acctype, data);
         }
         break;
-    case MemOp::LOAD:
+    case IR::MemOp::LOAD:
         if (datasize == 128) {
             const IR::U128 data = v.Mem(address, 16, acctype);
             v.V(128, Vt, data);
@@ -179,7 +179,7 @@ bool TranslatorVisitor::STR_imm_fpsimd_1(Imm<2> size, Imm<1> opc_1, Imm<9> imm9,
     const bool postindex = !not_postindex;
     const u64 offset = imm9.SignExtend();

-    return LoadStoreSIMD(*this, wback, postindex, scale, offset, MemOp::STORE, Rn, Vt);
+    return LoadStoreSIMD(*this, wback, postindex, scale, offset, IR::MemOp::STORE, Rn, Vt);
 }

 bool TranslatorVisitor::STR_imm_fpsimd_2(Imm<2> size, Imm<1> opc_1, Imm<12> imm12, Reg Rn, Vec Vt) {
@@ -192,7 +192,7 @@ bool TranslatorVisitor::STR_imm_fpsimd_2(Imm<2> size, Imm<1> opc_1, Imm<12> imm1
     const bool postindex = false;
     const u64 offset = imm12.ZeroExtend() << scale;

-    return LoadStoreSIMD(*this, wback, postindex, scale, offset, MemOp::STORE, Rn, Vt);
+    return LoadStoreSIMD(*this, wback, postindex, scale, offset, IR::MemOp::STORE, Rn, Vt);
 }

 bool TranslatorVisitor::LDR_imm_fpsimd_1(Imm<2> size, Imm<1> opc_1, Imm<9> imm9, bool not_postindex, Reg Rn, Vec Vt) {
@@ -205,7 +205,7 @@ bool TranslatorVisitor::LDR_imm_fpsimd_1(Imm<2> size, Imm<1> opc_1, Imm<9> imm9,
     const bool postindex = !not_postindex;
     const u64 offset = imm9.SignExtend();

-    return LoadStoreSIMD(*this, wback, postindex, scale, offset, MemOp::LOAD, Rn, Vt);
+    return LoadStoreSIMD(*this, wback, postindex, scale, offset, IR::MemOp::LOAD, Rn, Vt);
 }

 bool TranslatorVisitor::LDR_imm_fpsimd_2(Imm<2> size, Imm<1> opc_1, Imm<12> imm12, Reg Rn, Vec Vt) {
@@ -218,7 +218,7 @@ bool TranslatorVisitor::LDR_imm_fpsimd_2(Imm<2> size, Imm<1> opc_1, Imm<12> imm1
     const bool postindex = false;
     const u64 offset = imm12.ZeroExtend() << scale;

-    return LoadStoreSIMD(*this, wback, postindex, scale, offset, MemOp::LOAD, Rn, Vt);
+    return LoadStoreSIMD(*this, wback, postindex, scale, offset, IR::MemOp::LOAD, Rn, Vt);
 }

 bool TranslatorVisitor::STUR_fpsimd(Imm<2> size, Imm<1> opc_1, Imm<9> imm9, Reg Rn, Vec Vt) {
@@ -231,7 +231,7 @@ bool TranslatorVisitor::STUR_fpsimd(Imm<2> size, Imm<1> opc_1, Imm<9> imm9, Reg
     const bool postindex = false;
     const u64 offset = imm9.SignExtend();

-    return LoadStoreSIMD(*this, wback, postindex, scale, offset, MemOp::STORE, Rn, Vt);
+    return LoadStoreSIMD(*this, wback, postindex, scale, offset, IR::MemOp::STORE, Rn, Vt);
 }

 bool TranslatorVisitor::LDUR_fpsimd(Imm<2> size, Imm<1> opc_1, Imm<9> imm9, Reg Rn, Vec Vt) {
@@ -244,7 +244,7 @@ bool TranslatorVisitor::LDUR_fpsimd(Imm<2> size, Imm<1> opc_1, Imm<9> imm9, Reg
     const bool postindex = false;
     const u64 offset = imm9.SignExtend();

-    return LoadStoreSIMD(*this, wback, postindex, scale, offset, MemOp::LOAD, Rn, Vt);
+    return LoadStoreSIMD(*this, wback, postindex, scale, offset, IR::MemOp::LOAD, Rn, Vt);
 }

 } // namespace Dynarmic::A64
diff --git a/src/frontend/A64/translate/impl/load_store_register_pair.cpp b/src/frontend/A64/translate/impl/load_store_register_pair.cpp
index f270ec2a..b3d36706 100644
--- a/src/frontend/A64/translate/impl/load_store_register_pair.cpp
+++ b/src/frontend/A64/translate/impl/load_store_register_pair.cpp
@@ -13,14 +13,14 @@ bool TranslatorVisitor::STP_LDP_gen(Imm<2> opc, bool not_postindex, bool wback,
         return UnallocatedEncoding();
     }

-    const MemOp memop = L == 1 ? MemOp::LOAD : MemOp::STORE;
-    if (memop == MemOp::LOAD && wback && (Rt == Rn || Rt2 == Rn) && Rn != Reg::R31) {
+    const auto memop = L == 1 ? IR::MemOp::LOAD : IR::MemOp::STORE;
+    if (memop == IR::MemOp::LOAD && wback && (Rt == Rn || Rt2 == Rn) && Rn != Reg::R31) {
         return UnpredictableInstruction();
     }
-    if (memop == MemOp::STORE && wback && (Rt == Rn || Rt2 == Rn) && Rn != Reg::R31) {
+    if (memop == IR::MemOp::STORE && wback && (Rt == Rn || Rt2 == Rn) && Rn != Reg::R31) {
         return UnpredictableInstruction();
     }
-    if (memop == MemOp::LOAD && Rt == Rt2) {
+    if (memop == IR::MemOp::LOAD && Rt == Rt2) {
         return UnpredictableInstruction();
     }

@@ -44,16 +44,16 @@ bool TranslatorVisitor::STP_LDP_gen(Imm<2> opc, bool not_postindex, bool wback,
     const size_t dbytes = datasize / 8;

     switch (memop) {
-    case MemOp::STORE: {
+    case IR::MemOp::STORE: {
         const IR::U32U64 data1 = X(datasize, Rt);
         const IR::U32U64 data2 = X(datasize, Rt2);
-        Mem(address, dbytes, AccType::NORMAL, data1);
-        Mem(ir.Add(address, ir.Imm64(dbytes)), dbytes, AccType::NORMAL, data2);
+        Mem(address, dbytes, IR::AccType::NORMAL, data1);
+        Mem(ir.Add(address, ir.Imm64(dbytes)), dbytes, IR::AccType::NORMAL, data2);
         break;
     }
-    case MemOp::LOAD: {
-        const IR::U32U64 data1 = Mem(address, dbytes, AccType::NORMAL);
-        const IR::U32U64 data2 = Mem(ir.Add(address, ir.Imm64(dbytes)), dbytes, AccType::NORMAL);
+    case IR::MemOp::LOAD: {
+        const IR::U32U64 data1 = Mem(address, dbytes, IR::AccType::NORMAL);
+        const IR::U32U64 data2 = Mem(ir.Add(address, ir.Imm64(dbytes)), dbytes, IR::AccType::NORMAL);
         if (signed_) {
             X(64, Rt, SignExtend(data1, 64));
             X(64, Rt2, SignExtend(data2, 64));
@@ -63,7 +63,7 @@ bool TranslatorVisitor::STP_LDP_gen(Imm<2> opc, bool not_postindex, bool wback,
         }
         break;
     }
-    case MemOp::PREFETCH:
+    case IR::MemOp::PREFETCH:
         UNREACHABLE();
     }

@@ -87,8 +87,8 @@ bool TranslatorVisitor::STP_LDP_fpsimd(Imm<2> opc, bool not_postindex, bool wbac
         return UnallocatedEncoding();
     }

-    const MemOp memop = L == 1 ? MemOp::LOAD : MemOp::STORE;
-    if (memop == MemOp::LOAD && Vt == Vt2) {
+    const auto memop = L == 1 ? IR::MemOp::LOAD : IR::MemOp::STORE;
+    if (memop == IR::MemOp::LOAD && Vt == Vt2) {
         return UnpredictableInstruction();
     }

@@ -111,20 +111,20 @@ bool TranslatorVisitor::STP_LDP_fpsimd(Imm<2> opc, bool not_postindex, bool wbac
     }

     switch (memop) {
-    case MemOp::STORE: {
+    case IR::MemOp::STORE: {
         IR::UAnyU128 data1 = V(datasize, Vt);
         IR::UAnyU128 data2 = V(datasize, Vt2);
         if (datasize != 128) {
             data1 = ir.VectorGetElement(datasize, data1, 0);
             data2 = ir.VectorGetElement(datasize, data2, 0);
         }
-        Mem(address, dbytes, AccType::VEC, data1);
-        Mem(ir.Add(address, ir.Imm64(dbytes)), dbytes, AccType::VEC, data2);
+        Mem(address, dbytes, IR::AccType::VEC, data1);
+        Mem(ir.Add(address, ir.Imm64(dbytes)), dbytes, IR::AccType::VEC, data2);
         break;
     }
-    case MemOp::LOAD: {
-        IR::UAnyU128 data1 = Mem(address, dbytes, AccType::VEC);
-        IR::UAnyU128 data2 = Mem(ir.Add(address, ir.Imm64(dbytes)), dbytes, AccType::VEC);
+    case IR::MemOp::LOAD: {
+        IR::UAnyU128 data1 = Mem(address, dbytes, IR::AccType::VEC);
+        IR::UAnyU128 data2 = Mem(ir.Add(address, ir.Imm64(dbytes)), dbytes, IR::AccType::VEC);
         if (datasize != 128) {
             data1 = ir.ZeroExtendToQuad(data1);
             data2 = ir.ZeroExtendToQuad(data2);
@@ -133,7 +133,7 @@ bool TranslatorVisitor::STP_LDP_fpsimd(Imm<2> opc, bool not_postindex, bool wbac
         V(datasize, Vt2, data2);
         break;
     }
-    case MemOp::PREFETCH:
+    case IR::MemOp::PREFETCH:
         UNREACHABLE();
     }

diff --git a/src/frontend/A64/translate/impl/load_store_register_register_offset.cpp b/src/frontend/A64/translate/impl/load_store_register_register_offset.cpp
index 6eed91e4..1d8ee829 100644
--- a/src/frontend/A64/translate/impl/load_store_register_register_offset.cpp
+++ b/src/frontend/A64/translate/impl/load_store_register_register_offset.cpp
@@ -11,22 +11,22 @@ namespace Dynarmic::A64 {
 static bool RegSharedDecodeAndOperation(TranslatorVisitor& v, size_t scale, u8 shift, Imm<2> size,
                                         Imm<1> opc_1, Imm<1> opc_0, Reg Rm, Imm<3> option, Reg Rn, Reg Rt) {
     // Shared Decode
-    const AccType acctype = AccType::NORMAL;
-    MemOp memop;
+    const auto acctype = IR::AccType::NORMAL;
+    IR::MemOp memop;
     size_t regsize = 64;
     bool signed_ = false;

     if (opc_1 == 0) {
-        memop = opc_0 == 1 ? MemOp::LOAD : MemOp::STORE;
+        memop = opc_0 == 1 ? IR::MemOp::LOAD : IR::MemOp::STORE;
         regsize = size == 0b11 ? 64 : 32;
         signed_ = false;
     } else if (size == 0b11) {
-        memop = MemOp::PREFETCH;
+        memop = IR::MemOp::PREFETCH;
         if (opc_0 == 1) {
             return v.UnallocatedEncoding();
         }
     } else {
-        memop = MemOp::LOAD;
+        memop = IR::MemOp::LOAD;
         if (size == 0b10 && opc_0 == 1) {
             return v.UnallocatedEncoding();
         }
@@ -50,12 +50,12 @@ static bool RegSharedDecodeAndOperation(TranslatorVisitor& v, size_t scale, u8 s
     address = v.ir.Add(address, offset);

     switch (memop) {
-    case MemOp::STORE: {
+    case IR::MemOp::STORE: {
         const IR::UAny data = v.X(datasize, Rt);
         v.Mem(address, datasize / 8, acctype, data);
         break;
     }
-    case MemOp::LOAD: {
+    case IR::MemOp::LOAD: {
         const IR::UAny data = v.Mem(address, datasize / 8, acctype);
         if (signed_) {
             v.X(regsize, Rt, v.SignExtend(data, regsize));
@@ -64,7 +64,7 @@ static bool RegSharedDecodeAndOperation(TranslatorVisitor& v, size_t scale, u8 s
         }
         break;
-    case MemOp::PREFETCH:
+    case IR::MemOp::PREFETCH:
         // TODO: Prefetch
         break;
     default:
@@ -97,8 +97,8 @@ bool TranslatorVisitor::LDRx_reg(Imm<2> size, Imm<1> opc_1, Reg Rm, Imm<3> optio

 static bool VecSharedDecodeAndOperation(TranslatorVisitor& v, size_t scale, u8 shift, Imm<1> opc_0, Reg Rm, Imm<3> option, Reg Rn, Vec Vt) {
     // Shared Decode
-    const AccType acctype = AccType::VEC;
-    const MemOp memop = opc_0 == 1 ? MemOp::LOAD : MemOp::STORE;
+    const auto acctype = IR::AccType::VEC;
+    const auto memop = opc_0 == 1 ? IR::MemOp::LOAD : IR::MemOp::STORE;
     const size_t datasize = 8 << scale;

     // Operation
@@ -115,12 +115,12 @@ static bool VecSharedDecodeAndOperation(TranslatorVisitor& v, size_t scale, u8 s
     address = v.ir.Add(address, offset);

     switch (memop) {
-    case MemOp::STORE: {
+    case IR::MemOp::STORE: {
         const IR::UAnyU128 data = v.V_scalar(datasize, Vt);
         v.Mem(address, datasize / 8, acctype, data);
         break;
     }
-    case MemOp::LOAD: {
+    case IR::MemOp::LOAD: {
         const IR::UAnyU128 data = v.Mem(address, datasize / 8, acctype);
         v.V_scalar(datasize, Vt, data);
         break;
diff --git a/src/frontend/A64/translate/impl/load_store_register_unprivileged.cpp b/src/frontend/A64/translate/impl/load_store_register_unprivileged.cpp
index 7b16ddd8..e379c4e8 100644
--- a/src/frontend/A64/translate/impl/load_store_register_unprivileged.cpp
+++ b/src/frontend/A64/translate/impl/load_store_register_unprivileged.cpp
@@ -11,7 +11,7 @@ namespace Dynarmic::A64 {
 static bool StoreRegister(TranslatorVisitor& v, const size_t datasize,
                           const Imm<9> imm9, const Reg Rn, const Reg Rt) {
     const u64 offset = imm9.SignExtend();
-    const AccType acctype = AccType::UNPRIV;
+    const auto acctype = IR::AccType::UNPRIV;
     IR::U64 address;

     if (Rn == Reg::SP) {
@@ -30,7 +30,7 @@ static bool StoreRegister(TranslatorVisitor& v, const size_t datasize,
 static bool LoadRegister(TranslatorVisitor& v, const size_t datasize,
                          const Imm<9> imm9, const Reg Rn, const Reg Rt) {
     const u64 offset = imm9.SignExtend();
-    const AccType acctype = AccType::UNPRIV;
+    const auto acctype = IR::AccType::UNPRIV;
     IR::U64 address;

     if (Rn == Reg::SP) {
@@ -51,19 +51,19 @@ static bool LoadRegister(TranslatorVisitor& v, const size_t datasize,
 static bool LoadRegisterSigned(TranslatorVisitor& v, const size_t datasize,
                                const Imm<2> opc, const Imm<9> imm9, const Reg Rn, const Reg Rt) {
     const u64 offset = imm9.SignExtend();
-    const AccType acctype = AccType::UNPRIV;
+    const auto acctype = IR::AccType::UNPRIV;

-    MemOp memop;
+    IR::MemOp memop;
     bool is_signed;
     size_t regsize;
     if (opc.Bit<1>() == 0) {
         // store or zero-extending load
-        memop = opc.Bit<0>() ? MemOp::LOAD : MemOp::STORE;
+        memop = opc.Bit<0>() ? IR::MemOp::LOAD : IR::MemOp::STORE;
         regsize = 32;
         is_signed = false;
     } else {
         // sign-extending load
-        memop = MemOp::LOAD;
+        memop = IR::MemOp::LOAD;
         regsize = opc.Bit<0>() ? 32 : 64;
         is_signed = true;
     }
@@ -78,10 +78,10 @@ static bool LoadRegisterSigned(TranslatorVisitor& v, const size_t datasize,
     address = v.ir.Add(address, v.ir.Imm64(offset));

     switch (memop) {
-    case MemOp::STORE:
+    case IR::MemOp::STORE:
         v.Mem(address, datasize / 8, acctype, v.X(datasize, Rt));
         break;
-    case MemOp::LOAD: {
+    case IR::MemOp::LOAD: {
         const IR::UAny data = v.Mem(address, datasize / 8, acctype);
         if (is_signed) {
             v.X(regsize, Rt, v.SignExtend(data, regsize));
@@ -90,7 +90,7 @@ static bool LoadRegisterSigned(TranslatorVisitor& v, const size_t datasize,
         }
         break;
     }
-    case MemOp::PREFETCH:
+    case IR::MemOp::PREFETCH:
         // Prefetch(address, Rt);
         break;
     }
@@ -135,7 +135,7 @@ bool TranslatorVisitor::LDTRSH(Imm<2> opc, Imm<9> imm9, Reg Rn, Reg Rt) {

 bool TranslatorVisitor::LDTRSW(Imm<9> imm9, Reg Rn, Reg Rt) {
     const u64 offset = imm9.SignExtend();
-    const AccType acctype = AccType::UNPRIV;
+    const auto acctype = IR::AccType::UNPRIV;
     IR::U64 address;

     if (Rn == Reg::SP) {
diff --git a/src/frontend/A64/translate/impl/load_store_single_structure.cpp b/src/frontend/A64/translate/impl/load_store_single_structure.cpp
index 5b163d6f..a8228536 100644
--- a/src/frontend/A64/translate/impl/load_store_single_structure.cpp
+++ b/src/frontend/A64/translate/impl/load_store_single_structure.cpp
@@ -10,7 +10,7 @@

 namespace Dynarmic::A64 {

-static bool SharedDecodeAndOperation(TranslatorVisitor& v, bool wback, MemOp memop,
+static bool SharedDecodeAndOperation(TranslatorVisitor& v, bool wback, IR::MemOp memop,
                                      bool Q, bool S, bool R, bool replicate, std::optional<Reg> Rm,
                                      Imm<3> opcode, Imm<2> size, Reg Rn, Vec Vt) {
     const size_t selem = (opcode.Bit<0>() << 1 | u32{R}) + 1;
@@ -42,7 +42,7 @@ static bool SharedDecodeAndOperation(TranslatorVisitor& v, bool wback, MemOp mem
         }
         break;
     case 3:
-        if (memop == MemOp::STORE || S) {
+        if (memop == IR::MemOp::STORE || S) {
             return v.UnallocatedEncoding();
         }
         scale = size.ZeroExtend();
@@ -65,7 +65,7 @@ static bool SharedDecodeAndOperation(TranslatorVisitor& v, bool wback, MemOp mem
     if (replicate) {
         for (size_t s = 0; s < selem; s++) {
             const Vec tt = static_cast<Vec>((VecNumber(Vt) + s) % 32);
-            const IR::UAnyU128 element = v.Mem(v.ir.Add(address, offs), ebytes, AccType::VEC);
+            const IR::UAnyU128 element = v.Mem(v.ir.Add(address, offs), ebytes, IR::AccType::VEC);
             const IR::U128 broadcasted_element = v.ir.VectorBroadcast(esize, element);

             v.V(datasize, tt, broadcasted_element);
@@ -77,13 +77,13 @@ static bool SharedDecodeAndOperation(TranslatorVisitor& v, bool wback, MemOp mem
             const Vec tt = static_cast<Vec>((VecNumber(Vt) + s) % 32);
             const IR::U128 rval = v.V(128, tt);

-            if (memop == MemOp::LOAD) {
-                const IR::UAny elem = v.Mem(v.ir.Add(address, offs), ebytes, AccType::VEC);
+            if (memop == IR::MemOp::LOAD) {
+                const IR::UAny elem = v.Mem(v.ir.Add(address, offs), ebytes, IR::AccType::VEC);
                 const IR::U128 vec = v.ir.VectorSetElement(esize, rval, index, elem);
                 v.V(128, tt, vec);
             } else {
                 const IR::UAny elem = v.ir.VectorGetElement(esize, rval, index);
-                v.Mem(v.ir.Add(address, offs), ebytes, AccType::VEC, elem);
+                v.Mem(v.ir.Add(address, offs), ebytes, IR::AccType::VEC, elem);
             }
             offs = v.ir.Add(offs, v.ir.Imm64(ebytes));
         }
@@ -105,114 +105,122 @@ static bool SharedDecodeAndOperation(TranslatorVisitor& v, bool wback, MemOp mem
 }

 bool TranslatorVisitor::LD1_sngl_1(bool Q, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
-    return SharedDecodeAndOperation(*this, false, MemOp::LOAD, Q, S, false, false, {},
+    return SharedDecodeAndOperation(*this, false, IR::MemOp::LOAD, Q, S, false, false, {},
                                     Imm<3>{upper_opcode.ZeroExtend() << 1}, size, Rn, Vt);
 }

 bool TranslatorVisitor::LD1_sngl_2(bool Q, Reg Rm, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
-    return SharedDecodeAndOperation(*this, true, MemOp::LOAD, Q, S, false, false, Rm,
+    return SharedDecodeAndOperation(*this, true, IR::MemOp::LOAD, Q, S, false, false, Rm,
                                     Imm<3>{upper_opcode.ZeroExtend() << 1}, size, Rn, Vt);
 }

 bool TranslatorVisitor::LD1R_1(bool Q, Imm<2> size, Reg Rn, Vec Vt) {
-    return SharedDecodeAndOperation(*this, false, MemOp::LOAD, Q, false, false, true, {}, Imm<3>{0b110}, size, Rn, Vt);
+    return SharedDecodeAndOperation(*this, false, IR::MemOp::LOAD, Q, false, false, true,
+                                    {}, Imm<3>{0b110}, size, Rn, Vt);
 }

 bool TranslatorVisitor::LD1R_2(bool Q, Reg Rm, Imm<2> size, Reg Rn, Vec Vt) {
-    return SharedDecodeAndOperation(*this, true, MemOp::LOAD, Q, false, false, true, Rm, Imm<3>{0b110}, size, Rn, Vt);
+    return SharedDecodeAndOperation(*this, true, IR::MemOp::LOAD, Q, false, false, true,
+                                    Rm, Imm<3>{0b110}, size, Rn, Vt);
 }

 bool TranslatorVisitor::LD2_sngl_1(bool Q, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
-    return SharedDecodeAndOperation(*this, false, MemOp::LOAD, Q, S, true, false, {},
+    return SharedDecodeAndOperation(*this, false, IR::MemOp::LOAD, Q, S, true, false, {},
                                     Imm<3>{upper_opcode.ZeroExtend() << 1}, size, Rn, Vt);
 }

 bool TranslatorVisitor::LD2_sngl_2(bool Q, Reg Rm, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
-    return SharedDecodeAndOperation(*this, true, MemOp::LOAD, Q, S, true, false, Rm,
+    return SharedDecodeAndOperation(*this, true, IR::MemOp::LOAD, Q, S, true, false, Rm,
                                     Imm<3>{upper_opcode.ZeroExtend() << 1}, size, Rn, Vt);
 }

 bool TranslatorVisitor::LD2R_1(bool Q, Imm<2> size, Reg Rn, Vec Vt) {
-    return SharedDecodeAndOperation(*this, false, MemOp::LOAD, Q, false, true, true, {}, Imm<3>{0b110}, size, Rn, Vt);
+    return SharedDecodeAndOperation(*this, false, IR::MemOp::LOAD, Q, false, true, true,
+                                    {}, Imm<3>{0b110}, size, Rn, Vt);
 }

 bool TranslatorVisitor::LD2R_2(bool Q, Reg Rm, Imm<2> size, Reg Rn, Vec Vt) {
-    return SharedDecodeAndOperation(*this, true, MemOp::LOAD, Q, false, true, true, Rm, Imm<3>{0b110}, size, Rn, Vt);
+    return SharedDecodeAndOperation(*this, true, IR::MemOp::LOAD, Q, false, true, true,
+                                    Rm, Imm<3>{0b110}, size, Rn, Vt);
 }

 bool TranslatorVisitor::LD3_sngl_1(bool Q, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
-    return SharedDecodeAndOperation(*this, false, MemOp::LOAD, Q, S, false, false, {},
+    return SharedDecodeAndOperation(*this, false, IR::MemOp::LOAD, Q, S, false, false, {},
                                     Imm<3>{(upper_opcode.ZeroExtend() << 1) | 1}, size, Rn, Vt);
 }

 bool TranslatorVisitor::LD3_sngl_2(bool Q, Reg Rm, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
-    return SharedDecodeAndOperation(*this, true, MemOp::LOAD, Q, S, false, false, Rm,
+    return SharedDecodeAndOperation(*this, true, IR::MemOp::LOAD, Q, S, false, false, Rm,
                                     Imm<3>{(upper_opcode.ZeroExtend() << 1) | 1}, size, Rn, Vt);
 }

 bool TranslatorVisitor::LD3R_1(bool Q, Imm<2> size, Reg Rn, Vec Vt) {
-    return SharedDecodeAndOperation(*this, false, MemOp::LOAD, Q, false, false, true, {}, Imm<3>{0b111}, size, Rn, Vt);
+    return SharedDecodeAndOperation(*this, false, IR::MemOp::LOAD, Q, false, false, true,
+                                    {}, Imm<3>{0b111}, size, Rn, Vt);
 }

 bool TranslatorVisitor::LD3R_2(bool Q, Reg Rm, Imm<2> size, Reg Rn, Vec Vt) {
-    return SharedDecodeAndOperation(*this, true, MemOp::LOAD, Q, false, false, true, Rm, Imm<3>{0b111}, size, Rn, Vt);
+    return SharedDecodeAndOperation(*this, true, IR::MemOp::LOAD, Q, false, false, true,
+                                    Rm, Imm<3>{0b111}, size, Rn, Vt);
 }

 bool TranslatorVisitor::LD4_sngl_1(bool Q, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
-    return SharedDecodeAndOperation(*this, false, MemOp::LOAD, Q, S, true, false, {},
+    return SharedDecodeAndOperation(*this, false, IR::MemOp::LOAD, Q, S, true, false, {},
                                     Imm<3>{(upper_opcode.ZeroExtend() << 1) | 1}, size, Rn, Vt);
 }

 bool TranslatorVisitor::LD4_sngl_2(bool Q, Reg Rm, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
-    return SharedDecodeAndOperation(*this, true, MemOp::LOAD, Q, S, true, false, Rm,
+    return SharedDecodeAndOperation(*this, true, IR::MemOp::LOAD, Q, S, true, false, Rm,
                                     Imm<3>{(upper_opcode.ZeroExtend() << 1) | 1}, size, Rn, Vt);
 }

 bool TranslatorVisitor::LD4R_1(bool Q, Imm<2> size, Reg Rn, Vec Vt) {
-    return SharedDecodeAndOperation(*this, false, MemOp::LOAD, Q, false, true, true, {}, Imm<3>{0b111}, size, Rn, Vt);
+    return SharedDecodeAndOperation(*this, false, IR::MemOp::LOAD, Q, false, true, true,
+                                    {}, Imm<3>{0b111}, size, Rn, Vt);
 }

 bool TranslatorVisitor::LD4R_2(bool Q, Reg Rm, Imm<2> size, Reg Rn, Vec Vt) {
-    return SharedDecodeAndOperation(*this, true, MemOp::LOAD, Q, false, true, true, Rm, Imm<3>{0b111}, size, Rn, Vt);
+    return SharedDecodeAndOperation(*this, true, IR::MemOp::LOAD, Q, false, true, true,
+                                    Rm, Imm<3>{0b111}, size, Rn, Vt);
 }

 bool TranslatorVisitor::ST1_sngl_1(bool Q, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
-    return SharedDecodeAndOperation(*this, false, MemOp::STORE, Q, S, false, false, {},
+    return SharedDecodeAndOperation(*this, false, IR::MemOp::STORE, Q, S, false, false, {},
                                     Imm<3>{upper_opcode.ZeroExtend() << 1}, size, Rn, Vt);
 }

 bool TranslatorVisitor::ST1_sngl_2(bool Q, Reg Rm, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
-    return SharedDecodeAndOperation(*this, true, MemOp::STORE, Q, S, false, false, Rm,
+    return SharedDecodeAndOperation(*this, true, IR::MemOp::STORE, Q, S, false, false, Rm,
                                     Imm<3>{upper_opcode.ZeroExtend() << 1}, size, Rn, Vt);
 }

 bool TranslatorVisitor::ST2_sngl_1(bool Q, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
-    return SharedDecodeAndOperation(*this, false, MemOp::STORE, Q, S, true, false, {},
+    return SharedDecodeAndOperation(*this, false, IR::MemOp::STORE, Q, S, true, false, {},
                                     Imm<3>{upper_opcode.ZeroExtend() << 1}, size, Rn, Vt);
 }

 bool TranslatorVisitor::ST2_sngl_2(bool Q, Reg Rm, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
-    return SharedDecodeAndOperation(*this, true, MemOp::STORE, Q, S, true, false, Rm,
+    return SharedDecodeAndOperation(*this, true, IR::MemOp::STORE, Q, S, true, false, Rm,
                                     Imm<3>{upper_opcode.ZeroExtend() << 1}, size, Rn, Vt);
 }

 bool TranslatorVisitor::ST3_sngl_1(bool Q, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
-    return SharedDecodeAndOperation(*this, false, MemOp::STORE, Q, S, false, false, {},
+    return SharedDecodeAndOperation(*this, false, IR::MemOp::STORE, Q, S, false, false, {},
                                     Imm<3>{(upper_opcode.ZeroExtend() << 1) | 1}, size, Rn, Vt);
 }

 bool TranslatorVisitor::ST3_sngl_2(bool Q, Reg Rm, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
-    return SharedDecodeAndOperation(*this, true, MemOp::STORE, Q, S, false, false, Rm,
+    return SharedDecodeAndOperation(*this, true, IR::MemOp::STORE, Q, S, false, false, Rm,
                                     Imm<3>{(upper_opcode.ZeroExtend() << 1) | 1}, size, Rn, Vt);
 }

 bool TranslatorVisitor::ST4_sngl_1(bool Q, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
-    return SharedDecodeAndOperation(*this, false, MemOp::STORE, Q, S, true, false, {},
+    return SharedDecodeAndOperation(*this, false, IR::MemOp::STORE, Q, S, true, false, {},
                                     Imm<3>{(upper_opcode.ZeroExtend() << 1) | 1}, size, Rn, Vt);
 }

 bool TranslatorVisitor::ST4_sngl_2(bool Q, Reg Rm, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
-    return SharedDecodeAndOperation(*this, true, MemOp::STORE, Q, S, true, false, Rm,
+    return SharedDecodeAndOperation(*this, true, IR::MemOp::STORE, Q, S, true, false, Rm,
                                     Imm<3>{(upper_opcode.ZeroExtend() << 1) | 1}, size, Rn, Vt);
 }
diff --git a/src/frontend/ir/ir_emitter.h b/src/frontend/ir/ir_emitter.h
index 2e48aaa3..940a61e0 100644
--- a/src/frontend/ir/ir_emitter.h
+++ b/src/frontend/ir/ir_emitter.h
@@ -57,6 +57,16 @@ struct UpperAndLower {
     U128 lower;
 };

+enum class AccType {
+    NORMAL, VEC, STREAM, VECSTREAM,
+    ATOMIC, ORDERED, ORDEREDRW, LIMITEDORDERED,
+    UNPRIV, IFETCH, PTW, DC, IC, DCZVA, AT,
+};
+
+enum class MemOp {
+    LOAD, STORE, PREFETCH,
+};
+
 /**
  * Convenience class to construct a basic block of the intermediate representation.
  * `block` is the resulting block.
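
Usage sketch (not part of the patch): with the enums now declared in the
Dynarmic::IR namespace by ir_emitter.h, frontend code spells them as
IR::MemOp and IR::AccType, exactly as the hunks above do. The snippet below
is a standalone illustration of the decode pattern the patch uses (L selects
load vs. store; o0 selects ordered vs. atomic access in the exclusive
load/store handlers). The helper functions are hypothetical and exist only
for this example; they are not part of dynarmic.

    // Standalone translation unit; mirrors the declarations this patch
    // adds to src/frontend/ir/ir_emitter.h.
    namespace Dynarmic::IR {
    enum class AccType {
        NORMAL, VEC, STREAM, VECSTREAM,
        ATOMIC, ORDERED, ORDEREDRW, LIMITEDORDERED,
        UNPRIV, IFETCH, PTW, DC, IC, DCZVA, AT,
    };
    enum class MemOp {
        LOAD, STORE, PREFETCH,
    };
    } // namespace Dynarmic::IR

    namespace {
    // Hypothetical helpers: derive the memory operation and access type
    // from instruction bits, as ExclusiveSharedDecodeAndOperation does above.
    constexpr Dynarmic::IR::MemOp MemOpFromL(bool L) {
        return L ? Dynarmic::IR::MemOp::LOAD : Dynarmic::IR::MemOp::STORE;
    }
    constexpr Dynarmic::IR::AccType AccTypeFromO0(bool o0) {
        return o0 ? Dynarmic::IR::AccType::ORDERED : Dynarmic::IR::AccType::ATOMIC;
    }
    } // namespace

    // Compile-time checks of the decode rules quoted from the patch.
    static_assert(MemOpFromL(true) == Dynarmic::IR::MemOp::LOAD);
    static_assert(AccTypeFromO0(false) == Dynarmic::IR::AccType::ATOMIC);

Because both frontends will include ir_emitter.h anyway, placing the enums
there lets the A32 and A64 translators share one definition instead of each
carrying its own copy, which is the point of this migratory change.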