A64/impl: Move AccType and MemOp enum into general IR emitter header
These will be used by both frontends in the future, so this performs the migratory changes separately from the changes that will make use of them.
commit af3614553b
parent ef3ca44e13
11 changed files with 142 additions and 132 deletions
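For context, this is roughly how a consumer sees the move: call sites that previously named the A64-local enums now qualify them through the IR namespace. The sketch below is illustrative only — the include path and the IsOrderedAccess helper are assumptions for the example, not part of this commit.

    #include "frontend/ir/ir_emitter.h"  // assumed new home of AccType/MemOp

    namespace Dynarmic {
    // Hypothetical helper: both frontends can now share logic keyed on IR::AccType.
    static bool IsOrderedAccess(IR::AccType acc_type) {
        return acc_type == IR::AccType::ORDERED || acc_type == IR::AccType::ORDEREDRW;
    }
    } // namespace Dynarmic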
@@ -275,7 +275,7 @@ void TranslatorVisitor::Vpart_scalar(size_t bitsize, Vec vec, size_t part, IR::U
     }
 }
 
-IR::UAnyU128 TranslatorVisitor::Mem(IR::U64 address, size_t bytesize, AccType /*acctype*/) {
+IR::UAnyU128 TranslatorVisitor::Mem(IR::U64 address, size_t bytesize, IR::AccType /*acc_type*/) {
     switch (bytesize) {
     case 1:
         return ir.ReadMemory8(address);
@@ -293,7 +293,7 @@ IR::UAnyU128 TranslatorVisitor::Mem(IR::U64 address, size_t bytesize, AccType /*
     }
 }
 
-void TranslatorVisitor::Mem(IR::U64 address, size_t bytesize, AccType /*acctype*/, IR::UAnyU128 value) {
+void TranslatorVisitor::Mem(IR::U64 address, size_t bytesize, IR::AccType /*acc_type*/, IR::UAnyU128 value) {
     switch (bytesize) {
     case 1:
         ir.WriteMemory8(address, value);
@@ -316,7 +316,7 @@ void TranslatorVisitor::Mem(IR::U64 address, size_t bytesize, AccType /*acctype*
     }
 }
 
-IR::U32 TranslatorVisitor::ExclusiveMem(IR::U64 address, size_t bytesize, AccType /*acctype*/, IR::UAnyU128 value) {
+IR::U32 TranslatorVisitor::ExclusiveMem(IR::U64 address, size_t bytesize, IR::AccType /*acc_type*/, IR::UAnyU128 value) {
     switch (bytesize) {
     case 1:
         return ir.ExclusiveWriteMemory8(address, value);
@@ -16,14 +16,6 @@
 
 namespace Dynarmic::A64 {
 
-enum class AccType {
-    NORMAL, VEC, STREAM, VECSTREAM, ATOMIC, ORDERED, ORDEREDRW, LIMITEDORDERED, UNPRIV, IFETCH, PTW, DC, IC, DCZVA, AT,
-};
-
-enum class MemOp {
-    LOAD, STORE, PREFETCH,
-};
-
 struct TranslatorVisitor final {
     using instruction_return_type = bool;
 
@@ -64,9 +56,9 @@ struct TranslatorVisitor final {
     IR::UAny Vpart_scalar(size_t bitsize, Vec vec, size_t part);
     void Vpart_scalar(size_t bitsize, Vec vec, size_t part, IR::UAny value);
 
-    IR::UAnyU128 Mem(IR::U64 address, size_t size, AccType acctype);
-    void Mem(IR::U64 address, size_t size, AccType acctype, IR::UAnyU128 value);
-    IR::U32 ExclusiveMem(IR::U64 address, size_t size, AccType acctype, IR::UAnyU128 value);
+    IR::UAnyU128 Mem(IR::U64 address, size_t size, IR::AccType acctype);
+    void Mem(IR::U64 address, size_t size, IR::AccType acctype, IR::UAnyU128 value);
+    IR::U32 ExclusiveMem(IR::U64 address, size_t size, IR::AccType acctype, IR::UAnyU128 value);
 
     IR::U32U64 SignExtend(IR::UAny value, size_t to_size);
     IR::U32U64 ZeroExtend(IR::UAny value, size_t to_size);
@@ -13,8 +13,8 @@ namespace Dynarmic::A64 {
 static bool ExclusiveSharedDecodeAndOperation(TranslatorVisitor& v, bool pair, size_t size, bool L, bool o0, std::optional<Reg> Rs, std::optional<Reg> Rt2, Reg Rn, Reg Rt) {
     // Shared Decode
 
-    const AccType acctype = o0 ? AccType::ORDERED : AccType::ATOMIC;
-    const MemOp memop = L ? MemOp::LOAD : MemOp::STORE;
+    const auto acctype = o0 ? IR::AccType::ORDERED : IR::AccType::ATOMIC;
+    const auto memop = L ? IR::MemOp::LOAD : IR::MemOp::STORE;
     const size_t elsize = 8 << size;
     const size_t regsize = elsize == 64 ? 64 : 32;
     const size_t datasize = pair ? elsize * 2 : elsize;
@@ -23,14 +23,14 @@ static bool ExclusiveSharedDecodeAndOperation(TranslatorVisitor& v, bool pair, s
 
     const size_t dbytes = datasize / 8;
 
-    if (memop == MemOp::LOAD && pair && Rt == *Rt2) {
+    if (memop == IR::MemOp::LOAD && pair && Rt == *Rt2) {
         return v.UnpredictableInstruction();
-    } else if (memop == MemOp::STORE && (*Rs == Rt || (pair && *Rs == *Rt2))) {
+    } else if (memop == IR::MemOp::STORE && (*Rs == Rt || (pair && *Rs == *Rt2))) {
         if (!v.options.define_unpredictable_behaviour) {
             return v.UnpredictableInstruction();
         }
         // UNPREDICTABLE: The Constraint_NONE case is executed.
-    } else if (memop == MemOp::STORE && *Rs == Rn && Rn != Reg::R31) {
+    } else if (memop == IR::MemOp::STORE && *Rs == Rn && Rn != Reg::R31) {
         return v.UnpredictableInstruction();
     }
 
@@ -43,7 +43,7 @@ static bool ExclusiveSharedDecodeAndOperation(TranslatorVisitor& v, bool pair, s
     }
 
     switch (memop) {
-    case MemOp::STORE: {
+    case IR::MemOp::STORE: {
         IR::UAnyU128 data;
         if (pair && elsize == 64) {
             data = v.ir.Pack2x64To1x128(v.X(64, Rt), v.X(64, *Rt2));
@@ -56,7 +56,7 @@ static bool ExclusiveSharedDecodeAndOperation(TranslatorVisitor& v, bool pair, s
         v.X(32, *Rs, status);
         break;
     }
-    case MemOp::LOAD: {
+    case IR::MemOp::LOAD: {
         v.ir.SetExclusive(address, dbytes);
         const IR::UAnyU128 data = v.Mem(address, dbytes, acctype);
         if (pair && elsize == 64) {
@@ -144,8 +144,8 @@ bool TranslatorVisitor::LDAXP(Imm<1> sz, Reg Rt2, Reg Rn, Reg Rt) {
 static bool OrderedSharedDecodeAndOperation(TranslatorVisitor& v, size_t size, bool L, bool o0, Reg Rn, Reg Rt) {
     // Shared Decode
 
-    const AccType acctype = !o0 ? AccType::LIMITEDORDERED : AccType::ORDERED;
-    const MemOp memop = L ? MemOp::LOAD : MemOp::STORE;
+    const auto acctype = !o0 ? IR::AccType::LIMITEDORDERED : IR::AccType::ORDERED;
+    const auto memop = L ? IR::MemOp::LOAD : IR::MemOp::STORE;
     const size_t elsize = 8 << size;
     const size_t regsize = elsize == 64 ? 64 : 32;
     const size_t datasize = elsize;
@@ -163,12 +163,12 @@ static bool OrderedSharedDecodeAndOperation(TranslatorVisitor& v, size_t size, b
     }
 
     switch (memop) {
-    case MemOp::STORE: {
+    case IR::MemOp::STORE: {
         const IR::UAny data = v.X(datasize, Rt);
         v.Mem(address, dbytes, acctype, data);
         break;
     }
-    case MemOp::LOAD: {
+    case IR::MemOp::LOAD: {
         const IR::UAny data = v.Mem(address, dbytes, acctype);
         v.X(regsize, Rt, v.ZeroExtend(data, regsize));
         break;
@@ -13,7 +13,7 @@ bool TranslatorVisitor::LDR_lit_gen(bool opc_0, Imm<19> imm19, Reg Rt) {
     const s64 offset = concatenate(imm19, Imm<2>{0}).SignExtend<s64>();
 
     const u64 address = ir.PC() + offset;
-    const auto data = Mem(ir.Imm64(address), size, AccType::NORMAL);
+    const auto data = Mem(ir.Imm64(address), size, IR::AccType::NORMAL);
 
     X(8 * size, Rt, data);
     return true;
@@ -27,7 +27,7 @@ bool TranslatorVisitor::LDR_lit_fpsimd(Imm<2> opc, Imm<19> imm19, Vec Vt) {
     const u64 size = 4 << opc.ZeroExtend();
     const u64 offset = imm19.SignExtend<u64>() << 2;
     const IR::U64 address = ir.Imm64(ir.PC() + offset);
-    const IR::UAnyU128 data = Mem(address, size, AccType::VEC);
+    const IR::UAnyU128 data = Mem(address, size, IR::AccType::VEC);
 
     if (size == 16) {
         V(128, Vt, data);
@@ -40,7 +40,7 @@ bool TranslatorVisitor::LDR_lit_fpsimd(Imm<2> opc, Imm<19> imm19, Vec Vt) {
 bool TranslatorVisitor::LDRSW_lit(Imm<19> imm19, Reg Rt) {
     const s64 offset = concatenate(imm19, Imm<2>{0}).SignExtend<s64>();
     const u64 address = ir.PC() + offset;
-    const auto data = Mem(ir.Imm64(address), 4, AccType::NORMAL);
+    const auto data = Mem(ir.Imm64(address), 4, IR::AccType::NORMAL);
 
     X(64, Rt, ir.SignExtendWordToLong(data));
     return true;
@@ -10,7 +10,7 @@
 
 namespace Dynarmic::A64 {
 
-static bool SharedDecodeAndOperation(TranslatorVisitor& v, bool wback, MemOp memop, bool Q, std::optional<Reg> Rm, Imm<4> opcode, Imm<2> size, Reg Rn, Vec Vt) {
+static bool SharedDecodeAndOperation(TranslatorVisitor& v, bool wback, IR::MemOp memop, bool Q, std::optional<Reg> Rm, Imm<4> opcode, Imm<2> size, Reg Rn, Vec Vt) {
     const size_t datasize = Q ? 128 : 64;
     const size_t esize = 8 << size.ZeroExtend<size_t>();
     const size_t elements = datasize / esize;
@@ -67,12 +67,12 @@ static bool SharedDecodeAndOperation(TranslatorVisitor& v, bool wback, MemOp mem
     if (selem == 1) {
         for (size_t r = 0; r < rpt; r++) {
             const Vec tt = static_cast<Vec>((VecNumber(Vt) + r) % 32);
-            if (memop == MemOp::LOAD) {
-                const IR::UAnyU128 vec = v.Mem(v.ir.Add(address, offs), ebytes * elements, AccType::VEC);
+            if (memop == IR::MemOp::LOAD) {
+                const IR::UAnyU128 vec = v.Mem(v.ir.Add(address, offs), ebytes * elements, IR::AccType::VEC);
                 v.V_scalar(datasize, tt, vec);
             } else {
                 const IR::UAnyU128 vec = v.V_scalar(datasize, tt);
-                v.Mem(v.ir.Add(address, offs), ebytes * elements, AccType::VEC, vec);
+                v.Mem(v.ir.Add(address, offs), ebytes * elements, IR::AccType::VEC, vec);
             }
             offs = v.ir.Add(offs, v.ir.Imm64(ebytes * elements));
         }
@@ -80,13 +80,13 @@ static bool SharedDecodeAndOperation(TranslatorVisitor& v, bool wback, MemOp mem
         for (size_t e = 0; e < elements; e++) {
             for (size_t s = 0; s < selem; s++) {
                 const Vec tt = static_cast<Vec>((VecNumber(Vt) + s) % 32);
-                if (memop == MemOp::LOAD) {
-                    const IR::UAny elem = v.Mem(v.ir.Add(address, offs), ebytes, AccType::VEC);
+                if (memop == IR::MemOp::LOAD) {
+                    const IR::UAny elem = v.Mem(v.ir.Add(address, offs), ebytes, IR::AccType::VEC);
                     const IR::U128 vec = v.ir.VectorSetElement(esize, v.V(datasize, tt), e, elem);
                     v.V(datasize, tt, vec);
                 } else {
                     const IR::UAny elem = v.ir.VectorGetElement(esize, v.V(datasize, tt), e);
-                    v.Mem(v.ir.Add(address, offs), ebytes, AccType::VEC, elem);
+                    v.Mem(v.ir.Add(address, offs), ebytes, IR::AccType::VEC, elem);
                 }
                 offs = v.ir.Add(offs, v.ir.Imm64(ebytes));
             }
@@ -110,25 +110,25 @@ static bool SharedDecodeAndOperation(TranslatorVisitor& v, bool wback, MemOp mem
 
 bool TranslatorVisitor::STx_mult_1(bool Q, Imm<4> opcode, Imm<2> size, Reg Rn, Vec Vt) {
     const bool wback = false;
-    const MemOp memop = MemOp::STORE;
+    const auto memop = IR::MemOp::STORE;
     return SharedDecodeAndOperation(*this, wback, memop, Q, {}, opcode, size, Rn, Vt);
 }
 
 bool TranslatorVisitor::STx_mult_2(bool Q, Reg Rm, Imm<4> opcode, Imm<2> size, Reg Rn, Vec Vt) {
     const bool wback = true;
-    const MemOp memop = MemOp::STORE;
+    const auto memop = IR::MemOp::STORE;
     return SharedDecodeAndOperation(*this, wback, memop, Q, Rm, opcode, size, Rn, Vt);
 }
 
 bool TranslatorVisitor::LDx_mult_1(bool Q, Imm<4> opcode, Imm<2> size, Reg Rn, Vec Vt) {
     const bool wback = false;
-    const MemOp memop = MemOp::LOAD;
+    const auto memop = IR::MemOp::LOAD;
     return SharedDecodeAndOperation(*this, wback, memop, Q, {}, opcode, size, Rn, Vt);
 }
 
 bool TranslatorVisitor::LDx_mult_2(bool Q, Reg Rm, Imm<4> opcode, Imm<2> size, Reg Rn, Vec Vt) {
     const bool wback = true;
-    const MemOp memop = MemOp::LOAD;
+    const auto memop = IR::MemOp::LOAD;
     return SharedDecodeAndOperation(*this, wback, memop, Q, Rm, opcode, size, Rn, Vt);
 }
 
@@ -9,28 +9,28 @@
 namespace Dynarmic::A64 {
 
 static bool LoadStoreRegisterImmediate(TranslatorVisitor& v, bool wback, bool postindex, size_t scale, u64 offset, Imm<2> size, Imm<2> opc, Reg Rn, Reg Rt) {
-    MemOp memop;
+    IR::MemOp memop;
     bool signed_ = false;
     size_t regsize = 0;
 
     if (opc.Bit<1>() == 0) {
-        memop = opc.Bit<0>() ? MemOp::LOAD : MemOp::STORE;
+        memop = opc.Bit<0>() ? IR::MemOp::LOAD : IR::MemOp::STORE;
         regsize = size == 0b11 ? 64 : 32;
         signed_ = false;
     } else if (size == 0b11) {
-        memop = MemOp::PREFETCH;
+        memop = IR::MemOp::PREFETCH;
        ASSERT(!opc.Bit<0>());
     } else {
-        memop = MemOp::LOAD;
+        memop = IR::MemOp::LOAD;
        ASSERT(!(size == 0b10 && opc.Bit<0>() == 1));
        regsize = opc.Bit<0>() ? 32 : 64;
        signed_ = true;
     }
 
-    if (memop == MemOp::LOAD && wback && Rn == Rt && Rn != Reg::R31) {
+    if (memop == IR::MemOp::LOAD && wback && Rn == Rt && Rn != Reg::R31) {
         return v.UnpredictableInstruction();
     }
-    if (memop == MemOp::STORE && wback && Rn == Rt && Rn != Reg::R31) {
+    if (memop == IR::MemOp::STORE && wback && Rn == Rt && Rn != Reg::R31) {
         return v.UnpredictableInstruction();
     }
 
@@ -42,13 +42,13 @@ static bool LoadStoreRegisterImmediate(TranslatorVisitor& v, bool wback, bool po
 
     const size_t datasize = 8 << scale;
     switch (memop) {
-    case MemOp::STORE: {
+    case IR::MemOp::STORE: {
         const auto data = v.X(datasize, Rt);
-        v.Mem(address, datasize / 8, AccType::NORMAL, data);
+        v.Mem(address, datasize / 8, IR::AccType::NORMAL, data);
         break;
     }
-    case MemOp::LOAD: {
-        const auto data = v.Mem(address, datasize / 8, AccType::NORMAL);
+    case IR::MemOp::LOAD: {
+        const auto data = v.Mem(address, datasize / 8, IR::AccType::NORMAL);
         if (signed_) {
             v.X(regsize, Rt, v.SignExtend(data, regsize));
         } else {
@@ -56,7 +56,7 @@ static bool LoadStoreRegisterImmediate(TranslatorVisitor& v, bool wback, bool po
         }
         break;
     }
-    case MemOp::PREFETCH:
+    case IR::MemOp::PREFETCH:
         // Prefetch(address, Rt)
         break;
     }
@@ -115,8 +115,8 @@ bool TranslatorVisitor::PRFM_unscaled_imm([[maybe_unused]] Imm<9> imm9, [[maybe_
     return true;
 }
 
-static bool LoadStoreSIMD(TranslatorVisitor& v, bool wback, bool postindex, size_t scale, u64 offset, MemOp memop, Reg Rn, Vec Vt) {
-    const AccType acctype = AccType::VEC;
+static bool LoadStoreSIMD(TranslatorVisitor& v, bool wback, bool postindex, size_t scale, u64 offset, IR::MemOp memop, Reg Rn, Vec Vt) {
+    const auto acctype = IR::AccType::VEC;
     const size_t datasize = 8 << scale;
 
     IR::U64 address;
@@ -132,7 +132,7 @@ static bool LoadStoreSIMD(TranslatorVisitor& v, bool wback, bool postindex, size
     }
 
     switch (memop) {
-    case MemOp::STORE:
+    case IR::MemOp::STORE:
         if (datasize == 128) {
             const IR::U128 data = v.V(128, Vt);
             v.Mem(address, 16, acctype, data);
@@ -141,7 +141,7 @@ static bool LoadStoreSIMD(TranslatorVisitor& v, bool wback, bool postindex, size
             v.Mem(address, datasize / 8, acctype, data);
         }
         break;
-    case MemOp::LOAD:
+    case IR::MemOp::LOAD:
         if (datasize == 128) {
             const IR::U128 data = v.Mem(address, 16, acctype);
             v.V(128, Vt, data);
@@ -179,7 +179,7 @@ bool TranslatorVisitor::STR_imm_fpsimd_1(Imm<2> size, Imm<1> opc_1, Imm<9> imm9,
     const bool postindex = !not_postindex;
     const u64 offset = imm9.SignExtend<u64>();
 
-    return LoadStoreSIMD(*this, wback, postindex, scale, offset, MemOp::STORE, Rn, Vt);
+    return LoadStoreSIMD(*this, wback, postindex, scale, offset, IR::MemOp::STORE, Rn, Vt);
 }
 
 bool TranslatorVisitor::STR_imm_fpsimd_2(Imm<2> size, Imm<1> opc_1, Imm<12> imm12, Reg Rn, Vec Vt) {
@@ -192,7 +192,7 @@ bool TranslatorVisitor::STR_imm_fpsimd_2(Imm<2> size, Imm<1> opc_1, Imm<12> imm1
     const bool postindex = false;
     const u64 offset = imm12.ZeroExtend<u64>() << scale;
 
-    return LoadStoreSIMD(*this, wback, postindex, scale, offset, MemOp::STORE, Rn, Vt);
+    return LoadStoreSIMD(*this, wback, postindex, scale, offset, IR::MemOp::STORE, Rn, Vt);
 }
 
 bool TranslatorVisitor::LDR_imm_fpsimd_1(Imm<2> size, Imm<1> opc_1, Imm<9> imm9, bool not_postindex, Reg Rn, Vec Vt) {
@@ -205,7 +205,7 @@ bool TranslatorVisitor::LDR_imm_fpsimd_1(Imm<2> size, Imm<1> opc_1, Imm<9> imm9,
     const bool postindex = !not_postindex;
     const u64 offset = imm9.SignExtend<u64>();
 
-    return LoadStoreSIMD(*this, wback, postindex, scale, offset, MemOp::LOAD, Rn, Vt);
+    return LoadStoreSIMD(*this, wback, postindex, scale, offset, IR::MemOp::LOAD, Rn, Vt);
 }
 
 bool TranslatorVisitor::LDR_imm_fpsimd_2(Imm<2> size, Imm<1> opc_1, Imm<12> imm12, Reg Rn, Vec Vt) {
@@ -218,7 +218,7 @@ bool TranslatorVisitor::LDR_imm_fpsimd_2(Imm<2> size, Imm<1> opc_1, Imm<12> imm1
     const bool postindex = false;
     const u64 offset = imm12.ZeroExtend<u64>() << scale;
 
-    return LoadStoreSIMD(*this, wback, postindex, scale, offset, MemOp::LOAD, Rn, Vt);
+    return LoadStoreSIMD(*this, wback, postindex, scale, offset, IR::MemOp::LOAD, Rn, Vt);
 }
 
 bool TranslatorVisitor::STUR_fpsimd(Imm<2> size, Imm<1> opc_1, Imm<9> imm9, Reg Rn, Vec Vt) {
@@ -231,7 +231,7 @@ bool TranslatorVisitor::STUR_fpsimd(Imm<2> size, Imm<1> opc_1, Imm<9> imm9, Reg
     const bool postindex = false;
     const u64 offset = imm9.SignExtend<u64>();
 
-    return LoadStoreSIMD(*this, wback, postindex, scale, offset, MemOp::STORE, Rn, Vt);
+    return LoadStoreSIMD(*this, wback, postindex, scale, offset, IR::MemOp::STORE, Rn, Vt);
 }
 
 bool TranslatorVisitor::LDUR_fpsimd(Imm<2> size, Imm<1> opc_1, Imm<9> imm9, Reg Rn, Vec Vt) {
@@ -244,7 +244,7 @@ bool TranslatorVisitor::LDUR_fpsimd(Imm<2> size, Imm<1> opc_1, Imm<9> imm9, Reg
     const bool postindex = false;
     const u64 offset = imm9.SignExtend<u64>();
 
-    return LoadStoreSIMD(*this, wback, postindex, scale, offset, MemOp::LOAD, Rn, Vt);
+    return LoadStoreSIMD(*this, wback, postindex, scale, offset, IR::MemOp::LOAD, Rn, Vt);
 }
 
 } // namespace Dynarmic::A64
@@ -13,14 +13,14 @@ bool TranslatorVisitor::STP_LDP_gen(Imm<2> opc, bool not_postindex, bool wback,
         return UnallocatedEncoding();
     }
 
-    const MemOp memop = L == 1 ? MemOp::LOAD : MemOp::STORE;
-    if (memop == MemOp::LOAD && wback && (Rt == Rn || Rt2 == Rn) && Rn != Reg::R31) {
+    const auto memop = L == 1 ? IR::MemOp::LOAD : IR::MemOp::STORE;
+    if (memop == IR::MemOp::LOAD && wback && (Rt == Rn || Rt2 == Rn) && Rn != Reg::R31) {
         return UnpredictableInstruction();
     }
-    if (memop == MemOp::STORE && wback && (Rt == Rn || Rt2 == Rn) && Rn != Reg::R31) {
+    if (memop == IR::MemOp::STORE && wback && (Rt == Rn || Rt2 == Rn) && Rn != Reg::R31) {
         return UnpredictableInstruction();
     }
-    if (memop == MemOp::LOAD && Rt == Rt2) {
+    if (memop == IR::MemOp::LOAD && Rt == Rt2) {
         return UnpredictableInstruction();
     }
 
@@ -44,16 +44,16 @@ bool TranslatorVisitor::STP_LDP_gen(Imm<2> opc, bool not_postindex, bool wback,
 
     const size_t dbytes = datasize / 8;
     switch (memop) {
-    case MemOp::STORE: {
+    case IR::MemOp::STORE: {
         const IR::U32U64 data1 = X(datasize, Rt);
         const IR::U32U64 data2 = X(datasize, Rt2);
-        Mem(address, dbytes, AccType::NORMAL, data1);
-        Mem(ir.Add(address, ir.Imm64(dbytes)), dbytes, AccType::NORMAL, data2);
+        Mem(address, dbytes, IR::AccType::NORMAL, data1);
+        Mem(ir.Add(address, ir.Imm64(dbytes)), dbytes, IR::AccType::NORMAL, data2);
         break;
     }
-    case MemOp::LOAD: {
-        const IR::U32U64 data1 = Mem(address, dbytes, AccType::NORMAL);
-        const IR::U32U64 data2 = Mem(ir.Add(address, ir.Imm64(dbytes)), dbytes, AccType::NORMAL);
+    case IR::MemOp::LOAD: {
+        const IR::U32U64 data1 = Mem(address, dbytes, IR::AccType::NORMAL);
+        const IR::U32U64 data2 = Mem(ir.Add(address, ir.Imm64(dbytes)), dbytes, IR::AccType::NORMAL);
         if (signed_) {
             X(64, Rt, SignExtend(data1, 64));
             X(64, Rt2, SignExtend(data2, 64));
@@ -63,7 +63,7 @@ bool TranslatorVisitor::STP_LDP_gen(Imm<2> opc, bool not_postindex, bool wback,
         }
         break;
     }
-    case MemOp::PREFETCH:
+    case IR::MemOp::PREFETCH:
         UNREACHABLE();
     }
 
@@ -87,8 +87,8 @@ bool TranslatorVisitor::STP_LDP_fpsimd(Imm<2> opc, bool not_postindex, bool wbac
         return UnallocatedEncoding();
     }
 
-    const MemOp memop = L == 1 ? MemOp::LOAD : MemOp::STORE;
-    if (memop == MemOp::LOAD && Vt == Vt2) {
+    const auto memop = L == 1 ? IR::MemOp::LOAD : IR::MemOp::STORE;
+    if (memop == IR::MemOp::LOAD && Vt == Vt2) {
         return UnpredictableInstruction();
     }
 
@@ -111,20 +111,20 @@ bool TranslatorVisitor::STP_LDP_fpsimd(Imm<2> opc, bool not_postindex, bool wbac
     }
 
     switch (memop) {
-    case MemOp::STORE: {
+    case IR::MemOp::STORE: {
         IR::UAnyU128 data1 = V(datasize, Vt);
         IR::UAnyU128 data2 = V(datasize, Vt2);
         if (datasize != 128) {
             data1 = ir.VectorGetElement(datasize, data1, 0);
             data2 = ir.VectorGetElement(datasize, data2, 0);
         }
-        Mem(address, dbytes, AccType::VEC, data1);
-        Mem(ir.Add(address, ir.Imm64(dbytes)), dbytes, AccType::VEC, data2);
+        Mem(address, dbytes, IR::AccType::VEC, data1);
+        Mem(ir.Add(address, ir.Imm64(dbytes)), dbytes, IR::AccType::VEC, data2);
         break;
     }
-    case MemOp::LOAD: {
-        IR::UAnyU128 data1 = Mem(address, dbytes, AccType::VEC);
-        IR::UAnyU128 data2 = Mem(ir.Add(address, ir.Imm64(dbytes)), dbytes, AccType::VEC);
+    case IR::MemOp::LOAD: {
+        IR::UAnyU128 data1 = Mem(address, dbytes, IR::AccType::VEC);
+        IR::UAnyU128 data2 = Mem(ir.Add(address, ir.Imm64(dbytes)), dbytes, IR::AccType::VEC);
         if (datasize != 128) {
             data1 = ir.ZeroExtendToQuad(data1);
             data2 = ir.ZeroExtendToQuad(data2);
@@ -133,7 +133,7 @@ bool TranslatorVisitor::STP_LDP_fpsimd(Imm<2> opc, bool not_postindex, bool wbac
         V(datasize, Vt2, data2);
         break;
     }
-    case MemOp::PREFETCH:
+    case IR::MemOp::PREFETCH:
         UNREACHABLE();
     }
 
@@ -11,22 +11,22 @@ namespace Dynarmic::A64 {
 static bool RegSharedDecodeAndOperation(TranslatorVisitor& v, size_t scale, u8 shift, Imm<2> size, Imm<1> opc_1, Imm<1> opc_0, Reg Rm, Imm<3> option, Reg Rn, Reg Rt) {
     // Shared Decode
 
-    const AccType acctype = AccType::NORMAL;
-    MemOp memop;
+    const auto acctype = IR::AccType::NORMAL;
+    IR::MemOp memop;
     size_t regsize = 64;
     bool signed_ = false;
 
     if (opc_1 == 0) {
-        memop = opc_0 == 1 ? MemOp::LOAD : MemOp::STORE;
+        memop = opc_0 == 1 ? IR::MemOp::LOAD : IR::MemOp::STORE;
         regsize = size == 0b11 ? 64 : 32;
         signed_ = false;
     } else if (size == 0b11) {
-        memop = MemOp::PREFETCH;
+        memop = IR::MemOp::PREFETCH;
         if (opc_0 == 1) {
             return v.UnallocatedEncoding();
         }
     } else {
-        memop = MemOp::LOAD;
+        memop = IR::MemOp::LOAD;
         if (size == 0b10 && opc_0 == 1) {
             return v.UnallocatedEncoding();
         }
@@ -50,12 +50,12 @@ static bool RegSharedDecodeAndOperation(TranslatorVisitor& v, size_t scale, u8 s
     address = v.ir.Add(address, offset);
 
     switch (memop) {
-    case MemOp::STORE: {
+    case IR::MemOp::STORE: {
         const IR::UAny data = v.X(datasize, Rt);
         v.Mem(address, datasize / 8, acctype, data);
         break;
     }
-    case MemOp::LOAD: {
+    case IR::MemOp::LOAD: {
         const IR::UAny data = v.Mem(address, datasize / 8, acctype);
         if (signed_) {
             v.X(regsize, Rt, v.SignExtend(data, regsize));
@@ -64,7 +64,7 @@ static bool RegSharedDecodeAndOperation(TranslatorVisitor& v, size_t scale, u8 s
         }
         break;
     }
-    case MemOp::PREFETCH:
+    case IR::MemOp::PREFETCH:
         // TODO: Prefetch
         break;
     default:
@@ -97,8 +97,8 @@ bool TranslatorVisitor::LDRx_reg(Imm<2> size, Imm<1> opc_1, Reg Rm, Imm<3> optio
 static bool VecSharedDecodeAndOperation(TranslatorVisitor& v, size_t scale, u8 shift, Imm<1> opc_0, Reg Rm, Imm<3> option, Reg Rn, Vec Vt) {
     // Shared Decode
 
-    const AccType acctype = AccType::VEC;
-    const MemOp memop = opc_0 == 1 ? MemOp::LOAD : MemOp::STORE;
+    const auto acctype = IR::AccType::VEC;
+    const auto memop = opc_0 == 1 ? IR::MemOp::LOAD : IR::MemOp::STORE;
     const size_t datasize = 8 << scale;
 
     // Operation
@@ -115,12 +115,12 @@ static bool VecSharedDecodeAndOperation(TranslatorVisitor& v, size_t scale, u8 s
     address = v.ir.Add(address, offset);
 
     switch (memop) {
-    case MemOp::STORE: {
+    case IR::MemOp::STORE: {
         const IR::UAnyU128 data = v.V_scalar(datasize, Vt);
         v.Mem(address, datasize / 8, acctype, data);
         break;
     }
-    case MemOp::LOAD: {
+    case IR::MemOp::LOAD: {
         const IR::UAnyU128 data = v.Mem(address, datasize / 8, acctype);
         v.V_scalar(datasize, Vt, data);
         break;
@@ -11,7 +11,7 @@ namespace Dynarmic::A64 {
 static bool StoreRegister(TranslatorVisitor& v, const size_t datasize,
                           const Imm<9> imm9, const Reg Rn, const Reg Rt) {
     const u64 offset = imm9.SignExtend<u64>();
-    const AccType acctype = AccType::UNPRIV;
+    const auto acctype = IR::AccType::UNPRIV;
 
     IR::U64 address;
     if (Rn == Reg::SP) {
@@ -30,7 +30,7 @@ static bool StoreRegister(TranslatorVisitor& v, const size_t datasize,
 static bool LoadRegister(TranslatorVisitor& v, const size_t datasize,
                          const Imm<9> imm9, const Reg Rn, const Reg Rt) {
     const u64 offset = imm9.SignExtend<u64>();
-    const AccType acctype = AccType::UNPRIV;
+    const auto acctype = IR::AccType::UNPRIV;
 
     IR::U64 address;
     if (Rn == Reg::SP) {
@@ -51,19 +51,19 @@ static bool LoadRegister(TranslatorVisitor& v, const size_t datasize,
 static bool LoadRegisterSigned(TranslatorVisitor& v, const size_t datasize,
                                const Imm<2> opc, const Imm<9> imm9, const Reg Rn, const Reg Rt) {
     const u64 offset = imm9.SignExtend<u64>();
-    const AccType acctype = AccType::UNPRIV;
+    const auto acctype = IR::AccType::UNPRIV;
 
-    MemOp memop;
+    IR::MemOp memop;
     bool is_signed;
     size_t regsize;
     if (opc.Bit<1>() == 0) {
         // store or zero-extending load
-        memop = opc.Bit<0>() ? MemOp::LOAD : MemOp::STORE;
+        memop = opc.Bit<0>() ? IR::MemOp::LOAD : IR::MemOp::STORE;
         regsize = 32;
         is_signed = false;
     } else {
         // sign-extending load
-        memop = MemOp::LOAD;
+        memop = IR::MemOp::LOAD;
         regsize = opc.Bit<0>() ? 32 : 64;
         is_signed = true;
     }
@@ -78,10 +78,10 @@ static bool LoadRegisterSigned(TranslatorVisitor& v, const size_t datasize,
     address = v.ir.Add(address, v.ir.Imm64(offset));
 
     switch (memop) {
-    case MemOp::STORE:
+    case IR::MemOp::STORE:
         v.Mem(address, datasize / 8, acctype, v.X(datasize, Rt));
         break;
-    case MemOp::LOAD: {
+    case IR::MemOp::LOAD: {
         const IR::UAny data = v.Mem(address, datasize / 8, acctype);
         if (is_signed) {
             v.X(regsize, Rt, v.SignExtend(data, regsize));
@@ -90,7 +90,7 @@ static bool LoadRegisterSigned(TranslatorVisitor& v, const size_t datasize,
         }
         break;
     }
-    case MemOp::PREFETCH:
+    case IR::MemOp::PREFETCH:
         // Prefetch(address, Rt);
         break;
     }
@@ -135,7 +135,7 @@ bool TranslatorVisitor::LDTRSH(Imm<2> opc, Imm<9> imm9, Reg Rn, Reg Rt) {
 
 bool TranslatorVisitor::LDTRSW(Imm<9> imm9, Reg Rn, Reg Rt) {
     const u64 offset = imm9.SignExtend<u64>();
-    const AccType acctype = AccType::UNPRIV;
+    const auto acctype = IR::AccType::UNPRIV;
 
     IR::U64 address;
     if (Rn == Reg::SP) {
@@ -10,7 +10,7 @@
 
 namespace Dynarmic::A64 {
 
-static bool SharedDecodeAndOperation(TranslatorVisitor& v, bool wback, MemOp memop,
+static bool SharedDecodeAndOperation(TranslatorVisitor& v, bool wback, IR::MemOp memop,
                                      bool Q, bool S, bool R, bool replicate, std::optional<Reg> Rm,
                                      Imm<3> opcode, Imm<2> size, Reg Rn, Vec Vt) {
     const size_t selem = (opcode.Bit<0>() << 1 | u32{R}) + 1;
@@ -42,7 +42,7 @@ static bool SharedDecodeAndOperation(TranslatorVisitor& v, bool wback, MemOp mem
         }
         break;
     case 3:
-        if (memop == MemOp::STORE || S) {
+        if (memop == IR::MemOp::STORE || S) {
             return v.UnallocatedEncoding();
         }
         scale = size.ZeroExtend();
@@ -65,7 +65,7 @@ static bool SharedDecodeAndOperation(TranslatorVisitor& v, bool wback, MemOp mem
     if (replicate) {
         for (size_t s = 0; s < selem; s++) {
             const Vec tt = static_cast<Vec>((VecNumber(Vt) + s) % 32);
-            const IR::UAnyU128 element = v.Mem(v.ir.Add(address, offs), ebytes, AccType::VEC);
+            const IR::UAnyU128 element = v.Mem(v.ir.Add(address, offs), ebytes, IR::AccType::VEC);
             const IR::U128 broadcasted_element = v.ir.VectorBroadcast(esize, element);
 
             v.V(datasize, tt, broadcasted_element);
@@ -77,13 +77,13 @@ static bool SharedDecodeAndOperation(TranslatorVisitor& v, bool wback, MemOp mem
             const Vec tt = static_cast<Vec>((VecNumber(Vt) + s) % 32);
             const IR::U128 rval = v.V(128, tt);
 
-            if (memop == MemOp::LOAD) {
-                const IR::UAny elem = v.Mem(v.ir.Add(address, offs), ebytes, AccType::VEC);
+            if (memop == IR::MemOp::LOAD) {
+                const IR::UAny elem = v.Mem(v.ir.Add(address, offs), ebytes, IR::AccType::VEC);
                 const IR::U128 vec = v.ir.VectorSetElement(esize, rval, index, elem);
                 v.V(128, tt, vec);
             } else {
                 const IR::UAny elem = v.ir.VectorGetElement(esize, rval, index);
-                v.Mem(v.ir.Add(address, offs), ebytes, AccType::VEC, elem);
+                v.Mem(v.ir.Add(address, offs), ebytes, IR::AccType::VEC, elem);
             }
             offs = v.ir.Add(offs, v.ir.Imm64(ebytes));
         }
@@ -105,114 +105,122 @@ static bool SharedDecodeAndOperation(TranslatorVisitor& v, bool wback, MemOp mem
 }
 
 bool TranslatorVisitor::LD1_sngl_1(bool Q, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
-    return SharedDecodeAndOperation(*this, false, MemOp::LOAD, Q, S, false, false, {},
+    return SharedDecodeAndOperation(*this, false, IR::MemOp::LOAD, Q, S, false, false, {},
                                     Imm<3>{upper_opcode.ZeroExtend() << 1}, size, Rn, Vt);
 }
 
 bool TranslatorVisitor::LD1_sngl_2(bool Q, Reg Rm, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
-    return SharedDecodeAndOperation(*this, true, MemOp::LOAD, Q, S, false, false, Rm,
+    return SharedDecodeAndOperation(*this, true, IR::MemOp::LOAD, Q, S, false, false, Rm,
                                     Imm<3>{upper_opcode.ZeroExtend() << 1}, size, Rn, Vt);
 }
 
 bool TranslatorVisitor::LD1R_1(bool Q, Imm<2> size, Reg Rn, Vec Vt) {
-    return SharedDecodeAndOperation(*this, false, MemOp::LOAD, Q, false, false, true, {}, Imm<3>{0b110}, size, Rn, Vt);
+    return SharedDecodeAndOperation(*this, false, IR::MemOp::LOAD, Q, false, false, true,
+                                    {}, Imm<3>{0b110}, size, Rn, Vt);
 }
 
 bool TranslatorVisitor::LD1R_2(bool Q, Reg Rm, Imm<2> size, Reg Rn, Vec Vt) {
-    return SharedDecodeAndOperation(*this, true, MemOp::LOAD, Q, false, false, true, Rm, Imm<3>{0b110}, size, Rn, Vt);
+    return SharedDecodeAndOperation(*this, true, IR::MemOp::LOAD, Q, false, false, true,
+                                    Rm, Imm<3>{0b110}, size, Rn, Vt);
 }
 
 bool TranslatorVisitor::LD2_sngl_1(bool Q, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
-    return SharedDecodeAndOperation(*this, false, MemOp::LOAD, Q, S, true, false, {},
+    return SharedDecodeAndOperation(*this, false, IR::MemOp::LOAD, Q, S, true, false, {},
                                     Imm<3>{upper_opcode.ZeroExtend() << 1}, size, Rn, Vt);
 }
 
 bool TranslatorVisitor::LD2_sngl_2(bool Q, Reg Rm, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
-    return SharedDecodeAndOperation(*this, true, MemOp::LOAD, Q, S, true, false, Rm,
+    return SharedDecodeAndOperation(*this, true, IR::MemOp::LOAD, Q, S, true, false, Rm,
                                     Imm<3>{upper_opcode.ZeroExtend() << 1}, size, Rn, Vt);
 }
 
 bool TranslatorVisitor::LD2R_1(bool Q, Imm<2> size, Reg Rn, Vec Vt) {
-    return SharedDecodeAndOperation(*this, false, MemOp::LOAD, Q, false, true, true, {}, Imm<3>{0b110}, size, Rn, Vt);
+    return SharedDecodeAndOperation(*this, false, IR::MemOp::LOAD, Q, false, true, true,
+                                    {}, Imm<3>{0b110}, size, Rn, Vt);
 }
 
 bool TranslatorVisitor::LD2R_2(bool Q, Reg Rm, Imm<2> size, Reg Rn, Vec Vt) {
-    return SharedDecodeAndOperation(*this, true, MemOp::LOAD, Q, false, true, true, Rm, Imm<3>{0b110}, size, Rn, Vt);
+    return SharedDecodeAndOperation(*this, true, IR::MemOp::LOAD, Q, false, true, true,
+                                    Rm, Imm<3>{0b110}, size, Rn, Vt);
 }
 
 bool TranslatorVisitor::LD3_sngl_1(bool Q, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
-    return SharedDecodeAndOperation(*this, false, MemOp::LOAD, Q, S, false, false, {},
+    return SharedDecodeAndOperation(*this, false, IR::MemOp::LOAD, Q, S, false, false, {},
                                     Imm<3>{(upper_opcode.ZeroExtend() << 1) | 1}, size, Rn, Vt);
 }
 
 bool TranslatorVisitor::LD3_sngl_2(bool Q, Reg Rm, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
-    return SharedDecodeAndOperation(*this, true, MemOp::LOAD, Q, S, false, false, Rm,
+    return SharedDecodeAndOperation(*this, true, IR::MemOp::LOAD, Q, S, false, false, Rm,
                                     Imm<3>{(upper_opcode.ZeroExtend() << 1) | 1}, size, Rn, Vt);
 }
 
 bool TranslatorVisitor::LD3R_1(bool Q, Imm<2> size, Reg Rn, Vec Vt) {
-    return SharedDecodeAndOperation(*this, false, MemOp::LOAD, Q, false, false, true, {}, Imm<3>{0b111}, size, Rn, Vt);
+    return SharedDecodeAndOperation(*this, false, IR::MemOp::LOAD, Q, false, false, true,
+                                    {}, Imm<3>{0b111}, size, Rn, Vt);
 }
 
 bool TranslatorVisitor::LD3R_2(bool Q, Reg Rm, Imm<2> size, Reg Rn, Vec Vt) {
-    return SharedDecodeAndOperation(*this, true, MemOp::LOAD, Q, false, false, true, Rm, Imm<3>{0b111}, size, Rn, Vt);
+    return SharedDecodeAndOperation(*this, true, IR::MemOp::LOAD, Q, false, false, true,
                                    Rm, Imm<3>{0b111}, size, Rn, Vt);
 }
 
 bool TranslatorVisitor::LD4_sngl_1(bool Q, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
-    return SharedDecodeAndOperation(*this, false, MemOp::LOAD, Q, S, true, false, {},
+    return SharedDecodeAndOperation(*this, false, IR::MemOp::LOAD, Q, S, true, false, {},
                                     Imm<3>{(upper_opcode.ZeroExtend() << 1) | 1}, size, Rn, Vt);
 }
 
 bool TranslatorVisitor::LD4_sngl_2(bool Q, Reg Rm, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
-    return SharedDecodeAndOperation(*this, true, MemOp::LOAD, Q, S, true, false, Rm,
+    return SharedDecodeAndOperation(*this, true, IR::MemOp::LOAD, Q, S, true, false, Rm,
                                     Imm<3>{(upper_opcode.ZeroExtend() << 1) | 1}, size, Rn, Vt);
 }
 
 bool TranslatorVisitor::LD4R_1(bool Q, Imm<2> size, Reg Rn, Vec Vt) {
-    return SharedDecodeAndOperation(*this, false, MemOp::LOAD, Q, false, true, true, {}, Imm<3>{0b111}, size, Rn, Vt);
+    return SharedDecodeAndOperation(*this, false, IR::MemOp::LOAD, Q, false, true, true,
                                    {}, Imm<3>{0b111}, size, Rn, Vt);
 }
 
 bool TranslatorVisitor::LD4R_2(bool Q, Reg Rm, Imm<2> size, Reg Rn, Vec Vt) {
-    return SharedDecodeAndOperation(*this, true, MemOp::LOAD, Q, false, true, true, Rm, Imm<3>{0b111}, size, Rn, Vt);
+    return SharedDecodeAndOperation(*this, true, IR::MemOp::LOAD, Q, false, true, true,
                                    Rm, Imm<3>{0b111}, size, Rn, Vt);
 }
 
 bool TranslatorVisitor::ST1_sngl_1(bool Q, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
-    return SharedDecodeAndOperation(*this, false, MemOp::STORE, Q, S, false, false, {},
+    return SharedDecodeAndOperation(*this, false, IR::MemOp::STORE, Q, S, false, false, {},
                                     Imm<3>{upper_opcode.ZeroExtend() << 1}, size, Rn, Vt);
 }
 
 bool TranslatorVisitor::ST1_sngl_2(bool Q, Reg Rm, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
-    return SharedDecodeAndOperation(*this, true, MemOp::STORE, Q, S, false, false, Rm,
+    return SharedDecodeAndOperation(*this, true, IR::MemOp::STORE, Q, S, false, false, Rm,
                                     Imm<3>{upper_opcode.ZeroExtend() << 1}, size, Rn, Vt);
 }
 
 bool TranslatorVisitor::ST2_sngl_1(bool Q, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
-    return SharedDecodeAndOperation(*this, false, MemOp::STORE, Q, S, true, false, {},
+    return SharedDecodeAndOperation(*this, false, IR::MemOp::STORE, Q, S, true, false, {},
                                     Imm<3>{upper_opcode.ZeroExtend() << 1}, size, Rn, Vt);
 }
 
 bool TranslatorVisitor::ST2_sngl_2(bool Q, Reg Rm, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
-    return SharedDecodeAndOperation(*this, true, MemOp::STORE, Q, S, true, false, Rm,
+    return SharedDecodeAndOperation(*this, true, IR::MemOp::STORE, Q, S, true, false, Rm,
                                     Imm<3>{upper_opcode.ZeroExtend() << 1}, size, Rn, Vt);
 }
 
 bool TranslatorVisitor::ST3_sngl_1(bool Q, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
-    return SharedDecodeAndOperation(*this, false, MemOp::STORE, Q, S, false, false, {},
+    return SharedDecodeAndOperation(*this, false, IR::MemOp::STORE, Q, S, false, false, {},
                                     Imm<3>{(upper_opcode.ZeroExtend() << 1) | 1}, size, Rn, Vt);
 }
 
 bool TranslatorVisitor::ST3_sngl_2(bool Q, Reg Rm, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
-    return SharedDecodeAndOperation(*this, true, MemOp::STORE, Q, S, false, false, Rm,
+    return SharedDecodeAndOperation(*this, true, IR::MemOp::STORE, Q, S, false, false, Rm,
                                     Imm<3>{(upper_opcode.ZeroExtend() << 1) | 1}, size, Rn, Vt);
 }
 
 bool TranslatorVisitor::ST4_sngl_1(bool Q, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
-    return SharedDecodeAndOperation(*this, false, MemOp::STORE, Q, S, true, false, {},
+    return SharedDecodeAndOperation(*this, false, IR::MemOp::STORE, Q, S, true, false, {},
                                     Imm<3>{(upper_opcode.ZeroExtend() << 1) | 1}, size, Rn, Vt);
 }
 
 bool TranslatorVisitor::ST4_sngl_2(bool Q, Reg Rm, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
-    return SharedDecodeAndOperation(*this, true, MemOp::STORE, Q, S, true, false, Rm,
+    return SharedDecodeAndOperation(*this, true, IR::MemOp::STORE, Q, S, true, false, Rm,
                                     Imm<3>{(upper_opcode.ZeroExtend() << 1) | 1}, size, Rn, Vt);
 }
 
@@ -57,6 +57,16 @@ struct UpperAndLower {
     U128 lower;
 };
 
+enum class AccType {
+    NORMAL, VEC, STREAM, VECSTREAM,
+    ATOMIC, ORDERED, ORDEREDRW, LIMITEDORDERED,
+    UNPRIV, IFETCH, PTW, DC, IC, DCZVA, AT,
+};
+
+enum class MemOp {
+    LOAD, STORE, PREFETCH,
+};
+
 /**
  * Convenience class to construct a basic block of the intermediate representation.
  * `block` is the resulting block.
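A side note on the destination: because AccType and MemOp remain scoped enums after the move, shared code can switch over them exhaustively and let the compiler flag unhandled cases. A minimal sketch of that pattern — the AccessCount helper is hypothetical, not from this commit:

    // Hypothetical: counts memory accesses implied by a MemOp.
    static size_t AccessCount(IR::MemOp memop) {
        switch (memop) {
        case IR::MemOp::LOAD:
        case IR::MemOp::STORE:
            return 1;
        case IR::MemOp::PREFETCH:
            return 0;
        }
        return 0;  // unreachable for valid enum values
    }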