A64/translate: Standardize arguments of helper functions

Don't pass in IREmitter when TranslatorVisitor is already available.
This commit is contained in:
MerryMage 2018-08-21 12:26:46 +01:00
parent a4e556d59c
commit 3b13f1eb12
9 changed files with 134 additions and 136 deletions

View file

@@ -8,9 +8,8 @@
namespace Dynarmic::A64 {
static IR::U8 SanitizeShiftAmount(TranslatorVisitor& v, IREmitter& ir, size_t datasize,
const IR::U32U64& amount) {
return ir.LeastSignificantByte(ir.And(amount, v.I(datasize, datasize - 1)));
static IR::U8 SanitizeShiftAmount(TranslatorVisitor& v, size_t datasize, const IR::U32U64& amount) {
return v.ir.LeastSignificantByte(v.ir.And(amount, v.I(datasize, datasize - 1)));
}
bool TranslatorVisitor::LSLV(bool sf, Reg Rm, Reg Rn, Reg Rd) {
@@ -19,7 +18,7 @@ bool TranslatorVisitor::LSLV(bool sf, Reg Rm, Reg Rn, Reg Rd) {
const IR::U32U64 operand = X(datasize, Rn);
const IR::U32U64 shift_amount = X(datasize, Rm);
const IR::U32U64 result = ir.LogicalShiftLeft(operand, SanitizeShiftAmount(*this, ir, datasize, shift_amount));
const IR::U32U64 result = ir.LogicalShiftLeft(operand, SanitizeShiftAmount(*this, datasize, shift_amount));
X(datasize, Rd, result);
return true;
@@ -31,7 +30,7 @@ bool TranslatorVisitor::LSRV(bool sf, Reg Rm, Reg Rn, Reg Rd) {
const IR::U32U64 operand = X(datasize, Rn);
const IR::U32U64 shift_amount = X(datasize, Rm);
const IR::U32U64 result = ir.LogicalShiftRight(operand, SanitizeShiftAmount(*this, ir, datasize, shift_amount));
const IR::U32U64 result = ir.LogicalShiftRight(operand, SanitizeShiftAmount(*this, datasize, shift_amount));
X(datasize, Rd, result);
return true;
@@ -43,7 +42,7 @@ bool TranslatorVisitor::ASRV(bool sf, Reg Rm, Reg Rn, Reg Rd) {
const IR::U32U64 operand = X(datasize, Rn);
const IR::U32U64 shift_amount = X(datasize, Rm);
const IR::U32U64 result = ir.ArithmeticShiftRight(operand, SanitizeShiftAmount(*this, ir, datasize, shift_amount));
const IR::U32U64 result = ir.ArithmeticShiftRight(operand, SanitizeShiftAmount(*this, datasize, shift_amount));
X(datasize, Rd, result);
return true;
@@ -55,7 +54,7 @@ bool TranslatorVisitor::RORV(bool sf, Reg Rm, Reg Rn, Reg Rd) {
const IR::U32U64 operand = X(datasize, Rn);
const IR::U32U64 shift_amount = X(datasize, Rm);
const IR::U32U64 result = ir.RotateRight(operand, SanitizeShiftAmount(*this, ir, datasize, shift_amount));
const IR::U32U64 result = ir.RotateRight(operand, SanitizeShiftAmount(*this, datasize, shift_amount));
X(datasize, Rd, result);
return true;

View file

@@ -244,7 +244,6 @@ struct TranslatorVisitor final {
bool STP_LDP_fpsimd(Imm<2> opc, bool not_postindex, bool wback, Imm<1> L, Imm<7> imm7, Vec Vt2, Reg Rn, Vec Vt);
// Loads and stores - Load/Store register (immediate)
bool load_store_register_immediate(bool wback, bool postindex, size_t scale, u64 offset, Imm<2> size, Imm<2> opc, Reg Rn, Reg Rt);
bool STRx_LDRx_imm_1(Imm<2> size, Imm<2> opc, Imm<9> imm9, bool not_postindex, Reg Rn, Reg Rt);
bool STRx_LDRx_imm_2(Imm<2> size, Imm<2> opc, Imm<12> imm12, Reg Rn, Reg Rt);
bool STURx_LDURx(Imm<2> size, Imm<2> opc, Imm<9> imm9, Reg Rn, Reg Rt);

View file

@@ -10,7 +10,7 @@
namespace Dynarmic::A64 {
static bool ExclusiveSharedDecodeAndOperation(TranslatorVisitor& v, IREmitter& ir, bool pair, size_t size, bool L, bool o0, boost::optional<Reg> Rs, boost::optional<Reg> Rt2, Reg Rn, Reg Rt) {
static bool ExclusiveSharedDecodeAndOperation(TranslatorVisitor& v, bool pair, size_t size, bool L, bool o0, boost::optional<Reg> Rs, boost::optional<Reg> Rt2, Reg Rn, Reg Rt) {
// Shared Decode
const AccType acctype = o0 ? AccType::ORDERED : AccType::ATOMIC;
@@ -46,9 +46,9 @@ static bool ExclusiveSharedDecodeAndOperation(TranslatorVisitor& v, IREmitter& i
case MemOp::STORE: {
IR::UAnyU128 data;
if (pair && elsize == 64) {
data = ir.Pack2x64To1x128(v.X(64, Rt), v.X(64, *Rt2));
data = v.ir.Pack2x64To1x128(v.X(64, Rt), v.X(64, *Rt2));
} else if (pair && elsize == 32) {
data = ir.Pack2x32To1x64(v.X(32, Rt), v.X(32, *Rt2));
data = v.ir.Pack2x32To1x64(v.X(32, Rt), v.X(32, *Rt2));
} else {
data = v.X(elsize, Rt);
}
@@ -57,14 +57,14 @@ static bool ExclusiveSharedDecodeAndOperation(TranslatorVisitor& v, IREmitter& i
break;
}
case MemOp::LOAD: {
ir.SetExclusive(address, dbytes);
v.ir.SetExclusive(address, dbytes);
IR::UAnyU128 data = v.Mem(address, dbytes, acctype);
if (pair && elsize == 64) {
v.X(64, Rt, ir.VectorGetElement(64, data, 0));
v.X(64, *Rt2, ir.VectorGetElement(64, data, 1));
v.X(64, Rt, v.ir.VectorGetElement(64, data, 0));
v.X(64, *Rt2, v.ir.VectorGetElement(64, data, 1));
} else if (pair && elsize == 32) {
v.X(32, Rt, ir.LeastSignificantWord(data));
v.X(32, *Rt2, ir.MostSignificantWord(data).result);
v.X(32, Rt, v.ir.LeastSignificantWord(data));
v.X(32, *Rt2, v.ir.MostSignificantWord(data).result);
} else {
v.X(regsize, Rt, v.ZeroExtend(data, regsize));
}
@@ -82,7 +82,7 @@ bool TranslatorVisitor::STXR(Imm<2> sz, Reg Rs, Reg Rn, Reg Rt) {
const size_t size = sz.ZeroExtend<size_t>();
const bool L = 0;
const bool o0 = 0;
return ExclusiveSharedDecodeAndOperation(*this, ir, pair, size, L, o0, Rs, {}, Rn, Rt);
return ExclusiveSharedDecodeAndOperation(*this, pair, size, L, o0, Rs, {}, Rn, Rt);
}
bool TranslatorVisitor::STLXR(Imm<2> sz, Reg Rs, Reg Rn, Reg Rt) {
@@ -90,7 +90,7 @@ bool TranslatorVisitor::STLXR(Imm<2> sz, Reg Rs, Reg Rn, Reg Rt) {
const size_t size = sz.ZeroExtend<size_t>();
const bool L = 0;
const bool o0 = 1;
return ExclusiveSharedDecodeAndOperation(*this, ir, pair, size, L, o0, Rs, {}, Rn, Rt);
return ExclusiveSharedDecodeAndOperation(*this, pair, size, L, o0, Rs, {}, Rn, Rt);
}
bool TranslatorVisitor::STXP(Imm<1> sz, Reg Rs, Reg Rt2, Reg Rn, Reg Rt) {
@@ -98,7 +98,7 @@ bool TranslatorVisitor::STXP(Imm<1> sz, Reg Rs, Reg Rt2, Reg Rn, Reg Rt) {
const size_t size = concatenate(Imm<1>{1}, sz).ZeroExtend<size_t>();
const bool L = 0;
const bool o0 = 0;
return ExclusiveSharedDecodeAndOperation(*this, ir, pair, size, L, o0, Rs, Rt2, Rn, Rt);
return ExclusiveSharedDecodeAndOperation(*this, pair, size, L, o0, Rs, Rt2, Rn, Rt);
}
bool TranslatorVisitor::STLXP(Imm<1> sz, Reg Rs, Reg Rt2, Reg Rn, Reg Rt) {
@@ -106,7 +106,7 @@ bool TranslatorVisitor::STLXP(Imm<1> sz, Reg Rs, Reg Rt2, Reg Rn, Reg Rt) {
const size_t size = concatenate(Imm<1>{1}, sz).ZeroExtend<size_t>();
const bool L = 0;
const bool o0 = 1;
return ExclusiveSharedDecodeAndOperation(*this, ir, pair, size, L, o0, Rs, Rt2, Rn, Rt);
return ExclusiveSharedDecodeAndOperation(*this, pair, size, L, o0, Rs, Rt2, Rn, Rt);
}
bool TranslatorVisitor::LDXR(Imm<2> sz, Reg Rn, Reg Rt) {
@@ -114,7 +114,7 @@ bool TranslatorVisitor::LDXR(Imm<2> sz, Reg Rn, Reg Rt) {
const size_t size = sz.ZeroExtend<size_t>();
const bool L = 1;
const bool o0 = 0;
return ExclusiveSharedDecodeAndOperation(*this, ir, pair, size, L, o0, {}, {}, Rn, Rt);
return ExclusiveSharedDecodeAndOperation(*this, pair, size, L, o0, {}, {}, Rn, Rt);
}
bool TranslatorVisitor::LDAXR(Imm<2> sz, Reg Rn, Reg Rt) {
@@ -122,7 +122,7 @@ bool TranslatorVisitor::LDAXR(Imm<2> sz, Reg Rn, Reg Rt) {
const size_t size = sz.ZeroExtend<size_t>();
const bool L = 1;
const bool o0 = 1;
return ExclusiveSharedDecodeAndOperation(*this, ir, pair, size, L, o0, {}, {}, Rn, Rt);
return ExclusiveSharedDecodeAndOperation(*this, pair, size, L, o0, {}, {}, Rn, Rt);
}
bool TranslatorVisitor::LDXP(Imm<1> sz, Reg Rt2, Reg Rn, Reg Rt) {
@@ -130,7 +130,7 @@ bool TranslatorVisitor::LDXP(Imm<1> sz, Reg Rt2, Reg Rn, Reg Rt) {
const size_t size = concatenate(Imm<1>{1}, sz).ZeroExtend<size_t>();
const bool L = 1;
const bool o0 = 0;
return ExclusiveSharedDecodeAndOperation(*this, ir, pair, size, L, o0, {}, Rt2, Rn, Rt);
return ExclusiveSharedDecodeAndOperation(*this, pair, size, L, o0, {}, Rt2, Rn, Rt);
}
bool TranslatorVisitor::LDAXP(Imm<1> sz, Reg Rt2, Reg Rn, Reg Rt) {
@@ -138,7 +138,7 @@ bool TranslatorVisitor::LDAXP(Imm<1> sz, Reg Rt2, Reg Rn, Reg Rt) {
const size_t size = concatenate(Imm<1>{1}, sz).ZeroExtend<size_t>();
const bool L = 1;
const bool o0 = 1;
return ExclusiveSharedDecodeAndOperation(*this, ir, pair, size, L, o0, {}, Rt2, Rn, Rt);
return ExclusiveSharedDecodeAndOperation(*this, pair, size, L, o0, {}, Rt2, Rn, Rt);
}
static bool OrderedSharedDecodeAndOperation(TranslatorVisitor& v, size_t size, bool L, bool o0, Reg Rn, Reg Rt) {

View file

@@ -12,7 +12,7 @@
namespace Dynarmic::A64 {
static bool SharedDecodeAndOperation(TranslatorVisitor& v, IREmitter& ir, bool wback, MemOp memop, bool Q, boost::optional<Reg> Rm, Imm<4> opcode, Imm<2> size, Reg Rn, Vec Vt) {
static bool SharedDecodeAndOperation(TranslatorVisitor& v, bool wback, MemOp memop, bool Q, boost::optional<Reg> Rm, Imm<4> opcode, Imm<2> size, Reg Rn, Vec Vt) {
const size_t datasize = Q ? 128 : 64;
const size_t esize = 8 << size.ZeroExtend<size_t>();
const size_t elements = datasize / esize;
@@ -64,32 +64,32 @@ static bool SharedDecodeAndOperation(TranslatorVisitor& v, IREmitter& ir, bool w
else
address = v.X(64, Rn);
IR::U64 offs = ir.Imm64(0);
IR::U64 offs = v.ir.Imm64(0);
if (selem == 1) {
for (size_t r = 0; r < rpt; r++) {
const Vec tt = static_cast<Vec>((VecNumber(Vt) + r) % 32);
if (memop == MemOp::LOAD) {
const IR::UAnyU128 vec = v.Mem(ir.Add(address, offs), ebytes * elements, AccType::VEC);
const IR::UAnyU128 vec = v.Mem(v.ir.Add(address, offs), ebytes * elements, AccType::VEC);
v.V_scalar(datasize, tt, vec);
} else {
const IR::UAnyU128 vec = v.V_scalar(datasize, tt);
v.Mem(ir.Add(address, offs), ebytes * elements, AccType::VEC, vec);
v.Mem(v.ir.Add(address, offs), ebytes * elements, AccType::VEC, vec);
}
offs = ir.Add(offs, ir.Imm64(ebytes * elements));
offs = v.ir.Add(offs, v.ir.Imm64(ebytes * elements));
}
} else {
for (size_t e = 0; e < elements; e++) {
for (size_t s = 0; s < selem; s++) {
const Vec tt = static_cast<Vec>((VecNumber(Vt) + s) % 32);
if (memop == MemOp::LOAD) {
const IR::UAny elem = v.Mem(ir.Add(address, offs), ebytes, AccType::VEC);
const IR::U128 vec = ir.VectorSetElement(esize, v.V(datasize, tt), e, elem);
const IR::UAny elem = v.Mem(v.ir.Add(address, offs), ebytes, AccType::VEC);
const IR::U128 vec = v.ir.VectorSetElement(esize, v.V(datasize, tt), e, elem);
v.V(datasize, tt, vec);
} else {
const IR::UAny elem = ir.VectorGetElement(esize, v.V(datasize, tt), e);
v.Mem(ir.Add(address, offs), ebytes, AccType::VEC, elem);
const IR::UAny elem = v.ir.VectorGetElement(esize, v.V(datasize, tt), e);
v.Mem(v.ir.Add(address, offs), ebytes, AccType::VEC, elem);
}
offs = ir.Add(offs, ir.Imm64(ebytes));
offs = v.ir.Add(offs, v.ir.Imm64(ebytes));
}
}
}
@@ -98,9 +98,9 @@ static bool SharedDecodeAndOperation(TranslatorVisitor& v, IREmitter& ir, bool w
if (*Rm != Reg::SP)
offs = v.X(64, *Rm);
if (Rn == Reg::SP)
v.SP(64, ir.Add(address, offs));
v.SP(64, v.ir.Add(address, offs));
else
v.X(64, Rn, ir.Add(address, offs));
v.X(64, Rn, v.ir.Add(address, offs));
}
return true;
@@ -109,25 +109,25 @@ static bool SharedDecodeAndOperation(TranslatorVisitor& v, IREmitter& ir, bool w
bool TranslatorVisitor::STx_mult_1(bool Q, Imm<4> opcode, Imm<2> size, Reg Rn, Vec Vt) {
const bool wback = false;
const MemOp memop = MemOp::STORE;
return SharedDecodeAndOperation(*this, ir, wback, memop, Q, {}, opcode, size, Rn, Vt);
return SharedDecodeAndOperation(*this, wback, memop, Q, {}, opcode, size, Rn, Vt);
}
bool TranslatorVisitor::STx_mult_2(bool Q, Reg Rm, Imm<4> opcode, Imm<2> size, Reg Rn, Vec Vt) {
const bool wback = true;
const MemOp memop = MemOp::STORE;
return SharedDecodeAndOperation(*this, ir, wback, memop, Q, Rm, opcode, size, Rn, Vt);
return SharedDecodeAndOperation(*this, wback, memop, Q, Rm, opcode, size, Rn, Vt);
}
bool TranslatorVisitor::LDx_mult_1(bool Q, Imm<4> opcode, Imm<2> size, Reg Rn, Vec Vt) {
const bool wback = false;
const MemOp memop = MemOp::LOAD;
return SharedDecodeAndOperation(*this, ir, wback, memop, Q, {}, opcode, size, Rn, Vt);
return SharedDecodeAndOperation(*this, wback, memop, Q, {}, opcode, size, Rn, Vt);
}
bool TranslatorVisitor::LDx_mult_2(bool Q, Reg Rm, Imm<4> opcode, Imm<2> size, Reg Rn, Vec Vt) {
const bool wback = true;
const MemOp memop = MemOp::LOAD;
return SharedDecodeAndOperation(*this, ir, wback, memop, Q, Rm, opcode, size, Rn, Vt);
return SharedDecodeAndOperation(*this, wback, memop, Q, Rm, opcode, size, Rn, Vt);
}
} // namespace Dynarmic::A64

View file

@@ -8,7 +8,7 @@
namespace Dynarmic::A64 {
bool TranslatorVisitor::load_store_register_immediate(bool wback, bool postindex, size_t scale, u64 offset, Imm<2> size, Imm<2> opc, Reg Rn, Reg Rt) {
static bool LoadStoreRegisterImmediate(TranslatorVisitor& v, bool wback, bool postindex, size_t scale, u64 offset, Imm<2> size, Imm<2> opc, Reg Rn, Reg Rt) {
MemOp memop;
bool signed_ = false;
size_t regsize = 0;
@@ -30,30 +30,30 @@ bool TranslatorVisitor::load_store_register_immediate(bool wback, bool postindex
const size_t datasize = 8 << scale;
if (memop == MemOp::LOAD && wback && Rn == Rt && Rn != Reg::R31) {
return UnpredictableInstruction();
return v.UnpredictableInstruction();
}
if (memop == MemOp::STORE && wback && Rn == Rt && Rn != Reg::R31) {
return UnpredictableInstruction();
return v.UnpredictableInstruction();
}
// TODO: Check SP alignment
IR::U64 address = Rn == Reg::SP ? IR::U64(SP(64)) : IR::U64(X(64, Rn));
IR::U64 address = Rn == Reg::SP ? IR::U64(v.SP(64)) : IR::U64(v.X(64, Rn));
if (!postindex)
address = ir.Add(address, ir.Imm64(offset));
address = v.ir.Add(address, v.ir.Imm64(offset));
switch (memop) {
case MemOp::STORE: {
auto data = X(datasize, Rt);
Mem(address, datasize / 8, AccType::NORMAL, data);
auto data = v.X(datasize, Rt);
v.Mem(address, datasize / 8, AccType::NORMAL, data);
break;
}
case MemOp::LOAD: {
auto data = Mem(address, datasize / 8, AccType::NORMAL);
auto data = v.Mem(address, datasize / 8, AccType::NORMAL);
if (signed_)
X(regsize, Rt, SignExtend(data, regsize));
v.X(regsize, Rt, v.SignExtend(data, regsize));
else
X(regsize, Rt, ZeroExtend(data, regsize));
v.X(regsize, Rt, v.ZeroExtend(data, regsize));
break;
}
case MemOp::PREFETCH:
@@ -63,11 +63,11 @@ bool TranslatorVisitor::load_store_register_immediate(bool wback, bool postindex
if (wback) {
if (postindex)
address = ir.Add(address, ir.Imm64(offset));
address = v.ir.Add(address, v.ir.Imm64(offset));
if (Rn == Reg::SP)
SP(64, address);
v.SP(64, address);
else
X(64, Rn, address);
v.X(64, Rn, address);
}
return true;
@@ -79,7 +79,7 @@ bool TranslatorVisitor::STRx_LDRx_imm_1(Imm<2> size, Imm<2> opc, Imm<9> imm9, bo
const size_t scale = size.ZeroExtend<size_t>();
const u64 offset = imm9.SignExtend<u64>();
return load_store_register_immediate(wback, postindex, scale, offset, size, opc, Rn, Rt);
return LoadStoreRegisterImmediate(*this, wback, postindex, scale, offset, size, opc, Rn, Rt);
}
bool TranslatorVisitor::STRx_LDRx_imm_2(Imm<2> size, Imm<2> opc, Imm<12> imm12, Reg Rn, Reg Rt) {
@@ -88,7 +88,7 @@ bool TranslatorVisitor::STRx_LDRx_imm_2(Imm<2> size, Imm<2> opc, Imm<12> imm12,
const size_t scale = size.ZeroExtend<size_t>();
const u64 offset = imm12.ZeroExtend<u64>() << scale;
return load_store_register_immediate(wback, postindex, scale, offset, size, opc, Rn, Rt);
return LoadStoreRegisterImmediate(*this, wback, postindex, scale, offset, size, opc, Rn, Rt);
}
bool TranslatorVisitor::STURx_LDURx(Imm<2> size, Imm<2> opc, Imm<9> imm9, Reg Rn, Reg Rt) {
@@ -97,7 +97,7 @@ bool TranslatorVisitor::STURx_LDURx(Imm<2> size, Imm<2> opc, Imm<9> imm9, Reg Rn
const size_t scale = size.ZeroExtend<size_t>();
const u64 offset = imm9.SignExtend<u64>();
return load_store_register_immediate(wback, postindex, scale, offset, size, opc, Rn, Rt);
return LoadStoreRegisterImmediate(*this, wback, postindex, scale, offset, size, opc, Rn, Rt);
}
bool TranslatorVisitor::PRFM_imm([[maybe_unused]] Imm<12> imm12, [[maybe_unused]] Reg Rn, [[maybe_unused]] Reg Rt) {
@@ -112,7 +112,7 @@ bool TranslatorVisitor::PRFM_unscaled_imm([[maybe_unused]] Imm<9> imm9, [[maybe_
return true;
}
static bool LoadStoreSIMD(TranslatorVisitor& v, IREmitter& ir, bool wback, bool postindex, size_t scale, u64 offset, MemOp memop, Reg Rn, Vec Vt) {
static bool LoadStoreSIMD(TranslatorVisitor& v, bool wback, bool postindex, size_t scale, u64 offset, MemOp memop, Reg Rn, Vec Vt) {
const AccType acctype = AccType::VEC;
const size_t datasize = 8 << scale;
@@ -125,7 +125,7 @@ static bool LoadStoreSIMD(TranslatorVisitor& v, IREmitter& ir, bool wback, bool
}
if (!postindex) {
address = ir.Add(address, ir.Imm64(offset));
address = v.ir.Add(address, v.ir.Imm64(offset));
}
switch (memop) {
@@ -134,7 +134,7 @@ static bool LoadStoreSIMD(TranslatorVisitor& v, IREmitter& ir, bool wback, bool
const IR::U128 data = v.V(128, Vt);
v.Mem(address, 16, acctype, data);
} else {
const IR::UAny data = ir.VectorGetElement(datasize, v.V(128, Vt), 0);
const IR::UAny data = v.ir.VectorGetElement(datasize, v.V(128, Vt), 0);
v.Mem(address, datasize / 8, acctype, data);
}
break;
@@ -144,7 +144,7 @@ static bool LoadStoreSIMD(TranslatorVisitor& v, IREmitter& ir, bool wback, bool
v.V(128, Vt, data);
} else {
const IR::UAny data = v.Mem(address, datasize / 8, acctype);
v.V(128, Vt, ir.ZeroExtendToQuad(data));
v.V(128, Vt, v.ir.ZeroExtendToQuad(data));
}
break;
default:
@@ -153,7 +153,7 @@ static bool LoadStoreSIMD(TranslatorVisitor& v, IREmitter& ir, bool wback, bool
if (wback) {
if (postindex) {
address = ir.Add(address, ir.Imm64(offset));
address = v.ir.Add(address, v.ir.Imm64(offset));
}
if (Rn == Reg::SP) {
v.SP(64, address);
@@ -172,7 +172,7 @@ bool TranslatorVisitor::STR_imm_fpsimd_1(Imm<2> size, Imm<1> opc_1, Imm<9> imm9,
if (scale > 4) return UnallocatedEncoding();
const u64 offset = imm9.SignExtend<u64>();
return LoadStoreSIMD(*this, ir, wback, postindex, scale, offset, MemOp::STORE, Rn, Vt);
return LoadStoreSIMD(*this, wback, postindex, scale, offset, MemOp::STORE, Rn, Vt);
}
bool TranslatorVisitor::STR_imm_fpsimd_2(Imm<2> size, Imm<1> opc_1, Imm<12> imm12, Reg Rn, Vec Vt) {
@@ -182,7 +182,7 @@ bool TranslatorVisitor::STR_imm_fpsimd_2(Imm<2> size, Imm<1> opc_1, Imm<12> imm1
if (scale > 4) return UnallocatedEncoding();
const u64 offset = imm12.ZeroExtend<u64>() << scale;
return LoadStoreSIMD(*this, ir, wback, postindex, scale, offset, MemOp::STORE, Rn, Vt);
return LoadStoreSIMD(*this, wback, postindex, scale, offset, MemOp::STORE, Rn, Vt);
}
bool TranslatorVisitor::LDR_imm_fpsimd_1(Imm<2> size, Imm<1> opc_1, Imm<9> imm9, bool not_postindex, Reg Rn, Vec Vt) {
@@ -192,7 +192,7 @@ bool TranslatorVisitor::LDR_imm_fpsimd_1(Imm<2> size, Imm<1> opc_1, Imm<9> imm9,
if (scale > 4) return UnallocatedEncoding();
const u64 offset = imm9.SignExtend<u64>();
return LoadStoreSIMD(*this, ir, wback, postindex, scale, offset, MemOp::LOAD, Rn, Vt);
return LoadStoreSIMD(*this, wback, postindex, scale, offset, MemOp::LOAD, Rn, Vt);
}
bool TranslatorVisitor::LDR_imm_fpsimd_2(Imm<2> size, Imm<1> opc_1, Imm<12> imm12, Reg Rn, Vec Vt) {
@@ -202,7 +202,7 @@ bool TranslatorVisitor::LDR_imm_fpsimd_2(Imm<2> size, Imm<1> opc_1, Imm<12> imm1
if (scale > 4) return UnallocatedEncoding();
const u64 offset = imm12.ZeroExtend<u64>() << scale;
return LoadStoreSIMD(*this, ir, wback, postindex, scale, offset, MemOp::LOAD, Rn, Vt);
return LoadStoreSIMD(*this, wback, postindex, scale, offset, MemOp::LOAD, Rn, Vt);
}
bool TranslatorVisitor::STUR_fpsimd(Imm<2> size, Imm<1> opc_1, Imm<9> imm9, Reg Rn, Vec Vt) {
@@ -212,7 +212,7 @@ bool TranslatorVisitor::STUR_fpsimd(Imm<2> size, Imm<1> opc_1, Imm<9> imm9, Reg
if (scale > 4) return UnallocatedEncoding();
const u64 offset = imm9.SignExtend<u64>();
return LoadStoreSIMD(*this, ir, wback, postindex, scale, offset, MemOp::STORE, Rn, Vt);
return LoadStoreSIMD(*this, wback, postindex, scale, offset, MemOp::STORE, Rn, Vt);
}
bool TranslatorVisitor::LDUR_fpsimd(Imm<2> size, Imm<1> opc_1, Imm<9> imm9, Reg Rn, Vec Vt) {
@@ -222,7 +222,7 @@ bool TranslatorVisitor::LDUR_fpsimd(Imm<2> size, Imm<1> opc_1, Imm<9> imm9, Reg
if (scale > 4) return UnallocatedEncoding();
const u64 offset = imm9.SignExtend<u64>();
return LoadStoreSIMD(*this, ir, wback, postindex, scale, offset, MemOp::LOAD, Rn, Vt);
return LoadStoreSIMD(*this, wback, postindex, scale, offset, MemOp::LOAD, Rn, Vt);
}
} // namespace Dynarmic::A64

View file

@@ -8,7 +8,7 @@
namespace Dynarmic::A64 {
static bool RegSharedDecodeAndOperation(TranslatorVisitor& v, IREmitter& ir, size_t scale, u8 shift, Imm<2> size, Imm<1> opc_1, Imm<1> opc_0, Reg Rm, Imm<3> option, Reg Rn, Reg Rt) {
static bool RegSharedDecodeAndOperation(TranslatorVisitor& v, size_t scale, u8 shift, Imm<2> size, Imm<1> opc_1, Imm<1> opc_0, Reg Rm, Imm<3> option, Reg Rn, Reg Rt) {
// Shared Decode
const AccType acctype = AccType::NORMAL;
@@ -47,7 +47,7 @@ static bool RegSharedDecodeAndOperation(TranslatorVisitor& v, IREmitter& ir, siz
} else {
address = v.X(64, Rn);
}
address = ir.Add(address, offset);
address = v.ir.Add(address, offset);
switch (memop) {
case MemOp::STORE: {
@@ -81,7 +81,7 @@ bool TranslatorVisitor::STRx_reg(Imm<2> size, Imm<1> opc_1, Reg Rm, Imm<3> optio
if (!option.Bit<1>()) {
return UnallocatedEncoding();
}
return RegSharedDecodeAndOperation(*this, ir, scale, shift, size, opc_1, opc_0, Rm, option, Rn, Rt);
return RegSharedDecodeAndOperation(*this, scale, shift, size, opc_1, opc_0, Rm, option, Rn, Rt);
}
bool TranslatorVisitor::LDRx_reg(Imm<2> size, Imm<1> opc_1, Reg Rm, Imm<3> option, bool S, Reg Rn, Reg Rt) {
@@ -91,10 +91,10 @@ bool TranslatorVisitor::LDRx_reg(Imm<2> size, Imm<1> opc_1, Reg Rm, Imm<3> optio
if (!option.Bit<1>()) {
return UnallocatedEncoding();
}
return RegSharedDecodeAndOperation(*this, ir, scale, shift, size, opc_1, opc_0, Rm, option, Rn, Rt);
return RegSharedDecodeAndOperation(*this, scale, shift, size, opc_1, opc_0, Rm, option, Rn, Rt);
}
static bool VecSharedDecodeAndOperation(TranslatorVisitor& v, IREmitter& ir, size_t scale, u8 shift, Imm<1> opc_0, Reg Rm, Imm<3> option, Reg Rn, Vec Vt) {
static bool VecSharedDecodeAndOperation(TranslatorVisitor& v, size_t scale, u8 shift, Imm<1> opc_0, Reg Rm, Imm<3> option, Reg Rn, Vec Vt) {
// Shared Decode
const AccType acctype = AccType::VEC;
@@ -112,7 +112,7 @@ static bool VecSharedDecodeAndOperation(TranslatorVisitor& v, IREmitter& ir, siz
} else {
address = v.X(64, Rn);
}
address = ir.Add(address, offset);
address = v.ir.Add(address, offset);
switch (memop) {
case MemOp::STORE: {
@@ -142,7 +142,7 @@ bool TranslatorVisitor::STR_reg_fpsimd(Imm<2> size, Imm<1> opc_1, Reg Rm, Imm<3>
if (!option.Bit<1>()) {
return UnallocatedEncoding();
}
return VecSharedDecodeAndOperation(*this, ir, scale, shift, opc_0, Rm, option, Rn, Vt);
return VecSharedDecodeAndOperation(*this, scale, shift, opc_0, Rm, option, Rn, Vt);
}
bool TranslatorVisitor::LDR_reg_fpsimd(Imm<2> size, Imm<1> opc_1, Reg Rm, Imm<3> option, bool S, Reg Rn, Vec Vt) {
@@ -155,7 +155,7 @@ bool TranslatorVisitor::LDR_reg_fpsimd(Imm<2> size, Imm<1> opc_1, Reg Rm, Imm<3>
if (!option.Bit<1>()) {
return UnallocatedEncoding();
}
return VecSharedDecodeAndOperation(*this, ir, scale, shift, opc_0, Rm, option, Rn, Vt);
return VecSharedDecodeAndOperation(*this, scale, shift, opc_0, Rm, option, Rn, Vt);
}
} // namespace Dynarmic::A64

View file

@@ -8,7 +8,7 @@
namespace Dynarmic::A64 {
static bool StoreRegister(TranslatorVisitor& v, IREmitter& ir, const size_t datasize,
static bool StoreRegister(TranslatorVisitor& v, const size_t datasize,
const Imm<9> imm9, const Reg Rn, const Reg Rt) {
const u64 offset = imm9.SignExtend<u64>();
AccType acctype = AccType::UNPRIV;
@@ -20,13 +20,13 @@ static bool StoreRegister(TranslatorVisitor& v, IREmitter& ir, const size_t data
} else {
address = v.X(64, Rn);
}
address = ir.Add(address, ir.Imm64(offset));
address = v.ir.Add(address, v.ir.Imm64(offset));
IR::UAny data = v.X(datasize, Rt);
v.Mem(address, datasize / 8, acctype, data);
return true;
}
static bool LoadRegister(TranslatorVisitor& v, IREmitter& ir, const size_t datasize,
static bool LoadRegister(TranslatorVisitor& v, const size_t datasize,
const Imm<9> imm9, const Reg Rn, const Reg Rt) {
const u64 offset = imm9.SignExtend<u64>();
AccType acctype = AccType::UNPRIV;
@@ -38,7 +38,7 @@ static bool LoadRegister(TranslatorVisitor& v, IREmitter& ir, const size_t datas
} else {
address = v.X(64, Rn);
}
address = ir.Add(address, ir.Imm64(offset));
address = v.ir.Add(address, v.ir.Imm64(offset));
IR::UAny data = v.Mem(address, datasize / 8, acctype);
// max is used to zeroextend < 32 to 32, and > 32 to 64
const size_t extended_size = std::max<size_t>(32, datasize);
@@ -46,7 +46,7 @@ static bool LoadRegister(TranslatorVisitor& v, IREmitter& ir, const size_t datas
return true;
}
static bool LoadRegisterSigned(TranslatorVisitor& v, IREmitter& ir, const size_t datasize,
static bool LoadRegisterSigned(TranslatorVisitor& v, const size_t datasize,
const Imm<2> opc, const Imm<9> imm9, const Reg Rn, const Reg Rt) {
const u64 offset = imm9.SignExtend<u64>();
AccType acctype = AccType::UNPRIV;
@@ -72,7 +72,7 @@ static bool LoadRegisterSigned(TranslatorVisitor& v, IREmitter& ir, const size_t
} else {
address = v.X(64, Rn);
}
address = ir.Add(address, ir.Imm64(offset));
address = v.ir.Add(address, v.ir.Imm64(offset));
switch (memop) {
case MemOp::STORE:
@@ -95,39 +95,39 @@ static bool LoadRegisterSigned(TranslatorVisitor& v, IREmitter& ir, const size_t
}
bool TranslatorVisitor::STTRB(Imm<9> imm9, Reg Rn, Reg Rt) {
return StoreRegister(*this, ir, 8, imm9, Rn, Rt);
return StoreRegister(*this, 8, imm9, Rn, Rt);
}
bool TranslatorVisitor::STTRH(Imm<9> imm9, Reg Rn, Reg Rt) {
return StoreRegister(*this, ir, 16, imm9, Rn, Rt);
return StoreRegister(*this, 16, imm9, Rn, Rt);
}
bool TranslatorVisitor::STTR(Imm<2> size, Imm<9> imm9, Reg Rn, Reg Rt) {
const size_t scale = size.ZeroExtend<size_t>();
const size_t datasize = 8 << scale;
return StoreRegister(*this, ir, datasize, imm9, Rn, Rt);
return StoreRegister(*this, datasize, imm9, Rn, Rt);
}
bool TranslatorVisitor::LDTRB(Imm<9> imm9, Reg Rn, Reg Rt) {
return LoadRegister(*this, ir, 8, imm9, Rn, Rt);
return LoadRegister(*this, 8, imm9, Rn, Rt);
}
bool TranslatorVisitor::LDTRH(Imm<9> imm9, Reg Rn, Reg Rt) {
return LoadRegister(*this, ir, 16, imm9, Rn, Rt);
return LoadRegister(*this, 16, imm9, Rn, Rt);
}
bool TranslatorVisitor::LDTR(Imm<2> size, Imm<9> imm9, Reg Rn, Reg Rt) {
const size_t scale = size.ZeroExtend<size_t>();
const size_t datasize = 8 << scale;
return LoadRegister(*this, ir, datasize, imm9, Rn, Rt);
return LoadRegister(*this, datasize, imm9, Rn, Rt);
}
bool TranslatorVisitor::LDTRSB(Imm<2> opc, Imm<9> imm9, Reg Rn, Reg Rt) {
return LoadRegisterSigned(*this, ir, 8, opc, imm9, Rn, Rt);
return LoadRegisterSigned(*this, 8, opc, imm9, Rn, Rt);
}
bool TranslatorVisitor::LDTRSH(Imm<2> opc, Imm<9> imm9, Reg Rn, Reg Rt) {
return LoadRegisterSigned(*this, ir, 16, opc, imm9, Rn, Rt);
return LoadRegisterSigned(*this, 16, opc, imm9, Rn, Rt);
}
bool TranslatorVisitor::LDTRSW(Imm<9> imm9, Reg Rn, Reg Rt) {

View file

@@ -10,7 +10,7 @@
namespace Dynarmic::A64 {
static bool SharedDecodeAndOperation(TranslatorVisitor& v, IREmitter& ir, bool wback, MemOp memop,
static bool SharedDecodeAndOperation(TranslatorVisitor& v, bool wback, MemOp memop,
bool Q, bool S, bool R, bool replicate, boost::optional<Reg> Rm,
Imm<3> opcode, Imm<2> size, Reg Rn, Vec Vt) {
const size_t selem = (opcode.Bit<0>() << 1 | u32{R}) + 1;
@@ -60,16 +60,16 @@ static bool SharedDecodeAndOperation(TranslatorVisitor& v, IREmitter& ir, bool w
else
address = v.X(64, Rn);
IR::U64 offs = ir.Imm64(0);
IR::U64 offs = v.ir.Imm64(0);
if (replicate) {
for (size_t s = 0; s < selem; s++) {
const Vec tt = static_cast<Vec>((VecNumber(Vt) + s) % 32);
const IR::UAnyU128 element = v.Mem(ir.Add(address, offs), ebytes, AccType::VEC);
const IR::U128 broadcasted_element = ir.VectorBroadcast(esize, element);
const IR::UAnyU128 element = v.Mem(v.ir.Add(address, offs), ebytes, AccType::VEC);
const IR::U128 broadcasted_element = v.ir.VectorBroadcast(esize, element);
v.V(datasize, tt, broadcasted_element);
offs = ir.Add(offs, ir.Imm64(ebytes));
offs = v.ir.Add(offs, v.ir.Imm64(ebytes));
}
} else {
for (size_t s = 0; s < selem; s++) {
@@ -77,14 +77,14 @@ static bool SharedDecodeAndOperation(TranslatorVisitor& v, IREmitter& ir, bool w
const IR::U128 rval = v.V(128, tt);
if (memop == MemOp::LOAD) {
const IR::UAny elem = v.Mem(ir.Add(address, offs), ebytes, AccType::VEC);
const IR::U128 vec = ir.VectorSetElement(esize, rval, index, elem);
const IR::UAny elem = v.Mem(v.ir.Add(address, offs), ebytes, AccType::VEC);
const IR::U128 vec = v.ir.VectorSetElement(esize, rval, index, elem);
v.V(128, tt, vec);
} else {
const IR::UAny elem = ir.VectorGetElement(esize, rval, index);
v.Mem(ir.Add(address, offs), ebytes, AccType::VEC, elem);
const IR::UAny elem = v.ir.VectorGetElement(esize, rval, index);
v.Mem(v.ir.Add(address, offs), ebytes, AccType::VEC, elem);
}
offs = ir.Add(offs, ir.Imm64(ebytes));
offs = v.ir.Add(offs, v.ir.Imm64(ebytes));
}
}
@@ -92,123 +92,123 @@ static bool SharedDecodeAndOperation(TranslatorVisitor& v, IREmitter& ir, bool w
if (*Rm != Reg::SP)
offs = v.X(64, *Rm);
if (Rn == Reg::SP)
v.SP(64, ir.Add(address, offs));
v.SP(64, v.ir.Add(address, offs));
else
v.X(64, Rn, ir.Add(address, offs));
v.X(64, Rn, v.ir.Add(address, offs));
}
return true;
}
bool TranslatorVisitor::LD1_sngl_1(bool Q, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
return SharedDecodeAndOperation(*this, ir, false, MemOp::LOAD, Q, S, false, false, {},
return SharedDecodeAndOperation(*this, false, MemOp::LOAD, Q, S, false, false, {},
Imm<3>{upper_opcode.ZeroExtend() << 1}, size, Rn, Vt);
}
bool TranslatorVisitor::LD1_sngl_2(bool Q, Reg Rm, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
return SharedDecodeAndOperation(*this, ir, true, MemOp::LOAD, Q, S, false, false, Rm,
return SharedDecodeAndOperation(*this, true, MemOp::LOAD, Q, S, false, false, Rm,
Imm<3>{upper_opcode.ZeroExtend() << 1}, size, Rn, Vt);
}
bool TranslatorVisitor::LD1R_1(bool Q, Imm<2> size, Reg Rn, Vec Vt) {
return SharedDecodeAndOperation(*this, ir, false, MemOp::LOAD, Q, false, false, true, {}, Imm<3>{0b110}, size, Rn, Vt);
return SharedDecodeAndOperation(*this, false, MemOp::LOAD, Q, false, false, true, {}, Imm<3>{0b110}, size, Rn, Vt);
}
bool TranslatorVisitor::LD1R_2(bool Q, Reg Rm, Imm<2> size, Reg Rn, Vec Vt) {
return SharedDecodeAndOperation(*this, ir, true, MemOp::LOAD, Q, false, false, true, Rm, Imm<3>{0b110}, size, Rn, Vt);
return SharedDecodeAndOperation(*this, true, MemOp::LOAD, Q, false, false, true, Rm, Imm<3>{0b110}, size, Rn, Vt);
}
bool TranslatorVisitor::LD2_sngl_1(bool Q, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
return SharedDecodeAndOperation(*this, ir, false, MemOp::LOAD, Q, S, true, false, {},
return SharedDecodeAndOperation(*this, false, MemOp::LOAD, Q, S, true, false, {},
Imm<3>{upper_opcode.ZeroExtend() << 1}, size, Rn, Vt);
}
bool TranslatorVisitor::LD2_sngl_2(bool Q, Reg Rm, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
return SharedDecodeAndOperation(*this, ir, true, MemOp::LOAD, Q, S, true, false, Rm,
return SharedDecodeAndOperation(*this, true, MemOp::LOAD, Q, S, true, false, Rm,
Imm<3>{upper_opcode.ZeroExtend() << 1}, size, Rn, Vt);
}
bool TranslatorVisitor::LD2R_1(bool Q, Imm<2> size, Reg Rn, Vec Vt) {
return SharedDecodeAndOperation(*this, ir, false, MemOp::LOAD, Q, false, true, true, {}, Imm<3>{0b110}, size, Rn, Vt);
return SharedDecodeAndOperation(*this, false, MemOp::LOAD, Q, false, true, true, {}, Imm<3>{0b110}, size, Rn, Vt);
}
bool TranslatorVisitor::LD2R_2(bool Q, Reg Rm, Imm<2> size, Reg Rn, Vec Vt) {
return SharedDecodeAndOperation(*this, ir, true, MemOp::LOAD, Q, false, true, true, Rm, Imm<3>{0b110}, size, Rn, Vt);
return SharedDecodeAndOperation(*this, true, MemOp::LOAD, Q, false, true, true, Rm, Imm<3>{0b110}, size, Rn, Vt);
}
bool TranslatorVisitor::LD3_sngl_1(bool Q, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
return SharedDecodeAndOperation(*this, ir, false, MemOp::LOAD, Q, S, false, false, {},
return SharedDecodeAndOperation(*this, false, MemOp::LOAD, Q, S, false, false, {},
Imm<3>{(upper_opcode.ZeroExtend() << 1) | 1}, size, Rn, Vt);
}
bool TranslatorVisitor::LD3_sngl_2(bool Q, Reg Rm, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
return SharedDecodeAndOperation(*this, ir, true, MemOp::LOAD, Q, S, false, false, Rm,
return SharedDecodeAndOperation(*this, true, MemOp::LOAD, Q, S, false, false, Rm,
Imm<3>{(upper_opcode.ZeroExtend() << 1) | 1}, size, Rn, Vt);
}
// LD3R (no offset register): replicating three-register load via the shared handler.
bool TranslatorVisitor::LD3R_1(bool Q, Imm<2> size, Reg Rn, Vec Vt) {
    return SharedDecodeAndOperation(*this, false, MemOp::LOAD, Q, false, false, true, {}, Imm<3>{0b111}, size, Rn, Vt);
}
// LD3R (variant with offset register Rm): replicating three-register load via the shared handler.
bool TranslatorVisitor::LD3R_2(bool Q, Reg Rm, Imm<2> size, Reg Rn, Vec Vt) {
    return SharedDecodeAndOperation(*this, true, MemOp::LOAD, Q, false, false, true, Rm, Imm<3>{0b111}, size, Rn, Vt);
}
bool TranslatorVisitor::LD4_sngl_1(bool Q, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
return SharedDecodeAndOperation(*this, ir, false, MemOp::LOAD, Q, S, true, false, {},
return SharedDecodeAndOperation(*this, false, MemOp::LOAD, Q, S, true, false, {},
Imm<3>{(upper_opcode.ZeroExtend() << 1) | 1}, size, Rn, Vt);
}
bool TranslatorVisitor::LD4_sngl_2(bool Q, Reg Rm, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
return SharedDecodeAndOperation(*this, ir, true, MemOp::LOAD, Q, S, true, false, Rm,
return SharedDecodeAndOperation(*this, true, MemOp::LOAD, Q, S, true, false, Rm,
Imm<3>{(upper_opcode.ZeroExtend() << 1) | 1}, size, Rn, Vt);
}
// LD4R (no offset register): replicating four-register load via the shared handler.
bool TranslatorVisitor::LD4R_1(bool Q, Imm<2> size, Reg Rn, Vec Vt) {
    return SharedDecodeAndOperation(*this, false, MemOp::LOAD, Q, false, true, true, {}, Imm<3>{0b111}, size, Rn, Vt);
}
// LD4R (variant with offset register Rm): replicating four-register load via the shared handler.
bool TranslatorVisitor::LD4R_2(bool Q, Reg Rm, Imm<2> size, Reg Rn, Vec Vt) {
    return SharedDecodeAndOperation(*this, true, MemOp::LOAD, Q, false, true, true, Rm, Imm<3>{0b111}, size, Rn, Vt);
}
bool TranslatorVisitor::ST1_sngl_1(bool Q, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
return SharedDecodeAndOperation(*this, ir, false, MemOp::STORE, Q, S, false, false, {},
return SharedDecodeAndOperation(*this, false, MemOp::STORE, Q, S, false, false, {},
Imm<3>{upper_opcode.ZeroExtend() << 1}, size, Rn, Vt);
}
bool TranslatorVisitor::ST1_sngl_2(bool Q, Reg Rm, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
return SharedDecodeAndOperation(*this, ir, true, MemOp::STORE, Q, S, false, false, Rm,
return SharedDecodeAndOperation(*this, true, MemOp::STORE, Q, S, false, false, Rm,
Imm<3>{upper_opcode.ZeroExtend() << 1}, size, Rn, Vt);
}
bool TranslatorVisitor::ST2_sngl_1(bool Q, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
return SharedDecodeAndOperation(*this, ir, false, MemOp::STORE, Q, S, true, false, {},
return SharedDecodeAndOperation(*this, false, MemOp::STORE, Q, S, true, false, {},
Imm<3>{upper_opcode.ZeroExtend() << 1}, size, Rn, Vt);
}
bool TranslatorVisitor::ST2_sngl_2(bool Q, Reg Rm, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
return SharedDecodeAndOperation(*this, ir, true, MemOp::STORE, Q, S, true, false, Rm,
return SharedDecodeAndOperation(*this, true, MemOp::STORE, Q, S, true, false, Rm,
Imm<3>{upper_opcode.ZeroExtend() << 1}, size, Rn, Vt);
}
bool TranslatorVisitor::ST3_sngl_1(bool Q, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
return SharedDecodeAndOperation(*this, ir, false, MemOp::STORE, Q, S, false, false, {},
return SharedDecodeAndOperation(*this, false, MemOp::STORE, Q, S, false, false, {},
Imm<3>{(upper_opcode.ZeroExtend() << 1) | 1}, size, Rn, Vt);
}
bool TranslatorVisitor::ST3_sngl_2(bool Q, Reg Rm, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
return SharedDecodeAndOperation(*this, ir, true, MemOp::STORE, Q, S, false, false, Rm,
return SharedDecodeAndOperation(*this, true, MemOp::STORE, Q, S, false, false, Rm,
Imm<3>{(upper_opcode.ZeroExtend() << 1) | 1}, size, Rn, Vt);
}
bool TranslatorVisitor::ST4_sngl_1(bool Q, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
return SharedDecodeAndOperation(*this, ir, false, MemOp::STORE, Q, S, true, false, {},
return SharedDecodeAndOperation(*this, false, MemOp::STORE, Q, S, true, false, {},
Imm<3>{(upper_opcode.ZeroExtend() << 1) | 1}, size, Rn, Vt);
}
bool TranslatorVisitor::ST4_sngl_2(bool Q, Reg Rm, Imm<2> upper_opcode, bool S, Imm<2> size, Reg Rn, Vec Vt) {
return SharedDecodeAndOperation(*this, ir, true, MemOp::STORE, Q, S, true, false, Rm,
return SharedDecodeAndOperation(*this, true, MemOp::STORE, Q, S, true, false, Rm,
Imm<3>{(upper_opcode.ZeroExtend() << 1) | 1}, size, Rn, Vt);
}

View file

@ -8,45 +8,45 @@
namespace Dynarmic::A64 {
// Shared handler for the DC_* system instructions: raises the data cache
// operation callback with the requested operation and the 64-bit value of Xt
// (presumably the VA or set/way operand, depending on op — per the enum names).
static bool DataCacheInstruction(TranslatorVisitor& v, DataCacheOperation op, const Reg Rt) {
    v.ir.DataCacheOperationRaised(op, v.X(64, Rt));
    return true;
}
// DC IVAC: invalidate data cache by VA to Point of Coherency.
bool TranslatorVisitor::DC_IVAC(Reg Rt) {
    return DataCacheInstruction(*this, DataCacheOperation::InvalidateByVAToPoC, Rt);
}
// DC ISW: invalidate data cache by set/way.
bool TranslatorVisitor::DC_ISW(Reg Rt) {
    return DataCacheInstruction(*this, DataCacheOperation::InvalidateBySetWay, Rt);
}
// DC CSW: clean data cache by set/way.
bool TranslatorVisitor::DC_CSW(Reg Rt) {
    return DataCacheInstruction(*this, DataCacheOperation::CleanBySetWay, Rt);
}
// DC CISW: clean and invalidate data cache by set/way.
bool TranslatorVisitor::DC_CISW(Reg Rt) {
    return DataCacheInstruction(*this, DataCacheOperation::CleanAndInvalidateBySetWay, Rt);
}
// DC ZVA: zero data cache block by VA.
bool TranslatorVisitor::DC_ZVA(Reg Rt) {
    return DataCacheInstruction(*this, DataCacheOperation::ZeroByVA, Rt);
}
// DC CVAC: clean data cache by VA to Point of Coherency.
bool TranslatorVisitor::DC_CVAC(Reg Rt) {
    return DataCacheInstruction(*this, DataCacheOperation::CleanByVAToPoC, Rt);
}
// DC CVAU: clean data cache by VA to Point of Unification.
bool TranslatorVisitor::DC_CVAU(Reg Rt) {
    return DataCacheInstruction(*this, DataCacheOperation::CleanByVAToPoU, Rt);
}
// DC CVAP: clean data cache by VA to Point of Persistence.
bool TranslatorVisitor::DC_CVAP(Reg Rt) {
    return DataCacheInstruction(*this, DataCacheOperation::CleanByVAToPoP, Rt);
}
// DC CIVAC: clean and invalidate data cache by VA to Point of Coherency.
bool TranslatorVisitor::DC_CIVAC(Reg Rt) {
    return DataCacheInstruction(*this, DataCacheOperation::CleanAndInvalidateByVAToPoC, Rt);
}
} // namespace Dynarmic::A64