A64/translate: Standardize TranslatorVisitor abbreviation

Prefer v to tv.
MerryMage 2018-08-21 12:16:16 +01:00
parent 9a0dc61efd
commit a4e556d59c
8 changed files with 111 additions and 111 deletions
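Every hunk below makes the same mechanical rename: free helper functions that take the visitor by reference now name the parameter v rather than tv. A minimal sketch of the pattern, using a hypothetical helper that is not part of this commit:

// Hypothetical helper illustrating the convention; the real helpers are in the hunks below.
// Before this commit the visitor parameter was spelled `tv`; afterwards it is `v` everywhere.
static bool ExampleHelper(TranslatorVisitor& v, IREmitter& ir, Reg Rn, Reg Rt) {
    const IR::U64 address = v.X(64, Rn);               // register reads go through the visitor
    v.Mem(ir.Add(address, ir.Imm64(8)), 8, AccType::NORMAL, v.X(64, Rt)); // memory helpers likewise take `v`
    return true;                                       // IR emission via `ir` is unchanged
}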

View file

@@ -8,9 +8,9 @@
 namespace Dynarmic::A64 {
 
-static IR::U8 SanitizeShiftAmount(TranslatorVisitor& tv, IREmitter& ir, size_t datasize,
+static IR::U8 SanitizeShiftAmount(TranslatorVisitor& v, IREmitter& ir, size_t datasize,
                                   const IR::U32U64& amount) {
-    return ir.LeastSignificantByte(ir.And(amount, tv.I(datasize, datasize - 1)));
+    return ir.LeastSignificantByte(ir.And(amount, v.I(datasize, datasize - 1)));
 }
 
 bool TranslatorVisitor::LSLV(bool sf, Reg Rm, Reg Rn, Reg Rd) {

View file

@@ -10,7 +10,7 @@
 namespace Dynarmic::A64 {
 
-static bool ExclusiveSharedDecodeAndOperation(TranslatorVisitor& tv, IREmitter& ir, bool pair, size_t size, bool L, bool o0, boost::optional<Reg> Rs, boost::optional<Reg> Rt2, Reg Rn, Reg Rt) {
+static bool ExclusiveSharedDecodeAndOperation(TranslatorVisitor& v, IREmitter& ir, bool pair, size_t size, bool L, bool o0, boost::optional<Reg> Rs, boost::optional<Reg> Rt2, Reg Rn, Reg Rt) {
     // Shared Decode
     const AccType acctype = o0 ? AccType::ORDERED : AccType::ATOMIC;
@@ -24,49 +24,49 @@ static bool ExclusiveSharedDecodeAndOperation(TranslatorVisitor& tv, IREmitter&
     const size_t dbytes = datasize / 8;
 
     if (memop == MemOp::LOAD && pair && Rt == *Rt2) {
-        return tv.UnpredictableInstruction();
+        return v.UnpredictableInstruction();
     } else if (memop == MemOp::STORE && (*Rs == Rt || (pair && *Rs == *Rt2))) {
-        if (!tv.options.define_unpredictable_behaviour) {
-            return tv.UnpredictableInstruction();
+        if (!v.options.define_unpredictable_behaviour) {
+            return v.UnpredictableInstruction();
         }
         // UNPREDICTABLE: The Constraint_NONE case is executed.
     } else if (memop == MemOp::STORE && *Rs == Rn && Rn != Reg::R31) {
-        return tv.UnpredictableInstruction();
+        return v.UnpredictableInstruction();
     }
 
     IR::U64 address;
     if (Rn == Reg::SP) {
         // TODO: Check SP Alignment
-        address = tv.SP(64);
+        address = v.SP(64);
     } else {
-        address = tv.X(64, Rn);
+        address = v.X(64, Rn);
     }
 
     switch (memop) {
     case MemOp::STORE: {
         IR::UAnyU128 data;
         if (pair && elsize == 64) {
-            data = ir.Pack2x64To1x128(tv.X(64, Rt), tv.X(64, *Rt2));
+            data = ir.Pack2x64To1x128(v.X(64, Rt), v.X(64, *Rt2));
         } else if (pair && elsize == 32) {
-            data = ir.Pack2x32To1x64(tv.X(32, Rt), tv.X(32, *Rt2));
+            data = ir.Pack2x32To1x64(v.X(32, Rt), v.X(32, *Rt2));
         } else {
-            data = tv.X(elsize, Rt);
+            data = v.X(elsize, Rt);
         }
-        IR::U32 status = tv.ExclusiveMem(address, dbytes, acctype, data);
-        tv.X(32, *Rs, status);
+        IR::U32 status = v.ExclusiveMem(address, dbytes, acctype, data);
+        v.X(32, *Rs, status);
         break;
     }
     case MemOp::LOAD: {
         ir.SetExclusive(address, dbytes);
-        IR::UAnyU128 data = tv.Mem(address, dbytes, acctype);
+        IR::UAnyU128 data = v.Mem(address, dbytes, acctype);
         if (pair && elsize == 64) {
-            tv.X(64, Rt, ir.VectorGetElement(64, data, 0));
-            tv.X(64, *Rt2, ir.VectorGetElement(64, data, 1));
+            v.X(64, Rt, ir.VectorGetElement(64, data, 0));
+            v.X(64, *Rt2, ir.VectorGetElement(64, data, 1));
         } else if (pair && elsize == 32) {
-            tv.X(32, Rt, ir.LeastSignificantWord(data));
-            tv.X(32, *Rt2, ir.MostSignificantWord(data).result);
+            v.X(32, Rt, ir.LeastSignificantWord(data));
+            v.X(32, *Rt2, ir.MostSignificantWord(data).result);
         } else {
-            tv.X(regsize, Rt, tv.ZeroExtend(data, regsize));
+            v.X(regsize, Rt, v.ZeroExtend(data, regsize));
         }
         break;
     }
@@ -141,7 +141,7 @@ bool TranslatorVisitor::LDAXP(Imm<1> sz, Reg Rt2, Reg Rn, Reg Rt) {
     return ExclusiveSharedDecodeAndOperation(*this, ir, pair, size, L, o0, {}, Rt2, Rn, Rt);
 }
 
-static bool OrderedSharedDecodeAndOperation(TranslatorVisitor& tv, size_t size, bool L, bool o0, Reg Rn, Reg Rt) {
+static bool OrderedSharedDecodeAndOperation(TranslatorVisitor& v, size_t size, bool L, bool o0, Reg Rn, Reg Rt) {
     // Shared Decode
     const AccType acctype = !o0 ? AccType::LIMITEDORDERED : AccType::ORDERED;
@@ -157,20 +157,20 @@ static bool OrderedSharedDecodeAndOperation(TranslatorVisitor& tv, size_t size,
     IR::U64 address;
     if (Rn == Reg::SP) {
         // TODO: Check SP Alignment
-        address = tv.SP(64);
+        address = v.SP(64);
     } else {
-        address = tv.X(64, Rn);
+        address = v.X(64, Rn);
     }
 
     switch (memop) {
     case MemOp::STORE: {
-        IR::UAny data = tv.X(datasize, Rt);
-        tv.Mem(address, dbytes, acctype, data);
+        IR::UAny data = v.X(datasize, Rt);
+        v.Mem(address, dbytes, acctype, data);
         break;
     }
     case MemOp::LOAD: {
-        IR::UAny data = tv.Mem(address, dbytes, acctype);
-        tv.X(regsize, Rt, tv.ZeroExtend(data, regsize));
+        IR::UAny data = v.Mem(address, dbytes, acctype);
+        v.X(regsize, Rt, v.ZeroExtend(data, regsize));
         break;
     }
     default:

View file

@@ -12,7 +12,7 @@
 namespace Dynarmic::A64 {
 
-static bool SharedDecodeAndOperation(TranslatorVisitor& tv, IREmitter& ir, bool wback, MemOp memop, bool Q, boost::optional<Reg> Rm, Imm<4> opcode, Imm<2> size, Reg Rn, Vec Vt) {
+static bool SharedDecodeAndOperation(TranslatorVisitor& v, IREmitter& ir, bool wback, MemOp memop, bool Q, boost::optional<Reg> Rm, Imm<4> opcode, Imm<2> size, Reg Rn, Vec Vt) {
     const size_t datasize = Q ? 128 : 64;
     const size_t esize = 8 << size.ZeroExtend<size_t>();
     const size_t elements = datasize / esize;
@@ -49,31 +49,31 @@ static bool SharedDecodeAndOperation(TranslatorVisitor& tv, IREmitter& ir, bool
         selem = 1;
         break;
     default:
-        return tv.UnallocatedEncoding();
+        return v.UnallocatedEncoding();
     }
     ASSERT(rpt == 1 || selem == 1);
 
     if ((size == 0b11 && !Q) && selem != 1) {
-        return tv.ReservedValue();
+        return v.ReservedValue();
     }
 
     IR::U64 address;
     if (Rn == Reg::SP)
         // TODO: Check SP Alignment
-        address = tv.SP(64);
+        address = v.SP(64);
     else
-        address = tv.X(64, Rn);
+        address = v.X(64, Rn);
 
     IR::U64 offs = ir.Imm64(0);
     if (selem == 1) {
         for (size_t r = 0; r < rpt; r++) {
             const Vec tt = static_cast<Vec>((VecNumber(Vt) + r) % 32);
             if (memop == MemOp::LOAD) {
-                const IR::UAnyU128 vec = tv.Mem(ir.Add(address, offs), ebytes * elements, AccType::VEC);
-                tv.V_scalar(datasize, tt, vec);
+                const IR::UAnyU128 vec = v.Mem(ir.Add(address, offs), ebytes * elements, AccType::VEC);
+                v.V_scalar(datasize, tt, vec);
             } else {
-                const IR::UAnyU128 vec = tv.V_scalar(datasize, tt);
-                tv.Mem(ir.Add(address, offs), ebytes * elements, AccType::VEC, vec);
+                const IR::UAnyU128 vec = v.V_scalar(datasize, tt);
+                v.Mem(ir.Add(address, offs), ebytes * elements, AccType::VEC, vec);
             }
             offs = ir.Add(offs, ir.Imm64(ebytes * elements));
         }
@@ -82,12 +82,12 @@ static bool SharedDecodeAndOperation(TranslatorVisitor& tv, IREmitter& ir, bool
             for (size_t s = 0; s < selem; s++) {
                 const Vec tt = static_cast<Vec>((VecNumber(Vt) + s) % 32);
                 if (memop == MemOp::LOAD) {
-                    const IR::UAny elem = tv.Mem(ir.Add(address, offs), ebytes, AccType::VEC);
-                    const IR::U128 vec = ir.VectorSetElement(esize, tv.V(datasize, tt), e, elem);
-                    tv.V(datasize, tt, vec);
+                    const IR::UAny elem = v.Mem(ir.Add(address, offs), ebytes, AccType::VEC);
+                    const IR::U128 vec = ir.VectorSetElement(esize, v.V(datasize, tt), e, elem);
+                    v.V(datasize, tt, vec);
                 } else {
-                    const IR::UAny elem = ir.VectorGetElement(esize, tv.V(datasize, tt), e);
-                    tv.Mem(ir.Add(address, offs), ebytes, AccType::VEC, elem);
+                    const IR::UAny elem = ir.VectorGetElement(esize, v.V(datasize, tt), e);
+                    v.Mem(ir.Add(address, offs), ebytes, AccType::VEC, elem);
                 }
                 offs = ir.Add(offs, ir.Imm64(ebytes));
             }
@@ -96,11 +96,11 @@ static bool SharedDecodeAndOperation(TranslatorVisitor& tv, IREmitter& ir, bool
 
     if (wback) {
         if (*Rm != Reg::SP)
-            offs = tv.X(64, *Rm);
+            offs = v.X(64, *Rm);
        if (Rn == Reg::SP)
-            tv.SP(64, ir.Add(address, offs));
+            v.SP(64, ir.Add(address, offs));
        else
-            tv.X(64, Rn, ir.Add(address, offs));
+            v.X(64, Rn, ir.Add(address, offs));
     }
 
     return true;

View file

@@ -112,16 +112,16 @@ bool TranslatorVisitor::PRFM_unscaled_imm([[maybe_unused]] Imm<9> imm9, [[maybe_
     return true;
 }
 
-static bool LoadStoreSIMD(TranslatorVisitor& tv, IREmitter& ir, bool wback, bool postindex, size_t scale, u64 offset, MemOp memop, Reg Rn, Vec Vt) {
+static bool LoadStoreSIMD(TranslatorVisitor& v, IREmitter& ir, bool wback, bool postindex, size_t scale, u64 offset, MemOp memop, Reg Rn, Vec Vt) {
     const AccType acctype = AccType::VEC;
     const size_t datasize = 8 << scale;
 
     IR::U64 address;
     if (Rn == Reg::SP) {
         // TODO: Check SP Alignment
-        address = tv.SP(64);
+        address = v.SP(64);
     } else {
-        address = tv.X(64, Rn);
+        address = v.X(64, Rn);
     }
 
     if (!postindex) {
@@ -131,20 +131,20 @@ static bool LoadStoreSIMD(TranslatorVisitor& tv, IREmitter& ir, bool wback, bool
     switch (memop) {
     case MemOp::STORE:
         if (datasize == 128) {
-            const IR::U128 data = tv.V(128, Vt);
-            tv.Mem(address, 16, acctype, data);
+            const IR::U128 data = v.V(128, Vt);
+            v.Mem(address, 16, acctype, data);
         } else {
-            const IR::UAny data = ir.VectorGetElement(datasize, tv.V(128, Vt), 0);
-            tv.Mem(address, datasize / 8, acctype, data);
+            const IR::UAny data = ir.VectorGetElement(datasize, v.V(128, Vt), 0);
+            v.Mem(address, datasize / 8, acctype, data);
         }
         break;
     case MemOp::LOAD:
         if (datasize == 128) {
-            const IR::U128 data = tv.Mem(address, 16, acctype);
-            tv.V(128, Vt, data);
+            const IR::U128 data = v.Mem(address, 16, acctype);
+            v.V(128, Vt, data);
         } else {
-            const IR::UAny data = tv.Mem(address, datasize / 8, acctype);
-            tv.V(128, Vt, ir.ZeroExtendToQuad(data));
+            const IR::UAny data = v.Mem(address, datasize / 8, acctype);
+            v.V(128, Vt, ir.ZeroExtendToQuad(data));
         }
         break;
     default:
@@ -156,9 +156,9 @@ static bool LoadStoreSIMD(TranslatorVisitor& tv, IREmitter& ir, bool wback, bool
             address = ir.Add(address, ir.Imm64(offset));
         }
         if (Rn == Reg::SP) {
-            tv.SP(64, address);
+            v.SP(64, address);
         } else {
-            tv.X(64, Rn, address);
+            v.X(64, Rn, address);
         }
     }

View file

@@ -8,7 +8,7 @@
 namespace Dynarmic::A64 {
 
-static bool RegSharedDecodeAndOperation(TranslatorVisitor& tv, IREmitter& ir, size_t scale, u8 shift, Imm<2> size, Imm<1> opc_1, Imm<1> opc_0, Reg Rm, Imm<3> option, Reg Rn, Reg Rt) {
+static bool RegSharedDecodeAndOperation(TranslatorVisitor& v, IREmitter& ir, size_t scale, u8 shift, Imm<2> size, Imm<1> opc_1, Imm<1> opc_0, Reg Rm, Imm<3> option, Reg Rn, Reg Rt) {
     // Shared Decode
     const AccType acctype = AccType::NORMAL;
@@ -23,12 +23,12 @@ static bool RegSharedDecodeAndOperation(TranslatorVisitor& tv, IREmitter& ir, si
     } else if (size == 0b11) {
         memop = MemOp::PREFETCH;
         if (opc_0 == 1) {
-            return tv.UnallocatedEncoding();
+            return v.UnallocatedEncoding();
         }
     } else {
         memop = MemOp::LOAD;
         if (size == 0b10 && opc_0 == 1) {
-            return tv.UnallocatedEncoding();
+            return v.UnallocatedEncoding();
         }
         regsize = opc_0 == 1 ? 32 : 64;
         signed_ = true;
@@ -38,29 +38,29 @@ static bool RegSharedDecodeAndOperation(TranslatorVisitor& tv, IREmitter& ir, si
 
     // Operation
-    const IR::U64 offset = tv.ExtendReg(64, Rm, option, shift);
+    const IR::U64 offset = v.ExtendReg(64, Rm, option, shift);
 
     IR::U64 address;
     if (Rn == Reg::SP) {
         // TODO: Check SP alignment
-        address = tv.SP(64);
+        address = v.SP(64);
     } else {
-        address = tv.X(64, Rn);
+        address = v.X(64, Rn);
     }
     address = ir.Add(address, offset);
 
     switch (memop) {
     case MemOp::STORE: {
-        IR::UAny data = tv.X(datasize, Rt);
-        tv.Mem(address, datasize / 8, acctype, data);
+        IR::UAny data = v.X(datasize, Rt);
+        v.Mem(address, datasize / 8, acctype, data);
         break;
     }
     case MemOp::LOAD: {
-        IR::UAny data = tv.Mem(address, datasize / 8, acctype);
+        IR::UAny data = v.Mem(address, datasize / 8, acctype);
         if (signed_) {
-            tv.X(regsize, Rt, tv.SignExtend(data, regsize));
+            v.X(regsize, Rt, v.SignExtend(data, regsize));
         } else {
-            tv.X(regsize, Rt, tv.ZeroExtend(data, regsize));
+            v.X(regsize, Rt, v.ZeroExtend(data, regsize));
         }
         break;
     }
@@ -94,7 +94,7 @@ bool TranslatorVisitor::LDRx_reg(Imm<2> size, Imm<1> opc_1, Reg Rm, Imm<3> optio
     return RegSharedDecodeAndOperation(*this, ir, scale, shift, size, opc_1, opc_0, Rm, option, Rn, Rt);
 }
 
-static bool VecSharedDecodeAndOperation(TranslatorVisitor& tv, IREmitter& ir, size_t scale, u8 shift, Imm<1> opc_0, Reg Rm, Imm<3> option, Reg Rn, Vec Vt) {
+static bool VecSharedDecodeAndOperation(TranslatorVisitor& v, IREmitter& ir, size_t scale, u8 shift, Imm<1> opc_0, Reg Rm, Imm<3> option, Reg Rn, Vec Vt) {
     // Shared Decode
     const AccType acctype = AccType::VEC;
@@ -103,26 +103,26 @@ static bool VecSharedDecodeAndOperation(TranslatorVisitor& tv, IREmitter& ir, si
 
     // Operation
-    const IR::U64 offset = tv.ExtendReg(64, Rm, option, shift);
+    const IR::U64 offset = v.ExtendReg(64, Rm, option, shift);
 
     IR::U64 address;
     if (Rn == Reg::SP) {
         // TODO: Check SP alignment
-        address = tv.SP(64);
+        address = v.SP(64);
     } else {
-        address = tv.X(64, Rn);
+        address = v.X(64, Rn);
     }
     address = ir.Add(address, offset);
 
     switch (memop) {
     case MemOp::STORE: {
-        const IR::UAnyU128 data = tv.V_scalar(datasize, Vt);
-        tv.Mem(address, datasize / 8, acctype, data);
+        const IR::UAnyU128 data = v.V_scalar(datasize, Vt);
+        v.Mem(address, datasize / 8, acctype, data);
         break;
     }
     case MemOp::LOAD: {
-        const IR::UAnyU128 data = tv.Mem(address, datasize / 8, acctype);
-        tv.V_scalar(datasize, Vt, data);
+        const IR::UAnyU128 data = v.Mem(address, datasize / 8, acctype);
+        v.V_scalar(datasize, Vt, data);
         break;
     }
     default:

View file

@@ -8,7 +8,7 @@
 namespace Dynarmic::A64 {
 
-static bool StoreRegister(TranslatorVisitor& tv, IREmitter& ir, const size_t datasize,
+static bool StoreRegister(TranslatorVisitor& v, IREmitter& ir, const size_t datasize,
                           const Imm<9> imm9, const Reg Rn, const Reg Rt) {
     const u64 offset = imm9.SignExtend<u64>();
     AccType acctype = AccType::UNPRIV;
@@ -16,17 +16,17 @@ static bool StoreRegister(TranslatorVisitor& tv, IREmitter& ir, const size_t dat
 
     if (Rn == Reg::SP) {
         // TODO: Check Stack Alignment
-        address = tv.SP(64);
+        address = v.SP(64);
     } else {
-        address = tv.X(64, Rn);
+        address = v.X(64, Rn);
     }
     address = ir.Add(address, ir.Imm64(offset));
-    IR::UAny data = tv.X(datasize, Rt);
-    tv.Mem(address, datasize / 8, acctype, data);
+    IR::UAny data = v.X(datasize, Rt);
+    v.Mem(address, datasize / 8, acctype, data);
     return true;
 }
 
-static bool LoadRegister(TranslatorVisitor& tv, IREmitter& ir, const size_t datasize,
+static bool LoadRegister(TranslatorVisitor& v, IREmitter& ir, const size_t datasize,
                          const Imm<9> imm9, const Reg Rn, const Reg Rt) {
     const u64 offset = imm9.SignExtend<u64>();
     AccType acctype = AccType::UNPRIV;
@@ -34,19 +34,19 @@ static bool LoadRegister(TranslatorVisitor& tv, IREmitter& ir, const size_t data
 
     if (Rn == Reg::SP) {
         // TODO: Check Stack Alignment
-        address = tv.SP(64);
+        address = v.SP(64);
     } else {
-        address = tv.X(64, Rn);
+        address = v.X(64, Rn);
     }
     address = ir.Add(address, ir.Imm64(offset));
-    IR::UAny data = tv.Mem(address, datasize / 8, acctype);
+    IR::UAny data = v.Mem(address, datasize / 8, acctype);
     // max is used to zeroextend < 32 to 32, and > 32 to 64
     const size_t extended_size = std::max<size_t>(32, datasize);
-    tv.X(extended_size, Rt, tv.ZeroExtend(data, extended_size));
+    v.X(extended_size, Rt, v.ZeroExtend(data, extended_size));
     return true;
 }
 
-static bool LoadRegisterSigned(TranslatorVisitor& tv, IREmitter& ir, const size_t datasize,
+static bool LoadRegisterSigned(TranslatorVisitor& v, IREmitter& ir, const size_t datasize,
                                const Imm<2> opc, const Imm<9> imm9, const Reg Rn, const Reg Rt) {
     const u64 offset = imm9.SignExtend<u64>();
     AccType acctype = AccType::UNPRIV;
@@ -68,22 +68,22 @@ static bool LoadRegisterSigned(TranslatorVisitor& tv, IREmitter& ir, const size_
 
     IR::U64 address;
     if (Rn == Reg::SP) {
         // TODO: Check Stack Alignment
-        address = tv.SP(64);
+        address = v.SP(64);
     } else {
-        address = tv.X(64, Rn);
+        address = v.X(64, Rn);
     }
     address = ir.Add(address, ir.Imm64(offset));
 
     switch (memop) {
     case MemOp::STORE:
-        tv.Mem(address, datasize / 8, acctype, tv.X(datasize, Rt));
+        v.Mem(address, datasize / 8, acctype, v.X(datasize, Rt));
         break;
     case MemOp::LOAD: {
-        IR::UAny data = tv.Mem(address, datasize / 8, acctype);
+        IR::UAny data = v.Mem(address, datasize / 8, acctype);
         if (is_signed) {
-            tv.X(regsize, Rt, tv.SignExtend(data, regsize));
+            v.X(regsize, Rt, v.SignExtend(data, regsize));
        } else {
-            tv.X(regsize, Rt, tv.ZeroExtend(data, regsize));
+            v.X(regsize, Rt, v.ZeroExtend(data, regsize));
        }
        break;
    }

View file

@@ -10,7 +10,7 @@
 namespace Dynarmic::A64 {
 
-static bool SharedDecodeAndOperation(TranslatorVisitor& tv, IREmitter& ir, bool wback, MemOp memop,
+static bool SharedDecodeAndOperation(TranslatorVisitor& v, IREmitter& ir, bool wback, MemOp memop,
                                      bool Q, bool S, bool R, bool replicate, boost::optional<Reg> Rm,
                                      Imm<3> opcode, Imm<2> size, Reg Rn, Vec Vt) {
     const size_t selem = (opcode.Bit<0>() << 1 | u32{R}) + 1;
@@ -23,17 +23,17 @@ static bool SharedDecodeAndOperation(TranslatorVisitor& tv, IREmitter& ir, bool
         break;
     case 1:
         if (size.Bit<0>()) {
-            return tv.UnallocatedEncoding();
+            return v.UnallocatedEncoding();
         }
         index = Q << 2 | S << 1 | u32{size.Bit<1>()};
         break;
     case 2:
         if (size.Bit<1>()) {
-            return tv.UnallocatedEncoding();
+            return v.UnallocatedEncoding();
         }
         if (size.Bit<0>()) {
            if (S) {
-                return tv.UnallocatedEncoding();
+                return v.UnallocatedEncoding();
            }
            index = Q;
            scale = 3;
@@ -43,7 +43,7 @@ static bool SharedDecodeAndOperation(TranslatorVisitor& tv, IREmitter& ir, bool
         break;
     case 3:
         if (memop == MemOp::STORE || S) {
-            return tv.UnallocatedEncoding();
+            return v.UnallocatedEncoding();
         }
         scale = size.ZeroExtend();
         break;
@@ -56,33 +56,33 @@ static bool SharedDecodeAndOperation(TranslatorVisitor& tv, IREmitter& ir, bool
 
     IR::U64 address;
     if (Rn == Reg::SP)
         // TODO: Check SP Alignment
-        address = tv.SP(64);
+        address = v.SP(64);
     else
-        address = tv.X(64, Rn);
+        address = v.X(64, Rn);
 
     IR::U64 offs = ir.Imm64(0);
     if (replicate) {
         for (size_t s = 0; s < selem; s++) {
             const Vec tt = static_cast<Vec>((VecNumber(Vt) + s) % 32);
-            const IR::UAnyU128 element = tv.Mem(ir.Add(address, offs), ebytes, AccType::VEC);
+            const IR::UAnyU128 element = v.Mem(ir.Add(address, offs), ebytes, AccType::VEC);
             const IR::U128 broadcasted_element = ir.VectorBroadcast(esize, element);
-            tv.V(datasize, tt, broadcasted_element);
+            v.V(datasize, tt, broadcasted_element);
             offs = ir.Add(offs, ir.Imm64(ebytes));
         }
     } else {
         for (size_t s = 0; s < selem; s++) {
             const Vec tt = static_cast<Vec>((VecNumber(Vt) + s) % 32);
-            const IR::U128 rval = tv.V(128, tt);
+            const IR::U128 rval = v.V(128, tt);
             if (memop == MemOp::LOAD) {
-                const IR::UAny elem = tv.Mem(ir.Add(address, offs), ebytes, AccType::VEC);
+                const IR::UAny elem = v.Mem(ir.Add(address, offs), ebytes, AccType::VEC);
                 const IR::U128 vec = ir.VectorSetElement(esize, rval, index, elem);
-                tv.V(128, tt, vec);
+                v.V(128, tt, vec);
             } else {
                 const IR::UAny elem = ir.VectorGetElement(esize, rval, index);
-                tv.Mem(ir.Add(address, offs), ebytes, AccType::VEC, elem);
+                v.Mem(ir.Add(address, offs), ebytes, AccType::VEC, elem);
             }
             offs = ir.Add(offs, ir.Imm64(ebytes));
         }
@@ -90,11 +90,11 @@ static bool SharedDecodeAndOperation(TranslatorVisitor& tv, IREmitter& ir, bool
 
     if (wback) {
         if (*Rm != Reg::SP)
-            offs = tv.X(64, *Rm);
+            offs = v.X(64, *Rm);
        if (Rn == Reg::SP)
-            tv.SP(64, ir.Add(address, offs));
+            v.SP(64, ir.Add(address, offs));
        else
-            tv.X(64, Rn, ir.Add(address, offs));
+            v.X(64, Rn, ir.Add(address, offs));
     }
 
     return true;

View file

@@ -8,8 +8,8 @@
 namespace Dynarmic::A64 {
 
-static bool DataCacheInstruction(TranslatorVisitor& tv, IREmitter& ir, DataCacheOperation op, const Reg Rt) {
-    ir.DataCacheOperationRaised(op, tv.X(64, Rt));
+static bool DataCacheInstruction(TranslatorVisitor& v, IREmitter& ir, DataCacheOperation op, const Reg Rt) {
+    ir.DataCacheOperationRaised(op, v.X(64, Rt));
     return true;
 }