load_store_*: Make bracing consistent and variables const where applicable

Makes bracing consistent and marks variables const where applicable, to
match the rest of the codebase.

In most of the bracing cases, braces would need to be added to these
conditionals anyway once stack-pointer alignment checks are introduced.
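
To illustrate the two conventions, a minimal before/after sketch (a hypothetical, self-contained example with placeholder ReadSP/ReadX helpers standing in for the TranslatorVisitor accessors; not code from this diff):

    #include <cstdint>

    // Hypothetical stand-ins so the sketch compiles on its own; the real code
    // reads SP/X through TranslatorVisitor and the IR emitter.
    std::uint64_t ReadSP() { return 0; }
    std::uint64_t ReadX(int reg) { return static_cast<std::uint64_t>(reg); }

    // Before: unbraced single-statement conditionals and a mutable local.
    std::uint64_t AddressBefore(bool use_sp, int reg, std::uint64_t offset) {
        std::uint64_t address;
        if (use_sp)
            address = ReadSP();
        else
            address = ReadX(reg);
        std::uint64_t result = address + offset;
        return result;
    }

    // After: braces on every conditional body, const where the value never changes.
    std::uint64_t AddressAfter(bool use_sp, int reg, std::uint64_t offset) {
        std::uint64_t address;
        if (use_sp) {
            address = ReadSP();
        } else {
            address = ReadX(reg);
        }
        const std::uint64_t result = address + offset;
        return result;
    }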
Authored by Lioncash on 2019-04-12 23:51:32 -04:00; committed by MerryMage
parent b91c6c8bae
commit b5bf890584
6 changed files with 110 additions and 91 deletions


@@ -52,13 +52,13 @@ static bool ExclusiveSharedDecodeAndOperation(TranslatorVisitor& v, bool pair, s
         } else {
             data = v.X(elsize, Rt);
         }
-        IR::U32 status = v.ExclusiveMem(address, dbytes, acctype, data);
+        const IR::U32 status = v.ExclusiveMem(address, dbytes, acctype, data);
         v.X(32, *Rs, status);
         break;
     }
     case MemOp::LOAD: {
         v.ir.SetExclusive(address, dbytes);
-        IR::UAnyU128 data = v.Mem(address, dbytes, acctype);
+        const IR::UAnyU128 data = v.Mem(address, dbytes, acctype);
         if (pair && elsize == 64) {
             v.X(64, Rt, v.ir.VectorGetElement(64, data, 0));
             v.X(64, *Rt2, v.ir.VectorGetElement(64, data, 1));
@@ -164,12 +164,12 @@ static bool OrderedSharedDecodeAndOperation(TranslatorVisitor& v, size_t size, b
     switch (memop) {
     case MemOp::STORE: {
-        IR::UAny data = v.X(datasize, Rt);
+        const IR::UAny data = v.X(datasize, Rt);
         v.Mem(address, dbytes, acctype, data);
         break;
     }
     case MemOp::LOAD: {
-        IR::UAny data = v.Mem(address, dbytes, acctype);
+        const IR::UAny data = v.Mem(address, dbytes, acctype);
         v.X(regsize, Rt, v.ZeroExtend(data, regsize));
         break;
     }


@@ -57,11 +57,12 @@ static bool SharedDecodeAndOperation(TranslatorVisitor& v, bool wback, MemOp mem
     }
     IR::U64 address;
-    if (Rn == Reg::SP)
+    if (Rn == Reg::SP) {
         // TODO: Check SP Alignment
         address = v.SP(64);
-    else
+    } else {
         address = v.X(64, Rn);
+    }
     IR::U64 offs = v.ir.Imm64(0);
     if (selem == 1) {
@@ -94,13 +95,16 @@ static bool SharedDecodeAndOperation(TranslatorVisitor& v, bool wback, MemOp mem
     }
     if (wback) {
-        if (*Rm != Reg::SP)
+        if (*Rm != Reg::SP) {
             offs = v.X(64, *Rm);
-        if (Rn == Reg::SP)
+        }
+        if (Rn == Reg::SP) {
             v.SP(64, v.ir.Add(address, offs));
-        else
+        } else {
             v.X(64, Rn, v.ir.Add(address, offs));
+        }
     }
     return true;
 }


@@ -27,8 +27,6 @@ static bool LoadStoreRegisterImmediate(TranslatorVisitor& v, bool wback, bool po
         signed_ = true;
     }
-    const size_t datasize = 8 << scale;
     if (memop == MemOp::LOAD && wback && Rn == Rt && Rn != Reg::R31) {
         return v.UnpredictableInstruction();
     }
@@ -38,22 +36,24 @@ static bool LoadStoreRegisterImmediate(TranslatorVisitor& v, bool wback, bool po
     // TODO: Check SP alignment
     IR::U64 address = Rn == Reg::SP ? IR::U64(v.SP(64)) : IR::U64(v.X(64, Rn));
-    if (!postindex)
+    if (!postindex) {
         address = v.ir.Add(address, v.ir.Imm64(offset));
+    }
+    const size_t datasize = 8 << scale;
     switch (memop) {
     case MemOp::STORE: {
-        auto data = v.X(datasize, Rt);
+        const auto data = v.X(datasize, Rt);
         v.Mem(address, datasize / 8, AccType::NORMAL, data);
         break;
     }
     case MemOp::LOAD: {
-        auto data = v.Mem(address, datasize / 8, AccType::NORMAL);
-        if (signed_)
+        const auto data = v.Mem(address, datasize / 8, AccType::NORMAL);
+        if (signed_) {
             v.X(regsize, Rt, v.SignExtend(data, regsize));
-        else
+        } else {
             v.X(regsize, Rt, v.ZeroExtend(data, regsize));
+        }
         break;
     }
     case MemOp::PREFETCH:
@@ -62,13 +62,16 @@ static bool LoadStoreRegisterImmediate(TranslatorVisitor& v, bool wback, bool po
     }
     if (wback) {
-        if (postindex)
+        if (postindex) {
             address = v.ir.Add(address, v.ir.Imm64(offset));
-        if (Rn == Reg::SP)
+        }
+        if (Rn == Reg::SP) {
             v.SP(64, address);
-        else
+        } else {
             v.X(64, Rn, address);
+        }
     }
     return true;
 }
@@ -155,6 +158,7 @@ static bool LoadStoreSIMD(TranslatorVisitor& v, bool wback, bool postindex, size
         if (postindex) {
             address = v.ir.Add(address, v.ir.Imm64(offset));
         }
+
         if (Rn == Reg::SP) {
             v.SP(64, address);
         } else {
@@ -166,84 +170,78 @@ static bool LoadStoreSIMD(TranslatorVisitor& v, bool wback, bool postindex, size
 }
 bool TranslatorVisitor::STR_imm_fpsimd_1(Imm<2> size, Imm<1> opc_1, Imm<9> imm9, bool not_postindex, Reg Rn, Vec Vt) {
-    const bool wback = true;
-    const bool postindex = !not_postindex;
     const size_t scale = concatenate(opc_1, size).ZeroExtend<size_t>();
     if (scale > 4) {
         return UnallocatedEncoding();
     }
+    const bool wback = true;
+    const bool postindex = !not_postindex;
     const u64 offset = imm9.SignExtend<u64>();
     return LoadStoreSIMD(*this, wback, postindex, scale, offset, MemOp::STORE, Rn, Vt);
 }
 bool TranslatorVisitor::STR_imm_fpsimd_2(Imm<2> size, Imm<1> opc_1, Imm<12> imm12, Reg Rn, Vec Vt) {
-    const bool wback = false;
-    const bool postindex = false;
     const size_t scale = concatenate(opc_1, size).ZeroExtend<size_t>();
     if (scale > 4) {
         return UnallocatedEncoding();
     }
+    const bool wback = false;
+    const bool postindex = false;
     const u64 offset = imm12.ZeroExtend<u64>() << scale;
     return LoadStoreSIMD(*this, wback, postindex, scale, offset, MemOp::STORE, Rn, Vt);
 }
 bool TranslatorVisitor::LDR_imm_fpsimd_1(Imm<2> size, Imm<1> opc_1, Imm<9> imm9, bool not_postindex, Reg Rn, Vec Vt) {
-    const bool wback = true;
-    const bool postindex = !not_postindex;
     const size_t scale = concatenate(opc_1, size).ZeroExtend<size_t>();
     if (scale > 4) {
         return UnallocatedEncoding();
     }
+    const bool wback = true;
+    const bool postindex = !not_postindex;
     const u64 offset = imm9.SignExtend<u64>();
     return LoadStoreSIMD(*this, wback, postindex, scale, offset, MemOp::LOAD, Rn, Vt);
 }
 bool TranslatorVisitor::LDR_imm_fpsimd_2(Imm<2> size, Imm<1> opc_1, Imm<12> imm12, Reg Rn, Vec Vt) {
-    const bool wback = false;
-    const bool postindex = false;
     const size_t scale = concatenate(opc_1, size).ZeroExtend<size_t>();
     if (scale > 4) {
         return UnallocatedEncoding();
     }
+    const bool wback = false;
+    const bool postindex = false;
     const u64 offset = imm12.ZeroExtend<u64>() << scale;
     return LoadStoreSIMD(*this, wback, postindex, scale, offset, MemOp::LOAD, Rn, Vt);
 }
 bool TranslatorVisitor::STUR_fpsimd(Imm<2> size, Imm<1> opc_1, Imm<9> imm9, Reg Rn, Vec Vt) {
-    const bool wback = false;
-    const bool postindex = false;
     const size_t scale = concatenate(opc_1, size).ZeroExtend<size_t>();
     if (scale > 4) {
         return UnallocatedEncoding();
     }
+    const bool wback = false;
+    const bool postindex = false;
     const u64 offset = imm9.SignExtend<u64>();
     return LoadStoreSIMD(*this, wback, postindex, scale, offset, MemOp::STORE, Rn, Vt);
 }
 bool TranslatorVisitor::LDUR_fpsimd(Imm<2> size, Imm<1> opc_1, Imm<9> imm9, Reg Rn, Vec Vt) {
-    const bool wback = false;
-    const bool postindex = false;
     const size_t scale = concatenate(opc_1, size).ZeroExtend<size_t>();
     if (scale > 4) {
         return UnallocatedEncoding();
     }
+    const bool wback = false;
+    const bool postindex = false;
     const u64 offset = imm9.SignExtend<u64>();
     return LoadStoreSIMD(*this, wback, postindex, scale, offset, MemOp::LOAD, Rn, Vt);


@@ -9,46 +9,51 @@
 namespace Dynarmic::A64 {
 bool TranslatorVisitor::STP_LDP_gen(Imm<2> opc, bool not_postindex, bool wback, Imm<1> L, Imm<7> imm7, Reg Rt2, Reg Rn, Reg Rt) {
-    const bool postindex = !not_postindex;
+    if ((L == 0 && opc.Bit<0>() == 1) || opc == 0b11) {
+        return UnallocatedEncoding();
+    }
     const MemOp memop = L == 1 ? MemOp::LOAD : MemOp::STORE;
-    if ((L == 0 && opc.Bit<0>() == 1) || opc == 0b11)
-        return UnallocatedEncoding();
+    if (memop == MemOp::LOAD && wback && (Rt == Rn || Rt2 == Rn) && Rn != Reg::R31) {
+        return UnpredictableInstruction();
+    }
+    if (memop == MemOp::STORE && wback && (Rt == Rn || Rt2 == Rn) && Rn != Reg::R31) {
+        return UnpredictableInstruction();
+    }
+    if (memop == MemOp::LOAD && Rt == Rt2) {
+        return UnpredictableInstruction();
+    }
+    IR::U64 address;
+    if (Rn == Reg::SP) {
+        // TODO: Check SP Alignment
+        address = SP(64);
+    } else {
+        address = X(64, Rn);
+    }
+    const bool postindex = !not_postindex;
     const bool signed_ = opc.Bit<0>() != 0;
     const size_t scale = 2 + opc.Bit<1>();
     const size_t datasize = 8 << scale;
     const u64 offset = imm7.SignExtend<u64>() << scale;
-    if (memop == MemOp::LOAD && wback && (Rt == Rn || Rt2 == Rn) && Rn != Reg::R31)
-        return UnpredictableInstruction();
-    if (memop == MemOp::STORE && wback && (Rt == Rn || Rt2 == Rn) && Rn != Reg::R31)
-        return UnpredictableInstruction();
-    if (memop == MemOp::LOAD && Rt == Rt2)
-        return UnpredictableInstruction();
-    IR::U64 address;
-    const size_t dbytes = datasize / 8;
-    if (Rn == Reg::SP)
-        // TODO: Check SP Alignment
-        address = SP(64);
-    else
-        address = X(64, Rn);
-    if (!postindex)
+    if (!postindex) {
         address = ir.Add(address, ir.Imm64(offset));
+    }
+    const size_t dbytes = datasize / 8;
     switch (memop) {
     case MemOp::STORE: {
-        IR::U32U64 data1 = X(datasize, Rt);
-        IR::U32U64 data2 = X(datasize, Rt2);
+        const IR::U32U64 data1 = X(datasize, Rt);
+        const IR::U32U64 data2 = X(datasize, Rt2);
         Mem(address, dbytes, AccType::NORMAL, data1);
         Mem(ir.Add(address, ir.Imm64(dbytes)), dbytes, AccType::NORMAL, data2);
         break;
     }
     case MemOp::LOAD: {
-        IR::U32U64 data1 = Mem(address, dbytes, AccType::NORMAL);
-        IR::U32U64 data2 = Mem(ir.Add(address, ir.Imm64(dbytes)), dbytes, AccType::NORMAL);
+        const IR::U32U64 data1 = Mem(address, dbytes, AccType::NORMAL);
+        const IR::U32U64 data2 = Mem(ir.Add(address, ir.Imm64(dbytes)), dbytes, AccType::NORMAL);
         if (signed_) {
             X(64, Rt, SignExtend(data1, 64));
             X(64, Rt2, SignExtend(data2, 64));
@@ -63,42 +68,47 @@ bool TranslatorVisitor::STP_LDP_gen(Imm<2> opc, bool not_postindex, bool wback,
     }
     if (wback) {
-        if (postindex)
+        if (postindex) {
             address = ir.Add(address, ir.Imm64(offset));
-        if (Rn == Reg::SP)
+        }
+        if (Rn == Reg::SP) {
             SP(64, address);
-        else
+        } else {
             X(64, Rn, address);
+        }
     }
     return true;
 }
 bool TranslatorVisitor::STP_LDP_fpsimd(Imm<2> opc, bool not_postindex, bool wback, Imm<1> L, Imm<7> imm7, Vec Vt2, Reg Rn, Vec Vt) {
-    const bool postindex = !not_postindex;
+    if (opc == 0b11) {
+        return UnallocatedEncoding();
+    }
     const MemOp memop = L == 1 ? MemOp::LOAD : MemOp::STORE;
-    if (opc == 0b11)
-        return UnallocatedEncoding();
+    if (memop == MemOp::LOAD && Vt == Vt2) {
+        return UnpredictableInstruction();
+    }
+    IR::U64 address;
+    if (Rn == Reg::SP) {
+        // TODO: Check SP Alignment
+        address = SP(64);
+    } else {
+        address = X(64, Rn);
+    }
+    const bool postindex = !not_postindex;
     const size_t scale = 2 + opc.ZeroExtend<size_t>();
     const size_t datasize = 8 << scale;
     const u64 offset = imm7.SignExtend<u64>() << scale;
     const size_t dbytes = datasize / 8;
-    if (memop == MemOp::LOAD && Vt == Vt2)
-        return UnpredictableInstruction();
-    IR::U64 address;
-    if (Rn == Reg::SP)
-        // TODO: Check SP Alignment
-        address = SP(64);
-    else
-        address = X(64, Rn);
-    if (!postindex)
+    if (!postindex) {
         address = ir.Add(address, ir.Imm64(offset));
+    }
     switch (memop) {
     case MemOp::STORE: {
@@ -128,13 +138,16 @@ bool TranslatorVisitor::STP_LDP_fpsimd(Imm<2> opc, bool not_postindex, bool wbac
     }
     if (wback) {
-        if (postindex)
+        if (postindex) {
             address = ir.Add(address, ir.Imm64(offset));
-        if (Rn == Reg::SP)
+        }
+        if (Rn == Reg::SP) {
             SP(64, address);
-        else
+        } else {
             X(64, Rn, address);
+        }
     }
     return true;
 }


@@ -51,12 +51,12 @@ static bool RegSharedDecodeAndOperation(TranslatorVisitor& v, size_t scale, u8 s
     switch (memop) {
     case MemOp::STORE: {
-        IR::UAny data = v.X(datasize, Rt);
+        const IR::UAny data = v.X(datasize, Rt);
         v.Mem(address, datasize / 8, acctype, data);
         break;
     }
     case MemOp::LOAD: {
-        IR::UAny data = v.Mem(address, datasize / 8, acctype);
+        const IR::UAny data = v.Mem(address, datasize / 8, acctype);
         if (signed_) {
             v.X(regsize, Rt, v.SignExtend(data, regsize));
         } else {


@@ -54,11 +54,12 @@ static bool SharedDecodeAndOperation(TranslatorVisitor& v, bool wback, MemOp mem
     const size_t ebytes = esize / 8;
     IR::U64 address;
-    if (Rn == Reg::SP)
+    if (Rn == Reg::SP) {
         // TODO: Check SP Alignment
         address = v.SP(64);
-    else
+    } else {
         address = v.X(64, Rn);
+    }
     IR::U64 offs = v.ir.Imm64(0);
     if (replicate) {
@@ -89,13 +90,16 @@ static bool SharedDecodeAndOperation(TranslatorVisitor& v, bool wback, MemOp mem
     }
     if (wback) {
-        if (*Rm != Reg::SP)
+        if (*Rm != Reg::SP) {
             offs = v.X(64, *Rm);
-        if (Rn == Reg::SP)
+        }
+        if (Rn == Reg::SP) {
             v.SP(64, v.ir.Add(address, offs));
-        else
+        } else {
             v.X(64, Rn, v.ir.Add(address, offs));
+        }
     }
     return true;
 }