A32: Implement ASIMD modified immediate functions
Implements VBIC, VMOV, VMVN, and VORR modified immediate instructions.
parent 659d78c9c4
commit fc112e61f2
4 changed files with 155 additions and 11 deletions
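For background, the heart of this change is the AdvSIMDExpandImm helper added below, which expands the instruction's 8-bit immediate into a 64-bit vector constant according to cmode and op. The following is a minimal standalone sketch of the two simplest expansion cases only (cmode<3:1> = 000 replicates imm8 into every 32-bit lane, cmode<3:1> = 100 into every 16-bit lane); Replicate and ExpandImm8 are illustrative names using plain uint64_t arithmetic, not the project's Imm<>/Common helpers.

#include <cstdint>

// Replicate the low `bit_size` bits of `value` across a 64-bit word (sketch).
constexpr uint64_t Replicate(uint64_t value, int bit_size) {
    uint64_t result = 0;
    for (int i = 0; i < 64; i += bit_size) {
        result |= value << i;
    }
    return result;
}

// Sketch of the two simplest modified-immediate expansions; the remaining
// cmode/op combinations are omitted here.
constexpr uint64_t ExpandImm8(unsigned cmode, uint64_t imm8) {
    switch ((cmode >> 1) & 0b111) {
    case 0b000: return Replicate(imm8, 32);
    case 0b100: return Replicate(imm8, 16);
    default:    return 0; // not covered by this sketch
    }
}

static_assert(ExpandImm8(0b0000, 0xAB) == 0x000000AB'000000AB, "32-bit lanes");
static_assert(ExpandImm8(0b1000, 0xAB) == 0x00AB00AB'00AB00AB, "16-bit lanes");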
@@ -123,6 +123,7 @@ if ("A32" IN_LIST DYNARMIC_FRONTENDS)
         frontend/A32/location_descriptor.h
         frontend/A32/PSR.h
         frontend/A32/translate/impl/asimd_load_store_structures.cpp
+        frontend/A32/translate/impl/asimd_one_reg_modified_immediate.cpp
         frontend/A32/translate/impl/asimd_three_same.cpp
         frontend/A32/translate/impl/asimd_two_regs_misc.cpp
         frontend/A32/translate/impl/barrier.cpp
@@ -108,17 +108,7 @@ INST(asimd_VSWP, "VSWP", "111100111D110010dddd000
 //INST(asimd_VCVT_integer,   "VCVT (integer)",                     "111100111-11--11----011xxx-0----") // ASIMD
 
 // One register and modified immediate
-//INST(asimd_VMOV_imm,       "VMOV (immediate)",                   "1111001a1-000bcd----0xx00-01efgh") // ASIMD
-//INST(asimd_VORR_imm,       "VORR (immediate)",                   "1111001a1-000bcd----0xx10-01efgh") // ASIMD
-//INST(asimd_VMOV_imm,       "VMOV (immediate)",                   "1111001a1-000bcd----10x00-01efgh") // ASIMD
-//INST(asimd_VORR_imm,       "VORR (immediate)",                   "1111001a1-000bcd----10x10-01efgh") // ASIMD
-//INST(asimd_VMOV_imm,       "VMOV (immediate)",                   "1111001a1-000bcd----11xx0-01efgh") // ASIMD
-//INST(asimd_VMVN_imm,       "VMVN (immediate)",                   "1111001a1-000bcd----0xx00-11efgh") // ASIMD
-//INST(asimd_VBIC_imm,       "VBIC (immediate)",                   "1111001a1-000bcd----0xx10-11efgh") // ASIMD
-//INST(asimd_VMVN_imm,       "VMVN (immediate)",                   "1111001a1-000bcd----10x00-11efgh") // ASIMD
-//INST(asimd_VBIC_imm,       "VBIC (immediate)",                   "1111001a1-000bcd----10x10-11efgh") // ASIMD
-//INST(asimd_VMVN_imm,       "VMVN (immediate)",                   "1111001a1-000bcd----110x0-11efgh") // ASIMD
-//INST(asimd_VMOV_imm,       "VMOV (immediate)",                   "1111001a1-000bcd----11100-11efgh") // ASIMD
+INST(asimd_VMOV_imm,         "VBIC, VMOV, VMVN, VORR (immediate)", "1111001a1D000bcdVVVVmmmm0Qo1efgh") // ASIMD
 
 // Advanced SIMD load/store structures
 INST(v8_VST_multiple,        "VST{1-4} (multiple)",                "111101000D00nnnnddddxxxxzzaammmm") // v8
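For readers unfamiliar with the decoder tables, the new entry's bit pattern assigns instruction bits to the handler's arguments roughly as sketched below; a, b, c, d, e, f, g, h concatenate to form imm8. This standalone snippet uses plain shifts and masks purely for illustration (the real decoder is generated from the INST table), and DecodeOneRegModifiedImm is a hypothetical name.

#include <cstdint>

// Field layout implied by "1111001a1D000bcdVVVVmmmm0Qo1efgh"
// (the leftmost character is bit 31).
struct OneRegModifiedImmFields {
    uint32_t a, D, b, c, d, Vd, cmode, Q, op, e, f, g, h;
};

constexpr OneRegModifiedImmFields DecodeOneRegModifiedImm(uint32_t insn) {
    return {
        (insn >> 24) & 1,   // a
        (insn >> 22) & 1,   // D
        (insn >> 18) & 1,   // b
        (insn >> 17) & 1,   // c
        (insn >> 16) & 1,   // d
        (insn >> 12) & 0xF, // Vd
        (insn >> 8) & 0xF,  // cmode
        (insn >> 6) & 1,    // Q
        (insn >> 5) & 1,    // o (op)
        (insn >> 3) & 1,    // e
        (insn >> 2) & 1,    // f
        (insn >> 1) & 1,    // g
        insn & 1,           // h
    };
}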
@@ -0,0 +1,149 @@
/* This file is part of the dynarmic project.
 * Copyright (c) 2020 MerryMage
 * SPDX-License-Identifier: 0BSD
 */

#include "common/assert.h"
#include "common/bit_util.h"

#include "frontend/A32/translate/impl/translate_arm.h"

namespace Dynarmic::A32 {
namespace {
ExtReg ToExtRegD(size_t base, bool bit) {
    return ExtReg::D0 + (base + (bit ? 16 : 0));
}

u64 AdvSIMDExpandImm(bool op, Imm<4> cmode, Imm<8> imm8) {
    switch (cmode.Bits<1, 3>()) {
    case 0b000:
        return Common::Replicate<u64>(imm8.ZeroExtend<u64>(), 32);
    case 0b001:
        return Common::Replicate<u64>(imm8.ZeroExtend<u64>() << 8, 32);
    case 0b010:
        return Common::Replicate<u64>(imm8.ZeroExtend<u64>() << 16, 32);
    case 0b011:
        return Common::Replicate<u64>(imm8.ZeroExtend<u64>() << 24, 32);
    case 0b100:
        return Common::Replicate<u64>(imm8.ZeroExtend<u64>(), 16);
    case 0b101:
        return Common::Replicate<u64>(imm8.ZeroExtend<u64>() << 8, 16);
    case 0b110:
        if (!cmode.Bit<0>()) {
            return Common::Replicate<u64>((imm8.ZeroExtend<u64>() << 8) | Common::Ones<u64>(8), 32);
        }
        return Common::Replicate<u64>((imm8.ZeroExtend<u64>() << 16) | Common::Ones<u64>(16), 32);
    case 0b111:
        if (!cmode.Bit<0>() && !op) {
            return Common::Replicate<u64>(imm8.ZeroExtend<u64>(), 8);
        }
        if (!cmode.Bit<0>() && op) {
            u64 result = 0;
            result |= imm8.Bit<0>() ? Common::Ones<u64>(8) << (0 * 8) : 0;
            result |= imm8.Bit<1>() ? Common::Ones<u64>(8) << (1 * 8) : 0;
            result |= imm8.Bit<2>() ? Common::Ones<u64>(8) << (2 * 8) : 0;
            result |= imm8.Bit<3>() ? Common::Ones<u64>(8) << (3 * 8) : 0;
            result |= imm8.Bit<4>() ? Common::Ones<u64>(8) << (4 * 8) : 0;
            result |= imm8.Bit<5>() ? Common::Ones<u64>(8) << (5 * 8) : 0;
            result |= imm8.Bit<6>() ? Common::Ones<u64>(8) << (6 * 8) : 0;
            result |= imm8.Bit<7>() ? Common::Ones<u64>(8) << (7 * 8) : 0;
            return result;
        }
        if (cmode.Bit<0>() && !op) {
            u64 result = 0;
            result |= imm8.Bit<7>() ? 0x80000000 : 0;
            result |= imm8.Bit<6>() ? 0x3E000000 : 0x40000000;
            result |= imm8.Bits<0, 5, u64>() << 19;
            return Common::Replicate<u64>(result, 32);
        }
        if (cmode.Bit<0>() && op) {
            u64 result = 0;
            result |= imm8.Bit<7>() ? 0x80000000'00000000 : 0;
            result |= imm8.Bit<6>() ? 0x3FC00000'00000000 : 0x40000000'00000000;
            result |= imm8.Bits<0, 5, u64>() << 48;
            return result;
        }
    }
    UNREACHABLE();
}
} // Anonymous namespace

bool ArmTranslatorVisitor::asimd_VMOV_imm(Imm<1> a, bool D, Imm<1> b, Imm<1> c, Imm<1> d, size_t Vd,
                                          Imm<4> cmode, bool Q, bool op, Imm<1> e, Imm<1> f, Imm<1> g, Imm<1> h) {
    if (Q && Common::Bit<0>(Vd)) {
        return UndefinedInstruction();
    }

    const auto d_reg = ToExtRegD(Vd, D);
    const size_t regs = Q ? 2 : 1;
    const auto imm = AdvSIMDExpandImm(op, cmode, concatenate(a, b, c, d, e, f, g, h));

    // VMOV
    const auto mov = [&] {
        const auto imm64 = ir.Imm64(imm);
        for (size_t i = 0; i < regs; i++) {
            ir.SetExtendedRegister(d_reg + i, imm64);
        }
        return true;
    };

    // VMVN
    const auto mvn = [&] {
        const auto imm64 = ir.Imm64(~imm);
        for (size_t i = 0; i < regs; i++) {
            ir.SetExtendedRegister(d_reg + i, imm64);
        }
        return true;
    };

    // VORR
    const auto orr = [&] {
        const auto imm64 = ir.Imm64(imm);
        for (size_t i = 0; i < regs; i++) {
            const auto d_index = d_reg + i;
            const auto reg_value = ir.GetExtendedRegister(d_index);
            ir.SetExtendedRegister(d_index, ir.Or(reg_value, imm64));
        }
        return true;
    };

    // VBIC
    const auto bic = [&] {
        const auto imm64 = ir.Imm64(~imm);
        for (size_t i = 0; i < regs; i++) {
            const auto d_index = d_reg + i;
            const auto reg_value = ir.GetExtendedRegister(d_index);
            ir.SetExtendedRegister(d_index, ir.And(reg_value, imm64));
        }
        return true;
    };

    switch (concatenate(cmode, Imm<1>{op}).ZeroExtend()) {
    case 0b00000: case 0b00100:
    case 0b01000: case 0b01100:
    case 0b10000: case 0b10100:
    case 0b11000: case 0b11010:
    case 0b11100: case 0b11101:
    case 0b11110:
        return mov();
    case 0b11111:
        return UndefinedInstruction();
    case 0b00001: case 0b00101:
    case 0b01001: case 0b01101:
    case 0b10001: case 0b10101:
    case 0b11001: case 0b11011:
        return mvn();
    case 0b00010: case 0b00110:
    case 0b01010: case 0b01110:
    case 0b10010: case 0b10110:
        return orr();
    case 0b00011: case 0b00111:
    case 0b01011: case 0b01111:
    case 0b10011: case 0b10111:
        return bic();
    }

    UNREACHABLE();
}

} // namespace Dynarmic::A32
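The cmode:op switch at the end of the new handler selects which of the four operations a given encoding performs. As a compact restatement only (a sketch, not project code; SelectOp and the Op enum are illustrative names):

enum class Op { MOV, MVN, ORR, BIC, Undefined };

// Restates the cmode:op dispatch from the handler above.
constexpr Op SelectOp(unsigned cmode, bool op) {
    if (cmode == 0b1111) return op ? Op::Undefined : Op::MOV; // cmode=1111 with op=1 is UNDEFINED
    if (cmode == 0b1110) return Op::MOV;                      // both op values are VMOV forms
    if (cmode >= 0b1100) return op ? Op::MVN : Op::MOV;       // 110x: shifted-ones 32-bit forms
    if ((cmode & 1) == 0) return op ? Op::MVN : Op::MOV;      // even cmode: plain move forms
    return op ? Op::BIC : Op::ORR;                            // odd cmode: bitwise forms
}

static_assert(SelectOp(0b0000, false) == Op::MOV, "cmode:op = 00000 is VMOV");
static_assert(SelectOp(0b1011, true) == Op::BIC, "cmode:op = 10111 is VBIC");
static_assert(SelectOp(0b1111, true) == Op::Undefined, "cmode:op = 11111 is UNDEFINED");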
@@ -429,6 +429,10 @@ struct ArmTranslatorVisitor final {
     bool vfp_VLDM_a1(Cond cond, bool p, bool u, bool D, bool w, Reg n, size_t Vd, Imm<8> imm8);
     bool vfp_VLDM_a2(Cond cond, bool p, bool u, bool D, bool w, Reg n, size_t Vd, Imm<8> imm8);
 
+    // Advanced SIMD one register, modified immediate
+    bool asimd_VMOV_imm(Imm<1> a, bool D, Imm<1> b, Imm<1> c, Imm<1> d, size_t Vd,
+                        Imm<4> cmode, bool Q, bool op, Imm<1> e, Imm<1> f, Imm<1> g, Imm<1> h);
+
     // Advanced SIMD three register variants
     bool asimd_VAND_reg(bool D, size_t Vn, size_t Vd, bool N, bool Q, bool M, size_t Vm);
     bool asimd_VBIC_reg(bool D, size_t Vn, size_t Vd, bool N, bool Q, bool M, size_t Vm);