From 98ec9c5f90a28e1e618fdcf2cdef591bac7de944 Mon Sep 17 00:00:00 2001 From: MerryMage Date: Sat, 27 Jan 2018 22:36:55 +0000 Subject: [PATCH] A32: Change UserCallbacks to be similar to A64's interface --- include/dynarmic/A32/a32.h | 4 +- include/dynarmic/A32/callbacks.h | 71 ----- include/dynarmic/A32/config.h | 86 ++++++ src/CMakeLists.txt | 2 +- src/backend_x64/a32_emit_x64.cpp | 136 ++++---- src/backend_x64/a32_emit_x64.h | 6 +- src/backend_x64/a32_interface.cpp | 23 +- src/backend_x64/block_of_code.h | 2 +- src/backend_x64/devirtualize.h | 2 +- src/frontend/A32/translate/translate.h | 2 +- src/ir_opt/a32_constant_memory_reads_pass.cpp | 20 +- src/ir_opt/constant_propagation_pass.cpp | 2 +- src/ir_opt/passes.h | 4 +- tests/A32/fuzz_arm.cpp | 292 ++++++------------ tests/A32/fuzz_thumb.cpp | 184 +++-------- .../dyncom/arm_dyncom_interpreter.cpp | 4 +- .../skyeye_common/armstate.cpp | 16 +- .../skyeye_common/armstate.h | 4 +- tests/A32/test_thumb_instructions.cpp | 140 +++------ tests/A32/testenv.h | 93 ++++++ tests/CMakeLists.txt | 1 + 21 files changed, 472 insertions(+), 622 deletions(-) delete mode 100644 include/dynarmic/A32/callbacks.h create mode 100644 include/dynarmic/A32/config.h create mode 100644 tests/A32/testenv.h diff --git a/include/dynarmic/A32/a32.h b/include/dynarmic/A32/a32.h index 05a6fa71..88f3cf9c 100644 --- a/include/dynarmic/A32/a32.h +++ b/include/dynarmic/A32/a32.h @@ -11,7 +11,7 @@ #include #include -#include +#include namespace Dynarmic { namespace IR { @@ -26,7 +26,7 @@ struct Context; class Jit final { public: - explicit Jit(UserCallbacks callbacks); + explicit Jit(UserConfig conf); ~Jit(); /** diff --git a/include/dynarmic/A32/callbacks.h b/include/dynarmic/A32/callbacks.h deleted file mode 100644 index 13f0e783..00000000 --- a/include/dynarmic/A32/callbacks.h +++ /dev/null @@ -1,71 +0,0 @@ -/* This file is part of the dynarmic project. - * Copyright (c) 2016 MerryMage - * This software may be used and distributed according to the terms of the GNU - * General Public License version 2 or any later version. - */ - -#pragma once - -#include -#include -#include -#include - -namespace Dynarmic { -namespace A32 { - -class Coprocessor; -class Jit; - -/// These function pointers may be inserted into compiled code. -struct UserCallbacks { - struct Memory { - // All reads through this callback are 4-byte aligned. - // Memory must be interpreted as little endian. - std::uint32_t (*ReadCode)(std::uint32_t vaddr); - - // Reads through these callbacks may not be aligned. - // Memory must be interpreted as if ENDIANSTATE == 0, endianness will be corrected by the JIT. - std::uint8_t (*Read8)(std::uint32_t vaddr); - std::uint16_t (*Read16)(std::uint32_t vaddr); - std::uint32_t (*Read32)(std::uint32_t vaddr); - std::uint64_t (*Read64)(std::uint32_t vaddr); - - // Writes through these callbacks may not be aligned. - // Memory must be interpreted as if ENDIANSTATE == 0, endianness will be corrected by the JIT. - void (*Write8)(std::uint32_t vaddr, std::uint8_t value); - void (*Write16)(std::uint32_t vaddr, std::uint16_t value); - void (*Write32)(std::uint32_t vaddr, std::uint32_t value); - void (*Write64)(std::uint32_t vaddr, std::uint64_t value); - - // If this callback returns true, the JIT will assume MemoryRead* callbacks will always - // return the same value at any point in time for this vaddr. The JIT may use this information - // in optimizations. - // An conservative implementation that always returns false is safe. 
- bool (*IsReadOnlyMemory)(std::uint32_t vaddr); - } memory = {}; - - /// The intrepreter must execute only one instruction at PC. - void (*InterpreterFallback)(std::uint32_t pc, Jit* jit, void* user_arg); - void* user_arg = nullptr; - - // This callback is called whenever a SVC instruction is executed. - void (*CallSVC)(std::uint32_t swi); - - // Timing-related callbacks - void (*AddTicks)(std::uint64_t ticks); - std::uint64_t (*GetTicksRemaining)(); - - // Page Table - // The page table is used for faster memory access. If an entry in the table is nullptr, - // the JIT will fallback to calling the MemoryRead*/MemoryWrite* callbacks. - static constexpr std::size_t PAGE_BITS = 12; - static constexpr std::size_t NUM_PAGE_TABLE_ENTRIES = 1 << (32 - PAGE_BITS); - std::array* page_table = nullptr; - - // Coprocessors - std::array, 16> coprocessors; -}; - -} // namespace A32 -} // namespace Dynarmic diff --git a/include/dynarmic/A32/config.h b/include/dynarmic/A32/config.h new file mode 100644 index 00000000..aa8f40d2 --- /dev/null +++ b/include/dynarmic/A32/config.h @@ -0,0 +1,86 @@ +/* This file is part of the dynarmic project. + * Copyright (c) 2016 MerryMage + * This software may be used and distributed according to the terms of the GNU + * General Public License version 2 or any later version. + */ + +#pragma once + +#include +#include +#include +#include + +namespace Dynarmic { +namespace A32 { + +using VAddr = std::uint32_t; + +class Coprocessor; + +enum class Exception { + /// An UndefinedFault occurred due to executing an instruction with an unallocated encoding + UndefinedInstruction, + /// An unpredictable instruction is to be executed. Implementation-defined behaviour should now happen. + /// This behaviour is up to the user of this library to define. + UnpredictableInstruction, +}; + +/// These function pointers may be inserted into compiled code. +struct UserCallbacks { + virtual ~UserCallbacks() = default; + + // All reads through this callback are 4-byte aligned. + // Memory must be interpreted as little endian. + virtual std::uint32_t MemoryReadCode(VAddr vaddr) { return MemoryRead32(vaddr); } + + // Reads through these callbacks may not be aligned. + // Memory must be interpreted as if ENDIANSTATE == 0, endianness will be corrected by the JIT. + virtual std::uint8_t MemoryRead8(VAddr vaddr) = 0; + virtual std::uint16_t MemoryRead16(VAddr vaddr) = 0; + virtual std::uint32_t MemoryRead32(VAddr vaddr) = 0; + virtual std::uint64_t MemoryRead64(VAddr vaddr) = 0; + + // Writes through these callbacks may not be aligned. + virtual void MemoryWrite8(VAddr vaddr, std::uint8_t value) = 0; + virtual void MemoryWrite16(VAddr vaddr, std::uint16_t value) = 0; + virtual void MemoryWrite32(VAddr vaddr, std::uint32_t value) = 0; + virtual void MemoryWrite64(VAddr vaddr, std::uint64_t value) = 0; + + // If this callback returns true, the JIT will assume MemoryRead* callbacks will always + // return the same value at any point in time for this vaddr. The JIT may use this information + // in optimizations. + // A conservative implementation that always returns false is safe. + virtual bool IsReadOnlyMemory(VAddr /* vaddr */) { return false; } + + /// The interpreter must execute exactly num_instructions starting from PC. + virtual void InterpreterFallback(VAddr pc, size_t num_instructions) = 0; + + // This callback is called whenever a SVC instruction is executed.
+ virtual void CallSVC(std::uint32_t swi) = 0; + + virtual void ExceptionRaised(VAddr pc, Exception exception) = 0; + + // Timing-related callbacks + // ticks ticks have passed + virtual void AddTicks(std::uint64_t ticks) = 0; + // How many more ticks am I allowed to execute? + virtual std::uint64_t GetTicksRemaining() = 0; +}; + +struct UserConfig { + UserCallbacks* callbacks; + + // Page Table + // The page table is used for faster memory access. If an entry in the table is nullptr, + // the JIT will fallback to calling the MemoryRead*/MemoryWrite* callbacks. + static constexpr std::size_t PAGE_BITS = 12; + static constexpr std::size_t NUM_PAGE_TABLE_ENTRIES = 1 << (32 - PAGE_BITS); + std::array* page_table = nullptr; + + // Coprocessors + std::array, 16> coprocessors; +}; + +} // namespace A32 +} // namespace Dynarmic diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 21a053a7..82248e3c 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -1,6 +1,6 @@ add_library(dynarmic ../include/dynarmic/A32/a32.h - ../include/dynarmic/A32/callbacks.h + ../include/dynarmic/A32/config.h ../include/dynarmic/A32/coprocessor.h ../include/dynarmic/A32/coprocessor_util.h ../include/dynarmic/A32/disassembler.h diff --git a/src/backend_x64/a32_emit_x64.cpp b/src/backend_x64/a32_emit_x64.cpp index 5f0e429f..021b2432 100644 --- a/src/backend_x64/a32_emit_x64.cpp +++ b/src/backend_x64/a32_emit_x64.cpp @@ -13,6 +13,7 @@ #include "backend_x64/a32_jitstate.h" #include "backend_x64/abi.h" #include "backend_x64/block_of_code.h" +#include "backend_x64/devirtualize.h" #include "backend_x64/emit_x64.h" #include "common/address_range.h" #include "common/assert.h" @@ -67,8 +68,8 @@ bool A32EmitContext::FPSCR_DN() const { return Location().FPSCR().DN(); } -A32EmitX64::A32EmitX64(BlockOfCode* code, A32::UserCallbacks cb, A32::Jit* jit_interface) - : EmitX64(code), cb(cb), jit_interface(jit_interface) +A32EmitX64::A32EmitX64(BlockOfCode* code, A32::UserConfig config, A32::Jit* jit_interface) + : EmitX64(code), config(config), jit_interface(jit_interface) { GenMemoryAccessors(); code->PreludeComplete(); @@ -146,56 +147,56 @@ void A32EmitX64::GenMemoryAccessors() { code->align(); read_memory_8 = code->getCurr(); ABI_PushCallerSaveRegistersAndAdjustStack(code); - code->CallFunction(cb.memory.Read8); + DEVIRT(config.callbacks, &A32::UserCallbacks::MemoryRead8).EmitCall(code); ABI_PopCallerSaveRegistersAndAdjustStack(code); code->ret(); code->align(); read_memory_16 = code->getCurr(); ABI_PushCallerSaveRegistersAndAdjustStack(code); - code->CallFunction(cb.memory.Read16); + DEVIRT(config.callbacks, &A32::UserCallbacks::MemoryRead16).EmitCall(code); ABI_PopCallerSaveRegistersAndAdjustStack(code); code->ret(); code->align(); read_memory_32 = code->getCurr(); ABI_PushCallerSaveRegistersAndAdjustStack(code); - code->CallFunction(cb.memory.Read32); + DEVIRT(config.callbacks, &A32::UserCallbacks::MemoryRead32).EmitCall(code); ABI_PopCallerSaveRegistersAndAdjustStack(code); code->ret(); code->align(); read_memory_64 = code->getCurr(); ABI_PushCallerSaveRegistersAndAdjustStack(code); - code->CallFunction(cb.memory.Read64); + DEVIRT(config.callbacks, &A32::UserCallbacks::MemoryRead64).EmitCall(code); ABI_PopCallerSaveRegistersAndAdjustStack(code); code->ret(); code->align(); write_memory_8 = code->getCurr(); ABI_PushCallerSaveRegistersAndAdjustStack(code); - code->CallFunction(cb.memory.Write8); + DEVIRT(config.callbacks, &A32::UserCallbacks::MemoryWrite8).EmitCall(code); ABI_PopCallerSaveRegistersAndAdjustStack(code); 
code->ret(); code->align(); write_memory_16 = code->getCurr(); ABI_PushCallerSaveRegistersAndAdjustStack(code); - code->CallFunction(cb.memory.Write16); + DEVIRT(config.callbacks, &A32::UserCallbacks::MemoryWrite16).EmitCall(code); ABI_PopCallerSaveRegistersAndAdjustStack(code); code->ret(); code->align(); write_memory_32 = code->getCurr(); ABI_PushCallerSaveRegistersAndAdjustStack(code); - code->CallFunction(cb.memory.Write32); + DEVIRT(config.callbacks, &A32::UserCallbacks::MemoryWrite32).EmitCall(code); ABI_PopCallerSaveRegistersAndAdjustStack(code); code->ret(); code->align(); write_memory_64 = code->getCurr(); ABI_PushCallerSaveRegistersAndAdjustStack(code); - code->CallFunction(cb.memory.Write64); + DEVIRT(config.callbacks, &A32::UserCallbacks::MemoryWrite64).EmitCall(code); ABI_PopCallerSaveRegistersAndAdjustStack(code); code->ret(); } @@ -568,14 +569,14 @@ void A32EmitX64::EmitA32CallSupervisor(A32EmitContext& ctx, IR::Inst* inst) { ctx.reg_alloc.HostCall(nullptr); code->SwitchMxcsrOnExit(); - code->mov(code->ABI_PARAM1, qword[r15 + offsetof(A32JitState, cycles_to_run)]); - code->sub(code->ABI_PARAM1, qword[r15 + offsetof(A32JitState, cycles_remaining)]); - code->CallFunction(cb.AddTicks); + code->mov(code->ABI_PARAM2, qword[r15 + offsetof(A32JitState, cycles_to_run)]); + code->sub(code->ABI_PARAM2, qword[r15 + offsetof(A32JitState, cycles_remaining)]); + DEVIRT(config.callbacks, &A32::UserCallbacks::AddTicks).EmitCall(code); ctx.reg_alloc.EndOfAllocScope(); auto args = ctx.reg_alloc.GetArgumentInfo(inst); - ctx.reg_alloc.HostCall(nullptr, args[0]); - code->CallFunction(cb.CallSVC); - code->CallFunction(cb.GetTicksRemaining); + ctx.reg_alloc.HostCall(nullptr, {}, args[0]); + DEVIRT(config.callbacks, &A32::UserCallbacks::CallSVC).EmitCall(code); + DEVIRT(config.callbacks, &A32::UserCallbacks::GetTicksRemaining).EmitCall(code); code->mov(qword[r15 + offsetof(A32JitState, cycles_to_run)], code->ABI_RETURN); code->mov(qword[r15 + offsetof(A32JitState, cycles_remaining)], code->ABI_RETURN); code->SwitchMxcsrOnEntry(); @@ -632,26 +633,27 @@ void A32EmitX64::EmitA32SetExclusive(A32EmitContext& ctx, IR::Inst* inst) { code->mov(dword[r15 + offsetof(A32JitState, exclusive_address)], address); } -template -static void ReadMemory(BlockOfCode* code, RegAlloc& reg_alloc, IR::Inst* inst, const A32::UserCallbacks& cb, size_t bit_size, RawFn raw_fn, const CodePtr wrapped_fn) { +template +static void ReadMemory(BlockOfCode* code, RegAlloc& reg_alloc, IR::Inst* inst, const A32::UserConfig& config, const CodePtr wrapped_fn) { + constexpr size_t bit_size = Common::BitSize(); auto args = reg_alloc.GetArgumentInfo(inst); - if (!cb.page_table) { - reg_alloc.HostCall(inst, args[0]); - code->CallFunction(raw_fn); + if (!config.page_table) { + reg_alloc.HostCall(inst, {}, args[0]); + DEVIRT(config.callbacks, raw_fn).EmitCall(code); return; } - reg_alloc.UseScratch(args[0], ABI_PARAM1); + reg_alloc.UseScratch(args[0], ABI_PARAM2); Xbyak::Reg64 result = reg_alloc.ScratchGpr({ABI_RETURN}); - Xbyak::Reg32 vaddr = code->ABI_PARAM1.cvt32(); + Xbyak::Reg32 vaddr = code->ABI_PARAM2.cvt32(); Xbyak::Reg64 page_index = reg_alloc.ScratchGpr(); Xbyak::Reg64 page_offset = reg_alloc.ScratchGpr(); Xbyak::Label abort, end; - code->mov(result, reinterpret_cast(cb.page_table)); + code->mov(result, reinterpret_cast(config.page_table)); code->mov(page_index.cvt32(), vaddr); code->shr(page_index.cvt32(), 12); code->mov(result, qword[result + page_index * 8]); @@ -684,28 +686,29 @@ static void ReadMemory(BlockOfCode* code, 
RegAlloc& reg_alloc, IR::Inst* inst, c reg_alloc.DefineValue(inst, result); } -template -static void WriteMemory(BlockOfCode* code, RegAlloc& reg_alloc, IR::Inst* inst, const A32::UserCallbacks& cb, size_t bit_size, RawFn raw_fn, const CodePtr wrapped_fn) { +template +static void WriteMemory(BlockOfCode* code, RegAlloc& reg_alloc, IR::Inst* inst, const A32::UserConfig& config, const CodePtr wrapped_fn) { + constexpr size_t bit_size = Common::BitSize(); auto args = reg_alloc.GetArgumentInfo(inst); - if (!cb.page_table) { - reg_alloc.HostCall(nullptr, args[0], args[1]); - code->CallFunction(raw_fn); + if (!config.page_table) { + reg_alloc.HostCall(nullptr, {}, args[0], args[1]); + DEVIRT(config.callbacks, raw_fn).EmitCall(code); return; } reg_alloc.ScratchGpr({ABI_RETURN}); - reg_alloc.UseScratch(args[0], ABI_PARAM1); - reg_alloc.UseScratch(args[1], ABI_PARAM2); + reg_alloc.UseScratch(args[0], ABI_PARAM2); + reg_alloc.UseScratch(args[1], ABI_PARAM3); - Xbyak::Reg32 vaddr = code->ABI_PARAM1.cvt32(); - Xbyak::Reg64 value = code->ABI_PARAM2; + Xbyak::Reg32 vaddr = code->ABI_PARAM2.cvt32(); + Xbyak::Reg64 value = code->ABI_PARAM3; Xbyak::Reg64 page_index = reg_alloc.ScratchGpr(); Xbyak::Reg64 page_offset = reg_alloc.ScratchGpr(); Xbyak::Label abort, end; - code->mov(rax, reinterpret_cast(cb.page_table)); + code->mov(rax, reinterpret_cast(config.page_table)); code->mov(page_index.cvt32(), vaddr); code->shr(page_index.cvt32(), 12); code->mov(rax, qword[rax + page_index * 8]); @@ -737,44 +740,44 @@ static void WriteMemory(BlockOfCode* code, RegAlloc& reg_alloc, IR::Inst* inst, } void A32EmitX64::EmitA32ReadMemory8(A32EmitContext& ctx, IR::Inst* inst) { - ReadMemory(code, ctx.reg_alloc, inst, cb, 8, cb.memory.Read8, read_memory_8); + ReadMemory(code, ctx.reg_alloc, inst, config, read_memory_8); } void A32EmitX64::EmitA32ReadMemory16(A32EmitContext& ctx, IR::Inst* inst) { - ReadMemory(code, ctx.reg_alloc, inst, cb, 16, cb.memory.Read16, read_memory_16); + ReadMemory(code, ctx.reg_alloc, inst, config, read_memory_16); } void A32EmitX64::EmitA32ReadMemory32(A32EmitContext& ctx, IR::Inst* inst) { - ReadMemory(code, ctx.reg_alloc, inst, cb, 32, cb.memory.Read32, read_memory_32); + ReadMemory(code, ctx.reg_alloc, inst, config, read_memory_32); } void A32EmitX64::EmitA32ReadMemory64(A32EmitContext& ctx, IR::Inst* inst) { - ReadMemory(code, ctx.reg_alloc, inst, cb, 64, cb.memory.Read64, read_memory_64); + ReadMemory(code, ctx.reg_alloc, inst, config, read_memory_64); } void A32EmitX64::EmitA32WriteMemory8(A32EmitContext& ctx, IR::Inst* inst) { - WriteMemory(code, ctx.reg_alloc, inst, cb, 8, cb.memory.Write8, write_memory_8); + WriteMemory(code, ctx.reg_alloc, inst, config, write_memory_8); } void A32EmitX64::EmitA32WriteMemory16(A32EmitContext& ctx, IR::Inst* inst) { - WriteMemory(code, ctx.reg_alloc, inst, cb, 16, cb.memory.Write16, write_memory_16); + WriteMemory(code, ctx.reg_alloc, inst, config, write_memory_16); } void A32EmitX64::EmitA32WriteMemory32(A32EmitContext& ctx, IR::Inst* inst) { - WriteMemory(code, ctx.reg_alloc, inst, cb, 32, cb.memory.Write32, write_memory_32); + WriteMemory(code, ctx.reg_alloc, inst, config, write_memory_32); } void A32EmitX64::EmitA32WriteMemory64(A32EmitContext& ctx, IR::Inst* inst) { - WriteMemory(code, ctx.reg_alloc, inst, cb, 64, cb.memory.Write64, write_memory_64); + WriteMemory(code, ctx.reg_alloc, inst, config, write_memory_64); } -template -static void ExclusiveWrite(BlockOfCode* code, RegAlloc& reg_alloc, IR::Inst* inst, FunctionPointer fn, bool 
prepend_high_word) { +template +static void ExclusiveWrite(BlockOfCode* code, RegAlloc& reg_alloc, IR::Inst* inst, const A32::UserConfig& config, bool prepend_high_word) { auto args = reg_alloc.GetArgumentInfo(inst); if (prepend_high_word) { - reg_alloc.HostCall(nullptr, args[0], args[1], args[2]); + reg_alloc.HostCall(nullptr, {}, args[0], args[1], args[2]); } else { - reg_alloc.HostCall(nullptr, args[0], args[1]); + reg_alloc.HostCall(nullptr, {}, args[0], args[1]); } Xbyak::Reg32 passed = reg_alloc.ScratchGpr().cvt32(); Xbyak::Reg32 tmp = code->ABI_RETURN.cvt32(); // Use one of the unusued HostCall registers. @@ -784,17 +787,17 @@ static void ExclusiveWrite(BlockOfCode* code, RegAlloc& reg_alloc, IR::Inst* ins code->mov(passed, u32(1)); code->cmp(code->byte[r15 + offsetof(A32JitState, exclusive_state)], u8(0)); code->je(end); - code->mov(tmp, code->ABI_PARAM1); + code->mov(tmp, code->ABI_PARAM2); code->xor_(tmp, dword[r15 + offsetof(A32JitState, exclusive_address)]); code->test(tmp, A32JitState::RESERVATION_GRANULE_MASK); code->jne(end); code->mov(code->byte[r15 + offsetof(A32JitState, exclusive_state)], u8(0)); if (prepend_high_word) { - code->mov(code->ABI_PARAM2.cvt32(), code->ABI_PARAM2.cvt32()); // zero extend to 64-bits - code->shl(code->ABI_PARAM3, 32); - code->or_(code->ABI_PARAM2, code->ABI_PARAM3); + code->mov(code->ABI_PARAM3.cvt32(), code->ABI_PARAM3.cvt32()); // zero extend to 64-bits + code->shl(code->ABI_PARAM4, 32); + code->or_(code->ABI_PARAM3, code->ABI_PARAM4); } - code->CallFunction(fn); + DEVIRT(config.callbacks, fn).EmitCall(code); code->xor_(passed, passed); code->L(end); @@ -802,19 +805,19 @@ static void ExclusiveWrite(BlockOfCode* code, RegAlloc& reg_alloc, IR::Inst* ins } void A32EmitX64::EmitA32ExclusiveWriteMemory8(A32EmitContext& ctx, IR::Inst* inst) { - ExclusiveWrite(code, ctx.reg_alloc, inst, cb.memory.Write8, false); + ExclusiveWrite(code, ctx.reg_alloc, inst, config, false); } void A32EmitX64::EmitA32ExclusiveWriteMemory16(A32EmitContext& ctx, IR::Inst* inst) { - ExclusiveWrite(code, ctx.reg_alloc, inst, cb.memory.Write16, false); + ExclusiveWrite(code, ctx.reg_alloc, inst, config, false); } void A32EmitX64::EmitA32ExclusiveWriteMemory32(A32EmitContext& ctx, IR::Inst* inst) { - ExclusiveWrite(code, ctx.reg_alloc, inst, cb.memory.Write32, false); + ExclusiveWrite(code, ctx.reg_alloc, inst, config, false); } void A32EmitX64::EmitA32ExclusiveWriteMemory64(A32EmitContext& ctx, IR::Inst* inst) { - ExclusiveWrite(code, ctx.reg_alloc, inst, cb.memory.Write64, true); + ExclusiveWrite(code, ctx.reg_alloc, inst, config, true); } static void EmitCoprocessorException() { @@ -843,7 +846,7 @@ void A32EmitX64::EmitA32CoprocInternalOperation(A32EmitContext& ctx, IR::Inst* i A32::CoprocReg CRm = static_cast(coproc_info[5]); unsigned opc2 = static_cast(coproc_info[6]); - std::shared_ptr coproc = cb.coprocessors[coproc_num]; + std::shared_ptr coproc = config.coprocessors[coproc_num]; if (!coproc) { EmitCoprocessorException(); return; @@ -869,7 +872,7 @@ void A32EmitX64::EmitA32CoprocSendOneWord(A32EmitContext& ctx, IR::Inst* inst) { A32::CoprocReg CRm = static_cast(coproc_info[4]); unsigned opc2 = static_cast(coproc_info[5]); - std::shared_ptr coproc = cb.coprocessors[coproc_num]; + std::shared_ptr coproc = config.coprocessors[coproc_num]; if (!coproc) { EmitCoprocessorException(); return; @@ -908,7 +911,7 @@ void A32EmitX64::EmitA32CoprocSendTwoWords(A32EmitContext& ctx, IR::Inst* inst) unsigned opc = static_cast(coproc_info[2]); A32::CoprocReg CRm = 
static_cast(coproc_info[3]); - std::shared_ptr coproc = cb.coprocessors[coproc_num]; + std::shared_ptr coproc = config.coprocessors[coproc_num]; if (!coproc) { EmitCoprocessorException(); return; @@ -951,7 +954,7 @@ void A32EmitX64::EmitA32CoprocGetOneWord(A32EmitContext& ctx, IR::Inst* inst) { A32::CoprocReg CRm = static_cast(coproc_info[4]); unsigned opc2 = static_cast(coproc_info[5]); - std::shared_ptr coproc = cb.coprocessors[coproc_num]; + std::shared_ptr coproc = config.coprocessors[coproc_num]; if (!coproc) { EmitCoprocessorException(); return; @@ -991,7 +994,7 @@ void A32EmitX64::EmitA32CoprocGetTwoWords(A32EmitContext& ctx, IR::Inst* inst) { unsigned opc = coproc_info[2]; A32::CoprocReg CRm = static_cast(coproc_info[3]); - std::shared_ptr coproc = cb.coprocessors[coproc_num]; + std::shared_ptr coproc = config.coprocessors[coproc_num]; if (!coproc) { EmitCoprocessorException(); return; @@ -1039,7 +1042,7 @@ void A32EmitX64::EmitA32CoprocLoadWords(A32EmitContext& ctx, IR::Inst* inst) { bool has_option = coproc_info[4] != 0; boost::optional option{has_option, coproc_info[5]}; - std::shared_ptr coproc = cb.coprocessors[coproc_num]; + std::shared_ptr coproc = config.coprocessors[coproc_num]; if (!coproc) { EmitCoprocessorException(); return; @@ -1065,7 +1068,7 @@ void A32EmitX64::EmitA32CoprocStoreWords(A32EmitContext& ctx, IR::Inst* inst) { bool has_option = coproc_info[4] != 0; boost::optional option{has_option, coproc_info[5]}; - std::shared_ptr coproc = cb.coprocessors[coproc_num]; + std::shared_ptr coproc = config.coprocessors[coproc_num]; if (!coproc) { EmitCoprocessorException(); return; @@ -1085,12 +1088,11 @@ void A32EmitX64::EmitTerminalImpl(IR::Term::Interpret terminal, IR::LocationDesc ASSERT_MSG(A32::LocationDescriptor{terminal.next}.EFlag() == A32::LocationDescriptor{initial_location}.EFlag(), "Unimplemented"); ASSERT_MSG(terminal.num_instructions == 1, "Unimplemented"); - code->mov(code->ABI_PARAM1.cvt32(), A32::LocationDescriptor{terminal.next}.PC()); - code->mov(code->ABI_PARAM2, reinterpret_cast(jit_interface)); - code->mov(code->ABI_PARAM3, reinterpret_cast(cb.user_arg)); - code->mov(MJitStateReg(A32::Reg::PC), code->ABI_PARAM1.cvt32()); + code->mov(code->ABI_PARAM2.cvt32(), A32::LocationDescriptor{terminal.next}.PC()); + code->mov(code->ABI_PARAM3.cvt32(), 1); + code->mov(MJitStateReg(A32::Reg::PC), code->ABI_PARAM2.cvt32()); code->SwitchMxcsrOnExit(); - code->CallFunction(cb.InterpreterFallback); + DEVIRT(config.callbacks, &A32::UserCallbacks::InterpreterFallback).EmitCall(code); code->ReturnFromRunCode(true); // TODO: Check cycles } diff --git a/src/backend_x64/a32_emit_x64.h b/src/backend_x64/a32_emit_x64.h index 5549f9aa..14c28716 100644 --- a/src/backend_x64/a32_emit_x64.h +++ b/src/backend_x64/a32_emit_x64.h @@ -12,7 +12,7 @@ #include "backend_x64/block_range_information.h" #include "backend_x64/emit_x64.h" #include "dynarmic/A32/a32.h" -#include "dynarmic/A32/callbacks.h" +#include "dynarmic/A32/config.h" #include "frontend/A32/location_descriptor.h" #include "frontend/ir/terminal.h" @@ -30,7 +30,7 @@ struct A32EmitContext final : public EmitContext { class A32EmitX64 final : public EmitX64 { public: - A32EmitX64(BlockOfCode* code, A32::UserCallbacks cb, A32::Jit* jit_interface); + A32EmitX64(BlockOfCode* code, A32::UserConfig config, A32::Jit* jit_interface); ~A32EmitX64() override; /** @@ -44,7 +44,7 @@ public: void InvalidateCacheRanges(const boost::icl::interval_set& ranges); protected: - const A32::UserCallbacks cb; + const A32::UserConfig config; 
A32::Jit* jit_interface; BlockRangeInformation block_ranges; diff --git a/src/backend_x64/a32_interface.cpp b/src/backend_x64/a32_interface.cpp index 8ea058ce..1583c042 100644 --- a/src/backend_x64/a32_interface.cpp +++ b/src/backend_x64/a32_interface.cpp @@ -18,6 +18,7 @@ #include "backend_x64/a32_jitstate.h" #include "backend_x64/block_of_code.h" #include "backend_x64/callback.h" +#include "backend_x64/devirtualize.h" #include "backend_x64/jitstate_info.h" #include "common/assert.h" #include "common/common_types.h" @@ -33,26 +34,26 @@ namespace Dynarmic::A32 { using namespace BackendX64; -static RunCodeCallbacks GenRunCodeCallbacks(A32::UserCallbacks cb, CodePtr (*LookupBlock)(void* lookup_block_arg), void* arg) { +static RunCodeCallbacks GenRunCodeCallbacks(A32::UserCallbacks* cb, CodePtr (*LookupBlock)(void* lookup_block_arg), void* arg) { return RunCodeCallbacks{ std::make_unique(LookupBlock, reinterpret_cast(arg)), - std::make_unique(cb.AddTicks), - std::make_unique(cb.GetTicksRemaining), + std::make_unique(DEVIRT(cb, &A32::UserCallbacks::AddTicks)), + std::make_unique(DEVIRT(cb, &A32::UserCallbacks::GetTicksRemaining)), }; } struct Jit::Impl { - Impl(Jit* jit, A32::UserCallbacks callbacks) - : block_of_code(GenRunCodeCallbacks(callbacks, &GetCurrentBlock, this), JitStateInfo{jit_state}) - , emitter(&block_of_code, callbacks, jit) - , callbacks(callbacks) + Impl(Jit* jit, A32::UserConfig config) + : block_of_code(GenRunCodeCallbacks(config.callbacks, &GetCurrentBlock, this), JitStateInfo{jit_state}) + , emitter(&block_of_code, config, jit) + , config(config) , jit_interface(jit) {} A32JitState jit_state; BlockOfCode block_of_code; A32EmitX64 emitter; - const A32::UserCallbacks callbacks; + const A32::UserConfig config; // Requests made during execution to invalidate the cache are queued up here. 
size_t invalid_cache_generation = 0; @@ -164,10 +165,10 @@ private: PerformCacheInvalidation(); } - IR::Block ir_block = A32::Translate(A32::LocationDescriptor{descriptor}, callbacks.memory.ReadCode); + IR::Block ir_block = A32::Translate(A32::LocationDescriptor{descriptor}, [this](u32 vaddr) { return config.callbacks->MemoryReadCode(vaddr); }); Optimization::A32GetSetElimination(ir_block); Optimization::DeadCodeElimination(ir_block); - Optimization::A32ConstantMemoryReads(ir_block, callbacks.memory); + Optimization::A32ConstantMemoryReads(ir_block, config.callbacks); Optimization::ConstantPropagation(ir_block); Optimization::DeadCodeElimination(ir_block); Optimization::VerificationPass(ir_block); @@ -175,7 +176,7 @@ private: } }; -Jit::Jit(UserCallbacks callbacks) : impl(std::make_unique(this, callbacks)) {} +Jit::Jit(UserConfig config) : impl(std::make_unique(this, config)) {} Jit::~Jit() {} diff --git a/src/backend_x64/block_of_code.h b/src/backend_x64/block_of_code.h index 57ad2835..59ec0e6e 100644 --- a/src/backend_x64/block_of_code.h +++ b/src/backend_x64/block_of_code.h @@ -6,6 +6,7 @@ #pragma once +#include #include #include @@ -16,7 +17,6 @@ #include "backend_x64/constant_pool.h" #include "backend_x64/jitstate_info.h" #include "common/common_types.h" -#include "dynarmic/A32/callbacks.h" namespace Dynarmic::BackendX64 { diff --git a/src/backend_x64/devirtualize.h b/src/backend_x64/devirtualize.h index 9c405571..e7714f11 100644 --- a/src/backend_x64/devirtualize.h +++ b/src/backend_x64/devirtualize.h @@ -30,7 +30,7 @@ struct ThunkBuilder { } // namespace impl template -ArgCallback Devirtualize(mp::class_type_t* this_) { +ArgCallback Devirtualize(mp::class_type_t* this_) { return ArgCallback{&impl::ThunkBuilder::Thunk, reinterpret_cast(this_)}; } diff --git a/src/frontend/A32/translate/translate.h b/src/frontend/A32/translate/translate.h index b251570c..9e089fe1 100644 --- a/src/frontend/A32/translate/translate.h +++ b/src/frontend/A32/translate/translate.h @@ -15,7 +15,7 @@ namespace Dynarmic::A32 { class LocationDescriptor; -using MemoryReadCodeFuncType = u32 (*)(u32 vaddr); +using MemoryReadCodeFuncType = std::function; /** * This function translates instructions in memory into our intermediate representation. diff --git a/src/ir_opt/a32_constant_memory_reads_pass.cpp b/src/ir_opt/a32_constant_memory_reads_pass.cpp index 19b0a52a..66aaa320 100644 --- a/src/ir_opt/a32_constant_memory_reads_pass.cpp +++ b/src/ir_opt/a32_constant_memory_reads_pass.cpp @@ -4,7 +4,7 @@ * General Public License version 2 or any later version. 
*/ -#include +#include #include "frontend/ir/basic_block.h" #include "frontend/ir/opcodes.h" @@ -12,7 +12,7 @@ namespace Dynarmic::Optimization { -void A32ConstantMemoryReads(IR::Block& block, const A32::UserCallbacks::Memory& memory_callbacks) { +void A32ConstantMemoryReads(IR::Block& block, A32::UserCallbacks* cb) { for (auto& inst : block) { switch (inst.GetOpcode()) { case IR::Opcode::A32SetCFlag: { @@ -27,8 +27,8 @@ void A32ConstantMemoryReads(IR::Block& block, const A32::UserCallbacks::Memory& break; u32 vaddr = inst.GetArg(0).GetU32(); - if (memory_callbacks.IsReadOnlyMemory(vaddr)) { - u8 value_from_memory = memory_callbacks.Read8(vaddr); + if (cb->IsReadOnlyMemory(vaddr)) { + u8 value_from_memory = cb->MemoryRead8(vaddr); inst.ReplaceUsesWith(IR::Value{value_from_memory}); } break; @@ -38,8 +38,8 @@ void A32ConstantMemoryReads(IR::Block& block, const A32::UserCallbacks::Memory& break; u32 vaddr = inst.GetArg(0).GetU32(); - if (memory_callbacks.IsReadOnlyMemory(vaddr)) { - u16 value_from_memory = memory_callbacks.Read16(vaddr); + if (cb->IsReadOnlyMemory(vaddr)) { + u16 value_from_memory = cb->MemoryRead16(vaddr); inst.ReplaceUsesWith(IR::Value{value_from_memory}); } break; @@ -49,8 +49,8 @@ void A32ConstantMemoryReads(IR::Block& block, const A32::UserCallbacks::Memory& break; u32 vaddr = inst.GetArg(0).GetU32(); - if (memory_callbacks.IsReadOnlyMemory(vaddr)) { - u32 value_from_memory = memory_callbacks.Read32(vaddr); + if (cb->IsReadOnlyMemory(vaddr)) { + u32 value_from_memory = cb->MemoryRead32(vaddr); inst.ReplaceUsesWith(IR::Value{value_from_memory}); } break; @@ -60,8 +60,8 @@ void A32ConstantMemoryReads(IR::Block& block, const A32::UserCallbacks::Memory& break; u32 vaddr = inst.GetArg(0).GetU32(); - if (memory_callbacks.IsReadOnlyMemory(vaddr)) { - u64 value_from_memory = memory_callbacks.Read64(vaddr); + if (cb->IsReadOnlyMemory(vaddr)) { + u64 value_from_memory = cb->MemoryRead64(vaddr); inst.ReplaceUsesWith(IR::Value{value_from_memory}); } break; diff --git a/src/ir_opt/constant_propagation_pass.cpp b/src/ir_opt/constant_propagation_pass.cpp index eb59c692..62d9bf01 100644 --- a/src/ir_opt/constant_propagation_pass.cpp +++ b/src/ir_opt/constant_propagation_pass.cpp @@ -4,7 +4,7 @@ * General Public License version 2 or any later version. 
*/ -#include +#include #include "frontend/ir/basic_block.h" #include "frontend/ir/opcodes.h" diff --git a/src/ir_opt/passes.h b/src/ir_opt/passes.h index cc685ff3..b4548a70 100644 --- a/src/ir_opt/passes.h +++ b/src/ir_opt/passes.h @@ -6,7 +6,7 @@ #pragma once -#include +#include #include namespace Dynarmic::IR { @@ -16,7 +16,7 @@ class Block; namespace Dynarmic::Optimization { void A32GetSetElimination(IR::Block& block); -void A32ConstantMemoryReads(IR::Block& block, const A32::UserCallbacks::Memory& memory_callbacks); +void A32ConstantMemoryReads(IR::Block& block, A32::UserCallbacks* cb); void A64GetSetElimination(IR::Block& block); void A64MergeInterpretBlocksPass(IR::Block& block, A64::UserCallbacks* cb); void ConstantPropagation(IR::Block& block); diff --git a/tests/A32/fuzz_arm.cpp b/tests/A32/fuzz_arm.cpp index 2db6211b..81e08858 100644 --- a/tests/A32/fuzz_arm.cpp +++ b/tests/A32/fuzz_arm.cpp @@ -28,6 +28,7 @@ #include "frontend/ir/location_descriptor.h" #include "ir_opt/passes.h" #include "rand_int.h" +#include "testenv.h" #include "A32/skyeye_interpreter/dyncom/arm_dyncom_interpreter.h" #include "A32/skyeye_interpreter/skyeye_common/armstate.h" @@ -37,126 +38,10 @@ using Dynarmic::Common::Bits; -struct WriteRecord { - size_t size; - u32 address; - u64 data; -}; - -static bool operator==(const WriteRecord& a, const WriteRecord& b) { - return std::tie(a.size, a.address, a.data) == std::tie(b.size, b.address, b.data); -} - -static u64 jit_num_ticks = 0; -static std::array code_mem{}; -static std::vector write_records; - -static u64 GetTicksRemaining(); -static void AddTicks(u64 ticks); -static bool IsReadOnlyMemory(u32 vaddr); -static u8 MemoryRead8(u32 vaddr); -static u16 MemoryRead16(u32 vaddr); -static u32 MemoryRead32(u32 vaddr); -static u64 MemoryRead64(u32 vaddr); -static u32 MemoryReadCode(u32 vaddr); -static void MemoryWrite8(u32 vaddr, u8 value); -static void MemoryWrite16(u32 vaddr, u16 value); -static void MemoryWrite32(u32 vaddr, u32 value); -static void MemoryWrite64(u32 vaddr, u64 value); -static void InterpreterFallback(u32 pc, Dynarmic::A32::Jit* jit, void*); -static Dynarmic::A32::UserCallbacks GetUserCallbacks(); - -static u64 GetTicksRemaining() { - return jit_num_ticks; -} -static void AddTicks(u64 ticks) { - if (ticks > jit_num_ticks) { - jit_num_ticks = 0; - return; - } - jit_num_ticks -= ticks; -} - -static bool IsReadOnlyMemory(u32 vaddr) { - return vaddr < code_mem.size(); -} -static u8 MemoryRead8(u32 vaddr) { - return static_cast(vaddr); -} -static u16 MemoryRead16(u32 vaddr) { - return static_cast(vaddr); -} -static u32 MemoryRead32(u32 vaddr) { - return vaddr; -} -static u64 MemoryRead64(u32 vaddr) { - return MemoryRead32(vaddr) | (u64(MemoryRead32(vaddr+4)) << 32); -} -static u32 MemoryReadCode(u32 vaddr) { - if (vaddr < code_mem.size() * sizeof(u32)) { - size_t index = vaddr / sizeof(u32); - return code_mem[index]; - } - return 0xeafffffe; // b +#0 -} - -static void MemoryWrite8(u32 vaddr, u8 value){ - write_records.push_back({8, vaddr, value}); -} -static void MemoryWrite16(u32 vaddr, u16 value){ - write_records.push_back({16, vaddr, value}); -} -static void MemoryWrite32(u32 vaddr, u32 value){ - write_records.push_back({32, vaddr, value}); -} -static void MemoryWrite64(u32 vaddr, u64 value){ - write_records.push_back({64, vaddr, value}); -} - -static void InterpreterFallback(u32 pc, Dynarmic::A32::Jit* jit, void*) { - ARMul_State interp_state{USER32MODE}; - interp_state.user_callbacks = GetUserCallbacks(); - interp_state.NumInstrsToExecute = 1; - - 
interp_state.Reg = jit->Regs(); - interp_state.ExtReg = jit->ExtRegs(); - interp_state.Cpsr = jit->Cpsr(); - interp_state.VFP[VFP_FPSCR] = jit->Fpscr(); - interp_state.Reg[15] = pc; - - InterpreterClearCache(); - InterpreterMainLoop(&interp_state); - - bool T = Dynarmic::Common::Bit<5>(interp_state.Cpsr); - interp_state.Reg[15] &= T ? 0xFFFFFFFE : 0xFFFFFFFC; - - jit->Regs() = interp_state.Reg; - jit->ExtRegs() = interp_state.ExtReg; - jit->SetCpsr(interp_state.Cpsr); - jit->SetFpscr(interp_state.VFP[VFP_FPSCR]); -} - -static void Fail() { - FAIL(); -} - -static Dynarmic::A32::UserCallbacks GetUserCallbacks() { - Dynarmic::A32::UserCallbacks user_callbacks{}; - user_callbacks.InterpreterFallback = &InterpreterFallback; - user_callbacks.CallSVC = (void (*)(u32)) &Fail; - user_callbacks.memory.IsReadOnlyMemory = &IsReadOnlyMemory; - user_callbacks.memory.Read8 = &MemoryRead8; - user_callbacks.memory.Read16 = &MemoryRead16; - user_callbacks.memory.Read32 = &MemoryRead32; - user_callbacks.memory.Read64 = &MemoryRead64; - user_callbacks.memory.ReadCode = &MemoryReadCode; - user_callbacks.memory.Write8 = &MemoryWrite8; - user_callbacks.memory.Write16 = &MemoryWrite16; - user_callbacks.memory.Write32 = &MemoryWrite32; - user_callbacks.memory.Write64 = &MemoryWrite64; - user_callbacks.GetTicksRemaining = &GetTicksRemaining; - user_callbacks.AddTicks = &AddTicks; - return user_callbacks; +static Dynarmic::A32::UserConfig GetUserConfig(ArmTestEnv* testenv) { + Dynarmic::A32::UserConfig user_config; + user_config.callbacks = testenv; + return user_config; } namespace { @@ -210,7 +95,9 @@ private: }; } // namespace -static bool DoesBehaviorMatch(const ARMul_State& interp, const Dynarmic::A32::Jit& jit, const std::vector& interp_write_records, const std::vector& jit_write_records) { +using WriteRecords = std::map; + +static bool DoesBehaviorMatch(const ARMul_State& interp, const Dynarmic::A32::Jit& jit, const WriteRecords& interp_write_records, const WriteRecords& jit_write_records) { return interp.Reg == jit.Regs() && interp.ExtReg == jit.ExtRegs() && interp.Cpsr == jit.Cpsr() @@ -219,13 +106,15 @@ static bool DoesBehaviorMatch(const ARMul_State& interp, const Dynarmic::A32::Ji } void FuzzJitArm(const size_t instruction_count, const size_t instructions_to_execute_count, const size_t run_count, const std::function instruction_generator) { + ArmTestEnv test_env; + // Prepare memory - code_mem.fill(0xEAFFFFFE); // b +#0 + test_env.code_mem.fill(0xEAFFFFFE); // b +#0 // Prepare test subjects ARMul_State interp{USER32MODE}; - interp.user_callbacks = GetUserCallbacks(); - Dynarmic::A32::Jit jit{GetUserCallbacks()}; + interp.user_callbacks = &test_env; + Dynarmic::A32::Jit jit{GetUserConfig(&test_env)}; for (size_t run_number = 0; run_number < run_count; run_number++) { interp.instruction_cache.clear(); @@ -256,26 +145,25 @@ void FuzzJitArm(const size_t instruction_count, const size_t instructions_to_exe jit.ExtRegs() = initial_extregs; jit.SetFpscr(initial_fpscr); - std::generate_n(code_mem.begin(), instruction_count, instruction_generator); + std::generate_n(test_env.code_mem.begin(), instruction_count, instruction_generator); // Run interpreter - write_records.clear(); - std::vector interp_write_records; + test_env.modified_memory.clear(); interp.NumInstrsToExecute = static_cast(instructions_to_execute_count); InterpreterMainLoop(&interp); - interp_write_records = write_records; + WriteRecords interp_write_records = test_env.modified_memory; { bool T = Dynarmic::Common::Bit<5>(interp.Cpsr); interp.Reg[15] 
&= T ? 0xFFFFFFFE : 0xFFFFFFFC; } // Run jit - write_records.clear(); - std::vector jit_write_records; + test_env.modified_memory.clear(); + WriteRecords jit_write_records; try { - jit_num_ticks = instructions_to_execute_count; + test_env.ticks_left = instructions_to_execute_count; jit.Run(); - jit_write_records = write_records; + jit_write_records = test_env.modified_memory; } catch (...) { printf("Caught something!\n"); goto dump_state; @@ -289,7 +177,7 @@ void FuzzJitArm(const size_t instruction_count, const size_t instructions_to_exe printf("\nInstruction Listing: \n"); for (size_t i = 0; i < instruction_count; i++) { - printf("%x: %s\n", code_mem[i], Dynarmic::A32::DisassembleArm(code_mem[i]).c_str()); + printf("%x: %s\n", test_env.code_mem[i], Dynarmic::A32::DisassembleArm(test_env.code_mem[i]).c_str()); } printf("\nInitial Register Listing: \n"); @@ -317,21 +205,21 @@ void FuzzJitArm(const size_t instruction_count, const size_t instructions_to_exe printf("\nInterp Write Records:\n"); for (auto& record : interp_write_records) { - printf("%zu [%x] = %" PRIx64 "\n", record.size, record.address, record.data); + printf("[%08x] = %02x\n", record.first, record.second); } printf("\nJIT Write Records:\n"); for (auto& record : jit_write_records) { - printf("%zu [%x] = %" PRIx64 "\n", record.size, record.address, record.data); + printf("[%08x] = %02x\n", record.first, record.second); } size_t num_insts = 0; while (num_insts < instructions_to_execute_count) { Dynarmic::A32::LocationDescriptor descriptor = {u32(num_insts * 4), Dynarmic::A32::PSR{}, Dynarmic::A32::FPSCR{}}; - Dynarmic::IR::Block ir_block = Dynarmic::A32::Translate(descriptor, &MemoryReadCode); + Dynarmic::IR::Block ir_block = Dynarmic::A32::Translate(descriptor, [&test_env](u32 vaddr) { return test_env.MemoryReadCode(vaddr); }); Dynarmic::Optimization::A32GetSetElimination(ir_block); Dynarmic::Optimization::DeadCodeElimination(ir_block); - Dynarmic::Optimization::A32ConstantMemoryReads(ir_block, GetUserCallbacks().memory); + Dynarmic::Optimization::A32ConstantMemoryReads(ir_block, &test_env); Dynarmic::Optimization::ConstantPropagation(ir_block); Dynarmic::Optimization::DeadCodeElimination(ir_block); Dynarmic::Optimization::VerificationPass(ir_block); @@ -353,7 +241,7 @@ void FuzzJitArm(const size_t instruction_count, const size_t instructions_to_exe } } -TEST_CASE( "arm: Optimization Failure (Randomized test case)", "[arm]" ) { +TEST_CASE( "arm: Optimization Failure (Randomized test case)", "[arm][A32]" ) { // This was a randomized test-case that was failing. // // IR produced for location {12, !T, !E} was: @@ -376,14 +264,15 @@ TEST_CASE( "arm: Optimization Failure (Randomized test case)", "[arm]" ) { // Changing the EmitSet*Flag instruction to declare their arguments as UseScratch // solved this bug. 
- Dynarmic::A32::Jit jit{GetUserCallbacks()}; - code_mem.fill({}); - code_mem[0] = 0xe35f0cd9; // cmp pc, #55552 - code_mem[1] = 0xe11c0474; // tst r12, r4, ror r4 - code_mem[2] = 0xe1a006a7; // mov r0, r7, lsr #13 - code_mem[3] = 0xe35107fa; // cmp r1, #0x3E80000 - code_mem[4] = 0xe2a54c8a; // adc r4, r5, #35328 - code_mem[5] = 0xeafffffe; // b +#0 + ArmTestEnv test_env; + Dynarmic::A32::Jit jit{GetUserConfig(&test_env)}; + test_env.code_mem.fill({}); + test_env.code_mem[0] = 0xe35f0cd9; // cmp pc, #55552 + test_env.code_mem[1] = 0xe11c0474; // tst r12, r4, ror r4 + test_env.code_mem[2] = 0xe1a006a7; // mov r0, r7, lsr #13 + test_env.code_mem[3] = 0xe35107fa; // cmp r1, #0x3E80000 + test_env.code_mem[4] = 0xe2a54c8a; // adc r4, r5, #35328 + test_env.code_mem[5] = 0xeafffffe; // b +#0 jit.Regs() = { 0x6973b6bb, 0x267ea626, 0x69debf49, 0x8f976895, 0x4ecd2d0d, 0xcf89b8c7, 0xb6713f85, 0x15e2aa5, @@ -391,7 +280,7 @@ TEST_CASE( "arm: Optimization Failure (Randomized test case)", "[arm]" ) { }; jit.SetCpsr(0x000001d0); // User-mode - jit_num_ticks = 6; + test_env.ticks_left = 6; jit.Run(); REQUIRE( jit.Regs()[0] == 0x00000af1 ); @@ -413,16 +302,17 @@ TEST_CASE( "arm: Optimization Failure (Randomized test case)", "[arm]" ) { REQUIRE( jit.Cpsr() == 0x200001d0 ); } -TEST_CASE( "arm: shsax r11, sp, r9 (Edge-case)", "[arm]" ) { +TEST_CASE( "arm: shsax r11, sp, r9 (Edge-case)", "[arm][A32]" ) { // This was a randomized test-case that was failing. // // The issue here was one of the words to be subtracted was 0x8000. // When the 2s complement was calculated by (~a + 1), it was 0x8000. - Dynarmic::A32::Jit jit{GetUserCallbacks()}; - code_mem.fill({}); - code_mem[0] = 0xe63dbf59; // shsax r11, sp, r9 - code_mem[1] = 0xeafffffe; // b +#0 + ArmTestEnv test_env; + Dynarmic::A32::Jit jit{GetUserConfig(&test_env)}; + test_env.code_mem.fill({}); + test_env.code_mem[0] = 0xe63dbf59; // shsax r11, sp, r9 + test_env.code_mem[1] = 0xeafffffe; // b +#0 jit.Regs() = { 0x3a3b8b18, 0x96156555, 0xffef039f, 0xafb946f2, 0x2030a69a, 0xafe09b2a, 0x896823c8, 0xabde0ded, @@ -430,7 +320,7 @@ TEST_CASE( "arm: shsax r11, sp, r9 (Edge-case)", "[arm]" ) { }; jit.SetCpsr(0x000001d0); // User-mode - jit_num_ticks = 2; + test_env.ticks_left = 2; jit.Run(); REQUIRE( jit.Regs()[0] == 0x3a3b8b18 ); @@ -452,22 +342,23 @@ TEST_CASE( "arm: shsax r11, sp, r9 (Edge-case)", "[arm]" ) { REQUIRE( jit.Cpsr() == 0x000001d0 ); } -TEST_CASE( "arm: uasx (Edge-case)", "[arm]" ) { +TEST_CASE( "arm: uasx (Edge-case)", "[arm][A32]" ) { // UASX's Rm<31:16> == 0x0000. // An implementation that depends on addition overflow to detect // if diff >= 0 will fail this testcase. 
- Dynarmic::A32::Jit jit{GetUserCallbacks()}; - code_mem.fill({}); - code_mem[0] = 0xe6549f35; // uasx r9, r4, r5 - code_mem[1] = 0xeafffffe; // b +#0 + ArmTestEnv test_env; + Dynarmic::A32::Jit jit{GetUserConfig(&test_env)}; + test_env.code_mem.fill({}); + test_env.code_mem[0] = 0xe6549f35; // uasx r9, r4, r5 + test_env.code_mem[1] = 0xeafffffe; // b +#0 jit.Regs()[4] = 0x8ed38f4c; jit.Regs()[5] = 0x0000261d; jit.Regs()[15] = 0x00000000; jit.SetCpsr(0x000001d0); // User-mode - jit_num_ticks = 2; + test_env.ticks_left = 2; jit.Run(); REQUIRE( jit.Regs()[4] == 0x8ed38f4c ); @@ -486,10 +377,11 @@ struct VfpTest { }; static void RunVfpTests(u32 instr, std::vector tests) { - Dynarmic::A32::Jit jit{GetUserCallbacks()}; - code_mem.fill({}); - code_mem[0] = instr; - code_mem[1] = 0xeafffffe; // b +#0 + ArmTestEnv test_env; + Dynarmic::A32::Jit jit{GetUserConfig(&test_env)}; + test_env.code_mem.fill({}); + test_env.code_mem[0] = instr; + test_env.code_mem[1] = 0xeafffffe; // b +#0 printf("vfp test 0x%08x\r", instr); @@ -500,7 +392,7 @@ static void RunVfpTests(u32 instr, std::vector tests) { jit.ExtRegs()[6] = test.b; jit.SetFpscr(test.initial_fpscr); - jit_num_ticks = 2; + test_env.ticks_left = 2; jit.Run(); const auto check = [&test, &jit](bool p) { @@ -524,21 +416,21 @@ static void RunVfpTests(u32 instr, std::vector tests) { } } -TEST_CASE("vfp: vadd", "[vfp]") { +TEST_CASE("vfp: vadd", "[vfp][A32]") { // vadd.f32 s2, s4, s6 RunVfpTests(0xEE321A03, { #include "vfp_vadd_f32.inc" }); } -TEST_CASE("vfp: vsub", "[vfp]") { +TEST_CASE("vfp: vsub", "[vfp][A32]") { // vsub.f32 s2, s4, s6 RunVfpTests(0xEE321A43, { #include "vfp_vsub_f32.inc" }); } -TEST_CASE("VFP: VMOV", "[JitX64][vfp]") { +TEST_CASE("VFP: VMOV", "[JitX64][vfp][A32]") { const auto is_valid = [](u32 instr) -> bool { return Bits<0, 6>(instr) != 0b111111 && Bits<12, 15>(instr) != 0b1111 @@ -562,7 +454,7 @@ TEST_CASE("VFP: VMOV", "[JitX64][vfp]") { }); } -TEST_CASE("VFP: VMOV (reg), VLDR, VSTR", "[JitX64][vfp]") { +TEST_CASE("VFP: VMOV (reg), VLDR, VSTR", "[JitX64][vfp][A32]") { const std::array instructions = {{ InstructionGenerator("1111000100000001000000e000000000"), // SETEND InstructionGenerator("cccc11101D110000dddd101z01M0mmmm"), // VMOV (reg) @@ -575,7 +467,7 @@ TEST_CASE("VFP: VMOV (reg), VLDR, VSTR", "[JitX64][vfp]") { }); } -TEST_CASE("VFP: VCMP", "[JitX64][vfp]") { +TEST_CASE("VFP: VCMP", "[JitX64][vfp][A32]") { const std::array instructions = {{ InstructionGenerator("cccc11101D110100dddd101zE1M0mmmm"), // VCMP InstructionGenerator("cccc11101D110101dddd101zE1000000"), // VCMP (zero) @@ -586,7 +478,7 @@ TEST_CASE("VFP: VCMP", "[JitX64][vfp]") { }); } -TEST_CASE("Fuzz ARM data processing instructions", "[JitX64]") { +TEST_CASE("Fuzz ARM data processing instructions", "[JitX64][A32]") { const std::array imm_instructions = {{ InstructionGenerator("cccc0010101Snnnnddddrrrrvvvvvvvv"), InstructionGenerator("cccc0010100Snnnnddddrrrrvvvvvvvv"), @@ -709,7 +601,7 @@ TEST_CASE("Fuzz ARM data processing instructions", "[JitX64]") { } } -TEST_CASE("Fuzz ARM load/store instructions (byte, half-word, word)", "[JitX64]") { +TEST_CASE("Fuzz ARM load/store instructions (byte, half-word, word)", "[JitX64][A32]") { auto EXD_valid = [](u32 inst) -> bool { return Bits<0, 3>(inst) % 2 == 0 && Bits<0, 3>(inst) != 14 && Bits<12, 15>(inst) != (Bits<0, 3>(inst) + 1); }; @@ -808,7 +700,7 @@ TEST_CASE("Fuzz ARM load/store instructions (byte, half-word, word)", "[JitX64]" } } -TEST_CASE("Fuzz ARM load/store multiple instructions", "[JitX64]") { 
+TEST_CASE("Fuzz ARM load/store multiple instructions", "[JitX64][A32]") { const std::array instructions = {{ InstructionGenerator("cccc100pu0w1nnnnxxxxxxxxxxxxxxxx"), // LDM InstructionGenerator("cccc100pu0w0nnnnxxxxxxxxxxxxxxxx"), // STM @@ -849,7 +741,7 @@ TEST_CASE("Fuzz ARM load/store multiple instructions", "[JitX64]") { FuzzJitArm(1, 1, 10000, instruction_select); } -TEST_CASE("Fuzz ARM branch instructions", "[JitX64]") { +TEST_CASE("Fuzz ARM branch instructions", "[JitX64][A32]") { const std::array instructions = {{ InstructionGenerator("1111101hvvvvvvvvvvvvvvvvvvvvvvvv"), InstructionGenerator("cccc000100101111111111110011mmmm", @@ -864,7 +756,7 @@ TEST_CASE("Fuzz ARM branch instructions", "[JitX64]") { }); } -TEST_CASE("Fuzz ARM reversal instructions", "[JitX64]") { +TEST_CASE("Fuzz ARM reversal instructions", "[JitX64][A32]") { const auto is_valid = [](u32 instr) -> bool { // R15 is UNPREDICTABLE return Bits<0, 3>(instr) != 0b1111 && Bits<12, 15>(instr) != 0b1111; @@ -883,7 +775,7 @@ TEST_CASE("Fuzz ARM reversal instructions", "[JitX64]") { } } -TEST_CASE("Fuzz ARM extension instructions", "[JitX64]") { +TEST_CASE("Fuzz ARM extension instructions", "[JitX64][A32]") { const auto is_valid = [](u32 instr) -> bool { // R15 as Rd or Rm is UNPREDICTABLE return Bits<0, 3>(instr) != 0b1111 && Bits<12, 15>(instr) != 0b1111; @@ -920,7 +812,7 @@ TEST_CASE("Fuzz ARM extension instructions", "[JitX64]") { } } -TEST_CASE("Fuzz ARM multiply instructions", "[JitX64]") { +TEST_CASE("Fuzz ARM multiply instructions", "[JitX64][A32]") { auto validate_d_m_n = [](u32 inst) -> bool { return Bits<16, 19>(inst) != 15 && Bits<8, 11>(inst) != 15 && @@ -970,7 +862,7 @@ TEST_CASE("Fuzz ARM multiply instructions", "[JitX64]") { } } -TEST_CASE("Fuzz ARM parallel instructions", "[JitX64][parallel]") { +TEST_CASE("Fuzz ARM parallel instructions", "[JitX64][parallel][A32]") { const auto is_valid = [](u32 instr) -> bool { // R15 as Rd, Rn, or Rm is UNPREDICTABLE return Bits<0, 3>(instr) != 0b1111 && Bits<12, 15>(instr) != 0b1111 && Bits<16, 19>(instr) != 0b1111; @@ -1093,7 +985,7 @@ TEST_CASE("Fuzz ARM parallel instructions", "[JitX64][parallel]") { } } -TEST_CASE("Fuzz ARM sum of absolute differences", "[JitX64]") { +TEST_CASE("Fuzz ARM sum of absolute differences", "[JitX64][A32]") { auto validate_d_m_n = [](u32 inst) -> bool { return Bits<16, 19>(inst) != 15 && Bits<8, 11>(inst) != 15 && @@ -1116,10 +1008,11 @@ TEST_CASE("Fuzz ARM sum of absolute differences", "[JitX64]") { } } -TEST_CASE( "SMUAD", "[JitX64]" ) { - Dynarmic::A32::Jit jit{GetUserCallbacks()}; - code_mem.fill({}); - code_mem[0] = 0xE700F211; // smuad r0, r1, r2 +TEST_CASE( "SMUAD", "[JitX64][A32]" ) { + ArmTestEnv test_env; + Dynarmic::A32::Jit jit{GetUserConfig(&test_env)}; + test_env.code_mem.fill({}); + test_env.code_mem[0] = 0xE700F211; // smuad r0, r1, r2 jit.Regs() = { 0, // Rd @@ -1132,7 +1025,7 @@ TEST_CASE( "SMUAD", "[JitX64]" ) { }; jit.SetCpsr(0x000001d0); // User-mode - jit_num_ticks = 6; + test_env.ticks_left = 6; jit.Run(); REQUIRE(jit.Regs()[0] == 0x80000000); @@ -1141,7 +1034,7 @@ TEST_CASE( "SMUAD", "[JitX64]" ) { REQUIRE(jit.Cpsr() == 0x080001d0); } -TEST_CASE("VFP: VPUSH, VPOP", "[JitX64][vfp]") { +TEST_CASE("VFP: VPUSH, VPOP", "[JitX64][vfp][A32]") { const auto is_valid = [](u32 instr) -> bool { auto regs = (instr & 0x100) ? 
(Bits<0, 7>(instr) >> 1) : Bits<0, 7>(instr); auto base = Bits<12, 15>(instr); @@ -1165,7 +1058,7 @@ TEST_CASE("VFP: VPUSH, VPOP", "[JitX64][vfp]") { }); } -TEST_CASE("Test ARM misc instructions", "[JitX64]") { +TEST_CASE("Test ARM misc instructions", "[JitX64][A32]") { const auto is_clz_valid = [](u32 instr) -> bool { // R15 as Rd, or Rm is UNPREDICTABLE return Bits<0, 3>(instr) != 0b1111 && Bits<12, 15>(instr) != 0b1111; @@ -1180,7 +1073,7 @@ TEST_CASE("Test ARM misc instructions", "[JitX64]") { } } -TEST_CASE("Test ARM MSR instructions", "[JitX64]") { +TEST_CASE("Test ARM MSR instructions", "[JitX64][A32]") { const auto is_msr_valid = [](u32 instr) -> bool { return Bits<18, 19>(instr) != 0; }; @@ -1212,7 +1105,7 @@ TEST_CASE("Test ARM MSR instructions", "[JitX64]") { } } -TEST_CASE("Fuzz ARM saturated add/sub instructions", "[JitX64]") { +TEST_CASE("Fuzz ARM saturated add/sub instructions", "[JitX64][A32]") { auto is_valid = [](u32 inst) -> bool { // R15 as Rd, Rn, or Rm is UNPREDICTABLE return Bits<16, 19>(inst) != 0b1111 && @@ -1234,7 +1127,7 @@ TEST_CASE("Fuzz ARM saturated add/sub instructions", "[JitX64]") { } } -TEST_CASE("Fuzz ARM saturation instructions", "[JitX64]") { +TEST_CASE("Fuzz ARM saturation instructions", "[JitX64][A32]") { auto is_valid = [](u32 inst) -> bool { // R15 as Rd or Rn is UNPREDICTABLE return Bits<12, 15>(inst) != 0b1111 && @@ -1253,7 +1146,7 @@ TEST_CASE("Fuzz ARM saturation instructions", "[JitX64]") { }); } -TEST_CASE("Fuzz ARM packing instructions", "[JitX64]") { +TEST_CASE("Fuzz ARM packing instructions", "[JitX64][A32]") { auto is_pkh_valid = [](u32 inst) -> bool { // R15 as Rd, Rn, or Rm is UNPREDICTABLE return Bits<16, 19>(inst) != 0b1111 && @@ -1273,18 +1166,19 @@ TEST_CASE("Fuzz ARM packing instructions", "[JitX64]") { } } -TEST_CASE("arm: Test InvalidateCacheRange", "[arm]") { - Dynarmic::A32::Jit jit{GetUserCallbacks()}; - code_mem.fill({}); - code_mem[0] = 0xe3a00005; // mov r0, #5 - code_mem[1] = 0xe3a0100D; // mov r1, #13 - code_mem[2] = 0xe0812000; // add r2, r1, r0 - code_mem[3] = 0xeafffffe; // b +#0 (infinite loop) +TEST_CASE("arm: Test InvalidateCacheRange", "[arm][A32]") { + ArmTestEnv test_env; + Dynarmic::A32::Jit jit{GetUserConfig(&test_env)}; + test_env.code_mem.fill({}); + test_env.code_mem[0] = 0xe3a00005; // mov r0, #5 + test_env.code_mem[1] = 0xe3a0100D; // mov r1, #13 + test_env.code_mem[2] = 0xe0812000; // add r2, r1, r0 + test_env.code_mem[3] = 0xeafffffe; // b +#0 (infinite loop) jit.Regs() = {}; jit.SetCpsr(0x000001d0); // User-mode - jit_num_ticks = 4; + test_env.ticks_left = 4; jit.Run(); REQUIRE(jit.Regs()[0] == 5); @@ -1294,13 +1188,13 @@ TEST_CASE("arm: Test InvalidateCacheRange", "[arm]") { REQUIRE(jit.Cpsr() == 0x000001d0); // Change the code - code_mem[1] = 0xe3a01007; // mov r1, #7 + test_env.code_mem[1] = 0xe3a01007; // mov r1, #7 jit.InvalidateCacheRange(/*start_memory_location = */ 4, /* length_in_bytes = */ 4); // Reset position of PC jit.Regs()[15] = 0; - jit_num_ticks = 4; + test_env.ticks_left = 4; jit.Run(); REQUIRE(jit.Regs()[0] == 5); diff --git a/tests/A32/fuzz_thumb.cpp b/tests/A32/fuzz_thumb.cpp index 911690e2..5bac231a 100644 --- a/tests/A32/fuzz_thumb.cpp +++ b/tests/A32/fuzz_thumb.cpp @@ -26,133 +26,17 @@ #include "frontend/ir/basic_block.h" #include "ir_opt/passes.h" #include "rand_int.h" +#include "testenv.h" #include "A32/skyeye_interpreter/dyncom/arm_dyncom_interpreter.h" #include "A32/skyeye_interpreter/skyeye_common/armstate.h" -struct WriteRecord { - size_t size; - u32 address; - u64 data; 
-}; - -static bool operator==(const WriteRecord& a, const WriteRecord& b) { - return std::tie(a.size, a.address, a.data) == std::tie(b.size, b.address, b.data); +static Dynarmic::A32::UserConfig GetUserConfig(ThumbTestEnv* testenv) { + Dynarmic::A32::UserConfig user_config; + user_config.callbacks = testenv; + return user_config; } -static u64 jit_num_ticks = 0; -static std::array code_mem{}; -static std::vector write_records; - -static u64 GetTicksRemaining(); -static void AddTicks(u64 ticks); -static bool IsReadOnlyMemory(u32 vaddr); -static u8 MemoryRead8(u32 vaddr); -static u16 MemoryRead16(u32 vaddr); -static u32 MemoryRead32(u32 vaddr); -static u64 MemoryRead64(u32 vaddr); -static void MemoryWrite8(u32 vaddr, u8 value); -static void MemoryWrite16(u32 vaddr, u16 value); -static void MemoryWrite32(u32 vaddr, u32 value); -static void MemoryWrite64(u32 vaddr, u64 value); -static void InterpreterFallback(u32 pc, Dynarmic::A32::Jit* jit, void*); -static Dynarmic::A32::UserCallbacks GetUserCallbacks(); - -static u64 GetTicksRemaining() { - return jit_num_ticks; -} -static void AddTicks(u64 ticks) { - if (ticks > jit_num_ticks) { - jit_num_ticks = 0; - return; - } - jit_num_ticks -= ticks; -} - -static bool IsReadOnlyMemory(u32 vaddr) { - return vaddr < code_mem.size(); -} -static u8 MemoryRead8(u32 vaddr) { - return static_cast(vaddr); -} -static u16 MemoryRead16(u32 vaddr) { - return static_cast(vaddr); -} -static u32 MemoryRead32(u32 vaddr) { - if (vaddr < code_mem.size() * sizeof(u16)) { - size_t index = vaddr / sizeof(u16); - if (index + 1 >= code_mem.size()) - return code_mem[index]; - return code_mem[index] | (code_mem[index+1] << 16); - } - return vaddr; -} -static u64 MemoryRead64(u32 vaddr) { - return vaddr; -} -static u32 MemoryReadCode(u32 vaddr) { - if (vaddr < code_mem.size() * sizeof(u16)) { - size_t index = vaddr / sizeof(u16); - if (index + 1 >= code_mem.size()) - return code_mem[index]; - return code_mem[index] | (code_mem[index + 1] << 16); - } - return 0xE7FEE7FE; // b +#0, b +#0 -} - -static void MemoryWrite8(u32 vaddr, u8 value){ - write_records.push_back({8, vaddr, value}); -} -static void MemoryWrite16(u32 vaddr, u16 value){ - write_records.push_back({16, vaddr, value}); -} -static void MemoryWrite32(u32 vaddr, u32 value){ - write_records.push_back({32, vaddr, value}); -} -static void MemoryWrite64(u32 vaddr, u64 value){ - write_records.push_back({64, vaddr, value}); -} - -static void InterpreterFallback(u32 pc, Dynarmic::A32::Jit* jit, void*) { - ARMul_State interp_state{USER32MODE}; - interp_state.user_callbacks = GetUserCallbacks(); - interp_state.NumInstrsToExecute = 1; - - interp_state.Reg = jit->Regs(); - interp_state.Cpsr = jit->Cpsr(); - interp_state.Reg[15] = pc; - - InterpreterClearCache(); - InterpreterMainLoop(&interp_state); - - bool T = Dynarmic::Common::Bit<5>(interp_state.Cpsr); - interp_state.Reg[15] &= T ? 
0xFFFFFFFE : 0xFFFFFFFC;
-
-    jit->Regs() = interp_state.Reg;
-    jit->SetCpsr(interp_state.Cpsr);
-}
-
-static void Fail() {
-    FAIL();
-}
-
-static Dynarmic::A32::UserCallbacks GetUserCallbacks() {
-    Dynarmic::A32::UserCallbacks user_callbacks{};
-    user_callbacks.InterpreterFallback = &InterpreterFallback;
-    user_callbacks.CallSVC = (void (*)(u32)) &Fail;
-    user_callbacks.memory.IsReadOnlyMemory = &IsReadOnlyMemory;
-    user_callbacks.memory.Read8 = &MemoryRead8;
-    user_callbacks.memory.Read16 = &MemoryRead16;
-    user_callbacks.memory.Read32 = &MemoryRead32;
-    user_callbacks.memory.Read64 = &MemoryRead64;
-    user_callbacks.memory.ReadCode = &MemoryReadCode;
-    user_callbacks.memory.Write8 = &MemoryWrite8;
-    user_callbacks.memory.Write16 = &MemoryWrite16;
-    user_callbacks.memory.Write32 = &MemoryWrite32;
-    user_callbacks.memory.Write64 = &MemoryWrite64;
-    user_callbacks.GetTicksRemaining = &GetTicksRemaining;
-    user_callbacks.AddTicks = &AddTicks;
-    return user_callbacks;
-}
+using WriteRecords = std::map<u32, u8>;
 
 struct ThumbInstGen final {
 public:
@@ -193,7 +77,7 @@ private:
     std::function<bool(u16)> is_valid;
 };
 
-static bool DoesBehaviorMatch(const ARMul_State& interp, const Dynarmic::A32::Jit& jit, const std::vector<WriteRecord>& interp_write_records, const std::vector<WriteRecord>& jit_write_records) {
+static bool DoesBehaviorMatch(const ARMul_State& interp, const Dynarmic::A32::Jit& jit, WriteRecords& interp_write_records, WriteRecords& jit_write_records) {
     const auto interp_regs = interp.Reg;
     const auto jit_regs = jit.Regs();
 
@@ -202,7 +86,7 @@ static bool DoesBehaviorMatch(const ARMul_State& interp, const Dynarmic::A32::Ji
         && interp_write_records == jit_write_records;
 }
 
-static void RunInstance(size_t run_number, ARMul_State& interp, Dynarmic::A32::Jit& jit, const std::array<u32, 16>& initial_regs, size_t instruction_count, size_t instructions_to_execute_count) {
+static void RunInstance(size_t run_number, ThumbTestEnv& test_env, ARMul_State& interp, Dynarmic::A32::Jit& jit, const std::array<u32, 16>& initial_regs, size_t instruction_count, size_t instructions_to_execute_count) {
     interp.instruction_cache.clear();
     InterpreterClearCache();
     jit.ClearCache();
@@ -215,20 +99,20 @@ static void RunInstance(size_t run_number, ARMul_State& interp, Dynarmic::A32::J
     jit.Regs() = initial_regs;
 
     // Run interpreter
-    write_records.clear();
+    test_env.modified_memory.clear();
     interp.NumInstrsToExecute = static_cast<unsigned>(instructions_to_execute_count);
     InterpreterMainLoop(&interp);
-    auto interp_write_records = write_records;
+    auto interp_write_records = test_env.modified_memory;
     {
         bool T = Dynarmic::Common::Bit<5>(interp.Cpsr);
         interp.Reg[15] &= T ? 0xFFFFFFFE : 0xFFFFFFFC;
     }
 
     // Run jit
-    write_records.clear();
-    jit_num_ticks = instructions_to_execute_count;
+    test_env.modified_memory.clear();
+    test_env.ticks_left = instructions_to_execute_count;
     jit.Run();
-    auto jit_write_records = write_records;
+    auto jit_write_records = test_env.modified_memory;
 
     // Compare
     if (!DoesBehaviorMatch(interp, jit, interp_write_records, jit_write_records)) {
@@ -236,7 +120,7 @@ static void RunInstance(size_t run_number, ARMul_State& interp, Dynarmic::A32::J
 
         printf("\nInstruction Listing: \n");
         for (size_t i = 0; i < instruction_count; i++) {
-            printf("%04x %s\n", code_mem[i], Dynarmic::A32::DisassembleThumb16(code_mem[i]).c_str());
+            printf("%04x %s\n", test_env.code_mem[i], Dynarmic::A32::DisassembleThumb16(test_env.code_mem[i]).c_str());
         }
 
         printf("\nInitial Register Listing: \n");
@@ -253,12 +137,12 @@ static void RunInstance(size_t run_number, ARMul_State& interp, Dynarmic::A32::J
 
         printf("\nInterp Write Records:\n");
         for (auto& record : interp_write_records) {
-            printf("%zu [%x] = %" PRIu64 "\n", record.size, record.address, record.data);
+            printf("[%08x] = %02x\n", record.first, record.second);
         }
 
         printf("\nJIT Write Records:\n");
         for (auto& record : jit_write_records) {
-            printf("%zu [%x] = %" PRIu64 "\n", record.size, record.address, record.data);
+            printf("[%08x] = %02x\n", record.first, record.second);
         }
 
         Dynarmic::A32::PSR cpsr;
@@ -267,10 +151,10 @@ static void RunInstance(size_t run_number, ARMul_State& interp, Dynarmic::A32::J
         size_t num_insts = 0;
         while (num_insts < instructions_to_execute_count) {
             Dynarmic::A32::LocationDescriptor descriptor = {u32(num_insts * 4), cpsr, Dynarmic::A32::FPSCR{}};
-            Dynarmic::IR::Block ir_block = Dynarmic::A32::Translate(descriptor, &MemoryReadCode);
+            Dynarmic::IR::Block ir_block = Dynarmic::A32::Translate(descriptor, [&test_env](u32 vaddr) { return test_env.MemoryReadCode(vaddr); });
             Dynarmic::Optimization::A32GetSetElimination(ir_block);
             Dynarmic::Optimization::DeadCodeElimination(ir_block);
-            Dynarmic::Optimization::A32ConstantMemoryReads(ir_block, GetUserCallbacks().memory);
+            Dynarmic::Optimization::A32ConstantMemoryReads(ir_block, &test_env);
             Dynarmic::Optimization::ConstantPropagation(ir_block);
             Dynarmic::Optimization::DeadCodeElimination(ir_block);
             Dynarmic::Optimization::VerificationPass(ir_block);
@@ -287,22 +171,24 @@ static void RunInstance(size_t run_number, ARMul_State& interp, Dynarmic::A32::J
 }
 
 void FuzzJitThumb(const size_t instruction_count, const size_t instructions_to_execute_count, const size_t run_count, const std::function<u16()> instruction_generator) {
+    ThumbTestEnv test_env;
+
     // Prepare memory
-    code_mem.fill(0xE7FE); // b +#0
+    test_env.code_mem.fill(0xE7FE); // b +#0
 
     // Prepare test subjects
     ARMul_State interp{USER32MODE};
-    interp.user_callbacks = GetUserCallbacks();
-    Dynarmic::A32::Jit jit{GetUserCallbacks()};
+    interp.user_callbacks = &test_env;
+    Dynarmic::A32::Jit jit{GetUserConfig(&test_env)};
 
     for (size_t run_number = 0; run_number < run_count; run_number++) {
         std::array<u32, 16> initial_regs;
         std::generate_n(initial_regs.begin(), 15, []{ return RandInt<u32>(0, 0xFFFFFFFF); });
         initial_regs[15] = 0;
 
-        std::generate_n(code_mem.begin(), instruction_count, instruction_generator);
+        std::generate_n(test_env.code_mem.begin(), instruction_count, instruction_generator);
 
-        RunInstance(run_number, interp, jit, initial_regs, instruction_count, instructions_to_execute_count);
+        RunInstance(run_number, test_env, interp, jit, initial_regs, instruction_count, instructions_to_execute_count);
     }
 }
 
@@ -388,13
+274,15 @@ TEST_CASE("Fuzz Thumb instructions set 2 (affects PC)", "[JitX64][Thumb]") { } TEST_CASE("Verify fix for off by one error in MemoryRead32 worked", "[Thumb]") { + ThumbTestEnv test_env; + // Prepare memory - code_mem.fill(0xE7FE); // b +#0 + test_env.code_mem.fill(0xE7FE); // b +#0 // Prepare test subjects ARMul_State interp{USER32MODE}; - interp.user_callbacks = GetUserCallbacks(); - Dynarmic::A32::Jit jit{GetUserCallbacks()}; + interp.user_callbacks = &test_env; + Dynarmic::A32::Jit jit{GetUserConfig(&test_env)}; std::array initial_regs { 0xe90ecd70, @@ -415,11 +303,11 @@ TEST_CASE("Verify fix for off by one error in MemoryRead32 worked", "[Thumb]") { 0x00000000, }; - code_mem[0] = 0x40B8; // lsls r0, r7, #0 - code_mem[1] = 0x01CA; // lsls r2, r1, #7 - code_mem[2] = 0x83A1; // strh r1, [r4, #28] - code_mem[3] = 0x708A; // strb r2, [r1, #2] - code_mem[4] = 0xBCC4; // pop {r2, r6, r7} + test_env.code_mem[0] = 0x40B8; // lsls r0, r7, #0 + test_env.code_mem[1] = 0x01CA; // lsls r2, r1, #7 + test_env.code_mem[2] = 0x83A1; // strh r1, [r4, #28] + test_env.code_mem[3] = 0x708A; // strb r2, [r1, #2] + test_env.code_mem[4] = 0xBCC4; // pop {r2, r6, r7} - RunInstance(1, interp, jit, initial_regs, 5, 5); + RunInstance(1, test_env, interp, jit, initial_regs, 5, 5); } diff --git a/tests/A32/skyeye_interpreter/dyncom/arm_dyncom_interpreter.cpp b/tests/A32/skyeye_interpreter/dyncom/arm_dyncom_interpreter.cpp index 69757ed4..4001595c 100644 --- a/tests/A32/skyeye_interpreter/dyncom/arm_dyncom_interpreter.cpp +++ b/tests/A32/skyeye_interpreter/dyncom/arm_dyncom_interpreter.cpp @@ -803,7 +803,7 @@ enum { static unsigned int InterpreterTranslateInstruction(const ARMul_State* cpu, const u32 phys_addr, ARM_INST_PTR& inst_base) { unsigned int inst_size = 4; - unsigned int inst = (*cpu->user_callbacks.memory.ReadCode)(phys_addr & 0xFFFFFFFC); + unsigned int inst = cpu->user_callbacks->MemoryReadCode(phys_addr & 0xFFFFFFFC); // If we are in Thumb mode, we'll translate one Thumb instruction to the corresponding ARM instruction if (cpu->TFlag) { @@ -3506,7 +3506,7 @@ unsigned InterpreterMainLoop(ARMul_State* cpu) { if (inst_base->cond == ConditionCode::AL || CondPassed(cpu, inst_base->cond)) { swi_inst* const inst_cream = (swi_inst*)inst_base->component; // SVC::CallSVC(inst_cream->num & 0xFFFF); - (*cpu->user_callbacks.CallSVC)(inst_cream->num & 0xFFFF); + cpu->user_callbacks->CallSVC(inst_cream->num & 0xFFFF); } cpu->Reg[15] += cpu->GetInstructionSize(); diff --git a/tests/A32/skyeye_interpreter/skyeye_common/armstate.cpp b/tests/A32/skyeye_interpreter/skyeye_common/armstate.cpp index 0365ff81..a251b392 100644 --- a/tests/A32/skyeye_interpreter/skyeye_common/armstate.cpp +++ b/tests/A32/skyeye_interpreter/skyeye_common/armstate.cpp @@ -204,14 +204,14 @@ u8 ARMul_State::ReadMemory8(u32 address) const { // CheckMemoryBreakpoint(address, GDBStub::BreakpointType::Read); - return (*user_callbacks.memory.Read8)(address); + return user_callbacks->MemoryRead8(address); } u16 ARMul_State::ReadMemory16(u32 address) const { // CheckMemoryBreakpoint(address, GDBStub::BreakpointType::Read); - u16 data = (*user_callbacks.memory.Read16)(address); + u16 data = user_callbacks->MemoryRead16(address); if (InBigEndianMode()) data = Common::swap16(data); @@ -223,7 +223,7 @@ u32 ARMul_State::ReadMemory32(u32 address) const { // CheckMemoryBreakpoint(address, GDBStub::BreakpointType::Read); - u32 data = (*user_callbacks.memory.Read32)(address); + u32 data = user_callbacks->MemoryRead32(address); if (InBigEndianMode()) data 
= Common::swap32(data); @@ -235,7 +235,7 @@ u64 ARMul_State::ReadMemory64(u32 address) const { // CheckMemoryBreakpoint(address, GDBStub::BreakpointType::Read); - u64 data = (*user_callbacks.memory.Read64)(address); + u64 data = user_callbacks->MemoryRead64(address); if (InBigEndianMode()) data = Common::swap64(data); @@ -247,7 +247,7 @@ void ARMul_State::WriteMemory8(u32 address, u8 data) { // CheckMemoryBreakpoint(address, GDBStub::BreakpointType::Write); - (*user_callbacks.memory.Write8)(address, data); + user_callbacks->MemoryWrite8(address, data); } void ARMul_State::WriteMemory16(u32 address, u16 data) @@ -257,7 +257,7 @@ void ARMul_State::WriteMemory16(u32 address, u16 data) if (InBigEndianMode()) data = Common::swap16(data); - (*user_callbacks.memory.Write16)(address, data); + user_callbacks->MemoryWrite16(address, data); } void ARMul_State::WriteMemory32(u32 address, u32 data) @@ -267,7 +267,7 @@ void ARMul_State::WriteMemory32(u32 address, u32 data) if (InBigEndianMode()) data = Common::swap32(data); - (*user_callbacks.memory.Write32)(address, data); + user_callbacks->MemoryWrite32(address, data); } void ARMul_State::WriteMemory64(u32 address, u64 data) @@ -277,7 +277,7 @@ void ARMul_State::WriteMemory64(u32 address, u64 data) if (InBigEndianMode()) data = Common::swap64(data); - (*user_callbacks.memory.Write64)(address, data); + user_callbacks->MemoryWrite64(address, data); } diff --git a/tests/A32/skyeye_interpreter/skyeye_common/armstate.h b/tests/A32/skyeye_interpreter/skyeye_common/armstate.h index ef5d0c5e..4fdf20ed 100644 --- a/tests/A32/skyeye_interpreter/skyeye_common/armstate.h +++ b/tests/A32/skyeye_interpreter/skyeye_common/armstate.h @@ -20,7 +20,7 @@ #include #include -#include +#include #include "common/common_types.h" #include "A32/skyeye_interpreter/skyeye_common/arm_regformat.h" @@ -252,5 +252,5 @@ public: u32 exclusive_tag; // The address for which the local monitor is in exclusive access mode bool exclusive_state; - Dynarmic::A32::UserCallbacks user_callbacks; + Dynarmic::A32::UserCallbacks* user_callbacks; }; diff --git a/tests/A32/test_thumb_instructions.cpp b/tests/A32/test_thumb_instructions.cpp index df3bbfb7..012e678b 100644 --- a/tests/A32/test_thumb_instructions.cpp +++ b/tests/A32/test_thumb_instructions.cpp @@ -11,77 +11,27 @@ #include "common/common_types.h" #include "A32/skyeye_interpreter/dyncom/arm_dyncom_interpreter.h" #include "A32/skyeye_interpreter/skyeye_common/armstate.h" +#include "testenv.h" -static u64 jit_num_ticks = 0; -static std::array code_mem{}; - -static u64 GetTicksRemaining(); -static void AddTicks(u64 ticks); -static u32 MemoryRead32(u32 vaddr); -static u32 MemoryReadCode(u32 vaddr); -static void InterpreterFallback(u32 pc, Dynarmic::A32::Jit* jit, void*); -static Dynarmic::A32::UserCallbacks GetUserCallbacks(); - -static u64 GetTicksRemaining() { - return jit_num_ticks; -} -static void AddTicks(u64 ticks) { - if (ticks > jit_num_ticks) { - jit_num_ticks = 0; - return; - } - jit_num_ticks -= ticks; -} - -static u32 MemoryRead32(u32 vaddr) { - return vaddr; -} -static u32 MemoryReadCode(u32 vaddr) { - if (vaddr < code_mem.size() * sizeof(u16)) { - size_t index = vaddr / sizeof(u16); - return code_mem[index] | (code_mem[index+1] << 16); - } - return 0xE7FEE7FE; //b +#0, b +#0 -} - -static void InterpreterFallback(u32 pc, Dynarmic::A32::Jit* jit, void*) { - ARMul_State interp_state{USER32MODE}; - interp_state.user_callbacks = GetUserCallbacks(); - interp_state.NumInstrsToExecute = 1; - - interp_state.Reg = jit->Regs(); - 
interp_state.Cpsr = jit->Cpsr(); - interp_state.Reg[15] = pc; - - InterpreterClearCache(); - InterpreterMainLoop(&interp_state); - - jit->Regs() = interp_state.Reg; - jit->SetCpsr(interp_state.Cpsr); -} - -static Dynarmic::A32::UserCallbacks GetUserCallbacks() { - Dynarmic::A32::UserCallbacks user_callbacks{}; - user_callbacks.memory.Read32 = &MemoryRead32; - user_callbacks.memory.ReadCode = &MemoryReadCode; - user_callbacks.InterpreterFallback = &InterpreterFallback; - user_callbacks.GetTicksRemaining = &GetTicksRemaining; - user_callbacks.AddTicks = &AddTicks; - return user_callbacks; +static Dynarmic::A32::UserConfig GetUserConfig(ThumbTestEnv* testenv) { + Dynarmic::A32::UserConfig user_config; + user_config.callbacks = testenv; + return user_config; } TEST_CASE( "thumb: lsls r0, r1, #2", "[thumb]" ) { - Dynarmic::A32::Jit jit{GetUserCallbacks()}; - code_mem.fill({}); - code_mem[0] = 0x0088; // lsls r0, r1, #2 - code_mem[1] = 0xE7FE; // b +#0 + ThumbTestEnv test_env; + Dynarmic::A32::Jit jit{GetUserConfig(&test_env)}; + test_env.code_mem.fill({}); + test_env.code_mem[0] = 0x0088; // lsls r0, r1, #2 + test_env.code_mem[1] = 0xE7FE; // b +#0 jit.Regs()[0] = 1; jit.Regs()[1] = 2; jit.Regs()[15] = 0; // PC = 0 jit.SetCpsr(0x00000030); // Thumb, User-mode - jit_num_ticks = 1; + test_env.ticks_left = 1; jit.Run(); REQUIRE( jit.Regs()[0] == 8 ); @@ -91,17 +41,18 @@ TEST_CASE( "thumb: lsls r0, r1, #2", "[thumb]" ) { } TEST_CASE( "thumb: lsls r0, r1, #31", "[thumb]" ) { - Dynarmic::A32::Jit jit{GetUserCallbacks()}; - code_mem.fill({}); - code_mem[0] = 0x07C8; // lsls r0, r1, #31 - code_mem[1] = 0xE7FE; // b +#0 + ThumbTestEnv test_env; + Dynarmic::A32::Jit jit{GetUserConfig(&test_env)}; + test_env.code_mem.fill({}); + test_env.code_mem[0] = 0x07C8; // lsls r0, r1, #31 + test_env.code_mem[1] = 0xE7FE; // b +#0 jit.Regs()[0] = 1; jit.Regs()[1] = 0xFFFFFFFF; jit.Regs()[15] = 0; // PC = 0 jit.SetCpsr(0x00000030); // Thumb, User-mode - jit_num_ticks = 1; + test_env.ticks_left = 1; jit.Run(); REQUIRE( jit.Regs()[0] == 0x80000000 ); @@ -111,16 +62,17 @@ TEST_CASE( "thumb: lsls r0, r1, #31", "[thumb]" ) { } TEST_CASE( "thumb: revsh r4, r3", "[thumb]" ) { - Dynarmic::A32::Jit jit{GetUserCallbacks()}; - code_mem.fill({}); - code_mem[0] = 0xBADC; // revsh r4, r3 - code_mem[1] = 0xE7FE; // b +#0 + ThumbTestEnv test_env; + Dynarmic::A32::Jit jit{GetUserConfig(&test_env)}; + test_env.code_mem.fill({}); + test_env.code_mem[0] = 0xBADC; // revsh r4, r3 + test_env.code_mem[1] = 0xE7FE; // b +#0 jit.Regs()[3] = 0x12345678; jit.Regs()[15] = 0; // PC = 0 jit.SetCpsr(0x00000030); // Thumb, User-mode - jit_num_ticks = 1; + test_env.ticks_left = 1; jit.Run(); REQUIRE( jit.Regs()[3] == 0x12345678 ); @@ -130,33 +82,35 @@ TEST_CASE( "thumb: revsh r4, r3", "[thumb]" ) { } TEST_CASE( "thumb: ldr r3, [r3, #28]", "[thumb]" ) { - Dynarmic::A32::Jit jit{GetUserCallbacks()}; - code_mem.fill({}); - code_mem[0] = 0x69DB; // ldr r3, [r3, #28] - code_mem[1] = 0xE7FE; // b +#0 + ThumbTestEnv test_env; + Dynarmic::A32::Jit jit{GetUserConfig(&test_env)}; + test_env.code_mem.fill({}); + test_env.code_mem[0] = 0x69DB; // ldr r3, [r3, #28] + test_env.code_mem[1] = 0xE7FE; // b +#0 jit.Regs()[3] = 0x12345678; jit.Regs()[15] = 0; // PC = 0 jit.SetCpsr(0x00000030); // Thumb, User-mode - jit_num_ticks = 1; + test_env.ticks_left = 1; jit.Run(); - REQUIRE( jit.Regs()[3] == 0x12345694 ); + REQUIRE( jit.Regs()[3] == 0x97969594 ); // Memory location 0x12345694 REQUIRE( jit.Regs()[15] == 2 ); REQUIRE( jit.Cpsr() == 0x00000030 ); // Thumb, 
User-mode
 }
 
 TEST_CASE( "thumb: blx +#67712", "[thumb]" ) {
-    Dynarmic::A32::Jit jit{GetUserCallbacks()};
-    code_mem.fill({});
-    code_mem[0] = 0xF010; code_mem[1] = 0xEC3E; // blx +#67712
-    code_mem[2] = 0xE7FE; // b +#0
+    ThumbTestEnv test_env;
+    Dynarmic::A32::Jit jit{GetUserConfig(&test_env)};
+    test_env.code_mem.fill({});
+    test_env.code_mem[0] = 0xF010; test_env.code_mem[1] = 0xEC3E; // blx +#67712
+    test_env.code_mem[2] = 0xE7FE; // b +#0
 
     jit.Regs()[15] = 0; // PC = 0
     jit.SetCpsr(0x00000030); // Thumb, User-mode
 
-    jit_num_ticks = 1;
+    test_env.ticks_left = 1;
     jit.Run();
 
     REQUIRE( jit.Regs()[14] == (0x4 | 1) );
@@ -165,15 +119,16 @@ TEST_CASE( "thumb: blx +#67712", "[thumb]" ) {
 }
 
 TEST_CASE( "thumb: bl +#234584", "[thumb]" ) {
-    Dynarmic::A32::Jit jit{GetUserCallbacks()};
-    code_mem.fill({});
-    code_mem[0] = 0xF039; code_mem[1] = 0xFA2A; // bl +#234584
-    code_mem[2] = 0xE7FE; // b +#0
+    ThumbTestEnv test_env;
+    Dynarmic::A32::Jit jit{GetUserConfig(&test_env)};
+    test_env.code_mem.fill({});
+    test_env.code_mem[0] = 0xF039; test_env.code_mem[1] = 0xFA2A; // bl +#234584
+    test_env.code_mem[2] = 0xE7FE; // b +#0
 
     jit.Regs()[15] = 0; // PC = 0
     jit.SetCpsr(0x00000030); // Thumb, User-mode
 
-    jit_num_ticks = 1;
+    test_env.ticks_left = 1;
     jit.Run();
 
     REQUIRE( jit.Regs()[14] == (0x4 | 1) );
@@ -182,15 +137,16 @@ TEST_CASE( "thumb: bl +#234584", "[thumb]" ) {
 }
 
 TEST_CASE( "thumb: bl -#42", "[thumb]" ) {
-    Dynarmic::A32::Jit jit{GetUserCallbacks()};
-    code_mem.fill({});
-    code_mem[0] = 0xF7FF; code_mem[1] = 0xFFE9; // bl -#42
-    code_mem[2] = 0xE7FE; // b +#0
+    ThumbTestEnv test_env;
+    Dynarmic::A32::Jit jit{GetUserConfig(&test_env)};
+    test_env.code_mem.fill({});
+    test_env.code_mem[0] = 0xF7FF; test_env.code_mem[1] = 0xFFE9; // bl -#42
+    test_env.code_mem[2] = 0xE7FE; // b +#0
 
     jit.Regs()[15] = 0; // PC = 0
     jit.SetCpsr(0x00000030); // Thumb, User-mode
 
-    jit_num_ticks = 1;
+    test_env.ticks_left = 1;
     jit.Run();
 
     REQUIRE( jit.Regs()[14] == (0x4 | 1) );
diff --git a/tests/A32/testenv.h b/tests/A32/testenv.h
new file mode 100644
index 00000000..365c82bd
--- /dev/null
+++ b/tests/A32/testenv.h
@@ -0,0 +1,93 @@
+/* This file is part of the dynarmic project.
+ * Copyright (c) 2018 MerryMage
+ * This software may be used and distributed according to the terms of the GNU
+ * General Public License version 2 or any later version.
+ */
+
+#pragma once
+
+#include <array>
+#include <cstring>
+#include <map>
+
+#include <dynarmic/A32/a32.h>
+
+#include "common/assert.h"
+#include "common/common_types.h"
+
+template <typename InstructionType, u32 infinite_loop, size_t num_words>
+class A32TestEnv final : public Dynarmic::A32::UserCallbacks {
+public:
+    u64 ticks_left = 0;
+    bool code_mem_modified_by_guest = false;
+    std::array<InstructionType, num_words> code_mem{};
+    std::map<u32, u8> modified_memory;
+
+    std::uint32_t MemoryReadCode(u32 vaddr) override {
+        const size_t index = vaddr / sizeof(InstructionType);
+        if (index < code_mem.size()) {
+            u32 value;
+            std::memcpy(&value, &code_mem[index], sizeof(u32));
+            return value;
+        }
+        return infinite_loop; // B .
+    }
+
+    std::uint8_t MemoryRead8(u32 vaddr) override {
+        if (vaddr < sizeof(InstructionType) * code_mem.size()) {
+            return reinterpret_cast<u8*>(code_mem.data())[vaddr];
+        }
+        if (auto iter = modified_memory.find(vaddr); iter != modified_memory.end()) {
+            return iter->second;
+        }
+        return static_cast<u8>(vaddr);
+    }
+    std::uint16_t MemoryRead16(u32 vaddr) override {
+        return u16(MemoryRead8(vaddr)) | u16(MemoryRead8(vaddr + 1)) << 8;
+    }
+    std::uint32_t MemoryRead32(u32 vaddr) override {
+        return u32(MemoryRead16(vaddr)) | u32(MemoryRead16(vaddr + 2)) << 16;
+    }
+    std::uint64_t MemoryRead64(u32 vaddr) override {
+        return u64(MemoryRead32(vaddr)) | u64(MemoryRead32(vaddr + 4)) << 32;
+    }
+
+    void MemoryWrite8(u32 vaddr, std::uint8_t value) override {
+        if (vaddr < code_mem.size() * sizeof(u32)) {
+            code_mem_modified_by_guest = true;
+        }
+        modified_memory[vaddr] = value;
+    }
+    void MemoryWrite16(u32 vaddr, std::uint16_t value) override {
+        MemoryWrite8(vaddr, static_cast<u8>(value));
+        MemoryWrite8(vaddr + 1, static_cast<u8>(value >> 8));
+    }
+    void MemoryWrite32(u32 vaddr, std::uint32_t value) override {
+        MemoryWrite16(vaddr, static_cast<u16>(value));
+        MemoryWrite16(vaddr + 2, static_cast<u16>(value >> 16));
+    }
+    void MemoryWrite64(u32 vaddr, std::uint64_t value) override {
+        MemoryWrite32(vaddr, static_cast<u32>(value));
+        MemoryWrite32(vaddr + 4, static_cast<u32>(value >> 32));
+    }
+
+    void InterpreterFallback(u32 pc, size_t num_instructions) override { ASSERT_MSG(false, "InterpreterFallback(%08x, %zu)", pc, num_instructions); }
+
+    void CallSVC(std::uint32_t swi) override { ASSERT_MSG(false, "CallSVC(%u)", swi); }
+
+    void ExceptionRaised(u32 pc, Dynarmic::A32::Exception /*exception*/) override { ASSERT_MSG(false, "ExceptionRaised(%08x)", pc); }
+
+    void AddTicks(std::uint64_t ticks) override {
+        if (ticks > ticks_left) {
+            ticks_left = 0;
+            return;
+        }
+        ticks_left -= ticks;
+    }
+    std::uint64_t GetTicksRemaining() override {
+        return ticks_left;
+    }
+};
+
+using ArmTestEnv = A32TestEnv<u32, 0xEAFFFFFE, 1024>;
+using ThumbTestEnv = A32TestEnv<u16, 0xE7FEE7FE, 1024>;
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index 59ea2c94..68c1f4b3 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -24,6 +24,7 @@ add_executable(dynarmic_tests
     A32/skyeye_interpreter/skyeye_common/vfp/vfpsingle.cpp
     A32/test_arm_disassembler.cpp
     A32/test_thumb_instructions.cpp
+    A32/testenv.h
    A64/a64.cpp
    A64/inst_gen.cpp
    A64/inst_gen.h
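
For an embedder doing the same migration as these tests (old function-pointer callback struct to the new virtual UserCallbacks interface plus a UserConfig), the shape is roughly the sketch below. MyEnvironment, its flat 4 KiB memory, and the tick bookkeeping are illustrative assumptions for this sketch, not code from this commit.

    #include <array>
    #include <cstdint>
    #include <cstdio>

    #include <dynarmic/A32/a32.h>
    #include <dynarmic/A32/config.h>

    // Illustrative environment: a tiny flat RAM plus a tick budget.
    class MyEnvironment final : public Dynarmic::A32::UserCallbacks {
    public:
        std::array<std::uint8_t, 4096> memory{};
        std::uint64_t ticks_left = 0;

        std::uint8_t MemoryRead8(std::uint32_t vaddr) override { return memory[vaddr % memory.size()]; }
        std::uint16_t MemoryRead16(std::uint32_t vaddr) override {
            return std::uint16_t(MemoryRead8(vaddr)) | std::uint16_t(MemoryRead8(vaddr + 1)) << 8;
        }
        std::uint32_t MemoryRead32(std::uint32_t vaddr) override {
            return std::uint32_t(MemoryRead16(vaddr)) | std::uint32_t(MemoryRead16(vaddr + 2)) << 16;
        }
        std::uint64_t MemoryRead64(std::uint32_t vaddr) override {
            return std::uint64_t(MemoryRead32(vaddr)) | std::uint64_t(MemoryRead32(vaddr + 4)) << 32;
        }

        void MemoryWrite8(std::uint32_t vaddr, std::uint8_t value) override { memory[vaddr % memory.size()] = value; }
        void MemoryWrite16(std::uint32_t vaddr, std::uint16_t value) override {
            MemoryWrite8(vaddr, std::uint8_t(value));
            MemoryWrite8(vaddr + 1, std::uint8_t(value >> 8));
        }
        void MemoryWrite32(std::uint32_t vaddr, std::uint32_t value) override {
            MemoryWrite16(vaddr, std::uint16_t(value));
            MemoryWrite16(vaddr + 2, std::uint16_t(value >> 16));
        }
        void MemoryWrite64(std::uint32_t vaddr, std::uint64_t value) override {
            MemoryWrite32(vaddr, std::uint32_t(value));
            MemoryWrite32(vaddr + 4, std::uint32_t(value >> 32));
        }

        void InterpreterFallback(std::uint32_t pc, std::size_t num_instructions) override {
            std::printf("interpreter fallback at %08x for %zu instruction(s)\n", pc, num_instructions);
        }
        void CallSVC(std::uint32_t swi) override { std::printf("svc %u\n", swi); }
        void ExceptionRaised(std::uint32_t pc, Dynarmic::A32::Exception) override { std::printf("exception at %08x\n", pc); }

        void AddTicks(std::uint64_t ticks) override { ticks_left = ticks > ticks_left ? 0 : ticks_left - ticks; }
        std::uint64_t GetTicksRemaining() override { return ticks_left; }
    };

    int main() {
        MyEnvironment env;
        Dynarmic::A32::UserConfig config;
        config.callbacks = &env;        // previously: a struct of raw function pointers
        Dynarmic::A32::Jit jit{config}; // the Jit is now constructed from a UserConfig

        env.MemoryWrite32(0, 0xe3a00005); // mov r0, #5
        env.MemoryWrite32(4, 0xeafffffe); // b +#0 (infinite loop)
        jit.Regs()[15] = 0;               // PC = 0
        jit.SetCpsr(0x000001d0);          // User-mode, ARM state
        env.ticks_left = 2;
        jit.Run();
        std::printf("r0 = %u\n", jit.Regs()[0]);
    }

MemoryReadCode is left at its default here, which falls back to MemoryRead32, as the new tests also rely on overriding only what they need.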