Ranged cache invalidation

* Fix clearing code block on a partial invalidation
* Remove unnecessary use of boost::variant
* Code cleanup
MerryMage 2017-09-11 00:09:52 +01:00
parent a362bffdd4
commit b992e5f8ec
3 changed files with 31 additions and 40 deletions
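
The embedder-visible result of this commit: ranged invalidation no longer routes through a boost::variant, and a full flush becomes a plain flag with its own fast path. A minimal sketch of how the two public entry points changed below might be called; the hook names and header path here are assumptions, only the two Jit member functions are from this commit:

#include <cstddef>
#include <cstdint>

#include <dynarmic/dynarmic.h>  // assumed header path; not part of this commit

// Hypothetical embedder hooks: only the two Jit member functions called
// below are actually touched by this commit.
void OnGuestMemoryWritten(Dynarmic::Jit& jit, std::uint32_t address, std::size_t size) {
    // Drop only the compiled blocks overlapping [address, address + size).
    jit.InvalidateCacheRange(address, size);
}

void OnFullFlush(Dynarmic::Jit& jit) {
    // A full flush now takes the dedicated invalidate_entire_cache path.
    jit.ClearCache();
}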

@@ -3532,31 +3532,20 @@ void EmitX64::ClearCache() {
 void EmitX64::InvalidateCacheRange(const Common::AddressRange& range) {
     // Remove cached block descriptors and patch information overlapping with the given range.
-    switch (range.which()) {
-    case 0: // FullAddressRange
-        ClearCache();
-        break;
-    case 1: // AddressInterval
-        auto interval = boost::get<Common::AddressInterval>(range);
-        for (auto it = std::begin(block_descriptors); it != std::end(block_descriptors);) {
-            const IR::LocationDescriptor& descriptor = it->second.start_location;
-            u32 start = descriptor.PC();
-            u32 end = it->second.end_location_pc;
-            if (interval.Overlaps(start, end)) {
-                it = block_descriptors.erase(it);
-                auto patch_it = patch_information.find(descriptor.UniqueHash());
-                if (patch_it != patch_information.end()) {
-                    Unpatch(descriptor);
-                }
-            } else {
-                ++it;
-            }
-        }
-        break;
-    }
+    for (auto it = block_descriptors.begin(); it != block_descriptors.end();) {
+        IR::LocationDescriptor descriptor = it->second.start_location;
+        u32 start = descriptor.PC();
+        u32 end = it->second.end_location_pc;
+        if (range.Overlaps(start, end)) {
+            it = block_descriptors.erase(it);
+            if (patch_information.count(descriptor.UniqueHash())) {
+                Unpatch(descriptor);
+            }
+        } else {
+            ++it;
+        }
+    }
 }
 
 } // namespace BackendX64
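
The new loop is the standard erase-while-iterating idiom for associative containers: erase() returns the iterator following the erased element, so each iteration advances through exactly one of erase and ++it, and no invalidated iterator is ever touched. A standalone sketch of the same pattern, with a hypothetical container layout (key = block start PC, value = block end PC):

#include <cstdint>
#include <iostream>
#include <map>

// Erase every block overlapping [from, to), advancing through erase()'s
// return value rather than incrementing an erased iterator.
void EraseOverlapping(std::map<std::uint32_t, std::uint32_t>& blocks,
                      std::uint32_t from, std::uint32_t to) {
    for (auto it = blocks.begin(); it != blocks.end();) {
        if (it->first <= to && from <= it->second) {
            it = blocks.erase(it);  // returns the iterator after the erased element
        } else {
            ++it;
        }
    }
}

int main() {
    std::map<std::uint32_t, std::uint32_t> blocks{{0x100, 0x10F}, {0x200, 0x20F}};
    EraseOverlapping(blocks, 0x105, 0x110);
    std::cout << blocks.size() << '\n';  // prints 1: only the 0x200 block remains
}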

@@ -4,8 +4,8 @@
  * General Public License version 2 or any later version.
  */
 
+#include <deque>
 #include <memory>
-#include <queue>
 
 #include <fmt/format.h>
 
@@ -45,7 +45,8 @@ struct Jit::Impl {
     const UserCallbacks callbacks;
 
     // Requests made during execution to invalidate the cache are queued up here.
-    std::queue<Common::AddressRange> invalid_cache_ranges;
+    std::deque<Common::AddressRange> invalid_cache_ranges;
+    bool invalidate_entire_cache = false;
 
     size_t Execute(size_t cycle_count) {
         return block_of_code.RunCode(&jit_state, cycle_count);
@@ -90,19 +91,28 @@ struct Jit::Impl {
     }
 
     void PerformCacheInvalidation() {
+        if (invalidate_entire_cache) {
+            jit_state.ResetRSB();
+            block_of_code.ClearCache();
+            emitter.ClearCache();
+
+            invalid_cache_ranges.clear();
+            invalidate_entire_cache = false;
+            return;
+        }
+
         if (invalid_cache_ranges.empty()) {
             return;
         }
 
         jit_state.ResetRSB();
-        block_of_code.ClearCache();
         while (!invalid_cache_ranges.empty()) {
             emitter.InvalidateCacheRange(invalid_cache_ranges.front());
-            invalid_cache_ranges.pop();
+            invalid_cache_ranges.pop_front();
         }
     }
 
-    void HandleNewCacheRange() {
+    void RequestCacheInvalidation() {
         if (jit_interface->is_executing) {
            jit_state.halt_requested = true;
            return;
@@ -160,13 +170,13 @@ size_t Jit::Run(size_t cycle_count) {
 }
 
 void Jit::ClearCache() {
-    impl->invalid_cache_ranges.push(Common::FullAddressRange{});
-    impl->HandleNewCacheRange();
+    impl->invalidate_entire_cache = true;
+    impl->RequestCacheInvalidation();
 }
 
 void Jit::InvalidateCacheRange(std::uint32_t start_address, std::size_t length) {
-    impl->invalid_cache_ranges.push(Common::AddressInterval{start_address, length});
-    impl->HandleNewCacheRange();
+    impl->invalid_cache_ranges.emplace_back(Common::AddressRange{start_address, length});
+    impl->RequestCacheInvalidation();
 }
 
 void Jit::Reset() {
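
The RequestCacheInvalidation/PerformCacheInvalidation split defers the actual work: a request made while the core is executing only sets halt_requested, and the queue (or the invalidate_entire_cache flag, which subsumes any queued ranges) is drained once execution reaches a safe point. A reduced sketch of that shape; apart from the deque of ranges and the flag, every name here is hypothetical:

#include <cstddef>
#include <cstdint>
#include <deque>

struct Range {
    std::uint32_t start_address;
    std::size_t length;
};

class InvalidationQueue {
public:
    // Safe to call at any time, including while guest code runs:
    // it only records the request.
    void RequestRange(Range r) { invalid_cache_ranges.push_back(r); }
    void RequestAll() { invalidate_entire_cache = true; }

    // Called only at a safe point, mirroring PerformCacheInvalidation():
    // a full flush takes its own path and discards any queued ranges.
    void Perform() {
        if (invalidate_entire_cache) {
            // ... reset the RSB, clear the code block and the emitter here ...
            invalid_cache_ranges.clear();
            invalidate_entire_cache = false;
            return;
        }
        while (!invalid_cache_ranges.empty()) {
            // ... invalidate blocks overlapping invalid_cache_ranges.front() ...
            invalid_cache_ranges.pop_front();
        }
    }

private:
    std::deque<Range> invalid_cache_ranges;
    bool invalidate_entire_cache = false;
};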

@@ -6,20 +6,14 @@
 
 #pragma once
 
-#include <boost/variant.hpp>
+#include <cstddef>
 
 #include "common/common_types.h"
 
 namespace Dynarmic {
 namespace Common {
 
-struct FullAddressRange {};
-
-struct AddressInterval {
+struct AddressRange {
     u32 start_address;
-    std::size_t length;
+    size_t length;
 
     // Does this interval overlap with [from, to)?
     bool Overlaps(u32 from, u32 to) const {
@@ -27,7 +21,5 @@ struct AddressInterval {
     }
 };
 
-using AddressRange = boost::variant<FullAddressRange, AddressInterval>;
-
 } // namespace Common
 } // namespace Dynarmic
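
The hunks above elide the body of Overlaps. A typical overlap test consistent with the stored start_address/length fields and the [from, to) comment would be the following; the exact expression is an assumption, not text from this commit:

#include <cstddef>
#include <cstdint>

using u32 = std::uint32_t;

struct AddressRange {
    u32 start_address;
    std::size_t length;

    // Does this interval overlap with [from, to)?
    // Assumed body: the commit's hunk does not show it. constexpr is added
    // here only to allow the compile-time checks below.
    constexpr bool Overlaps(u32 from, u32 to) const {
        return start_address <= to && from <= start_address + length;
    }
};

// A 16-byte range at 0x1000 reaches 0x1010, so it overlaps a write to
// [0x100C, 0x1010) but not one to [0x1011, 0x1020).
static_assert(AddressRange{0x1000, 16}.Overlaps(0x100C, 0x1010), "overlap");
static_assert(!AddressRange{0x1000, 16}.Overlaps(0x1011, 0x1020), "disjoint");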