2015-05-04 04:01:16 +01:00
|
|
|
// Copyright 2015 Citra Emulator Project
|
|
|
|
// Licensed under GPLv2 or any later version
|
|
|
|
// Refer to the license.txt file included.
|
|
|
|
|
2017-12-07 18:52:08 +00:00
|
|
|
#include <algorithm>
|
2016-04-05 13:29:55 +01:00
|
|
|
#include <memory>
|
2015-05-04 04:01:16 +01:00
|
|
|
#include "common/assert.h"
|
2015-05-08 20:51:48 +01:00
|
|
|
#include "common/common_funcs.h"
|
|
|
|
#include "common/logging/log.h"
|
2017-05-21 08:11:36 +01:00
|
|
|
#include "core/hle/kernel/errors.h"
|
2015-07-29 16:08:00 +01:00
|
|
|
#include "core/hle/kernel/memory.h"
|
2016-09-21 07:52:38 +01:00
|
|
|
#include "core/hle/kernel/process.h"
|
2015-05-12 21:25:15 +01:00
|
|
|
#include "core/hle/kernel/resource_limit.h"
|
2015-05-04 04:01:16 +01:00
|
|
|
#include "core/hle/kernel/thread.h"
|
2015-07-10 02:52:15 +01:00
|
|
|
#include "core/hle/kernel/vm_manager.h"
|
2015-05-13 02:38:29 +01:00
|
|
|
#include "core/memory.h"
|
2015-05-04 04:01:16 +01:00
|
|
|
|
|
|
|
namespace Kernel {
|
|
|
|
|
2019-03-23 20:04:19 +00:00
|
|
|
std::shared_ptr<CodeSet> KernelSystem::CreateCodeSet(std::string name, u64 program_id) {
|
|
|
|
auto codeset{std::make_shared<CodeSet>(*this)};
|
2015-07-10 02:52:15 +01:00
|
|
|
|
|
|
|
codeset->name = std::move(name);
|
|
|
|
codeset->program_id = program_id;
|
|
|
|
|
|
|
|
return codeset;
|
|
|
|
}
|
|
|
|
|
2018-10-13 22:24:51 +01:00
|
|
|
// Out-of-line definitions keep CodeSet's special members in this TU.
CodeSet::CodeSet(KernelSystem& kernel) : Object(kernel) {}
CodeSet::~CodeSet() = default;
|
2015-07-10 02:52:15 +01:00
|
|
|
|
2019-03-23 20:04:19 +00:00
|
|
|
std::shared_ptr<Process> KernelSystem::CreateProcess(std::shared_ptr<CodeSet> code_set) {
|
|
|
|
auto process{std::make_shared<Process>(*this)};
|
2015-05-04 04:01:16 +01:00
|
|
|
|
2015-07-10 02:52:15 +01:00
|
|
|
process->codeset = std::move(code_set);
|
2015-05-08 21:53:19 +01:00
|
|
|
process->flags.raw = 0;
|
2016-02-11 17:41:15 +00:00
|
|
|
process->flags.memory_region.Assign(MemoryRegion::APPLICATION);
|
2017-12-15 15:58:09 +00:00
|
|
|
process->status = ProcessStatus::Created;
|
2018-10-17 18:47:42 +01:00
|
|
|
process->process_id = ++next_process_id;
|
2015-05-08 21:53:19 +01:00
|
|
|
|
2017-12-07 18:52:08 +00:00
|
|
|
process_list.push_back(process);
|
2015-05-04 04:01:16 +01:00
|
|
|
return process;
|
|
|
|
}
|
|
|
|
|
2018-09-06 21:03:28 +01:00
|
|
|
/// Parses the kernel capability descriptors found in an application's ExHeader
/// and applies them to this process (syscall mask, handle table size, misc
/// flags, static address mappings, and required kernel version).
///
/// @param kernel_caps Pointer to the array of 32-bit capability descriptors.
/// @param len         Number of descriptors in the array.
///
/// Each descriptor's kind is encoded in its high bits: `type` below holds
/// bits 31..20, and the chains of mask comparisons select the descriptor
/// family (the trailing `// 0x....` comments show the descriptor prefix).
void Process::ParseKernelCaps(const u32* kernel_caps, std::size_t len) {
    for (std::size_t i = 0; i < len; ++i) {
        u32 descriptor = kernel_caps[i];
        u32 type = descriptor >> 20;

        if (descriptor == 0xFFFFFFFF) {
            // Unused descriptor entry
            continue;
        } else if ((type & 0xF00) == 0xE00) { // 0x0FFF
            // Allowed interrupts list — not implemented, intentionally ignored.
            LOG_WARNING(Loader, "ExHeader allowed interrupts list ignored");
        } else if ((type & 0xF80) == 0xF00) { // 0x07FF
            // Allowed syscalls mask: bits 26..24 select a 24-bit group of
            // syscall numbers, bits 23..0 are one enable bit per syscall.
            unsigned int index = ((descriptor >> 24) & 7) * 24;
            u32 bits = descriptor & 0xFFFFFF;

            while (bits && index < svc_access_mask.size()) {
                svc_access_mask.set(index, bits & 1);
                ++index;
                bits >>= 1;
            }
        } else if ((type & 0xFF0) == 0xFE0) { // 0x00FF
            // Handle table size (low 10 bits)
            handle_table_size = descriptor & 0x3FF;
        } else if ((type & 0xFF8) == 0xFF0) { // 0x007F
            // Misc. flags
            flags.raw = descriptor & 0xFFFF;
        } else if ((type & 0xFFE) == 0xFF8) { // 0x001F
            // Mapped memory range: encoded as a pair of consecutive
            // descriptors (start page, end page). Validate that the second
            // descriptor exists and is of the same kind before consuming it.
            if (i + 1 >= len || ((kernel_caps[i + 1] >> 20) & 0xFFE) != 0xFF8) {
                LOG_WARNING(Loader, "Incomplete exheader memory range descriptor ignored.");
                continue;
            }
            u32 end_desc = kernel_caps[i + 1];
            ++i; // Skip over the second descriptor on the next iteration

            AddressMapping mapping;
            // Low 20 bits are a page index; shift by 12 to get the address.
            mapping.address = descriptor << 12;
            VAddr end_address = end_desc << 12;

            if (mapping.address < end_address) {
                mapping.size = end_address - mapping.address;
            } else {
                // Degenerate (end <= start) range: record it with zero size.
                mapping.size = 0;
            }

            // Bit 20 of each half carries per-range attributes.
            mapping.read_only = (descriptor & (1 << 20)) != 0;
            mapping.unk_flag = (end_desc & (1 << 20)) != 0;

            address_mappings.push_back(mapping);
        } else if ((type & 0xFFF) == 0xFFE) { // 0x000F
            // Mapped memory page: a single read-write page.
            AddressMapping mapping;
            mapping.address = descriptor << 12;
            mapping.size = Memory::PAGE_SIZE;
            mapping.read_only = false;
            mapping.unk_flag = false;

            address_mappings.push_back(mapping);
        } else if ((type & 0xFE0) == 0xFC0) { // 0x01FF
            // Kernel version, encoded as major.minor in the low 16 bits.
            kernel_version = descriptor & 0xFFFF;

            int minor = kernel_version & 0xFF;
            int major = (kernel_version >> 8) & 0xFF;
            LOG_INFO(Loader, "ExHeader kernel version: {}.{}", major, minor);
        } else {
            LOG_ERROR(Loader, "Unhandled kernel caps descriptor: 0x{:08X}", descriptor);
        }
    }
}
|
|
|
|
|
2015-07-10 02:52:15 +01:00
|
|
|
/// Transitions the process from Created to Running: maps the CodeSet segments
/// and stack into the address space, applies special/static mappings, and
/// spawns the main thread at the CodeSet entrypoint.
///
/// @param main_thread_priority Scheduling priority for the main thread.
/// @param stack_size           Size in bytes of the main stack, placed at the
///                             top of the heap region (HEAP_VADDR_END).
void Process::Run(s32 main_thread_priority, u32 stack_size) {
    memory_region = kernel.GetMemoryRegion(flags.memory_region);

    // Allocates backing memory for one segment and copies its contents from
    // the CodeSet image. skip_range_check=true because code segments may lie
    // outside the regular heap virtual range.
    auto MapSegment = [&](CodeSet::Segment& segment, VMAPermission permissions,
                          MemoryState memory_state) {
        HeapAllocate(segment.addr, segment.size, permissions, memory_state, true);
        kernel.memory.WriteBlock(*this, segment.addr, codeset->memory.data() + segment.offset,
                                 segment.size);
    };

    // Map CodeSet segments
    MapSegment(codeset->CodeSegment(), VMAPermission::ReadExecute, MemoryState::Code);
    MapSegment(codeset->RODataSegment(), VMAPermission::Read, MemoryState::Code);
    MapSegment(codeset->DataSegment(), VMAPermission::ReadWrite, MemoryState::Private);

    // Allocate and map stack
    HeapAllocate(Memory::HEAP_VADDR_END - stack_size, stack_size, VMAPermission::ReadWrite,
                 MemoryState::Locked, true);

    // Map special address mappings
    kernel.MapSharedPages(vm_manager);
    for (const auto& mapping : address_mappings) {
        kernel.HandleSpecialMapping(vm_manager, mapping);
    }

    status = ProcessStatus::Running;

    vm_manager.LogLayout(Log::Level::Debug);
    Kernel::SetupMainThread(kernel, codeset->entrypoint, main_thread_priority, SharedFrom(this));
}
|
|
|
|
|
2016-03-06 06:09:59 +00:00
|
|
|
/// Returns the base virtual address of the linear heap area for this process.
VAddr Process::GetLinearHeapAreaAddress() const {
    // Starting from system version 8.0.0 a new linear heap layout is supported to allow usage of
    // the extra RAM in the n3DS. 0x22C encodes kernel version 2.44.
    if (kernel_version < 0x22C) {
        return Memory::LINEAR_HEAP_VADDR;
    }
    return Memory::NEW_LINEAR_HEAP_VADDR;
}
|
2017-05-06 07:11:06 +01:00
|
|
|
|
2015-08-06 01:26:52 +01:00
|
|
|
/// Returns the first valid linear heap address for this process, offset by
/// the base of its assigned memory region.
VAddr Process::GetLinearHeapBase() const {
    const VAddr area_address = GetLinearHeapAreaAddress();
    return area_address + memory_region->base;
}
|
|
|
|
|
|
|
|
/// Returns one past the last valid linear heap address for this process.
VAddr Process::GetLinearHeapLimit() const {
    const VAddr heap_base = GetLinearHeapBase();
    return heap_base + memory_region->size;
}
|
|
|
|
|
2018-11-06 20:00:47 +00:00
|
|
|
/// Allocates and maps heap memory at the given target virtual address.
///
/// @param target           Virtual address to map at; must lie in the heap
///                         region unless skip_range_check is set.
/// @param size             Allocation size in bytes.
/// @param perms            Permissions for the mapped memory.
/// @param memory_state     Memory state tag recorded in the VMA.
/// @param skip_range_check When true, an out-of-heap-range target is allowed
///                         (used internally, e.g. for mapping code segments).
/// @return The target address on success, or an error code.
ResultVal<VAddr> Process::HeapAllocate(VAddr target, u32 size, VMAPermission perms,
                                       MemoryState memory_state, bool skip_range_check) {
    LOG_DEBUG(Kernel, "Allocate heap target={:08X}, size={:08X}", target, size);
    // `target + size < target` catches unsigned wrap-around of the end address.
    if (target < Memory::HEAP_VADDR || target + size > Memory::HEAP_VADDR_END ||
        target + size < target) {
        if (!skip_range_check) {
            LOG_ERROR(Kernel, "Invalid heap address");
            return ERR_INVALID_ADDRESS;
        }
    }

    // The whole requested range must fall inside a single free VMA.
    auto vma = vm_manager.FindVMA(target);
    if (vma->second.type != VMAType::Free || vma->second.base + vma->second.size < target + size) {
        LOG_ERROR(Kernel, "Trying to allocate already allocated memory");
        return ERR_INVALID_ADDRESS_STATE;
    }

    // Reserve physical FCRAM; may come back as multiple disjoint intervals.
    auto allocated_fcram = memory_region->HeapAllocate(size);
    if (allocated_fcram.empty()) {
        LOG_ERROR(Kernel, "Not enough space");
        return ERR_OUT_OF_HEAP_MEMORY;
    }

    // Maps heap block by block
    VAddr interval_target = target;
    for (const auto& interval : allocated_fcram) {
        u32 interval_size = interval.upper() - interval.lower();
        LOG_DEBUG(Kernel, "Allocated FCRAM region lower={:08X}, upper={:08X}", interval.lower(),
                  interval.upper());
        // Zero the backing memory before handing it to the guest.
        std::fill(kernel.memory.GetFCRAMPointer(interval.lower()),
                  kernel.memory.GetFCRAMPointer(interval.upper()), 0);
        auto vma = vm_manager.MapBackingMemory(interval_target,
                                               kernel.memory.GetFCRAMPointer(interval.lower()),
                                               interval_size, memory_state);
        ASSERT(vma.Succeeded());
        vm_manager.Reprotect(vma.Unwrap(), perms);
        interval_target += interval_size;
    }

    // Account the allocation against the process and its resource limit.
    memory_used += size;
    resource_limit->current_commit += size;

    return MakeResult<VAddr>(target);
}
|
|
|
|
|
|
|
|
/// Frees a previously allocated heap range: releases the backing FCRAM and
/// unmaps the virtual range.
///
/// @param target Start of the range to free; must lie in the heap region.
/// @param size   Size in bytes; a size of 0 is a successful no-op.
/// @return RESULT_SUCCESS, or an error code for an invalid range.
ResultCode Process::HeapFree(VAddr target, u32 size) {
    LOG_DEBUG(Kernel, "Free heap target={:08X}, size={:08X}", target, size);
    // `target + size < target` catches unsigned wrap-around of the end address.
    if (target < Memory::HEAP_VADDR || target + size > Memory::HEAP_VADDR_END ||
        target + size < target) {
        LOG_ERROR(Kernel, "Invalid heap address");
        return ERR_INVALID_ADDRESS;
    }

    if (size == 0) {
        return RESULT_SUCCESS;
    }

    // Free heaps block by block
    CASCADE_RESULT(auto backing_blocks, vm_manager.GetBackingBlocksForRange(target, size));
    for (const auto [backing_memory, block_size] : backing_blocks) {
        memory_region->Free(kernel.memory.GetFCRAMOffset(backing_memory), block_size);
    }

    // Unmapping is expected to succeed since GetBackingBlocksForRange
    // already validated the range.
    ResultCode result = vm_manager.UnmapRange(target, size);
    ASSERT(result.IsSuccess());

    memory_used -= size;
    resource_limit->current_commit -= size;

    return RESULT_SUCCESS;
}
|
|
|
|
|
|
|
|
/// Allocates memory in the linear heap, a region whose virtual addresses map
/// 1:1 (plus a fixed offset) onto physical FCRAM.
///
/// @param target Virtual address to allocate at, or 0 to let the kernel pick
///               the address (the common case for games and homebrew).
/// @param size   Allocation size in bytes.
/// @param perms  Permissions for the mapped memory.
/// @return The (possibly chosen) target address on success, or an error code.
ResultVal<VAddr> Process::LinearAllocate(VAddr target, u32 size, VMAPermission perms) {
    LOG_DEBUG(Kernel, "Allocate linear heap target={:08X}, size={:08X}", target, size);
    u32 physical_offset;
    if (target == 0) {
        // Let the memory region pick a physical offset, then derive the
        // virtual address from it (linear heap is offset-mapped onto FCRAM).
        auto offset = memory_region->LinearAllocate(size);
        if (!offset) {
            LOG_ERROR(Kernel, "Not enough space");
            return ERR_OUT_OF_HEAP_MEMORY;
        }
        physical_offset = *offset;
        target = physical_offset + GetLinearHeapAreaAddress();
    } else {
        // `target + size < target` catches unsigned wrap-around of the end.
        if (target < GetLinearHeapBase() || target + size > GetLinearHeapLimit() ||
            target + size < target) {
            LOG_ERROR(Kernel, "Invalid linear heap address");
            return ERR_INVALID_ADDRESS;
        }

        // Kernel would crash/return error when target doesn't meet some requirement.
        // It seems that target is required to follow immediately after the allocated linear heap,
        // or cover the entire hole if there is any.
        // Right now we just ignore these checks because they are still unclear. Furthermore,
        // games and homebrew only ever seem to pass target = 0 here (which lets the kernel decide
        // the address), so this is not important.

        physical_offset = target - GetLinearHeapAreaAddress(); // relative to FCRAM
        if (!memory_region->LinearAllocate(physical_offset, size)) {
            LOG_ERROR(Kernel, "Trying to allocate already allocated memory");
            return ERR_INVALID_ADDRESS_STATE;
        }
    }

    u8* backing_memory = kernel.memory.GetFCRAMPointer(physical_offset);

    // Zero the backing memory before mapping it into the address space.
    std::fill(backing_memory, backing_memory + size, 0);
    auto vma = vm_manager.MapBackingMemory(target, backing_memory, size, MemoryState::Continuous);
    ASSERT(vma.Succeeded());
    vm_manager.Reprotect(vma.Unwrap(), perms);

    // Account the allocation against the process and its resource limit.
    memory_used += size;
    resource_limit->current_commit += size;

    LOG_DEBUG(Kernel, "Allocated at target={:08X}", target);
    return MakeResult<VAddr>(target);
}
|
|
|
|
|
|
|
|
/// Frees a previously allocated linear heap range: unmaps the virtual range
/// and releases the corresponding FCRAM.
///
/// @param target Start of the range to free; must lie in the linear heap.
/// @param size   Size in bytes; a size of 0 is a successful no-op.
/// @return RESULT_SUCCESS, or an error code for an invalid/unmapped range.
ResultCode Process::LinearFree(VAddr target, u32 size) {
    LOG_DEBUG(Kernel, "Free linear heap target={:08X}, size={:08X}", target, size);
    // `target + size < target` catches unsigned wrap-around of the end address.
    if (target < GetLinearHeapBase() || target + size > GetLinearHeapLimit() ||
        target + size < target) {
        LOG_ERROR(Kernel, "Invalid linear heap address");
        return ERR_INVALID_ADDRESS;
    }

    if (size == 0) {
        return RESULT_SUCCESS;
    }

    ResultCode result = vm_manager.UnmapRange(target, size);
    if (result.IsError()) {
        LOG_ERROR(Kernel, "Trying to free already freed memory");
        return result;
    }

    memory_used -= size;
    resource_limit->current_commit -= size;

    // Linear heap addresses map directly onto FCRAM at a fixed offset, so the
    // physical offset can be recovered arithmetically.
    u32 physical_offset = target - GetLinearHeapAreaAddress(); // relative to FCRAM
    memory_region->Free(physical_offset, size);

    return RESULT_SUCCESS;
}
|
|
|
|
|
2018-11-07 17:13:00 +00:00
|
|
|
/// Creates an alias mapping: the heap memory backing [source, source+size) is
/// additionally mapped at target (implements svcControlMemory MAP and the
/// privileged ProcessMemory variant).
///
/// @param target     Destination virtual address for the alias.
/// @param source     Heap address whose backing memory is aliased.
/// @param size       Size of the mapping in bytes.
/// @param perms      Permissions for the target mapping.
/// @param privileged True for the kernel/process-memory variant, which uses
///                   different memory states and locks the source region.
ResultCode Process::Map(VAddr target, VAddr source, u32 size, VMAPermission perms,
                        bool privileged) {
    LOG_DEBUG(Kernel, "Map memory target={:08X}, source={:08X}, size={:08X}, perms={:08X}", target,
              source, size, static_cast<u8>(perms));
    // `source + size < source` catches unsigned wrap-around of the end address.
    if (source < Memory::HEAP_VADDR || source + size > Memory::HEAP_VADDR_END ||
        source + size < source) {
        LOG_ERROR(Kernel, "Invalid source address");
        return ERR_INVALID_ADDRESS;
    }

    // TODO(wwylele): check target address range. Is it also restricted to heap region?

    // The whole target range must fall inside a single free VMA.
    auto vma = vm_manager.FindVMA(target);
    if (vma->second.type != VMAType::Free || vma->second.base + vma->second.size < target + size) {
        LOG_ERROR(Kernel, "Trying to map to already allocated memory");
        return ERR_INVALID_ADDRESS_STATE;
    }

    // Check range overlapping
    // Both differences are unsigned, so this is true iff |source - target| < size,
    // i.e. the two ranges overlap (including the source == target case).
    if (source - target < size || target - source < size) {
        if (privileged) {
            if (source == target) {
                // privileged Map allows identical source and target address, which simply changes
                // the state and the permission of the memory
                return vm_manager.ChangeMemoryState(source, size, MemoryState::Private,
                                                    VMAPermission::ReadWrite,
                                                    MemoryState::AliasCode, perms);
            } else {
                return ERR_INVALID_ADDRESS;
            }
        } else {
            return ERR_INVALID_ADDRESS_STATE;
        }
    }

    // Privileged maps lock the source and tag the target as aliased code;
    // regular maps keep the source readable/writable.
    MemoryState source_state = privileged ? MemoryState::Locked : MemoryState::Aliased;
    MemoryState target_state = privileged ? MemoryState::AliasCode : MemoryState::Alias;
    VMAPermission source_perm = privileged ? VMAPermission::None : VMAPermission::ReadWrite;

    // Mark source region as Aliased
    CASCADE_CODE(vm_manager.ChangeMemoryState(source, size, MemoryState::Private,
                                              VMAPermission::ReadWrite, source_state, source_perm));

    // Map each backing block of the source at the corresponding target offset.
    CASCADE_RESULT(auto backing_blocks, vm_manager.GetBackingBlocksForRange(source, size));
    VAddr interval_target = target;
    for (const auto [backing_memory, block_size] : backing_blocks) {
        auto target_vma =
            vm_manager.MapBackingMemory(interval_target, backing_memory, block_size, target_state);
        ASSERT(target_vma.Succeeded());
        vm_manager.Reprotect(target_vma.Unwrap(), perms);
        interval_target += block_size;
    }

    return RESULT_SUCCESS;
}
|
2018-11-07 17:13:00 +00:00
|
|
|
/// Removes an alias mapping previously created by Map: unmaps the target
/// range and restores the source region to Private state with the given
/// permissions.
///
/// @param target     Alias address to unmap.
/// @param source     Original heap address of the aliased memory.
/// @param size       Size of the mapping in bytes.
/// @param perms      Permissions to restore on the source region.
/// @param privileged True for the kernel/process-memory variant.
ResultCode Process::Unmap(VAddr target, VAddr source, u32 size, VMAPermission perms,
                          bool privileged) {
    LOG_DEBUG(Kernel, "Unmap memory target={:08X}, source={:08X}, size={:08X}, perms={:08X}",
              target, source, size, static_cast<u8>(perms));
    // `source + size < source` catches unsigned wrap-around of the end address.
    if (source < Memory::HEAP_VADDR || source + size > Memory::HEAP_VADDR_END ||
        source + size < source) {
        LOG_ERROR(Kernel, "Invalid source address");
        return ERR_INVALID_ADDRESS;
    }

    // TODO(wwylele): check target address range. Is it also restricted to heap region?

    // TODO(wwylele): check that the source and the target are actually a pair created by Map
    // Should return error 0xD8E007F5 in this case

    // Both differences are unsigned, so this is true iff |source - target| < size,
    // i.e. the two ranges overlap (including the source == target case).
    if (source - target < size || target - source < size) {
        if (privileged) {
            if (source == target) {
                // privileged Unmap allows identical source and target address, which simply changes
                // the state and the permission of the memory
                return vm_manager.ChangeMemoryState(source, size, MemoryState::AliasCode,
                                                    VMAPermission::None, MemoryState::Private,
                                                    perms);
            } else {
                return ERR_INVALID_ADDRESS;
            }
        } else {
            return ERR_INVALID_ADDRESS_STATE;
        }
    }

    // Mirrors the source state chosen in Map for the corresponding variant.
    MemoryState source_state = privileged ? MemoryState::Locked : MemoryState::Aliased;

    CASCADE_CODE(vm_manager.UnmapRange(target, size));

    // Change back source region state. Note that the permission is reprotected according to param
    CASCADE_CODE(vm_manager.ChangeMemoryState(source, size, source_state, VMAPermission::None,
                                              MemoryState::Private, perms));

    return RESULT_SUCCESS;
}
|
|
|
|
|
2018-10-20 02:04:18 +01:00
|
|
|
/// Constructs a Process and registers its page table with the kernel's memory
/// system so that CPU cores can switch to this process's address space.
Kernel::Process::Process(KernelSystem& kernel)
    : Object(kernel), handle_table(kernel), vm_manager(kernel.memory), kernel(kernel) {
    kernel.memory.RegisterPageTable(&vm_manager.page_table);
}
|
|
|
|
Kernel::Process::~Process() {
    // Release all objects this process owns first so that their potential destructor can do clean
    // up with this process before further destruction.
    // TODO(wwylele): explicitly destroy or invalidate objects this process owns (threads, shared
    // memory etc.) even if they are still referenced by other processes.
    handle_table.Clear();

    // Must happen after handle_table.Clear(): released objects may still
    // reference this process's address space during their own teardown.
    kernel.memory.UnregisterPageTable(&vm_manager.page_table);
}
|
2015-05-04 04:01:16 +01:00
|
|
|
|
2019-03-23 20:04:19 +00:00
|
|
|
/// Looks up a process by its id in the kernel-wide process list.
/// Returns nullptr when no process with that id exists.
std::shared_ptr<Process> KernelSystem::GetProcessById(u32 process_id) const {
    for (const auto& process : process_list) {
        if (process->process_id == process_id) {
            return process;
        }
    }
    return nullptr;
}
|
2017-12-07 18:52:08 +00:00
|
|
|
} // namespace Kernel
|