Initial implementation of svcMapPhysicalMemory

This commit is contained in:
David Marcec
2018-11-15 14:57:28 +11:00
parent 3bd503d59c
commit d68162a7c9
8 changed files with 281 additions and 40 deletions

View File

@@ -63,6 +63,10 @@ u32 ProgramMetadata::GetMainThreadStackSize() const {
return npdm_header.main_stack_size;
}
// Returns the system resource size field from the NPDM header, in bytes.
// NOTE(review): a nonzero value appears to reserve a personal mm heap that
// svcMapPhysicalMemory draws from (it is surfaced to guests via svcGetInfo's
// SystemResourceSize query) — confirm against the loader docs.
u32 ProgramMetadata::GetSystemResourceSize() const {
return npdm_header.system_resource_size;
}
// Returns the title ID read from the ACI header.
u64 ProgramMetadata::GetTitleID() const {
return aci_header.title_id;
}

View File

@@ -50,6 +50,7 @@ public:
u32 GetMainThreadStackSize() const;
u64 GetTitleID() const;
u64 GetFilesystemPermissions() const;
u32 GetSystemResourceSize() const;
void Print() const;
@@ -68,7 +69,8 @@ private:
u8 reserved_3;
u8 main_thread_priority;
u8 main_thread_cpu;
std::array<u8, 8> reserved_4;
std::array<u8, 4> reserved_4;
u32 system_resource_size;
u32_le process_category;
u32_le main_stack_size;
std::array<u8, 0x10> application_name;

View File

@@ -38,6 +38,7 @@ SharedPtr<Process> Process::Create(KernelCore& kernel, std::string&& name) {
}
void Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata) {
system_resource_size = metadata.GetSystemResourceSize();
program_id = metadata.GetTitleID();
is_64bit_process = metadata.Is64BitProgram();
vm_manager.Reset(metadata.GetAddressSpaceType());

View File

@@ -168,6 +168,10 @@ public:
return program_id;
}
/// Gets the process' system resource size in bytes, as copied from the
/// program metadata in LoadFromMetadata().
u32 GetSystemResourceSize() const {
return system_resource_size;
}
/// Gets the resource limit descriptor for this process
ResourceLimit& GetResourceLimit() {
return *resource_limit;
@@ -271,6 +275,8 @@ private:
/// Title ID corresponding to the process
u64 program_id;
u32 system_resource_size = 0;
/// Resource limit descriptor for this process
SharedPtr<ResourceLimit> resource_limit;

View File

@@ -39,30 +39,6 @@
namespace Kernel {
namespace {
// Checks if address + size is greater than the given address
// This can return false if the size causes an overflow of a 64-bit type
// or if the given size is zero.
constexpr bool IsValidAddressRange(VAddr address, u64 size) {
return address + size > address;
}
// Checks if a given address range lies within a larger address range.
constexpr bool IsInsideAddressRange(VAddr address, u64 size, VAddr address_range_begin,
VAddr address_range_end) {
const VAddr end_address = address + size - 1;
return address_range_begin <= address && end_address <= address_range_end - 1;
}
bool IsInsideAddressSpace(const VMManager& vm, VAddr address, u64 size) {
return IsInsideAddressRange(address, size, vm.GetAddressSpaceBaseAddress(),
vm.GetAddressSpaceEndAddress());
}
bool IsInsideNewMapRegion(const VMManager& vm, VAddr address, u64 size) {
return IsInsideAddressRange(address, size, vm.GetNewMapRegionBaseAddress(),
vm.GetNewMapRegionEndAddress());
}
// Helper function that performs the common sanity checks for svcMapMemory
// and svcUnmapMemory. This is doable, as both functions perform their sanitizing
// in the same order.
@@ -84,11 +60,11 @@ ResultCode MapUnmapMemorySanityChecks(const VMManager& vm_manager, VAddr dst_add
return ERR_INVALID_ADDRESS_STATE;
}
if (!IsInsideAddressSpace(vm_manager, src_addr, size)) {
if (!vm_manager.IsInsideAddressSpace(src_addr, size)) {
return ERR_INVALID_ADDRESS_STATE;
}
if (!IsInsideNewMapRegion(vm_manager, dst_addr, size)) {
if (!vm_manager.IsInsideNewMapRegion(dst_addr, size)) {
return ERR_INVALID_MEMORY_RANGE;
}
@@ -517,7 +493,7 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id)
NewMapRegionBaseAddr = 14,
NewMapRegionSize = 15,
// 3.0.0+
IsVirtualAddressMemoryEnabled = 16,
SystemResourceSize = 16,
PersonalMmHeapUsage = 17,
TitleId = 18,
// 4.0.0+
@@ -573,9 +549,11 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id)
case GetInfoType::NewMapRegionSize:
*result = vm_manager.GetNewMapRegionSize();
break;
case GetInfoType::IsVirtualAddressMemoryEnabled:
*result = current_process->IsVirtualMemoryEnabled();
case GetInfoType::SystemResourceSize:
*result = current_process->GetSystemResourceSize();
break;
case GetInfoType::PersonalMmHeapUsage:
*result = vm_manager.GetPersonalMmHeapUsage();
case GetInfoType::TitleId:
*result = current_process->GetTitleID();
break;
@@ -1275,6 +1253,22 @@ static ResultCode GetProcessInfo(u64* out, Handle process_handle, u32 type) {
return RESULT_SUCCESS;
}
/// svcMapPhysicalMemory (0x2C): maps memory backed by the process' system
/// resource into its address space at the given address.
///
/// @param addr Target virtual address to map at.
/// @param size Number of bytes to map.
/// @return The result of VMManager::MapPhysicalMemory. The original version
///         discarded that ResultCode and always reported success, which would
///         hide mapping failures from the guest; the result is now propagated.
static ResultCode MapPhysicalMemory(VAddr addr, u64 size) {
    LOG_DEBUG(Kernel_SVC, "called, addr=0x{:08X}, size=0x{:X}", addr, size);
    auto* const current_process = Core::CurrentProcess();
    auto& vm_manager = current_process->VMManager();
    return vm_manager.MapPhysicalMemory(addr, size);
}
/// svcUnmapPhysicalMemory (0x2D): unmaps a range previously mapped with
/// svcMapPhysicalMemory from the current process' address space.
///
/// @param addr Target virtual address to unmap from.
/// @param size Number of bytes to unmap.
/// @return The result of VMManager::UnmapPhysicalMemory. The original version
///         discarded that ResultCode and always reported success; the result
///         is now propagated so failures reach the guest.
static ResultCode UnmapPhysicalMemory(VAddr addr, u64 size) {
    LOG_DEBUG(Kernel_SVC, "called, addr=0x{:08X}, size=0x{:X}", addr, size);
    auto* const current_process = Core::CurrentProcess();
    auto& vm_manager = current_process->VMManager();
    return vm_manager.UnmapPhysicalMemory(addr, size);
}
namespace {
struct FunctionDef {
using Func = void();
@@ -1330,8 +1324,8 @@ static const FunctionDef SVC_Table[] = {
{0x29, SvcWrap<GetInfo>, "GetInfo"},
{0x2A, nullptr, "FlushEntireDataCache"},
{0x2B, nullptr, "FlushDataCache"},
{0x2C, nullptr, "MapPhysicalMemory"},
{0x2D, nullptr, "UnmapPhysicalMemory"},
{0x2C, SvcWrap<MapPhysicalMemory>, "MapPhysicalMemory"},
{0x2D, SvcWrap<UnmapPhysicalMemory>, "UnmapPhysicalMemory"},
{0x2E, nullptr, "GetFutureThreadInfo"},
{0x2F, nullptr, "GetLastThreadInfo"},
{0x30, nullptr, "GetResourceLimitLimitValue"},

View File

@@ -132,6 +132,11 @@ void SvcWrap() {
FuncReturn(func(static_cast<u32>(Param(0)), Param(1), Param(2)).raw);
}
// Wrapper for SVCs with the signature ResultCode(u64, u64), e.g.
// MapPhysicalMemory/UnmapPhysicalMemory: forwards the first two guest
// registers as arguments and writes the raw result code back.
template <ResultCode func(u64, u64)>
void SvcWrap() {
FuncReturn(func(Param(0), Param(1)).raw);
}
template <ResultCode func(u32*, u64, u64, s64)>
void SvcWrap() {
u32 param_1 = 0;

View File

@@ -7,6 +7,7 @@
#include <utility>
#include "common/assert.h"
#include "common/logging/log.h"
#include "common/scope_exit.h"
#include "core/arm/arm_interface.h"
#include "core/core.h"
#include "core/file_sys/program_metadata.h"
@@ -163,6 +164,177 @@ ResultVal<VAddr> VMManager::FindFreeRegion(u64 size) const {
return MakeResult<VAddr>(target);
}
// Returns true when both endpoints of [addr_start, addr_end] sit at or above
// start_range. Note this only bounds the interval from below; no upper limit
// is checked.
constexpr bool FallsInAddress(u64 addr_start, u64 addr_end, u64 start_range) {
    const bool start_in_bounds = addr_start >= start_range;
    const bool end_in_bounds = addr_end >= start_range;
    return start_in_bounds && end_in_bounds;
}
// Maps `size` bytes of zero-initialized backing memory into the map
// (personal mm heap) region at `addr`, walking the existing VMAs so that
// already-mapped subranges are left alone; any partial failure is rolled
// back before returning.
//
// Fixes relative to the committed version:
//  - removed a stray MSVC-only `#pragma optimize("", off)` debugging
//    leftover, which disabled optimization for every following function in
//    this translation unit and is not portable;
//  - the empty-heap fast path no longer increments personal_heap_usage when
//    MapMemoryBlock fails.
ResultCode VMManager::MapPhysicalMemory(VAddr addr, u64 size) {
    const auto base = GetMapRegionBaseAddress();
    const auto end = GetMapRegionEndAddress();

    // The requested range must lie entirely within the map region.
    if (!IsInsideMapRegion(addr, size)) {
        return ERR_INVALID_ADDRESS;
    }

    // Fast path: nothing mapped yet, so the whole range can be mapped as a
    // single block.
    if (personal_heap_usage == 0) {
        const auto result = MapMemoryBlock(addr, std::make_shared<std::vector<u8>>(size, 0), 0,
                                           size, MemoryState::Mapped);
        const auto code = result.Code();
        // Only account for the memory if the mapping actually succeeded.
        if (code.IsSuccess()) {
            personal_heap_usage += size;
        }
        return code;
    }

    auto vma = FindVMA(base);
    u64 remaining_to_map = size;
    auto last_result = RESULT_SUCCESS;
    // Needed just in case we fail to map a region, we'll unmap everything.
    std::vector<std::pair<u64, u64>> mapped_regions;

    while (vma != vma_map.end() && vma->second.base <= end && remaining_to_map > 0) {
        const auto vma_start = vma->second.base;
        const auto vma_end = vma_start + vma->second.size;
        const auto is_mapped = vma->second.meminfo_state == MemoryState::Mapped;
        // Something failed, lets bail out
        if (last_result.IsError()) {
            break;
        }
        last_result = RESULT_SUCCESS;
        // Allows us to use continue without worrying about incrementing the vma
        SCOPE_EXIT({ vma++; });
        // We're out of range now, we can just break. We should be done with everything now
        if (vma_start > addr + size - 1) {
            break;
        }
        // We're not processing addresses yet, lets keep skipping
        if (!IsInsideAddressRange(addr, size, vma_start, vma_end)) {
            continue;
        }
        // NOTE(review): vma_start + (addr - vma_start) always equals addr,
        // which is only obviously correct for the first VMA containing addr —
        // confirm the intended start address for subsequent VMAs.
        const auto offset_in_vma = vma_start + (addr - vma_start);
        const auto remaining_vma_size = (vma_end - offset_in_vma);
        // Our vma is already mapped
        if (is_mapped) {
            if (remaining_vma_size >= remaining_to_map) {
                // Our region we need is already mapped
                break;
            } else {
                // We are partially mapped, Make note of it and move on
                remaining_to_map -= remaining_vma_size;
                continue;
            }
        } else {
            // We're not mapped, so lets map some space
            if (remaining_vma_size >= remaining_to_map) {
                // We can fit everything in this region, lets finish off the mapping
                last_result = MapMemoryBlock(offset_in_vma,
                                             std::make_shared<std::vector<u8>>(remaining_to_map, 0),
                                             0, remaining_to_map, MemoryState::Mapped)
                                  .Code();
                if (last_result.IsSuccess()) {
                    personal_heap_usage += remaining_to_map;
                    mapped_regions.push_back(std::make_pair(offset_in_vma, remaining_to_map));
                }
                break;
            } else {
                // We can do a partial mapping here
                last_result =
                    MapMemoryBlock(offset_in_vma,
                                   std::make_shared<std::vector<u8>>(remaining_vma_size, 0), 0,
                                   remaining_vma_size, MemoryState::Mapped)
                        .Code();
                // Update our usage and continue to the next vma
                if (last_result.IsSuccess()) {
                    personal_heap_usage += remaining_vma_size;
                    remaining_to_map -= remaining_vma_size;
                    mapped_regions.push_back(std::make_pair(offset_in_vma, remaining_vma_size));
                }
                continue;
            }
        }
    }

    // We failed to map something, lets unmap everything we mapped
    if (last_result.IsError() && !mapped_regions.empty()) {
        for (const auto [mapped_addr, mapped_size] : mapped_regions) {
            UnmapRange(mapped_addr, mapped_size);
            personal_heap_usage -= mapped_size;
        }
    }

    return last_result;
}
// Unmaps up to `size` bytes from the map (personal mm heap) region at
// `addr`, walking the existing VMAs and skipping subranges that are already
// unmapped.
//
// Fixes relative to the committed version:
//  - when personal_heap_usage == 0 the original *mapped* the range (a
//    copy/paste of MapPhysicalMemory's fast path); an unmap with nothing
//    mapped is now a successful no-op;
//  - removed the unused locals `has_base` and `last_addr`.
ResultCode VMManager::UnmapPhysicalMemory(VAddr addr, u64 size) {
    const VAddr base = GetMapRegionBaseAddress();
    const VAddr end = GetMapRegionEndAddress();

    // Nothing is mapped in the personal heap, so there is nothing to unmap.
    if (personal_heap_usage == 0) {
        return RESULT_SUCCESS;
    }

    auto vma = FindVMA(base);
    u64 remaining_to_unmap = size;

    while (vma != vma_map.end() && vma->second.base <= end && remaining_to_unmap > 0) {
        const auto vma_start = vma->second.base;
        const auto vma_end = vma_start + vma->second.size;
        const auto is_unmapped = vma->second.meminfo_state != MemoryState::Mapped;
        // Allows us to use continue without worrying about incrementing the vma
        SCOPE_EXIT({ vma++; });
        // We're out of range now, we can just break. We should be done with everything now
        if (vma_start > addr + size - 1) {
            break;
        }
        // NOTE(review): this skips any VMA that ends before the requested
        // range's last byte, including ones that partially overlap it —
        // confirm this is intended.
        if (vma_end < addr + size - 1) {
            continue;
        }
        // We're not processing addresses yet, lets keep skipping
        if (!FallsInAddress(addr, addr + size - 1, vma_start)) {
            continue;
        }
        // NOTE(review): vma_start + (addr - vma_start) always equals addr —
        // confirm the intended start address for VMAs other than the first.
        const auto offset_in_vma = vma_start + (addr - vma_start);
        const auto remaining_vma_size = (vma_end - offset_in_vma);
        if (is_unmapped) {
            // We're already unmapped
            if (remaining_vma_size >= remaining_to_unmap) {
                break;
            } else {
                // We're partially unmapped
                remaining_to_unmap -= remaining_vma_size;
                continue;
            }
        } else {
            // This region is mapped, we can completely unmap our range in one go
            if (remaining_vma_size >= remaining_to_unmap) {
                UnmapRange(offset_in_vma, remaining_to_unmap);
                personal_heap_usage -= remaining_to_unmap;
                break;
            } else {
                // We can only do a partial unmap
                UnmapRange(offset_in_vma, remaining_vma_size);
                personal_heap_usage -= remaining_vma_size;
                remaining_to_unmap -= remaining_vma_size;
                continue;
            }
        }
    }

    return RESULT_SUCCESS;
}
ResultVal<VMManager::VMAHandle> VMManager::MapMMIO(VAddr target, PAddr paddr, u64 size,
MemoryState state,
Memory::MemoryHookPointer mmio_handler) {
@@ -202,8 +374,8 @@ ResultCode VMManager::UnmapRange(VAddr target, u64 size) {
const VAddr target_end = target + size;
const VMAIter end = vma_map.end();
// The comparison against the end of the range must be done using addresses since VMAs can be
// merged during this process, causing invalidation of the iterators.
// The comparison against the end of the range must be done using addresses since VMAs can
// be merged during this process, causing invalidation of the iterators.
while (vma != end && vma->second.base < target_end) {
vma = std::next(Unmap(vma));
}
@@ -234,8 +406,8 @@ ResultCode VMManager::ReprotectRange(VAddr target, u64 size, VMAPermission new_p
const VAddr target_end = target + size;
const VMAIter end = vma_map.end();
// The comparison against the end of the range must be done using addresses since VMAs can be
// merged during this process, causing invalidation of the iterators.
// The comparison against the end of the range must be done using addresses since VMAs can
// be merged during this process, causing invalidation of the iterators.
while (vma != end && vma->second.base < target_end) {
vma = std::next(StripIterConstness(Reprotect(vma, new_perms)));
}
@@ -323,8 +495,8 @@ ResultCode VMManager::MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size) {
}
void VMManager::RefreshMemoryBlockMappings(const std::vector<u8>* block) {
// If this ever proves to have a noticeable performance impact, allow users of the function to
// specify a specific range of addresses to limit the scan to.
// If this ever proves to have a noticeable performance impact, allow users of the function
// to specify a specific range of addresses to limit the scan to.
for (const auto& p : vma_map) {
const VirtualMemoryArea& vma = p.second;
if (block == vma.backing_block.get()) {
@@ -419,8 +591,8 @@ VMManager::VMAIter VMManager::SplitVMA(VMAIter vma_handle, u64 offset_in_vma) {
VirtualMemoryArea& old_vma = vma_handle->second;
VirtualMemoryArea new_vma = old_vma; // Make a copy of the VMA
// For now, don't allow no-op VMA splits (trying to split at a boundary) because it's probably
// a bug. This restriction might be removed later.
// For now, don't allow no-op VMA splits (trying to split at a boundary) because it's
// probably a bug. This restriction might be removed later.
ASSERT(offset_in_vma < old_vma.size);
ASSERT(offset_in_vma > 0);
@@ -685,4 +857,22 @@ u64 VMManager::GetTLSIORegionSize() const {
return tls_io_region_end - tls_io_region_base;
}
// Returns the number of bytes currently mapped into the personal mm heap
// (the counter maintained by Map/UnmapPhysicalMemory).
u64 VMManager::GetPersonalMmHeapUsage() const {
return personal_heap_usage;
}
// Returns true if the given range lies entirely within this process'
// address space bounds. Bounds semantics follow IsInsideAddressRange.
bool VMManager::IsInsideAddressSpace(VAddr address, u64 size) const {
return IsInsideAddressRange(address, size, GetAddressSpaceBaseAddress(),
GetAddressSpaceEndAddress());
}
// Returns true if the given range lies entirely within the new map region.
bool VMManager::IsInsideNewMapRegion(VAddr address, u64 size) const {
return IsInsideAddressRange(address, size, GetNewMapRegionBaseAddress(),
GetNewMapRegionEndAddress());
}
// Returns true if the given range lies entirely within the map region
// (the region Map/UnmapPhysicalMemory operate on).
bool VMManager::IsInsideMapRegion(VAddr address, u64 size) const {
return IsInsideAddressRange(address, size, GetMapRegionBaseAddress(), GetMapRegionEndAddress());
}
} // namespace Kernel

View File

@@ -18,6 +18,20 @@ enum class ProgramAddressSpaceType : u8;
namespace Kernel {
// Checks that [address, address + size) denotes a non-empty range that does
// not wrap around the 64-bit address space. Returns false when size is zero
// or when address + size overflows a 64-bit value.
constexpr bool IsValidAddressRange(VAddr address, u64 size) {
return address + size > address;
}
// Checks if a given address range lies within a larger address range.
// Both the inner range's last byte and the enclosing range's end are treated
// inclusively (end_address and address_range_end - 1).
// NOTE(review): a size of zero makes end_address underflow to address - 1;
// callers should validate with IsValidAddressRange first — confirm all call
// sites do.
constexpr bool IsInsideAddressRange(VAddr address, u64 size, VAddr address_range_begin,
VAddr address_range_end) {
const VAddr end_address = address + size - 1;
return address_range_begin <= address && end_address <= address_range_end - 1;
}
enum class VMAType : u8 {
/// VMA represents an unmapped region of the address space.
Free,
@@ -165,6 +179,22 @@ public:
*/
ResultVal<VAddr> FindFreeRegion(u64 size) const;
/**
* Maps memory to the PersonalMmHeap region at a given address.
*
* @param target The address of where you want to map
* @param size The size of the memory you want to map
*/
ResultCode MapPhysicalMemory(VAddr target, u64 size);
/**
* Unmaps memory from the PersonalMmHeap region at a given address.
*
* @param target The address of where you want to unmap
* @param size The size of the memory you want to unmap
*/
ResultCode UnmapPhysicalMemory(VAddr target, u64 size);
/**
* Maps a memory-mapped IO region at a given address.
*
@@ -275,6 +305,13 @@ public:
/// Gets the total size of the TLS IO region in bytes.
u64 GetTLSIORegionSize() const;
/// Gets the amount of memory currently mapped into the PersonalMmHeap region, in bytes.
u64 GetPersonalMmHeapUsage() const;
bool IsInsideAddressSpace(VAddr address, u64 size) const;
bool IsInsideNewMapRegion(VAddr address, u64 size) const;
bool IsInsideMapRegion(VAddr address, u64 size) const;
/// Each VMManager has its own page table, which is set as the main one when the owning process
/// is scheduled.
Memory::PageTable page_table;
@@ -358,5 +395,7 @@ private:
VAddr heap_start = 0;
VAddr heap_end = 0;
u64 heap_used = 0;
u64 personal_heap_usage = 0;
};
} // namespace Kernel