Compare commits

...

23 Commits

Author SHA1 Message Date
bunnei
e791da9791 core: hle: kernel: KPageTable: Various improvements to MapPages and UnmapPages. 2022-01-22 20:51:34 -08:00
bunnei
07add23251 core: hle: kernel: KPageTable: MapProcessCode: Various cleanup. 2022-01-22 20:51:34 -08:00
bunnei
ee25e0a40b core: hle: kernel: KPageTable: ReserveTransferMemory: Various cleanup. 2022-01-22 20:51:34 -08:00
bunnei
0cee5e1af8 core: hle: kernel: KPageTable: ResetTransferMemory: Various cleanup. 2022-01-22 20:51:34 -08:00
bunnei
ffcaf5af90 core: hle: kernel: KPageTable: SetMemoryAttribute: Various cleanup. 2022-01-22 20:51:34 -08:00
bunnei
2935c9d8de core: hle: kernel: KPageTable: Assert valid address on GetPhysicalAddr. 2022-01-22 01:33:26 -08:00
bunnei
264bb5abf7 core: hle: kernel: KPageTable: Operate: Assert lock ownership. 2022-01-22 01:33:26 -08:00
bunnei
0137f2e6e1 core: hle: kernel: KPageTable: SetHeapSize: Cleanup & take physical memory lock. 2022-01-22 01:33:26 -08:00
bunnei
6d8e498f76 core: hle: kernel: Refactor Un/MapPhysicalMemory to remove unnecessary methods. 2022-01-22 01:33:26 -08:00
bunnei
b8b1b58f36 core: hle: kernel: Rename Un/Map to Un/MapMemory. 2022-01-22 01:33:26 -08:00
bunnei
8433edacb3 Merge pull request #7735 from german77/udp_battery
input_common: Report battery for UDP controllers
2022-01-22 01:28:14 -08:00
bunnei
68c8a1b170 Merge pull request #7737 from bunnei/fix-dummy-thread-leak
Various fixes to HLE service thread management
2022-01-21 22:34:47 -08:00
bunnei
615fb40416 hle: kernel: KThread: Ensure host (dummy) threads block on locking.
- But do not enter the priority queue, as otherwise they will be scheduled.
- Allows dummy threads to use guest synchronization primitives.
2022-01-21 17:12:06 -08:00
bunnei
f6815086a1 hle: kernel: Remove redundant tracking of dummy threads.
- These are already tracked by the kernel's registered_objects member.
2022-01-20 17:08:00 -08:00
bunnei
91ff6d4cb3 hle: kernel: KThread: DummyThread can be waited on; ensure wait_queue is not nullptr. 2022-01-20 17:08:00 -08:00
bunnei
46a620f9d7 hle: kernel: KThread: Decrease DummyThread priority to ensure it is never scheduled. 2022-01-20 17:08:00 -08:00
bunnei
0b37e7cb39 hle: kernel: service_thread: Ensure dummy thread is closed & destroyed on thread exit. 2022-01-20 17:08:00 -08:00
bunnei
384e24d3e9 hle: kernel: KServerSession: Remove hack for CompleteSyncRequest.
- This does not appear to be necessary anymore.
2022-01-20 17:08:00 -08:00
bunnei
ad53dc22fd hle: kernel: KServerSession: Simplify CompleteSyncRequest EndWait.
- Since is_thread_waiting is never set, we can remove IsThreadWaiting.
- KThread::EndWait will take the scheduler lock, so we can remove the redundant lock.
2022-01-20 17:08:00 -08:00
bunnei
5ffec69dc7 hle: kernel: KThread: Ensure dummy threads never call EndWait.
- These are only used by host threads for locking and will never have a wait_queue.
2022-01-20 17:08:00 -08:00
bunnei
11a380c3da hle: kernel: KScheduler: Ensure dummy threads are never scheduled.
- These are only used by host threads for locking.
2022-01-20 17:08:00 -08:00
bunnei
f6cbb14dce hle: kernel: KThread: Rename thread_type_for_debugging -> thread_type.
- This will be used to ensure that we do not schedule dummy threads.
2022-01-20 17:08:00 -08:00
Narr the Reg
36144a5690 input_common: Report battery for UDP controllers 2022-01-17 13:30:15 -06:00
14 changed files with 253 additions and 151 deletions

View File

@@ -341,10 +341,6 @@ public:
return *thread;
}
bool IsThreadWaiting() const {
return is_thread_waiting;
}
private:
friend class IPC::ResponseBuilder;
@@ -379,7 +375,6 @@ private:
u32 domain_offset{};
std::shared_ptr<SessionRequestManager> manager;
bool is_thread_waiting{};
KernelCore& kernel;
Core::Memory::Memory& memory;

View File

@@ -276,22 +276,23 @@ ResultCode KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_
ResultCode KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemoryState state,
KMemoryPermission perm) {
std::lock_guard lock{page_table_lock};
const u64 size{num_pages * PageSize};
if (!CanContain(addr, size, state)) {
return ResultInvalidCurrentMemory;
}
// Validate the mapping request.
R_UNLESS(this->CanContain(addr, size, state), ResultInvalidCurrentMemory);
if (IsRegionMapped(addr, size)) {
return ResultInvalidCurrentMemory;
}
// Lock the table.
std::lock_guard lock{page_table_lock};
// Verify that the destination memory is unmapped.
R_TRY(this->CheckMemoryState(addr, size, KMemoryState::All, KMemoryState::Free,
KMemoryPermission::None, KMemoryPermission::None,
KMemoryAttribute::None, KMemoryAttribute::None));
KPageLinkedList page_linked_list;
CASCADE_CODE(system.Kernel().MemoryManager().Allocate(page_linked_list, num_pages, memory_pool,
allocation_option));
CASCADE_CODE(Operate(addr, num_pages, page_linked_list, OperationType::MapGroup));
R_TRY(system.Kernel().MemoryManager().Allocate(page_linked_list, num_pages, memory_pool,
allocation_option));
R_TRY(Operate(addr, num_pages, page_linked_list, OperationType::MapGroup));
block_manager->Update(addr, num_pages, state, perm);
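Several of these hunks replace explicit "if (condition) { return result; }" checks and the older CASCADE_CODE macro with the R_UNLESS/R_TRY result macros. As a rough sketch of what those macros do (illustrative expansions, not yuzu's actual definitions):

#define R_UNLESS(expr, res)  \
    do {                     \
        if (!(expr)) {       \
            return (res);    \
        }                    \
    } while (0)

#define R_TRY(res_expr)                       \
    do {                                      \
        const auto r_try_result = (res_expr); \
        if (r_try_result.IsError()) {         \
            return r_try_result;              \
        }                                     \
    } while (0)

CASCADE_CODE had the same return-on-error behavior as R_TRY, so these substitutions are a readability and convention cleanup rather than a behavioral change.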
@@ -395,39 +396,12 @@ ResultCode KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size,
return ResultSuccess;
}
void KPageTable::MapPhysicalMemory(KPageLinkedList& page_linked_list, VAddr start, VAddr end) {
auto node{page_linked_list.Nodes().begin()};
PAddr map_addr{node->GetAddress()};
std::size_t src_num_pages{node->GetNumPages()};
block_manager->IterateForRange(start, end, [&](const KMemoryInfo& info) {
if (info.state != KMemoryState::Free) {
return;
}
std::size_t dst_num_pages{GetSizeInRange(info, start, end) / PageSize};
VAddr dst_addr{GetAddressInRange(info, start)};
while (dst_num_pages) {
if (!src_num_pages) {
node = std::next(node);
map_addr = node->GetAddress();
src_num_pages = node->GetNumPages();
}
const std::size_t num_pages{std::min(src_num_pages, dst_num_pages)};
Operate(dst_addr, num_pages, KMemoryPermission::UserReadWrite, OperationType::Map,
map_addr);
dst_addr += num_pages * PageSize;
map_addr += num_pages * PageSize;
src_num_pages -= num_pages;
dst_num_pages -= num_pages;
}
});
}
ResultCode KPageTable::MapPhysicalMemory(VAddr addr, std::size_t size) {
// Lock the physical memory lock.
std::lock_guard phys_lk(map_physical_memory_lock);
// Lock the table.
std::lock_guard lock{page_table_lock};
std::size_t mapped_size{};
@@ -463,7 +437,35 @@ ResultCode KPageTable::MapPhysicalMemory(VAddr addr, std::size_t size) {
// We succeeded, so commit the memory reservation.
memory_reservation.Commit();
MapPhysicalMemory(page_linked_list, addr, end_addr);
// Map the memory.
auto node{page_linked_list.Nodes().begin()};
PAddr map_addr{node->GetAddress()};
std::size_t src_num_pages{node->GetNumPages()};
block_manager->IterateForRange(addr, end_addr, [&](const KMemoryInfo& info) {
if (info.state != KMemoryState::Free) {
return;
}
std::size_t dst_num_pages{GetSizeInRange(info, addr, end_addr) / PageSize};
VAddr dst_addr{GetAddressInRange(info, addr)};
while (dst_num_pages) {
if (!src_num_pages) {
node = std::next(node);
map_addr = node->GetAddress();
src_num_pages = node->GetNumPages();
}
const std::size_t num_pages{std::min(src_num_pages, dst_num_pages)};
Operate(dst_addr, num_pages, KMemoryPermission::UserReadWrite, OperationType::Map,
map_addr);
dst_addr += num_pages * PageSize;
map_addr += num_pages * PageSize;
src_num_pages -= num_pages;
dst_num_pages -= num_pages;
}
});
mapped_physical_memory_size += remaining_size;
@@ -503,23 +505,8 @@ ResultCode KPageTable::UnmapPhysicalMemory(VAddr addr, std::size_t size) {
return ResultSuccess;
}
CASCADE_CODE(UnmapMemory(addr, size));
auto process{system.Kernel().CurrentProcess()};
process->GetResourceLimit()->Release(LimitableResource::PhysicalMemory, mapped_size);
mapped_physical_memory_size -= mapped_size;
return ResultSuccess;
}
ResultCode KPageTable::UnmapMemory(VAddr addr, std::size_t size) {
std::lock_guard lock{page_table_lock};
const VAddr end_addr{addr + size};
ResultCode result{ResultSuccess};
KPageLinkedList page_linked_list;
// Unmap each region within the range
KPageLinkedList page_linked_list;
block_manager->IterateForRange(addr, end_addr, [&](const KMemoryInfo& info) {
if (info.state == KMemoryState::Normal) {
const std::size_t block_size{GetSizeInRange(info, addr, end_addr)};
@@ -535,7 +522,6 @@ ResultCode KPageTable::UnmapMemory(VAddr addr, std::size_t size) {
}
}
});
if (result.IsError()) {
return result;
}
@@ -546,10 +532,14 @@ ResultCode KPageTable::UnmapMemory(VAddr addr, std::size_t size) {
block_manager->Update(addr, num_pages, KMemoryState::Free);
auto process{system.Kernel().CurrentProcess()};
process->GetResourceLimit()->Release(LimitableResource::PhysicalMemory, mapped_size);
mapped_physical_memory_size -= mapped_size;
return ResultSuccess;
}
ResultCode KPageTable::Map(VAddr dst_addr, VAddr src_addr, std::size_t size) {
ResultCode KPageTable::MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
std::lock_guard lock{page_table_lock};
KMemoryState src_state{};
@@ -588,7 +578,7 @@ ResultCode KPageTable::Map(VAddr dst_addr, VAddr src_addr, std::size_t size) {
return ResultSuccess;
}
ResultCode KPageTable::Unmap(VAddr dst_addr, VAddr src_addr, std::size_t size) {
ResultCode KPageTable::UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
std::lock_guard lock{page_table_lock};
KMemoryState src_state{};
@@ -652,24 +642,26 @@ ResultCode KPageTable::MapPages(VAddr addr, const KPageLinkedList& page_linked_l
return ResultSuccess;
}
ResultCode KPageTable::MapPages(VAddr addr, KPageLinkedList& page_linked_list, KMemoryState state,
KMemoryPermission perm) {
std::lock_guard lock{page_table_lock};
ResultCode KPageTable::MapPages(VAddr address, KPageLinkedList& page_linked_list,
KMemoryState state, KMemoryPermission perm) {
// Check that the map is in range.
const std::size_t num_pages{page_linked_list.GetNumPages()};
const std::size_t size{num_pages * PageSize};
R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory);
if (!CanContain(addr, size, state)) {
return ResultInvalidCurrentMemory;
}
// Lock the table.
std::lock_guard lock{page_table_lock};
if (IsRegionMapped(addr, num_pages * PageSize)) {
return ResultInvalidCurrentMemory;
}
// Check the memory state.
R_TRY(this->CheckMemoryState(address, size, KMemoryState::All, KMemoryState::Free,
KMemoryPermission::None, KMemoryPermission::None,
KMemoryAttribute::None, KMemoryAttribute::None));
CASCADE_CODE(MapPages(addr, page_linked_list, perm));
// Map the pages.
R_TRY(MapPages(address, page_linked_list, perm));
block_manager->Update(addr, num_pages, state, perm);
// Update the blocks.
block_manager->Update(address, num_pages, state, perm);
return ResultSuccess;
}
@@ -693,21 +685,23 @@ ResultCode KPageTable::UnmapPages(VAddr addr, const KPageLinkedList& page_linked
ResultCode KPageTable::UnmapPages(VAddr addr, KPageLinkedList& page_linked_list,
KMemoryState state) {
std::lock_guard lock{page_table_lock};
// Check that the unmap is in range.
const std::size_t num_pages{page_linked_list.GetNumPages()};
const std::size_t size{num_pages * PageSize};
R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);
if (!CanContain(addr, size, state)) {
return ResultInvalidCurrentMemory;
}
// Lock the table.
std::lock_guard lock{page_table_lock};
if (IsRegionMapped(addr, num_pages * PageSize)) {
return ResultInvalidCurrentMemory;
}
// Check the memory state.
R_TRY(this->CheckMemoryState(addr, size, KMemoryState::All, state, KMemoryPermission::None,
KMemoryPermission::None, KMemoryAttribute::All,
KMemoryAttribute::None));
CASCADE_CODE(UnmapPages(addr, page_linked_list));
// Perform the unmap.
R_TRY(UnmapPages(addr, page_linked_list));
// Update the blocks.
block_manager->Update(addr, num_pages, state, KMemoryPermission::None);
return ResultSuccess;
@@ -765,7 +759,6 @@ ResultCode KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size,
// Ensure cache coherency, if we're setting pages as executable.
if (is_x) {
// Memory execution state is changing, invalidate CPU cache range
system.InvalidateCpuInstructionCacheRange(addr, size);
}
@@ -793,12 +786,12 @@ ResultCode KPageTable::ReserveTransferMemory(VAddr addr, std::size_t size, KMemo
KMemoryState state{};
KMemoryAttribute attribute{};
CASCADE_CODE(CheckMemoryState(
&state, nullptr, &attribute, nullptr, addr, size,
KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted,
KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted, KMemoryPermission::All,
KMemoryPermission::UserReadWrite, KMemoryAttribute::Mask, KMemoryAttribute::None,
KMemoryAttribute::IpcAndDeviceMapped));
R_TRY(CheckMemoryState(&state, nullptr, &attribute, nullptr, addr, size,
KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted,
KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted,
KMemoryPermission::All, KMemoryPermission::UserReadWrite,
KMemoryAttribute::Mask, KMemoryAttribute::None,
KMemoryAttribute::IpcAndDeviceMapped));
block_manager->Update(addr, size / PageSize, state, perm, attribute | KMemoryAttribute::Locked);
@@ -810,12 +803,11 @@ ResultCode KPageTable::ResetTransferMemory(VAddr addr, std::size_t size) {
KMemoryState state{};
CASCADE_CODE(
CheckMemoryState(&state, nullptr, nullptr, nullptr, addr, size,
KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted,
KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted,
KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::Mask,
KMemoryAttribute::Locked, KMemoryAttribute::IpcAndDeviceMapped));
R_TRY(CheckMemoryState(&state, nullptr, nullptr, nullptr, addr, size,
KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted,
KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted,
KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::Mask,
KMemoryAttribute::Locked, KMemoryAttribute::IpcAndDeviceMapped));
block_manager->Update(addr, size / PageSize, state, KMemoryPermission::UserReadWrite);
return ResultSuccess;
@@ -871,8 +863,9 @@ ResultCode KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask
AttributeTestMask, KMemoryAttribute::None, ~AttributeTestMask));
// Determine the new attribute.
const auto new_attr = ((old_attr & static_cast<KMemoryAttribute>(~mask)) |
static_cast<KMemoryAttribute>(attr & mask));
const KMemoryAttribute new_attr =
static_cast<KMemoryAttribute>(((old_attr & static_cast<KMemoryAttribute>(~mask)) |
static_cast<KMemoryAttribute>(attr & mask)));
// Perform operation.
this->Operate(addr, num_pages, old_perm, OperationType::ChangePermissionsAndRefresh);
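The new_attr computation is the standard masked-merge idiom: bits selected by mask come from the caller's attr, while all other bits are preserved from old_attr. A minimal illustration with plain integers standing in for KMemoryAttribute:

#include <cstdint>

// Keep bits outside the mask from the old value; take bits inside the
// mask from the new value.
constexpr std::uint32_t MergeAttribute(std::uint32_t old_attr, std::uint32_t attr,
                                       std::uint32_t mask) {
    return (old_attr & ~mask) | (attr & mask);
}

static_assert(MergeAttribute(0b1010, 0b0101, 0b0011) == 0b1001);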
@@ -896,6 +889,9 @@ ResultCode KPageTable::SetMaxHeapSize(std::size_t size) {
}
ResultCode KPageTable::SetHeapSize(VAddr* out, std::size_t size) {
// Lock the physical memory lock.
std::lock_guard phys_lk(map_physical_memory_lock);
// Try to perform a reduction in heap, instead of an extension.
VAddr cur_address{};
std::size_t allocation_size{};
@@ -1025,12 +1021,12 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(std::size_t needed_num_pages,
}
if (is_map_only) {
CASCADE_CODE(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr));
R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr));
} else {
KPageLinkedList page_group;
CASCADE_CODE(system.Kernel().MemoryManager().Allocate(page_group, needed_num_pages,
memory_pool, allocation_option));
CASCADE_CODE(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup));
R_TRY(system.Kernel().MemoryManager().Allocate(page_group, needed_num_pages, memory_pool,
allocation_option));
R_TRY(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup));
}
block_manager->Update(addr, needed_num_pages, state, perm);
@@ -1186,7 +1182,7 @@ VAddr KPageTable::AllocateVirtualMemory(VAddr start, std::size_t region_num_page
ResultCode KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageLinkedList& page_group,
OperationType operation) {
std::lock_guard lock{page_table_lock};
ASSERT(this->IsLockedByCurrentThread());
ASSERT(Common::IsAligned(addr, PageSize));
ASSERT(num_pages > 0);
@@ -1211,7 +1207,7 @@ ResultCode KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageLin
ResultCode KPageTable::Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm,
OperationType operation, PAddr map_addr) {
std::lock_guard lock{page_table_lock};
ASSERT(this->IsLockedByCurrentThread());
ASSERT(num_pages > 0);
ASSERT(Common::IsAligned(addr, PageSize));

View File

@@ -37,9 +37,8 @@ public:
VAddr src_addr);
ResultCode MapPhysicalMemory(VAddr addr, std::size_t size);
ResultCode UnmapPhysicalMemory(VAddr addr, std::size_t size);
ResultCode UnmapMemory(VAddr addr, std::size_t size);
ResultCode Map(VAddr dst_addr, VAddr src_addr, std::size_t size);
ResultCode Unmap(VAddr dst_addr, VAddr src_addr, std::size_t size);
ResultCode MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
ResultCode UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
ResultCode MapPages(VAddr addr, KPageLinkedList& page_linked_list, KMemoryState state,
KMemoryPermission perm);
ResultCode UnmapPages(VAddr addr, KPageLinkedList& page_linked_list, KMemoryState state);
@@ -88,7 +87,6 @@ private:
ResultCode MapPages(VAddr addr, const KPageLinkedList& page_linked_list,
KMemoryPermission perm);
ResultCode UnmapPages(VAddr addr, const KPageLinkedList& page_linked_list);
void MapPhysicalMemory(KPageLinkedList& page_linked_list, VAddr start, VAddr end);
bool IsRegionMapped(VAddr address, u64 size);
bool IsRegionContiguous(VAddr addr, u64 size) const;
void AddRegionToPages(VAddr start, std::size_t num_pages, KPageLinkedList& page_linked_list);
@@ -148,6 +146,7 @@ private:
}
std::recursive_mutex page_table_lock;
std::mutex map_physical_memory_lock;
std::unique_ptr<KMemoryBlockManager> block_manager;
public:
@@ -249,7 +248,9 @@ public:
return !IsOutsideASLRRegion(address, size);
}
constexpr PAddr GetPhysicalAddr(VAddr addr) {
return page_table_impl.backing_addr[addr >> PageBits] + addr;
const auto backing_addr = page_table_impl.backing_addr[addr >> PageBits];
ASSERT(backing_addr);
return backing_addr + addr;
}
constexpr bool Contains(VAddr addr) const {
return address_space_start <= addr && addr <= address_space_end - 1;

View File

@@ -45,6 +45,7 @@ concept KPriorityQueueMember = !std::is_reference_v<T> && requires(T & t) {
{ t.GetActiveCore() } -> Common::ConvertibleTo<s32>;
{ t.GetPriority() } -> Common::ConvertibleTo<s32>;
{ t.IsDummyThread() } -> Common::ConvertibleTo<bool>;
};
template <typename Member, size_t NumCores_, int LowestPriority, int HighestPriority>
@@ -349,24 +350,49 @@ public:
// Mutators.
constexpr void PushBack(Member* member) {
// This is for host (dummy) threads that we do not want to enter the priority queue.
if (member->IsDummyThread()) {
return;
}
this->PushBack(member->GetPriority(), member);
}
constexpr void Remove(Member* member) {
// This is for host (dummy) threads that we do not want to enter the priority queue.
if (member->IsDummyThread()) {
return;
}
this->Remove(member->GetPriority(), member);
}
constexpr void MoveToScheduledFront(Member* member) {
// This is for host (dummy) threads that we do not want to enter the priority queue.
if (member->IsDummyThread()) {
return;
}
this->scheduled_queue.MoveToFront(member->GetPriority(), member->GetActiveCore(), member);
}
constexpr KThread* MoveToScheduledBack(Member* member) {
// This is for host (dummy) threads that we do not want to enter the priority queue.
if (member->IsDummyThread()) {
return {};
}
return this->scheduled_queue.MoveToBack(member->GetPriority(), member->GetActiveCore(),
member);
}
// First class fancy operations.
constexpr void ChangePriority(s32 prev_priority, bool is_running, Member* member) {
// This is for host (dummy) threads that we do not want to enter the priority queue.
if (member->IsDummyThread()) {
return;
}
ASSERT(IsValidPriority(prev_priority));
// Remove the member from the queues.
@@ -383,6 +409,11 @@ public:
constexpr void ChangeAffinityMask(s32 prev_core, const AffinityMaskType& prev_affinity,
Member* member) {
// This is for host (dummy) threads that we do not want to enter the priority queue.
if (member->IsDummyThread()) {
return;
}
// Get the new information.
const s32 priority = member->GetPriority();
const AffinityMaskType& new_affinity = member->GetAffinityMask();
@@ -412,6 +443,11 @@ public:
}
constexpr void ChangeCore(s32 prev_core, Member* member, bool to_front = false) {
// This is for host (dummy) threads that we do not want to enter the priority queue.
if (member->IsDummyThread()) {
return;
}
// Get the new information.
const s32 new_core = member->GetActiveCore();
const s32 priority = member->GetPriority();

View File

@@ -406,6 +406,9 @@ void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduli
} else {
RescheduleCores(kernel, cores_needing_scheduling);
}
// Special case to ensure dummy threads that are waiting block.
current_thread->IfDummyThreadTryWait();
}
u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) {
@@ -739,6 +742,12 @@ void KScheduler::ScheduleImpl() {
next_thread = idle_thread;
}
// We never want to schedule a dummy thread, as these are only used by host threads for locking.
if (next_thread->GetThreadType() == ThreadType::Dummy) {
ASSERT_MSG(false, "Dummy threads should never be scheduled!");
next_thread = idle_thread;
}
// If we're not actually switching thread, there's nothing to do.
if (next_thread == current_thread.load()) {
previous_thread->EnableDispatch();

View File

@@ -8,7 +8,6 @@
#include "common/assert.h"
#include "common/common_types.h"
#include "common/logging/log.h"
#include "common/scope_exit.h"
#include "core/core_timing.h"
#include "core/hle/ipc_helpers.h"
#include "core/hle/kernel/hle_ipc.h"
@@ -123,20 +122,10 @@ ResultCode KServerSession::QueueSyncRequest(KThread* thread, Core::Memory::Memor
context->PopulateFromIncomingCommandBuffer(kernel.CurrentProcess()->GetHandleTable(), cmd_buf);
// In the event that something fails here, stub a result to prevent the game from crashing.
// This is a work-around in the event that somehow we process a service request after the
// session has been closed by the game. This has been observed to happen rarely in Pokemon
// Sword/Shield and is likely a result of us using host threads/scheduling for services.
// TODO(bunnei): Find a better solution here.
auto error_guard = SCOPE_GUARD({ CompleteSyncRequest(*context); });
// Ensure we have a session request handler
if (manager->HasSessionRequestHandler(*context)) {
if (auto strong_ptr = manager->GetServiceThread().lock()) {
strong_ptr->QueueSyncRequest(*parent, std::move(context));
// We succeeded.
error_guard.Cancel();
} else {
ASSERT_MSG(false, "strong_ptr is nullptr!");
}
@@ -171,13 +160,8 @@ ResultCode KServerSession::CompleteSyncRequest(HLERequestContext& context) {
convert_to_domain = false;
}
// Some service requests require the thread to block
{
KScopedSchedulerLock lock(kernel);
if (!context.IsThreadWaiting()) {
context.GetThread().EndWait(result);
}
}
// The calling thread is waiting for this request to complete, so wake it up.
context.GetThread().EndWait(result);
return result;
}

View File

@@ -106,7 +106,7 @@ KThread::~KThread() = default;
ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top, s32 prio,
s32 virt_core, KProcess* owner, ThreadType type) {
// Assert parameters are valid.
ASSERT((type == ThreadType::Main) ||
ASSERT((type == ThreadType::Main) || (type == ThreadType::Dummy) ||
(Svc::HighestThreadPriority <= prio && prio <= Svc::LowestThreadPriority));
ASSERT((owner != nullptr) || (type != ThreadType::User));
ASSERT(0 <= virt_core && virt_core < static_cast<s32>(Common::BitSize<u64>()));
@@ -140,7 +140,7 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
UNREACHABLE_MSG("KThread::Initialize: Unknown ThreadType {}", static_cast<u32>(type));
break;
}
thread_type_for_debugging = type;
thread_type = type;
// Set the ideal core ID and affinity mask.
virtual_ideal_core_id = virt_core;
@@ -262,7 +262,7 @@ ResultCode KThread::InitializeThread(KThread* thread, KThreadFunction func, uint
}
ResultCode KThread::InitializeDummyThread(KThread* thread) {
return thread->Initialize({}, {}, {}, DefaultThreadPriority, 3, {}, ThreadType::Dummy);
return thread->Initialize({}, {}, {}, DummyThreadPriority, 3, {}, ThreadType::Dummy);
}
ResultCode KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) {
@@ -1075,12 +1075,46 @@ ResultCode KThread::Sleep(s64 timeout) {
return ResultSuccess;
}
void KThread::IfDummyThreadTryWait() {
if (!IsDummyThread()) {
return;
}
if (GetState() != ThreadState::Waiting) {
return;
}
// Block until we can grab the lock.
KScopedSpinLock lk{dummy_wait_lock};
}
void KThread::IfDummyThreadBeginWait() {
if (!IsDummyThread()) {
return;
}
// Ensure the thread will block when IfDummyThreadTryWait is called.
dummy_wait_lock.Lock();
}
void KThread::IfDummyThreadEndWait() {
if (!IsDummyThread()) {
return;
}
// Ensure the thread will no longer block.
dummy_wait_lock.Unlock();
}
void KThread::BeginWait(KThreadQueue* queue) {
// Set our state as waiting.
SetState(ThreadState::Waiting);
// Set our wait queue.
wait_queue = queue;
// Special case for dummy threads to ensure they block.
IfDummyThreadBeginWait();
}
void KThread::NotifyAvailable(KSynchronizationObject* signaled_object, ResultCode wait_result_) {
@@ -1099,7 +1133,16 @@ void KThread::EndWait(ResultCode wait_result_) {
// If we're waiting, notify our queue that we're available.
if (GetState() == ThreadState::Waiting) {
if (wait_queue == nullptr) {
// This should never happen, but avoid a hard crash below to get this logged.
ASSERT_MSG(false, "wait_queue is nullptr!");
return;
}
wait_queue->EndWait(this, wait_result_);
// Special case for dummy threads to wakeup if necessary.
IfDummyThreadEndWait();
}
}
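Taken together, the three IfDummyThread* helpers implement a simple hand-off: BeginWait arms dummy_wait_lock, the host (dummy) thread then blocks in IfDummyThreadTryWait until the guest-side EndWait releases the lock. A minimal model of that handshake, using std::atomic_flag to stand in for KSpinLock, which (unlike std::mutex) may legitimately be released by a different thread than the one that acquired it; the type and names below are illustrative:

#include <atomic>

struct DummyWaitModel {
    std::atomic_flag locked = ATOMIC_FLAG_INIT;

    // IfDummyThreadBeginWait: arm the lock so the host thread will block
    // the next time it tries to wait.
    void BeginWait() {
        while (locked.test_and_set(std::memory_order_acquire)) {
        }
    }

    // IfDummyThreadEndWait: release the blocked waiter.
    void EndWait() {
        locked.clear(std::memory_order_release);
    }

    // IfDummyThreadTryWait: spin until EndWait clears the flag, then
    // release immediately, mirroring KScopedSpinLock's scoped acquire.
    void TryWait() {
        while (locked.test_and_set(std::memory_order_acquire)) {
        }
        locked.clear(std::memory_order_release);
    }
};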

View File

@@ -112,6 +112,7 @@ private:
public:
static constexpr s32 DefaultThreadPriority = 44;
static constexpr s32 IdleThreadPriority = Svc::LowestThreadPriority + 1;
static constexpr s32 DummyThreadPriority = Svc::LowestThreadPriority + 2;
explicit KThread(KernelCore& kernel_);
~KThread() override;
@@ -553,8 +554,12 @@ public:
return wait_reason_for_debugging;
}
[[nodiscard]] ThreadType GetThreadTypeForDebugging() const {
return thread_type_for_debugging;
[[nodiscard]] ThreadType GetThreadType() const {
return thread_type;
}
[[nodiscard]] bool IsDummyThread() const {
return GetThreadType() == ThreadType::Dummy;
}
void SetWaitObjectsForDebugging(const std::span<KSynchronizationObject*>& objects) {
@@ -631,6 +636,14 @@ public:
return condvar_key;
}
// Dummy threads (used for HLE host threads) cannot wait based on the guest scheduler, and
// therefore will not block on guest kernel synchronization primitives. These methods handle
// blocking as needed.
void IfDummyThreadTryWait();
void IfDummyThreadBeginWait();
void IfDummyThreadEndWait();
private:
static constexpr size_t PriorityInheritanceCountMax = 10;
union SyncObjectBuffer {
@@ -749,16 +762,17 @@ private:
bool resource_limit_release_hint{};
StackParameters stack_parameters{};
KSpinLock context_guard{};
KSpinLock dummy_wait_lock{};
// For emulation
std::shared_ptr<Common::Fiber> host_context{};
bool is_single_core{};
ThreadType thread_type{};
// For debugging
std::vector<KSynchronizationObject*> wait_objects_for_debugging;
VAddr mutex_wait_address_for_debugging{};
ThreadWaitReasonForDebugging wait_reason_for_debugging{};
ThreadType thread_type_for_debugging{};
public:
using ConditionVariableThreadTreeType = ConditionVariableThreadTree;

View File

@@ -301,12 +301,10 @@ struct KernelCore::Impl {
// Gets the dummy KThread for the caller, allocating a new one if this is the first time
KThread* GetHostDummyThread() {
auto make_thread = [this]() {
std::lock_guard lk(dummy_thread_lock);
auto& thread = dummy_threads.emplace_back(std::make_unique<KThread>(system.Kernel()));
KAutoObject::Create(thread.get());
ASSERT(KThread::InitializeDummyThread(thread.get()).IsSuccess());
KThread* thread = KThread::Create(system.Kernel());
ASSERT(KThread::InitializeDummyThread(thread).IsSuccess());
thread->SetName(fmt::format("DummyThread:{}", GetHostThreadId()));
return thread.get();
return thread;
};
thread_local KThread* saved_thread = make_thread();
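This replacement leans on C++ thread_local initialization semantics: the initializer runs once per OS thread, the first time control passes through the declaration, so each host thread lazily gets its own dummy KThread without the removed vector and mutex. A reduced sketch of the pattern (placeholder types, not yuzu's API; in yuzu the object's lifetime is managed by kernel reference counting rather than leaked):

#include <string>

struct HostThreadState {
    std::string name;
};

HostThreadState* GetPerHostThreadState() {
    auto make = [] {
        // Runs exactly once per OS thread that reaches the declaration
        // below; stands in for KThread::Create + InitializeDummyThread.
        return new HostThreadState{"DummyThread"};
    };
    thread_local HostThreadState* saved = make();
    return saved;
}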
@@ -731,7 +729,6 @@ struct KernelCore::Impl {
std::mutex server_sessions_lock;
std::mutex registered_objects_lock;
std::mutex registered_in_use_objects_lock;
std::mutex dummy_thread_lock;
std::atomic<u32> next_object_id{0};
std::atomic<u64> next_kernel_process_id{KProcess::InitialKIPIDMin};
@@ -788,9 +785,6 @@ struct KernelCore::Impl {
std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES> interrupts{};
std::array<std::unique_ptr<Kernel::KScheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{};
// Specifically tracked to be automatically destroyed with kernel
std::vector<std::unique_ptr<KThread>> dummy_threads;
bool is_multicore{};
std::atomic_bool is_shutting_down{};
bool is_phantom_mode_for_singlecore{};

View File

@@ -12,6 +12,7 @@
#include "common/scope_exit.h"
#include "common/thread.h"
#include "core/hle/kernel/k_session.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/service_thread.h"
@@ -50,6 +51,10 @@ ServiceThread::Impl::Impl(KernelCore& kernel, std::size_t num_threads, const std
kernel.RegisterHostThread();
// Ensure the dummy thread allocated for this host thread is closed on exit.
auto* dummy_thread = kernel.GetCurrentEmuThread();
SCOPE_EXIT({ dummy_thread->Close(); });
while (true) {
std::function<void()> task;
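SCOPE_EXIT is yuzu's scope-guard macro from common/scope_exit.h: the body runs when the enclosing scope unwinds, so the dummy thread is closed however the worker loop exits. A sketch of the underlying idea (not the actual macro):

#include <utility>

template <typename F>
class ScopeGuard {
public:
    explicit ScopeGuard(F&& func_) : func{std::forward<F>(func_)} {}
    ~ScopeGuard() {
        func(); // runs on scope exit, whether normal or via exception
    }

private:
    F func;
};

// Usage mirroring the hunk above:
//   ScopeGuard guard{[&] { dummy_thread->Close(); }};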

View File

@@ -230,7 +230,7 @@ static ResultCode MapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr
return result;
}
return page_table.Map(dst_addr, src_addr, size);
return page_table.MapMemory(dst_addr, src_addr, size);
}
static ResultCode MapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u32 size) {
@@ -249,7 +249,7 @@ static ResultCode UnmapMemory(Core::System& system, VAddr dst_addr, VAddr src_ad
return result;
}
return page_table.Unmap(dst_addr, src_addr, size);
return page_table.UnmapMemory(dst_addr, src_addr, size);
}
static ResultCode UnmapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u32 size) {

View File

@@ -192,6 +192,25 @@ std::size_t UDPClient::GetClientNumber(std::string_view host, u16 port) const {
return MAX_UDP_CLIENTS;
}
BatteryLevel UDPClient::GetBatteryLevel(Response::Battery battery) const {
switch (battery) {
case Response::Battery::Dying:
return BatteryLevel::Empty;
case Response::Battery::Low:
return BatteryLevel::Critical;
case Response::Battery::Medium:
return BatteryLevel::Low;
case Response::Battery::High:
return BatteryLevel::Medium;
case Response::Battery::Full:
case Response::Battery::Charged:
return BatteryLevel::Full;
case Response::Battery::Charging:
default:
return BatteryLevel::Charging;
}
}
void UDPClient::OnVersion([[maybe_unused]] Response::Version data) {
LOG_TRACE(Input, "Version packet received: {}", data.version);
}
@@ -299,6 +318,8 @@ void UDPClient::OnPadData(Response::PadData data, std::size_t client) {
const int button = static_cast<int>(buttons[i]);
SetButton(identifier, button, button_status);
}
SetBattery(identifier, GetBatteryLevel(data.info.battery));
}
void UDPClient::StartCommunication(std::size_t client, const std::string& host, u16 port) {

View File

@@ -15,6 +15,7 @@ namespace InputCommon::CemuhookUDP {
class Socket;
namespace Response {
enum class Battery : u8;
struct PadData;
struct PortInfo;
struct TouchPad;
@@ -137,6 +138,9 @@ private:
// Translates configuration to client number
std::size_t GetClientNumber(std::string_view host, u16 port) const;
// Translates UDP battery level to input engine battery level
BatteryLevel GetBatteryLevel(Response::Battery battery) const;
void OnVersion(Response::Version);
void OnPortInfo(Response::PortInfo);
void OnPadData(Response::PadData, std::size_t client);

View File

@@ -95,7 +95,7 @@ std::vector<std::unique_ptr<WaitTreeThread>> WaitTreeItem::MakeThreadItemList(
std::size_t row = 0;
auto add_threads = [&](const std::vector<Kernel::KThread*>& threads) {
for (std::size_t i = 0; i < threads.size(); ++i) {
if (threads[i]->GetThreadTypeForDebugging() == Kernel::ThreadType::User) {
if (threads[i]->GetThreadType() == Kernel::ThreadType::User) {
item_list.push_back(std::make_unique<WaitTreeThread>(*threads[i], system));
item_list.back()->row = row;
}
@@ -153,7 +153,7 @@ QString WaitTreeCallstack::GetText() const {
std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeCallstack::GetChildren() const {
std::vector<std::unique_ptr<WaitTreeItem>> list;
if (thread.GetThreadTypeForDebugging() != Kernel::ThreadType::User) {
if (thread.GetThreadType() != Kernel::ThreadType::User) {
return list;
}