Compare commits

7 Commits

Author  SHA1        Message  Date
lat9nq  7d9369d15e  startup_checks: Use WaitForSingleObject and more cleanup  2022-07-12 14:23:50 -04:00
lat9nq  2d2a69ab5b  startup_checks: Use GetEnvironmentVariableA  2022-07-10 20:24:37 -04:00
        Solves an MSVC compile error. Also drops the need to use a string for the comparison.
lat9nq  d57cd8bcdd  startup_checks: Clean up  2022-07-10 17:18:31 -04:00
        Adds some comments, removes unused includes, and removes the last bits of logging,
        since this runs before the logging backend starts up.
lat9nq  fd4e48f96e  startup_checks: Implement unix side code  2022-07-10 17:01:37 -04:00
        Wow, fork() is nice, isn't it?
lat9nq  33abdfff9b  yuzu: Simplify broken Vulkan handling  2022-07-10 16:52:00 -04:00
lat9nq  4f15d9ed6f  yuzu: Check Vulkan on startup with a child  2022-07-10 14:08:20 -04:00
lat9nq  568a0ac397  yuzu: Rename check_vulkan to startup_checks  2022-07-10 12:34:34 -04:00
33 changed files with 802 additions and 767 deletions
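The startup_checks commits above replace the old check_vulkan flow with a child-process probe: the parent spawns a helper process (CreateProcess on Windows, fork() on Unix), the child attempts the Vulkan check, and the parent reads the verdict from the child's exit status via WaitForSingleObject or waitpid. On Windows an environment variable, read with GetEnvironmentVariableA, tells the relaunched executable that it should only run the check. The sketch below covers the Unix side only; it is a minimal, hypothetical reconstruction from the commit messages, not code from this diff, and the names StartupVulkanCheck and CheckVulkanAvailable are assumed.

#include <cstdlib>
#include <sys/wait.h>
#include <unistd.h>

namespace {
// Placeholder probe; a real build would attempt to create a Vulkan instance here.
bool CheckVulkanAvailable() {
    return true;
}
} // namespace

// Fork a child, run the Vulkan probe inside it, and read the verdict from the
// exit status, so a crashing driver cannot take the main process down with it.
bool StartupVulkanCheck() {
    const pid_t pid = fork();
    if (pid == 0) {
        // Child: encode the probe result in the exit code.
        std::_Exit(CheckVulkanAvailable() ? EXIT_SUCCESS : EXIT_FAILURE);
    }
    if (pid < 0) {
        // fork() failed; assume Vulkan is fine rather than blocking startup.
        return true;
    }
    int status = 0;
    waitpid(pid, &status, 0);
    // Any crash (signal) or nonzero exit counts as "Vulkan is broken".
    return WIFEXITED(status) && WEXITSTATUS(status) == EXIT_SUCCESS;
}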

View File

@@ -65,7 +65,7 @@ python3 .ci/scripts/windows/scan_dll.py package/*.exe package/imageformats/*.dll
# copy FFmpeg libraries
EXTERNALS_PATH="$(pwd)/build/externals"
FFMPEG_DLL_PATH="$(find "${EXTERNALS_PATH}" -maxdepth 1 -type d | grep 'ffmpeg-')/bin"
find ${FFMPEG_DLL_PATH} -type f -regex ".*\.dll" -exec cp -nv {} package/ ';'
find ${FFMPEG_DLL_PATH} -type f -regex ".*\.dll" -exec cp -v {} package/ ';'
# copy libraries from yuzu.exe path
find "$(pwd)/build/bin/" -type f -regex ".*\.dll" -exec cp -v {} package/ ';'

View File

@@ -13,7 +13,7 @@ jobs:
container: yuzuemu/build-environments:linux-transifex
if: ${{ github.repository == 'yuzu-emu/yuzu' && !github.head_ref }}
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v2
with:
submodules: recursive
fetch-depth: 0

View File

@@ -12,7 +12,7 @@ jobs:
image: yuzuemu/build-environments:linux-clang-format
options: -u 1001
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v2
with:
submodules: false
- name: 'Verify Formatting'
@@ -35,12 +35,12 @@ jobs:
image: yuzuemu/build-environments:${{ matrix.image }}
options: -u 1001
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v2
with:
submodules: recursive
fetch-depth: 0
- name: Set up cache
uses: actions/cache@v3
uses: actions/cache@v2
id: ccache-restore
with:
path: ~/.ccache
@@ -69,7 +69,7 @@ jobs:
runs-on: windows-2019
steps:
- name: Set up cache
uses: actions/cache@v3
uses: actions/cache@v2
with:
path: ~/.buildcache
key: ${{ runner.os }}-msvc-${{ github.sha }}
@@ -89,7 +89,7 @@ jobs:
echo %PATH% >> %GITHUB_PATH%
- name: Set up MSVC
uses: ilammy/msvc-dev-cmd@v1
- uses: actions/checkout@v3
- uses: actions/checkout@v2
with:
submodules: recursive
fetch-depth: 0

View File

@@ -155,10 +155,9 @@ void ARM_Interface::Run() {
break;
}
// Handle syscalls and scheduling (this may change the current thread/core)
// Handle syscalls and scheduling (this may change the current thread)
if (Has(hr, svc_call)) {
Kernel::Svc::Call(system, GetSvcNumber());
break;
}
if (Has(hr, break_loop) || !uses_wall_clock) {
break;

View File

@@ -8,7 +8,6 @@
#include "core/core.h"
#include "core/core_timing.h"
#include "core/cpu_manager.h"
#include "core/hle/kernel/k_interrupt_manager.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
@@ -50,6 +49,14 @@ void CpuManager::GuestThreadFunction() {
}
}
void CpuManager::GuestRewindFunction() {
if (is_multicore) {
MultiCoreRunGuestLoop();
} else {
SingleCoreRunGuestLoop();
}
}
void CpuManager::IdleThreadFunction() {
if (is_multicore) {
MultiCoreRunIdleThread();
@@ -62,21 +69,21 @@ void CpuManager::ShutdownThreadFunction() {
ShutdownThread();
}
void CpuManager::HandleInterrupt() {
auto& kernel = system.Kernel();
auto core_index = kernel.CurrentPhysicalCoreIndex();
Kernel::KInterruptManager::HandleInterrupt(kernel, static_cast<s32>(core_index));
}
///////////////////////////////////////////////////////////////////////////////
/// MultiCore ///
///////////////////////////////////////////////////////////////////////////////
void CpuManager::MultiCoreRunGuestThread() {
// Similar to UserModeThreadStarter in HOS
auto& kernel = system.Kernel();
kernel.CurrentScheduler()->OnThreadStart();
auto* thread = kernel.CurrentScheduler()->GetSchedulerCurrentThread();
auto& host_context = thread->GetHostContext();
host_context->SetRewindPoint([this] { GuestRewindFunction(); });
MultiCoreRunGuestLoop();
}
void CpuManager::MultiCoreRunGuestLoop() {
auto& kernel = system.Kernel();
while (true) {
auto* physical_core = &kernel.CurrentPhysicalCore();
@@ -84,26 +91,18 @@ void CpuManager::MultiCoreRunGuestThread() {
physical_core->Run();
physical_core = &kernel.CurrentPhysicalCore();
}
HandleInterrupt();
{
Kernel::KScopedDisableDispatch dd(kernel);
physical_core->ArmInterface().ClearExclusiveState();
}
}
}
void CpuManager::MultiCoreRunIdleThread() {
// Not accurate to HOS. Remove this entire method when singlecore is removed.
// See notes in KScheduler::ScheduleImpl for more information about why this
// is inaccurate.
auto& kernel = system.Kernel();
kernel.CurrentScheduler()->OnThreadStart();
while (true) {
auto& physical_core = kernel.CurrentPhysicalCore();
if (!physical_core.IsInterrupted()) {
physical_core.Idle();
}
HandleInterrupt();
Kernel::KScopedDisableDispatch dd(kernel);
kernel.CurrentPhysicalCore().Idle();
}
}
@@ -114,73 +113,80 @@ void CpuManager::MultiCoreRunIdleThread() {
void CpuManager::SingleCoreRunGuestThread() {
auto& kernel = system.Kernel();
kernel.CurrentScheduler()->OnThreadStart();
auto* thread = kernel.CurrentScheduler()->GetSchedulerCurrentThread();
auto& host_context = thread->GetHostContext();
host_context->SetRewindPoint([this] { GuestRewindFunction(); });
SingleCoreRunGuestLoop();
}
void CpuManager::SingleCoreRunGuestLoop() {
auto& kernel = system.Kernel();
while (true) {
auto* physical_core = &kernel.CurrentPhysicalCore();
if (!physical_core->IsInterrupted()) {
physical_core->Run();
physical_core = &kernel.CurrentPhysicalCore();
}
kernel.SetIsPhantomModeForSingleCore(true);
system.CoreTiming().Advance();
kernel.SetIsPhantomModeForSingleCore(false);
physical_core->ArmInterface().ClearExclusiveState();
PreemptSingleCore();
HandleInterrupt();
auto& scheduler = kernel.Scheduler(current_core);
scheduler.RescheduleCurrentCore();
}
}
void CpuManager::SingleCoreRunIdleThread() {
auto& kernel = system.Kernel();
kernel.CurrentScheduler()->OnThreadStart();
while (true) {
auto& physical_core = kernel.CurrentPhysicalCore();
PreemptSingleCore(false);
system.CoreTiming().AddTicks(1000U);
idle_count++;
HandleInterrupt();
auto& scheduler = physical_core.Scheduler();
scheduler.RescheduleCurrentCore();
}
}
void CpuManager::PreemptSingleCore(bool from_running_environment) {
auto& kernel = system.Kernel();
void CpuManager::PreemptSingleCore(bool from_running_enviroment) {
{
auto& kernel = system.Kernel();
auto& scheduler = kernel.Scheduler(current_core);
Kernel::KThread* current_thread = scheduler.GetSchedulerCurrentThread();
if (idle_count >= 4 || from_running_enviroment) {
if (!from_running_enviroment) {
system.CoreTiming().Idle();
idle_count = 0;
}
kernel.SetIsPhantomModeForSingleCore(true);
system.CoreTiming().Advance();
kernel.SetIsPhantomModeForSingleCore(false);
}
current_core.store((current_core + 1) % Core::Hardware::NUM_CPU_CORES);
system.CoreTiming().ResetTicks();
scheduler.Unload(scheduler.GetSchedulerCurrentThread());
if (idle_count >= 4 || from_running_environment) {
if (!from_running_environment) {
system.CoreTiming().Idle();
auto& next_scheduler = kernel.Scheduler(current_core);
Common::Fiber::YieldTo(current_thread->GetHostContext(), *next_scheduler.ControlContext());
}
// May have changed scheduler
{
auto& scheduler = system.Kernel().Scheduler(current_core);
scheduler.Reload(scheduler.GetSchedulerCurrentThread());
if (!scheduler.IsIdle()) {
idle_count = 0;
}
kernel.SetIsPhantomModeForSingleCore(true);
system.CoreTiming().Advance();
kernel.SetIsPhantomModeForSingleCore(false);
}
current_core.store((current_core + 1) % Core::Hardware::NUM_CPU_CORES);
system.CoreTiming().ResetTicks();
kernel.Scheduler(current_core).PreemptSingleCore();
// We've now been scheduled again, and we may have exchanged schedulers.
// Reload the scheduler in case it's different.
if (!kernel.Scheduler(current_core).IsIdle()) {
idle_count = 0;
}
}
void CpuManager::GuestActivate() {
// Similar to the HorizonKernelMain callback in HOS
auto& kernel = system.Kernel();
auto* scheduler = kernel.CurrentScheduler();
scheduler->Activate();
UNREACHABLE();
}
void CpuManager::ShutdownThread() {
auto& kernel = system.Kernel();
auto* thread = kernel.GetCurrentEmuThread();
auto core = is_multicore ? kernel.CurrentPhysicalCoreIndex() : 0;
auto* current_thread = kernel.GetCurrentEmuThread();
Common::Fiber::YieldTo(thread->GetHostContext(), *core_data[core].host_context);
Common::Fiber::YieldTo(current_thread->GetHostContext(), *core_data[core].host_context);
UNREACHABLE();
}
@@ -212,12 +218,9 @@ void CpuManager::RunThread(std::size_t core) {
system.GPU().ObtainContext();
}
auto& kernel = system.Kernel();
auto& scheduler = *kernel.CurrentScheduler();
auto* thread = scheduler.GetSchedulerCurrentThread();
Kernel::SetCurrentThread(kernel, thread);
Common::Fiber::YieldTo(data.host_context, *thread->GetHostContext());
auto* current_thread = system.Kernel().CurrentScheduler()->GetIdleThread();
Kernel::SetCurrentThread(system.Kernel(), current_thread);
Common::Fiber::YieldTo(data.host_context, *current_thread->GetHostContext());
}
} // namespace Core

View File

@@ -50,10 +50,7 @@ public:
void Initialize();
void Shutdown();
std::function<void()> GetGuestActivateFunc() {
return [this] { GuestActivate(); };
}
std::function<void()> GetGuestThreadFunc() {
std::function<void()> GetGuestThreadStartFunc() {
return [this] { GuestThreadFunction(); };
}
std::function<void()> GetIdleThreadStartFunc() {
@@ -71,19 +68,20 @@ public:
private:
void GuestThreadFunction();
void GuestRewindFunction();
void IdleThreadFunction();
void ShutdownThreadFunction();
void MultiCoreRunGuestThread();
void MultiCoreRunGuestLoop();
void MultiCoreRunIdleThread();
void SingleCoreRunGuestThread();
void SingleCoreRunGuestLoop();
void SingleCoreRunIdleThread();
static void ThreadStart(std::stop_token stop_token, CpuManager& cpu_manager, std::size_t core);
void GuestActivate();
void HandleInterrupt();
void ShutdownThread();
void RunThread(std::size_t core);

View File

@@ -41,7 +41,12 @@ void GlobalSchedulerContext::PreemptThreads() {
ASSERT(IsLocked());
for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
const u32 priority = preemption_priorities[core_id];
KScheduler::RotateScheduledQueue(kernel, core_id, priority);
kernel.Scheduler(core_id).RotateScheduledQueue(core_id, priority);
// Signal an interrupt occurred. For core 3, this is a certainty, as preemption will result
// in the rotator thread being scheduled. For cores 0-2, this is to simulate other system
// interrupts that may have occurred.
kernel.PhysicalCore(core_id).Interrupt();
}
}

View File

@@ -6,7 +6,6 @@
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/physical_core.h"
namespace Kernel::KInterruptManager {
@@ -16,9 +15,6 @@ void HandleInterrupt(KernelCore& kernel, s32 core_id) {
return;
}
// Acknowledge the interrupt.
kernel.PhysicalCore(core_id).ClearInterrupt();
auto& current_thread = GetCurrentThread(kernel);
// If the user disable count is set, we may need to pin the current thread.
@@ -31,9 +27,6 @@ void HandleInterrupt(KernelCore& kernel, s32 core_id) {
// Set the interrupt flag for the thread.
GetCurrentThread(kernel).SetInterruptFlag();
}
// Request interrupt scheduling.
kernel.CurrentScheduler()->RequestScheduleOnInterrupt();
}
} // namespace Kernel::KInterruptManager

View File

@@ -27,185 +27,69 @@ static void IncrementScheduledCount(Kernel::KThread* thread) {
}
}
KScheduler::KScheduler(KernelCore& kernel_) : kernel{kernel_} {
m_switch_fiber = std::make_shared<Common::Fiber>([this] {
while (true) {
ScheduleImplFiber();
void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule) {
auto scheduler = kernel.CurrentScheduler();
u32 current_core{0xF};
bool must_context_switch{};
if (scheduler) {
current_core = scheduler->core_id;
// TODO(bunnei): Should be set to true when we deprecate single core
must_context_switch = !kernel.IsPhantomModeForSingleCore();
}
while (cores_pending_reschedule != 0) {
const auto core = static_cast<u32>(std::countr_zero(cores_pending_reschedule));
ASSERT(core < Core::Hardware::NUM_CPU_CORES);
if (!must_context_switch || core != current_core) {
auto& phys_core = kernel.PhysicalCore(core);
phys_core.Interrupt();
}
});
m_state.needs_scheduling = true;
}
KScheduler::~KScheduler() = default;
void KScheduler::SetInterruptTaskRunnable() {
m_state.interrupt_task_runnable = true;
m_state.needs_scheduling = true;
}
void KScheduler::RequestScheduleOnInterrupt() {
m_state.needs_scheduling = true;
if (CanSchedule(kernel)) {
ScheduleOnInterrupt();
}
}
void KScheduler::DisableScheduling(KernelCore& kernel) {
ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 0);
GetCurrentThread(kernel).DisableDispatch();
}
void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling) {
ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 1);
auto* scheduler{kernel.CurrentScheduler()};
if (!scheduler || kernel.IsPhantomModeForSingleCore()) {
KScheduler::RescheduleCores(kernel, cores_needing_scheduling);
KScheduler::RescheduleCurrentHLEThread(kernel);
return;
cores_pending_reschedule &= ~(1ULL << core);
}
scheduler->RescheduleOtherCores(cores_needing_scheduling);
if (GetCurrentThread(kernel).GetDisableDispatchCount() > 1) {
GetCurrentThread(kernel).EnableDispatch();
} else {
scheduler->RescheduleCurrentCore();
for (std::size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; ++core_id) {
if (kernel.PhysicalCore(core_id).IsInterrupted()) {
KInterruptManager::HandleInterrupt(kernel, static_cast<s32>(core_id));
}
}
}
void KScheduler::RescheduleCurrentHLEThread(KernelCore& kernel) {
// HACK: we cannot schedule from this thread, it is not a core thread
ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() == 1);
// Special case to ensure dummy threads that are waiting block
GetCurrentThread(kernel).IfDummyThreadTryWait();
ASSERT(GetCurrentThread(kernel).GetState() != ThreadState::Waiting);
GetCurrentThread(kernel).EnableDispatch();
}
u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) {
if (IsSchedulerUpdateNeeded(kernel)) {
return UpdateHighestPriorityThreadsImpl(kernel);
} else {
return 0;
if (must_context_switch) {
auto core_scheduler = kernel.CurrentScheduler();
kernel.ExitSVCProfile();
core_scheduler->RescheduleCurrentCore();
kernel.EnterSVCProfile();
}
}
void KScheduler::Schedule() {
ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() == 1);
ASSERT(m_core_id == GetCurrentCoreId(kernel));
ScheduleImpl();
}
void KScheduler::ScheduleOnInterrupt() {
GetCurrentThread(kernel).DisableDispatch();
Schedule();
GetCurrentThread(kernel).EnableDispatch();
}
void KScheduler::PreemptSingleCore() {
GetCurrentThread(kernel).DisableDispatch();
auto* thread = GetCurrentThreadPointer(kernel);
auto& previous_scheduler = kernel.Scheduler(thread->GetCurrentCore());
previous_scheduler.Unload(thread);
Common::Fiber::YieldTo(thread->GetHostContext(), *m_switch_fiber);
GetCurrentThread(kernel).EnableDispatch();
}
void KScheduler::RescheduleCurrentCore() {
ASSERT(!kernel.IsPhantomModeForSingleCore());
ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() == 1);
GetCurrentThread(kernel).EnableDispatch();
if (m_state.needs_scheduling.load()) {
// Disable interrupts, and then check again if rescheduling is needed.
// KScopedInterruptDisable intr_disable;
kernel.CurrentScheduler()->RescheduleCurrentCoreImpl();
}
}
void KScheduler::RescheduleCurrentCoreImpl() {
// Check that scheduling is needed.
if (m_state.needs_scheduling.load()) [[likely]] {
GetCurrentThread(kernel).DisableDispatch();
Schedule();
GetCurrentThread(kernel).EnableDispatch();
}
}
void KScheduler::Initialize(KThread* main_thread, KThread* idle_thread, s32 core_id) {
// Set core ID/idle thread/interrupt task manager.
m_core_id = core_id;
m_idle_thread = idle_thread;
// m_state.idle_thread_stack = m_idle_thread->GetStackTop();
// m_state.interrupt_task_manager = &kernel.GetInterruptTaskManager();
// Insert the main thread into the priority queue.
// {
// KScopedSchedulerLock lk{kernel};
// GetPriorityQueue(kernel).PushBack(GetCurrentThreadPointer(kernel));
// SetSchedulerUpdateNeeded(kernel);
// }
// Bind interrupt handler.
// kernel.GetInterruptManager().BindHandler(
// GetSchedulerInterruptHandler(kernel), KInterruptName::Scheduler, m_core_id,
// KInterruptController::PriorityLevel::Scheduler, false, false);
// Set the current thread.
m_current_thread = main_thread;
}
void KScheduler::Activate() {
ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() == 1);
// m_state.should_count_idle = KTargetSystem::IsDebugMode();
m_is_active = true;
RescheduleCurrentCore();
}
void KScheduler::OnThreadStart() {
GetCurrentThread(kernel).EnableDispatch();
}
u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) {
if (KThread* prev_highest_thread = m_state.highest_priority_thread;
prev_highest_thread != highest_thread) [[likely]] {
if (prev_highest_thread != nullptr) [[likely]] {
KScopedSpinLock lk{guard};
if (KThread* prev_highest_thread = state.highest_priority_thread;
prev_highest_thread != highest_thread) {
if (prev_highest_thread != nullptr) {
IncrementScheduledCount(prev_highest_thread);
prev_highest_thread->SetLastScheduledTick(kernel.System().CoreTiming().GetCPUTicks());
prev_highest_thread->SetLastScheduledTick(system.CoreTiming().GetCPUTicks());
}
if (m_state.should_count_idle) {
if (highest_thread != nullptr) [[likely]] {
if (state.should_count_idle) {
if (highest_thread != nullptr) {
if (KProcess* process = highest_thread->GetOwnerProcess(); process != nullptr) {
process->SetRunningThread(m_core_id, highest_thread, m_state.idle_count);
process->SetRunningThread(core_id, highest_thread, state.idle_count);
}
} else {
m_state.idle_count++;
state.idle_count++;
}
}
m_state.highest_priority_thread = highest_thread;
m_state.needs_scheduling = true;
return (1ULL << m_core_id);
state.highest_priority_thread = highest_thread;
state.needs_scheduling.store(true);
return (1ULL << core_id);
} else {
return 0;
}
}
u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
ASSERT(IsSchedulerLockedByCurrentThread(kernel));
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
// Clear that we need to update.
ClearSchedulerUpdateNeeded(kernel);
@@ -214,20 +98,18 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
KThread* top_threads[Core::Hardware::NUM_CPU_CORES];
auto& priority_queue = GetPriorityQueue(kernel);
// We want to go over all cores, finding the highest priority thread and determining if
// scheduling is needed for that core.
/// We want to go over all cores, finding the highest priority thread and determining if
/// scheduling is needed for that core.
for (size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
KThread* top_thread = priority_queue.GetScheduledFront(static_cast<s32>(core_id));
if (top_thread != nullptr) {
// We need to check if the thread's process has a pinned thread.
if (KProcess* parent = top_thread->GetOwnerProcess()) {
// Check that there's a pinned thread other than the current top thread.
if (KThread* pinned = parent->GetPinnedThread(static_cast<s32>(core_id));
pinned != nullptr && pinned != top_thread) {
// We need to prefer threads with kernel waiters to the pinned thread.
if (top_thread->GetNumKernelWaiters() ==
0 /* && top_thread != parent->GetExceptionThread() */) {
// If the pinned thread is runnable, use it.
// If the thread has no waiters, we need to check if the process has a thread pinned.
if (top_thread->GetNumKernelWaiters() == 0) {
if (KProcess* parent = top_thread->GetOwnerProcess(); parent != nullptr) {
if (KThread* pinned = parent->GetPinnedThread(static_cast<s32>(core_id));
pinned != nullptr && pinned != top_thread) {
// We prefer our parent's pinned thread if possible. However, we also don't
// want to schedule un-runnable threads.
if (pinned->GetRawState() == ThreadState::Runnable) {
top_thread = pinned;
} else {
@@ -247,8 +129,7 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
// Idle cores are bad. We're going to try to migrate threads to each idle core in turn.
while (idle_cores != 0) {
const s32 core_id = static_cast<s32>(std::countr_zero(idle_cores));
const auto core_id = static_cast<u32>(std::countr_zero(idle_cores));
if (KThread* suggested = priority_queue.GetSuggestedFront(core_id); suggested != nullptr) {
s32 migration_candidates[Core::Hardware::NUM_CPU_CORES];
size_t num_candidates = 0;
@@ -269,6 +150,7 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
// The suggested thread isn't bound to its core, so we can migrate it!
suggested->SetActiveCore(core_id);
priority_queue.ChangeCore(suggested_core, suggested);
top_threads[core_id] = suggested;
cores_needing_scheduling |=
kernel.Scheduler(core_id).UpdateHighestPriorityThread(top_threads[core_id]);
@@ -301,6 +183,7 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
// Perform the migration.
suggested->SetActiveCore(core_id);
priority_queue.ChangeCore(candidate_core, suggested);
top_threads[core_id] = suggested;
cores_needing_scheduling |=
kernel.Scheduler(core_id).UpdateHighestPriorityThread(
@@ -317,210 +200,24 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
return cores_needing_scheduling;
}
void KScheduler::SwitchThread(KThread* next_thread) {
KProcess* const cur_process = kernel.CurrentProcess();
KThread* const cur_thread = GetCurrentThreadPointer(kernel);
// We never want to schedule a null thread, so use the idle thread if we don't have a next.
if (next_thread == nullptr) {
next_thread = m_idle_thread;
}
if (next_thread->GetCurrentCore() != m_core_id) {
next_thread->SetCurrentCore(m_core_id);
}
// If we're not actually switching thread, there's nothing to do.
if (next_thread == cur_thread) {
return;
}
// Next thread is now known not to be nullptr, and must not be dispatchable.
ASSERT(next_thread->GetDisableDispatchCount() == 1);
ASSERT(!next_thread->IsDummyThread());
// Update the CPU time tracking variables.
const s64 prev_tick = m_last_context_switch_time;
const s64 cur_tick = kernel.System().CoreTiming().GetCPUTicks();
const s64 tick_diff = cur_tick - prev_tick;
cur_thread->AddCpuTime(m_core_id, tick_diff);
if (cur_process != nullptr) {
cur_process->UpdateCPUTimeTicks(tick_diff);
}
m_last_context_switch_time = cur_tick;
// Update our previous thread.
if (cur_process != nullptr) {
if (!cur_thread->IsTerminationRequested() && cur_thread->GetActiveCore() == m_core_id)
[[likely]] {
m_state.prev_thread = cur_thread;
} else {
m_state.prev_thread = nullptr;
}
}
// Switch the current process, if we're switching processes.
// if (KProcess *next_process = next_thread->GetOwnerProcess(); next_process != cur_process) {
// KProcess::Switch(cur_process, next_process);
// }
// Set the new thread.
SetCurrentThread(kernel, next_thread);
m_current_thread = next_thread;
// Set the new Thread Local region.
// cpu::SwitchThreadLocalRegion(GetInteger(next_thread->GetThreadLocalRegionAddress()));
}
void KScheduler::ScheduleImpl() {
// First, clear the needs scheduling bool.
m_state.needs_scheduling.store(false, std::memory_order_seq_cst);
// Load the appropriate thread pointers for scheduling.
KThread* const cur_thread{GetCurrentThreadPointer(kernel)};
KThread* highest_priority_thread{m_state.highest_priority_thread};
// Check whether there are runnable interrupt tasks.
if (m_state.interrupt_task_runnable) {
// The interrupt task is runnable.
// We want to switch to the interrupt task/idle thread.
highest_priority_thread = nullptr;
}
// If there aren't, we want to check if the highest priority thread is the same as the current
// thread.
if (highest_priority_thread == cur_thread) {
// If they're the same, then we can just return.
return;
}
// The highest priority thread is not the same as the current thread.
// Jump to the switcher and continue executing from there.
m_switch_cur_thread = cur_thread;
m_switch_highest_priority_thread = highest_priority_thread;
m_switch_from_schedule = true;
Common::Fiber::YieldTo(cur_thread->host_context, *m_switch_fiber);
// Returning from ScheduleImpl occurs after this thread has been scheduled again.
}
void KScheduler::ScheduleImplFiber() {
KThread* const cur_thread{m_switch_cur_thread};
KThread* highest_priority_thread{m_switch_highest_priority_thread};
// If we're not coming from scheduling (i.e., we came from SC preemption),
// we should restart the scheduling loop directly. Not accurate to HOS.
if (!m_switch_from_schedule) {
goto retry;
}
// Mark that we are not coming from scheduling anymore.
m_switch_from_schedule = false;
// Save the original thread context.
Unload(cur_thread);
// The current thread's context has been entirely taken care of.
// Now we want to loop until we successfully switch the thread context.
while (true) {
// We're starting to try to do the context switch.
// Check if the highest priority thread is null.
if (!highest_priority_thread) {
// The next thread is nullptr!
// Switch to the idle thread. Note: HOS treats idling as a special case for
// performance. This is not *required* for yuzu's purposes, and for singlecore
// compatibility, we can just move the logic that would go here into the execution
// of the idle thread. If we ever remove singlecore, we should implement this
// accurately to HOS.
highest_priority_thread = m_idle_thread;
}
// We want to try to lock the highest priority thread's context.
// Try to take it.
while (!highest_priority_thread->context_guard.try_lock()) {
// The highest priority thread's context is already locked.
// Check if we need scheduling. If we don't, we can retry directly.
if (m_state.needs_scheduling.load(std::memory_order_seq_cst)) {
// If we do, another core is interfering, and we must start again.
goto retry;
}
}
// It's time to switch the thread.
// Switch to the highest priority thread.
SwitchThread(highest_priority_thread);
// Check if we need scheduling. If we do, then we can't complete the switch and should
// retry.
if (m_state.needs_scheduling.load(std::memory_order_seq_cst)) {
// Our switch failed.
// We should unlock the thread context, and then retry.
highest_priority_thread->context_guard.unlock();
goto retry;
} else {
break;
}
retry:
// We failed to successfully do the context switch, and need to retry.
// Clear needs_scheduling.
m_state.needs_scheduling.store(false, std::memory_order_seq_cst);
// Refresh the highest priority thread.
highest_priority_thread = m_state.highest_priority_thread;
}
// Reload the guest thread context.
Reload(highest_priority_thread);
// Reload the host thread.
Common::Fiber::YieldTo(m_switch_fiber, *highest_priority_thread->host_context);
}
void KScheduler::Unload(KThread* thread) {
auto& cpu_core = kernel.System().ArmInterface(m_core_id);
cpu_core.SaveContext(thread->GetContext32());
cpu_core.SaveContext(thread->GetContext64());
// Save the TPIDR_EL0 system register in case it was modified.
thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
cpu_core.ClearExclusiveState();
// Check if the thread is terminated by checking the DPC flags.
if ((thread->GetStackParameters().dpc_flags & static_cast<u32>(DpcFlag::Terminated)) == 0) {
// The thread isn't terminated, so we want to unlock it.
thread->context_guard.unlock();
}
}
void KScheduler::Reload(KThread* thread) {
auto& cpu_core = kernel.System().ArmInterface(m_core_id);
cpu_core.LoadContext(thread->GetContext32());
cpu_core.LoadContext(thread->GetContext64());
cpu_core.SetTlsAddress(thread->GetTLSAddress());
cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
cpu_core.LoadWatchpointArray(thread->GetOwnerProcess()->GetWatchpoints());
cpu_core.ClearExclusiveState();
}
void KScheduler::ClearPreviousThread(KernelCore& kernel, KThread* thread) {
ASSERT(IsSchedulerLockedByCurrentThread(kernel));
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
for (size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; ++i) {
// Get an atomic reference to the core scheduler's previous thread.
auto& prev_thread{kernel.Scheduler(i).m_state.prev_thread};
std::atomic_ref<KThread*> prev_thread(kernel.Scheduler(static_cast<s32>(i)).prev_thread);
static_assert(std::atomic_ref<KThread*>::is_always_lock_free);
// Atomically clear the previous thread if it's our target.
KThread* compare = thread;
prev_thread.compare_exchange_strong(compare, nullptr, std::memory_order_seq_cst);
prev_thread.compare_exchange_strong(compare, nullptr);
}
}
void KScheduler::OnThreadStateChanged(KernelCore& kernel, KThread* thread, ThreadState old_state) {
ASSERT(IsSchedulerLockedByCurrentThread(kernel));
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
// Check if the state has changed, because if it hasn't there's nothing to do.
const ThreadState cur_state = thread->GetRawState();
const auto cur_state = thread->GetRawState();
if (cur_state == old_state) {
return;
}
@@ -540,12 +237,12 @@ void KScheduler::OnThreadStateChanged(KernelCore& kernel, KThread* thread, Threa
}
void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, KThread* thread, s32 old_priority) {
ASSERT(IsSchedulerLockedByCurrentThread(kernel));
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
// If the thread is runnable, we want to change its priority in the queue.
if (thread->GetRawState() == ThreadState::Runnable) {
GetPriorityQueue(kernel).ChangePriority(old_priority,
thread == GetCurrentThreadPointer(kernel), thread);
thread == kernel.GetCurrentEmuThread(), thread);
IncrementScheduledCount(thread);
SetSchedulerUpdateNeeded(kernel);
}
@@ -553,7 +250,7 @@ void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, KThread* thread, s3
void KScheduler::OnThreadAffinityMaskChanged(KernelCore& kernel, KThread* thread,
const KAffinityMask& old_affinity, s32 old_core) {
ASSERT(IsSchedulerLockedByCurrentThread(kernel));
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
// If the thread is runnable, we want to change its affinity in the queue.
if (thread->GetRawState() == ThreadState::Runnable) {
@@ -563,14 +260,15 @@ void KScheduler::OnThreadAffinityMaskChanged(KernelCore& kernel, KThread* thread
}
}
void KScheduler::RotateScheduledQueue(KernelCore& kernel, s32 core_id, s32 priority) {
ASSERT(IsSchedulerLockedByCurrentThread(kernel));
void KScheduler::RotateScheduledQueue(s32 cpu_core_id, s32 priority) {
ASSERT(system.GlobalSchedulerContext().IsLocked());
// Get a reference to the priority queue.
auto& kernel = system.Kernel();
auto& priority_queue = GetPriorityQueue(kernel);
// Rotate the front of the queue to the end.
KThread* top_thread = priority_queue.GetScheduledFront(core_id, priority);
KThread* top_thread = priority_queue.GetScheduledFront(cpu_core_id, priority);
KThread* next_thread = nullptr;
if (top_thread != nullptr) {
next_thread = priority_queue.MoveToScheduledBack(top_thread);
@@ -582,7 +280,7 @@ void KScheduler::RotateScheduledQueue(KernelCore& kernel, s32 core_id, s32 prior
// While we have a suggested thread, try to migrate it!
{
KThread* suggested = priority_queue.GetSuggestedFront(core_id, priority);
KThread* suggested = priority_queue.GetSuggestedFront(cpu_core_id, priority);
while (suggested != nullptr) {
// Check if the suggested thread is the top thread on its core.
const s32 suggested_core = suggested->GetActiveCore();
@@ -603,7 +301,7 @@ void KScheduler::RotateScheduledQueue(KernelCore& kernel, s32 core_id, s32 prior
// to the front of the queue.
if (top_on_suggested_core == nullptr ||
top_on_suggested_core->GetPriority() >= HighestCoreMigrationAllowedPriority) {
suggested->SetActiveCore(core_id);
suggested->SetActiveCore(cpu_core_id);
priority_queue.ChangeCore(suggested_core, suggested, true);
IncrementScheduledCount(suggested);
break;
@@ -611,21 +309,22 @@ void KScheduler::RotateScheduledQueue(KernelCore& kernel, s32 core_id, s32 prior
}
// Get the next suggestion.
suggested = priority_queue.GetSamePriorityNext(core_id, suggested);
suggested = priority_queue.GetSamePriorityNext(cpu_core_id, suggested);
}
}
// Now that we might have migrated a thread with the same priority, check if we can do better.
{
KThread* best_thread = priority_queue.GetScheduledFront(core_id);
KThread* best_thread = priority_queue.GetScheduledFront(cpu_core_id);
if (best_thread == GetCurrentThreadPointer(kernel)) {
best_thread = priority_queue.GetScheduledNext(core_id, best_thread);
best_thread = priority_queue.GetScheduledNext(cpu_core_id, best_thread);
}
// If the best thread we can choose has a priority the same or worse than ours, try to
// migrate a higher priority thread.
if (best_thread != nullptr && best_thread->GetPriority() >= priority) {
KThread* suggested = priority_queue.GetSuggestedFront(core_id);
KThread* suggested = priority_queue.GetSuggestedFront(cpu_core_id);
while (suggested != nullptr) {
// If the suggestion's priority is the same as ours, don't bother.
if (suggested->GetPriority() >= best_thread->GetPriority()) {
@@ -644,7 +343,7 @@ void KScheduler::RotateScheduledQueue(KernelCore& kernel, s32 core_id, s32 prior
if (top_on_suggested_core == nullptr ||
top_on_suggested_core->GetPriority() >=
HighestCoreMigrationAllowedPriority) {
suggested->SetActiveCore(core_id);
suggested->SetActiveCore(cpu_core_id);
priority_queue.ChangeCore(suggested_core, suggested, true);
IncrementScheduledCount(suggested);
break;
@@ -652,7 +351,7 @@ void KScheduler::RotateScheduledQueue(KernelCore& kernel, s32 core_id, s32 prior
}
// Get the next suggestion.
suggested = priority_queue.GetSuggestedNext(core_id, suggested);
suggested = priority_queue.GetSuggestedNext(cpu_core_id, suggested);
}
}
}
@@ -661,6 +360,64 @@ void KScheduler::RotateScheduledQueue(KernelCore& kernel, s32 core_id, s32 prior
SetSchedulerUpdateNeeded(kernel);
}
bool KScheduler::CanSchedule(KernelCore& kernel) {
return kernel.GetCurrentEmuThread()->GetDisableDispatchCount() <= 1;
}
bool KScheduler::IsSchedulerUpdateNeeded(const KernelCore& kernel) {
return kernel.GlobalSchedulerContext().scheduler_update_needed.load(std::memory_order_acquire);
}
void KScheduler::SetSchedulerUpdateNeeded(KernelCore& kernel) {
kernel.GlobalSchedulerContext().scheduler_update_needed.store(true, std::memory_order_release);
}
void KScheduler::ClearSchedulerUpdateNeeded(KernelCore& kernel) {
kernel.GlobalSchedulerContext().scheduler_update_needed.store(false, std::memory_order_release);
}
void KScheduler::DisableScheduling(KernelCore& kernel) {
// If we are shutting down the kernel, none of this is relevant anymore.
if (kernel.IsShuttingDown()) {
return;
}
ASSERT(GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() >= 0);
GetCurrentThreadPointer(kernel)->DisableDispatch();
}
void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling) {
// If we are shutting down the kernel, none of this is relevant anymore.
if (kernel.IsShuttingDown()) {
return;
}
auto* current_thread = GetCurrentThreadPointer(kernel);
ASSERT(current_thread->GetDisableDispatchCount() >= 1);
if (current_thread->GetDisableDispatchCount() > 1) {
current_thread->EnableDispatch();
} else {
RescheduleCores(kernel, cores_needing_scheduling);
}
// Special case to ensure dummy threads that are waiting block.
current_thread->IfDummyThreadTryWait();
}
u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) {
if (IsSchedulerUpdateNeeded(kernel)) {
return UpdateHighestPriorityThreadsImpl(kernel);
} else {
return 0;
}
}
KSchedulerPriorityQueue& KScheduler::GetPriorityQueue(KernelCore& kernel) {
return kernel.GlobalSchedulerContext().priority_queue;
}
void KScheduler::YieldWithoutCoreMigration(KernelCore& kernel) {
// Validate preconditions.
ASSERT(CanSchedule(kernel));
@@ -680,7 +437,7 @@ void KScheduler::YieldWithoutCoreMigration(KernelCore& kernel) {
// Perform the yield.
{
KScopedSchedulerLock sl{kernel};
KScopedSchedulerLock lock(kernel);
const auto cur_state = cur_thread.GetRawState();
if (cur_state == ThreadState::Runnable) {
@@ -719,7 +476,7 @@ void KScheduler::YieldWithCoreMigration(KernelCore& kernel) {
// Perform the yield.
{
KScopedSchedulerLock sl{kernel};
KScopedSchedulerLock lock(kernel);
const auto cur_state = cur_thread.GetRawState();
if (cur_state == ThreadState::Runnable) {
@@ -739,7 +496,7 @@ void KScheduler::YieldWithCoreMigration(KernelCore& kernel) {
if (KThread* running_on_suggested_core =
(suggested_core >= 0)
? kernel.Scheduler(suggested_core).m_state.highest_priority_thread
? kernel.Scheduler(suggested_core).state.highest_priority_thread
: nullptr;
running_on_suggested_core != suggested) {
// If the current thread's priority is higher than our suggestion's we prefer
@@ -807,7 +564,7 @@ void KScheduler::YieldToAnyThread(KernelCore& kernel) {
// Perform the yield.
{
KScopedSchedulerLock sl{kernel};
KScopedSchedulerLock lock(kernel);
const auto cur_state = cur_thread.GetRawState();
if (cur_state == ThreadState::Runnable) {
@@ -864,19 +621,223 @@ void KScheduler::YieldToAnyThread(KernelCore& kernel) {
}
}
void KScheduler::RescheduleOtherCores(u64 cores_needing_scheduling) {
if (const u64 core_mask = cores_needing_scheduling & ~(1ULL << m_core_id); core_mask != 0) {
RescheduleCores(kernel, core_mask);
KScheduler::KScheduler(Core::System& system_, s32 core_id_) : system{system_}, core_id{core_id_} {
switch_fiber = std::make_shared<Common::Fiber>([this] { SwitchToCurrent(); });
state.needs_scheduling.store(true);
state.interrupt_task_thread_runnable = false;
state.should_count_idle = false;
state.idle_count = 0;
state.idle_thread_stack = nullptr;
state.highest_priority_thread = nullptr;
}
void KScheduler::Finalize() {
if (idle_thread) {
idle_thread->Close();
idle_thread = nullptr;
}
}
void KScheduler::RescheduleCores(KernelCore& kernel, u64 core_mask) {
// Send IPI
for (size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
if (core_mask & (1ULL << i)) {
kernel.PhysicalCore(i).Interrupt();
}
KScheduler::~KScheduler() {
ASSERT(!idle_thread);
}
KThread* KScheduler::GetSchedulerCurrentThread() const {
if (auto result = current_thread.load(); result) {
return result;
}
return idle_thread;
}
u64 KScheduler::GetLastContextSwitchTicks() const {
return last_context_switch_time;
}
void KScheduler::RescheduleCurrentCore() {
ASSERT(GetCurrentThread(system.Kernel()).GetDisableDispatchCount() == 1);
auto& phys_core = system.Kernel().PhysicalCore(core_id);
if (phys_core.IsInterrupted()) {
phys_core.ClearInterrupt();
}
guard.Lock();
if (state.needs_scheduling.load()) {
Schedule();
} else {
GetCurrentThread(system.Kernel()).EnableDispatch();
guard.Unlock();
}
}
void KScheduler::OnThreadStart() {
SwitchContextStep2();
}
void KScheduler::Unload(KThread* thread) {
ASSERT(thread);
LOG_TRACE(Kernel, "core {}, unload thread {}", core_id, thread ? thread->GetName() : "nullptr");
if (thread->IsCallingSvc()) {
thread->ClearIsCallingSvc();
}
auto& physical_core = system.Kernel().PhysicalCore(core_id);
if (!physical_core.IsInitialized()) {
return;
}
Core::ARM_Interface& cpu_core = physical_core.ArmInterface();
cpu_core.SaveContext(thread->GetContext32());
cpu_core.SaveContext(thread->GetContext64());
// Save the TPIDR_EL0 system register in case it was modified.
thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
cpu_core.ClearExclusiveState();
if (!thread->IsTerminationRequested() && thread->GetActiveCore() == core_id) {
prev_thread = thread;
} else {
prev_thread = nullptr;
}
thread->context_guard.unlock();
}
void KScheduler::Reload(KThread* thread) {
LOG_TRACE(Kernel, "core {}, reload thread {}", core_id, thread->GetName());
Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
cpu_core.LoadContext(thread->GetContext32());
cpu_core.LoadContext(thread->GetContext64());
cpu_core.LoadWatchpointArray(thread->GetOwnerProcess()->GetWatchpoints());
cpu_core.SetTlsAddress(thread->GetTLSAddress());
cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
cpu_core.ClearExclusiveState();
}
void KScheduler::SwitchContextStep2() {
// Load context of new thread
Reload(GetCurrentThreadPointer(system.Kernel()));
RescheduleCurrentCore();
}
void KScheduler::Schedule() {
ASSERT(GetCurrentThread(system.Kernel()).GetDisableDispatchCount() == 1);
this->ScheduleImpl();
}
void KScheduler::ScheduleImpl() {
KThread* previous_thread = GetCurrentThreadPointer(system.Kernel());
KThread* next_thread = state.highest_priority_thread;
state.needs_scheduling.store(false);
// We never want to schedule a null thread, so use the idle thread if we don't have a next.
if (next_thread == nullptr) {
next_thread = idle_thread;
}
if (next_thread->GetCurrentCore() != core_id) {
next_thread->SetCurrentCore(core_id);
}
// We never want to schedule a dummy thread, as these are only used by host threads for locking.
if (next_thread->GetThreadType() == ThreadType::Dummy) {
ASSERT_MSG(false, "Dummy threads should never be scheduled!");
next_thread = idle_thread;
}
// If we're not actually switching thread, there's nothing to do.
if (next_thread == current_thread.load()) {
previous_thread->EnableDispatch();
guard.Unlock();
return;
}
// Update the CPU time tracking variables.
KProcess* const previous_process = system.Kernel().CurrentProcess();
UpdateLastContextSwitchTime(previous_thread, previous_process);
// Save context for previous thread
Unload(previous_thread);
std::shared_ptr<Common::Fiber>* old_context;
old_context = &previous_thread->GetHostContext();
// Set the new thread.
SetCurrentThread(system.Kernel(), next_thread);
current_thread.store(next_thread);
guard.Unlock();
Common::Fiber::YieldTo(*old_context, *switch_fiber);
/// When a thread wakes up, the scheduler may have changed to that of another core.
auto& next_scheduler = *system.Kernel().CurrentScheduler();
next_scheduler.SwitchContextStep2();
}
void KScheduler::SwitchToCurrent() {
while (true) {
{
KScopedSpinLock lk{guard};
current_thread.store(state.highest_priority_thread);
state.needs_scheduling.store(false);
}
const auto is_switch_pending = [this] {
KScopedSpinLock lk{guard};
return state.needs_scheduling.load();
};
do {
auto next_thread = current_thread.load();
if (next_thread != nullptr) {
const auto locked = next_thread->context_guard.try_lock();
if (state.needs_scheduling.load()) {
next_thread->context_guard.unlock();
break;
}
if (next_thread->GetActiveCore() != core_id) {
next_thread->context_guard.unlock();
break;
}
if (!locked) {
continue;
}
}
auto thread = next_thread ? next_thread : idle_thread;
SetCurrentThread(system.Kernel(), thread);
Common::Fiber::YieldTo(switch_fiber, *thread->GetHostContext());
} while (!is_switch_pending());
}
}
void KScheduler::UpdateLastContextSwitchTime(KThread* thread, KProcess* process) {
const u64 prev_switch_ticks = last_context_switch_time;
const u64 most_recent_switch_ticks = system.CoreTiming().GetCPUTicks();
const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks;
if (thread != nullptr) {
thread->AddCpuTime(core_id, update_ticks);
}
if (process != nullptr) {
process->UpdateCPUTimeTicks(update_ticks);
}
last_context_switch_time = most_recent_switch_ticks;
}
void KScheduler::Initialize() {
idle_thread = KThread::Create(system.Kernel());
ASSERT(KThread::InitializeIdleThread(system, idle_thread, core_id).IsSuccess());
idle_thread->SetName(fmt::format("IdleThread:{}", core_id));
idle_thread->EnableDispatch();
}
KScopedSchedulerLock::KScopedSchedulerLock(KernelCore& kernel)
: KScopedLock(kernel.GlobalSchedulerContext().SchedulerLock()) {}
KScopedSchedulerLock::~KScopedSchedulerLock() = default;
} // namespace Kernel

View File

@@ -11,7 +11,6 @@
#include "core/hle/kernel/k_scheduler_lock.h"
#include "core/hle/kernel/k_scoped_lock.h"
#include "core/hle/kernel/k_spin_lock.h"
#include "core/hle/kernel/k_thread.h"
namespace Common {
class Fiber;
@@ -24,150 +23,184 @@ class System;
namespace Kernel {
class KernelCore;
class KInterruptTaskManager;
class KProcess;
class SchedulerLock;
class KThread;
class KScopedDisableDispatch;
class KScopedSchedulerLock;
class KScopedSchedulerLockAndSleep;
class KScheduler final {
public:
YUZU_NON_COPYABLE(KScheduler);
YUZU_NON_MOVEABLE(KScheduler);
using LockType = KAbstractSchedulerLock<KScheduler>;
explicit KScheduler(KernelCore& kernel);
explicit KScheduler(Core::System& system_, s32 core_id_);
~KScheduler();
void Initialize(KThread* main_thread, KThread* idle_thread, s32 core_id);
void Activate();
void OnThreadStart();
void Finalize();
/// Reschedules to the next available thread (call after current thread is suspended)
void RescheduleCurrentCore();
/// Reschedules cores pending reschedule, to be called on EnableScheduling.
static void RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule);
/// The next two are for SingleCore Only.
/// Unload current thread before preempting core.
void Unload(KThread* thread);
/// Reload current thread after core preemption.
void Reload(KThread* thread);
void SetInterruptTaskRunnable();
void RequestScheduleOnInterrupt();
void PreemptSingleCore();
/// Gets the current running thread
[[nodiscard]] KThread* GetSchedulerCurrentThread() const;
u64 GetIdleCount() {
return m_state.idle_count;
/// Gets the idle thread
[[nodiscard]] KThread* GetIdleThread() const {
return idle_thread;
}
KThread* GetIdleThread() const {
return m_idle_thread;
/// Returns true if the scheduler is idle
[[nodiscard]] bool IsIdle() const {
return GetSchedulerCurrentThread() == idle_thread;
}
bool IsIdle() const {
return m_current_thread.load() == m_idle_thread;
/// Gets the timestamp for the last context switch in ticks.
[[nodiscard]] u64 GetLastContextSwitchTicks() const;
[[nodiscard]] bool ContextSwitchPending() const {
return state.needs_scheduling.load(std::memory_order_relaxed);
}
KThread* GetPreviousThread() const {
return m_state.prev_thread;
void Initialize();
void OnThreadStart();
[[nodiscard]] std::shared_ptr<Common::Fiber>& ControlContext() {
return switch_fiber;
}
KThread* GetSchedulerCurrentThread() const {
return m_current_thread.load();
[[nodiscard]] const std::shared_ptr<Common::Fiber>& ControlContext() const {
return switch_fiber;
}
s64 GetLastContextSwitchTime() const {
return m_last_context_switch_time;
}
[[nodiscard]] u64 UpdateHighestPriorityThread(KThread* highest_thread);
// Static public API.
static bool CanSchedule(KernelCore& kernel) {
return GetCurrentThread(kernel).GetDisableDispatchCount() == 0;
}
static bool IsSchedulerLockedByCurrentThread(KernelCore& kernel) {
return kernel.GlobalSchedulerContext().scheduler_lock.IsLockedByCurrentThread();
}
/**
* Takes a thread and moves it to the back of its priority list.
*
* @note This operation can be redundant and no scheduling is changed if marked as so.
*/
static void YieldWithoutCoreMigration(KernelCore& kernel);
static bool IsSchedulerUpdateNeeded(KernelCore& kernel) {
return kernel.GlobalSchedulerContext().scheduler_update_needed;
}
static void SetSchedulerUpdateNeeded(KernelCore& kernel) {
kernel.GlobalSchedulerContext().scheduler_update_needed = true;
}
static void ClearSchedulerUpdateNeeded(KernelCore& kernel) {
kernel.GlobalSchedulerContext().scheduler_update_needed = false;
}
/**
* Takes a thread and moves it to the back of its priority list.
* Afterwards, tries to pick a suggested thread from the suggested queue that has worse time or
* a better priority than the next thread in the core.
*
* @note This operation can be redundant and no scheduling is changed if marked as so.
*/
static void YieldWithCoreMigration(KernelCore& kernel);
static void DisableScheduling(KernelCore& kernel);
static void EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling);
static u64 UpdateHighestPriorityThreads(KernelCore& kernel);
/**
* Takes a thread and moves it out of the scheduling queue
* and into the suggested queue. If no thread can be scheduled afterwards in that core,
* a suggested thread is obtained instead.
*
* @note This operation can be redundant and no scheduling is changed if marked as so.
*/
static void YieldToAnyThread(KernelCore& kernel);
static void ClearPreviousThread(KernelCore& kernel, KThread* thread);
/// Notify the scheduler a thread's status has changed.
static void OnThreadStateChanged(KernelCore& kernel, KThread* thread, ThreadState old_state);
/// Notify the scheduler a thread's priority has changed.
static void OnThreadPriorityChanged(KernelCore& kernel, KThread* thread, s32 old_priority);
/// Notify the scheduler a thread's core and/or affinity mask has changed.
static void OnThreadAffinityMaskChanged(KernelCore& kernel, KThread* thread,
const KAffinityMask& old_affinity, s32 old_core);
static void RotateScheduledQueue(KernelCore& kernel, s32 core_id, s32 priority);
static void RescheduleCores(KernelCore& kernel, u64 cores_needing_scheduling);
static void YieldWithoutCoreMigration(KernelCore& kernel);
static void YieldWithCoreMigration(KernelCore& kernel);
static void YieldToAnyThread(KernelCore& kernel);
static bool CanSchedule(KernelCore& kernel);
static bool IsSchedulerUpdateNeeded(const KernelCore& kernel);
static void SetSchedulerUpdateNeeded(KernelCore& kernel);
static void ClearSchedulerUpdateNeeded(KernelCore& kernel);
static void DisableScheduling(KernelCore& kernel);
static void EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling);
[[nodiscard]] static u64 UpdateHighestPriorityThreads(KernelCore& kernel);
private:
// Static private API.
static KSchedulerPriorityQueue& GetPriorityQueue(KernelCore& kernel) {
return kernel.GlobalSchedulerContext().priority_queue;
}
static u64 UpdateHighestPriorityThreadsImpl(KernelCore& kernel);
friend class GlobalSchedulerContext;
static void RescheduleCurrentHLEThread(KernelCore& kernel);
/**
* Takes care of selecting the new scheduled threads in three steps:
*
* 1. First a thread is selected from the top of the priority queue. If no thread
* is obtained then we move to step two, else we are done.
*
* 2. Second we try to get a suggested thread that's not assigned to any core or
* that is not the top thread in that core.
*
* 3. Third, if no suggested thread is found, we do a second pass and pick a running
* thread in another core and swap it with its current thread.
*
* returns the cores needing scheduling.
*/
[[nodiscard]] static u64 UpdateHighestPriorityThreadsImpl(KernelCore& kernel);
// Instanced private API.
void ScheduleImpl();
void ScheduleImplFiber();
void SwitchThread(KThread* next_thread);
[[nodiscard]] static KSchedulerPriorityQueue& GetPriorityQueue(KernelCore& kernel);
void RotateScheduledQueue(s32 cpu_core_id, s32 priority);
void Schedule();
void ScheduleOnInterrupt();
void RescheduleOtherCores(u64 cores_needing_scheduling);
void RescheduleCurrentCore();
void RescheduleCurrentCoreImpl();
/// Switches the CPU's active thread context to that of the specified thread
void ScheduleImpl();
u64 UpdateHighestPriorityThread(KThread* thread);
/// When a thread wakes up, it must run this through its new scheduler
void SwitchContextStep2();
private:
friend class KScopedDisableDispatch;
/**
* Called on every context switch to update the internal timestamp
* This also updates the running time ticks for the given thread and
* process using the following difference:
*
* ticks += most_recent_ticks - last_context_switch_ticks
*
* The internal tick timestamp for the scheduler is simply the
* most recent tick count retrieved. No special arithmetic is
* applied to it.
*/
void UpdateLastContextSwitchTime(KThread* thread, KProcess* process);
void SwitchToCurrent();
KThread* prev_thread{};
std::atomic<KThread*> current_thread{};
KThread* idle_thread{};
std::shared_ptr<Common::Fiber> switch_fiber{};
struct SchedulingState {
std::atomic<bool> needs_scheduling{false};
bool interrupt_task_runnable{false};
bool should_count_idle{false};
u64 idle_count{0};
KThread* highest_priority_thread{nullptr};
void* idle_thread_stack{nullptr};
std::atomic<KThread*> prev_thread{nullptr};
KInterruptTaskManager* interrupt_task_manager{nullptr};
std::atomic<bool> needs_scheduling{};
bool interrupt_task_thread_runnable{};
bool should_count_idle{};
u64 idle_count{};
KThread* highest_priority_thread{};
void* idle_thread_stack{};
};
KernelCore& kernel;
SchedulingState m_state;
bool m_is_active{false};
s32 m_core_id{0};
s64 m_last_context_switch_time{0};
KThread* m_idle_thread{nullptr};
std::atomic<KThread*> m_current_thread{nullptr};
SchedulingState state;
std::shared_ptr<Common::Fiber> m_switch_fiber{};
KThread* m_switch_cur_thread{};
KThread* m_switch_highest_priority_thread{};
bool m_switch_from_schedule{};
Core::System& system;
u64 last_context_switch_time{};
const s32 core_id;
KSpinLock guard{};
};
class KScopedSchedulerLock : public KScopedLock<KScheduler::LockType> {
class [[nodiscard]] KScopedSchedulerLock : KScopedLock<GlobalSchedulerContext::LockType> {
public:
explicit KScopedSchedulerLock(KernelCore& kernel)
: KScopedLock(kernel.GlobalSchedulerContext().scheduler_lock) {}
~KScopedSchedulerLock() = default;
explicit KScopedSchedulerLock(KernelCore& kernel);
~KScopedSchedulerLock();
};
} // namespace Kernel
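A usage illustration may help tie the yield helpers declared above together. The sketch below is hypothetical (YieldExample is not a function in this diff); it assumes the svcSleepThread convention in which the sleep argument values 0, -1 and -2 select the three yield flavours, and it relies only on the static KScheduler entry points shown in the header.

#include "common/common_types.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/kernel.h"

namespace Kernel {

// Hypothetical caller: dispatch a guest yield request to the static scheduler
// API. The helpers take the global scheduler lock themselves, so the caller
// only needs dispatch enabled (CanSchedule() must hold).
void YieldExample(KernelCore& kernel, s64 yield_kind) {
    switch (yield_kind) {
    case 0: // rotate behind same-priority threads on the current core
        KScheduler::YieldWithoutCoreMigration(kernel);
        break;
    case -1: // also consider migrating a suggested thread onto this core
        KScheduler::YieldWithCoreMigration(kernel);
        break;
    case -2: // give up the core, moving this thread to the suggested queue
        KScheduler::YieldToAnyThread(kernel);
        break;
    default:
        break;
    }
}

} // namespace Kernel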

View File

@@ -5,11 +5,9 @@
#include <atomic>
#include "common/assert.h"
#include "core/hle/kernel/k_interrupt_manager.h"
#include "core/hle/kernel/k_spin_lock.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/physical_core.h"
namespace Kernel {

View File

@@ -258,18 +258,7 @@ Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_
}
Result KThread::InitializeDummyThread(KThread* thread) {
// Initialize the thread.
R_TRY(thread->Initialize({}, {}, {}, DummyThreadPriority, 3, {}, ThreadType::Dummy));
// Initialize emulation parameters.
thread->stack_parameters.disable_count = 0;
return ResultSuccess;
}
Result KThread::InitializeMainThread(Core::System& system, KThread* thread, s32 virt_core) {
return InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, ThreadType::Main,
system.GetCpuManager().GetGuestActivateFunc());
return thread->Initialize({}, {}, {}, DummyThreadPriority, 3, {}, ThreadType::Dummy);
}
Result KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) {
@@ -288,7 +277,7 @@ Result KThread::InitializeUserThread(Core::System& system, KThread* thread, KThr
KProcess* owner) {
system.Kernel().GlobalSchedulerContext().AddThread(thread);
return InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner,
ThreadType::User, system.GetCpuManager().GetGuestThreadFunc());
ThreadType::User, system.GetCpuManager().GetGuestThreadStartFunc());
}
void KThread::PostDestroy(uintptr_t arg) {
@@ -319,20 +308,14 @@ void KThread::Finalize() {
auto it = waiter_list.begin();
while (it != waiter_list.end()) {
// Get the thread.
KThread* const waiter = std::addressof(*it);
// The thread shouldn't be a kernel waiter.
ASSERT(!IsKernelAddressKey(waiter->GetAddressKey()));
// Clear the lock owner.
waiter->SetLockOwner(nullptr);
// Clear the lock owner
it->SetLockOwner(nullptr);
// Erase the waiter from our list.
it = waiter_list.erase(it);
// Cancel the thread's wait.
waiter->CancelWait(ResultInvalidState, true);
it->CancelWait(ResultInvalidState, true);
}
}
@@ -1069,8 +1052,6 @@ void KThread::Exit() {
// Register the thread as a work task.
KWorkerTaskManager::AddTask(kernel, KWorkerTaskManager::WorkerType::Exit, this);
}
UNREACHABLE_MSG("KThread::Exit() would return");
}
Result KThread::Sleep(s64 timeout) {
@@ -1106,8 +1087,6 @@ void KThread::IfDummyThreadTryWait() {
return;
}
ASSERT(!kernel.IsPhantomModeForSingleCore());
// Block until we are no longer waiting.
std::unique_lock lk(dummy_wait_lock);
dummy_wait_cv.wait(
@@ -1212,13 +1191,16 @@ KScopedDisableDispatch::~KScopedDisableDispatch() {
return;
}
if (GetCurrentThread(kernel).GetDisableDispatchCount() <= 1) {
auto* scheduler = kernel.CurrentScheduler();
// Skip the reschedule if single-core, as dispatch tracking is disabled here.
if (!Settings::values.use_multi_core.GetValue()) {
return;
}
if (scheduler && !kernel.IsPhantomModeForSingleCore()) {
if (GetCurrentThread(kernel).GetDisableDispatchCount() <= 1) {
auto scheduler = kernel.CurrentScheduler();
if (scheduler) {
scheduler->RescheduleCurrentCore();
} else {
KScheduler::RescheduleCurrentHLEThread(kernel);
}
} else {
GetCurrentThread(kernel).EnableDispatch();

View File

@@ -413,9 +413,6 @@ public:
[[nodiscard]] static Result InitializeDummyThread(KThread* thread);
[[nodiscard]] static Result InitializeMainThread(Core::System& system, KThread* thread,
s32 virt_core);
[[nodiscard]] static Result InitializeIdleThread(Core::System& system, KThread* thread,
s32 virt_core);
@@ -483,16 +480,39 @@ public:
return per_core_priority_queue_entry[core];
}
[[nodiscard]] bool IsKernelThread() const {
return GetActiveCore() == 3;
}
[[nodiscard]] bool IsDispatchTrackingDisabled() const {
return is_single_core || IsKernelThread();
}
[[nodiscard]] s32 GetDisableDispatchCount() const {
if (IsDispatchTrackingDisabled()) {
// TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
return 1;
}
return this->GetStackParameters().disable_count;
}
void DisableDispatch() {
if (IsDispatchTrackingDisabled()) {
// TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
return;
}
ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 0);
this->GetStackParameters().disable_count++;
}
void EnableDispatch() {
if (IsDispatchTrackingDisabled()) {
// TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
return;
}
ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() > 0);
this->GetStackParameters().disable_count--;
}
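(Aside, not part of the diff: the disable_count bookkeeping above only works if every DisableDispatch() is matched by an EnableDispatch(). A minimal sketch of a scoped guard that enforces that pairing follows; the kernel's own KScopedDisableDispatch serves this purpose, and the class name here is made up for illustration.)

// Illustrative sketch only: keeps disable_count balanced even on early returns.
class ScopedDisableDispatchSketch {
public:
    explicit ScopedDisableDispatchSketch(KThread& thread_) : thread{thread_} {
        thread.DisableDispatch(); // increments disable_count (no-op when tracking is disabled)
    }
    ~ScopedDisableDispatchSketch() {
        thread.EnableDispatch(); // decrements disable_count so the scheduler can run again
    }
    ScopedDisableDispatchSketch(const ScopedDisableDispatchSketch&) = delete;
    ScopedDisableDispatchSketch& operator=(const ScopedDisableDispatchSketch&) = delete;
private:
    KThread& thread;
};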

View File

@@ -64,6 +64,8 @@ struct KernelCore::Impl {
is_phantom_mode_for_singlecore = false;
InitializePhysicalCores();
// Derive the initial memory layout from the emulated board
Init::InitializeSlabResourceCounts(kernel);
DeriveInitialMemoryLayout();
@@ -73,9 +75,9 @@ struct KernelCore::Impl {
InitializeSystemResourceLimit(kernel, system.CoreTiming());
InitializeMemoryLayout();
Init::InitializeKPageBufferSlabHeap(system);
InitializeSchedulers();
InitializeShutdownThreads();
InitializePreemption(kernel);
InitializePhysicalCores();
RegisterHostThread();
}
@@ -146,6 +148,7 @@ struct KernelCore::Impl {
shutdown_threads[core_id] = nullptr;
}
schedulers[core_id]->Finalize();
schedulers[core_id].reset();
}
@@ -192,21 +195,14 @@ struct KernelCore::Impl {
exclusive_monitor =
Core::MakeExclusiveMonitor(system.Memory(), Core::Hardware::NUM_CPU_CORES);
for (u32 i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
const s32 core{static_cast<s32>(i)};
schedulers[i] = std::make_unique<Kernel::KScheduler>(system.Kernel());
schedulers[i] = std::make_unique<Kernel::KScheduler>(system, i);
cores.emplace_back(i, system, *schedulers[i], interrupts);
}
}
auto* main_thread{Kernel::KThread::Create(system.Kernel())};
main_thread->SetName(fmt::format("MainThread:{}", core));
main_thread->SetCurrentCore(core);
ASSERT(Kernel::KThread::InitializeMainThread(system, main_thread, core).IsSuccess());
auto* idle_thread{Kernel::KThread::Create(system.Kernel())};
idle_thread->SetCurrentCore(core);
ASSERT(Kernel::KThread::InitializeIdleThread(system, idle_thread, core).IsSuccess());
schedulers[i]->Initialize(main_thread, idle_thread, core);
void InitializeSchedulers() {
for (u32 i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
cores[i].Scheduler().Initialize();
}
}
@@ -1105,11 +1101,10 @@ void KernelCore::Suspend(bool suspended) {
}
void KernelCore::ShutdownCores() {
KScopedSchedulerLock lk{*this};
for (auto* thread : impl->shutdown_threads) {
void(thread->Run());
}
InterruptAllPhysicalCores();
}
bool KernelCore::IsMulticore() const {

View File

@@ -43,7 +43,6 @@ void PhysicalCore::Initialize([[maybe_unused]] bool is_64_bit) {
void PhysicalCore::Run() {
arm_interface->Run();
arm_interface->ClearExclusiveState();
}
void PhysicalCore::Idle() {

View File

@@ -887,7 +887,7 @@ static Result GetInfo(Core::System& system, u64* result, u64 info_id, Handle han
const auto* const current_thread = GetCurrentThreadPointer(system.Kernel());
const bool same_thread = current_thread == thread.GetPointerUnsafe();
const u64 prev_ctx_ticks = scheduler.GetLastContextSwitchTime();
const u64 prev_ctx_ticks = scheduler.GetLastContextSwitchTicks();
u64 out_ticks = 0;
if (same_thread && info_sub_id == 0xFFFFFFFFFFFFFFFF) {
const u64 thread_ticks = current_thread->GetCpuTime();
@@ -3026,6 +3026,11 @@ void Call(Core::System& system, u32 immediate) {
}
kernel.ExitSVCProfile();
if (!thread->IsCallingSvc()) {
auto* host_context = thread->GetHostContext().get();
host_context->Rewind();
}
}
} // namespace Kernel::Svc

View File

@@ -838,11 +838,11 @@ bool Controller_NPad::VibrateControllerAtIndex(Core::HID::NpadIdType npad_id,
const auto now = steady_clock::now();
// Filter out non-zero vibrations that are within 15ms of each other.
// Filter out non-zero vibrations that are within 10ms of each other.
if ((vibration_value.low_amplitude != 0.0f || vibration_value.high_amplitude != 0.0f) &&
duration_cast<milliseconds>(
now - controller.vibration[device_index].last_vibration_timepoint) <
milliseconds(15)) {
milliseconds(10)) {
return false;
}

View File

@@ -438,15 +438,8 @@ SDLDriver::SDLDriver(std::string input_engine_) : InputEngine(std::move(input_en
using namespace std::chrono_literals;
while (initialized) {
SDL_PumpEvents();
std::this_thread::sleep_for(1ms);
}
});
vibration_thread = std::thread([this] {
Common::SetCurrentThreadName("yuzu:input:SDL_Vibration");
using namespace std::chrono_literals;
while (initialized) {
SendVibrations();
std::this_thread::sleep_for(10ms);
std::this_thread::sleep_for(1ms);
}
});
}
@@ -464,7 +457,6 @@ SDLDriver::~SDLDriver() {
initialized = false;
if (start_thread) {
poll_thread.join();
vibration_thread.join();
SDL_QuitSubSystem(SDL_INIT_JOYSTICK | SDL_INIT_GAMECONTROLLER);
}
}

View File

@@ -128,6 +128,5 @@ private:
std::atomic<bool> initialized = false;
std::thread poll_thread;
std::thread vibration_thread;
};
} // namespace InputCommon

View File

@@ -30,8 +30,6 @@ add_executable(yuzu
applets/qt_web_browser_scripts.h
bootmanager.cpp
bootmanager.h
check_vulkan.cpp
check_vulkan.h
compatdb.ui
compatibility_list.cpp
compatibility_list.h
@@ -155,6 +153,8 @@ add_executable(yuzu
main.cpp
main.h
main.ui
startup_checks.cpp
startup_checks.h
uisettings.cpp
uisettings.h
util/controller_navigation.cpp

View File

@@ -2,8 +2,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#ifdef YUZU_USE_QT_WEB_ENGINE
#include <bit>
#include <QApplication>
#include <QKeyEvent>
@@ -213,10 +211,8 @@ template <Core::HID::NpadButton... T>
void QtNXWebEngineView::HandleWindowFooterButtonPressedOnce() {
const auto f = [this](Core::HID::NpadButton button) {
if (input_interpreter->IsButtonPressedOnce(button)) {
const auto button_index = std::countr_zero(static_cast<u64>(button));
page()->runJavaScript(
QStringLiteral("yuzu_key_callbacks[%1] == null;").arg(button_index),
QStringLiteral("yuzu_key_callbacks[%1] == null;").arg(static_cast<u8>(button)),
[this, button](const QVariant& variant) {
if (variant.toBool()) {
switch (button) {
@@ -240,7 +236,7 @@ void QtNXWebEngineView::HandleWindowFooterButtonPressedOnce() {
page()->runJavaScript(
QStringLiteral("if (yuzu_key_callbacks[%1] != null) { yuzu_key_callbacks[%1](); }")
.arg(button_index));
.arg(static_cast<u8>(button)));
}
};
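(For context on the index change above, and not part of the diff itself: NpadButton values are single-bit masks, so std::countr_zero turns the mask into the zero-based slot used to index yuzu_key_callbacks, while static_cast<u8>(button) passes the raw bit pattern instead. A small sketch with made-up enumerator values:)

#include <bit>
#include <cstdint>

// Hypothetical values for illustration; the real Core::HID::NpadButton
// enumerators are defined in the HID headers.
enum class NpadButton : std::uint64_t { A = 1ULL << 0, B = 1ULL << 1, X = 1ULL << 2, Y = 1ULL << 3 };

int ButtonIndex(NpadButton button) {
    // countr_zero counts trailing zero bits: Y == 0b1000 maps to index 3,
    // whereas the raw mask value would be 8.
    return std::countr_zero(static_cast<std::uint64_t>(button));
}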

View File

@@ -1,53 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "video_core/vulkan_common/vulkan_wrapper.h"
#include <filesystem>
#include <fstream>
#include "common/fs/fs.h"
#include "common/fs/path_util.h"
#include "common/logging/log.h"
#include "video_core/vulkan_common/vulkan_instance.h"
#include "video_core/vulkan_common/vulkan_library.h"
#include "yuzu/check_vulkan.h"
#include "yuzu/uisettings.h"
constexpr char TEMP_FILE_NAME[] = "vulkan_check";
bool CheckVulkan() {
if (UISettings::values.has_broken_vulkan) {
return true;
}
LOG_DEBUG(Frontend, "Checking presence of Vulkan");
const auto fs_config_loc = Common::FS::GetYuzuPath(Common::FS::YuzuPath::ConfigDir);
const auto temp_file_loc = fs_config_loc / TEMP_FILE_NAME;
if (std::filesystem::exists(temp_file_loc)) {
LOG_WARNING(Frontend, "Detected recovery from previous failed Vulkan initialization");
UISettings::values.has_broken_vulkan = true;
std::filesystem::remove(temp_file_loc);
return false;
}
std::ofstream temp_file_handle(temp_file_loc);
temp_file_handle.close();
try {
Vulkan::vk::InstanceDispatch dld;
const Common::DynamicLibrary library = Vulkan::OpenLibrary();
const Vulkan::vk::Instance instance =
Vulkan::CreateInstance(library, dld, VK_API_VERSION_1_0);
} catch (const Vulkan::vk::Exception& exception) {
LOG_ERROR(Frontend, "Failed to initialize Vulkan: {}", exception.what());
// Don't set has_broken_vulkan to true here: we care when loading Vulkan crashes the
// application, not when we can handle it.
}
std::filesystem::remove(temp_file_loc);
return true;
}

View File

@@ -1,6 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
bool CheckVulkan();

View File

@@ -682,12 +682,6 @@ void Config::ReadRendererValues() {
ReadGlobalSetting(Settings::values.bg_green);
ReadGlobalSetting(Settings::values.bg_blue);
if (!global && UISettings::values.has_broken_vulkan &&
Settings::values.renderer_backend.GetValue() == Settings::RendererBackend::Vulkan &&
!Settings::values.renderer_backend.UsingGlobal()) {
Settings::values.renderer_backend.SetGlobal(true);
}
if (global) {
ReadBasicSetting(Settings::values.renderer_debug);
ReadBasicSetting(Settings::values.renderer_shader_feedback);
@@ -807,7 +801,6 @@ void Config::ReadUIValues() {
ReadBasicSetting(UISettings::values.pause_when_in_background);
ReadBasicSetting(UISettings::values.mute_when_in_background);
ReadBasicSetting(UISettings::values.hide_mouse);
ReadBasicSetting(UISettings::values.has_broken_vulkan);
ReadBasicSetting(UISettings::values.disable_web_applet);
qt_config->endGroup();
@@ -1355,7 +1348,6 @@ void Config::SaveUIValues() {
WriteBasicSetting(UISettings::values.pause_when_in_background);
WriteBasicSetting(UISettings::values.mute_when_in_background);
WriteBasicSetting(UISettings::values.hide_mouse);
WriteBasicSetting(UISettings::values.has_broken_vulkan);
WriteBasicSetting(UISettings::values.disable_web_applet);
qt_config->endGroup();

View File

@@ -58,24 +58,9 @@ ConfigureGraphics::ConfigureGraphics(const Core::System& system_, QWidget* paren
UpdateBackgroundColorButton(new_bg_color);
});
connect(ui->button_check_vulkan, &QAbstractButton::clicked, this, [this] {
UISettings::values.has_broken_vulkan = false;
if (RetrieveVulkanDevices()) {
ui->api->setEnabled(true);
ui->button_check_vulkan->hide();
for (const auto& device : vulkan_devices) {
ui->device->addItem(device);
}
} else {
UISettings::values.has_broken_vulkan = true;
}
});
ui->api->setEnabled(!UISettings::values.has_broken_vulkan.GetValue());
ui->button_check_vulkan->setVisible(UISettings::values.has_broken_vulkan.GetValue());
ui->api->setEnabled(!UISettings::values.has_broken_vulkan);
ui->api_widget->setEnabled(!UISettings::values.has_broken_vulkan ||
Settings::IsConfiguringGlobal());
ui->bg_label->setVisible(Settings::IsConfiguringGlobal());
ui->bg_combobox->setVisible(!Settings::IsConfiguringGlobal());
}
@@ -315,7 +300,7 @@ void ConfigureGraphics::UpdateAPILayout() {
vulkan_device = Settings::values.vulkan_device.GetValue(true);
shader_backend = Settings::values.shader_backend.GetValue(true);
ui->device_widget->setEnabled(false);
ui->backend_widget->setEnabled(UISettings::values.has_broken_vulkan.GetValue());
ui->backend_widget->setEnabled(false);
} else {
vulkan_device = Settings::values.vulkan_device.GetValue();
shader_backend = Settings::values.shader_backend.GetValue();
@@ -337,9 +322,9 @@ void ConfigureGraphics::UpdateAPILayout() {
}
}
bool ConfigureGraphics::RetrieveVulkanDevices() try {
void ConfigureGraphics::RetrieveVulkanDevices() try {
if (UISettings::values.has_broken_vulkan) {
return false;
return;
}
using namespace Vulkan;
@@ -355,11 +340,8 @@ bool ConfigureGraphics::RetrieveVulkanDevices() try {
const std::string name = vk::PhysicalDevice(device, dld).GetProperties().deviceName;
vulkan_devices.push_back(QString::fromStdString(name));
}
return true;
} catch (const Vulkan::vk::Exception& exception) {
LOG_ERROR(Frontend, "Failed to enumerate devices with error: {}", exception.what());
return false;
}
Settings::RendererBackend ConfigureGraphics::GetCurrentGraphicsBackend() const {
@@ -440,11 +422,4 @@ void ConfigureGraphics::SetupPerGameUI() {
ui->api, static_cast<int>(Settings::values.renderer_backend.GetValue(true)));
ConfigurationShared::InsertGlobalItem(
ui->nvdec_emulation, static_cast<int>(Settings::values.nvdec_emulation.GetValue(true)));
if (UISettings::values.has_broken_vulkan) {
ui->backend_widget->setEnabled(true);
ConfigurationShared::SetColoredComboBox(
ui->backend, ui->backend_widget,
static_cast<int>(Settings::values.shader_backend.GetValue(true)));
}
}

View File

@@ -41,7 +41,7 @@ private:
void UpdateDeviceSelection(int device);
void UpdateShaderBackendSelection(int backend);
bool RetrieveVulkanDevices();
void RetrieveVulkanDevices();
void SetupPerGameUI();

View File

@@ -6,7 +6,7 @@
<rect>
<x>0</x>
<y>0</y>
<width>471</width>
<width>541</width>
<height>759</height>
</rect>
</property>
@@ -574,13 +574,6 @@
</property>
</spacer>
</item>
<item>
<widget class="QPushButton" name="button_check_vulkan">
<property name="text">
<string>Check for Working Vulkan</string>
</property>
</widget>
</item>
</layout>
</widget>
<resources/>

View File

@@ -115,7 +115,6 @@ static FileSys::VirtualFile VfsDirectoryCreateFileWrapper(const FileSys::Virtual
#include "video_core/shader_notify.h"
#include "yuzu/about_dialog.h"
#include "yuzu/bootmanager.h"
#include "yuzu/check_vulkan.h"
#include "yuzu/compatdb.h"
#include "yuzu/compatibility_list.h"
#include "yuzu/configuration/config.h"
@@ -131,6 +130,7 @@ static FileSys::VirtualFile VfsDirectoryCreateFileWrapper(const FileSys::Virtual
#include "yuzu/install_dialog.h"
#include "yuzu/loading_screen.h"
#include "yuzu/main.h"
#include "yuzu/startup_checks.h"
#include "yuzu/uisettings.h"
using namespace Common::Literals;
@@ -252,7 +252,7 @@ static QString PrettyProductName() {
return QSysInfo::prettyProductName();
}
GMainWindow::GMainWindow()
GMainWindow::GMainWindow(bool has_broken_vulkan)
: ui{std::make_unique<Ui::MainWindow>()}, system{std::make_unique<Core::System>()},
input_subsystem{std::make_shared<InputCommon::InputSubsystem>()},
config{std::make_unique<Config>(*system)},
@@ -352,17 +352,15 @@ GMainWindow::GMainWindow()
MigrateConfigFiles();
if (!CheckVulkan()) {
config->Save();
if (has_broken_vulkan) {
UISettings::values.has_broken_vulkan = true;
QMessageBox::warning(this, tr("Broken Vulkan Installation Detected"),
tr("Vulkan initialization failed during boot.<br><br>Click <a "
"href='https://yuzu-emu.org/wiki/faq/"
"#yuzu-starts-with-the-error-broken-vulkan-installation-detected'>"
"here for instructions to fix the issue</a>."));
QMessageBox::warning(
this, tr("Broken Vulkan Installation Detected"),
tr("Vulkan initialization failed on the previous boot.<br><br>Click <a "
"href='https://yuzu-emu.org/wiki/faq/"
"#yuzu-starts-with-the-error-broken-vulkan-installation-detected'>here for "
"instructions to fix the issue</a>."));
}
if (UISettings::values.has_broken_vulkan) {
Settings::values.renderer_backend = Settings::RendererBackend::OpenGL;
renderer_status_button->setDisabled(true);
@@ -3853,6 +3851,11 @@ void GMainWindow::SetDiscordEnabled([[maybe_unused]] bool state) {
#endif
int main(int argc, char* argv[]) {
bool has_broken_vulkan = false;
if (StartupChecks(argv[0], &has_broken_vulkan)) {
return 0;
}
Common::DetachedTasks detached_tasks;
MicroProfileOnThreadCreate("Frontend");
SCOPE_EXIT({ MicroProfileShutdown(); });
@@ -3892,7 +3895,7 @@ int main(int argc, char* argv[]) {
// generating shaders
setlocale(LC_ALL, "C");
GMainWindow main_window{};
GMainWindow main_window{has_broken_vulkan};
// After settings have been loaded by GMainWindow, apply the filter
main_window.show();

View File

@@ -118,7 +118,7 @@ class GMainWindow : public QMainWindow {
public:
void filterBarSetChecked(bool state);
void UpdateUITheme();
explicit GMainWindow();
explicit GMainWindow(bool has_broken_vulkan);
~GMainWindow() override;
bool DropAction(QDropEvent* event);

src/yuzu/startup_checks.cpp (Normal file, 136 lines)
View File

@@ -0,0 +1,136 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "video_core/vulkan_common/vulkan_wrapper.h"
#ifdef _WIN32
#include <cstring> // for memset, strncpy
#include <processthreadsapi.h>
#include <windows.h>
#elif defined(YUZU_UNIX)
#include <errno.h>
#include <sys/wait.h>
#include <unistd.h>
#endif
#include <cstdio>
#include "video_core/vulkan_common/vulkan_instance.h"
#include "video_core/vulkan_common/vulkan_library.h"
#include "yuzu/startup_checks.h"
void CheckVulkan() {
// Just start the Vulkan loader; this will crash if something is wrong
try {
Vulkan::vk::InstanceDispatch dld;
const Common::DynamicLibrary library = Vulkan::OpenLibrary();
const Vulkan::vk::Instance instance =
Vulkan::CreateInstance(library, dld, VK_API_VERSION_1_0);
} catch (const Vulkan::vk::Exception& exception) {
std::fprintf(stderr, "Failed to initialize Vulkan: %s\n", exception.what());
}
}
bool StartupChecks(const char* arg0, bool* has_broken_vulkan) {
#ifdef _WIN32
// Check environment variable to see if we are the child
char variable_contents[8];
const DWORD startup_check_var =
GetEnvironmentVariableA(STARTUP_CHECK_ENV_VAR, variable_contents, 8);
if (startup_check_var > 0 && std::strncmp(variable_contents, "ON", 8) == 0) {
CheckVulkan();
return true;
}
// Set the startup variable for child processes
const bool env_var_set = SetEnvironmentVariableA(STARTUP_CHECK_ENV_VAR, "ON");
if (!env_var_set) {
std::fprintf(stderr, "SetEnvironmentVariableA failed to set %s with error %d\n",
STARTUP_CHECK_ENV_VAR, GetLastError());
return false;
}
PROCESS_INFORMATION process_info;
std::memset(&process_info, '\0', sizeof(process_info));
if (!SpawnChild(arg0, &process_info)) {
return false;
}
// Wait until the process exits and get its exit code
WaitForSingleObject(process_info.hProcess, INFINITE);
DWORD exit_code = STILL_ACTIVE;
const int err = GetExitCodeProcess(process_info.hProcess, &exit_code);
if (err == 0) {
std::fprintf(stderr, "GetExitCodeProcess failed with error %d\n", GetLastError());
}
// Vulkan is broken if the child crashed (return value is not zero)
*has_broken_vulkan = (exit_code != 0);
if (CloseHandle(process_info.hProcess) == 0) {
std::fprintf(stderr, "CloseHandle failed with error %d\n", GetLastError());
}
if (CloseHandle(process_info.hThread) == 0) {
std::fprintf(stderr, "CloseHandle failed with error %d\n", GetLastError());
}
if (!SetEnvironmentVariableA(STARTUP_CHECK_ENV_VAR, nullptr)) {
std::fprintf(stderr, "SetEnvironmentVariableA failed to clear %s with error %d\n",
STARTUP_CHECK_ENV_VAR, GetLastError());
}
#elif defined(YUZU_UNIX)
const pid_t pid = fork();
if (pid == 0) {
CheckVulkan();
return true;
} else if (pid == -1) {
const int err = errno;
std::fprintf(stderr, "fork failed with error %d\n", err);
return false;
}
// Get exit code from child process
int status;
const int r_val = wait(&status);
if (r_val == -1) {
const int err = errno;
std::fprintf(stderr, "wait failed with error %d\n", err);
return false;
}
// Vulkan is broken if the child crashed (return value is not zero)
*has_broken_vulkan = (status != 0);
#endif
return false;
}
#ifdef _WIN32
bool SpawnChild(const char* arg0, PROCESS_INFORMATION* pi) {
STARTUPINFOA startup_info;
std::memset(&startup_info, '\0', sizeof(startup_info));
startup_info.cb = sizeof(startup_info);
char p_name[255];
std::strncpy(p_name, arg0, 255);
const bool process_created = CreateProcessA(nullptr, // lpApplicationName
p_name, // lpCommandLine
nullptr, // lpProcessAttributes
nullptr, // lpThreadAttributes
false, // bInheritHandles
0, // dwCreationFlags
nullptr, // lpEnvironment
nullptr, // lpCurrentDirectory
&startup_info, // lpStartupInfo
pi // lpProcessInformation
);
if (!process_created) {
std::fprintf(stderr, "CreateProcessA failed with error %d\n", GetLastError());
return false;
}
return true;
}
#endif
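(A note on the Unix branch above, not part of the change: wait() reports a crashed child, for example a SIGSEGV inside the Vulkan loader, through the same nonzero status as an ordinary failing exit, and the code simply folds both into has_broken_vulkan. If finer classification were ever wanted, the standard POSIX macros can tell the two apart; the helper below is only a sketch under that assumption.)

#include <cstdio>
#include <sys/wait.h>

// Sketch: classify a wait() status. The startup check only needs
// "status != 0", but these macros separate a crash from a clean nonzero exit.
bool ChildCrashed(int status) {
    if (WIFSIGNALED(status)) {
        std::fprintf(stderr, "child terminated by signal %d\n", WTERMSIG(status));
        return true;
    }
    if (WIFEXITED(status)) {
        return WEXITSTATUS(status) != 0;
    }
    return true; // stopped or unknown state: treat conservatively as broken
}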

src/yuzu/startup_checks.h (Normal file, 17 lines)
View File

@@ -0,0 +1,17 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#ifdef _WIN32
#include <windows.h>
#endif
constexpr char STARTUP_CHECK_ENV_VAR[] = "YUZU_DO_STARTUP_CHECKS";
void CheckVulkan();
bool StartupChecks(const char* arg0, bool* has_broken_vulkan);
#ifdef _WIN32
bool SpawnChild(const char* arg0, PROCESS_INFORMATION* pi);
#endif

View File

@@ -78,7 +78,7 @@ struct Values {
Settings::Setting<bool> mute_when_in_background{false, "muteWhenInBackground"};
Settings::Setting<bool> hide_mouse{true, "hideInactiveMouse"};
// Set when Vulkan is known to crash the application
Settings::Setting<bool> has_broken_vulkan{false, "has_broken_vulkan"};
bool has_broken_vulkan = false;
Settings::Setting<bool> select_user_on_boot{false, "select_user_on_boot"};