diff --git a/src/common/x64/native_clock.cpp b/src/common/x64/native_clock.cpp index 87de406240..6b43afb10f 100644 --- a/src/common/x64/native_clock.cpp +++ b/src/common/x64/native_clock.cpp @@ -58,7 +58,8 @@ u64 NativeClock::GetRTSC() { TimePoint new_time_point{}; TimePoint current_time_point{}; do { - current_time_point.pack = time_point.pack; + std::ignore = Common::AtomicCompareAndSwap(current_time_point.pack.data(), time_point.pack, + u128{0}); // comparison value doesn't matter _mm_mfence(); const u64 current_measure = __rdtsc(); u64 diff = current_measure - current_time_point.inner.last_measure; @@ -78,7 +79,9 @@ void NativeClock::Pause(bool is_paused) { TimePoint current_time_point{}; TimePoint new_time_point{}; do { - current_time_point.pack = time_point.pack; + std::ignore = + Common::AtomicCompareAndSwap(current_time_point.pack.data(), time_point.pack, + u128{0}); // comparison value doesn't matter new_time_point.pack = current_time_point.pack; _mm_mfence(); new_time_point.inner.last_measure = __rdtsc(); diff --git a/src/core/hle/kernel/k_scheduler_lock.h b/src/core/hle/kernel/k_scheduler_lock.h index c571f29921..69dc7772fd 100644 --- a/src/core/hle/kernel/k_scheduler_lock.h +++ b/src/core/hle/kernel/k_scheduler_lock.h @@ -4,6 +4,7 @@ #pragma once +#include <atomic> #include "common/assert.h" #include "core/hle/kernel/k_spin_lock.h" #include "core/hle/kernel/k_thread.h" @@ -19,7 +20,7 @@ public: explicit KAbstractSchedulerLock(KernelCore& kernel_) : kernel{kernel_} {} bool IsLockedByCurrentThread() const { - return owner_thread == GetCurrentThreadPointer(kernel); + return owner_thread.load(std::memory_order::consume) == GetCurrentThreadPointer(kernel); } void Lock() { @@ -38,7 +39,7 @@ public: // Increment count, take ownership. 
lock_count = 1; - owner_thread = GetCurrentThreadPointer(kernel); + owner_thread.store(GetCurrentThreadPointer(kernel), std::memory_order::release); } } @@ -53,7 +54,7 @@ public: SchedulerType::UpdateHighestPriorityThreads(kernel); // Note that we no longer hold the lock, and unlock the spinlock. - owner_thread = nullptr; + owner_thread.store(nullptr, std::memory_order::release); spin_lock.Unlock(); // Enable scheduling, and perform a rescheduling operation. @@ -65,7 +66,7 @@ private: KernelCore& kernel; KAlignedSpinLock spin_lock{}; s32 lock_count{}; - KThread* owner_thread{}; + std::atomic<KThread*> owner_thread{}; }; } // namespace Kernel