Compare commits
1 commit: 9af8200b25
@@ -15,5 +15,5 @@ mv "${REV_NAME}-source.tar.xz" $RELEASE_NAME

7z a "$REV_NAME.7z" $RELEASE_NAME

# move the compiled archive into the artifacts directory to be uploaded by travis releases
mv "$ARCHIVE_NAME" "${ARTIFACTS_DIR}/"
mv "$REV_NAME.7z" "${ARTIFACTS_DIR}/"
mv "$ARCHIVE_NAME" artifacts/
mv "$REV_NAME.7z" artifacts/
@@ -2,6 +2,5 @@

GITDATE="`git show -s --date=short --format='%ad' | sed 's/-//g'`"
GITREV="`git show -s --format='%h'`"
ARTIFACTS_DIR="artifacts"

mkdir -p "${ARTIFACTS_DIR}/"
mkdir -p artifacts
@@ -1,49 +1,14 @@

#!/bin/bash -ex

# Exit on error, rather than continuing with the rest of the script.
set -e

cd /yuzu

ccache -s

mkdir build || true && cd build
cmake .. -DDISPLAY_VERSION=$1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_C_COMPILER=/usr/lib/ccache/gcc -DCMAKE_CXX_COMPILER=/usr/lib/ccache/g++ -DYUZU_ENABLE_COMPATIBILITY_REPORTING=${ENABLE_COMPATIBILITY_REPORTING:-"OFF"} -DENABLE_COMPATIBILITY_LIST_DOWNLOAD=ON -DUSE_DISCORD_PRESENCE=ON -DENABLE_QT_TRANSLATION=ON -DCMAKE_INSTALL_PREFIX="/usr"
cmake .. -G Ninja -DDISPLAY_VERSION=$1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_C_COMPILER=/usr/lib/ccache/gcc -DCMAKE_CXX_COMPILER=/usr/lib/ccache/g++ -DYUZU_ENABLE_COMPATIBILITY_REPORTING=${ENABLE_COMPATIBILITY_REPORTING:-"OFF"} -DENABLE_COMPATIBILITY_LIST_DOWNLOAD=ON -DUSE_DISCORD_PRESENCE=ON -DENABLE_QT_TRANSLATION=ON

make -j$(nproc)
ninja

ccache -s

ctest -VV -C Release

make install DESTDIR=AppDir
rm -vf AppDir/usr/bin/yuzu-cmd AppDir/usr/bin/yuzu-tester

# Download tools needed to build an AppImage
wget -nc https://github.com/linuxdeploy/linuxdeploy/releases/download/continuous/linuxdeploy-x86_64.AppImage
wget -nc https://github.com/linuxdeploy/linuxdeploy-plugin-qt/releases/download/continuous/linuxdeploy-plugin-qt-x86_64.AppImage
wget -nc https://github.com/darealshinji/AppImageKit-checkrt/releases/download/continuous/AppRun-patched-x86_64
wget -nc https://github.com/darealshinji/AppImageKit-checkrt/releases/download/continuous/exec-x86_64.so
# Set executable bit
chmod 755 \
    AppRun-patched-x86_64 \
    exec-x86_64.so \
    linuxdeploy-x86_64.AppImage \
    linuxdeploy-plugin-qt-x86_64.AppImage

# Workaround for https://github.com/AppImage/AppImageKit/issues/828
export APPIMAGE_EXTRACT_AND_RUN=1

mkdir -p AppDir/usr/optional
mkdir -p AppDir/usr/optional/libstdc++
mkdir -p AppDir/usr/optional/libgcc_s

# Deploy yuzu's needed dependencies
./linuxdeploy-x86_64.AppImage --appdir AppDir --plugin qt

# Workaround for building yuzu with GCC 10 but also trying to distribute it to Ubuntu 18.04 et al.
# See https://github.com/darealshinji/AppImageKit-checkrt
cp exec-x86_64.so AppDir/usr/optional/exec.so
cp AppRun-patched-x86_64 AppDir/AppRun
cp --dereference /usr/lib/x86_64-linux-gnu/libstdc++.so.6 AppDir/usr/optional/libstdc++/libstdc++.so.6
cp --dereference /lib/x86_64-linux-gnu/libgcc_s.so.1 AppDir/usr/optional/libgcc_s/libgcc_s.so.1
@@ -2,7 +2,6 @@

. .ci/scripts/common/pre-upload.sh

APPIMAGE_NAME="yuzu-${GITDATE}-${GITREV}.AppImage"
REV_NAME="yuzu-linux-${GITDATE}-${GITREV}"
ARCHIVE_NAME="${REV_NAME}.tar.xz"
COMPRESSION_FLAGS="-cJvf"

@@ -18,24 +17,4 @@ mkdir "$DIR_NAME"

cp build/bin/yuzu-cmd "$DIR_NAME"
cp build/bin/yuzu "$DIR_NAME"

# Build an AppImage
cd build

wget -nc https://github.com/AppImage/AppImageKit/releases/download/continuous/appimagetool-x86_64.AppImage
chmod 755 appimagetool-x86_64.AppImage

if [ "${RELEASE_NAME}" = "mainline" ]; then
    # Generate update information if releasing to mainline
    ./appimagetool-x86_64.AppImage -u "gh-releases-zsync|yuzu-emu|yuzu-${RELEASE_NAME}|latest|yuzu-*.AppImage.zsync" AppDir "${APPIMAGE_NAME}"
else
    ./appimagetool-x86_64.AppImage AppDir "${APPIMAGE_NAME}"
fi
cd ..

# Copy the AppImage and update info to the artifacts directory and avoid compressing it
cp "build/${APPIMAGE_NAME}" "${ARTIFACTS_DIR}/"
if [ -f "build/${APPIMAGE_NAME}.zsync" ]; then
    cp "build/${APPIMAGE_NAME}.zsync" "${ARTIFACTS_DIR}/"
fi

. .ci/scripts/common/post-upload.sh
@@ -8,7 +8,7 @@ steps:

  displayName: 'Install vulkan-sdk'
- script: python -m pip install --upgrade pip conan
  displayName: 'Install conan'
- script: refreshenv && mkdir build && cd build && cmake -G "Visual Studio 16 2019" -A x64 --config Release -DYUZU_USE_BUNDLED_QT=1 -DYUZU_USE_QT_WEB_ENGINE=ON -DENABLE_COMPATIBILITY_LIST_DOWNLOAD=ON -DYUZU_ENABLE_COMPATIBILITY_REPORTING=${COMPAT} -DUSE_DISCORD_PRESENCE=ON -DENABLE_QT_TRANSLATION=ON -DDISPLAY_VERSION=${{ parameters['version'] }} .. && cd ..
- script: refreshenv && mkdir build && cd build && cmake -G "Visual Studio 16 2019" -A x64 --config Release -DYUZU_USE_BUNDLED_QT=1 -DYUZU_USE_QT_WEB_ENGINE=ON -DENABLE_COMPATIBILITY_LIST_DOWNLOAD=ON -DYUZU_ENABLE_COMPATIBILITY_REPORTING=${COMPAT} -DUSE_DISCORD_PRESENCE=ON -DDISPLAY_VERSION=${{ parameters['version'] }} .. && cd ..
  displayName: 'Configure CMake'
- task: MSBuild@1
  displayName: 'Build'
@@ -24,11 +24,9 @@ option(YUZU_ENABLE_BOXCAT "Enable the Boxcat service, a yuzu high-level implemen

option(ENABLE_CUBEB "Enables the cubeb audio backend" ON)

option(USE_DISCORD_PRESENCE "Enables Discord Rich Presence" OFF)
option(ENABLE_VULKAN "Enables Vulkan backend" ON)

if (NOT ENABLE_WEB_SERVICE)
    set(YUZU_ENABLE_BOXCAT OFF)
endif()
option(USE_DISCORD_PRESENCE "Enables Discord Rich Presence" OFF)

# Default to a Release build
get_property(IS_MULTI_CONFIG GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG)

@@ -169,7 +167,7 @@ macro(yuzu_find_packages)

    "lz4 1.8 lz4/1.9.2"
    "nlohmann_json 3.8 nlohmann_json/3.8.0"
    "ZLIB 1.2 zlib/1.2.11"
    "zstd 1.4 zstd/1.4.8"
    "zstd 1.4 zstd/1.4.5"
)

foreach(PACKAGE ${REQUIRED_LIBS})

@@ -243,7 +241,7 @@ if(ENABLE_QT)

    if (YUZU_USE_QT_WEB_ENGINE)
        find_package(Qt5 COMPONENTS WebEngineCore WebEngineWidgets)
    endif()

    if (ENABLE_QT_TRANSLATION)
        find_package(Qt5 REQUIRED COMPONENTS LinguistTools ${QT_PREFIX_HINT})
    endif()

@@ -326,7 +324,7 @@ if (CONAN_REQUIRED_LIBS)

        list(APPEND Boost_LIBRARIES Boost::context)
    endif()
endif()

# Due to issues with variable scopes in functions, we need to also find_package(qt5) outside of the function
if(ENABLE_QT)
    list(APPEND CMAKE_MODULE_PATH "${CONAN_QT_ROOT_RELEASE}")

@@ -30,6 +30,7 @@ If you want to contribute to the user interface translation, please check out th

* __Windows__: [Windows Build](https://github.com/yuzu-emu/yuzu/wiki/Building-For-Windows)
* __Linux__: [Linux Build](https://github.com/yuzu-emu/yuzu/wiki/Building-For-Linux)
* __macOS__: [macOS Build](https://github.com/yuzu-emu/yuzu/wiki/Building-for-macOS)

### Support
externals/CMakeLists.txt (vendored, 4 changed lines)
@@ -61,7 +61,9 @@ if (USE_DISCORD_PRESENCE)

endif()

# Sirit
add_subdirectory(sirit)
if (ENABLE_VULKAN)
    add_subdirectory(sirit)
endif()

# libzip
find_package(Libzip 1.5)
externals/dynarmic (vendored, 2 changed lines)
Submodule externals/dynarmic updated: 3806284cbe...0e1112b7df
@@ -45,15 +45,10 @@ if (MSVC)

        # Warnings
        /W3
        /we4062 # enumerator 'identifier' in a switch of enum 'enumeration' is not handled
        /we4101 # 'identifier': unreferenced local variable
        /we4265 # 'class': class has virtual functions, but destructor is not virtual
        /we4388 # signed/unsigned mismatch
        /we4547 # 'operator' : operator before comma has no effect; expected operator with side-effect
        /we4549 # 'operator1': operator before comma has no effect; did you intend 'operator2'?
        /we4555 # Expression has no effect; expected expression with side-effect
        /we4834 # Discarding return value of function with 'nodiscard' attribute
        /we5038 # data member 'member1' will be initialized after data member 'member2'
    )

    # /GS- - No stack buffer overflow checks

@@ -67,11 +62,7 @@ else()

        -Werror=implicit-fallthrough
        -Werror=missing-declarations
        -Werror=reorder
        -Werror=switch
        -Werror=uninitialized
        -Werror=unused-function
        -Werror=unused-result
        -Werror=unused-variable
        -Wextra
        -Wmissing-declarations
        -Wno-attributes
@@ -218,7 +218,7 @@ void Resample(s32* output, const s32* input, s32 pitch, s32& fraction, std::size

    const auto l2 = lut[lut_index + 2];
    const auto l3 = lut[lut_index + 3];

    const auto s0 = static_cast<s32>(input[index + 0]);
    const auto s0 = static_cast<s32>(input[index]);
    const auto s1 = static_cast<s32>(input[index + 1]);
    const auto s2 = static_cast<s32>(input[index + 2]);
    const auto s3 = static_cast<s32>(input[index + 3]);
@@ -11,6 +11,7 @@

#include "audio_core/info_updater.h"
#include "audio_core/voice_context.h"
#include "common/logging/log.h"
#include "core/hle/kernel/writable_event.h"
#include "core/memory.h"
#include "core/settings.h"

@@ -70,9 +71,10 @@ namespace {

namespace AudioCore {
AudioRenderer::AudioRenderer(Core::Timing::CoreTiming& core_timing, Core::Memory::Memory& memory_,
                             AudioCommon::AudioRendererParameter params,
                             Stream::ReleaseCallback&& release_callback,
                             std::shared_ptr<Kernel::WritableEvent> buffer_event_,
                             std::size_t instance_number)
    : worker_params{params}, memory_pool_info(params.effect_count + params.voice_count * 4),
    : worker_params{params}, buffer_event{buffer_event_},
      memory_pool_info(params.effect_count + params.voice_count * 4),
      voice_context(params.voice_count), effect_context(params.effect_count), mix_context(),
      sink_context(params.sink_count), splitter_context(),
      voices(params.voice_count), memory{memory_},

@@ -83,9 +85,10 @@ AudioRenderer::AudioRenderer(Core::Timing::CoreTiming& core_timing, Core::Memory

                                 params.num_splitter_send_channels);
    mix_context.Initialize(behavior_info, params.submix_count + 1, params.effect_count);
    audio_out = std::make_unique<AudioCore::AudioOut>();
    stream = audio_out->OpenStream(
        core_timing, params.sample_rate, AudioCommon::STREAM_NUM_CHANNELS,
        fmt::format("AudioRenderer-Instance{}", instance_number), std::move(release_callback));
    stream =
        audio_out->OpenStream(core_timing, params.sample_rate, AudioCommon::STREAM_NUM_CHANNELS,
                              fmt::format("AudioRenderer-Instance{}", instance_number),
                              [=]() { buffer_event_->Signal(); });
    audio_out->StartStream(stream);

    QueueMixedBuffer(0);
@@ -27,6 +27,10 @@ namespace Core::Timing {

class CoreTiming;
}

namespace Kernel {
class WritableEvent;
}

namespace Core::Memory {
class Memory;
}

@@ -40,7 +44,8 @@ class AudioRenderer {

public:
    AudioRenderer(Core::Timing::CoreTiming& core_timing, Core::Memory::Memory& memory_,
                  AudioCommon::AudioRendererParameter params,
                  Stream::ReleaseCallback&& release_callback, std::size_t instance_number);
                  std::shared_ptr<Kernel::WritableEvent> buffer_event_,
                  std::size_t instance_number);
    ~AudioRenderer();

    [[nodiscard]] ResultCode UpdateAudioRenderer(const std::vector<u8>& input_params,

@@ -56,6 +61,7 @@ private:

    BehaviorInfo behavior_info{};

    AudioCommon::AudioRendererParameter worker_params;
    std::shared_ptr<Kernel::WritableEvent> buffer_event;
    std::vector<ServerMemoryPoolInfo> memory_pool_info;
    VoiceContext voice_context;
    EffectContext effect_context;
@@ -130,11 +130,7 @@ bool Stream::ContainsBuffer([[maybe_unused]] Buffer::Tag tag) const {

std::vector<Buffer::Tag> Stream::GetTagsAndReleaseBuffers(std::size_t max_count) {
    std::vector<Buffer::Tag> tags;
    for (std::size_t count = 0; count < max_count && !released_buffers.empty(); ++count) {
        if (released_buffers.front()) {
            tags.push_back(released_buffers.front()->GetTag());
        } else {
            ASSERT_MSG(false, "Invalid tag in released_buffers!");
        }
        tags.push_back(released_buffers.front()->GetTag());
        released_buffers.pop();
    }
    return tags;

@@ -144,11 +140,7 @@ std::vector<Buffer::Tag> Stream::GetTagsAndReleaseBuffers() {

    std::vector<Buffer::Tag> tags;
    tags.reserve(released_buffers.size());
    while (!released_buffers.empty()) {
        if (released_buffers.front()) {
            tags.push_back(released_buffers.front()->GetTag());
        } else {
            ASSERT_MSG(false, "Invalid tag in released_buffers!");
        }
        tags.push_back(released_buffers.front()->GetTag());
        released_buffers.pop();
    }
    return tags;
@@ -123,7 +123,6 @@ add_library(common STATIC

    hash.h
    hex_util.cpp
    hex_util.h
    intrusive_red_black_tree.h
    logging/backend.cpp
    logging/backend.h
    logging/filter.cpp

@@ -136,6 +135,8 @@ add_library(common STATIC

    math_util.h
    memory_detect.cpp
    memory_detect.h
    memory_hook.cpp
    memory_hook.h
    microprofile.cpp
    microprofile.h
    microprofileui.h

@@ -144,7 +145,6 @@ add_library(common STATIC

    page_table.h
    param_package.cpp
    param_package.h
    parent_of_member.h
    quaternion.h
    ring_buffer.h
    scm_rev.cpp

@@ -162,14 +162,11 @@ add_library(common STATIC

    thread.cpp
    thread.h
    thread_queue_list.h
    thread_worker.cpp
    thread_worker.h
    threadsafe_queue.h
    time_zone.cpp
    time_zone.h
    timer.cpp
    timer.h
    tree.h
    uint128.cpp
    uint128.h
    uuid.cpp
@@ -93,14 +93,6 @@ __declspec(dllimport) void __stdcall DebugBreak(void);

        return static_cast<T>(key) == 0; \
    }

/// Evaluates a boolean expression, and returns a result unless that expression is true.
#define R_UNLESS(expr, res) \
    { \
        if (!(expr)) { \
            return res; \
        } \
    }

namespace Common {

[[nodiscard]] constexpr u32 MakeMagic(char a, char b, char c, char d) {
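For reference, the R_UNLESS macro above is an early-return guard for result-returning functions. A minimal usage sketch follows; the function, result codes, and PageSize constant are invented for illustration and are not taken from this diff.

// Illustrative only: bail out with an error result when a precondition fails.
ResultCode MapRegion(u64 address, u64 size) {
    R_UNLESS(size > 0, ResultInvalidSize);               // hypothetical error code
    R_UNLESS((address % PageSize) == 0, ResultInvalidAddress); // hypothetical constant/code
    // ... perform the mapping ...
    return RESULT_SUCCESS;
}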
@@ -31,8 +31,4 @@ concept DerivedFrom = requires {

    std::is_convertible_v<const volatile Derived*, const volatile Base*>;
};

// TODO: Replace with std::convertible_to when libc++ implements it.
template <typename From, typename To>
concept ConvertibleTo = std::is_convertible_v<From, To>;

} // namespace Common
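A ConvertibleTo concept like the one above is typically used to constrain template parameters. The sketch below restates the concept so it is self-contained; the AsDouble function is invented for illustration.

#include <type_traits>

template <typename From, typename To>
concept ConvertibleTo = std::is_convertible_v<From, To>;

// Accepts any argument type that converts to double, e.g. int, float, double.
template <ConvertibleTo<double> T>
constexpr double AsDouble(T value) {
    return static_cast<double>(value);
}

static_assert(AsDouble(2) == 2.0);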
@@ -11,16 +11,16 @@ namespace Common {

/// Ceiled integer division.
template <typename N, typename D>
requires std::is_integral_v<N>&& std::is_unsigned_v<D>[[nodiscard]] constexpr N DivCeil(N number,
                                                                                        D divisor) {
    return static_cast<N>((static_cast<D>(number) + divisor - 1) / divisor);
requires std::is_integral_v<N>&& std::is_unsigned_v<D>[[nodiscard]] constexpr auto DivCeil(
    N number, D divisor) {
    return (static_cast<D>(number) + divisor - 1) / divisor;
}

/// Ceiled integer division with logarithmic divisor in base 2
template <typename N, typename D>
requires std::is_integral_v<N>&& std::is_unsigned_v<D>[[nodiscard]] constexpr N DivCeilLog2(
requires std::is_integral_v<N>&& std::is_unsigned_v<D>[[nodiscard]] constexpr auto DivCeilLog2(
    N value, D alignment_log2) {
    return static_cast<N>((static_cast<D>(value) + (D(1) << alignment_log2) - 1) >> alignment_log2);
    return (static_cast<D>(value) + (D(1) << alignment_log2) - 1) >> alignment_log2;
}

} // namespace Common
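Both helpers above rely on the integer identity ceil(n / d) == (n + d - 1) / d. A tiny standalone check of that formula (not part of the diff; the function name is invented):

#include <cstdint>

// Same formula as DivCeil: round an integer division up instead of down.
constexpr std::uint64_t DivCeilExample(std::uint64_t number, std::uint64_t divisor) {
    return (number + divisor - 1) / divisor;
}

// 10 bytes split into 4-byte pages needs 3 pages; an exact multiple needs no extra page.
static_assert(DivCeilExample(10, 4) == 3);
static_assert(DivCeilExample(8, 4) == 2);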
@@ -1,627 +0,0 @@
|
||||
// Copyright 2021 yuzu Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "common/parent_of_member.h"
|
||||
#include "common/tree.h"
|
||||
|
||||
namespace Common {
|
||||
|
||||
namespace impl {
|
||||
|
||||
class IntrusiveRedBlackTreeImpl;
|
||||
|
||||
}
|
||||
|
||||
struct IntrusiveRedBlackTreeNode {
|
||||
|
||||
private:
|
||||
RB_ENTRY(IntrusiveRedBlackTreeNode) entry{};
|
||||
|
||||
friend class impl::IntrusiveRedBlackTreeImpl;
|
||||
|
||||
template <class, class, class>
|
||||
friend class IntrusiveRedBlackTree;
|
||||
|
||||
public:
|
||||
constexpr IntrusiveRedBlackTreeNode() = default;
|
||||
};
|
||||
|
||||
template <class T, class Traits, class Comparator>
|
||||
class IntrusiveRedBlackTree;
|
||||
|
||||
namespace impl {
|
||||
|
||||
class IntrusiveRedBlackTreeImpl {
|
||||
|
||||
private:
|
||||
template <class, class, class>
|
||||
friend class ::Common::IntrusiveRedBlackTree;
|
||||
|
||||
private:
|
||||
RB_HEAD(IntrusiveRedBlackTreeRoot, IntrusiveRedBlackTreeNode);
|
||||
using RootType = IntrusiveRedBlackTreeRoot;
|
||||
|
||||
private:
|
||||
IntrusiveRedBlackTreeRoot root;
|
||||
|
||||
public:
|
||||
template <bool Const>
|
||||
class Iterator;
|
||||
|
||||
using value_type = IntrusiveRedBlackTreeNode;
|
||||
using size_type = size_t;
|
||||
using difference_type = ptrdiff_t;
|
||||
using pointer = value_type*;
|
||||
using const_pointer = const value_type*;
|
||||
using reference = value_type&;
|
||||
using const_reference = const value_type&;
|
||||
using iterator = Iterator<false>;
|
||||
using const_iterator = Iterator<true>;
|
||||
|
||||
template <bool Const>
|
||||
class Iterator {
|
||||
public:
|
||||
using iterator_category = std::bidirectional_iterator_tag;
|
||||
using value_type = typename IntrusiveRedBlackTreeImpl::value_type;
|
||||
using difference_type = typename IntrusiveRedBlackTreeImpl::difference_type;
|
||||
using pointer = std::conditional_t<Const, IntrusiveRedBlackTreeImpl::const_pointer,
|
||||
IntrusiveRedBlackTreeImpl::pointer>;
|
||||
using reference = std::conditional_t<Const, IntrusiveRedBlackTreeImpl::const_reference,
|
||||
IntrusiveRedBlackTreeImpl::reference>;
|
||||
|
||||
private:
|
||||
pointer node;
|
||||
|
||||
public:
|
||||
explicit Iterator(pointer n) : node(n) {}
|
||||
|
||||
bool operator==(const Iterator& rhs) const {
|
||||
return this->node == rhs.node;
|
||||
}
|
||||
|
||||
bool operator!=(const Iterator& rhs) const {
|
||||
return !(*this == rhs);
|
||||
}
|
||||
|
||||
pointer operator->() const {
|
||||
return this->node;
|
||||
}
|
||||
|
||||
reference operator*() const {
|
||||
return *this->node;
|
||||
}
|
||||
|
||||
Iterator& operator++() {
|
||||
this->node = GetNext(this->node);
|
||||
return *this;
|
||||
}
|
||||
|
||||
Iterator& operator--() {
|
||||
this->node = GetPrev(this->node);
|
||||
return *this;
|
||||
}
|
||||
|
||||
Iterator operator++(int) {
|
||||
const Iterator it{*this};
|
||||
++(*this);
|
||||
return it;
|
||||
}
|
||||
|
||||
Iterator operator--(int) {
|
||||
const Iterator it{*this};
|
||||
--(*this);
|
||||
return it;
|
||||
}
|
||||
|
||||
operator Iterator<true>() const {
|
||||
return Iterator<true>(this->node);
|
||||
}
|
||||
};
|
||||
|
||||
protected:
|
||||
// Generate static implementations for non-comparison operations for IntrusiveRedBlackTreeRoot.
|
||||
RB_GENERATE_WITHOUT_COMPARE_STATIC(IntrusiveRedBlackTreeRoot, IntrusiveRedBlackTreeNode, entry);
|
||||
|
||||
private:
|
||||
// Define accessors using RB_* functions.
|
||||
constexpr void InitializeImpl() {
|
||||
RB_INIT(&this->root);
|
||||
}
|
||||
|
||||
bool EmptyImpl() const {
|
||||
return RB_EMPTY(&this->root);
|
||||
}
|
||||
|
||||
IntrusiveRedBlackTreeNode* GetMinImpl() const {
|
||||
return RB_MIN(IntrusiveRedBlackTreeRoot,
|
||||
const_cast<IntrusiveRedBlackTreeRoot*>(&this->root));
|
||||
}
|
||||
|
||||
IntrusiveRedBlackTreeNode* GetMaxImpl() const {
|
||||
return RB_MAX(IntrusiveRedBlackTreeRoot,
|
||||
const_cast<IntrusiveRedBlackTreeRoot*>(&this->root));
|
||||
}
|
||||
|
||||
IntrusiveRedBlackTreeNode* RemoveImpl(IntrusiveRedBlackTreeNode* node) {
|
||||
return RB_REMOVE(IntrusiveRedBlackTreeRoot, &this->root, node);
|
||||
}
|
||||
|
||||
public:
|
||||
static IntrusiveRedBlackTreeNode* GetNext(IntrusiveRedBlackTreeNode* node) {
|
||||
return RB_NEXT(IntrusiveRedBlackTreeRoot, nullptr, node);
|
||||
}
|
||||
|
||||
static IntrusiveRedBlackTreeNode* GetPrev(IntrusiveRedBlackTreeNode* node) {
|
||||
return RB_PREV(IntrusiveRedBlackTreeRoot, nullptr, node);
|
||||
}
|
||||
|
||||
static IntrusiveRedBlackTreeNode const* GetNext(const IntrusiveRedBlackTreeNode* node) {
|
||||
return static_cast<const IntrusiveRedBlackTreeNode*>(
|
||||
GetNext(const_cast<IntrusiveRedBlackTreeNode*>(node)));
|
||||
}
|
||||
|
||||
static IntrusiveRedBlackTreeNode const* GetPrev(const IntrusiveRedBlackTreeNode* node) {
|
||||
return static_cast<const IntrusiveRedBlackTreeNode*>(
|
||||
GetPrev(const_cast<IntrusiveRedBlackTreeNode*>(node)));
|
||||
}
|
||||
|
||||
public:
|
||||
constexpr IntrusiveRedBlackTreeImpl() : root() {
|
||||
this->InitializeImpl();
|
||||
}
|
||||
|
||||
// Iterator accessors.
|
||||
iterator begin() {
|
||||
return iterator(this->GetMinImpl());
|
||||
}
|
||||
|
||||
const_iterator begin() const {
|
||||
return const_iterator(this->GetMinImpl());
|
||||
}
|
||||
|
||||
iterator end() {
|
||||
return iterator(static_cast<IntrusiveRedBlackTreeNode*>(nullptr));
|
||||
}
|
||||
|
||||
const_iterator end() const {
|
||||
return const_iterator(static_cast<const IntrusiveRedBlackTreeNode*>(nullptr));
|
||||
}
|
||||
|
||||
const_iterator cbegin() const {
|
||||
return this->begin();
|
||||
}
|
||||
|
||||
const_iterator cend() const {
|
||||
return this->end();
|
||||
}
|
||||
|
||||
iterator iterator_to(reference ref) {
|
||||
return iterator(&ref);
|
||||
}
|
||||
|
||||
const_iterator iterator_to(const_reference ref) const {
|
||||
return const_iterator(&ref);
|
||||
}
|
||||
|
||||
// Content management.
|
||||
bool empty() const {
|
||||
return this->EmptyImpl();
|
||||
}
|
||||
|
||||
reference back() {
|
||||
return *this->GetMaxImpl();
|
||||
}
|
||||
|
||||
const_reference back() const {
|
||||
return *this->GetMaxImpl();
|
||||
}
|
||||
|
||||
reference front() {
|
||||
return *this->GetMinImpl();
|
||||
}
|
||||
|
||||
const_reference front() const {
|
||||
return *this->GetMinImpl();
|
||||
}
|
||||
|
||||
iterator erase(iterator it) {
|
||||
auto cur = std::addressof(*it);
|
||||
auto next = GetNext(cur);
|
||||
this->RemoveImpl(cur);
|
||||
return iterator(next);
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace impl
|
||||
|
||||
template <typename T>
|
||||
concept HasLightCompareType = requires {
|
||||
{ std::is_same<typename T::LightCompareType, void>::value }
|
||||
->std::convertible_to<bool>;
|
||||
};
|
||||
|
||||
namespace impl {
|
||||
|
||||
template <typename T, typename Default>
|
||||
consteval auto* GetLightCompareType() {
|
||||
if constexpr (HasLightCompareType<T>) {
|
||||
return static_cast<typename T::LightCompareType*>(nullptr);
|
||||
} else {
|
||||
return static_cast<Default*>(nullptr);
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace impl
|
||||
|
||||
template <typename T, typename Default>
|
||||
using LightCompareType = std::remove_pointer_t<decltype(impl::GetLightCompareType<T, Default>())>;
|
||||
|
||||
template <class T, class Traits, class Comparator>
|
||||
class IntrusiveRedBlackTree {
|
||||
|
||||
public:
|
||||
using ImplType = impl::IntrusiveRedBlackTreeImpl;
|
||||
|
||||
private:
|
||||
ImplType impl{};
|
||||
|
||||
public:
|
||||
struct IntrusiveRedBlackTreeRootWithCompare : ImplType::IntrusiveRedBlackTreeRoot {};
|
||||
|
||||
template <bool Const>
|
||||
class Iterator;
|
||||
|
||||
using value_type = T;
|
||||
using size_type = size_t;
|
||||
using difference_type = ptrdiff_t;
|
||||
using pointer = T*;
|
||||
using const_pointer = const T*;
|
||||
using reference = T&;
|
||||
using const_reference = const T&;
|
||||
using iterator = Iterator<false>;
|
||||
using const_iterator = Iterator<true>;
|
||||
|
||||
using light_value_type = LightCompareType<Comparator, value_type>;
|
||||
using const_light_pointer = const light_value_type*;
|
||||
using const_light_reference = const light_value_type&;
|
||||
|
||||
template <bool Const>
|
||||
class Iterator {
|
||||
public:
|
||||
friend class IntrusiveRedBlackTree<T, Traits, Comparator>;
|
||||
|
||||
using ImplIterator =
|
||||
std::conditional_t<Const, ImplType::const_iterator, ImplType::iterator>;
|
||||
|
||||
using iterator_category = std::bidirectional_iterator_tag;
|
||||
using value_type = typename IntrusiveRedBlackTree::value_type;
|
||||
using difference_type = typename IntrusiveRedBlackTree::difference_type;
|
||||
using pointer = std::conditional_t<Const, IntrusiveRedBlackTree::const_pointer,
|
||||
IntrusiveRedBlackTree::pointer>;
|
||||
using reference = std::conditional_t<Const, IntrusiveRedBlackTree::const_reference,
|
||||
IntrusiveRedBlackTree::reference>;
|
||||
|
||||
private:
|
||||
ImplIterator iterator;
|
||||
|
||||
private:
|
||||
explicit Iterator(ImplIterator it) : iterator(it) {}
|
||||
|
||||
explicit Iterator(typename std::conditional<Const, ImplType::const_iterator,
|
||||
ImplType::iterator>::type::pointer ptr)
|
||||
: iterator(ptr) {}
|
||||
|
||||
ImplIterator GetImplIterator() const {
|
||||
return this->iterator;
|
||||
}
|
||||
|
||||
public:
|
||||
bool operator==(const Iterator& rhs) const {
|
||||
return this->iterator == rhs.iterator;
|
||||
}
|
||||
|
||||
bool operator!=(const Iterator& rhs) const {
|
||||
return !(*this == rhs);
|
||||
}
|
||||
|
||||
pointer operator->() const {
|
||||
return Traits::GetParent(std::addressof(*this->iterator));
|
||||
}
|
||||
|
||||
reference operator*() const {
|
||||
return *Traits::GetParent(std::addressof(*this->iterator));
|
||||
}
|
||||
|
||||
Iterator& operator++() {
|
||||
++this->iterator;
|
||||
return *this;
|
||||
}
|
||||
|
||||
Iterator& operator--() {
|
||||
--this->iterator;
|
||||
return *this;
|
||||
}
|
||||
|
||||
Iterator operator++(int) {
|
||||
const Iterator it{*this};
|
||||
++this->iterator;
|
||||
return it;
|
||||
}
|
||||
|
||||
Iterator operator--(int) {
|
||||
const Iterator it{*this};
|
||||
--this->iterator;
|
||||
return it;
|
||||
}
|
||||
|
||||
operator Iterator<true>() const {
|
||||
return Iterator<true>(this->iterator);
|
||||
}
|
||||
};
|
||||
|
||||
private:
|
||||
// Generate static implementations for comparison operations for IntrusiveRedBlackTreeRoot.
|
||||
RB_GENERATE_WITH_COMPARE_STATIC(IntrusiveRedBlackTreeRootWithCompare, IntrusiveRedBlackTreeNode,
|
||||
entry, CompareImpl, LightCompareImpl);
|
||||
|
||||
private:
|
||||
static int CompareImpl(const IntrusiveRedBlackTreeNode* lhs,
|
||||
const IntrusiveRedBlackTreeNode* rhs) {
|
||||
return Comparator::Compare(*Traits::GetParent(lhs), *Traits::GetParent(rhs));
|
||||
}
|
||||
|
||||
static int LightCompareImpl(const void* elm, const IntrusiveRedBlackTreeNode* rhs) {
|
||||
return Comparator::Compare(*static_cast<const_light_pointer>(elm), *Traits::GetParent(rhs));
|
||||
}
|
||||
|
||||
// Define accessors using RB_* functions.
|
||||
IntrusiveRedBlackTreeNode* InsertImpl(IntrusiveRedBlackTreeNode* node) {
|
||||
return RB_INSERT(IntrusiveRedBlackTreeRootWithCompare,
|
||||
static_cast<IntrusiveRedBlackTreeRootWithCompare*>(&this->impl.root),
|
||||
node);
|
||||
}
|
||||
|
||||
IntrusiveRedBlackTreeNode* FindImpl(const IntrusiveRedBlackTreeNode* node) const {
|
||||
return RB_FIND(
|
||||
IntrusiveRedBlackTreeRootWithCompare,
|
||||
const_cast<IntrusiveRedBlackTreeRootWithCompare*>(
|
||||
static_cast<const IntrusiveRedBlackTreeRootWithCompare*>(&this->impl.root)),
|
||||
const_cast<IntrusiveRedBlackTreeNode*>(node));
|
||||
}
|
||||
|
||||
IntrusiveRedBlackTreeNode* NFindImpl(const IntrusiveRedBlackTreeNode* node) const {
|
||||
return RB_NFIND(
|
||||
IntrusiveRedBlackTreeRootWithCompare,
|
||||
const_cast<IntrusiveRedBlackTreeRootWithCompare*>(
|
||||
static_cast<const IntrusiveRedBlackTreeRootWithCompare*>(&this->impl.root)),
|
||||
const_cast<IntrusiveRedBlackTreeNode*>(node));
|
||||
}
|
||||
|
||||
IntrusiveRedBlackTreeNode* FindLightImpl(const_light_pointer lelm) const {
|
||||
return RB_FIND_LIGHT(
|
||||
IntrusiveRedBlackTreeRootWithCompare,
|
||||
const_cast<IntrusiveRedBlackTreeRootWithCompare*>(
|
||||
static_cast<const IntrusiveRedBlackTreeRootWithCompare*>(&this->impl.root)),
|
||||
static_cast<const void*>(lelm));
|
||||
}
|
||||
|
||||
IntrusiveRedBlackTreeNode* NFindLightImpl(const_light_pointer lelm) const {
|
||||
return RB_NFIND_LIGHT(
|
||||
IntrusiveRedBlackTreeRootWithCompare,
|
||||
const_cast<IntrusiveRedBlackTreeRootWithCompare*>(
|
||||
static_cast<const IntrusiveRedBlackTreeRootWithCompare*>(&this->impl.root)),
|
||||
static_cast<const void*>(lelm));
|
||||
}
|
||||
|
||||
public:
|
||||
constexpr IntrusiveRedBlackTree() = default;
|
||||
|
||||
// Iterator accessors.
|
||||
iterator begin() {
|
||||
return iterator(this->impl.begin());
|
||||
}
|
||||
|
||||
const_iterator begin() const {
|
||||
return const_iterator(this->impl.begin());
|
||||
}
|
||||
|
||||
iterator end() {
|
||||
return iterator(this->impl.end());
|
||||
}
|
||||
|
||||
const_iterator end() const {
|
||||
return const_iterator(this->impl.end());
|
||||
}
|
||||
|
||||
const_iterator cbegin() const {
|
||||
return this->begin();
|
||||
}
|
||||
|
||||
const_iterator cend() const {
|
||||
return this->end();
|
||||
}
|
||||
|
||||
iterator iterator_to(reference ref) {
|
||||
return iterator(this->impl.iterator_to(*Traits::GetNode(std::addressof(ref))));
|
||||
}
|
||||
|
||||
const_iterator iterator_to(const_reference ref) const {
|
||||
return const_iterator(this->impl.iterator_to(*Traits::GetNode(std::addressof(ref))));
|
||||
}
|
||||
|
||||
// Content management.
|
||||
bool empty() const {
|
||||
return this->impl.empty();
|
||||
}
|
||||
|
||||
reference back() {
|
||||
return *Traits::GetParent(std::addressof(this->impl.back()));
|
||||
}
|
||||
|
||||
const_reference back() const {
|
||||
return *Traits::GetParent(std::addressof(this->impl.back()));
|
||||
}
|
||||
|
||||
reference front() {
|
||||
return *Traits::GetParent(std::addressof(this->impl.front()));
|
||||
}
|
||||
|
||||
const_reference front() const {
|
||||
return *Traits::GetParent(std::addressof(this->impl.front()));
|
||||
}
|
||||
|
||||
iterator erase(iterator it) {
|
||||
return iterator(this->impl.erase(it.GetImplIterator()));
|
||||
}
|
||||
|
||||
iterator insert(reference ref) {
|
||||
ImplType::pointer node = Traits::GetNode(std::addressof(ref));
|
||||
this->InsertImpl(node);
|
||||
return iterator(node);
|
||||
}
|
||||
|
||||
iterator find(const_reference ref) const {
|
||||
return iterator(this->FindImpl(Traits::GetNode(std::addressof(ref))));
|
||||
}
|
||||
|
||||
iterator nfind(const_reference ref) const {
|
||||
return iterator(this->NFindImpl(Traits::GetNode(std::addressof(ref))));
|
||||
}
|
||||
|
||||
iterator find_light(const_light_reference ref) const {
|
||||
return iterator(this->FindLightImpl(std::addressof(ref)));
|
||||
}
|
||||
|
||||
iterator nfind_light(const_light_reference ref) const {
|
||||
return iterator(this->NFindLightImpl(std::addressof(ref)));
|
||||
}
|
||||
};
|
||||
|
||||
template <auto T, class Derived = impl::GetParentType<T>>
|
||||
class IntrusiveRedBlackTreeMemberTraits;
|
||||
|
||||
template <class Parent, IntrusiveRedBlackTreeNode Parent::*Member, class Derived>
|
||||
class IntrusiveRedBlackTreeMemberTraits<Member, Derived> {
|
||||
public:
|
||||
template <class Comparator>
|
||||
using TreeType = IntrusiveRedBlackTree<Derived, IntrusiveRedBlackTreeMemberTraits, Comparator>;
|
||||
using TreeTypeImpl = impl::IntrusiveRedBlackTreeImpl;
|
||||
|
||||
private:
|
||||
template <class, class, class>
|
||||
friend class IntrusiveRedBlackTree;
|
||||
|
||||
friend class impl::IntrusiveRedBlackTreeImpl;
|
||||
|
||||
static constexpr IntrusiveRedBlackTreeNode* GetNode(Derived* parent) {
|
||||
return std::addressof(parent->*Member);
|
||||
}
|
||||
|
||||
static constexpr IntrusiveRedBlackTreeNode const* GetNode(Derived const* parent) {
|
||||
return std::addressof(parent->*Member);
|
||||
}
|
||||
|
||||
static constexpr Derived* GetParent(IntrusiveRedBlackTreeNode* node) {
|
||||
return GetParentPointer<Member, Derived>(node);
|
||||
}
|
||||
|
||||
static constexpr Derived const* GetParent(const IntrusiveRedBlackTreeNode* node) {
|
||||
return GetParentPointer<Member, Derived>(node);
|
||||
}
|
||||
|
||||
private:
|
||||
static constexpr TypedStorage<Derived> DerivedStorage = {};
|
||||
static_assert(GetParent(GetNode(GetPointer(DerivedStorage))) == GetPointer(DerivedStorage));
|
||||
};
|
||||
|
||||
template <auto T, class Derived = impl::GetParentType<T>>
|
||||
class IntrusiveRedBlackTreeMemberTraitsDeferredAssert;
|
||||
|
||||
template <class Parent, IntrusiveRedBlackTreeNode Parent::*Member, class Derived>
|
||||
class IntrusiveRedBlackTreeMemberTraitsDeferredAssert<Member, Derived> {
|
||||
public:
|
||||
template <class Comparator>
|
||||
using TreeType =
|
||||
IntrusiveRedBlackTree<Derived, IntrusiveRedBlackTreeMemberTraitsDeferredAssert, Comparator>;
|
||||
using TreeTypeImpl = impl::IntrusiveRedBlackTreeImpl;
|
||||
|
||||
static constexpr bool IsValid() {
|
||||
TypedStorage<Derived> DerivedStorage = {};
|
||||
return GetParent(GetNode(GetPointer(DerivedStorage))) == GetPointer(DerivedStorage);
|
||||
}
|
||||
|
||||
private:
|
||||
template <class, class, class>
|
||||
friend class IntrusiveRedBlackTree;
|
||||
|
||||
friend class impl::IntrusiveRedBlackTreeImpl;
|
||||
|
||||
static constexpr IntrusiveRedBlackTreeNode* GetNode(Derived* parent) {
|
||||
return std::addressof(parent->*Member);
|
||||
}
|
||||
|
||||
static constexpr IntrusiveRedBlackTreeNode const* GetNode(Derived const* parent) {
|
||||
return std::addressof(parent->*Member);
|
||||
}
|
||||
|
||||
static constexpr Derived* GetParent(IntrusiveRedBlackTreeNode* node) {
|
||||
return GetParentPointer<Member, Derived>(node);
|
||||
}
|
||||
|
||||
static constexpr Derived const* GetParent(const IntrusiveRedBlackTreeNode* node) {
|
||||
return GetParentPointer<Member, Derived>(node);
|
||||
}
|
||||
};
|
||||
|
||||
template <class Derived>
|
||||
class IntrusiveRedBlackTreeBaseNode : public IntrusiveRedBlackTreeNode {
|
||||
public:
|
||||
constexpr Derived* GetPrev() {
|
||||
return static_cast<Derived*>(impl::IntrusiveRedBlackTreeImpl::GetPrev(this));
|
||||
}
|
||||
constexpr const Derived* GetPrev() const {
|
||||
return static_cast<const Derived*>(impl::IntrusiveRedBlackTreeImpl::GetPrev(this));
|
||||
}
|
||||
|
||||
constexpr Derived* GetNext() {
|
||||
return static_cast<Derived*>(impl::IntrusiveRedBlackTreeImpl::GetNext(this));
|
||||
}
|
||||
constexpr const Derived* GetNext() const {
|
||||
return static_cast<const Derived*>(impl::IntrusiveRedBlackTreeImpl::GetNext(this));
|
||||
}
|
||||
};
|
||||
|
||||
template <class Derived>
|
||||
class IntrusiveRedBlackTreeBaseTraits {
|
||||
public:
|
||||
template <class Comparator>
|
||||
using TreeType = IntrusiveRedBlackTree<Derived, IntrusiveRedBlackTreeBaseTraits, Comparator>;
|
||||
using TreeTypeImpl = impl::IntrusiveRedBlackTreeImpl;
|
||||
|
||||
private:
|
||||
template <class, class, class>
|
||||
friend class IntrusiveRedBlackTree;
|
||||
|
||||
friend class impl::IntrusiveRedBlackTreeImpl;
|
||||
|
||||
static constexpr IntrusiveRedBlackTreeNode* GetNode(Derived* parent) {
|
||||
return static_cast<IntrusiveRedBlackTreeNode*>(parent);
|
||||
}
|
||||
|
||||
static constexpr IntrusiveRedBlackTreeNode const* GetNode(Derived const* parent) {
|
||||
return static_cast<const IntrusiveRedBlackTreeNode*>(parent);
|
||||
}
|
||||
|
||||
static constexpr Derived* GetParent(IntrusiveRedBlackTreeNode* node) {
|
||||
return static_cast<Derived*>(node);
|
||||
}
|
||||
|
||||
static constexpr Derived const* GetParent(const IntrusiveRedBlackTreeNode* node) {
|
||||
return static_cast<const Derived*>(node);
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace Common
|
||||
@@ -145,18 +145,10 @@ void ColorConsoleBackend::Write(const Entry& entry) {

    PrintColoredMessage(entry);
}

FileBackend::FileBackend(const std::string& filename) : bytes_written(0) {
    if (Common::FS::Exists(filename + ".old.txt")) {
        Common::FS::Delete(filename + ".old.txt");
    }
    if (Common::FS::Exists(filename)) {
        Common::FS::Rename(filename, filename + ".old.txt");
    }

    // _SH_DENYWR allows read only access to the file for other programs.
    // It is #defined to 0 on other platforms
    file = Common::FS::IOFile(filename, "w", _SH_DENYWR);
}
// _SH_DENYWR allows read only access to the file for other programs.
// It is #defined to 0 on other platforms
FileBackend::FileBackend(const std::string& filename)
    : file(filename, "w", _SH_DENYWR), bytes_written(0) {}

void FileBackend::Write(const Entry& entry) {
    // prevent logs from going over the maximum size (in case its spamming and the user doesn't
src/common/memory_hook.cpp (new file, 11 lines)

@@ -0,0 +1,11 @@

// Copyright 2018 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "common/memory_hook.h"

namespace Common {

MemoryHook::~MemoryHook() = default;

} // namespace Common

src/common/memory_hook.h (new file, 47 lines)

@@ -0,0 +1,47 @@

// Copyright 2016 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <memory>
#include <optional>

#include "common/common_types.h"

namespace Common {

/**
 * Memory hooks have two purposes:
 * 1. To allow reads and writes to a region of memory to be intercepted. This is used to implement
 *    texture forwarding and memory breakpoints for debugging.
 * 2. To allow for the implementation of MMIO devices.
 *
 * A hook may be mapped to multiple regions of memory.
 *
 * If a std::nullopt or false is returned from a function, the read/write request is passed through
 * to the underlying memory region.
 */
class MemoryHook {
public:
    virtual ~MemoryHook();

    virtual std::optional<bool> IsValidAddress(VAddr addr) = 0;

    virtual std::optional<u8> Read8(VAddr addr) = 0;
    virtual std::optional<u16> Read16(VAddr addr) = 0;
    virtual std::optional<u32> Read32(VAddr addr) = 0;
    virtual std::optional<u64> Read64(VAddr addr) = 0;

    virtual bool ReadBlock(VAddr src_addr, void* dest_buffer, std::size_t size) = 0;

    virtual bool Write8(VAddr addr, u8 data) = 0;
    virtual bool Write16(VAddr addr, u16 data) = 0;
    virtual bool Write32(VAddr addr, u32 data) = 0;
    virtual bool Write64(VAddr addr, u64 data) = 0;

    virtual bool WriteBlock(VAddr dest_addr, const void* src_buffer, std::size_t size) = 0;
};

using MemoryHookPointer = std::shared_ptr<MemoryHook>;
} // namespace Common
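The doc comment above describes the pass-through contract: returning std::nullopt or false forwards the access to regular memory. A minimal hook that only observes 8-bit writes to one address might look like the sketch below; the class name, watched address idea, and hit counter are illustrative, not part of the diff, and the usual yuzu typedefs (VAddr, u8, ...) are assumed to be in scope via common_types.h.

#include "common/memory_hook.h"

// Illustrative only: all reads and writes fall through to regular memory;
// 8-bit writes to the watched address are counted before falling through.
class WriteWatchHook final : public Common::MemoryHook {
public:
    explicit WriteWatchHook(VAddr watched) : watched_addr{watched} {}

    std::optional<bool> IsValidAddress(VAddr) override { return std::nullopt; }

    std::optional<u8> Read8(VAddr) override { return std::nullopt; }
    std::optional<u16> Read16(VAddr) override { return std::nullopt; }
    std::optional<u32> Read32(VAddr) override { return std::nullopt; }
    std::optional<u64> Read64(VAddr) override { return std::nullopt; }
    bool ReadBlock(VAddr, void*, std::size_t) override { return false; }

    bool Write8(VAddr addr, u8) override {
        if (addr == watched_addr) {
            ++hit_count; // record the access, then let regular memory handle the write
        }
        return false;
    }
    bool Write16(VAddr, u16) override { return false; }
    bool Write32(VAddr, u32) override { return false; }
    bool Write64(VAddr, u64) override { return false; }
    bool WriteBlock(VAddr, const void*, std::size_t) override { return false; }

private:
    VAddr watched_addr;
    std::size_t hit_count = 0;
};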
@@ -10,10 +10,16 @@ PageTable::PageTable() = default;

PageTable::~PageTable() noexcept = default;

void PageTable::Resize(size_t address_space_width_in_bits, size_t page_size_in_bits) {
    const size_t num_page_table_entries{1ULL << (address_space_width_in_bits - page_size_in_bits)};
void PageTable::Resize(std::size_t address_space_width_in_bits, std::size_t page_size_in_bits,
                       bool has_attribute) {
    const std::size_t num_page_table_entries{1ULL
                                             << (address_space_width_in_bits - page_size_in_bits)};
    pointers.resize(num_page_table_entries);
    backing_addr.resize(num_page_table_entries);

    if (has_attribute) {
        attributes.resize(num_page_table_entries);
    }
}

} // namespace Common
@@ -4,10 +4,10 @@

#pragma once

#include <atomic>
#include <tuple>

#include "common/common_types.h"
#include "common/memory_hook.h"
#include "common/virtual_buffer.h"

namespace Common {

@@ -20,6 +20,27 @@ enum class PageType : u8 {

    /// Page is mapped to regular memory, but also needs to check for rasterizer cache flushing and
    /// invalidation
    RasterizerCachedMemory,
    /// Page is mapped to a I/O region. Writing and reading to this page is handled by functions.
    Special,
    /// Page is allocated for use.
    Allocated,
};

struct SpecialRegion {
    enum class Type {
        DebugHook,
        IODevice,
    } type;

    MemoryHookPointer handler;

    [[nodiscard]] bool operator<(const SpecialRegion& other) const {
        return std::tie(type, handler) < std::tie(other.type, other.handler);
    }

    [[nodiscard]] bool operator==(const SpecialRegion& other) const {
        return std::tie(type, handler) == std::tie(other.type, other.handler);
    }
};

/**

@@ -27,59 +48,6 @@ enum class PageType : u8 {

 * mimics the way a real CPU page table works.
 */
struct PageTable {
    /// Number of bits reserved for attribute tagging.
    /// This can be at most the guaranteed alignment of the pointers in the page table.
    static constexpr int ATTRIBUTE_BITS = 2;

    /**
     * Pair of host pointer and page type attribute.
     * This uses the lower bits of a given pointer to store the attribute tag.
     * Writing and reading the pointer attribute pair is guaranteed to be atomic for the same method
     * call. In other words, they are guaranteed to be synchronized at all times.
     */
    class PageInfo {
    public:
        /// Returns the page pointer
        [[nodiscard]] u8* Pointer() const noexcept {
            return ExtractPointer(raw.load(std::memory_order_relaxed));
        }

        /// Returns the page type attribute
        [[nodiscard]] PageType Type() const noexcept {
            return ExtractType(raw.load(std::memory_order_relaxed));
        }

        /// Returns the page pointer and attribute pair, extracted from the same atomic read
        [[nodiscard]] std::pair<u8*, PageType> PointerType() const noexcept {
            const uintptr_t non_atomic_raw = raw.load(std::memory_order_relaxed);
            return {ExtractPointer(non_atomic_raw), ExtractType(non_atomic_raw)};
        }

        /// Returns the raw representation of the page information.
        /// Use ExtractPointer and ExtractType to unpack the value.
        [[nodiscard]] uintptr_t Raw() const noexcept {
            return raw.load(std::memory_order_relaxed);
        }

        /// Write a page pointer and type pair atomically
        void Store(u8* pointer, PageType type) noexcept {
            raw.store(reinterpret_cast<uintptr_t>(pointer) | static_cast<uintptr_t>(type));
        }

        /// Unpack a pointer from a page info raw representation
        [[nodiscard]] static u8* ExtractPointer(uintptr_t raw) noexcept {
            return reinterpret_cast<u8*>(raw & (~uintptr_t{0} << ATTRIBUTE_BITS));
        }

        /// Unpack a page type from a page info raw representation
        [[nodiscard]] static PageType ExtractType(uintptr_t raw) noexcept {
            return static_cast<PageType>(raw & ((uintptr_t{1} << ATTRIBUTE_BITS) - 1));
        }

    private:
        std::atomic<uintptr_t> raw;
    };

    PageTable();
    ~PageTable() noexcept;

@@ -90,21 +58,25 @@ struct PageTable {

    PageTable& operator=(PageTable&&) noexcept = default;

    /**
     * Resizes the page table to be able to accommodate enough pages within
     * Resizes the page table to be able to accomodate enough pages within
     * a given address space.
     *
     * @param address_space_width_in_bits The address size width in bits.
     * @param page_size_in_bits The page size in bits.
     * @param has_attribute Whether or not this page has any backing attributes.
     */
    void Resize(size_t address_space_width_in_bits, size_t page_size_in_bits);
    void Resize(std::size_t address_space_width_in_bits, std::size_t page_size_in_bits,
                bool has_attribute);

    /**
     * Vector of memory pointers backing each page. An entry can only be non-null if the
     * corresponding attribute element is of type `Memory`.
     * corresponding entry in the `attributes` vector is of type `Memory`.
     */
    VirtualBuffer<PageInfo> pointers;
    VirtualBuffer<u8*> pointers;

    VirtualBuffer<u64> backing_addr;

    VirtualBuffer<PageType> attributes;
};

} // namespace Common
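The PageInfo comment above relies on pointer alignment to pack a 2-bit page-type tag into the low bits of a host pointer and to read both atomically. A simplified, standalone sketch of that packing trick (not the diff's exact class; the Tag names are illustrative):

#include <cstdint>

// Pointers to sufficiently aligned memory always have their two low bits clear,
// so a small enum can be stored there and masked back out.
enum class Tag : std::uintptr_t { Unmapped = 0, Memory = 1, Special = 2, Allocated = 3 };

constexpr std::uintptr_t kTagMask = 0b11;

inline std::uintptr_t Pack(void* pointer, Tag tag) {
    return reinterpret_cast<std::uintptr_t>(pointer) | static_cast<std::uintptr_t>(tag);
}

inline void* UnpackPointer(std::uintptr_t raw) {
    return reinterpret_cast<void*>(raw & ~kTagMask);
}

inline Tag UnpackTag(std::uintptr_t raw) {
    return static_cast<Tag>(raw & kTagMask);
}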
@@ -1,191 +0,0 @@
|
||||
// Copyright 2021 yuzu Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <type_traits>
|
||||
|
||||
#include "common/assert.h"
|
||||
#include "common/common_types.h"
|
||||
|
||||
namespace Common {
|
||||
namespace detail {
|
||||
template <typename T, size_t Size, size_t Align>
|
||||
struct TypedStorageImpl {
|
||||
std::aligned_storage_t<Size, Align> storage_;
|
||||
};
|
||||
} // namespace detail
|
||||
|
||||
template <typename T>
|
||||
using TypedStorage = detail::TypedStorageImpl<T, sizeof(T), alignof(T)>;
|
||||
|
||||
template <typename T>
|
||||
static constexpr T* GetPointer(TypedStorage<T>& ts) {
|
||||
return static_cast<T*>(static_cast<void*>(std::addressof(ts.storage_)));
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
static constexpr const T* GetPointer(const TypedStorage<T>& ts) {
|
||||
return static_cast<const T*>(static_cast<const void*>(std::addressof(ts.storage_)));
|
||||
}
|
||||
|
||||
namespace impl {
|
||||
|
||||
template <size_t MaxDepth>
|
||||
struct OffsetOfUnionHolder {
|
||||
template <typename ParentType, typename MemberType, size_t Offset>
|
||||
union UnionImpl {
|
||||
using PaddingMember = char;
|
||||
static constexpr size_t GetOffset() {
|
||||
return Offset;
|
||||
}
|
||||
|
||||
#pragma pack(push, 1)
|
||||
struct {
|
||||
PaddingMember padding[Offset];
|
||||
MemberType members[(sizeof(ParentType) / sizeof(MemberType)) + 1];
|
||||
} data;
|
||||
#pragma pack(pop)
|
||||
UnionImpl<ParentType, MemberType, Offset + 1> next_union;
|
||||
};
|
||||
|
||||
template <typename ParentType, typename MemberType>
|
||||
union UnionImpl<ParentType, MemberType, 0> {
|
||||
static constexpr size_t GetOffset() {
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct {
|
||||
MemberType members[(sizeof(ParentType) / sizeof(MemberType)) + 1];
|
||||
} data;
|
||||
UnionImpl<ParentType, MemberType, 1> next_union;
|
||||
};
|
||||
|
||||
template <typename ParentType, typename MemberType>
|
||||
union UnionImpl<ParentType, MemberType, MaxDepth> {};
|
||||
};
|
||||
|
||||
template <typename ParentType, typename MemberType>
|
||||
struct OffsetOfCalculator {
|
||||
using UnionHolder =
|
||||
typename OffsetOfUnionHolder<sizeof(MemberType)>::template UnionImpl<ParentType, MemberType,
|
||||
0>;
|
||||
union Union {
|
||||
char c{};
|
||||
UnionHolder first_union;
|
||||
TypedStorage<ParentType> parent;
|
||||
|
||||
constexpr Union() : c() {}
|
||||
};
|
||||
static constexpr Union U = {};
|
||||
|
||||
static constexpr const MemberType* GetNextAddress(const MemberType* start,
|
||||
const MemberType* target) {
|
||||
while (start < target) {
|
||||
start++;
|
||||
}
|
||||
return start;
|
||||
}
|
||||
|
||||
static constexpr std::ptrdiff_t GetDifference(const MemberType* start,
|
||||
const MemberType* target) {
|
||||
return (target - start) * sizeof(MemberType);
|
||||
}
|
||||
|
||||
template <typename CurUnion>
|
||||
static constexpr std::ptrdiff_t OffsetOfImpl(MemberType ParentType::*member,
|
||||
CurUnion& cur_union) {
|
||||
constexpr size_t Offset = CurUnion::GetOffset();
|
||||
const auto target = std::addressof(GetPointer(U.parent)->*member);
|
||||
const auto start = std::addressof(cur_union.data.members[0]);
|
||||
const auto next = GetNextAddress(start, target);
|
||||
|
||||
if (next != target) {
|
||||
if constexpr (Offset < sizeof(MemberType) - 1) {
|
||||
return OffsetOfImpl(member, cur_union.next_union);
|
||||
} else {
|
||||
UNREACHABLE();
|
||||
}
|
||||
}
|
||||
|
||||
return (next - start) * sizeof(MemberType) + Offset;
|
||||
}
|
||||
|
||||
static constexpr std::ptrdiff_t OffsetOf(MemberType ParentType::*member) {
|
||||
return OffsetOfImpl(member, U.first_union);
|
||||
}
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
struct GetMemberPointerTraits;
|
||||
|
||||
template <typename P, typename M>
|
||||
struct GetMemberPointerTraits<M P::*> {
|
||||
using Parent = P;
|
||||
using Member = M;
|
||||
};
|
||||
|
||||
template <auto MemberPtr>
|
||||
using GetParentType = typename GetMemberPointerTraits<decltype(MemberPtr)>::Parent;
|
||||
|
||||
template <auto MemberPtr>
|
||||
using GetMemberType = typename GetMemberPointerTraits<decltype(MemberPtr)>::Member;
|
||||
|
||||
template <auto MemberPtr, typename RealParentType = GetParentType<MemberPtr>>
|
||||
static inline std::ptrdiff_t OffsetOf = [] {
|
||||
using DeducedParentType = GetParentType<MemberPtr>;
|
||||
using MemberType = GetMemberType<MemberPtr>;
|
||||
static_assert(std::is_base_of<DeducedParentType, RealParentType>::value ||
|
||||
std::is_same<RealParentType, DeducedParentType>::value);
|
||||
|
||||
return OffsetOfCalculator<RealParentType, MemberType>::OffsetOf(MemberPtr);
|
||||
}();
|
||||
|
||||
} // namespace impl
|
||||
|
||||
template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
|
||||
constexpr RealParentType& GetParentReference(impl::GetMemberType<MemberPtr>* member) {
|
||||
std::ptrdiff_t Offset = impl::OffsetOf<MemberPtr, RealParentType>;
|
||||
return *static_cast<RealParentType*>(
|
||||
static_cast<void*>(static_cast<uint8_t*>(static_cast<void*>(member)) - Offset));
|
||||
}
|
||||
|
||||
template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
|
||||
constexpr RealParentType const& GetParentReference(impl::GetMemberType<MemberPtr> const* member) {
|
||||
std::ptrdiff_t Offset = impl::OffsetOf<MemberPtr, RealParentType>;
|
||||
return *static_cast<const RealParentType*>(static_cast<const void*>(
|
||||
static_cast<const uint8_t*>(static_cast<const void*>(member)) - Offset));
|
||||
}
|
||||
|
||||
template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
|
||||
constexpr RealParentType* GetParentPointer(impl::GetMemberType<MemberPtr>* member) {
|
||||
return std::addressof(GetParentReference<MemberPtr, RealParentType>(member));
|
||||
}
|
||||
|
||||
template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
|
||||
constexpr RealParentType const* GetParentPointer(impl::GetMemberType<MemberPtr> const* member) {
|
||||
return std::addressof(GetParentReference<MemberPtr, RealParentType>(member));
|
||||
}
|
||||
|
||||
template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
|
||||
constexpr RealParentType& GetParentReference(impl::GetMemberType<MemberPtr>& member) {
|
||||
return GetParentReference<MemberPtr, RealParentType>(std::addressof(member));
|
||||
}
|
||||
|
||||
template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
|
||||
constexpr RealParentType const& GetParentReference(impl::GetMemberType<MemberPtr> const& member) {
|
||||
return GetParentReference<MemberPtr, RealParentType>(std::addressof(member));
|
||||
}
|
||||
|
||||
template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
|
||||
constexpr RealParentType* GetParentPointer(impl::GetMemberType<MemberPtr>& member) {
|
||||
return std::addressof(GetParentReference<MemberPtr, RealParentType>(member));
|
||||
}
|
||||
|
||||
template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
|
||||
constexpr RealParentType const* GetParentPointer(impl::GetMemberType<MemberPtr> const& member) {
|
||||
return std::addressof(GetParentReference<MemberPtr, RealParentType>(member));
|
||||
}
|
||||
|
||||
} // namespace Common
|
||||
@@ -394,7 +394,7 @@ public:

    template <typename S, typename T2, typename F2>
    friend S operator%(const S& p, const swapped_t v);

    // Arithmetics + assignments
    // Arithmetics + assignements
    template <typename S, typename T2, typename F2>
    friend S operator+=(const S& p, const swapped_t v);

@@ -451,7 +451,7 @@ S operator%(const S& i, const swap_struct_t<T, F> v) {

    return i % v.swap();
}

// Arithmetics + assignments
// Arithmetics + assignements
template <typename S, typename T, typename F>
S& operator+=(S& i, const swap_struct_t<T, F> v) {
    i += v.swap();
@@ -1,58 +0,0 @@

// Copyright 2020 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "common/thread.h"
#include "common/thread_worker.h"

namespace Common {

ThreadWorker::ThreadWorker(std::size_t num_workers, const std::string& name) {
    for (std::size_t i = 0; i < num_workers; ++i)
        threads.emplace_back([this, thread_name{std::string{name}}] {
            Common::SetCurrentThreadName(thread_name.c_str());

            // Wait for first request
            {
                std::unique_lock lock{queue_mutex};
                condition.wait(lock, [this] { return stop || !requests.empty(); });
            }

            while (true) {
                std::function<void()> task;

                {
                    std::unique_lock lock{queue_mutex};
                    condition.wait(lock, [this] { return stop || !requests.empty(); });
                    if (stop || requests.empty()) {
                        return;
                    }
                    task = std::move(requests.front());
                    requests.pop();
                }

                task();
            }
        });
}

ThreadWorker::~ThreadWorker() {
    {
        std::unique_lock lock{queue_mutex};
        stop = true;
    }
    condition.notify_all();
    for (std::thread& thread : threads) {
        thread.join();
    }
}

void ThreadWorker::QueueWork(std::function<void()>&& work) {
    {
        std::unique_lock lock{queue_mutex};
        requests.emplace(work);
    }
    condition.notify_one();
}

} // namespace Common
@@ -1,30 +0,0 @@
|
||||
// Copyright 2020 yuzu emulator team
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <atomic>
|
||||
#include <functional>
|
||||
#include <mutex>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
#include <queue>
|
||||
|
||||
namespace Common {
|
||||
|
||||
class ThreadWorker final {
|
||||
public:
|
||||
explicit ThreadWorker(std::size_t num_workers, const std::string& name);
|
||||
~ThreadWorker();
|
||||
void QueueWork(std::function<void()>&& work);
|
||||
|
||||
private:
|
||||
std::vector<std::thread> threads;
|
||||
std::queue<std::function<void()>> requests;
|
||||
std::mutex queue_mutex;
|
||||
std::condition_variable condition;
|
||||
std::atomic_bool stop{};
|
||||
};
|
||||
|
||||
} // namespace Common
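A minimal usage sketch of the worker-pool interface above (hedged: the thread count and task bodies are illustrative only):

#include <atomic>
#include <cstdio>
#include "common/thread_worker.h"

void ThreadWorkerExample() {
    std::atomic<int> counter{0};
    {
        Common::ThreadWorker pool{4, "ExampleWorkers"};
        for (int i = 0; i < 16; ++i) {
            // Each queued task runs on one of the four worker threads.
            pool.QueueWork([&counter] { counter.fetch_add(1); });
        }
        // ~ThreadWorker() raises `stop` and joins the workers here; tasks still queued at
        // that point are not guaranteed to run before the threads exit.
    }
    std::printf("tasks completed so far: %d\n", counter.load());
}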
|
||||
@@ -1,822 +0,0 @@
|
||||
/* $NetBSD: tree.h,v 1.8 2004/03/28 19:38:30 provos Exp $ */
|
||||
/* $OpenBSD: tree.h,v 1.7 2002/10/17 21:51:54 art Exp $ */
|
||||
/* $FreeBSD$ */
|
||||
|
||||
/*-
|
||||
* Copyright 2002 Niels Provos <provos@citi.umich.edu>
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
|
||||
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
|
||||
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
||||
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
|
||||
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
|
||||
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#ifndef _SYS_TREE_H_
|
||||
#define _SYS_TREE_H_
|
||||
|
||||
/* FreeBSD <sys/cdefs.h> has a lot of defines we don't really want. */
|
||||
/* tree.h only actually uses __inline and __unused, so we'll just define those. */
|
||||
|
||||
/* #include <sys/cdefs.h> */
|
||||
|
||||
#ifndef __inline
|
||||
#define __inline inline
|
||||
#endif
|
||||
|
||||
/*
|
||||
* This file defines data structures for different types of trees:
|
||||
* splay trees and red-black trees.
|
||||
*
|
||||
* A splay tree is a self-organizing data structure. Every operation
|
||||
* on the tree causes a splay to happen. The splay moves the requested
|
||||
* node to the root of the tree and partly rebalances it.
|
||||
*
|
||||
* This has the benefit that request locality causes faster lookups as
|
||||
* the requested nodes move to the top of the tree. On the other hand,
|
||||
* every lookup causes memory writes.
|
||||
*
|
||||
* The Balance Theorem bounds the total access time for m operations
|
||||
* and n inserts on an initially empty tree as O((m + n)lg n). The
|
||||
* amortized cost for a sequence of m accesses to a splay tree is O(lg n);
|
||||
*
|
||||
* A red-black tree is a binary search tree with the node color as an
|
||||
* extra attribute. It fulfills a set of conditions:
|
||||
* - every search path from the root to a leaf consists of the
|
||||
* same number of black nodes,
|
||||
* - each red node (except for the root) has a black parent,
|
||||
* - each leaf node is black.
|
||||
*
|
||||
* Every operation on a red-black tree is bounded as O(lg n).
|
||||
* The maximum height of a red-black tree is 2lg (n+1).
|
||||
*/
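As a concrete illustration of the splay-tree half described above, a minimal, hedged sketch using the SPLAY_* macros defined below (names such as `item` and `item_cmp` are illustrative; in a real translation unit this code would appear after the macro definitions):

struct item {
    int key;
    SPLAY_ENTRY(item) entry; // intrusive left/right links
};

static int item_cmp(item* a, item* b) {
    return (a->key < b->key) ? -1 : (a->key > b->key);
}

SPLAY_HEAD(item_tree, item);
SPLAY_PROTOTYPE(item_tree, item, entry, item_cmp)
SPLAY_GENERATE(item_tree, item, entry, item_cmp)

static void SplayExample() {
    item_tree tree = SPLAY_INITIALIZER(&tree);
    item a{};
    a.key = 42;
    SPLAY_INSERT(item_tree, &tree, &a); // splays the insertion point, then links `a` as the root

    item needle{};
    needle.key = 42;
    item* found = SPLAY_FIND(item_tree, &tree, &needle); // a successful find leaves the match at the root
    (void)found;
}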
|
||||
|
||||
#define SPLAY_HEAD(name, type) \
|
||||
struct name { \
|
||||
struct type* sph_root; /* root of the tree */ \
|
||||
}
|
||||
|
||||
#define SPLAY_INITIALIZER(root) \
|
||||
{ NULL }
|
||||
|
||||
#define SPLAY_INIT(root) \
|
||||
do { \
|
||||
(root)->sph_root = NULL; \
|
||||
} while (/*CONSTCOND*/ 0)
|
||||
|
||||
#define SPLAY_ENTRY(type) \
|
||||
struct { \
|
||||
struct type* spe_left; /* left element */ \
|
||||
struct type* spe_right; /* right element */ \
|
||||
}
|
||||
|
||||
#define SPLAY_LEFT(elm, field) (elm)->field.spe_left
|
||||
#define SPLAY_RIGHT(elm, field) (elm)->field.spe_right
|
||||
#define SPLAY_ROOT(head) (head)->sph_root
|
||||
#define SPLAY_EMPTY(head) (SPLAY_ROOT(head) == NULL)
|
||||
|
||||
/* SPLAY_ROTATE_{LEFT,RIGHT} expect that tmp hold SPLAY_{RIGHT,LEFT} */
|
||||
#define SPLAY_ROTATE_RIGHT(head, tmp, field) \
|
||||
do { \
|
||||
SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(tmp, field); \
|
||||
SPLAY_RIGHT(tmp, field) = (head)->sph_root; \
|
||||
(head)->sph_root = tmp; \
|
||||
} while (/*CONSTCOND*/ 0)
|
||||
|
||||
#define SPLAY_ROTATE_LEFT(head, tmp, field) \
|
||||
do { \
|
||||
SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(tmp, field); \
|
||||
SPLAY_LEFT(tmp, field) = (head)->sph_root; \
|
||||
(head)->sph_root = tmp; \
|
||||
} while (/*CONSTCOND*/ 0)
|
||||
|
||||
#define SPLAY_LINKLEFT(head, tmp, field) \
|
||||
do { \
|
||||
SPLAY_LEFT(tmp, field) = (head)->sph_root; \
|
||||
tmp = (head)->sph_root; \
|
||||
(head)->sph_root = SPLAY_LEFT((head)->sph_root, field); \
|
||||
} while (/*CONSTCOND*/ 0)
|
||||
|
||||
#define SPLAY_LINKRIGHT(head, tmp, field) \
|
||||
do { \
|
||||
SPLAY_RIGHT(tmp, field) = (head)->sph_root; \
|
||||
tmp = (head)->sph_root; \
|
||||
(head)->sph_root = SPLAY_RIGHT((head)->sph_root, field); \
|
||||
} while (/*CONSTCOND*/ 0)
|
||||
|
||||
#define SPLAY_ASSEMBLE(head, node, left, right, field) \
|
||||
do { \
|
||||
SPLAY_RIGHT(left, field) = SPLAY_LEFT((head)->sph_root, field); \
|
||||
SPLAY_LEFT(right, field) = SPLAY_RIGHT((head)->sph_root, field); \
|
||||
SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(node, field); \
|
||||
SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(node, field); \
|
||||
} while (/*CONSTCOND*/ 0)
|
||||
|
||||
/* Generates prototypes and inline functions */
|
||||
|
||||
#define SPLAY_PROTOTYPE(name, type, field, cmp) \
|
||||
void name##_SPLAY(struct name*, struct type*); \
|
||||
void name##_SPLAY_MINMAX(struct name*, int); \
|
||||
struct type* name##_SPLAY_INSERT(struct name*, struct type*); \
|
||||
struct type* name##_SPLAY_REMOVE(struct name*, struct type*); \
|
||||
\
|
||||
/* Finds the node with the same key as elm */ \
|
||||
static __inline struct type* name##_SPLAY_FIND(struct name* head, struct type* elm) { \
|
||||
if (SPLAY_EMPTY(head)) \
|
||||
return (NULL); \
|
||||
name##_SPLAY(head, elm); \
|
||||
if ((cmp)(elm, (head)->sph_root) == 0) \
|
||||
return (head->sph_root); \
|
||||
return (NULL); \
|
||||
} \
|
||||
\
|
||||
static __inline struct type* name##_SPLAY_NEXT(struct name* head, struct type* elm) { \
|
||||
name##_SPLAY(head, elm); \
|
||||
if (SPLAY_RIGHT(elm, field) != NULL) { \
|
||||
elm = SPLAY_RIGHT(elm, field); \
|
||||
while (SPLAY_LEFT(elm, field) != NULL) { \
|
||||
elm = SPLAY_LEFT(elm, field); \
|
||||
} \
|
||||
} else \
|
||||
elm = NULL; \
|
||||
return (elm); \
|
||||
} \
|
||||
\
|
||||
static __inline struct type* name##_SPLAY_MIN_MAX(struct name* head, int val) { \
|
||||
name##_SPLAY_MINMAX(head, val); \
|
||||
return (SPLAY_ROOT(head)); \
|
||||
}
|
||||
|
||||
/* Main splay operation.
|
||||
* Moves node close to the key of elm to top
|
||||
*/
|
||||
#define SPLAY_GENERATE(name, type, field, cmp) \
|
||||
struct type* name##_SPLAY_INSERT(struct name* head, struct type* elm) { \
|
||||
if (SPLAY_EMPTY(head)) { \
|
||||
SPLAY_LEFT(elm, field) = SPLAY_RIGHT(elm, field) = NULL; \
|
||||
} else { \
|
||||
int __comp; \
|
||||
name##_SPLAY(head, elm); \
|
||||
__comp = (cmp)(elm, (head)->sph_root); \
|
||||
if (__comp < 0) { \
|
||||
SPLAY_LEFT(elm, field) = SPLAY_LEFT((head)->sph_root, field); \
|
||||
SPLAY_RIGHT(elm, field) = (head)->sph_root; \
|
||||
SPLAY_LEFT((head)->sph_root, field) = NULL; \
|
||||
} else if (__comp > 0) { \
|
||||
SPLAY_RIGHT(elm, field) = SPLAY_RIGHT((head)->sph_root, field); \
|
||||
SPLAY_LEFT(elm, field) = (head)->sph_root; \
|
||||
SPLAY_RIGHT((head)->sph_root, field) = NULL; \
|
||||
} else \
|
||||
return ((head)->sph_root); \
|
||||
} \
|
||||
(head)->sph_root = (elm); \
|
||||
return (NULL); \
|
||||
} \
|
||||
\
|
||||
struct type* name##_SPLAY_REMOVE(struct name* head, struct type* elm) { \
|
||||
struct type* __tmp; \
|
||||
if (SPLAY_EMPTY(head)) \
|
||||
return (NULL); \
|
||||
name##_SPLAY(head, elm); \
|
||||
if ((cmp)(elm, (head)->sph_root) == 0) { \
|
||||
if (SPLAY_LEFT((head)->sph_root, field) == NULL) { \
|
||||
(head)->sph_root = SPLAY_RIGHT((head)->sph_root, field); \
|
||||
} else { \
|
||||
__tmp = SPLAY_RIGHT((head)->sph_root, field); \
|
||||
(head)->sph_root = SPLAY_LEFT((head)->sph_root, field); \
|
||||
name##_SPLAY(head, elm); \
|
||||
SPLAY_RIGHT((head)->sph_root, field) = __tmp; \
|
||||
} \
|
||||
return (elm); \
|
||||
} \
|
||||
return (NULL); \
|
||||
} \
|
||||
\
|
||||
void name##_SPLAY(struct name* head, struct type* elm) { \
|
||||
struct type __node, *__left, *__right, *__tmp; \
|
||||
int __comp; \
|
||||
\
|
||||
SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL; \
|
||||
__left = __right = &__node; \
|
||||
\
|
||||
while ((__comp = (cmp)(elm, (head)->sph_root)) != 0) { \
|
||||
if (__comp < 0) { \
|
||||
__tmp = SPLAY_LEFT((head)->sph_root, field); \
|
||||
if (__tmp == NULL) \
|
||||
break; \
|
||||
if ((cmp)(elm, __tmp) < 0) { \
|
||||
SPLAY_ROTATE_RIGHT(head, __tmp, field); \
|
||||
if (SPLAY_LEFT((head)->sph_root, field) == NULL) \
|
||||
break; \
|
||||
} \
|
||||
SPLAY_LINKLEFT(head, __right, field); \
|
||||
} else if (__comp > 0) { \
|
||||
__tmp = SPLAY_RIGHT((head)->sph_root, field); \
|
||||
if (__tmp == NULL) \
|
||||
break; \
|
||||
if ((cmp)(elm, __tmp) > 0) { \
|
||||
SPLAY_ROTATE_LEFT(head, __tmp, field); \
|
||||
if (SPLAY_RIGHT((head)->sph_root, field) == NULL) \
|
||||
break; \
|
||||
} \
|
||||
SPLAY_LINKRIGHT(head, __left, field); \
|
||||
} \
|
||||
} \
|
||||
SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \
|
||||
} \
|
||||
\
|
||||
/* Splay with either the minimum or the maximum element \
|
||||
* Used to find minimum or maximum element in tree. \
|
||||
*/ \
|
||||
void name##_SPLAY_MINMAX(struct name* head, int __comp) { \
|
||||
struct type __node, *__left, *__right, *__tmp; \
|
||||
\
|
||||
SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL; \
|
||||
__left = __right = &__node; \
|
||||
\
|
||||
while (1) { \
|
||||
if (__comp < 0) { \
|
||||
__tmp = SPLAY_LEFT((head)->sph_root, field); \
|
||||
if (__tmp == NULL) \
|
||||
break; \
|
||||
if (__comp < 0) { \
|
||||
SPLAY_ROTATE_RIGHT(head, __tmp, field); \
|
||||
if (SPLAY_LEFT((head)->sph_root, field) == NULL) \
|
||||
break; \
|
||||
} \
|
||||
SPLAY_LINKLEFT(head, __right, field); \
|
||||
} else if (__comp > 0) { \
|
||||
__tmp = SPLAY_RIGHT((head)->sph_root, field); \
|
||||
if (__tmp == NULL) \
|
||||
break; \
|
||||
if (__comp > 0) { \
|
||||
SPLAY_ROTATE_LEFT(head, __tmp, field); \
|
||||
if (SPLAY_RIGHT((head)->sph_root, field) == NULL) \
|
||||
break; \
|
||||
} \
|
||||
SPLAY_LINKRIGHT(head, __left, field); \
|
||||
} \
|
||||
} \
|
||||
SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \
|
||||
}
|
||||
|
||||
#define SPLAY_NEGINF -1
|
||||
#define SPLAY_INF 1
|
||||
|
||||
#define SPLAY_INSERT(name, x, y) name##_SPLAY_INSERT(x, y)
|
||||
#define SPLAY_REMOVE(name, x, y) name##_SPLAY_REMOVE(x, y)
|
||||
#define SPLAY_FIND(name, x, y) name##_SPLAY_FIND(x, y)
|
||||
#define SPLAY_NEXT(name, x, y) name##_SPLAY_NEXT(x, y)
|
||||
#define SPLAY_MIN(name, x) (SPLAY_EMPTY(x) ? NULL : name##_SPLAY_MIN_MAX(x, SPLAY_NEGINF))
|
||||
#define SPLAY_MAX(name, x) (SPLAY_EMPTY(x) ? NULL : name##_SPLAY_MIN_MAX(x, SPLAY_INF))
|
||||
|
||||
#define SPLAY_FOREACH(x, name, head) \
|
||||
for ((x) = SPLAY_MIN(name, head); (x) != NULL; (x) = SPLAY_NEXT(name, head, x))
|
||||
|
||||
/* Macros that define a red-black tree */
|
||||
#define RB_HEAD(name, type) \
|
||||
struct name { \
|
||||
struct type* rbh_root; /* root of the tree */ \
|
||||
}
|
||||
|
||||
#define RB_INITIALIZER(root) \
|
||||
{ NULL }
|
||||
|
||||
#define RB_INIT(root) \
|
||||
do { \
|
||||
(root)->rbh_root = NULL; \
|
||||
} while (/*CONSTCOND*/ 0)
|
||||
|
||||
#define RB_BLACK 0
|
||||
#define RB_RED 1
|
||||
#define RB_ENTRY(type) \
|
||||
struct { \
|
||||
struct type* rbe_left; /* left element */ \
|
||||
struct type* rbe_right; /* right element */ \
|
||||
struct type* rbe_parent; /* parent element */ \
|
||||
int rbe_color; /* node color */ \
|
||||
}
|
||||
|
||||
#define RB_LEFT(elm, field) (elm)->field.rbe_left
|
||||
#define RB_RIGHT(elm, field) (elm)->field.rbe_right
|
||||
#define RB_PARENT(elm, field) (elm)->field.rbe_parent
|
||||
#define RB_COLOR(elm, field) (elm)->field.rbe_color
|
||||
#define RB_ROOT(head) (head)->rbh_root
|
||||
#define RB_EMPTY(head) (RB_ROOT(head) == NULL)
|
||||
|
||||
#define RB_SET(elm, parent, field) \
|
||||
do { \
|
||||
RB_PARENT(elm, field) = parent; \
|
||||
RB_LEFT(elm, field) = RB_RIGHT(elm, field) = NULL; \
|
||||
RB_COLOR(elm, field) = RB_RED; \
|
||||
} while (/*CONSTCOND*/ 0)
|
||||
|
||||
#define RB_SET_BLACKRED(black, red, field) \
|
||||
do { \
|
||||
RB_COLOR(black, field) = RB_BLACK; \
|
||||
RB_COLOR(red, field) = RB_RED; \
|
||||
} while (/*CONSTCOND*/ 0)
|
||||
|
||||
#ifndef RB_AUGMENT
|
||||
#define RB_AUGMENT(x) \
|
||||
do { \
|
||||
} while (0)
|
||||
#endif
|
||||
|
||||
#define RB_ROTATE_LEFT(head, elm, tmp, field) \
|
||||
do { \
|
||||
(tmp) = RB_RIGHT(elm, field); \
|
||||
if ((RB_RIGHT(elm, field) = RB_LEFT(tmp, field)) != NULL) { \
|
||||
RB_PARENT(RB_LEFT(tmp, field), field) = (elm); \
|
||||
} \
|
||||
RB_AUGMENT(elm); \
|
||||
if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field)) != NULL) { \
|
||||
if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \
|
||||
RB_LEFT(RB_PARENT(elm, field), field) = (tmp); \
|
||||
else \
|
||||
RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \
|
||||
} else \
|
||||
(head)->rbh_root = (tmp); \
|
||||
RB_LEFT(tmp, field) = (elm); \
|
||||
RB_PARENT(elm, field) = (tmp); \
|
||||
RB_AUGMENT(tmp); \
|
||||
if ((RB_PARENT(tmp, field))) \
|
||||
RB_AUGMENT(RB_PARENT(tmp, field)); \
|
||||
} while (/*CONSTCOND*/ 0)
|
||||
|
||||
#define RB_ROTATE_RIGHT(head, elm, tmp, field) \
|
||||
do { \
|
||||
(tmp) = RB_LEFT(elm, field); \
|
||||
if ((RB_LEFT(elm, field) = RB_RIGHT(tmp, field)) != NULL) { \
|
||||
RB_PARENT(RB_RIGHT(tmp, field), field) = (elm); \
|
||||
} \
|
||||
RB_AUGMENT(elm); \
|
||||
if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field)) != NULL) { \
|
||||
if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \
|
||||
RB_LEFT(RB_PARENT(elm, field), field) = (tmp); \
|
||||
else \
|
||||
RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \
|
||||
} else \
|
||||
(head)->rbh_root = (tmp); \
|
||||
RB_RIGHT(tmp, field) = (elm); \
|
||||
RB_PARENT(elm, field) = (tmp); \
|
||||
RB_AUGMENT(tmp); \
|
||||
if ((RB_PARENT(tmp, field))) \
|
||||
RB_AUGMENT(RB_PARENT(tmp, field)); \
|
||||
} while (/*CONSTCOND*/ 0)
|
||||
|
||||
/* Generates prototypes and inline functions */
|
||||
#define RB_PROTOTYPE(name, type, field, cmp) RB_PROTOTYPE_INTERNAL(name, type, field, cmp, )
|
||||
#define RB_PROTOTYPE_STATIC(name, type, field, cmp) \
|
||||
RB_PROTOTYPE_INTERNAL(name, type, field, cmp, static)
|
||||
#define RB_PROTOTYPE_INTERNAL(name, type, field, cmp, attr) \
|
||||
RB_PROTOTYPE_INSERT_COLOR(name, type, attr); \
|
||||
RB_PROTOTYPE_REMOVE_COLOR(name, type, attr); \
|
||||
RB_PROTOTYPE_INSERT(name, type, attr); \
|
||||
RB_PROTOTYPE_REMOVE(name, type, attr); \
|
||||
RB_PROTOTYPE_FIND(name, type, attr); \
|
||||
RB_PROTOTYPE_NFIND(name, type, attr); \
|
||||
RB_PROTOTYPE_FIND_LIGHT(name, type, attr); \
|
||||
RB_PROTOTYPE_NFIND_LIGHT(name, type, attr); \
|
||||
RB_PROTOTYPE_NEXT(name, type, attr); \
|
||||
RB_PROTOTYPE_PREV(name, type, attr); \
|
||||
RB_PROTOTYPE_MINMAX(name, type, attr);
|
||||
#define RB_PROTOTYPE_INSERT_COLOR(name, type, attr) \
|
||||
attr void name##_RB_INSERT_COLOR(struct name*, struct type*)
|
||||
#define RB_PROTOTYPE_REMOVE_COLOR(name, type, attr) \
|
||||
attr void name##_RB_REMOVE_COLOR(struct name*, struct type*, struct type*)
|
||||
#define RB_PROTOTYPE_REMOVE(name, type, attr) \
|
||||
attr struct type* name##_RB_REMOVE(struct name*, struct type*)
|
||||
#define RB_PROTOTYPE_INSERT(name, type, attr) \
|
||||
attr struct type* name##_RB_INSERT(struct name*, struct type*)
|
||||
#define RB_PROTOTYPE_FIND(name, type, attr) \
|
||||
attr struct type* name##_RB_FIND(struct name*, struct type*)
|
||||
#define RB_PROTOTYPE_NFIND(name, type, attr) \
|
||||
attr struct type* name##_RB_NFIND(struct name*, struct type*)
|
||||
#define RB_PROTOTYPE_FIND_LIGHT(name, type, attr) \
|
||||
attr struct type* name##_RB_FIND_LIGHT(struct name*, const void*)
|
||||
#define RB_PROTOTYPE_NFIND_LIGHT(name, type, attr) \
|
||||
attr struct type* name##_RB_NFIND_LIGHT(struct name*, const void*)
|
||||
#define RB_PROTOTYPE_NEXT(name, type, attr) attr struct type* name##_RB_NEXT(struct type*)
|
||||
#define RB_PROTOTYPE_PREV(name, type, attr) attr struct type* name##_RB_PREV(struct type*)
|
||||
#define RB_PROTOTYPE_MINMAX(name, type, attr) attr struct type* name##_RB_MINMAX(struct name*, int)
|
||||
|
||||
/* Main rb operation.
|
||||
* Moves node close to the key of elm to top
|
||||
*/
|
||||
#define RB_GENERATE_WITHOUT_COMPARE(name, type, field) \
|
||||
RB_GENERATE_WITHOUT_COMPARE_INTERNAL(name, type, field, )
|
||||
#define RB_GENERATE_WITHOUT_COMPARE_STATIC(name, type, field) \
|
||||
RB_GENERATE_WITHOUT_COMPARE_INTERNAL(name, type, field, static)
|
||||
#define RB_GENERATE_WITHOUT_COMPARE_INTERNAL(name, type, field, attr) \
|
||||
RB_GENERATE_REMOVE_COLOR(name, type, field, attr) \
|
||||
RB_GENERATE_REMOVE(name, type, field, attr) \
|
||||
RB_GENERATE_NEXT(name, type, field, attr) \
|
||||
RB_GENERATE_PREV(name, type, field, attr) \
|
||||
RB_GENERATE_MINMAX(name, type, field, attr)
|
||||
|
||||
#define RB_GENERATE_WITH_COMPARE(name, type, field, cmp, lcmp) \
|
||||
RB_GENERATE_WITH_COMPARE_INTERNAL(name, type, field, cmp, lcmp, )
|
||||
#define RB_GENERATE_WITH_COMPARE_STATIC(name, type, field, cmp, lcmp) \
|
||||
RB_GENERATE_WITH_COMPARE_INTERNAL(name, type, field, cmp, lcmp, static)
|
||||
#define RB_GENERATE_WITH_COMPARE_INTERNAL(name, type, field, cmp, lcmp, attr) \
|
||||
RB_GENERATE_INSERT_COLOR(name, type, field, attr) \
|
||||
RB_GENERATE_INSERT(name, type, field, cmp, attr) \
|
||||
RB_GENERATE_FIND(name, type, field, cmp, attr) \
|
||||
RB_GENERATE_NFIND(name, type, field, cmp, attr) \
|
||||
RB_GENERATE_FIND_LIGHT(name, type, field, lcmp, attr) \
|
||||
RB_GENERATE_NFIND_LIGHT(name, type, field, lcmp, attr)
|
||||
|
||||
#define RB_GENERATE_ALL(name, type, field, cmp) RB_GENERATE_ALL_INTERNAL(name, type, field, cmp, )
|
||||
#define RB_GENERATE_ALL_STATIC(name, type, field, cmp) \
|
||||
RB_GENERATE_ALL_INTERNAL(name, type, field, cmp, static)
|
||||
#define RB_GENERATE_ALL_INTERNAL(name, type, field, cmp, attr) \
|
||||
RB_GENERATE_WITHOUT_COMPARE_INTERNAL(name, type, field, attr) \
|
||||
RB_GENERATE_WITH_COMPARE_INTERNAL(name, type, field, cmp, attr)
|
||||
|
||||
#define RB_GENERATE_INSERT_COLOR(name, type, field, attr) \
|
||||
attr void name##_RB_INSERT_COLOR(struct name* head, struct type* elm) { \
|
||||
struct type *parent, *gparent, *tmp; \
|
||||
while ((parent = RB_PARENT(elm, field)) != NULL && RB_COLOR(parent, field) == RB_RED) { \
|
||||
gparent = RB_PARENT(parent, field); \
|
||||
if (parent == RB_LEFT(gparent, field)) { \
|
||||
tmp = RB_RIGHT(gparent, field); \
|
||||
if (tmp && RB_COLOR(tmp, field) == RB_RED) { \
|
||||
RB_COLOR(tmp, field) = RB_BLACK; \
|
||||
RB_SET_BLACKRED(parent, gparent, field); \
|
||||
elm = gparent; \
|
||||
continue; \
|
||||
} \
|
||||
if (RB_RIGHT(parent, field) == elm) { \
|
||||
RB_ROTATE_LEFT(head, parent, tmp, field); \
|
||||
tmp = parent; \
|
||||
parent = elm; \
|
||||
elm = tmp; \
|
||||
} \
|
||||
RB_SET_BLACKRED(parent, gparent, field); \
|
||||
RB_ROTATE_RIGHT(head, gparent, tmp, field); \
|
||||
} else { \
|
||||
tmp = RB_LEFT(gparent, field); \
|
||||
if (tmp && RB_COLOR(tmp, field) == RB_RED) { \
|
||||
RB_COLOR(tmp, field) = RB_BLACK; \
|
||||
RB_SET_BLACKRED(parent, gparent, field); \
|
||||
elm = gparent; \
|
||||
continue; \
|
||||
} \
|
||||
if (RB_LEFT(parent, field) == elm) { \
|
||||
RB_ROTATE_RIGHT(head, parent, tmp, field); \
|
||||
tmp = parent; \
|
||||
parent = elm; \
|
||||
elm = tmp; \
|
||||
} \
|
||||
RB_SET_BLACKRED(parent, gparent, field); \
|
||||
RB_ROTATE_LEFT(head, gparent, tmp, field); \
|
||||
} \
|
||||
} \
|
||||
RB_COLOR(head->rbh_root, field) = RB_BLACK; \
|
||||
}
|
||||
|
||||
#define RB_GENERATE_REMOVE_COLOR(name, type, field, attr) \
|
||||
attr void name##_RB_REMOVE_COLOR(struct name* head, struct type* parent, struct type* elm) { \
|
||||
struct type* tmp; \
|
||||
while ((elm == NULL || RB_COLOR(elm, field) == RB_BLACK) && elm != RB_ROOT(head)) { \
|
||||
if (RB_LEFT(parent, field) == elm) { \
|
||||
tmp = RB_RIGHT(parent, field); \
|
||||
if (RB_COLOR(tmp, field) == RB_RED) { \
|
||||
RB_SET_BLACKRED(tmp, parent, field); \
|
||||
RB_ROTATE_LEFT(head, parent, tmp, field); \
|
||||
tmp = RB_RIGHT(parent, field); \
|
||||
} \
|
||||
if ((RB_LEFT(tmp, field) == NULL || \
|
||||
RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) && \
|
||||
(RB_RIGHT(tmp, field) == NULL || \
|
||||
RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) { \
|
||||
RB_COLOR(tmp, field) = RB_RED; \
|
||||
elm = parent; \
|
||||
parent = RB_PARENT(elm, field); \
|
||||
} else { \
|
||||
if (RB_RIGHT(tmp, field) == NULL || \
|
||||
RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK) { \
|
||||
struct type* oleft; \
|
||||
if ((oleft = RB_LEFT(tmp, field)) != NULL) \
|
||||
RB_COLOR(oleft, field) = RB_BLACK; \
|
||||
RB_COLOR(tmp, field) = RB_RED; \
|
||||
RB_ROTATE_RIGHT(head, tmp, oleft, field); \
|
||||
tmp = RB_RIGHT(parent, field); \
|
||||
} \
|
||||
RB_COLOR(tmp, field) = RB_COLOR(parent, field); \
|
||||
RB_COLOR(parent, field) = RB_BLACK; \
|
||||
if (RB_RIGHT(tmp, field)) \
|
||||
RB_COLOR(RB_RIGHT(tmp, field), field) = RB_BLACK; \
|
||||
RB_ROTATE_LEFT(head, parent, tmp, field); \
|
||||
elm = RB_ROOT(head); \
|
||||
break; \
|
||||
} \
|
||||
} else { \
|
||||
tmp = RB_LEFT(parent, field); \
|
||||
if (RB_COLOR(tmp, field) == RB_RED) { \
|
||||
RB_SET_BLACKRED(tmp, parent, field); \
|
||||
RB_ROTATE_RIGHT(head, parent, tmp, field); \
|
||||
tmp = RB_LEFT(parent, field); \
|
||||
} \
|
||||
if ((RB_LEFT(tmp, field) == NULL || \
|
||||
RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) && \
|
||||
(RB_RIGHT(tmp, field) == NULL || \
|
||||
RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) { \
|
||||
RB_COLOR(tmp, field) = RB_RED; \
|
||||
elm = parent; \
|
||||
parent = RB_PARENT(elm, field); \
|
||||
} else { \
|
||||
if (RB_LEFT(tmp, field) == NULL || \
|
||||
RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) { \
|
||||
struct type* oright; \
|
||||
if ((oright = RB_RIGHT(tmp, field)) != NULL) \
|
||||
RB_COLOR(oright, field) = RB_BLACK; \
|
||||
RB_COLOR(tmp, field) = RB_RED; \
|
||||
RB_ROTATE_LEFT(head, tmp, oright, field); \
|
||||
tmp = RB_LEFT(parent, field); \
|
||||
} \
|
||||
RB_COLOR(tmp, field) = RB_COLOR(parent, field); \
|
||||
RB_COLOR(parent, field) = RB_BLACK; \
|
||||
if (RB_LEFT(tmp, field)) \
|
||||
RB_COLOR(RB_LEFT(tmp, field), field) = RB_BLACK; \
|
||||
RB_ROTATE_RIGHT(head, parent, tmp, field); \
|
||||
elm = RB_ROOT(head); \
|
||||
break; \
|
||||
} \
|
||||
} \
|
||||
} \
|
||||
if (elm) \
|
||||
RB_COLOR(elm, field) = RB_BLACK; \
|
||||
}
|
||||
|
||||
#define RB_GENERATE_REMOVE(name, type, field, attr) \
|
||||
attr struct type* name##_RB_REMOVE(struct name* head, struct type* elm) { \
|
||||
struct type *child, *parent, *old = elm; \
|
||||
int color; \
|
||||
if (RB_LEFT(elm, field) == NULL) \
|
||||
child = RB_RIGHT(elm, field); \
|
||||
else if (RB_RIGHT(elm, field) == NULL) \
|
||||
child = RB_LEFT(elm, field); \
|
||||
else { \
|
||||
struct type* left; \
|
||||
elm = RB_RIGHT(elm, field); \
|
||||
while ((left = RB_LEFT(elm, field)) != NULL) \
|
||||
elm = left; \
|
||||
child = RB_RIGHT(elm, field); \
|
||||
parent = RB_PARENT(elm, field); \
|
||||
color = RB_COLOR(elm, field); \
|
||||
if (child) \
|
||||
RB_PARENT(child, field) = parent; \
|
||||
if (parent) { \
|
||||
if (RB_LEFT(parent, field) == elm) \
|
||||
RB_LEFT(parent, field) = child; \
|
||||
else \
|
||||
RB_RIGHT(parent, field) = child; \
|
||||
RB_AUGMENT(parent); \
|
||||
} else \
|
||||
RB_ROOT(head) = child; \
|
||||
if (RB_PARENT(elm, field) == old) \
|
||||
parent = elm; \
|
||||
(elm)->field = (old)->field; \
|
||||
if (RB_PARENT(old, field)) { \
|
||||
if (RB_LEFT(RB_PARENT(old, field), field) == old) \
|
||||
RB_LEFT(RB_PARENT(old, field), field) = elm; \
|
||||
else \
|
||||
RB_RIGHT(RB_PARENT(old, field), field) = elm; \
|
||||
RB_AUGMENT(RB_PARENT(old, field)); \
|
||||
} else \
|
||||
RB_ROOT(head) = elm; \
|
||||
RB_PARENT(RB_LEFT(old, field), field) = elm; \
|
||||
if (RB_RIGHT(old, field)) \
|
||||
RB_PARENT(RB_RIGHT(old, field), field) = elm; \
|
||||
if (parent) { \
|
||||
left = parent; \
|
||||
do { \
|
||||
RB_AUGMENT(left); \
|
||||
} while ((left = RB_PARENT(left, field)) != NULL); \
|
||||
} \
|
||||
goto color; \
|
||||
} \
|
||||
parent = RB_PARENT(elm, field); \
|
||||
color = RB_COLOR(elm, field); \
|
||||
if (child) \
|
||||
RB_PARENT(child, field) = parent; \
|
||||
if (parent) { \
|
||||
if (RB_LEFT(parent, field) == elm) \
|
||||
RB_LEFT(parent, field) = child; \
|
||||
else \
|
||||
RB_RIGHT(parent, field) = child; \
|
||||
RB_AUGMENT(parent); \
|
||||
} else \
|
||||
RB_ROOT(head) = child; \
|
||||
color: \
|
||||
if (color == RB_BLACK) \
|
||||
name##_RB_REMOVE_COLOR(head, parent, child); \
|
||||
return (old); \
|
||||
}
|
||||
|
||||
#define RB_GENERATE_INSERT(name, type, field, cmp, attr) \
|
||||
/* Inserts a node into the RB tree */ \
|
||||
attr struct type* name##_RB_INSERT(struct name* head, struct type* elm) { \
|
||||
struct type* tmp; \
|
||||
struct type* parent = NULL; \
|
||||
int comp = 0; \
|
||||
tmp = RB_ROOT(head); \
|
||||
while (tmp) { \
|
||||
parent = tmp; \
|
||||
comp = (cmp)(elm, parent); \
|
||||
if (comp < 0) \
|
||||
tmp = RB_LEFT(tmp, field); \
|
||||
else if (comp > 0) \
|
||||
tmp = RB_RIGHT(tmp, field); \
|
||||
else \
|
||||
return (tmp); \
|
||||
} \
|
||||
RB_SET(elm, parent, field); \
|
||||
if (parent != NULL) { \
|
||||
if (comp < 0) \
|
||||
RB_LEFT(parent, field) = elm; \
|
||||
else \
|
||||
RB_RIGHT(parent, field) = elm; \
|
||||
RB_AUGMENT(parent); \
|
||||
} else \
|
||||
RB_ROOT(head) = elm; \
|
||||
name##_RB_INSERT_COLOR(head, elm); \
|
||||
return (NULL); \
|
||||
}
|
||||
|
||||
#define RB_GENERATE_FIND(name, type, field, cmp, attr) \
|
||||
/* Finds the node with the same key as elm */ \
|
||||
attr struct type* name##_RB_FIND(struct name* head, struct type* elm) { \
|
||||
struct type* tmp = RB_ROOT(head); \
|
||||
int comp; \
|
||||
while (tmp) { \
|
||||
comp = cmp(elm, tmp); \
|
||||
if (comp < 0) \
|
||||
tmp = RB_LEFT(tmp, field); \
|
||||
else if (comp > 0) \
|
||||
tmp = RB_RIGHT(tmp, field); \
|
||||
else \
|
||||
return (tmp); \
|
||||
} \
|
||||
return (NULL); \
|
||||
}
|
||||
|
||||
#define RB_GENERATE_NFIND(name, type, field, cmp, attr) \
|
||||
/* Finds the first node greater than or equal to the search key */ \
|
||||
attr struct type* name##_RB_NFIND(struct name* head, struct type* elm) { \
|
||||
struct type* tmp = RB_ROOT(head); \
|
||||
struct type* res = NULL; \
|
||||
int comp; \
|
||||
while (tmp) { \
|
||||
comp = cmp(elm, tmp); \
|
||||
if (comp < 0) { \
|
||||
res = tmp; \
|
||||
tmp = RB_LEFT(tmp, field); \
|
||||
} else if (comp > 0) \
|
||||
tmp = RB_RIGHT(tmp, field); \
|
||||
else \
|
||||
return (tmp); \
|
||||
} \
|
||||
return (res); \
|
||||
}
|
||||
|
||||
#define RB_GENERATE_FIND_LIGHT(name, type, field, lcmp, attr) \
|
||||
/* Finds the node with the same key as elm */ \
|
||||
attr struct type* name##_RB_FIND_LIGHT(struct name* head, const void* lelm) { \
|
||||
struct type* tmp = RB_ROOT(head); \
|
||||
int comp; \
|
||||
while (tmp) { \
|
||||
comp = lcmp(lelm, tmp); \
|
||||
if (comp < 0) \
|
||||
tmp = RB_LEFT(tmp, field); \
|
||||
else if (comp > 0) \
|
||||
tmp = RB_RIGHT(tmp, field); \
|
||||
else \
|
||||
return (tmp); \
|
||||
} \
|
||||
return (NULL); \
|
||||
}
|
||||
|
||||
#define RB_GENERATE_NFIND_LIGHT(name, type, field, lcmp, attr) \
|
||||
/* Finds the first node greater than or equal to the search key */ \
|
||||
attr struct type* name##_RB_NFIND_LIGHT(struct name* head, const void* lelm) { \
|
||||
struct type* tmp = RB_ROOT(head); \
|
||||
struct type* res = NULL; \
|
||||
int comp; \
|
||||
while (tmp) { \
|
||||
comp = lcmp(lelm, tmp); \
|
||||
if (comp < 0) { \
|
||||
res = tmp; \
|
||||
tmp = RB_LEFT(tmp, field); \
|
||||
} else if (comp > 0) \
|
||||
tmp = RB_RIGHT(tmp, field); \
|
||||
else \
|
||||
return (tmp); \
|
||||
} \
|
||||
return (res); \
|
||||
}
|
||||
|
||||
#define RB_GENERATE_NEXT(name, type, field, attr) \
|
||||
/* ARGSUSED */ \
|
||||
attr struct type* name##_RB_NEXT(struct type* elm) { \
|
||||
if (RB_RIGHT(elm, field)) { \
|
||||
elm = RB_RIGHT(elm, field); \
|
||||
while (RB_LEFT(elm, field)) \
|
||||
elm = RB_LEFT(elm, field); \
|
||||
} else { \
|
||||
if (RB_PARENT(elm, field) && (elm == RB_LEFT(RB_PARENT(elm, field), field))) \
|
||||
elm = RB_PARENT(elm, field); \
|
||||
else { \
|
||||
while (RB_PARENT(elm, field) && (elm == RB_RIGHT(RB_PARENT(elm, field), field))) \
|
||||
elm = RB_PARENT(elm, field); \
|
||||
elm = RB_PARENT(elm, field); \
|
||||
} \
|
||||
} \
|
||||
return (elm); \
|
||||
}
|
||||
|
||||
#define RB_GENERATE_PREV(name, type, field, attr) \
|
||||
/* ARGSUSED */ \
|
||||
attr struct type* name##_RB_PREV(struct type* elm) { \
|
||||
if (RB_LEFT(elm, field)) { \
|
||||
elm = RB_LEFT(elm, field); \
|
||||
while (RB_RIGHT(elm, field)) \
|
||||
elm = RB_RIGHT(elm, field); \
|
||||
} else { \
|
||||
if (RB_PARENT(elm, field) && (elm == RB_RIGHT(RB_PARENT(elm, field), field))) \
|
||||
elm = RB_PARENT(elm, field); \
|
||||
else { \
|
||||
while (RB_PARENT(elm, field) && (elm == RB_LEFT(RB_PARENT(elm, field), field))) \
|
||||
elm = RB_PARENT(elm, field); \
|
||||
elm = RB_PARENT(elm, field); \
|
||||
} \
|
||||
} \
|
||||
return (elm); \
|
||||
}
|
||||
|
||||
#define RB_GENERATE_MINMAX(name, type, field, attr) \
|
||||
attr struct type* name##_RB_MINMAX(struct name* head, int val) { \
|
||||
struct type* tmp = RB_ROOT(head); \
|
||||
struct type* parent = NULL; \
|
||||
while (tmp) { \
|
||||
parent = tmp; \
|
||||
if (val < 0) \
|
||||
tmp = RB_LEFT(tmp, field); \
|
||||
else \
|
||||
tmp = RB_RIGHT(tmp, field); \
|
||||
} \
|
||||
return (parent); \
|
||||
}
|
||||
|
||||
#define RB_NEGINF -1
|
||||
#define RB_INF 1
|
||||
|
||||
#define RB_INSERT(name, x, y) name##_RB_INSERT(x, y)
|
||||
#define RB_REMOVE(name, x, y) name##_RB_REMOVE(x, y)
|
||||
#define RB_FIND(name, x, y) name##_RB_FIND(x, y)
|
||||
#define RB_NFIND(name, x, y) name##_RB_NFIND(x, y)
|
||||
#define RB_FIND_LIGHT(name, x, y) name##_RB_FIND_LIGHT(x, y)
|
||||
#define RB_NFIND_LIGHT(name, x, y) name##_RB_NFIND_LIGHT(x, y)
|
||||
#define RB_NEXT(name, x, y) name##_RB_NEXT(y)
|
||||
#define RB_PREV(name, x, y) name##_RB_PREV(y)
|
||||
#define RB_MIN(name, x) name##_RB_MINMAX(x, RB_NEGINF)
|
||||
#define RB_MAX(name, x) name##_RB_MINMAX(x, RB_INF)
|
||||
|
||||
#define RB_FOREACH(x, name, head) \
|
||||
for ((x) = RB_MIN(name, head); (x) != NULL; (x) = name##_RB_NEXT(x))
|
||||
|
||||
#define RB_FOREACH_FROM(x, name, y) \
|
||||
for ((x) = (y); ((x) != NULL) && ((y) = name##_RB_NEXT(x), (x) != NULL); (x) = (y))
|
||||
|
||||
#define RB_FOREACH_SAFE(x, name, head, y) \
|
||||
for ((x) = RB_MIN(name, head); ((x) != NULL) && ((y) = name##_RB_NEXT(x), (x) != NULL); \
|
||||
(x) = (y))
|
||||
|
||||
#define RB_FOREACH_REVERSE(x, name, head) \
|
||||
for ((x) = RB_MAX(name, head); (x) != NULL; (x) = name##_RB_PREV(x))
|
||||
|
||||
#define RB_FOREACH_REVERSE_FROM(x, name, y) \
|
||||
for ((x) = (y); ((x) != NULL) && ((y) = name##_RB_PREV(x), (x) != NULL); (x) = (y))
|
||||
|
||||
#define RB_FOREACH_REVERSE_SAFE(x, name, head, y) \
|
||||
for ((x) = RB_MAX(name, head); ((x) != NULL) && ((y) = name##_RB_PREV(x), (x) != NULL); \
|
||||
(x) = (y))
|
||||
|
||||
#endif /* _SYS_TREE_H_ */
|
||||
@@ -15,12 +15,10 @@ void FreeMemoryPages(void* base, std::size_t size) noexcept;
|
||||
template <typename T>
|
||||
class VirtualBuffer final {
|
||||
public:
|
||||
// TODO: Uncomment this and change Common::PageTable::PageInfo to be trivially constructible
|
||||
// using std::atomic_ref once libc++ has support for it
|
||||
// static_assert(
|
||||
// std::is_trivially_constructible_v<T>,
|
||||
// "T must be trivially constructible, as non-trivial constructors will not be executed "
|
||||
// "with the current allocator");
|
||||
static_assert(
|
||||
std::is_trivially_constructible_v<T>,
|
||||
"T must be trivially constructible, as non-trivial constructors will not be executed "
|
||||
"with the current allocator");
|
||||
|
||||
constexpr VirtualBuffer() = default;
|
||||
explicit VirtualBuffer(std::size_t count) : alloc_size{count * sizeof(T)} {
|
||||
|
||||
@@ -142,6 +142,8 @@ add_library(core STATIC
|
||||
hardware_interrupt_manager.h
|
||||
hle/ipc.h
|
||||
hle/ipc_helpers.h
|
||||
hle/kernel/address_arbiter.cpp
|
||||
hle/kernel/address_arbiter.h
|
||||
hle/kernel/client_port.cpp
|
||||
hle/kernel/client_port.h
|
||||
hle/kernel/client_session.cpp
|
||||
@@ -155,19 +157,13 @@ add_library(core STATIC
|
||||
hle/kernel/handle_table.h
|
||||
hle/kernel/hle_ipc.cpp
|
||||
hle/kernel/hle_ipc.h
|
||||
hle/kernel/k_address_arbiter.cpp
|
||||
hle/kernel/k_address_arbiter.h
|
||||
hle/kernel/k_affinity_mask.h
|
||||
hle/kernel/k_condition_variable.cpp
|
||||
hle/kernel/k_condition_variable.h
|
||||
hle/kernel/k_priority_queue.h
|
||||
hle/kernel/k_scheduler.cpp
|
||||
hle/kernel/k_scheduler.h
|
||||
hle/kernel/k_scheduler_lock.h
|
||||
hle/kernel/k_scoped_lock.h
|
||||
hle/kernel/k_scoped_scheduler_lock_and_sleep.h
|
||||
hle/kernel/k_synchronization_object.cpp
|
||||
hle/kernel/k_synchronization_object.h
|
||||
hle/kernel/kernel.cpp
|
||||
hle/kernel/kernel.h
|
||||
hle/kernel/memory/address_space_info.cpp
|
||||
@@ -187,6 +183,8 @@ add_library(core STATIC
|
||||
hle/kernel/memory/slab_heap.h
|
||||
hle/kernel/memory/system_control.cpp
|
||||
hle/kernel/memory/system_control.h
|
||||
hle/kernel/mutex.cpp
|
||||
hle/kernel/mutex.h
|
||||
hle/kernel/object.cpp
|
||||
hle/kernel/object.h
|
||||
hle/kernel/physical_core.cpp
|
||||
@@ -204,18 +202,18 @@ add_library(core STATIC
|
||||
hle/kernel/server_port.h
|
||||
hle/kernel/server_session.cpp
|
||||
hle/kernel/server_session.h
|
||||
hle/kernel/service_thread.cpp
|
||||
hle/kernel/service_thread.h
|
||||
hle/kernel/session.cpp
|
||||
hle/kernel/session.h
|
||||
hle/kernel/shared_memory.cpp
|
||||
hle/kernel/shared_memory.h
|
||||
hle/kernel/svc.cpp
|
||||
hle/kernel/svc.h
|
||||
hle/kernel/svc_common.h
|
||||
hle/kernel/svc_results.h
|
||||
hle/kernel/svc_types.h
|
||||
hle/kernel/svc_wrap.h
|
||||
hle/kernel/synchronization_object.cpp
|
||||
hle/kernel/synchronization_object.h
|
||||
hle/kernel/synchronization.cpp
|
||||
hle/kernel/synchronization.h
|
||||
hle/kernel/thread.cpp
|
||||
hle/kernel/thread.h
|
||||
hle/kernel/time_manager.cpp
|
||||
@@ -502,6 +500,7 @@ add_library(core STATIC
|
||||
hle/service/sm/controller.h
|
||||
hle/service/sm/sm.cpp
|
||||
hle/service/sm/sm.h
|
||||
hle/service/sockets/blocking_worker.h
|
||||
hle/service/sockets/bsd.cpp
|
||||
hle/service/sockets/bsd.h
|
||||
hle/service/sockets/ethc.cpp
|
||||
@@ -635,8 +634,6 @@ if (MSVC)
|
||||
/we4267
|
||||
# 'context' : truncation from 'type1' to 'type2'
|
||||
/we4305
|
||||
# 'function' : not all control paths return a value
|
||||
/we4715
|
||||
)
|
||||
else()
|
||||
target_compile_options(core PRIVATE
|
||||
|
||||
@@ -26,10 +26,9 @@ using CPUInterrupts = std::array<CPUInterruptHandler, Core::Hardware::NUM_CPU_CO
|
||||
/// Generic ARMv8 CPU interface
|
||||
class ARM_Interface : NonCopyable {
|
||||
public:
|
||||
explicit ARM_Interface(System& system_, CPUInterrupts& interrupt_handlers_,
|
||||
bool uses_wall_clock_)
|
||||
: system{system_}, interrupt_handlers{interrupt_handlers_}, uses_wall_clock{
|
||||
uses_wall_clock_} {}
|
||||
explicit ARM_Interface(System& system_, CPUInterrupts& interrupt_handlers, bool uses_wall_clock)
|
||||
: system{system_}, interrupt_handlers{interrupt_handlers}, uses_wall_clock{
|
||||
uses_wall_clock} {}
|
||||
virtual ~ARM_Interface() = default;
|
||||
|
||||
struct ThreadContext32 {
|
||||
|
||||
@@ -71,8 +71,15 @@ public:
|
||||
}
|
||||
|
||||
void ExceptionRaised(u32 pc, Dynarmic::A32::Exception exception) override {
|
||||
switch (exception) {
|
||||
case Dynarmic::A32::Exception::UndefinedInstruction:
|
||||
case Dynarmic::A32::Exception::UnpredictableInstruction:
|
||||
break;
|
||||
case Dynarmic::A32::Exception::Breakpoint:
|
||||
break;
|
||||
}
|
||||
LOG_CRITICAL(Core_ARM, "ExceptionRaised(exception = {}, pc = {:08X}, code = {:08X})",
|
||||
exception, pc, MemoryReadCode(pc));
|
||||
static_cast<std::size_t>(exception), pc, MemoryReadCode(pc));
|
||||
UNIMPLEMENTED();
|
||||
}
|
||||
|
||||
@@ -126,7 +133,6 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable&
|
||||
config.page_table = reinterpret_cast<std::array<std::uint8_t*, NUM_PAGE_TABLE_ENTRIES>*>(
|
||||
page_table.pointers.data());
|
||||
config.absolute_offset_page_table = true;
|
||||
config.page_table_pointer_mask_bits = Common::PageTable::ATTRIBUTE_BITS;
|
||||
config.detect_misaligned_access_via_page_table = 16 | 32 | 64 | 128;
|
||||
config.only_detect_misalignment_via_page_table_on_page_boundary = true;
|
||||
|
||||
@@ -174,9 +180,6 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable&
|
||||
if (Settings::values.cpuopt_unsafe_reduce_fp_error) {
|
||||
config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_ReducedErrorFP;
|
||||
}
|
||||
if (Settings::values.cpuopt_unsafe_inaccurate_nan) {
|
||||
config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_InaccurateNaN;
|
||||
}
|
||||
}
|
||||
|
||||
return std::make_unique<Dynarmic::A32::Jit>(config);
|
||||
|
||||
@@ -152,7 +152,6 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable&
|
||||
// Memory
|
||||
config.page_table = reinterpret_cast<void**>(page_table.pointers.data());
|
||||
config.page_table_address_space_bits = address_space_bits;
|
||||
config.page_table_pointer_mask_bits = Common::PageTable::ATTRIBUTE_BITS;
|
||||
config.silently_mirror_page_table = false;
|
||||
config.absolute_offset_page_table = true;
|
||||
config.detect_misaligned_access_via_page_table = 16 | 32 | 64 | 128;
|
||||
@@ -212,9 +211,6 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable&
|
||||
if (Settings::values.cpuopt_unsafe_reduce_fp_error) {
|
||||
config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_ReducedErrorFP;
|
||||
}
|
||||
if (Settings::values.cpuopt_unsafe_inaccurate_nan) {
|
||||
config.optimizations |= Dynarmic::OptimizationFlag::Unsafe_InaccurateNaN;
|
||||
}
|
||||
}
|
||||
|
||||
return std::make_shared<Dynarmic::A64::Jit>(config);
|
||||
|
||||
@@ -159,7 +159,7 @@ struct System::Impl {
|
||||
device_memory = std::make_unique<Core::DeviceMemory>();
|
||||
|
||||
is_multicore = Settings::values.use_multi_core.GetValue();
|
||||
is_async_gpu = Settings::values.use_asynchronous_gpu_emulation.GetValue();
|
||||
is_async_gpu = is_multicore || Settings::values.use_asynchronous_gpu_emulation.GetValue();
|
||||
|
||||
kernel.SetMulticore(is_multicore);
|
||||
cpu_manager.SetMulticore(is_multicore);
|
||||
@@ -307,6 +307,7 @@ struct System::Impl {
|
||||
service_manager.reset();
|
||||
cheat_engine.reset();
|
||||
telemetry_session.reset();
|
||||
device_memory.reset();
|
||||
|
||||
// Close all CPU/threading state
|
||||
cpu_manager.Shutdown();
|
||||
|
||||
@@ -49,7 +49,6 @@ void CoreTiming::ThreadEntry(CoreTiming& instance) {
|
||||
Common::SetCurrentThreadPriority(Common::ThreadPriority::VeryHigh);
|
||||
instance.on_thread_init();
|
||||
instance.ThreadLoop();
|
||||
MicroProfileOnThreadExit();
|
||||
}
|
||||
|
||||
void CoreTiming::Initialize(std::function<void()>&& on_thread_init_) {
|
||||
|
||||
@@ -143,7 +143,6 @@ u64 GetSignatureTypeDataSize(SignatureType type) {
|
||||
return 0x3C;
|
||||
}
|
||||
UNREACHABLE();
|
||||
return 0;
|
||||
}
|
||||
|
||||
u64 GetSignatureTypePaddingSize(SignatureType type) {
|
||||
@@ -158,7 +157,6 @@ u64 GetSignatureTypePaddingSize(SignatureType type) {
|
||||
return 0x40;
|
||||
}
|
||||
UNREACHABLE();
|
||||
return 0;
|
||||
}
|
||||
|
||||
SignatureType Ticket::GetSignatureType() const {
|
||||
@@ -171,7 +169,8 @@ SignatureType Ticket::GetSignatureType() const {
|
||||
if (const auto* ticket = std::get_if<ECDSATicket>(&data)) {
|
||||
return ticket->sig_type;
|
||||
}
|
||||
throw std::bad_variant_access{};
|
||||
|
||||
UNREACHABLE();
|
||||
}
|
||||
|
||||
TicketData& Ticket::GetData() {
|
||||
@@ -184,7 +183,8 @@ TicketData& Ticket::GetData() {
|
||||
if (auto* ticket = std::get_if<ECDSATicket>(&data)) {
|
||||
return ticket->data;
|
||||
}
|
||||
throw std::bad_variant_access{};
|
||||
|
||||
UNREACHABLE();
|
||||
}
|
||||
|
||||
const TicketData& Ticket::GetData() const {
|
||||
@@ -197,7 +197,8 @@ const TicketData& Ticket::GetData() const {
|
||||
if (const auto* ticket = std::get_if<ECDSATicket>(&data)) {
|
||||
return ticket->data;
|
||||
}
|
||||
throw std::bad_variant_access{};
|
||||
|
||||
UNREACHABLE();
|
||||
}
|
||||
|
||||
u64 Ticket::GetSize() const {
|
||||
|
||||
@@ -51,8 +51,8 @@ std::pair<std::size_t, std::size_t> SearchBucketEntry(u64 offset, const BlockTyp
|
||||
low = mid + 1;
|
||||
}
|
||||
}
|
||||
|
||||
UNREACHABLE_MSG("Offset could not be found in BKTR block.");
|
||||
return {0, 0};
|
||||
}
|
||||
} // Anonymous namespace
|
||||
|
||||
|
||||
@@ -105,8 +105,7 @@ ContentRecordType GetCRTypeFromNCAType(NCAContentType type) {
|
||||
// TODO(DarkLordZach): Peek at NCA contents to differentiate Manual and Legal.
|
||||
return ContentRecordType::HtmlDocument;
|
||||
default:
|
||||
UNREACHABLE_MSG("Invalid NCAContentType={:02X}", type);
|
||||
return ContentRecordType{};
|
||||
UNREACHABLE_MSG("Invalid NCAContentType={:02X}", static_cast<u8>(type));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -67,18 +67,18 @@ public:
|
||||
virtual void Refresh() = 0;
|
||||
|
||||
virtual bool HasEntry(u64 title_id, ContentRecordType type) const = 0;
|
||||
bool HasEntry(ContentProviderEntry entry) const;
|
||||
virtual bool HasEntry(ContentProviderEntry entry) const;
|
||||
|
||||
virtual std::optional<u32> GetEntryVersion(u64 title_id) const = 0;
|
||||
|
||||
virtual VirtualFile GetEntryUnparsed(u64 title_id, ContentRecordType type) const = 0;
|
||||
VirtualFile GetEntryUnparsed(ContentProviderEntry entry) const;
|
||||
virtual VirtualFile GetEntryUnparsed(ContentProviderEntry entry) const;
|
||||
|
||||
virtual VirtualFile GetEntryRaw(u64 title_id, ContentRecordType type) const = 0;
|
||||
VirtualFile GetEntryRaw(ContentProviderEntry entry) const;
|
||||
virtual VirtualFile GetEntryRaw(ContentProviderEntry entry) const;
|
||||
|
||||
virtual std::unique_ptr<NCA> GetEntry(u64 title_id, ContentRecordType type) const = 0;
|
||||
std::unique_ptr<NCA> GetEntry(ContentProviderEntry entry) const;
|
||||
virtual std::unique_ptr<NCA> GetEntry(ContentProviderEntry entry) const;
|
||||
|
||||
virtual std::vector<ContentProviderEntry> ListEntries() const;
|
||||
|
||||
|
||||
@@ -5,6 +5,7 @@
|
||||
#pragma once
|
||||
|
||||
#include <functional>
|
||||
#include <span>
|
||||
|
||||
#include "common/common_types.h"
|
||||
|
||||
@@ -23,9 +24,9 @@ struct ControllerParameters {
|
||||
bool keep_controllers_connected{};
|
||||
bool enable_single_mode{};
|
||||
bool enable_border_color{};
|
||||
std::vector<BorderColor> border_colors{};
|
||||
std::span<BorderColor> border_colors{};
|
||||
bool enable_explain_text{};
|
||||
std::vector<ExplainText> explain_text{};
|
||||
std::span<ExplainText> explain_text{};
|
||||
bool allow_pro_controller{};
|
||||
bool allow_handheld{};
|
||||
bool allow_dual_joycons{};
|
||||
|
||||
src/core/hle/kernel/address_arbiter.cpp (new file, 317 lines)
@@ -0,0 +1,317 @@
|
||||
// Copyright 2018 yuzu emulator team
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include <algorithm>
|
||||
#include <vector>
|
||||
|
||||
#include "common/assert.h"
|
||||
#include "common/common_types.h"
|
||||
#include "core/arm/exclusive_monitor.h"
|
||||
#include "core/core.h"
|
||||
#include "core/hle/kernel/address_arbiter.h"
|
||||
#include "core/hle/kernel/errors.h"
|
||||
#include "core/hle/kernel/handle_table.h"
|
||||
#include "core/hle/kernel/k_scheduler.h"
|
||||
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
|
||||
#include "core/hle/kernel/kernel.h"
|
||||
#include "core/hle/kernel/thread.h"
|
||||
#include "core/hle/kernel/time_manager.h"
|
||||
#include "core/hle/result.h"
|
||||
#include "core/memory.h"
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
// Wake up num_to_wake (or all) threads in a vector.
|
||||
void AddressArbiter::WakeThreads(const std::vector<std::shared_ptr<Thread>>& waiting_threads,
|
||||
s32 num_to_wake) {
|
||||
// Only process up to 'num_to_wake' threads, unless 'num_to_wake' is <= 0, in which case
|
||||
// process them all.
|
||||
std::size_t last = waiting_threads.size();
|
||||
if (num_to_wake > 0) {
|
||||
last = std::min(last, static_cast<std::size_t>(num_to_wake));
|
||||
}
|
||||
|
||||
// Signal the waiting threads.
|
||||
for (std::size_t i = 0; i < last; i++) {
|
||||
waiting_threads[i]->SetSynchronizationResults(nullptr, RESULT_SUCCESS);
|
||||
RemoveThread(waiting_threads[i]);
|
||||
waiting_threads[i]->WaitForArbitration(false);
|
||||
waiting_threads[i]->ResumeFromWait();
|
||||
}
|
||||
}
|
||||
|
||||
AddressArbiter::AddressArbiter(Core::System& system) : system{system} {}
|
||||
AddressArbiter::~AddressArbiter() = default;
|
||||
|
||||
ResultCode AddressArbiter::SignalToAddress(VAddr address, SignalType type, s32 value,
|
||||
s32 num_to_wake) {
|
||||
switch (type) {
|
||||
case SignalType::Signal:
|
||||
return SignalToAddressOnly(address, num_to_wake);
|
||||
case SignalType::IncrementAndSignalIfEqual:
|
||||
return IncrementAndSignalToAddressIfEqual(address, value, num_to_wake);
|
||||
case SignalType::ModifyByWaitingCountAndSignalIfEqual:
|
||||
return ModifyByWaitingCountAndSignalToAddressIfEqual(address, value, num_to_wake);
|
||||
default:
|
||||
return ERR_INVALID_ENUM_VALUE;
|
||||
}
|
||||
}
|
||||
|
||||
ResultCode AddressArbiter::SignalToAddressOnly(VAddr address, s32 num_to_wake) {
|
||||
KScopedSchedulerLock lock(system.Kernel());
|
||||
const std::vector<std::shared_ptr<Thread>> waiting_threads =
|
||||
GetThreadsWaitingOnAddress(address);
|
||||
WakeThreads(waiting_threads, num_to_wake);
|
||||
return RESULT_SUCCESS;
|
||||
}
|
||||
|
||||
ResultCode AddressArbiter::IncrementAndSignalToAddressIfEqual(VAddr address, s32 value,
|
||||
s32 num_to_wake) {
|
||||
KScopedSchedulerLock lock(system.Kernel());
|
||||
auto& memory = system.Memory();
|
||||
|
||||
// Ensure that we can write to the address.
|
||||
if (!memory.IsValidVirtualAddress(address)) {
|
||||
return ERR_INVALID_ADDRESS_STATE;
|
||||
}
|
||||
|
||||
const std::size_t current_core = system.CurrentCoreIndex();
|
||||
auto& monitor = system.Monitor();
|
||||
u32 current_value;
|
||||
do {
|
||||
current_value = monitor.ExclusiveRead32(current_core, address);
|
||||
|
||||
if (current_value != static_cast<u32>(value)) {
|
||||
return ERR_INVALID_STATE;
|
||||
}
|
||||
current_value++;
|
||||
} while (!monitor.ExclusiveWrite32(current_core, address, current_value));
|
||||
|
||||
return SignalToAddressOnly(address, num_to_wake);
|
||||
}
|
||||
|
||||
ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value,
|
||||
s32 num_to_wake) {
|
||||
KScopedSchedulerLock lock(system.Kernel());
|
||||
auto& memory = system.Memory();
|
||||
|
||||
// Ensure that we can write to the address.
|
||||
if (!memory.IsValidVirtualAddress(address)) {
|
||||
return ERR_INVALID_ADDRESS_STATE;
|
||||
}
|
||||
|
||||
// Get threads waiting on the address.
|
||||
const std::vector<std::shared_ptr<Thread>> waiting_threads =
|
||||
GetThreadsWaitingOnAddress(address);
|
||||
|
||||
const std::size_t current_core = system.CurrentCoreIndex();
|
||||
auto& monitor = system.Monitor();
|
||||
s32 updated_value;
|
||||
do {
|
||||
updated_value = monitor.ExclusiveRead32(current_core, address);
|
||||
|
||||
if (updated_value != value) {
|
||||
return ERR_INVALID_STATE;
|
||||
}
|
||||
// Determine the modified value depending on the waiting count.
|
||||
if (num_to_wake <= 0) {
|
||||
if (waiting_threads.empty()) {
|
||||
updated_value = value + 1;
|
||||
} else {
|
||||
updated_value = value - 1;
|
||||
}
|
||||
} else {
|
||||
if (waiting_threads.empty()) {
|
||||
updated_value = value + 1;
|
||||
} else if (waiting_threads.size() <= static_cast<u32>(num_to_wake)) {
|
||||
updated_value = value - 1;
|
||||
} else {
|
||||
updated_value = value;
|
||||
}
|
||||
}
|
||||
} while (!monitor.ExclusiveWrite32(current_core, address, updated_value));
|
||||
|
||||
WakeThreads(waiting_threads, num_to_wake);
|
||||
return RESULT_SUCCESS;
|
||||
}
|
||||
|
||||
ResultCode AddressArbiter::WaitForAddress(VAddr address, ArbitrationType type, s32 value,
|
||||
s64 timeout_ns) {
|
||||
switch (type) {
|
||||
case ArbitrationType::WaitIfLessThan:
|
||||
return WaitForAddressIfLessThan(address, value, timeout_ns, false);
|
||||
case ArbitrationType::DecrementAndWaitIfLessThan:
|
||||
return WaitForAddressIfLessThan(address, value, timeout_ns, true);
|
||||
case ArbitrationType::WaitIfEqual:
|
||||
return WaitForAddressIfEqual(address, value, timeout_ns);
|
||||
default:
|
||||
return ERR_INVALID_ENUM_VALUE;
|
||||
}
|
||||
}
|
||||
|
||||
ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s64 timeout,
|
||||
bool should_decrement) {
|
||||
auto& memory = system.Memory();
|
||||
auto& kernel = system.Kernel();
|
||||
Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread();
|
||||
|
||||
Handle event_handle = InvalidHandle;
|
||||
{
|
||||
KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout);
|
||||
|
||||
if (current_thread->IsPendingTermination()) {
|
||||
lock.CancelSleep();
|
||||
return ERR_THREAD_TERMINATING;
|
||||
}
|
||||
|
||||
// Ensure that we can read the address.
|
||||
if (!memory.IsValidVirtualAddress(address)) {
|
||||
lock.CancelSleep();
|
||||
return ERR_INVALID_ADDRESS_STATE;
|
||||
}
|
||||
|
||||
s32 current_value = static_cast<s32>(memory.Read32(address));
|
||||
if (current_value >= value) {
|
||||
lock.CancelSleep();
|
||||
return ERR_INVALID_STATE;
|
||||
}
|
||||
|
||||
current_thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
|
||||
|
||||
s32 decrement_value;
|
||||
|
||||
const std::size_t current_core = system.CurrentCoreIndex();
|
||||
auto& monitor = system.Monitor();
|
||||
do {
|
||||
current_value = static_cast<s32>(monitor.ExclusiveRead32(current_core, address));
|
||||
if (should_decrement) {
|
||||
decrement_value = current_value - 1;
|
||||
} else {
|
||||
decrement_value = current_value;
|
||||
}
|
||||
} while (
|
||||
!monitor.ExclusiveWrite32(current_core, address, static_cast<u32>(decrement_value)));
|
||||
|
||||
// Short-circuit without rescheduling, if timeout is zero.
|
||||
if (timeout == 0) {
|
||||
lock.CancelSleep();
|
||||
return RESULT_TIMEOUT;
|
||||
}
|
||||
|
||||
current_thread->SetArbiterWaitAddress(address);
|
||||
InsertThread(SharedFrom(current_thread));
|
||||
current_thread->SetStatus(ThreadStatus::WaitArb);
|
||||
current_thread->WaitForArbitration(true);
|
||||
}
|
||||
|
||||
if (event_handle != InvalidHandle) {
|
||||
auto& time_manager = kernel.TimeManager();
|
||||
time_manager.UnscheduleTimeEvent(event_handle);
|
||||
}
|
||||
|
||||
{
|
||||
KScopedSchedulerLock lock(kernel);
|
||||
if (current_thread->IsWaitingForArbitration()) {
|
||||
RemoveThread(SharedFrom(current_thread));
|
||||
current_thread->WaitForArbitration(false);
|
||||
}
|
||||
}
|
||||
|
||||
return current_thread->GetSignalingResult();
|
||||
}
|
||||
|
||||
ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout) {
|
||||
auto& memory = system.Memory();
|
||||
auto& kernel = system.Kernel();
|
||||
Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread();
|
||||
|
||||
Handle event_handle = InvalidHandle;
|
||||
{
|
||||
KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout);
|
||||
|
||||
if (current_thread->IsPendingTermination()) {
|
||||
lock.CancelSleep();
|
||||
return ERR_THREAD_TERMINATING;
|
||||
}
|
||||
|
||||
// Ensure that we can read the address.
|
||||
if (!memory.IsValidVirtualAddress(address)) {
|
||||
lock.CancelSleep();
|
||||
return ERR_INVALID_ADDRESS_STATE;
|
||||
}
|
||||
|
||||
s32 current_value = static_cast<s32>(memory.Read32(address));
|
||||
if (current_value != value) {
|
||||
lock.CancelSleep();
|
||||
return ERR_INVALID_STATE;
|
||||
}
|
||||
|
||||
// Short-circuit without rescheduling, if timeout is zero.
|
||||
if (timeout == 0) {
|
||||
lock.CancelSleep();
|
||||
return RESULT_TIMEOUT;
|
||||
}
|
||||
|
||||
current_thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
|
||||
current_thread->SetArbiterWaitAddress(address);
|
||||
InsertThread(SharedFrom(current_thread));
|
||||
current_thread->SetStatus(ThreadStatus::WaitArb);
|
||||
current_thread->WaitForArbitration(true);
|
||||
}
|
||||
|
||||
if (event_handle != InvalidHandle) {
|
||||
auto& time_manager = kernel.TimeManager();
|
||||
time_manager.UnscheduleTimeEvent(event_handle);
|
||||
}
|
||||
|
||||
{
|
||||
KScopedSchedulerLock lock(kernel);
|
||||
if (current_thread->IsWaitingForArbitration()) {
|
||||
RemoveThread(SharedFrom(current_thread));
|
||||
current_thread->WaitForArbitration(false);
|
||||
}
|
||||
}
|
||||
|
||||
return current_thread->GetSignalingResult();
|
||||
}
|
||||
|
||||
void AddressArbiter::InsertThread(std::shared_ptr<Thread> thread) {
|
||||
const VAddr arb_addr = thread->GetArbiterWaitAddress();
|
||||
std::list<std::shared_ptr<Thread>>& thread_list = arb_threads[arb_addr];
|
||||
|
||||
const auto iter =
|
||||
std::find_if(thread_list.cbegin(), thread_list.cend(), [&thread](const auto& entry) {
|
||||
return entry->GetPriority() >= thread->GetPriority();
|
||||
});
|
||||
|
||||
if (iter == thread_list.cend()) {
|
||||
thread_list.push_back(std::move(thread));
|
||||
} else {
|
||||
thread_list.insert(iter, std::move(thread));
|
||||
}
|
||||
}
|
||||
|
||||
void AddressArbiter::RemoveThread(std::shared_ptr<Thread> thread) {
|
||||
const VAddr arb_addr = thread->GetArbiterWaitAddress();
|
||||
std::list<std::shared_ptr<Thread>>& thread_list = arb_threads[arb_addr];
|
||||
|
||||
const auto iter = std::find_if(thread_list.cbegin(), thread_list.cend(),
|
||||
[&thread](const auto& entry) { return thread == entry; });
|
||||
|
||||
if (iter != thread_list.cend()) {
|
||||
thread_list.erase(iter);
|
||||
}
|
||||
}
|
||||
|
||||
std::vector<std::shared_ptr<Thread>> AddressArbiter::GetThreadsWaitingOnAddress(
|
||||
VAddr address) const {
|
||||
const auto iter = arb_threads.find(address);
|
||||
if (iter == arb_threads.cend()) {
|
||||
return {};
|
||||
}
|
||||
|
||||
const std::list<std::shared_ptr<Thread>>& thread_list = iter->second;
|
||||
return {thread_list.cbegin(), thread_list.cend()};
|
||||
}
|
||||
} // namespace Kernel
|
||||
src/core/hle/kernel/address_arbiter.h (new file, 91 lines)
@@ -0,0 +1,91 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <list>
#include <memory>
#include <unordered_map>
#include <vector>

#include "common/common_types.h"

union ResultCode;

namespace Core {
class System;
}

namespace Kernel {

class Thread;

class AddressArbiter {
public:
    enum class ArbitrationType {
        WaitIfLessThan = 0,
        DecrementAndWaitIfLessThan = 1,
        WaitIfEqual = 2,
    };

    enum class SignalType {
        Signal = 0,
        IncrementAndSignalIfEqual = 1,
        ModifyByWaitingCountAndSignalIfEqual = 2,
    };

    explicit AddressArbiter(Core::System& system);
    ~AddressArbiter();

    AddressArbiter(const AddressArbiter&) = delete;
    AddressArbiter& operator=(const AddressArbiter&) = delete;

    AddressArbiter(AddressArbiter&&) = default;
    AddressArbiter& operator=(AddressArbiter&&) = delete;

    /// Signals an address being waited on with a particular signaling type.
    ResultCode SignalToAddress(VAddr address, SignalType type, s32 value, s32 num_to_wake);

    /// Waits on an address with a particular arbitration type.
    ResultCode WaitForAddress(VAddr address, ArbitrationType type, s32 value, s64 timeout_ns);

private:
    /// Signals an address being waited on.
    ResultCode SignalToAddressOnly(VAddr address, s32 num_to_wake);

    /// Signals an address being waited on and increments its value if equal to the value argument.
    ResultCode IncrementAndSignalToAddressIfEqual(VAddr address, s32 value, s32 num_to_wake);

    /// Signals an address being waited on and modifies its value based on waiting thread count if
    /// equal to the value argument.
    ResultCode ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value,
                                                             s32 num_to_wake);

    /// Waits on an address if the value passed is less than the argument value,
    /// optionally decrementing.
    ResultCode WaitForAddressIfLessThan(VAddr address, s32 value, s64 timeout,
                                        bool should_decrement);

    /// Waits on an address if the value passed is equal to the argument value.
    ResultCode WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout);

    /// Wakes up num_to_wake (or all) threads in a vector.
    void WakeThreads(const std::vector<std::shared_ptr<Thread>>& waiting_threads, s32 num_to_wake);

    /// Inserts a thread into the address arbiter container.
    void InsertThread(std::shared_ptr<Thread> thread);

    /// Removes a thread from the address arbiter container.
    void RemoveThread(std::shared_ptr<Thread> thread);

    /// Gets the threads waiting on an address.
    std::vector<std::shared_ptr<Thread>> GetThreadsWaitingOnAddress(VAddr address) const;

    /// List of threads waiting for an address arbiter
    std::unordered_map<VAddr, std::list<std::shared_ptr<Thread>>> arb_threads;

    Core::System& system;
};

} // namespace Kernel
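A minimal usage sketch (illustrative only, not part of this change; the arbiter reference, address, guest memory accessor and concrete values are assumptions) of how the two public entry points above are typically driven:

// Waiting side: sleep while the 32-bit value at 'addr' equals 0, for at most 10ms.
// arbiter.WaitForAddress(addr, AddressArbiter::ArbitrationType::WaitIfEqual, 0, 10'000'000);
//
// Signaling side: update the value, then wake a single waiter queued on 'addr'.
// memory.Write32(addr, 1);
// arbiter.SignalToAddress(addr, AddressArbiter::SignalType::Signal, 0, 1);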
@@ -33,6 +33,9 @@ ResultVal<std::shared_ptr<ClientSession>> ClientPort::Connect() {
|
||||
server_port->AppendPendingSession(std::move(server));
|
||||
}
|
||||
|
||||
// Wake the threads waiting on the ServerPort
|
||||
server_port->Signal();
|
||||
|
||||
return MakeResult(std::move(client));
|
||||
}
|
||||
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
ClientSession::ClientSession(KernelCore& kernel) : KSynchronizationObject{kernel} {}
|
||||
ClientSession::ClientSession(KernelCore& kernel) : SynchronizationObject{kernel} {}
|
||||
|
||||
ClientSession::~ClientSession() {
|
||||
// This destructor will be called automatically when the last ClientSession handle is closed by
|
||||
@@ -22,6 +22,15 @@ ClientSession::~ClientSession() {
|
||||
}
|
||||
}
|
||||
|
||||
bool ClientSession::ShouldWait(const Thread* thread) const {
|
||||
UNIMPLEMENTED();
|
||||
return {};
|
||||
}
|
||||
|
||||
void ClientSession::Acquire(Thread* thread) {
|
||||
UNIMPLEMENTED();
|
||||
}
|
||||
|
||||
bool ClientSession::IsSignaled() const {
|
||||
UNIMPLEMENTED();
|
||||
return true;
|
||||
|
||||
@@ -7,7 +7,7 @@
|
||||
#include <memory>
|
||||
#include <string>
|
||||
|
||||
#include "core/hle/kernel/k_synchronization_object.h"
|
||||
#include "core/hle/kernel/synchronization_object.h"
|
||||
#include "core/hle/result.h"
|
||||
|
||||
union ResultCode;
|
||||
@@ -26,7 +26,7 @@ class KernelCore;
|
||||
class Session;
|
||||
class Thread;
|
||||
|
||||
class ClientSession final : public KSynchronizationObject {
|
||||
class ClientSession final : public SynchronizationObject {
|
||||
public:
|
||||
explicit ClientSession(KernelCore& kernel);
|
||||
~ClientSession() override;
|
||||
@@ -49,6 +49,10 @@ public:
|
||||
ResultCode SendSyncRequest(std::shared_ptr<Thread> thread, Core::Memory::Memory& memory,
|
||||
Core::Timing::CoreTiming& core_timing);
|
||||
|
||||
bool ShouldWait(const Thread* thread) const override;
|
||||
|
||||
void Acquire(Thread* thread) override;
|
||||
|
||||
bool IsSignaled() const override;
|
||||
|
||||
private:
|
||||
|
||||
@@ -13,14 +13,12 @@ namespace Kernel {
|
||||
constexpr ResultCode ERR_MAX_CONNECTIONS_REACHED{ErrorModule::Kernel, 7};
|
||||
constexpr ResultCode ERR_INVALID_CAPABILITY_DESCRIPTOR{ErrorModule::Kernel, 14};
|
||||
constexpr ResultCode ERR_THREAD_TERMINATING{ErrorModule::Kernel, 59};
|
||||
constexpr ResultCode ERR_TERMINATION_REQUESTED{ErrorModule::Kernel, 59};
|
||||
constexpr ResultCode ERR_INVALID_SIZE{ErrorModule::Kernel, 101};
|
||||
constexpr ResultCode ERR_INVALID_ADDRESS{ErrorModule::Kernel, 102};
|
||||
constexpr ResultCode ERR_OUT_OF_RESOURCES{ErrorModule::Kernel, 103};
|
||||
constexpr ResultCode ERR_OUT_OF_MEMORY{ErrorModule::Kernel, 104};
|
||||
constexpr ResultCode ERR_HANDLE_TABLE_FULL{ErrorModule::Kernel, 105};
|
||||
constexpr ResultCode ERR_INVALID_ADDRESS_STATE{ErrorModule::Kernel, 106};
|
||||
constexpr ResultCode ERR_INVALID_CURRENT_MEMORY{ErrorModule::Kernel, 106};
|
||||
constexpr ResultCode ERR_INVALID_MEMORY_PERMISSIONS{ErrorModule::Kernel, 108};
|
||||
constexpr ResultCode ERR_INVALID_MEMORY_RANGE{ErrorModule::Kernel, 110};
|
||||
constexpr ResultCode ERR_INVALID_PROCESSOR_ID{ErrorModule::Kernel, 113};
|
||||
@@ -30,7 +28,6 @@ constexpr ResultCode ERR_INVALID_POINTER{ErrorModule::Kernel, 115};
|
||||
constexpr ResultCode ERR_INVALID_COMBINATION{ErrorModule::Kernel, 116};
|
||||
constexpr ResultCode RESULT_TIMEOUT{ErrorModule::Kernel, 117};
|
||||
constexpr ResultCode ERR_SYNCHRONIZATION_CANCELED{ErrorModule::Kernel, 118};
|
||||
constexpr ResultCode ERR_CANCELLED{ErrorModule::Kernel, 118};
|
||||
constexpr ResultCode ERR_OUT_OF_RANGE{ErrorModule::Kernel, 119};
|
||||
constexpr ResultCode ERR_INVALID_ENUM_VALUE{ErrorModule::Kernel, 120};
|
||||
constexpr ResultCode ERR_NOT_FOUND{ErrorModule::Kernel, 121};
|
||||
|
||||
@@ -46,6 +46,43 @@ void SessionRequestHandler::ClientDisconnected(
|
||||
boost::range::remove_erase(connected_sessions, server_session);
|
||||
}
|
||||
|
||||
std::shared_ptr<WritableEvent> HLERequestContext::SleepClientThread(
|
||||
const std::string& reason, u64 timeout, WakeupCallback&& callback,
|
||||
std::shared_ptr<WritableEvent> writable_event) {
|
||||
// Put the client thread to sleep until the wait event is signaled or the timeout expires.
|
||||
|
||||
if (!writable_event) {
|
||||
// Create event if not provided
|
||||
const auto pair = WritableEvent::CreateEventPair(kernel, "HLE Pause Event: " + reason);
|
||||
writable_event = pair.writable;
|
||||
}
|
||||
|
||||
Handle event_handle = InvalidHandle;
|
||||
{
|
||||
KScopedSchedulerLockAndSleep lock(kernel, event_handle, thread.get(), timeout);
|
||||
thread->SetHLECallback(
|
||||
[context = *this, callback](std::shared_ptr<Thread> thread) mutable -> bool {
|
||||
ThreadWakeupReason reason = thread->GetSignalingResult() == RESULT_TIMEOUT
|
||||
? ThreadWakeupReason::Timeout
|
||||
: ThreadWakeupReason::Signal;
|
||||
callback(thread, context, reason);
|
||||
context.WriteToOutgoingCommandBuffer(*thread);
|
||||
return true;
|
||||
});
|
||||
const auto readable_event{writable_event->GetReadableEvent()};
|
||||
writable_event->Clear();
|
||||
thread->SetHLESyncObject(readable_event.get());
|
||||
thread->SetStatus(ThreadStatus::WaitHLEEvent);
|
||||
thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
|
||||
readable_event->AddWaitingThread(thread);
|
||||
}
|
||||
thread->SetHLETimeEvent(event_handle);
|
||||
|
||||
is_thread_waiting = true;
|
||||
|
||||
return writable_event;
|
||||
}
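// Illustrative example only (not part of this change): a typical HLE handler parks the guest
// thread with SleepClientThread and finishes the response from the wakeup callback. The service
// name, timeout and response layout below are hypothetical.
//
// void ExampleService::WaitForSomething(Kernel::HLERequestContext& ctx) {
//     ctx.SleepClientThread("ExampleService:WaitForSomething", 1'000'000 /* 1ms */,
//                           [](std::shared_ptr<Kernel::Thread> thread,
//                              Kernel::HLERequestContext& context,
//                              Kernel::ThreadWakeupReason reason) {
//                               // Rewrite the whole response, as the callback contract requires.
//                               IPC::ResponseBuilder rb{context, 2};
//                               rb.Push(reason == Kernel::ThreadWakeupReason::Timeout
//                                           ? Kernel::RESULT_TIMEOUT
//                                           : RESULT_SUCCESS);
//                           });
// }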
|
||||
|
||||
HLERequestContext::HLERequestContext(KernelCore& kernel, Core::Memory::Memory& memory,
|
||||
std::shared_ptr<ServerSession> server_session,
|
||||
std::shared_ptr<Thread> thread)
|
||||
|
||||
@@ -129,6 +129,23 @@ public:
|
||||
using WakeupCallback = std::function<void(
|
||||
std::shared_ptr<Thread> thread, HLERequestContext& context, ThreadWakeupReason reason)>;
|
||||
|
||||
/**
|
||||
* Puts the specified guest thread to sleep until the returned event is signaled or until the
|
||||
* specified timeout expires.
|
||||
* @param reason Reason for pausing the thread, to be used for debugging purposes.
|
||||
* @param timeout Timeout in nanoseconds after which the thread will be awoken and the callback
|
||||
* invoked with a Timeout reason.
|
||||
* @param callback Callback to be invoked when the thread is resumed. This callback must write
|
||||
* the entire command response once again, regardless of the state of it before this function
|
||||
* was called.
|
||||
* @param writable_event Event to use to wake up the thread. If unspecified, an event will be
|
||||
* created.
|
||||
* @returns Event that when signaled will resume the thread and call the callback function.
|
||||
*/
|
||||
std::shared_ptr<WritableEvent> SleepClientThread(
|
||||
const std::string& reason, u64 timeout, WakeupCallback&& callback,
|
||||
std::shared_ptr<WritableEvent> writable_event = nullptr);
|
||||
|
||||
/// Populates this context with data from the requesting process/thread.
|
||||
ResultCode PopulateFromIncomingCommandBuffer(const HandleTable& handle_table,
|
||||
u32_le* src_cmdbuf);
|
||||
|
||||
@@ -1,367 +0,0 @@
|
||||
// Copyright 2021 yuzu Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include "core/arm/exclusive_monitor.h"
|
||||
#include "core/core.h"
|
||||
#include "core/hle/kernel/k_address_arbiter.h"
|
||||
#include "core/hle/kernel/k_scheduler.h"
|
||||
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
|
||||
#include "core/hle/kernel/kernel.h"
|
||||
#include "core/hle/kernel/svc_results.h"
|
||||
#include "core/hle/kernel/thread.h"
|
||||
#include "core/hle/kernel/time_manager.h"
|
||||
#include "core/memory.h"
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
KAddressArbiter::KAddressArbiter(Core::System& system_)
|
||||
: system{system_}, kernel{system.Kernel()} {}
|
||||
KAddressArbiter::~KAddressArbiter() = default;
|
||||
|
||||
namespace {
|
||||
|
||||
bool ReadFromUser(Core::System& system, s32* out, VAddr address) {
|
||||
*out = system.Memory().Read32(address);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 value) {
|
||||
auto& monitor = system.Monitor();
|
||||
const auto current_core = system.CurrentCoreIndex();
|
||||
|
||||
// TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
|
||||
// TODO(bunnei): We should call CanAccessAtomic(..) here.
|
||||
|
||||
// Load the value from the address.
|
||||
const s32 current_value = static_cast<s32>(monitor.ExclusiveRead32(current_core, address));
|
||||
|
||||
// Compare it to the desired one.
|
||||
if (current_value < value) {
|
||||
// If less than, we want to try to decrement.
|
||||
const s32 decrement_value = current_value - 1;
|
||||
|
||||
// Decrement and try to store.
|
||||
if (!monitor.ExclusiveWrite32(current_core, address, static_cast<u32>(decrement_value))) {
|
||||
// If we failed to store, try again.
|
||||
DecrementIfLessThan(system, out, address, value);
|
||||
}
|
||||
} else {
|
||||
// Otherwise, clear our exclusive hold and finish
|
||||
monitor.ClearExclusive();
|
||||
}
|
||||
|
||||
// We're done.
|
||||
*out = current_value;
|
||||
return true;
|
||||
}
|
||||
|
||||
bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32 new_value) {
|
||||
auto& monitor = system.Monitor();
|
||||
const auto current_core = system.CurrentCoreIndex();
|
||||
|
||||
// TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
|
||||
// TODO(bunnei): We should call CanAccessAtomic(..) here.
|
||||
|
||||
// Load the value from the address.
|
||||
const s32 current_value = static_cast<s32>(monitor.ExclusiveRead32(current_core, address));
|
||||
|
||||
// Compare it to the desired one.
|
||||
if (current_value == value) {
|
||||
// If equal, we want to try to write the new value.
|
||||
|
||||
// Try to store.
|
||||
if (!monitor.ExclusiveWrite32(current_core, address, static_cast<u32>(new_value))) {
|
||||
// If we failed to store, try again.
|
||||
UpdateIfEqual(system, out, address, value, new_value);
|
||||
}
|
||||
} else {
|
||||
// Otherwise, clear our exclusive hold and finish.
|
||||
monitor.ClearExclusive();
|
||||
}
|
||||
|
||||
// We're done.
|
||||
*out = current_value;
|
||||
return true;
|
||||
}
|
||||
|
||||
} // namespace
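// Aside (illustrative only, not part of this change): the ExclusiveRead32/ExclusiveWrite32 retry
// pattern above is the load-linked/store-conditional analogue of a plain compare-exchange loop.
// Sketched with std::atomic (requires <atomic>; the atomic word stands in for guest memory),
// DecrementIfLessThan behaves roughly like:
//
// bool DecrementIfLessThanAtomic(std::atomic<s32>& word, s32* out, s32 value) {
//     s32 observed = word.load();
//     while (observed < value) {
//         // On failure, 'observed' is reloaded with the current value and we retry.
//         if (word.compare_exchange_weak(observed, observed - 1)) {
//             break;
//         }
//     }
//     *out = observed; // Report the value that was observed, as the monitor version does.
//     return true;
// }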
|
||||
|
||||
ResultCode KAddressArbiter::Signal(VAddr addr, s32 count) {
|
||||
// Perform signaling.
|
||||
s32 num_waiters{};
|
||||
{
|
||||
KScopedSchedulerLock sl(kernel);
|
||||
|
||||
auto it = thread_tree.nfind_light({addr, -1});
|
||||
while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
|
||||
(it->GetAddressArbiterKey() == addr)) {
|
||||
Thread* target_thread = std::addressof(*it);
|
||||
target_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
|
||||
|
||||
ASSERT(target_thread->IsWaitingForAddressArbiter());
|
||||
target_thread->Wakeup();
|
||||
|
||||
it = thread_tree.erase(it);
|
||||
target_thread->ClearAddressArbiter();
|
||||
++num_waiters;
|
||||
}
|
||||
}
|
||||
return RESULT_SUCCESS;
|
||||
}
|
||||
|
||||
ResultCode KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count) {
|
||||
// Perform signaling.
|
||||
s32 num_waiters{};
|
||||
{
|
||||
KScopedSchedulerLock sl(kernel);
|
||||
|
||||
// Check the userspace value.
|
||||
s32 user_value{};
|
||||
R_UNLESS(UpdateIfEqual(system, std::addressof(user_value), addr, value, value + 1),
|
||||
Svc::ResultInvalidCurrentMemory);
|
||||
R_UNLESS(user_value == value, Svc::ResultInvalidState);
|
||||
|
||||
auto it = thread_tree.nfind_light({addr, -1});
|
||||
while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
|
||||
(it->GetAddressArbiterKey() == addr)) {
|
||||
Thread* target_thread = std::addressof(*it);
|
||||
target_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
|
||||
|
||||
ASSERT(target_thread->IsWaitingForAddressArbiter());
|
||||
target_thread->Wakeup();
|
||||
|
||||
it = thread_tree.erase(it);
|
||||
target_thread->ClearAddressArbiter();
|
||||
++num_waiters;
|
||||
}
|
||||
}
|
||||
return RESULT_SUCCESS;
|
||||
}
|
||||
|
||||
ResultCode KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count) {
|
||||
// Perform signaling.
|
||||
s32 num_waiters{};
|
||||
{
|
||||
KScopedSchedulerLock sl(kernel);
|
||||
|
||||
auto it = thread_tree.nfind_light({addr, -1});
|
||||
// Determine the updated value.
|
||||
s32 new_value{};
|
||||
if (/*GetTargetFirmware() >= TargetFirmware_7_0_0*/ true) {
|
||||
if (count <= 0) {
|
||||
if ((it != thread_tree.end()) && (it->GetAddressArbiterKey() == addr)) {
|
||||
new_value = value - 2;
|
||||
} else {
|
||||
new_value = value + 1;
|
||||
}
|
||||
} else {
|
||||
if ((it != thread_tree.end()) && (it->GetAddressArbiterKey() == addr)) {
|
||||
auto tmp_it = it;
|
||||
s32 tmp_num_waiters{};
|
||||
while ((++tmp_it != thread_tree.end()) &&
|
||||
(tmp_it->GetAddressArbiterKey() == addr)) {
|
||||
if ((tmp_num_waiters++) >= count) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (tmp_num_waiters < count) {
|
||||
new_value = value - 1;
|
||||
} else {
|
||||
new_value = value;
|
||||
}
|
||||
} else {
|
||||
new_value = value + 1;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if (count <= 0) {
|
||||
if ((it != thread_tree.end()) && (it->GetAddressArbiterKey() == addr)) {
|
||||
new_value = value - 1;
|
||||
} else {
|
||||
new_value = value + 1;
|
||||
}
|
||||
} else {
|
||||
auto tmp_it = it;
|
||||
s32 tmp_num_waiters{};
|
||||
while ((tmp_it != thread_tree.end()) && (tmp_it->GetAddressArbiterKey() == addr) &&
|
||||
(tmp_num_waiters < count + 1)) {
|
||||
++tmp_num_waiters;
|
||||
++tmp_it;
|
||||
}
|
||||
|
||||
if (tmp_num_waiters == 0) {
|
||||
new_value = value + 1;
|
||||
} else if (tmp_num_waiters <= count) {
|
||||
new_value = value - 1;
|
||||
} else {
|
||||
new_value = value;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Check the userspace value.
|
||||
s32 user_value{};
|
||||
bool succeeded{};
|
||||
if (value != new_value) {
|
||||
succeeded = UpdateIfEqual(system, std::addressof(user_value), addr, value, new_value);
|
||||
} else {
|
||||
succeeded = ReadFromUser(system, std::addressof(user_value), addr);
|
||||
}
|
||||
|
||||
R_UNLESS(succeeded, Svc::ResultInvalidCurrentMemory);
|
||||
R_UNLESS(user_value == value, Svc::ResultInvalidState);
|
||||
|
||||
while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
|
||||
(it->GetAddressArbiterKey() == addr)) {
|
||||
Thread* target_thread = std::addressof(*it);
|
||||
target_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
|
||||
|
||||
ASSERT(target_thread->IsWaitingForAddressArbiter());
|
||||
target_thread->Wakeup();
|
||||
|
||||
it = thread_tree.erase(it);
|
||||
target_thread->ClearAddressArbiter();
|
||||
++num_waiters;
|
||||
}
|
||||
}
|
||||
return RESULT_SUCCESS;
|
||||
}
|
||||
|
||||
ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout) {
|
||||
// Prepare to wait.
|
||||
Thread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
|
||||
Handle timer = InvalidHandle;
|
||||
|
||||
{
|
||||
KScopedSchedulerLockAndSleep slp(kernel, timer, cur_thread, timeout);
|
||||
|
||||
// Check that the thread isn't terminating.
|
||||
if (cur_thread->IsTerminationRequested()) {
|
||||
slp.CancelSleep();
|
||||
return Svc::ResultTerminationRequested;
|
||||
}
|
||||
|
||||
// Set the synced object.
|
||||
cur_thread->SetSyncedObject(nullptr, Svc::ResultTimedOut);
|
||||
|
||||
// Read the value from userspace.
|
||||
s32 user_value{};
|
||||
bool succeeded{};
|
||||
if (decrement) {
|
||||
succeeded = DecrementIfLessThan(system, std::addressof(user_value), addr, value);
|
||||
} else {
|
||||
succeeded = ReadFromUser(system, std::addressof(user_value), addr);
|
||||
}
|
||||
|
||||
if (!succeeded) {
|
||||
slp.CancelSleep();
|
||||
return Svc::ResultInvalidCurrentMemory;
|
||||
}
|
||||
|
||||
// Check that the value is less than the specified one.
|
||||
if (user_value >= value) {
|
||||
slp.CancelSleep();
|
||||
return Svc::ResultInvalidState;
|
||||
}
|
||||
|
||||
// Check that the timeout is non-zero.
|
||||
if (timeout == 0) {
|
||||
slp.CancelSleep();
|
||||
return Svc::ResultTimedOut;
|
||||
}
|
||||
|
||||
// Set the arbiter.
|
||||
cur_thread->SetAddressArbiter(std::addressof(thread_tree), addr);
|
||||
thread_tree.insert(*cur_thread);
|
||||
cur_thread->SetState(ThreadState::Waiting);
|
||||
cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Arbitration);
|
||||
}
|
||||
|
||||
// Cancel the timer wait.
|
||||
if (timer != InvalidHandle) {
|
||||
auto& time_manager = kernel.TimeManager();
|
||||
time_manager.UnscheduleTimeEvent(timer);
|
||||
}
|
||||
|
||||
// Remove from the address arbiter.
|
||||
{
|
||||
KScopedSchedulerLock sl(kernel);
|
||||
|
||||
if (cur_thread->IsWaitingForAddressArbiter()) {
|
||||
thread_tree.erase(thread_tree.iterator_to(*cur_thread));
|
||||
cur_thread->ClearAddressArbiter();
|
||||
}
|
||||
}
|
||||
|
||||
// Get the result.
|
||||
KSynchronizationObject* dummy{};
|
||||
return cur_thread->GetWaitResult(std::addressof(dummy));
|
||||
}
|
||||
|
||||
ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
|
||||
// Prepare to wait.
|
||||
Thread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
|
||||
Handle timer = InvalidHandle;
|
||||
|
||||
{
|
||||
KScopedSchedulerLockAndSleep slp(kernel, timer, cur_thread, timeout);
|
||||
|
||||
// Check that the thread isn't terminating.
|
||||
if (cur_thread->IsTerminationRequested()) {
|
||||
slp.CancelSleep();
|
||||
return Svc::ResultTerminationRequested;
|
||||
}
|
||||
|
||||
// Set the synced object.
|
||||
cur_thread->SetSyncedObject(nullptr, Svc::ResultTimedOut);
|
||||
|
||||
// Read the value from userspace.
|
||||
s32 user_value{};
|
||||
if (!ReadFromUser(system, std::addressof(user_value), addr)) {
|
||||
slp.CancelSleep();
|
||||
return Svc::ResultInvalidCurrentMemory;
|
||||
}
|
||||
|
||||
// Check that the value is equal.
|
||||
if (value != user_value) {
|
||||
slp.CancelSleep();
|
||||
return Svc::ResultInvalidState;
|
||||
}
|
||||
|
||||
// Check that the timeout is non-zero.
|
||||
if (timeout == 0) {
|
||||
slp.CancelSleep();
|
||||
return Svc::ResultTimedOut;
|
||||
}
|
||||
|
||||
// Set the arbiter.
|
||||
cur_thread->SetAddressArbiter(std::addressof(thread_tree), addr);
|
||||
thread_tree.insert(*cur_thread);
|
||||
cur_thread->SetState(ThreadState::Waiting);
|
||||
cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Arbitration);
|
||||
}
|
||||
|
||||
// Cancel the timer wait.
|
||||
if (timer != InvalidHandle) {
|
||||
auto& time_manager = kernel.TimeManager();
|
||||
time_manager.UnscheduleTimeEvent(timer);
|
||||
}
|
||||
|
||||
// Remove from the address arbiter.
|
||||
{
|
||||
KScopedSchedulerLock sl(kernel);
|
||||
|
||||
if (cur_thread->IsWaitingForAddressArbiter()) {
|
||||
thread_tree.erase(thread_tree.iterator_to(*cur_thread));
|
||||
cur_thread->ClearAddressArbiter();
|
||||
}
|
||||
}
|
||||
|
||||
// Get the result.
|
||||
KSynchronizationObject* dummy{};
|
||||
return cur_thread->GetWaitResult(std::addressof(dummy));
|
||||
}
|
||||
|
||||
} // namespace Kernel
|
||||
@@ -1,70 +0,0 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "common/assert.h"
#include "common/common_types.h"
#include "core/hle/kernel/k_condition_variable.h"
#include "core/hle/kernel/svc_types.h"

union ResultCode;

namespace Core {
class System;
}

namespace Kernel {

class KernelCore;

class KAddressArbiter {
public:
    using ThreadTree = KConditionVariable::ThreadTree;

    explicit KAddressArbiter(Core::System& system_);
    ~KAddressArbiter();

    [[nodiscard]] ResultCode SignalToAddress(VAddr addr, Svc::SignalType type, s32 value,
                                             s32 count) {
        switch (type) {
        case Svc::SignalType::Signal:
            return Signal(addr, count);
        case Svc::SignalType::SignalAndIncrementIfEqual:
            return SignalAndIncrementIfEqual(addr, value, count);
        case Svc::SignalType::SignalAndModifyByWaitingCountIfEqual:
            return SignalAndModifyByWaitingCountIfEqual(addr, value, count);
        }
        UNREACHABLE();
        return RESULT_UNKNOWN;
    }

    [[nodiscard]] ResultCode WaitForAddress(VAddr addr, Svc::ArbitrationType type, s32 value,
                                            s64 timeout) {
        switch (type) {
        case Svc::ArbitrationType::WaitIfLessThan:
            return WaitIfLessThan(addr, value, false, timeout);
        case Svc::ArbitrationType::DecrementAndWaitIfLessThan:
            return WaitIfLessThan(addr, value, true, timeout);
        case Svc::ArbitrationType::WaitIfEqual:
            return WaitIfEqual(addr, value, timeout);
        }
        UNREACHABLE();
        return RESULT_UNKNOWN;
    }

private:
    [[nodiscard]] ResultCode Signal(VAddr addr, s32 count);
    [[nodiscard]] ResultCode SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count);
    [[nodiscard]] ResultCode SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count);
    [[nodiscard]] ResultCode WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout);
    [[nodiscard]] ResultCode WaitIfEqual(VAddr addr, s32 value, s64 timeout);

    ThreadTree thread_tree;

    Core::System& system;
    KernelCore& kernel;
};

} // namespace Kernel
@@ -1,349 +0,0 @@
|
||||
// Copyright 2021 yuzu Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include <vector>
|
||||
|
||||
#include "core/arm/exclusive_monitor.h"
|
||||
#include "core/core.h"
|
||||
#include "core/hle/kernel/k_condition_variable.h"
|
||||
#include "core/hle/kernel/k_scheduler.h"
|
||||
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
|
||||
#include "core/hle/kernel/k_synchronization_object.h"
|
||||
#include "core/hle/kernel/kernel.h"
|
||||
#include "core/hle/kernel/process.h"
|
||||
#include "core/hle/kernel/svc_common.h"
|
||||
#include "core/hle/kernel/svc_results.h"
|
||||
#include "core/hle/kernel/thread.h"
|
||||
#include "core/memory.h"
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
namespace {
|
||||
|
||||
bool ReadFromUser(Core::System& system, u32* out, VAddr address) {
|
||||
*out = system.Memory().Read32(address);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool WriteToUser(Core::System& system, VAddr address, const u32* p) {
|
||||
system.Memory().Write32(address, *p);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero,
|
||||
u32 new_orr_mask) {
|
||||
auto& monitor = system.Monitor();
|
||||
const auto current_core = system.CurrentCoreIndex();
|
||||
|
||||
// Load the value from the address.
|
||||
const auto expected = monitor.ExclusiveRead32(current_core, address);
|
||||
|
||||
// Orr in the new mask.
|
||||
u32 value = expected | new_orr_mask;
|
||||
|
||||
// If the value is zero, use the if_zero value, otherwise use the newly orr'd value.
|
||||
if (!expected) {
|
||||
value = if_zero;
|
||||
}
|
||||
|
||||
// Try to store.
|
||||
if (!monitor.ExclusiveWrite32(current_core, address, value)) {
|
||||
// If we failed to store, try again.
|
||||
return UpdateLockAtomic(system, out, address, if_zero, new_orr_mask);
|
||||
}
|
||||
|
||||
// We're done.
|
||||
*out = expected;
|
||||
return true;
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
KConditionVariable::KConditionVariable(Core::System& system_)
|
||||
: system{system_}, kernel{system.Kernel()} {}
|
||||
|
||||
KConditionVariable::~KConditionVariable() = default;
|
||||
|
||||
ResultCode KConditionVariable::SignalToAddress(VAddr addr) {
|
||||
Thread* owner_thread = kernel.CurrentScheduler()->GetCurrentThread();
|
||||
|
||||
// Signal the address.
|
||||
{
|
||||
KScopedSchedulerLock sl(kernel);
|
||||
|
||||
// Remove waiter thread.
|
||||
s32 num_waiters{};
|
||||
Thread* next_owner_thread =
|
||||
owner_thread->RemoveWaiterByKey(std::addressof(num_waiters), addr);
|
||||
|
||||
// Determine the next tag.
|
||||
u32 next_value{};
|
||||
if (next_owner_thread) {
|
||||
next_value = next_owner_thread->GetAddressKeyValue();
|
||||
if (num_waiters > 1) {
|
||||
next_value |= Svc::HandleWaitMask;
|
||||
}
|
||||
|
||||
next_owner_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
|
||||
next_owner_thread->Wakeup();
|
||||
}
|
||||
|
||||
// Write the value to userspace.
|
||||
if (!WriteToUser(system, addr, std::addressof(next_value))) {
|
||||
if (next_owner_thread) {
|
||||
next_owner_thread->SetSyncedObject(nullptr, Svc::ResultInvalidCurrentMemory);
|
||||
}
|
||||
|
||||
return Svc::ResultInvalidCurrentMemory;
|
||||
}
|
||||
}
|
||||
|
||||
return RESULT_SUCCESS;
|
||||
}
|
||||
|
||||
ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value) {
|
||||
Thread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
|
||||
|
||||
// Wait for the address.
|
||||
{
|
||||
std::shared_ptr<Thread> owner_thread;
|
||||
ASSERT(!owner_thread);
|
||||
{
|
||||
KScopedSchedulerLock sl(kernel);
|
||||
cur_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
|
||||
|
||||
// Check if the thread should terminate.
|
||||
R_UNLESS(!cur_thread->IsTerminationRequested(), Svc::ResultTerminationRequested);
|
||||
|
||||
{
|
||||
// Read the tag from userspace.
|
||||
u32 test_tag{};
|
||||
R_UNLESS(ReadFromUser(system, std::addressof(test_tag), addr),
|
||||
Svc::ResultInvalidCurrentMemory);
|
||||
|
||||
// If the tag isn't the handle (with wait mask), we're done.
|
||||
R_UNLESS(test_tag == (handle | Svc::HandleWaitMask), RESULT_SUCCESS);
|
||||
|
||||
// Get the lock owner thread.
|
||||
owner_thread = kernel.CurrentProcess()->GetHandleTable().Get<Thread>(handle);
|
||||
R_UNLESS(owner_thread, Svc::ResultInvalidHandle);
|
||||
|
||||
// Update the lock.
|
||||
cur_thread->SetAddressKey(addr, value);
|
||||
owner_thread->AddWaiter(cur_thread);
|
||||
cur_thread->SetState(ThreadState::Waiting);
|
||||
cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
|
||||
cur_thread->SetMutexWaitAddressForDebugging(addr);
|
||||
}
|
||||
}
|
||||
ASSERT(owner_thread);
|
||||
}
|
||||
|
||||
// Remove the thread as a waiter from the lock owner.
|
||||
{
|
||||
KScopedSchedulerLock sl(kernel);
|
||||
Thread* owner_thread = cur_thread->GetLockOwner();
|
||||
if (owner_thread != nullptr) {
|
||||
owner_thread->RemoveWaiter(cur_thread);
|
||||
}
|
||||
}
|
||||
|
||||
// Get the wait result.
|
||||
KSynchronizationObject* dummy{};
|
||||
return cur_thread->GetWaitResult(std::addressof(dummy));
|
||||
}
|
||||
|
||||
Thread* KConditionVariable::SignalImpl(Thread* thread) {
|
||||
// Check pre-conditions.
|
||||
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
|
||||
|
||||
// Update the tag.
|
||||
VAddr address = thread->GetAddressKey();
|
||||
u32 own_tag = thread->GetAddressKeyValue();
|
||||
|
||||
u32 prev_tag{};
|
||||
bool can_access{};
|
||||
{
|
||||
// TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
|
||||
// TODO(bunnei): We should call CanAccessAtomic(..) here.
|
||||
can_access = true;
|
||||
if (can_access) {
|
||||
UpdateLockAtomic(system, std::addressof(prev_tag), address, own_tag,
|
||||
Svc::HandleWaitMask);
|
||||
}
|
||||
}
|
||||
|
||||
Thread* thread_to_close = nullptr;
|
||||
if (can_access) {
|
||||
if (prev_tag == InvalidHandle) {
|
||||
// If nobody held the lock previously, we're all good.
|
||||
thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
|
||||
thread->Wakeup();
|
||||
} else {
|
||||
// Get the previous owner.
|
||||
auto owner_thread = kernel.CurrentProcess()->GetHandleTable().Get<Thread>(
|
||||
prev_tag & ~Svc::HandleWaitMask);
|
||||
|
||||
if (owner_thread) {
|
||||
// Add the thread as a waiter on the owner.
|
||||
owner_thread->AddWaiter(thread);
|
||||
thread_to_close = owner_thread.get();
|
||||
} else {
|
||||
// The lock was tagged with a thread that doesn't exist.
|
||||
thread->SetSyncedObject(nullptr, Svc::ResultInvalidState);
|
||||
thread->Wakeup();
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// If the address wasn't accessible, note so.
|
||||
thread->SetSyncedObject(nullptr, Svc::ResultInvalidCurrentMemory);
|
||||
thread->Wakeup();
|
||||
}
|
||||
|
||||
return thread_to_close;
|
||||
}
|
||||
|
||||
void KConditionVariable::Signal(u64 cv_key, s32 count) {
|
||||
// Prepare for signaling.
|
||||
constexpr int MaxThreads = 16;
|
||||
|
||||
// TODO(bunnei): This should just be Thread once we implement KAutoObject instead of using
|
||||
// std::shared_ptr.
|
||||
std::vector<std::shared_ptr<Thread>> thread_list;
|
||||
std::array<Thread*, MaxThreads> thread_array;
|
||||
s32 num_to_close{};
|
||||
|
||||
// Perform signaling.
|
||||
s32 num_waiters{};
|
||||
{
|
||||
KScopedSchedulerLock sl(kernel);
|
||||
|
||||
auto it = thread_tree.nfind_light({cv_key, -1});
|
||||
while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
|
||||
(it->GetConditionVariableKey() == cv_key)) {
|
||||
Thread* target_thread = std::addressof(*it);
|
||||
|
||||
if (Thread* thread = SignalImpl(target_thread); thread != nullptr) {
|
||||
if (num_to_close < MaxThreads) {
|
||||
thread_array[num_to_close++] = thread;
|
||||
} else {
|
||||
thread_list.push_back(SharedFrom(thread));
|
||||
}
|
||||
}
|
||||
|
||||
it = thread_tree.erase(it);
|
||||
target_thread->ClearConditionVariable();
|
||||
++num_waiters;
|
||||
}
|
||||
|
||||
// If we have no waiters, clear the has waiter flag.
|
||||
if (it == thread_tree.end() || it->GetConditionVariableKey() != cv_key) {
|
||||
const u32 has_waiter_flag{};
|
||||
WriteToUser(system, cv_key, std::addressof(has_waiter_flag));
|
||||
}
|
||||
}
|
||||
|
||||
// Close threads in the array.
|
||||
for (auto i = 0; i < num_to_close; ++i) {
|
||||
thread_array[i]->Close();
|
||||
}
|
||||
|
||||
// Close threads in the list.
|
||||
for (auto it = thread_list.begin(); it != thread_list.end(); it = thread_list.erase(it)) {
|
||||
(*it)->Close();
|
||||
}
|
||||
}
|
||||
|
||||
ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
|
||||
// Prepare to wait.
|
||||
Thread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
|
||||
Handle timer = InvalidHandle;
|
||||
|
||||
{
|
||||
KScopedSchedulerLockAndSleep slp(kernel, timer, cur_thread, timeout);
|
||||
|
||||
// Set the synced object.
|
||||
cur_thread->SetSyncedObject(nullptr, Svc::ResultTimedOut);
|
||||
|
||||
// Check that the thread isn't terminating.
|
||||
if (cur_thread->IsTerminationRequested()) {
|
||||
slp.CancelSleep();
|
||||
return Svc::ResultTerminationRequested;
|
||||
}
|
||||
|
||||
// Update the value and process for the next owner.
|
||||
{
|
||||
// Remove waiter thread.
|
||||
s32 num_waiters{};
|
||||
Thread* next_owner_thread =
|
||||
cur_thread->RemoveWaiterByKey(std::addressof(num_waiters), addr);
|
||||
|
||||
// Update for the next owner thread.
|
||||
u32 next_value{};
|
||||
if (next_owner_thread != nullptr) {
|
||||
// Get the next tag value.
|
||||
next_value = next_owner_thread->GetAddressKeyValue();
|
||||
if (num_waiters > 1) {
|
||||
next_value |= Svc::HandleWaitMask;
|
||||
}
|
||||
|
||||
// Wake up the next owner.
|
||||
next_owner_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
|
||||
next_owner_thread->Wakeup();
|
||||
}
|
||||
|
||||
// Write to the cv key.
|
||||
{
|
||||
const u32 has_waiter_flag = 1;
|
||||
WriteToUser(system, key, std::addressof(has_waiter_flag));
|
||||
// TODO(bunnei): We should call DataMemoryBarrier(..) here.
|
||||
}
|
||||
|
||||
// Write the value to userspace.
|
||||
if (!WriteToUser(system, addr, std::addressof(next_value))) {
|
||||
slp.CancelSleep();
|
||||
return Svc::ResultInvalidCurrentMemory;
|
||||
}
|
||||
}
|
||||
|
||||
// Update condition variable tracking.
|
||||
{
|
||||
cur_thread->SetConditionVariable(std::addressof(thread_tree), addr, key, value);
|
||||
thread_tree.insert(*cur_thread);
|
||||
}
|
||||
|
||||
// If the timeout is non-zero, set the thread as waiting.
|
||||
if (timeout != 0) {
|
||||
cur_thread->SetState(ThreadState::Waiting);
|
||||
cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
|
||||
cur_thread->SetMutexWaitAddressForDebugging(addr);
|
||||
}
|
||||
}
|
||||
|
||||
// Cancel the timer wait.
|
||||
if (timer != InvalidHandle) {
|
||||
auto& time_manager = kernel.TimeManager();
|
||||
time_manager.UnscheduleTimeEvent(timer);
|
||||
}
|
||||
|
||||
// Remove from the condition variable.
|
||||
{
|
||||
KScopedSchedulerLock sl(kernel);
|
||||
|
||||
if (Thread* owner = cur_thread->GetLockOwner(); owner != nullptr) {
|
||||
owner->RemoveWaiter(cur_thread);
|
||||
}
|
||||
|
||||
if (cur_thread->IsWaitingForConditionVariable()) {
|
||||
thread_tree.erase(thread_tree.iterator_to(*cur_thread));
|
||||
cur_thread->ClearConditionVariable();
|
||||
}
|
||||
}
|
||||
|
||||
// Get the result.
|
||||
KSynchronizationObject* dummy{};
|
||||
return cur_thread->GetWaitResult(std::addressof(dummy));
|
||||
}
|
||||
|
||||
} // namespace Kernel
|
||||
@@ -1,59 +0,0 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "common/assert.h"
#include "common/common_types.h"

#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/result.h"

namespace Core {
class System;
}

namespace Kernel {

class KConditionVariable {
public:
    using ThreadTree = typename Thread::ConditionVariableThreadTreeType;

    explicit KConditionVariable(Core::System& system_);
    ~KConditionVariable();

    // Arbitration
    [[nodiscard]] ResultCode SignalToAddress(VAddr addr);
    [[nodiscard]] ResultCode WaitForAddress(Handle handle, VAddr addr, u32 value);

    // Condition variable
    void Signal(u64 cv_key, s32 count);
    [[nodiscard]] ResultCode Wait(VAddr addr, u64 key, u32 value, s64 timeout);

private:
    [[nodiscard]] Thread* SignalImpl(Thread* thread);

    ThreadTree thread_tree;

    Core::System& system;
    KernelCore& kernel;
};

inline void BeforeUpdatePriority(const KernelCore& kernel, KConditionVariable::ThreadTree* tree,
                                 Thread* thread) {
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());

    tree->erase(tree->iterator_to(*thread));
}

inline void AfterUpdatePriority(const KernelCore& kernel, KConditionVariable::ThreadTree* tree,
                                Thread* thread) {
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());

    tree->insert(*thread);
}

} // namespace Kernel
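A brief usage sketch (illustrative only; the mutex address, condition-variable key, tag value and timeout below are hypothetical) of how the two halves of this class pair up:

// Waiting side: hand back the mutex word at 'mutex_addr' to the next owner and sleep on the
// condition-variable key for up to 100ms, or until signaled.
// cv.Wait(mutex_addr, cv_key, self_tag, 100'000'000);
//
// Signaling side: wake up to one waiter registered under the same key.
// cv.Signal(cv_key, 1);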
@@ -8,13 +8,11 @@
|
||||
#pragma once
|
||||
|
||||
#include <array>
|
||||
#include <concepts>
|
||||
|
||||
#include "common/assert.h"
|
||||
#include "common/bit_set.h"
|
||||
#include "common/bit_util.h"
|
||||
#include "common/common_types.h"
|
||||
#include "common/concepts.h"
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
@@ -23,7 +21,7 @@ class Thread;
|
||||
template <typename T>
|
||||
concept KPriorityQueueAffinityMask = !std::is_reference_v<T> && requires(T & t) {
|
||||
{ t.GetAffinityMask() }
|
||||
->Common::ConvertibleTo<u64>;
|
||||
->std::convertible_to<u64>;
|
||||
{t.SetAffinityMask(std::declval<u64>())};
|
||||
|
||||
{ t.GetAffinity(std::declval<int32_t>()) }
|
||||
@@ -50,9 +48,9 @@ concept KPriorityQueueMember = !std::is_reference_v<T> && requires(T & t) {
|
||||
->KPriorityQueueAffinityMask;
|
||||
|
||||
{ t.GetActiveCore() }
|
||||
->Common::ConvertibleTo<s32>;
|
||||
->std::convertible_to<s32>;
|
||||
{ t.GetPriority() }
|
||||
->Common::ConvertibleTo<s32>;
|
||||
->std::convertible_to<s32>;
|
||||
};
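// Illustrative only (not part of this change): with the switch from Common::ConvertibleTo to the
// standard library concept, a requirement like the ones above reads, in isolation (s32 is yuzu's
// typedef from common_types.h):
//
// #include <concepts>
// template <typename T>
// concept HasPriority = requires(T& t) {
//     { t.GetPriority() } -> std::convertible_to<s32>;
// };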
|
||||
|
||||
template <typename Member, size_t _NumCores, int LowestPriority, int HighestPriority>
|
||||
|
||||
@@ -180,22 +180,22 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
|
||||
return cores_needing_scheduling;
|
||||
}
|
||||
|
||||
void KScheduler::OnThreadStateChanged(KernelCore& kernel, Thread* thread, ThreadState old_state) {
|
||||
void KScheduler::OnThreadStateChanged(KernelCore& kernel, Thread* thread, u32 old_state) {
|
||||
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
|
||||
|
||||
// Check if the state has changed, because if it hasn't there's nothing to do.
|
||||
const auto cur_state = thread->GetRawState();
|
||||
const auto cur_state = thread->scheduling_state;
|
||||
if (cur_state == old_state) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Update the priority queues.
|
||||
if (old_state == ThreadState::Runnable) {
|
||||
if (old_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
|
||||
// If we were previously runnable, then we're not runnable now, and we should remove.
|
||||
GetPriorityQueue(kernel).Remove(thread);
|
||||
IncrementScheduledCount(thread);
|
||||
SetSchedulerUpdateNeeded(kernel);
|
||||
} else if (cur_state == ThreadState::Runnable) {
|
||||
} else if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
|
||||
// If we're now runnable, then we weren't previously, and we should add.
|
||||
GetPriorityQueue(kernel).PushBack(thread);
|
||||
IncrementScheduledCount(thread);
|
||||
@@ -203,11 +203,13 @@ void KScheduler::OnThreadStateChanged(KernelCore& kernel, Thread* thread, Thread
|
||||
}
|
||||
}
|
||||
|
||||
void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, s32 old_priority) {
|
||||
void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, Thread* current_thread,
|
||||
u32 old_priority) {
|
||||
|
||||
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
|
||||
|
||||
// If the thread is runnable, we want to change its priority in the queue.
|
||||
if (thread->GetRawState() == ThreadState::Runnable) {
|
||||
if (thread->scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
|
||||
GetPriorityQueue(kernel).ChangePriority(
|
||||
old_priority, thread == kernel.CurrentScheduler()->GetCurrentThread(), thread);
|
||||
IncrementScheduledCount(thread);
|
||||
@@ -220,7 +222,7 @@ void KScheduler::OnThreadAffinityMaskChanged(KernelCore& kernel, Thread* thread,
|
||||
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
|
||||
|
||||
// If the thread is runnable, we want to change its affinity in the queue.
|
||||
if (thread->GetRawState() == ThreadState::Runnable) {
|
||||
if (thread->scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
|
||||
GetPriorityQueue(kernel).ChangeAffinityMask(old_core, old_affinity, thread);
|
||||
IncrementScheduledCount(thread);
|
||||
SetSchedulerUpdateNeeded(kernel);
|
||||
@@ -290,7 +292,7 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) {
|
||||
|
||||
// If the best thread we can choose has a priority the same or worse than ours, try to
|
||||
// migrate a higher priority thread.
|
||||
if (best_thread != nullptr && best_thread->GetPriority() >= priority) {
|
||||
if (best_thread != nullptr && best_thread->GetPriority() >= static_cast<u32>(priority)) {
|
||||
Thread* suggested = priority_queue.GetSuggestedFront(core_id);
|
||||
while (suggested != nullptr) {
|
||||
// If the suggestion's priority is the same as ours, don't bother.
|
||||
@@ -393,8 +395,8 @@ void KScheduler::YieldWithoutCoreMigration() {
|
||||
{
|
||||
KScopedSchedulerLock lock(kernel);
|
||||
|
||||
const auto cur_state = cur_thread.GetRawState();
|
||||
if (cur_state == ThreadState::Runnable) {
|
||||
const auto cur_state = cur_thread.scheduling_state;
|
||||
if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
|
||||
// Put the current thread at the back of the queue.
|
||||
Thread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread));
|
||||
IncrementScheduledCount(std::addressof(cur_thread));
|
||||
@@ -434,8 +436,8 @@ void KScheduler::YieldWithCoreMigration() {
|
||||
{
|
||||
KScopedSchedulerLock lock(kernel);
|
||||
|
||||
const auto cur_state = cur_thread.GetRawState();
|
||||
if (cur_state == ThreadState::Runnable) {
|
||||
const auto cur_state = cur_thread.scheduling_state;
|
||||
if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
|
||||
// Get the current active core.
|
||||
const s32 core_id = cur_thread.GetActiveCore();
|
||||
|
||||
@@ -524,8 +526,8 @@ void KScheduler::YieldToAnyThread() {
|
||||
{
|
||||
KScopedSchedulerLock lock(kernel);
|
||||
|
||||
const auto cur_state = cur_thread.GetRawState();
|
||||
if (cur_state == ThreadState::Runnable) {
|
||||
const auto cur_state = cur_thread.scheduling_state;
|
||||
if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
|
||||
// Get the current active core.
|
||||
const s32 core_id = cur_thread.GetActiveCore();
|
||||
|
||||
@@ -643,7 +645,8 @@ void KScheduler::Unload(Thread* thread) {
|
||||
|
||||
void KScheduler::Reload(Thread* thread) {
|
||||
if (thread) {
|
||||
ASSERT_MSG(thread->GetState() == ThreadState::Runnable, "Thread must be runnable.");
|
||||
ASSERT_MSG(thread->GetSchedulingStatus() == ThreadSchedStatus::Runnable,
|
||||
"Thread must be runnable.");
|
||||
|
||||
// Cancel any outstanding wakeup events for this thread
|
||||
thread->SetIsRunning(true);
|
||||
@@ -722,7 +725,7 @@ void KScheduler::SwitchToCurrent() {
|
||||
do {
|
||||
if (current_thread != nullptr && !current_thread->IsHLEThread()) {
|
||||
current_thread->context_guard.lock();
|
||||
if (current_thread->GetRawState() != ThreadState::Runnable) {
|
||||
if (!current_thread->IsRunnable()) {
|
||||
current_thread->context_guard.unlock();
|
||||
break;
|
||||
}
|
||||
@@ -769,7 +772,7 @@ void KScheduler::Initialize() {
|
||||
|
||||
{
|
||||
KScopedSchedulerLock lock{system.Kernel()};
|
||||
idle_thread->SetState(ThreadState::Runnable);
|
||||
idle_thread->SetStatus(ThreadStatus::Ready);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -100,10 +100,11 @@ public:
|
||||
void YieldToAnyThread();
|
||||
|
||||
/// Notify the scheduler a thread's status has changed.
|
||||
static void OnThreadStateChanged(KernelCore& kernel, Thread* thread, ThreadState old_state);
|
||||
static void OnThreadStateChanged(KernelCore& kernel, Thread* thread, u32 old_state);
|
||||
|
||||
/// Notify the scheduler a thread's priority has changed.
|
||||
static void OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, s32 old_priority);
|
||||
static void OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, Thread* current_thread,
|
||||
u32 old_priority);
|
||||
|
||||
/// Notify the scheduler a thread's core and/or affinity mask has changed.
|
||||
static void OnThreadAffinityMaskChanged(KernelCore& kernel, Thread* thread,
|
||||
|
||||
@@ -10,7 +10,6 @@
|
||||
#include "common/assert.h"
|
||||
#include "common/spin_lock.h"
|
||||
#include "core/hardware_properties.h"
|
||||
#include "core/hle/kernel/kernel.h"
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
@@ -19,7 +18,7 @@ class KernelCore;
|
||||
template <typename SchedulerType>
|
||||
class KAbstractSchedulerLock {
|
||||
public:
|
||||
explicit KAbstractSchedulerLock(KernelCore& kernel_) : kernel{kernel_} {}
|
||||
explicit KAbstractSchedulerLock(KernelCore& kernel) : kernel{kernel} {}
|
||||
|
||||
bool IsLockedByCurrentThread() const {
|
||||
return this->owner_thread == kernel.GetCurrentEmuThreadID();
|
||||
|
||||
@@ -1,172 +0,0 @@
|
||||
// Copyright 2021 yuzu Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include "common/assert.h"
|
||||
#include "common/common_types.h"
|
||||
#include "core/hle/kernel/k_scheduler.h"
|
||||
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
|
||||
#include "core/hle/kernel/k_synchronization_object.h"
|
||||
#include "core/hle/kernel/kernel.h"
|
||||
#include "core/hle/kernel/svc_results.h"
|
||||
#include "core/hle/kernel/thread.h"
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
ResultCode KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index,
|
||||
KSynchronizationObject** objects, const s32 num_objects,
|
||||
s64 timeout) {
|
||||
// Allocate space on stack for thread nodes.
|
||||
std::vector<ThreadListNode> thread_nodes(num_objects);
|
||||
|
||||
// Prepare for wait.
|
||||
Thread* thread = kernel.CurrentScheduler()->GetCurrentThread();
|
||||
Handle timer = InvalidHandle;
|
||||
|
||||
{
|
||||
// Setup the scheduling lock and sleep.
|
||||
KScopedSchedulerLockAndSleep slp(kernel, timer, thread, timeout);
|
||||
|
||||
// Check if any of the objects are already signaled.
|
||||
for (auto i = 0; i < num_objects; ++i) {
|
||||
ASSERT(objects[i] != nullptr);
|
||||
|
||||
if (objects[i]->IsSignaled()) {
|
||||
*out_index = i;
|
||||
slp.CancelSleep();
|
||||
return RESULT_SUCCESS;
|
||||
}
|
||||
}
|
||||
|
||||
// Check if the timeout is zero.
|
||||
if (timeout == 0) {
|
||||
slp.CancelSleep();
|
||||
return Svc::ResultTimedOut;
|
||||
}
|
||||
|
||||
// Check if the thread should terminate.
|
||||
if (thread->IsTerminationRequested()) {
|
||||
slp.CancelSleep();
|
||||
return Svc::ResultTerminationRequested;
|
||||
}
|
||||
|
||||
// Check if waiting was canceled.
|
||||
if (thread->IsWaitCancelled()) {
|
||||
slp.CancelSleep();
|
||||
thread->ClearWaitCancelled();
|
||||
return Svc::ResultCancelled;
|
||||
}
|
||||
|
||||
// Add the waiters.
|
||||
for (auto i = 0; i < num_objects; ++i) {
|
||||
thread_nodes[i].thread = thread;
|
||||
thread_nodes[i].next = nullptr;
|
||||
|
||||
if (objects[i]->thread_list_tail == nullptr) {
|
||||
objects[i]->thread_list_head = std::addressof(thread_nodes[i]);
|
||||
} else {
|
||||
objects[i]->thread_list_tail->next = std::addressof(thread_nodes[i]);
|
||||
}
|
||||
|
||||
objects[i]->thread_list_tail = std::addressof(thread_nodes[i]);
|
||||
}
|
||||
|
||||
// For debugging only
|
||||
thread->SetWaitObjectsForDebugging({objects, static_cast<std::size_t>(num_objects)});
|
||||
|
||||
// Mark the thread as waiting.
|
||||
thread->SetCancellable();
|
||||
thread->SetSyncedObject(nullptr, Svc::ResultTimedOut);
|
||||
thread->SetState(ThreadState::Waiting);
|
||||
thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Synchronization);
|
||||
}
|
||||
|
||||
// The lock/sleep is done, so we should be able to get our result.
|
||||
|
||||
// Thread is no longer cancellable.
|
||||
thread->ClearCancellable();
|
||||
|
||||
// For debugging only
|
||||
thread->SetWaitObjectsForDebugging({});
|
||||
|
||||
// Cancel the timer as needed.
|
||||
if (timer != InvalidHandle) {
|
||||
auto& time_manager = kernel.TimeManager();
|
||||
time_manager.UnscheduleTimeEvent(timer);
|
||||
}
|
||||
|
||||
// Get the wait result.
|
||||
ResultCode wait_result{RESULT_SUCCESS};
|
||||
s32 sync_index = -1;
|
||||
{
|
||||
KScopedSchedulerLock lock(kernel);
|
||||
KSynchronizationObject* synced_obj;
|
||||
wait_result = thread->GetWaitResult(std::addressof(synced_obj));
|
||||
|
||||
for (auto i = 0; i < num_objects; ++i) {
|
||||
// Unlink the object from the list.
|
||||
ThreadListNode* prev_ptr =
|
||||
reinterpret_cast<ThreadListNode*>(std::addressof(objects[i]->thread_list_head));
|
||||
ThreadListNode* prev_val = nullptr;
|
||||
ThreadListNode *prev, *tail_prev;
|
||||
|
||||
do {
|
||||
prev = prev_ptr;
|
||||
prev_ptr = prev_ptr->next;
|
||||
tail_prev = prev_val;
|
||||
prev_val = prev_ptr;
|
||||
} while (prev_ptr != std::addressof(thread_nodes[i]));
|
||||
|
||||
if (objects[i]->thread_list_tail == std::addressof(thread_nodes[i])) {
|
||||
objects[i]->thread_list_tail = tail_prev;
|
||||
}
|
||||
|
||||
prev->next = thread_nodes[i].next;
|
||||
|
||||
if (objects[i] == synced_obj) {
|
||||
sync_index = i;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Set output.
|
||||
*out_index = sync_index;
|
||||
return wait_result;
|
||||
}
|
||||
|
||||
KSynchronizationObject::KSynchronizationObject(KernelCore& kernel) : Object{kernel} {}
|
||||
|
||||
KSynchronizationObject::~KSynchronizationObject() = default;
|
||||
|
||||
void KSynchronizationObject::NotifyAvailable(ResultCode result) {
|
||||
KScopedSchedulerLock lock(kernel);
|
||||
|
||||
// If we're not signaled, we've nothing to notify.
|
||||
if (!this->IsSignaled()) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Iterate over each thread.
|
||||
for (auto* cur_node = thread_list_head; cur_node != nullptr; cur_node = cur_node->next) {
|
||||
Thread* thread = cur_node->thread;
|
||||
if (thread->GetState() == ThreadState::Waiting) {
|
||||
thread->SetSyncedObject(this, result);
|
||||
thread->SetState(ThreadState::Runnable);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
std::vector<Thread*> KSynchronizationObject::GetWaitingThreadsForDebugging() const {
|
||||
std::vector<Thread*> threads;
|
||||
|
||||
// If debugging, dump the list of waiters.
|
||||
{
|
||||
KScopedSchedulerLock lock(kernel);
|
||||
for (auto* cur_node = thread_list_head; cur_node != nullptr; cur_node = cur_node->next) {
|
||||
threads.emplace_back(cur_node->thread);
|
||||
}
|
||||
}
|
||||
|
||||
return threads;
|
||||
}
|
||||
} // namespace Kernel
|
||||
@@ -1,58 +0,0 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <vector>

#include "core/hle/kernel/object.h"
#include "core/hle/result.h"

namespace Kernel {

class KernelCore;
class Synchronization;
class Thread;

/// Class that represents a Kernel object that a thread can be waiting on
class KSynchronizationObject : public Object {
public:
    struct ThreadListNode {
        ThreadListNode* next{};
        Thread* thread{};
    };

    [[nodiscard]] static ResultCode Wait(KernelCore& kernel, s32* out_index,
                                         KSynchronizationObject** objects, const s32 num_objects,
                                         s64 timeout);

    [[nodiscard]] virtual bool IsSignaled() const = 0;

    [[nodiscard]] std::vector<Thread*> GetWaitingThreadsForDebugging() const;

protected:
    explicit KSynchronizationObject(KernelCore& kernel);
    virtual ~KSynchronizationObject();

    void NotifyAvailable(ResultCode result);
    void NotifyAvailable() {
        return this->NotifyAvailable(RESULT_SUCCESS);
    }

private:
    ThreadListNode* thread_list_head{};
    ThreadListNode* thread_list_tail{};
};

// Specialization of DynamicObjectCast for KSynchronizationObjects
template <>
inline std::shared_ptr<KSynchronizationObject> DynamicObjectCast<KSynchronizationObject>(
    std::shared_ptr<Object> object) {
    if (object != nullptr && object->IsWaitable()) {
        return std::static_pointer_cast<KSynchronizationObject>(object);
    }
    return nullptr;
}

} // namespace Kernel
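A short usage sketch (illustrative only; the kernel reference, the two objects and the timeout are hypothetical) of the static multi-object wait declared above:

// Wait on two objects for up to 1ms; out_index receives the index of whichever object
// satisfied the wait, and the result distinguishes success, timeout and cancellation.
// std::array<KSynchronizationObject*, 2> objects{&event_a, &event_b};
// s32 out_index = -1;
// const ResultCode result = KSynchronizationObject::Wait(
//     kernel, &out_index, objects.data(), static_cast<s32>(objects.size()), 1'000'000);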
@@ -8,14 +8,13 @@
|
||||
#include <functional>
|
||||
#include <memory>
|
||||
#include <thread>
|
||||
#include <unordered_set>
|
||||
#include <unordered_map>
|
||||
#include <utility>
|
||||
|
||||
#include "common/assert.h"
|
||||
#include "common/logging/log.h"
|
||||
#include "common/microprofile.h"
|
||||
#include "common/thread.h"
|
||||
#include "common/thread_worker.h"
|
||||
#include "core/arm/arm_interface.h"
|
||||
#include "core/arm/cpu_interrupt_handler.h"
|
||||
#include "core/arm/exclusive_monitor.h"
|
||||
@@ -36,8 +35,8 @@
|
||||
#include "core/hle/kernel/physical_core.h"
|
||||
#include "core/hle/kernel/process.h"
|
||||
#include "core/hle/kernel/resource_limit.h"
|
||||
#include "core/hle/kernel/service_thread.h"
|
||||
#include "core/hle/kernel/shared_memory.h"
|
||||
#include "core/hle/kernel/synchronization.h"
|
||||
#include "core/hle/kernel/thread.h"
|
||||
#include "core/hle/kernel/time_manager.h"
|
||||
#include "core/hle/lock.h"
|
||||
@@ -50,7 +49,8 @@ namespace Kernel {
|
||||
|
||||
struct KernelCore::Impl {
|
||||
explicit Impl(Core::System& system, KernelCore& kernel)
|
||||
: time_manager{system}, global_handle_table{kernel}, system{system} {}
|
||||
: synchronization{system}, time_manager{system}, global_handle_table{kernel}, system{
|
||||
system} {}
|
||||
|
||||
void SetMulticore(bool is_multicore) {
|
||||
this->is_multicore = is_multicore;
|
||||
@@ -60,8 +60,6 @@ struct KernelCore::Impl {
|
||||
RegisterHostThread();
|
||||
|
||||
global_scheduler_context = std::make_unique<Kernel::GlobalSchedulerContext>(kernel);
|
||||
service_thread_manager =
|
||||
std::make_unique<Common::ThreadWorker>(1, "yuzu:ServiceThreadManager");
|
||||
|
||||
InitializePhysicalCores();
|
||||
InitializeSystemResourceLimit(kernel);
|
||||
@@ -78,12 +76,6 @@ struct KernelCore::Impl {
|
||||
}
|
||||
|
||||
void Shutdown() {
|
||||
process_list.clear();
|
||||
|
||||
// Ensures all service threads shut down gracefully
|
||||
service_thread_manager.reset();
|
||||
service_threads.clear();
|
||||
|
||||
next_object_id = 0;
|
||||
next_kernel_process_id = Process::InitialKIPIDMin;
|
||||
next_user_process_id = Process::ProcessIDMin;
|
||||
@@ -97,6 +89,8 @@ struct KernelCore::Impl {
|
||||
|
||||
cores.clear();
|
||||
|
||||
process_list.clear();
|
||||
|
||||
current_process = nullptr;
|
||||
|
||||
system_resource_limit = nullptr;
|
||||
@@ -109,8 +103,10 @@ struct KernelCore::Impl {
|
||||
|
||||
exclusive_monitor.reset();
|
||||
|
||||
// Next host thread ID to use; IDs 0-3 represent core threads, >3 represent others
|
||||
next_host_thread_id = Core::Hardware::NUM_CPU_CORES;
|
||||
num_host_threads = 0;
|
||||
std::fill(register_host_thread_keys.begin(), register_host_thread_keys.end(),
|
||||
std::thread::id{});
|
||||
std::fill(register_host_thread_values.begin(), register_host_thread_values.end(), 0);
|
||||
}
|
||||
|
||||
void InitializePhysicalCores() {
|
||||
@@ -190,46 +186,52 @@ struct KernelCore::Impl {
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a new host thread ID, should only be called by GetHostThreadId
|
||||
u32 AllocateHostThreadId(std::optional<std::size_t> core_id) {
|
||||
if (core_id) {
|
||||
// The first four slots are reserved for CPU core threads
|
||||
ASSERT(*core_id < Core::Hardware::NUM_CPU_CORES);
|
||||
return static_cast<u32>(*core_id);
|
||||
} else {
|
||||
return next_host_thread_id++;
|
||||
}
|
||||
}
|
||||
|
||||
/// Gets the host thread ID for the caller, allocating a new one if this is the first time
|
||||
u32 GetHostThreadId(std::optional<std::size_t> core_id = std::nullopt) {
|
||||
const thread_local auto host_thread_id{AllocateHostThreadId(core_id)};
|
||||
return host_thread_id;
|
||||
}
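The pattern above relies on thread_local initialization so that the ID allocation runs at most once per host thread; a stripped-down sketch of the same idea (names here are illustrative, not from the commit):

#include <atomic>
// Each host thread gets a stable, unique ID the first time it asks for one.
u32 GetHostThreadIdSketch() {
    static std::atomic<u32> next_id{0};
    const thread_local u32 id = next_id.fetch_add(1); // initialized once per calling thread
    return id;
}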
|
||||
|
||||
/// Registers a CPU core thread by allocating a host thread ID for it
|
||||
void RegisterCoreThread(std::size_t core_id) {
|
||||
ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
|
||||
const auto this_id = GetHostThreadId(core_id);
|
||||
const std::thread::id this_id = std::this_thread::get_id();
|
||||
if (!is_multicore) {
|
||||
single_core_thread_id = this_id;
|
||||
}
|
||||
const auto end =
|
||||
register_host_thread_keys.begin() + static_cast<ptrdiff_t>(num_host_threads);
|
||||
const auto it = std::find(register_host_thread_keys.begin(), end, this_id);
|
||||
ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
|
||||
ASSERT(it == end);
|
||||
InsertHostThread(static_cast<u32>(core_id));
|
||||
}
|
||||
|
||||
/// Registers a new host thread by allocating a host thread ID for it
|
||||
void RegisterHostThread() {
|
||||
[[maybe_unused]] const auto this_id = GetHostThreadId();
|
||||
const std::thread::id this_id = std::this_thread::get_id();
|
||||
const auto end =
|
||||
register_host_thread_keys.begin() + static_cast<ptrdiff_t>(num_host_threads);
|
||||
const auto it = std::find(register_host_thread_keys.begin(), end, this_id);
|
||||
if (it == end) {
|
||||
InsertHostThread(registered_thread_ids++);
|
||||
}
|
||||
}
|
||||
|
||||
[[nodiscard]] u32 GetCurrentHostThreadID() {
|
||||
const auto this_id = GetHostThreadId();
|
||||
void InsertHostThread(u32 value) {
|
||||
const size_t index = num_host_threads++;
|
||||
ASSERT_MSG(index < NUM_REGISTRABLE_HOST_THREADS, "Too many host threads");
|
||||
register_host_thread_values[index] = value;
|
||||
register_host_thread_keys[index] = std::this_thread::get_id();
|
||||
}
|
||||
|
||||
[[nodiscard]] u32 GetCurrentHostThreadID() const {
|
||||
const std::thread::id this_id = std::this_thread::get_id();
|
||||
if (!is_multicore && single_core_thread_id == this_id) {
|
||||
return static_cast<u32>(system.GetCpuManager().CurrentCore());
|
||||
}
|
||||
return this_id;
|
||||
const auto end =
|
||||
register_host_thread_keys.begin() + static_cast<ptrdiff_t>(num_host_threads);
|
||||
const auto it = std::find(register_host_thread_keys.begin(), end, this_id);
|
||||
if (it == end) {
|
||||
return Core::INVALID_HOST_THREAD_ID;
|
||||
}
|
||||
return register_host_thread_values[static_cast<size_t>(
|
||||
std::distance(register_host_thread_keys.begin(), it))];
|
||||
}
|
||||
|
||||
[[nodiscard]] Core::EmuThreadHandle GetCurrentEmuThreadID() {
|
||||
Core::EmuThreadHandle GetCurrentEmuThreadID() const {
|
||||
Core::EmuThreadHandle result = Core::EmuThreadHandle::InvalidHandle();
|
||||
result.host_handle = GetCurrentHostThreadID();
|
||||
if (result.host_handle >= Core::Hardware::NUM_CPU_CORES) {
|
||||
@@ -305,6 +307,7 @@ struct KernelCore::Impl {
|
||||
std::vector<std::shared_ptr<Process>> process_list;
|
||||
Process* current_process = nullptr;
|
||||
std::unique_ptr<Kernel::GlobalSchedulerContext> global_scheduler_context;
|
||||
Kernel::Synchronization synchronization;
|
||||
Kernel::TimeManager time_manager;
|
||||
|
||||
std::shared_ptr<ResourceLimit> system_resource_limit;
|
||||
@@ -322,8 +325,15 @@ struct KernelCore::Impl {
|
||||
std::unique_ptr<Core::ExclusiveMonitor> exclusive_monitor;
|
||||
std::vector<Kernel::PhysicalCore> cores;
|
||||
|
||||
// Next host thread ID to use, 0-3 IDs represent core threads, >3 represent others
|
||||
std::atomic<u32> next_host_thread_id{Core::Hardware::NUM_CPU_CORES};
|
||||
// 0-3 IDs represent core threads, >3 represent others
|
||||
std::atomic<u32> registered_thread_ids{Core::Hardware::NUM_CPU_CORES};
|
||||
|
||||
// Number of host threads is a relatively high number to avoid overflowing
|
||||
static constexpr size_t NUM_REGISTRABLE_HOST_THREADS = 64;
|
||||
std::atomic<size_t> num_host_threads{0};
|
||||
std::array<std::atomic<std::thread::id>, NUM_REGISTRABLE_HOST_THREADS>
|
||||
register_host_thread_keys{};
|
||||
std::array<std::atomic<u32>, NUM_REGISTRABLE_HOST_THREADS> register_host_thread_values{};
|
||||
|
||||
// Kernel memory management
|
||||
std::unique_ptr<Memory::MemoryManager> memory_manager;
|
||||
@@ -335,19 +345,12 @@ struct KernelCore::Impl {
|
||||
std::shared_ptr<Kernel::SharedMemory> irs_shared_mem;
|
||||
std::shared_ptr<Kernel::SharedMemory> time_shared_mem;
|
||||
|
||||
// Threads used for services
|
||||
std::unordered_set<std::shared_ptr<Kernel::ServiceThread>> service_threads;
|
||||
|
||||
// Service threads are managed by a worker thread, so that a calling service thread can queue up
|
||||
// the release of itself
|
||||
std::unique_ptr<Common::ThreadWorker> service_thread_manager;
|
||||
|
||||
std::array<std::shared_ptr<Thread>, Core::Hardware::NUM_CPU_CORES> suspend_threads{};
|
||||
std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES> interrupts{};
|
||||
std::array<std::unique_ptr<Kernel::KScheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{};
|
||||
|
||||
bool is_multicore{};
|
||||
u32 single_core_thread_id{};
|
||||
std::thread::id single_core_thread_id{};
|
||||
|
||||
std::array<u64, Core::Hardware::NUM_CPU_CORES> svc_ticks{};
|
||||
|
||||
@@ -458,6 +461,14 @@ const std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& Kern
|
||||
return impl->interrupts;
|
||||
}
|
||||
|
||||
Kernel::Synchronization& KernelCore::Synchronization() {
|
||||
return impl->synchronization;
|
||||
}
|
||||
|
||||
const Kernel::Synchronization& KernelCore::Synchronization() const {
|
||||
return impl->synchronization;
|
||||
}
|
||||
|
||||
Kernel::TimeManager& KernelCore::TimeManager() {
|
||||
return impl->time_manager;
|
||||
}
|
||||
@@ -602,11 +613,9 @@ void KernelCore::Suspend(bool in_suspention) {
|
||||
const bool should_suspend = exception_exited || in_suspention;
|
||||
{
|
||||
KScopedSchedulerLock lock(*this);
|
||||
const auto state = should_suspend ? ThreadState::Runnable : ThreadState::Waiting;
|
||||
ThreadStatus status = should_suspend ? ThreadStatus::Ready : ThreadStatus::WaitSleep;
|
||||
for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
|
||||
impl->suspend_threads[i]->SetState(state);
|
||||
impl->suspend_threads[i]->SetWaitReasonForDebugging(
|
||||
ThreadWaitReasonForDebugging::Suspended);
|
||||
impl->suspend_threads[i]->SetStatus(status);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -630,19 +639,4 @@ void KernelCore::ExitSVCProfile() {
|
||||
MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[core]);
|
||||
}
|
||||
|
||||
std::weak_ptr<Kernel::ServiceThread> KernelCore::CreateServiceThread(const std::string& name) {
|
||||
auto service_thread = std::make_shared<Kernel::ServiceThread>(*this, 1, name);
|
||||
impl->service_thread_manager->QueueWork(
|
||||
[this, service_thread] { impl->service_threads.emplace(service_thread); });
|
||||
return service_thread;
|
||||
}
|
||||
|
||||
void KernelCore::ReleaseServiceThread(std::weak_ptr<Kernel::ServiceThread> service_thread) {
|
||||
impl->service_thread_manager->QueueWork([this, service_thread] {
|
||||
if (auto strong_ptr = service_thread.lock()) {
|
||||
impl->service_threads.erase(strong_ptr);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
} // namespace Kernel
|
||||
|
||||
@@ -33,6 +33,7 @@ template <typename T>
|
||||
class SlabHeap;
|
||||
} // namespace Memory
|
||||
|
||||
class AddressArbiter;
|
||||
class ClientPort;
|
||||
class GlobalSchedulerContext;
|
||||
class HandleTable;
|
||||
@@ -41,7 +42,6 @@ class Process;
|
||||
class ResourceLimit;
|
||||
class KScheduler;
|
||||
class SharedMemory;
|
||||
class ServiceThread;
|
||||
class Synchronization;
|
||||
class Thread;
|
||||
class TimeManager;
|
||||
@@ -128,6 +128,12 @@ public:
|
||||
/// Gets an instance of the current physical CPU core.
|
||||
const Kernel::PhysicalCore& CurrentPhysicalCore() const;
|
||||
|
||||
/// Gets an instance of the Synchronization Interface.
|
||||
Kernel::Synchronization& Synchronization();
|
||||
|
||||
/// Gets an instance of the Synchronization Interface.
|
||||
const Kernel::Synchronization& Synchronization() const;
|
||||
|
||||
/// Gets an instance of the TimeManager Interface.
|
||||
Kernel::TimeManager& TimeManager();
|
||||
|
||||
@@ -221,22 +227,6 @@ public:
|
||||
|
||||
void ExitSVCProfile();
|
||||
|
||||
/**
|
||||
* Creates an HLE service thread, which are used to execute service routines asynchronously.
|
||||
* While these are allocated per ServerSession, these need to be owned and managed outside of
|
||||
* ServerSession to avoid a circular dependency.
|
||||
* @param name String name for the ServerSession creating this thread, used for debug purposes.
|
||||
* @returns A weak pointer to the newly created service thread.
|
||||
*/
|
||||
std::weak_ptr<Kernel::ServiceThread> CreateServiceThread(const std::string& name);
|
||||
|
||||
/**
|
||||
* Releases a HLE service thread, instructing KernelCore to free it. This should be called when
|
||||
* the ServerSession associated with the thread is destroyed.
|
||||
* @param service_thread Service thread to release.
|
||||
*/
|
||||
void ReleaseServiceThread(std::weak_ptr<Kernel::ServiceThread> service_thread);
|
||||
|
||||
private:
|
||||
friend class Object;
|
||||
friend class Process;
|
||||
|
||||
@@ -96,7 +96,6 @@ u64 AddressSpaceInfo::GetAddressSpaceStart(std::size_t width, Type type) {
|
||||
return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].address;
|
||||
}
|
||||
UNREACHABLE();
|
||||
return 0;
|
||||
}
|
||||
|
||||
std::size_t AddressSpaceInfo::GetAddressSpaceSize(std::size_t width, Type type) {
|
||||
@@ -113,7 +112,6 @@ std::size_t AddressSpaceInfo::GetAddressSpaceSize(std::size_t width, Type type)
|
||||
return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].size;
|
||||
}
|
||||
UNREACHABLE();
|
||||
return 0;
|
||||
}
|
||||
|
||||
} // namespace Kernel::Memory
|
||||
|
||||
@@ -73,12 +73,12 @@ enum class MemoryState : u32 {
|
||||
ThreadLocal =
|
||||
static_cast<u32>(Svc::MemoryState::ThreadLocal) | FlagMapped | FlagReferenceCounted,
|
||||
|
||||
Transferred = static_cast<u32>(Svc::MemoryState::Transferred) | FlagsMisc |
|
||||
FlagCanAlignedDeviceMap | FlagCanChangeAttribute | FlagCanUseIpc |
|
||||
FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
|
||||
Transfered = static_cast<u32>(Svc::MemoryState::Transfered) | FlagsMisc |
|
||||
FlagCanAlignedDeviceMap | FlagCanChangeAttribute | FlagCanUseIpc |
|
||||
FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
|
||||
|
||||
SharedTransferred = static_cast<u32>(Svc::MemoryState::SharedTransferred) | FlagsMisc |
|
||||
FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
|
||||
SharedTransfered = static_cast<u32>(Svc::MemoryState::SharedTransfered) | FlagsMisc |
|
||||
FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
|
||||
|
||||
SharedCode = static_cast<u32>(Svc::MemoryState::SharedCode) | FlagMapped |
|
||||
FlagReferenceCounted | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
|
||||
@@ -111,8 +111,8 @@ static_assert(static_cast<u32>(MemoryState::AliasCodeData) == 0x03FFBD09);
|
||||
static_assert(static_cast<u32>(MemoryState::Ipc) == 0x005C3C0A);
|
||||
static_assert(static_cast<u32>(MemoryState::Stack) == 0x005C3C0B);
|
||||
static_assert(static_cast<u32>(MemoryState::ThreadLocal) == 0x0040200C);
|
||||
static_assert(static_cast<u32>(MemoryState::Transferred) == 0x015C3C0D);
|
||||
static_assert(static_cast<u32>(MemoryState::SharedTransferred) == 0x005C380E);
|
||||
static_assert(static_cast<u32>(MemoryState::Transfered) == 0x015C3C0D);
|
||||
static_assert(static_cast<u32>(MemoryState::SharedTransfered) == 0x005C380E);
|
||||
static_assert(static_cast<u32>(MemoryState::SharedCode) == 0x0040380F);
|
||||
static_assert(static_cast<u32>(MemoryState::Inaccessible) == 0x00000010);
|
||||
static_assert(static_cast<u32>(MemoryState::NonSecureIpc) == 0x005C3811);
|
||||
|
||||
@@ -5,28 +5,9 @@
|
||||
#pragma once
|
||||
|
||||
#include "common/common_types.h"
|
||||
#include "core/device_memory.h"
|
||||
|
||||
namespace Kernel::Memory {
|
||||
|
||||
constexpr std::size_t KernelAslrAlignment = 2 * 1024 * 1024;
|
||||
constexpr std::size_t KernelVirtualAddressSpaceWidth = 1ULL << 39;
|
||||
constexpr std::size_t KernelPhysicalAddressSpaceWidth = 1ULL << 48;
|
||||
constexpr std::size_t KernelVirtualAddressSpaceBase = 0ULL - KernelVirtualAddressSpaceWidth;
|
||||
constexpr std::size_t KernelVirtualAddressSpaceEnd =
|
||||
KernelVirtualAddressSpaceBase + (KernelVirtualAddressSpaceWidth - KernelAslrAlignment);
|
||||
constexpr std::size_t KernelVirtualAddressSpaceLast = KernelVirtualAddressSpaceEnd - 1;
|
||||
constexpr std::size_t KernelVirtualAddressSpaceSize =
|
||||
KernelVirtualAddressSpaceEnd - KernelVirtualAddressSpaceBase;
|
||||
|
||||
constexpr bool IsKernelAddressKey(VAddr key) {
|
||||
return KernelVirtualAddressSpaceBase <= key && key <= KernelVirtualAddressSpaceLast;
|
||||
}
|
||||
|
||||
constexpr bool IsKernelAddress(VAddr address) {
|
||||
return KernelVirtualAddressSpaceBase <= address && address < KernelVirtualAddressSpaceEnd;
|
||||
}
|
||||
|
||||
class MemoryRegion final {
|
||||
friend class MemoryLayout;
|
||||
|
||||
|
||||
@@ -265,7 +265,7 @@ ResultCode PageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_t
|
||||
physical_memory_usage = 0;
|
||||
memory_pool = pool;
|
||||
|
||||
page_table_impl.Resize(address_space_width, PageBits);
|
||||
page_table_impl.Resize(address_space_width, PageBits, true);
|
||||
|
||||
return InitializeMemoryLayout(start, end);
|
||||
}
|
||||
@@ -1007,8 +1007,8 @@ constexpr VAddr PageTable::GetRegionAddress(MemoryState state) const {
|
||||
case MemoryState::Shared:
|
||||
case MemoryState::AliasCode:
|
||||
case MemoryState::AliasCodeData:
|
||||
case MemoryState::Transferred:
|
||||
case MemoryState::SharedTransferred:
|
||||
case MemoryState::Transfered:
|
||||
case MemoryState::SharedTransfered:
|
||||
case MemoryState::SharedCode:
|
||||
case MemoryState::GeneratedCode:
|
||||
case MemoryState::CodeOut:
|
||||
@@ -1042,8 +1042,8 @@ constexpr std::size_t PageTable::GetRegionSize(MemoryState state) const {
|
||||
case MemoryState::Shared:
|
||||
case MemoryState::AliasCode:
|
||||
case MemoryState::AliasCodeData:
|
||||
case MemoryState::Transferred:
|
||||
case MemoryState::SharedTransferred:
|
||||
case MemoryState::Transfered:
|
||||
case MemoryState::SharedTransfered:
|
||||
case MemoryState::SharedCode:
|
||||
case MemoryState::GeneratedCode:
|
||||
case MemoryState::CodeOut:
|
||||
@@ -1080,8 +1080,8 @@ constexpr bool PageTable::CanContain(VAddr addr, std::size_t size, MemoryState s
|
||||
case MemoryState::AliasCodeData:
|
||||
case MemoryState::Stack:
|
||||
case MemoryState::ThreadLocal:
|
||||
case MemoryState::Transferred:
|
||||
case MemoryState::SharedTransferred:
|
||||
case MemoryState::Transfered:
|
||||
case MemoryState::SharedTransfered:
|
||||
case MemoryState::SharedCode:
|
||||
case MemoryState::GeneratedCode:
|
||||
case MemoryState::CodeOut:
|
||||
|
||||
src/core/hle/kernel/mutex.cpp (new file, 170 lines)
@@ -0,0 +1,170 @@
|
||||
// Copyright 2014 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include <memory>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
#include "common/assert.h"
|
||||
#include "common/logging/log.h"
|
||||
#include "core/core.h"
|
||||
#include "core/hle/kernel/errors.h"
|
||||
#include "core/hle/kernel/handle_table.h"
|
||||
#include "core/hle/kernel/k_scheduler.h"
|
||||
#include "core/hle/kernel/kernel.h"
|
||||
#include "core/hle/kernel/mutex.h"
|
||||
#include "core/hle/kernel/object.h"
|
||||
#include "core/hle/kernel/process.h"
|
||||
#include "core/hle/kernel/thread.h"
|
||||
#include "core/hle/result.h"
|
||||
#include "core/memory.h"
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
/// Returns the number of threads that are waiting for a mutex, and the highest priority one among
|
||||
/// those.
|
||||
static std::pair<std::shared_ptr<Thread>, u32> GetHighestPriorityMutexWaitingThread(
|
||||
const std::shared_ptr<Thread>& current_thread, VAddr mutex_addr) {
|
||||
|
||||
std::shared_ptr<Thread> highest_priority_thread;
|
||||
u32 num_waiters = 0;
|
||||
|
||||
for (const auto& thread : current_thread->GetMutexWaitingThreads()) {
|
||||
if (thread->GetMutexWaitAddress() != mutex_addr)
|
||||
continue;
|
||||
|
||||
++num_waiters;
|
||||
if (highest_priority_thread == nullptr ||
|
||||
thread->GetPriority() < highest_priority_thread->GetPriority()) {
|
||||
highest_priority_thread = thread;
|
||||
}
|
||||
}
|
||||
|
||||
return {highest_priority_thread, num_waiters};
|
||||
}
|
||||
|
||||
/// Update the mutex owner field of all threads waiting on the mutex to point to the new owner.
|
||||
static void TransferMutexOwnership(VAddr mutex_addr, std::shared_ptr<Thread> current_thread,
|
||||
std::shared_ptr<Thread> new_owner) {
|
||||
current_thread->RemoveMutexWaiter(new_owner);
|
||||
const auto threads = current_thread->GetMutexWaitingThreads();
|
||||
for (const auto& thread : threads) {
|
||||
if (thread->GetMutexWaitAddress() != mutex_addr)
|
||||
continue;
|
||||
|
||||
ASSERT(thread->GetLockOwner() == current_thread.get());
|
||||
current_thread->RemoveMutexWaiter(thread);
|
||||
if (new_owner != thread)
|
||||
new_owner->AddMutexWaiter(thread);
|
||||
}
|
||||
}
|
||||
|
||||
Mutex::Mutex(Core::System& system) : system{system} {}
|
||||
Mutex::~Mutex() = default;
|
||||
|
||||
ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle,
|
||||
Handle requesting_thread_handle) {
|
||||
// The mutex address must be 4-byte aligned
|
||||
if ((address % sizeof(u32)) != 0) {
|
||||
LOG_ERROR(Kernel, "Address is not 4-byte aligned! address={:016X}", address);
|
||||
return ERR_INVALID_ADDRESS;
|
||||
}
|
||||
|
||||
auto& kernel = system.Kernel();
|
||||
std::shared_ptr<Thread> current_thread =
|
||||
SharedFrom(kernel.CurrentScheduler()->GetCurrentThread());
|
||||
{
|
||||
KScopedSchedulerLock lock(kernel);
|
||||
// The mutex address must be 4-byte aligned
|
||||
if ((address % sizeof(u32)) != 0) {
|
||||
return ERR_INVALID_ADDRESS;
|
||||
}
|
||||
|
||||
const auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
|
||||
std::shared_ptr<Thread> holding_thread = handle_table.Get<Thread>(holding_thread_handle);
|
||||
std::shared_ptr<Thread> requesting_thread =
|
||||
handle_table.Get<Thread>(requesting_thread_handle);
|
||||
|
||||
// TODO(Subv): It is currently unknown if it is possible to lock a mutex in behalf of
|
||||
// another thread.
|
||||
ASSERT(requesting_thread == current_thread);
|
||||
|
||||
current_thread->SetSynchronizationResults(nullptr, RESULT_SUCCESS);
|
||||
|
||||
const u32 addr_value = system.Memory().Read32(address);
|
||||
|
||||
// If the mutex isn't being held, just return success.
|
||||
if (addr_value != (holding_thread_handle | Mutex::MutexHasWaitersFlag)) {
|
||||
return RESULT_SUCCESS;
|
||||
}
|
||||
|
||||
if (holding_thread == nullptr) {
|
||||
return ERR_INVALID_HANDLE;
|
||||
}
|
||||
|
||||
// Wait until the mutex is released
|
||||
current_thread->SetMutexWaitAddress(address);
|
||||
current_thread->SetWaitHandle(requesting_thread_handle);
|
||||
|
||||
current_thread->SetStatus(ThreadStatus::WaitMutex);
|
||||
|
||||
// Update the lock holder thread's priority to prevent priority inversion.
|
||||
holding_thread->AddMutexWaiter(current_thread);
|
||||
}
|
||||
|
||||
{
|
||||
KScopedSchedulerLock lock(kernel);
|
||||
auto* owner = current_thread->GetLockOwner();
|
||||
if (owner != nullptr) {
|
||||
owner->RemoveMutexWaiter(current_thread);
|
||||
}
|
||||
}
|
||||
return current_thread->GetSignalingResult();
|
||||
}
|
||||
|
||||
std::pair<ResultCode, std::shared_ptr<Thread>> Mutex::Unlock(std::shared_ptr<Thread> owner,
|
||||
VAddr address) {
|
||||
// The mutex address must be 4-byte aligned
|
||||
if ((address % sizeof(u32)) != 0) {
|
||||
LOG_ERROR(Kernel, "Address is not 4-byte aligned! address={:016X}", address);
|
||||
return {ERR_INVALID_ADDRESS, nullptr};
|
||||
}
|
||||
|
||||
auto [new_owner, num_waiters] = GetHighestPriorityMutexWaitingThread(owner, address);
|
||||
if (new_owner == nullptr) {
|
||||
system.Memory().Write32(address, 0);
|
||||
return {RESULT_SUCCESS, nullptr};
|
||||
}
|
||||
// Transfer the ownership of the mutex from the previous owner to the new one.
|
||||
TransferMutexOwnership(address, owner, new_owner);
|
||||
u32 mutex_value = new_owner->GetWaitHandle();
|
||||
if (num_waiters >= 2) {
|
||||
// Notify the guest that there are still some threads waiting for the mutex
|
||||
mutex_value |= Mutex::MutexHasWaitersFlag;
|
||||
}
|
||||
new_owner->SetSynchronizationResults(nullptr, RESULT_SUCCESS);
|
||||
new_owner->SetLockOwner(nullptr);
|
||||
new_owner->ResumeFromWait();
|
||||
|
||||
system.Memory().Write32(address, mutex_value);
|
||||
return {RESULT_SUCCESS, new_owner};
|
||||
}
|
||||
|
||||
ResultCode Mutex::Release(VAddr address) {
|
||||
auto& kernel = system.Kernel();
|
||||
KScopedSchedulerLock lock(kernel);
|
||||
|
||||
std::shared_ptr<Thread> current_thread =
|
||||
SharedFrom(kernel.CurrentScheduler()->GetCurrentThread());
|
||||
|
||||
auto [result, new_owner] = Unlock(current_thread, address);
|
||||
|
||||
if (result != RESULT_SUCCESS && new_owner != nullptr) {
|
||||
new_owner->SetSynchronizationResults(nullptr, result);
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
} // namespace Kernel
|
||||
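Between the two new files, it may help to see how the guest mutex word read by Mutex::TryAcquire is interpreted; this is an illustrative decomposition using the constants declared in mutex.h below, not code from the commit, and the surrounding objects are assumed context:

// word == 0                  -> mutex is free
// word & MutexOwnerMask      -> wait handle of the owning thread
// word & MutexHasWaitersFlag -> other threads are queued on this mutex
const u32 word = system.Memory().Read32(address);        // assumed context
const bool contended = (word & Mutex::MutexHasWaitersFlag) != 0;
const u32 owner_handle = word & Mutex::MutexOwnerMask;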
src/core/hle/kernel/mutex.h (new file, 42 lines)
@@ -0,0 +1,42 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "common/common_types.h"

union ResultCode;

namespace Core {
class System;
}

namespace Kernel {

class Mutex final {
public:
    explicit Mutex(Core::System& system);
    ~Mutex();

    /// Flag that indicates that a mutex still has threads waiting for it.
    static constexpr u32 MutexHasWaitersFlag = 0x40000000;
    /// Mask of the bits in a mutex address value that contain the mutex owner.
    static constexpr u32 MutexOwnerMask = 0xBFFFFFFF;

    /// Attempts to acquire a mutex at the specified address.
    ResultCode TryAcquire(VAddr address, Handle holding_thread_handle,
                          Handle requesting_thread_handle);

    /// Unlocks a mutex for owner at address
    std::pair<ResultCode, std::shared_ptr<Thread>> Unlock(std::shared_ptr<Thread> owner,
                                                          VAddr address);

    /// Releases the mutex at the specified address.
    ResultCode Release(VAddr address);

private:
    Core::System& system;
};

} // namespace Kernel
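A short usage sketch of this interface; the TryAcquire call mirrors the ArbitrateLock change later in this diff, while the Release call is assumed to be the matching unlock path (illustrative only):

// Guest requested a lock on `mutex_addr` while the thread named by
// `holding_thread_handle` currently owns it.
auto* const process = system.Kernel().CurrentProcess();
const ResultCode rc = process->GetMutex().TryAcquire(mutex_addr, holding_thread_handle,
                                                     requesting_thread_handle);
// Later, the owner releases it; the highest-priority waiter (if any) is woken.
const ResultCode release_rc = process->GetMutex().Release(mutex_addr);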
@@ -50,11 +50,6 @@ public:
    }
    virtual HandleType GetHandleType() const = 0;

    void Close() {
        // TODO(bunnei): This is a placeholder to decrement the reference count, which we will use
        // when we implement KAutoObject instead of using shared_ptr.
    }

    /**
     * Check if a thread can wait on the object
     * @return True if a thread can wait on the object, otherwise false
@@ -55,7 +55,7 @@ void SetupMainThread(Core::System& system, Process& owner_process, u32 priority,
|
||||
// Threads by default are dormant, wake up the main thread so it runs when the scheduler fires
|
||||
{
|
||||
KScopedSchedulerLock lock{kernel};
|
||||
thread->SetState(ThreadState::Runnable);
|
||||
thread->SetStatus(ThreadStatus::Ready);
|
||||
}
|
||||
}
|
||||
} // Anonymous namespace
|
||||
@@ -162,6 +162,48 @@ u64 Process::GetTotalPhysicalMemoryUsedWithoutSystemResource() const {
|
||||
return GetTotalPhysicalMemoryUsed() - GetSystemResourceUsage();
|
||||
}
|
||||
|
||||
void Process::InsertConditionVariableThread(std::shared_ptr<Thread> thread) {
|
||||
VAddr cond_var_addr = thread->GetCondVarWaitAddress();
|
||||
std::list<std::shared_ptr<Thread>>& thread_list = cond_var_threads[cond_var_addr];
|
||||
auto it = thread_list.begin();
|
||||
while (it != thread_list.end()) {
|
||||
const std::shared_ptr<Thread> current_thread = *it;
|
||||
if (current_thread->GetPriority() > thread->GetPriority()) {
|
||||
thread_list.insert(it, thread);
|
||||
return;
|
||||
}
|
||||
++it;
|
||||
}
|
||||
thread_list.push_back(thread);
|
||||
}
|
||||
|
||||
void Process::RemoveConditionVariableThread(std::shared_ptr<Thread> thread) {
|
||||
VAddr cond_var_addr = thread->GetCondVarWaitAddress();
|
||||
std::list<std::shared_ptr<Thread>>& thread_list = cond_var_threads[cond_var_addr];
|
||||
auto it = thread_list.begin();
|
||||
while (it != thread_list.end()) {
|
||||
const std::shared_ptr<Thread> current_thread = *it;
|
||||
if (current_thread.get() == thread.get()) {
|
||||
thread_list.erase(it);
|
||||
return;
|
||||
}
|
||||
++it;
|
||||
}
|
||||
}
|
||||
|
||||
std::vector<std::shared_ptr<Thread>> Process::GetConditionVariableThreads(
|
||||
const VAddr cond_var_addr) {
|
||||
std::vector<std::shared_ptr<Thread>> result{};
|
||||
std::list<std::shared_ptr<Thread>>& thread_list = cond_var_threads[cond_var_addr];
|
||||
auto it = thread_list.begin();
|
||||
while (it != thread_list.end()) {
|
||||
std::shared_ptr<Thread> current_thread = *it;
|
||||
result.push_back(current_thread);
|
||||
++it;
|
||||
}
|
||||
return result;
|
||||
}
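InsertConditionVariableThread above keeps each per-address wait list sorted by thread priority; the same ordering rule in isolation, as a generic sketch rather than code from the commit:

#include <algorithm>
#include <list>
// Insert `value` before the first element whose priority value is larger
// (a larger value means lower scheduling priority), otherwise append at the end.
template <typename T, typename PriorityOf>
void InsertSortedByPriority(std::list<T>& waiters, T value, PriorityOf priority_of) {
    const auto pos = std::find_if(waiters.begin(), waiters.end(), [&](const T& other) {
        return priority_of(other) > priority_of(value);
    });
    waiters.insert(pos, std::move(value)); // insert at end() behaves like push_back
}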
|
||||
|
||||
void Process::RegisterThread(const Thread* thread) {
|
||||
thread_list.push_back(thread);
|
||||
}
|
||||
@@ -276,7 +318,7 @@ void Process::PrepareForTermination() {
|
||||
continue;
|
||||
|
||||
// TODO(Subv): When are the other running/ready threads terminated?
|
||||
ASSERT_MSG(thread->GetState() == ThreadState::Waiting,
|
||||
ASSERT_MSG(thread->GetStatus() == ThreadStatus::WaitSynch,
|
||||
"Exiting processes with non-waiting threads is currently unimplemented");
|
||||
|
||||
thread->Stop();
|
||||
@@ -364,18 +406,21 @@ void Process::LoadModule(CodeSet code_set, VAddr base_addr) {
|
||||
ReprotectSegment(code_set.DataSegment(), Memory::MemoryPermission::ReadAndWrite);
|
||||
}
|
||||
|
||||
bool Process::IsSignaled() const {
|
||||
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
|
||||
return is_signaled;
|
||||
}
|
||||
|
||||
Process::Process(Core::System& system)
|
||||
: KSynchronizationObject{system.Kernel()},
|
||||
page_table{std::make_unique<Memory::PageTable>(system)}, handle_table{system.Kernel()},
|
||||
address_arbiter{system}, condition_var{system}, system{system} {}
|
||||
: SynchronizationObject{system.Kernel()}, page_table{std::make_unique<Memory::PageTable>(
|
||||
system)},
|
||||
handle_table{system.Kernel()}, address_arbiter{system}, mutex{system}, system{system} {}
|
||||
|
||||
Process::~Process() = default;
|
||||
|
||||
void Process::Acquire(Thread* thread) {
|
||||
ASSERT_MSG(!ShouldWait(thread), "Object unavailable!");
|
||||
}
|
||||
|
||||
bool Process::ShouldWait(const Thread* thread) const {
|
||||
return !is_signaled;
|
||||
}
|
||||
|
||||
void Process::ChangeStatus(ProcessStatus new_status) {
|
||||
if (status == new_status) {
|
||||
return;
|
||||
@@ -383,7 +428,7 @@ void Process::ChangeStatus(ProcessStatus new_status) {
|
||||
|
||||
status = new_status;
|
||||
is_signaled = true;
|
||||
NotifyAvailable();
|
||||
Signal();
|
||||
}
|
||||
|
||||
ResultCode Process::AllocateMainThreadStack(std::size_t stack_size) {
|
||||
|
||||
@@ -11,11 +11,11 @@
|
||||
#include <unordered_map>
|
||||
#include <vector>
|
||||
#include "common/common_types.h"
|
||||
#include "core/hle/kernel/address_arbiter.h"
|
||||
#include "core/hle/kernel/handle_table.h"
|
||||
#include "core/hle/kernel/k_address_arbiter.h"
|
||||
#include "core/hle/kernel/k_condition_variable.h"
|
||||
#include "core/hle/kernel/k_synchronization_object.h"
|
||||
#include "core/hle/kernel/mutex.h"
|
||||
#include "core/hle/kernel/process_capability.h"
|
||||
#include "core/hle/kernel/synchronization_object.h"
|
||||
#include "core/hle/result.h"
|
||||
|
||||
namespace Core {
|
||||
@@ -63,7 +63,7 @@ enum class ProcessStatus {
|
||||
DebugBreak,
|
||||
};
|
||||
|
||||
class Process final : public KSynchronizationObject {
|
||||
class Process final : public SynchronizationObject {
|
||||
public:
|
||||
explicit Process(Core::System& system);
|
||||
~Process() override;
|
||||
@@ -123,30 +123,24 @@ public:
|
||||
return handle_table;
|
||||
}
|
||||
|
||||
ResultCode SignalToAddress(VAddr address) {
|
||||
return condition_var.SignalToAddress(address);
|
||||
/// Gets a reference to the process' address arbiter.
|
||||
AddressArbiter& GetAddressArbiter() {
|
||||
return address_arbiter;
|
||||
}
|
||||
|
||||
ResultCode WaitForAddress(Handle handle, VAddr address, u32 tag) {
|
||||
return condition_var.WaitForAddress(handle, address, tag);
|
||||
/// Gets a const reference to the process' address arbiter.
|
||||
const AddressArbiter& GetAddressArbiter() const {
|
||||
return address_arbiter;
|
||||
}
|
||||
|
||||
void SignalConditionVariable(u64 cv_key, int32_t count) {
|
||||
return condition_var.Signal(cv_key, count);
|
||||
/// Gets a reference to the process' mutex lock.
|
||||
Mutex& GetMutex() {
|
||||
return mutex;
|
||||
}
|
||||
|
||||
ResultCode WaitConditionVariable(VAddr address, u64 cv_key, u32 tag, s64 ns) {
|
||||
return condition_var.Wait(address, cv_key, tag, ns);
|
||||
}
|
||||
|
||||
ResultCode SignalAddressArbiter(VAddr address, Svc::SignalType signal_type, s32 value,
|
||||
s32 count) {
|
||||
return address_arbiter.SignalToAddress(address, signal_type, value, count);
|
||||
}
|
||||
|
||||
ResultCode WaitAddressArbiter(VAddr address, Svc::ArbitrationType arb_type, s32 value,
|
||||
s64 timeout) {
|
||||
return address_arbiter.WaitForAddress(address, arb_type, value, timeout);
|
||||
/// Gets a const reference to the process' mutex lock
|
||||
const Mutex& GetMutex() const {
|
||||
return mutex;
|
||||
}
|
||||
|
||||
/// Gets the address to the process' dedicated TLS region.
|
||||
@@ -256,6 +250,15 @@ public:
|
||||
return thread_list;
|
||||
}
|
||||
|
||||
/// Insert a thread into the condition variable wait container
|
||||
void InsertConditionVariableThread(std::shared_ptr<Thread> thread);
|
||||
|
||||
/// Remove a thread from the condition variable wait container
|
||||
void RemoveConditionVariableThread(std::shared_ptr<Thread> thread);
|
||||
|
||||
/// Obtain all condition variable threads waiting for some address
|
||||
std::vector<std::shared_ptr<Thread>> GetConditionVariableThreads(VAddr cond_var_addr);
|
||||
|
||||
/// Registers a thread as being created under this process,
|
||||
/// adding it to this process' thread list.
|
||||
void RegisterThread(const Thread* thread);
|
||||
@@ -301,8 +304,6 @@ public:
|
||||
|
||||
void LoadModule(CodeSet code_set, VAddr base_addr);
|
||||
|
||||
bool IsSignaled() const override;
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// Thread-local storage management
|
||||
|
||||
@@ -313,6 +314,12 @@ public:
|
||||
void FreeTLSRegion(VAddr tls_address);
|
||||
|
||||
private:
|
||||
/// Checks if the specified thread should wait until this process is available.
|
||||
bool ShouldWait(const Thread* thread) const override;
|
||||
|
||||
/// Acquires/locks this process for the specified thread if it's available.
|
||||
void Acquire(Thread* thread) override;
|
||||
|
||||
/// Changes the process status. If the status is different
|
||||
/// from the current process status, then this will trigger
|
||||
/// a process signal.
|
||||
@@ -366,12 +373,12 @@ private:
|
||||
HandleTable handle_table;
|
||||
|
||||
/// Per-process address arbiter.
|
||||
KAddressArbiter address_arbiter;
|
||||
AddressArbiter address_arbiter;
|
||||
|
||||
/// The per-process mutex lock instance used for handling various
|
||||
/// forms of services, such as lock arbitration, and condition
|
||||
/// variable related facilities.
|
||||
KConditionVariable condition_var;
|
||||
Mutex mutex;
|
||||
|
||||
/// Address indicating the location of the process' dedicated TLS region.
|
||||
VAddr tls_region_address = 0;
|
||||
@@ -382,6 +389,9 @@ private:
|
||||
/// List of threads that are running with this process as their owner.
|
||||
std::list<const Thread*> thread_list;
|
||||
|
||||
/// List of threads waiting for a condition variable
|
||||
std::unordered_map<VAddr, std::list<std::shared_ptr<Thread>>> cond_var_threads;
|
||||
|
||||
/// Address of the top of the main thread's stack
|
||||
VAddr main_thread_stack_top{};
|
||||
|
||||
@@ -400,8 +410,6 @@ private:
|
||||
/// Schedule count of this process
|
||||
s64 schedule_count{};
|
||||
|
||||
bool is_signaled{};
|
||||
|
||||
/// System context
|
||||
Core::System& system;
|
||||
};
|
||||
|
||||
@@ -14,22 +14,24 @@
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
ReadableEvent::ReadableEvent(KernelCore& kernel) : KSynchronizationObject{kernel} {}
|
||||
ReadableEvent::ReadableEvent(KernelCore& kernel) : SynchronizationObject{kernel} {}
|
||||
ReadableEvent::~ReadableEvent() = default;
|
||||
|
||||
bool ReadableEvent::ShouldWait(const Thread* thread) const {
|
||||
return !is_signaled;
|
||||
}
|
||||
|
||||
void ReadableEvent::Acquire(Thread* thread) {
|
||||
ASSERT_MSG(IsSignaled(), "object unavailable!");
|
||||
}
|
||||
|
||||
void ReadableEvent::Signal() {
|
||||
if (is_signaled) {
|
||||
return;
|
||||
}
|
||||
|
||||
is_signaled = true;
|
||||
NotifyAvailable();
|
||||
}
|
||||
|
||||
bool ReadableEvent::IsSignaled() const {
|
||||
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
|
||||
|
||||
return is_signaled;
|
||||
SynchronizationObject::Signal();
|
||||
}
|
||||
|
||||
void ReadableEvent::Clear() {
|
||||
|
||||
@@ -4,8 +4,8 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "core/hle/kernel/k_synchronization_object.h"
|
||||
#include "core/hle/kernel/object.h"
|
||||
#include "core/hle/kernel/synchronization_object.h"
|
||||
|
||||
union ResultCode;
|
||||
|
||||
@@ -14,7 +14,7 @@ namespace Kernel {
|
||||
class KernelCore;
|
||||
class WritableEvent;
|
||||
|
||||
class ReadableEvent final : public KSynchronizationObject {
|
||||
class ReadableEvent final : public SynchronizationObject {
|
||||
friend class WritableEvent;
|
||||
|
||||
public:
|
||||
@@ -32,6 +32,9 @@ public:
|
||||
return HANDLE_TYPE;
|
||||
}
|
||||
|
||||
bool ShouldWait(const Thread* thread) const override;
|
||||
void Acquire(Thread* thread) override;
|
||||
|
||||
/// Unconditionally clears the readable event's state.
|
||||
void Clear();
|
||||
|
||||
@@ -43,14 +46,11 @@ public:
|
||||
/// then ERR_INVALID_STATE will be returned.
|
||||
ResultCode Reset();
|
||||
|
||||
void Signal();
|
||||
|
||||
bool IsSignaled() const override;
|
||||
void Signal() override;
|
||||
|
||||
private:
|
||||
explicit ReadableEvent(KernelCore& kernel);
|
||||
|
||||
bool is_signaled{};
|
||||
std::string name; ///< Name of event (optional)
|
||||
};
|
||||
|
||||
|
||||
@@ -13,7 +13,7 @@
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
ServerPort::ServerPort(KernelCore& kernel) : KSynchronizationObject{kernel} {}
|
||||
ServerPort::ServerPort(KernelCore& kernel) : SynchronizationObject{kernel} {}
|
||||
ServerPort::~ServerPort() = default;
|
||||
|
||||
ResultVal<std::shared_ptr<ServerSession>> ServerPort::Accept() {
|
||||
@@ -28,9 +28,15 @@ ResultVal<std::shared_ptr<ServerSession>> ServerPort::Accept() {
|
||||
|
||||
void ServerPort::AppendPendingSession(std::shared_ptr<ServerSession> pending_session) {
|
||||
pending_sessions.push_back(std::move(pending_session));
|
||||
if (pending_sessions.size() == 1) {
|
||||
NotifyAvailable();
|
||||
}
|
||||
}
|
||||
|
||||
bool ServerPort::ShouldWait(const Thread* thread) const {
|
||||
// If there are no pending sessions, we wait until a new one is added.
|
||||
return pending_sessions.empty();
|
||||
}
|
||||
|
||||
void ServerPort::Acquire(Thread* thread) {
|
||||
ASSERT_MSG(!ShouldWait(thread), "object unavailable!");
|
||||
}
|
||||
|
||||
bool ServerPort::IsSignaled() const {
|
||||
|
||||
@@ -9,8 +9,8 @@
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
#include "common/common_types.h"
|
||||
#include "core/hle/kernel/k_synchronization_object.h"
|
||||
#include "core/hle/kernel/object.h"
|
||||
#include "core/hle/kernel/synchronization_object.h"
|
||||
#include "core/hle/result.h"
|
||||
|
||||
namespace Kernel {
|
||||
@@ -20,7 +20,7 @@ class KernelCore;
|
||||
class ServerSession;
|
||||
class SessionRequestHandler;
|
||||
|
||||
class ServerPort final : public KSynchronizationObject {
|
||||
class ServerPort final : public SynchronizationObject {
|
||||
public:
|
||||
explicit ServerPort(KernelCore& kernel);
|
||||
~ServerPort() override;
|
||||
@@ -79,6 +79,9 @@ public:
|
||||
/// waiting to be accepted by this port.
|
||||
void AppendPendingSession(std::shared_ptr<ServerSession> pending_session);
|
||||
|
||||
bool ShouldWait(const Thread* thread) const override;
|
||||
void Acquire(Thread* thread) override;
|
||||
|
||||
bool IsSignaled() const override;
|
||||
|
||||
private:
|
||||
|
||||
@@ -24,24 +24,34 @@
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
ServerSession::ServerSession(KernelCore& kernel) : KSynchronizationObject{kernel} {}
|
||||
|
||||
ServerSession::~ServerSession() {
|
||||
kernel.ReleaseServiceThread(service_thread);
|
||||
}
|
||||
ServerSession::ServerSession(KernelCore& kernel) : SynchronizationObject{kernel} {}
|
||||
ServerSession::~ServerSession() = default;
|
||||
|
||||
ResultVal<std::shared_ptr<ServerSession>> ServerSession::Create(KernelCore& kernel,
|
||||
std::shared_ptr<Session> parent,
|
||||
std::string name) {
|
||||
std::shared_ptr<ServerSession> session{std::make_shared<ServerSession>(kernel)};
|
||||
|
||||
session->request_event =
|
||||
Core::Timing::CreateEvent(name, [session](std::uintptr_t, std::chrono::nanoseconds) {
|
||||
session->CompleteSyncRequest();
|
||||
});
|
||||
session->name = std::move(name);
|
||||
session->parent = std::move(parent);
|
||||
session->service_thread = kernel.CreateServiceThread(session->name);
|
||||
|
||||
return MakeResult(std::move(session));
|
||||
}
|
||||
|
||||
bool ServerSession::ShouldWait(const Thread* thread) const {
|
||||
// Closed sessions should never wait, an error will be returned from svcReplyAndReceive.
|
||||
if (!parent->Client()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Wait if we have no pending requests, or if we're currently handling a request.
|
||||
return pending_requesting_threads.empty() || currently_handling != nullptr;
|
||||
}
|
||||
|
||||
bool ServerSession::IsSignaled() const {
|
||||
// Closed sessions should never wait, an error will be returned from svcReplyAndReceive.
|
||||
if (!parent->Client()) {
|
||||
@@ -52,6 +62,15 @@ bool ServerSession::IsSignaled() const {
|
||||
return !pending_requesting_threads.empty() && currently_handling == nullptr;
|
||||
}
|
||||
|
||||
void ServerSession::Acquire(Thread* thread) {
|
||||
ASSERT_MSG(!ShouldWait(thread), "object unavailable!");
|
||||
// We are now handling a request, pop it from the stack.
|
||||
// TODO(Subv): What happens if the client endpoint is closed before any requests are made?
|
||||
ASSERT(!pending_requesting_threads.empty());
|
||||
currently_handling = pending_requesting_threads.back();
|
||||
pending_requesting_threads.pop_back();
|
||||
}
|
||||
|
||||
void ServerSession::ClientDisconnected() {
|
||||
// We keep a shared pointer to the hle handler to keep it alive throughout
|
||||
// the call to ClientDisconnected, as ClientDisconnected invalidates the
|
||||
@@ -123,16 +142,16 @@ ResultCode ServerSession::QueueSyncRequest(std::shared_ptr<Thread> thread,
|
||||
std::make_shared<HLERequestContext>(kernel, memory, SharedFrom(this), std::move(thread));
|
||||
|
||||
context->PopulateFromIncomingCommandBuffer(kernel.CurrentProcess()->GetHandleTable(), cmd_buf);
|
||||
|
||||
if (auto strong_ptr = service_thread.lock()) {
|
||||
strong_ptr->QueueSyncRequest(*this, std::move(context));
|
||||
return RESULT_SUCCESS;
|
||||
}
|
||||
request_queue.Push(std::move(context));
|
||||
|
||||
return RESULT_SUCCESS;
|
||||
}
|
||||
|
||||
ResultCode ServerSession::CompleteSyncRequest(HLERequestContext& context) {
|
||||
ResultCode ServerSession::CompleteSyncRequest() {
|
||||
ASSERT(!request_queue.Empty());
|
||||
|
||||
auto& context = *request_queue.Front();
|
||||
|
||||
ResultCode result = RESULT_SUCCESS;
|
||||
// If the session has been converted to a domain, handle the domain request
|
||||
if (IsDomain() && context.HasDomainMessageHeader()) {
|
||||
@@ -153,18 +172,23 @@ ResultCode ServerSession::CompleteSyncRequest(HLERequestContext& context) {
|
||||
{
|
||||
KScopedSchedulerLock lock(kernel);
|
||||
if (!context.IsThreadWaiting()) {
|
||||
context.GetThread().Wakeup();
|
||||
context.GetThread().ResumeFromWait();
|
||||
context.GetThread().SetSynchronizationResults(nullptr, result);
|
||||
}
|
||||
}
|
||||
|
||||
request_queue.Pop();
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
ResultCode ServerSession::HandleSyncRequest(std::shared_ptr<Thread> thread,
|
||||
Core::Memory::Memory& memory,
|
||||
Core::Timing::CoreTiming& core_timing) {
|
||||
return QueueSyncRequest(std::move(thread), memory);
|
||||
const ResultCode result = QueueSyncRequest(std::move(thread), memory);
|
||||
const auto delay = std::chrono::nanoseconds{kernel.IsMulticore() ? 0 : 20000};
|
||||
core_timing.ScheduleEvent(delay, request_event, {});
|
||||
return result;
|
||||
}
|
||||
|
||||
} // namespace Kernel
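The HandleSyncRequest change above defers completion through core timing instead of handing the request to a service thread; restated as a sketch with the surrounding objects assumed from context:

// Single-core mode delays the reply by 20,000 ns (20 us) so the guest thread
// observably yields; multicore mode completes on the next timing slice.
const auto delay = std::chrono::nanoseconds{kernel.IsMulticore() ? 0 : 20000};
core_timing.ScheduleEvent(delay, request_event, {}); // request_event runs CompleteSyncRequest()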
|
||||
|
||||
@@ -10,8 +10,7 @@
|
||||
#include <vector>
|
||||
|
||||
#include "common/threadsafe_queue.h"
|
||||
#include "core/hle/kernel/k_synchronization_object.h"
|
||||
#include "core/hle/kernel/service_thread.h"
|
||||
#include "core/hle/kernel/synchronization_object.h"
|
||||
#include "core/hle/result.h"
|
||||
|
||||
namespace Core::Memory {
|
||||
@@ -43,9 +42,7 @@ class Thread;
|
||||
* After the server replies to the request, the response is marshalled back to the caller's
|
||||
* TLS buffer and control is transferred back to it.
|
||||
*/
|
||||
class ServerSession final : public KSynchronizationObject {
|
||||
friend class ServiceThread;
|
||||
|
||||
class ServerSession final : public SynchronizationObject {
|
||||
public:
|
||||
explicit ServerSession(KernelCore& kernel);
|
||||
~ServerSession() override;
|
||||
@@ -77,6 +74,8 @@ public:
|
||||
return parent.get();
|
||||
}
|
||||
|
||||
bool IsSignaled() const override;
|
||||
|
||||
/**
|
||||
* Sets the HLE handler for the session. This handler will be called to service IPC requests
|
||||
* instead of the regular IPC machinery. (The regular IPC machinery is currently not
|
||||
@@ -98,6 +97,10 @@ public:
|
||||
ResultCode HandleSyncRequest(std::shared_ptr<Thread> thread, Core::Memory::Memory& memory,
|
||||
Core::Timing::CoreTiming& core_timing);
|
||||
|
||||
bool ShouldWait(const Thread* thread) const override;
|
||||
|
||||
void Acquire(Thread* thread) override;
|
||||
|
||||
/// Called when a client disconnection occurs.
|
||||
void ClientDisconnected();
|
||||
|
||||
@@ -124,14 +127,12 @@ public:
|
||||
convert_to_domain = true;
|
||||
}
|
||||
|
||||
bool IsSignaled() const override;
|
||||
|
||||
private:
|
||||
/// Queues a sync request from the emulated application.
|
||||
ResultCode QueueSyncRequest(std::shared_ptr<Thread> thread, Core::Memory::Memory& memory);
|
||||
|
||||
/// Completes a sync request from the emulated application.
|
||||
ResultCode CompleteSyncRequest(HLERequestContext& context);
|
||||
ResultCode CompleteSyncRequest();
|
||||
|
||||
/// Handles a SyncRequest to a domain, forwarding the request to the proper object or closing an
|
||||
/// object handle.
|
||||
@@ -162,8 +163,11 @@ private:
|
||||
/// The name of this session (optional)
|
||||
std::string name;
|
||||
|
||||
/// Thread to dispatch service requests
|
||||
std::weak_ptr<ServiceThread> service_thread;
|
||||
/// Core timing event used to schedule the service request at some point in the future
|
||||
std::shared_ptr<Core::Timing::EventType> request_event;
|
||||
|
||||
/// Queue of scheduled service requests
|
||||
Common::MPSCQueue<std::shared_ptr<Kernel::HLERequestContext>> request_queue;
|
||||
};
|
||||
|
||||
} // namespace Kernel
|
||||
|
||||
@@ -1,110 +0,0 @@
|
||||
// Copyright 2020 yuzu emulator team
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include <condition_variable>
|
||||
#include <functional>
|
||||
#include <mutex>
|
||||
#include <thread>
|
||||
#include <vector>
|
||||
#include <queue>
|
||||
|
||||
#include "common/assert.h"
|
||||
#include "common/scope_exit.h"
|
||||
#include "common/thread.h"
|
||||
#include "core/core.h"
|
||||
#include "core/hle/kernel/kernel.h"
|
||||
#include "core/hle/kernel/server_session.h"
|
||||
#include "core/hle/kernel/service_thread.h"
|
||||
#include "core/hle/lock.h"
|
||||
#include "video_core/renderer_base.h"
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
class ServiceThread::Impl final {
|
||||
public:
|
||||
explicit Impl(KernelCore& kernel, std::size_t num_threads, const std::string& name);
|
||||
~Impl();
|
||||
|
||||
void QueueSyncRequest(ServerSession& session, std::shared_ptr<HLERequestContext>&& context);
|
||||
|
||||
private:
|
||||
std::vector<std::thread> threads;
|
||||
std::queue<std::function<void()>> requests;
|
||||
std::mutex queue_mutex;
|
||||
std::condition_variable condition;
|
||||
const std::string service_name;
|
||||
bool stop{};
|
||||
};
|
||||
|
||||
ServiceThread::Impl::Impl(KernelCore& kernel, std::size_t num_threads, const std::string& name)
|
||||
: service_name{name} {
|
||||
for (std::size_t i = 0; i < num_threads; ++i)
|
||||
threads.emplace_back([this, &kernel] {
|
||||
Common::SetCurrentThreadName(std::string{"yuzu:HleService:" + service_name}.c_str());
|
||||
|
||||
// Wait for first request before trying to acquire a render context
|
||||
{
|
||||
std::unique_lock lock{queue_mutex};
|
||||
condition.wait(lock, [this] { return stop || !requests.empty(); });
|
||||
}
|
||||
|
||||
kernel.RegisterHostThread();
|
||||
|
||||
while (true) {
|
||||
std::function<void()> task;
|
||||
|
||||
{
|
||||
std::unique_lock lock{queue_mutex};
|
||||
condition.wait(lock, [this] { return stop || !requests.empty(); });
|
||||
if (stop || requests.empty()) {
|
||||
return;
|
||||
}
|
||||
task = std::move(requests.front());
|
||||
requests.pop();
|
||||
}
|
||||
|
||||
task();
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
void ServiceThread::Impl::QueueSyncRequest(ServerSession& session,
|
||||
std::shared_ptr<HLERequestContext>&& context) {
|
||||
{
|
||||
std::unique_lock lock{queue_mutex};
|
||||
|
||||
// ServerSession owns the service thread, so we cannot capture a strong pointer here in the
|
||||
// event that the ServerSession is terminated.
|
||||
std::weak_ptr<ServerSession> weak_ptr{SharedFrom(&session)};
|
||||
requests.emplace([weak_ptr, context{std::move(context)}]() {
|
||||
if (auto strong_ptr = weak_ptr.lock()) {
|
||||
strong_ptr->CompleteSyncRequest(*context);
|
||||
}
|
||||
});
|
||||
}
|
||||
condition.notify_one();
|
||||
}
|
||||
|
||||
ServiceThread::Impl::~Impl() {
|
||||
{
|
||||
std::unique_lock lock{queue_mutex};
|
||||
stop = true;
|
||||
}
|
||||
condition.notify_all();
|
||||
for (std::thread& thread : threads) {
|
||||
thread.join();
|
||||
}
|
||||
}
|
||||
|
||||
ServiceThread::ServiceThread(KernelCore& kernel, std::size_t num_threads, const std::string& name)
|
||||
: impl{std::make_unique<Impl>(kernel, num_threads, name)} {}
|
||||
|
||||
ServiceThread::~ServiceThread() = default;
|
||||
|
||||
void ServiceThread::QueueSyncRequest(ServerSession& session,
|
||||
std::shared_ptr<HLERequestContext>&& context) {
|
||||
impl->QueueSyncRequest(session, std::move(context));
|
||||
}
|
||||
|
||||
} // namespace Kernel
|
||||
@@ -1,28 +0,0 @@
|
||||
// Copyright 2020 yuzu emulator team
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <memory>
|
||||
#include <string>
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
class HLERequestContext;
|
||||
class KernelCore;
|
||||
class ServerSession;
|
||||
|
||||
class ServiceThread final {
|
||||
public:
|
||||
explicit ServiceThread(KernelCore& kernel, std::size_t num_threads, const std::string& name);
|
||||
~ServiceThread();
|
||||
|
||||
void QueueSyncRequest(ServerSession& session, std::shared_ptr<HLERequestContext>&& context);
|
||||
|
||||
private:
|
||||
class Impl;
|
||||
std::unique_ptr<Impl> impl;
|
||||
};
|
||||
|
||||
} // namespace Kernel
|
||||
@@ -9,7 +9,7 @@
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
Session::Session(KernelCore& kernel) : KSynchronizationObject{kernel} {}
|
||||
Session::Session(KernelCore& kernel) : SynchronizationObject{kernel} {}
|
||||
Session::~Session() = default;
|
||||
|
||||
Session::SessionPair Session::Create(KernelCore& kernel, std::string name) {
|
||||
@@ -24,9 +24,18 @@ Session::SessionPair Session::Create(KernelCore& kernel, std::string name) {
|
||||
return std::make_pair(std::move(client_session), std::move(server_session));
|
||||
}
|
||||
|
||||
bool Session::ShouldWait(const Thread* thread) const {
|
||||
UNIMPLEMENTED();
|
||||
return {};
|
||||
}
|
||||
|
||||
bool Session::IsSignaled() const {
|
||||
UNIMPLEMENTED();
|
||||
return true;
|
||||
}
|
||||
|
||||
void Session::Acquire(Thread* thread) {
|
||||
UNIMPLEMENTED();
|
||||
}
|
||||
|
||||
} // namespace Kernel
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
#include <string>
|
||||
#include <utility>
|
||||
|
||||
#include "core/hle/kernel/k_synchronization_object.h"
|
||||
#include "core/hle/kernel/synchronization_object.h"
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
@@ -19,7 +19,7 @@ class ServerSession;
|
||||
* Parent structure to link the client and server endpoints of a session with their associated
|
||||
* client port.
|
||||
*/
|
||||
class Session final : public KSynchronizationObject {
|
||||
class Session final : public SynchronizationObject {
|
||||
public:
|
||||
explicit Session(KernelCore& kernel);
|
||||
~Session() override;
|
||||
@@ -37,8 +37,12 @@ public:
|
||||
return HANDLE_TYPE;
|
||||
}
|
||||
|
||||
bool ShouldWait(const Thread* thread) const override;
|
||||
|
||||
bool IsSignaled() const override;
|
||||
|
||||
void Acquire(Thread* thread) override;
|
||||
|
||||
std::shared_ptr<ClientSession> Client() {
|
||||
if (auto result{client.lock()}) {
|
||||
return result;
|
||||
|
||||
@@ -10,7 +10,6 @@
|
||||
|
||||
#include "common/alignment.h"
|
||||
#include "common/assert.h"
|
||||
#include "common/common_funcs.h"
|
||||
#include "common/fiber.h"
|
||||
#include "common/logging/log.h"
|
||||
#include "common/microprofile.h"
|
||||
@@ -20,28 +19,26 @@
|
||||
#include "core/core_timing.h"
|
||||
#include "core/core_timing_util.h"
|
||||
#include "core/cpu_manager.h"
|
||||
#include "core/hle/kernel/address_arbiter.h"
|
||||
#include "core/hle/kernel/client_port.h"
|
||||
#include "core/hle/kernel/client_session.h"
|
||||
#include "core/hle/kernel/errors.h"
|
||||
#include "core/hle/kernel/handle_table.h"
|
||||
#include "core/hle/kernel/k_address_arbiter.h"
|
||||
#include "core/hle/kernel/k_condition_variable.h"
|
||||
#include "core/hle/kernel/k_scheduler.h"
|
||||
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
|
||||
#include "core/hle/kernel/k_synchronization_object.h"
|
||||
#include "core/hle/kernel/kernel.h"
|
||||
#include "core/hle/kernel/memory/memory_block.h"
|
||||
#include "core/hle/kernel/memory/memory_layout.h"
|
||||
#include "core/hle/kernel/memory/page_table.h"
|
||||
#include "core/hle/kernel/mutex.h"
|
||||
#include "core/hle/kernel/physical_core.h"
|
||||
#include "core/hle/kernel/process.h"
|
||||
#include "core/hle/kernel/readable_event.h"
|
||||
#include "core/hle/kernel/resource_limit.h"
|
||||
#include "core/hle/kernel/shared_memory.h"
|
||||
#include "core/hle/kernel/svc.h"
|
||||
#include "core/hle/kernel/svc_results.h"
|
||||
#include "core/hle/kernel/svc_types.h"
|
||||
#include "core/hle/kernel/svc_wrap.h"
|
||||
#include "core/hle/kernel/synchronization.h"
|
||||
#include "core/hle/kernel/thread.h"
|
||||
#include "core/hle/kernel/time_manager.h"
|
||||
#include "core/hle/kernel/transfer_memory.h"
|
||||
@@ -346,11 +343,27 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
|
||||
auto thread = kernel.CurrentScheduler()->GetCurrentThread();
|
||||
{
|
||||
KScopedSchedulerLock lock(kernel);
|
||||
thread->SetState(ThreadState::Waiting);
|
||||
thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC);
|
||||
thread->InvalidateHLECallback();
|
||||
thread->SetStatus(ThreadStatus::WaitIPC);
|
||||
session->SendSyncRequest(SharedFrom(thread), system.Memory(), system.CoreTiming());
|
||||
}
|
||||
|
||||
if (thread->HasHLECallback()) {
|
||||
Handle event_handle = thread->GetHLETimeEvent();
|
||||
if (event_handle != InvalidHandle) {
|
||||
auto& time_manager = kernel.TimeManager();
|
||||
time_manager.UnscheduleTimeEvent(event_handle);
|
||||
}
|
||||
|
||||
{
|
||||
KScopedSchedulerLock lock(kernel);
|
||||
auto* sync_object = thread->GetHLESyncObject();
|
||||
sync_object->RemoveWaitingThread(SharedFrom(thread));
|
||||
}
|
||||
|
||||
thread->InvokeHLECallback(SharedFrom(thread));
|
||||
}
|
||||
|
||||
return thread->GetSignalingResult();
|
||||
}
|
||||
|
||||
@@ -423,7 +436,7 @@ static ResultCode GetProcessId32(Core::System& system, u32* process_id_low, u32*
|
||||
}
|
||||
|
||||
/// Wait for the given handles to synchronize, timeout after the specified nanoseconds
|
||||
static ResultCode WaitSynchronization(Core::System& system, s32* index, VAddr handles_address,
|
||||
static ResultCode WaitSynchronization(Core::System& system, Handle* index, VAddr handles_address,
|
||||
u64 handle_count, s64 nano_seconds) {
|
||||
LOG_TRACE(Kernel_SVC, "called handles_address=0x{:X}, handle_count={}, nano_seconds={}",
|
||||
handles_address, handle_count, nano_seconds);
|
||||
@@ -445,26 +458,28 @@ static ResultCode WaitSynchronization(Core::System& system, s32* index, VAddr ha
|
||||
}
|
||||
|
||||
auto& kernel = system.Kernel();
|
||||
std::vector<KSynchronizationObject*> objects(handle_count);
|
||||
Thread::ThreadSynchronizationObjects objects(handle_count);
|
||||
const auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
|
||||
|
||||
for (u64 i = 0; i < handle_count; ++i) {
|
||||
const Handle handle = memory.Read32(handles_address + i * sizeof(Handle));
|
||||
const auto object = handle_table.Get<KSynchronizationObject>(handle);
|
||||
const auto object = handle_table.Get<SynchronizationObject>(handle);
|
||||
|
||||
if (object == nullptr) {
|
||||
LOG_ERROR(Kernel_SVC, "Object is a nullptr");
|
||||
return ERR_INVALID_HANDLE;
|
||||
}
|
||||
|
||||
objects[i] = object.get();
|
||||
objects[i] = object;
|
||||
}
|
||||
return KSynchronizationObject::Wait(kernel, index, objects.data(),
|
||||
static_cast<s32>(objects.size()), nano_seconds);
|
||||
auto& synchronization = kernel.Synchronization();
|
||||
const auto [result, handle_result] = synchronization.WaitFor(objects, nano_seconds);
|
||||
*index = handle_result;
|
||||
return result;
|
||||
}
|
||||
|
||||
static ResultCode WaitSynchronization32(Core::System& system, u32 timeout_low, u32 handles_address,
|
||||
s32 handle_count, u32 timeout_high, s32* index) {
|
||||
s32 handle_count, u32 timeout_high, Handle* index) {
|
||||
const s64 nano_seconds{(static_cast<s64>(timeout_high) << 32) | static_cast<s64>(timeout_low)};
|
||||
return WaitSynchronization(system, index, handles_address, handle_count, nano_seconds);
|
||||
}
|
||||
@@ -489,37 +504,56 @@ static ResultCode CancelSynchronization32(Core::System& system, Handle thread_ha
|
||||
return CancelSynchronization(system, thread_handle);
|
||||
}
|
||||
|
||||
/// Attempts to lock a mutex
|
||||
static ResultCode ArbitrateLock(Core::System& system, Handle thread_handle, VAddr address,
|
||||
u32 tag) {
|
||||
LOG_TRACE(Kernel_SVC, "called thread_handle=0x{:08X}, address=0x{:X}, tag=0x{:08X}",
|
||||
thread_handle, address, tag);
|
||||
/// Attempts to lock a mutex, creating it if it does not already exist
|
||||
static ResultCode ArbitrateLock(Core::System& system, Handle holding_thread_handle,
|
||||
VAddr mutex_addr, Handle requesting_thread_handle) {
|
||||
LOG_TRACE(Kernel_SVC,
|
||||
"called holding_thread_handle=0x{:08X}, mutex_addr=0x{:X}, "
|
||||
"requesting_current_thread_handle=0x{:08X}",
|
||||
holding_thread_handle, mutex_addr, requesting_thread_handle);
|
||||
|
||||
// Validate the input address.
|
||||
R_UNLESS(!Memory::IsKernelAddress(address), Svc::ResultInvalidCurrentMemory);
|
||||
R_UNLESS(Common::IsAligned(address, sizeof(u32)), Svc::ResultInvalidAddress);
|
||||
if (Core::Memory::IsKernelVirtualAddress(mutex_addr)) {
|
||||
LOG_ERROR(Kernel_SVC, "Mutex Address is a kernel virtual address, mutex_addr={:016X}",
|
||||
mutex_addr);
|
||||
return ERR_INVALID_ADDRESS_STATE;
|
||||
}
|
||||
|
||||
return system.Kernel().CurrentProcess()->WaitForAddress(thread_handle, address, tag);
|
||||
if (!Common::IsWordAligned(mutex_addr)) {
|
||||
LOG_ERROR(Kernel_SVC, "Mutex Address is not word aligned, mutex_addr={:016X}", mutex_addr);
|
||||
return ERR_INVALID_ADDRESS;
|
||||
}
|
||||
|
||||
auto* const current_process = system.Kernel().CurrentProcess();
|
||||
return current_process->GetMutex().TryAcquire(mutex_addr, holding_thread_handle,
|
||||
requesting_thread_handle);
|
||||
}
|
||||
|
||||
static ResultCode ArbitrateLock32(Core::System& system, Handle thread_handle, u32 address,
|
||||
u32 tag) {
|
||||
return ArbitrateLock(system, thread_handle, address, tag);
|
||||
static ResultCode ArbitrateLock32(Core::System& system, Handle holding_thread_handle,
|
||||
u32 mutex_addr, Handle requesting_thread_handle) {
|
||||
return ArbitrateLock(system, holding_thread_handle, mutex_addr, requesting_thread_handle);
|
||||
}
|
||||
|
||||
/// Unlock a mutex
|
||||
static ResultCode ArbitrateUnlock(Core::System& system, VAddr address) {
|
||||
LOG_TRACE(Kernel_SVC, "called address=0x{:X}", address);
|
||||
static ResultCode ArbitrateUnlock(Core::System& system, VAddr mutex_addr) {
|
||||
LOG_TRACE(Kernel_SVC, "called mutex_addr=0x{:X}", mutex_addr);
|
||||
|
||||
// Validate the input address.
|
||||
R_UNLESS(!Memory::IsKernelAddress(address), Svc::ResultInvalidCurrentMemory);
|
||||
R_UNLESS(Common::IsAligned(address, sizeof(u32)), Svc::ResultInvalidAddress);
|
||||
if (Core::Memory::IsKernelVirtualAddress(mutex_addr)) {
|
||||
LOG_ERROR(Kernel_SVC, "Mutex Address is a kernel virtual address, mutex_addr={:016X}",
|
||||
mutex_addr);
|
||||
return ERR_INVALID_ADDRESS_STATE;
|
||||
}
|
||||
|
||||
return system.Kernel().CurrentProcess()->SignalToAddress(address);
|
||||
if (!Common::IsWordAligned(mutex_addr)) {
|
||||
LOG_ERROR(Kernel_SVC, "Mutex Address is not word aligned, mutex_addr={:016X}", mutex_addr);
|
||||
return ERR_INVALID_ADDRESS;
|
||||
}
|
||||
|
||||
auto* const current_process = system.Kernel().CurrentProcess();
|
||||
return current_process->GetMutex().Release(mutex_addr);
|
||||
}
|
||||
|
||||
static ResultCode ArbitrateUnlock32(Core::System& system, u32 address) {
|
||||
return ArbitrateUnlock(system, address);
|
||||
static ResultCode ArbitrateUnlock32(Core::System& system, u32 mutex_addr) {
|
||||
return ArbitrateUnlock(system, mutex_addr);
|
||||
}
|
||||
|
||||
enum class BreakType : u32 {
|
||||
@@ -1146,7 +1180,7 @@ static ResultCode SetThreadPriority(Core::System& system, Handle handle, u32 pri
|
||||
return ERR_INVALID_HANDLE;
|
||||
}
|
||||
|
||||
thread->SetBasePriority(priority);
|
||||
thread->SetPriority(priority);
|
||||
|
||||
return RESULT_SUCCESS;
|
||||
}
|
||||
@@ -1525,7 +1559,7 @@ static ResultCode StartThread(Core::System& system, Handle thread_handle) {
|
||||
return ERR_INVALID_HANDLE;
|
||||
}
|
||||
|
||||
ASSERT(thread->GetState() == ThreadState::Initialized);
|
||||
ASSERT(thread->GetStatus() == ThreadStatus::Dormant);
|
||||
|
||||
return thread->Start();
|
||||
}
|
||||
@@ -1549,7 +1583,7 @@ static void ExitThread32(Core::System& system) {
|
||||
|
||||
/// Sleep the current thread
|
||||
static void SleepThread(Core::System& system, s64 nanoseconds) {
|
||||
LOG_TRACE(Kernel_SVC, "called nanoseconds={}", nanoseconds);
|
||||
LOG_DEBUG(Kernel_SVC, "called nanoseconds={}", nanoseconds);
|
||||
|
||||
enum class SleepType : s64 {
|
||||
YieldWithoutCoreMigration = 0,
|
||||
@@ -1586,135 +1620,224 @@ static void SleepThread32(Core::System& system, u32 nanoseconds_low, u32 nanosec
|
||||
}
|
||||
|
||||
/// Wait process wide key atomic
|
||||
static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr address, VAddr cv_key,
|
||||
u32 tag, s64 timeout_ns) {
|
||||
LOG_TRACE(Kernel_SVC, "called address={:X}, cv_key={:X}, tag=0x{:08X}, timeout_ns={}", address,
|
||||
cv_key, tag, timeout_ns);
|
||||
static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr mutex_addr,
|
||||
VAddr condition_variable_addr, Handle thread_handle,
|
||||
s64 nano_seconds) {
|
||||
LOG_TRACE(
|
||||
Kernel_SVC,
|
||||
"called mutex_addr={:X}, condition_variable_addr={:X}, thread_handle=0x{:08X}, timeout={}",
|
||||
mutex_addr, condition_variable_addr, thread_handle, nano_seconds);
|
||||
|
||||
// Validate input.
|
||||
R_UNLESS(!Memory::IsKernelAddress(address), Svc::ResultInvalidCurrentMemory);
|
||||
R_UNLESS(Common::IsAligned(address, sizeof(int32_t)), Svc::ResultInvalidAddress);
|
||||
|
||||
// Convert timeout from nanoseconds to ticks.
|
||||
s64 timeout{};
|
||||
if (timeout_ns > 0) {
|
||||
const s64 offset_tick(timeout_ns);
|
||||
if (offset_tick > 0) {
|
||||
timeout = offset_tick + 2;
|
||||
if (timeout <= 0) {
|
||||
timeout = std::numeric_limits<s64>::max();
|
||||
}
|
||||
} else {
|
||||
timeout = std::numeric_limits<s64>::max();
|
||||
}
|
||||
} else {
|
||||
timeout = timeout_ns;
|
||||
if (Core::Memory::IsKernelVirtualAddress(mutex_addr)) {
|
||||
LOG_ERROR(
|
||||
Kernel_SVC,
|
||||
"Given mutex address must not be within the kernel address space. address=0x{:016X}",
|
||||
mutex_addr);
|
||||
return ERR_INVALID_ADDRESS_STATE;
|
||||
}
|
||||
|
||||
// Wait on the condition variable.
|
||||
return system.Kernel().CurrentProcess()->WaitConditionVariable(
|
||||
address, Common::AlignDown(cv_key, sizeof(u32)), tag, timeout);
|
||||
if (!Common::IsWordAligned(mutex_addr)) {
|
||||
LOG_ERROR(Kernel_SVC, "Given mutex address must be word-aligned. address=0x{:016X}",
|
||||
mutex_addr);
|
||||
return ERR_INVALID_ADDRESS;
|
||||
}
|
||||
|
||||
ASSERT(condition_variable_addr == Common::AlignDown(condition_variable_addr, 4));
|
||||
auto& kernel = system.Kernel();
|
||||
Handle event_handle;
|
||||
Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread();
|
||||
auto* const current_process = kernel.CurrentProcess();
|
||||
{
|
||||
KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, nano_seconds);
|
||||
const auto& handle_table = current_process->GetHandleTable();
|
||||
std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle);
|
||||
ASSERT(thread);
|
||||
|
||||
current_thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
|
||||
|
||||
if (thread->IsPendingTermination()) {
|
||||
lock.CancelSleep();
|
||||
return ERR_THREAD_TERMINATING;
|
||||
}
|
||||
|
||||
const auto release_result = current_process->GetMutex().Release(mutex_addr);
|
||||
if (release_result.IsError()) {
|
||||
lock.CancelSleep();
|
||||
return release_result;
|
||||
}
|
||||
|
||||
if (nano_seconds == 0) {
|
||||
lock.CancelSleep();
|
||||
return RESULT_TIMEOUT;
|
||||
}
|
||||
|
||||
current_thread->SetCondVarWaitAddress(condition_variable_addr);
|
||||
current_thread->SetMutexWaitAddress(mutex_addr);
|
||||
current_thread->SetWaitHandle(thread_handle);
|
||||
current_thread->SetStatus(ThreadStatus::WaitCondVar);
|
||||
current_process->InsertConditionVariableThread(SharedFrom(current_thread));
|
||||
}
|
||||
|
||||
if (event_handle != InvalidHandle) {
|
||||
auto& time_manager = kernel.TimeManager();
|
||||
time_manager.UnscheduleTimeEvent(event_handle);
|
||||
}
|
||||
|
||||
{
|
||||
KScopedSchedulerLock lock(kernel);
|
||||
|
||||
auto* owner = current_thread->GetLockOwner();
|
||||
if (owner != nullptr) {
|
||||
owner->RemoveMutexWaiter(SharedFrom(current_thread));
|
||||
}
|
||||
|
||||
current_process->RemoveConditionVariableThread(SharedFrom(current_thread));
|
||||
}
|
||||
// Note: Deliberately don't attempt to inherit the lock owner's priority.
|
||||
|
||||
return current_thread->GetSignalingResult();
|
||||
}
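The implementation above follows the classic condition-variable protocol: release the mutex, register the thread as a waiter, sleep with an optional timeout, and clean up the wait state on wake. A standard-library analogue of that release/wait/timeout ordering, using std:: primitives rather than the emulated kernel's types:

// Illustrative only: the emulated kernel uses its own mutex/condvar words, not std:: types.
#include <chrono>
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

std::mutex mutex;
std::condition_variable cond_var;
bool ready = false;

int main() {
    std::thread signaller([] {
        std::this_thread::sleep_for(std::chrono::milliseconds(50));
        {
            std::lock_guard lock(mutex); // Take the mutex, like the guest mutex word owner.
            ready = true;
        }
        cond_var.notify_one(); // Equivalent of SignalProcessWideKey.
    });

    std::unique_lock lock(mutex);
    // wait_for atomically releases the mutex, sleeps until notified or the timeout expires,
    // then re-acquires the mutex, which is the same ordering WaitProcessWideKeyAtomic uses.
    const bool signaled =
        cond_var.wait_for(lock, std::chrono::milliseconds(500), [] { return ready; });
    std::cout << (signaled ? "signaled" : "timed out") << '\n';
    signaller.join();
}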
|
||||
|
||||
static ResultCode WaitProcessWideKeyAtomic32(Core::System& system, u32 address, u32 cv_key, u32 tag,
|
||||
u32 timeout_ns_low, u32 timeout_ns_high) {
|
||||
const auto timeout_ns = static_cast<s64>(timeout_ns_low | (u64{timeout_ns_high} << 32));
|
||||
return WaitProcessWideKeyAtomic(system, address, cv_key, tag, timeout_ns);
|
||||
static ResultCode WaitProcessWideKeyAtomic32(Core::System& system, u32 mutex_addr,
|
||||
u32 condition_variable_addr, Handle thread_handle,
|
||||
u32 nanoseconds_low, u32 nanoseconds_high) {
|
||||
const auto nanoseconds = static_cast<s64>(nanoseconds_low | (u64{nanoseconds_high} << 32));
|
||||
return WaitProcessWideKeyAtomic(system, mutex_addr, condition_variable_addr, thread_handle,
|
||||
nanoseconds);
|
||||
}
|
||||
|
||||
/// Signal process wide key
|
||||
static void SignalProcessWideKey(Core::System& system, VAddr cv_key, s32 count) {
|
||||
LOG_TRACE(Kernel_SVC, "called, cv_key=0x{:X}, count=0x{:08X}", cv_key, count);
|
||||
static void SignalProcessWideKey(Core::System& system, VAddr condition_variable_addr, s32 target) {
|
||||
LOG_TRACE(Kernel_SVC, "called, condition_variable_addr=0x{:X}, target=0x{:08X}",
|
||||
condition_variable_addr, target);
|
||||
|
||||
// Signal the condition variable.
|
||||
return system.Kernel().CurrentProcess()->SignalConditionVariable(
|
||||
Common::AlignDown(cv_key, sizeof(u32)), count);
|
||||
}
|
||||
ASSERT(condition_variable_addr == Common::AlignDown(condition_variable_addr, 4));
|
||||
|
||||
static void SignalProcessWideKey32(Core::System& system, u32 cv_key, s32 count) {
|
||||
SignalProcessWideKey(system, cv_key, count);
|
||||
}
|
||||
// Retrieve a list of all threads that are waiting for this condition variable.
|
||||
auto& kernel = system.Kernel();
|
||||
KScopedSchedulerLock lock(kernel);
|
||||
auto* const current_process = kernel.CurrentProcess();
|
||||
std::vector<std::shared_ptr<Thread>> waiting_threads =
|
||||
current_process->GetConditionVariableThreads(condition_variable_addr);
|
||||
|
||||
namespace {
|
||||
// Only process up to 'target' threads, unless 'target' is less than or equal to 0, in which case process
|
||||
// them all.
|
||||
std::size_t last = waiting_threads.size();
|
||||
if (target > 0) {
|
||||
last = std::min(waiting_threads.size(), static_cast<std::size_t>(target));
|
||||
}
|
||||
for (std::size_t index = 0; index < last; ++index) {
|
||||
auto& thread = waiting_threads[index];
|
||||
|
||||
constexpr bool IsValidSignalType(Svc::SignalType type) {
|
||||
switch (type) {
|
||||
case Svc::SignalType::Signal:
|
||||
case Svc::SignalType::SignalAndIncrementIfEqual:
|
||||
case Svc::SignalType::SignalAndModifyByWaitingCountIfEqual:
|
||||
return true;
|
||||
default:
|
||||
return false;
|
||||
ASSERT(thread->GetCondVarWaitAddress() == condition_variable_addr);
|
||||
|
||||
// Release the thread from the condition variable wait list.
|
||||
current_process->RemoveConditionVariableThread(thread);
|
||||
|
||||
const std::size_t current_core = system.CurrentCoreIndex();
|
||||
auto& monitor = system.Monitor();
|
||||
|
||||
// Atomically read the value of the mutex.
|
||||
u32 mutex_val = 0;
|
||||
u32 update_val = 0;
|
||||
const VAddr mutex_address = thread->GetMutexWaitAddress();
|
||||
do {
|
||||
// If the mutex is not yet acquired, acquire it.
|
||||
mutex_val = monitor.ExclusiveRead32(current_core, mutex_address);
|
||||
|
||||
if (mutex_val != 0) {
|
||||
update_val = mutex_val | Mutex::MutexHasWaitersFlag;
|
||||
} else {
|
||||
update_val = thread->GetWaitHandle();
|
||||
}
|
||||
} while (!monitor.ExclusiveWrite32(current_core, mutex_address, update_val));
|
||||
monitor.ClearExclusive();
|
||||
if (mutex_val == 0) {
|
||||
// We were able to acquire the mutex, resume this thread.
|
||||
auto* const lock_owner = thread->GetLockOwner();
|
||||
if (lock_owner != nullptr) {
|
||||
lock_owner->RemoveMutexWaiter(thread);
|
||||
}
|
||||
|
||||
thread->SetLockOwner(nullptr);
|
||||
thread->SetSynchronizationResults(nullptr, RESULT_SUCCESS);
|
||||
thread->ResumeFromWait();
|
||||
} else {
|
||||
// The mutex is already owned by some other thread, make this thread wait on it.
|
||||
const Handle owner_handle = static_cast<Handle>(mutex_val & Mutex::MutexOwnerMask);
|
||||
const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
|
||||
auto owner = handle_table.Get<Thread>(owner_handle);
|
||||
ASSERT(owner);
|
||||
if (thread->GetStatus() == ThreadStatus::WaitCondVar) {
|
||||
thread->SetStatus(ThreadStatus::WaitMutex);
|
||||
}
|
||||
|
||||
owner->AddMutexWaiter(thread);
|
||||
}
|
||||
}
|
||||
}
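The exclusive-monitor loop above either hands the mutex word to the woken thread (when the word is 0) or sets the "has waiters" flag so the current owner knows to wake someone on release. A sketch of that acquire-or-mark decision using std::atomic in place of the guest exclusive monitor; the flag constant is illustrative, not the emulator's value:

// Sketch: AcquireOrMarkWaiters and kHasWaitersFlag are stand-ins for the loop above.
#include <atomic>
#include <cstdint>
#include <iostream>

constexpr std::uint32_t kHasWaitersFlag = 0x40000000;

// Returns true if the waiter identified by wait_handle now owns the mutex word.
bool AcquireOrMarkWaiters(std::atomic<std::uint32_t>& mutex_word, std::uint32_t wait_handle) {
    std::uint32_t current = mutex_word.load();
    for (;;) {
        // If the word is free, install the waiter's handle; otherwise set the waiters flag
        // so the owner wakes a waiter when it releases the mutex.
        const std::uint32_t desired = (current == 0) ? wait_handle : (current | kHasWaitersFlag);
        if (mutex_word.compare_exchange_weak(current, desired)) {
            return current == 0;
        }
        // On failure, 'current' was reloaded; retry, mirroring the exclusive-write loop.
    }
}

int main() {
    std::atomic<std::uint32_t> mutex_word{0};
    std::cout << AcquireOrMarkWaiters(mutex_word, 0x21) << ' '   // 1: acquired
              << AcquireOrMarkWaiters(mutex_word, 0x22) << '\n'; // 0: marked waiters
}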
|
||||
|
||||
constexpr bool IsValidArbitrationType(Svc::ArbitrationType type) {
|
||||
switch (type) {
|
||||
case Svc::ArbitrationType::WaitIfLessThan:
|
||||
case Svc::ArbitrationType::DecrementAndWaitIfLessThan:
|
||||
case Svc::ArbitrationType::WaitIfEqual:
|
||||
return true;
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
static void SignalProcessWideKey32(Core::System& system, u32 condition_variable_addr, s32 target) {
|
||||
SignalProcessWideKey(system, condition_variable_addr, target);
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
// Wait for an address (via Address Arbiter)
|
||||
static ResultCode WaitForAddress(Core::System& system, VAddr address, Svc::ArbitrationType arb_type,
|
||||
s32 value, s64 timeout_ns) {
|
||||
LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, arb_type=0x{:X}, value=0x{:X}, timeout_ns={}",
|
||||
address, arb_type, value, timeout_ns);
|
||||
static ResultCode WaitForAddress(Core::System& system, VAddr address, u32 type, s32 value,
|
||||
s64 timeout) {
|
||||
LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, type=0x{:X}, value=0x{:X}, timeout={}", address,
|
||||
type, value, timeout);
|
||||
|
||||
// Validate input.
|
||||
R_UNLESS(!Memory::IsKernelAddress(address), Svc::ResultInvalidCurrentMemory);
|
||||
R_UNLESS(Common::IsAligned(address, sizeof(int32_t)), Svc::ResultInvalidAddress);
|
||||
R_UNLESS(IsValidArbitrationType(arb_type), Svc::ResultInvalidEnumValue);
|
||||
|
||||
// Convert timeout from nanoseconds to ticks.
|
||||
s64 timeout{};
|
||||
if (timeout_ns > 0) {
|
||||
const s64 offset_tick(timeout_ns);
|
||||
if (offset_tick > 0) {
|
||||
timeout = offset_tick + 2;
|
||||
if (timeout <= 0) {
|
||||
timeout = std::numeric_limits<s64>::max();
|
||||
}
|
||||
} else {
|
||||
timeout = std::numeric_limits<s64>::max();
|
||||
}
|
||||
} else {
|
||||
timeout = timeout_ns;
|
||||
// If the passed address is a kernel virtual address, return invalid memory state.
|
||||
if (Core::Memory::IsKernelVirtualAddress(address)) {
|
||||
LOG_ERROR(Kernel_SVC, "Address is a kernel virtual address, address={:016X}", address);
|
||||
return ERR_INVALID_ADDRESS_STATE;
|
||||
}
|
||||
|
||||
return system.Kernel().CurrentProcess()->WaitAddressArbiter(address, arb_type, value, timeout);
|
||||
// If the address is not properly aligned to 4 bytes, return invalid address.
|
||||
if (!Common::IsWordAligned(address)) {
|
||||
LOG_ERROR(Kernel_SVC, "Address is not word aligned, address={:016X}", address);
|
||||
return ERR_INVALID_ADDRESS;
|
||||
}
|
||||
|
||||
const auto arbitration_type = static_cast<AddressArbiter::ArbitrationType>(type);
|
||||
auto& address_arbiter = system.Kernel().CurrentProcess()->GetAddressArbiter();
|
||||
const ResultCode result =
|
||||
address_arbiter.WaitForAddress(address, arbitration_type, value, timeout);
|
||||
return result;
|
||||
}
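One side of the hunk above converts a nanosecond timeout into a tick count by adding a small two-tick margin and saturating to the maximum value if the addition would overflow, while passing non-positive values through unchanged. A standalone sketch of that conversion under those assumptions; unlike the code above, it guards before adding, since signed overflow is undefined behaviour in standard C++:

// Sketch of the saturating nanoseconds-to-ticks conversion seen in the hunk above.
#include <cstdint>
#include <iostream>
#include <limits>

using s64 = std::int64_t;

s64 ConvertTimeout(s64 timeout_ns) {
    if (timeout_ns <= 0) {
        return timeout_ns; // Non-positive values are passed through unchanged, as above.
    }
    // Guard before adding the two-tick margin so the addition cannot overflow.
    if (timeout_ns > std::numeric_limits<s64>::max() - 2) {
        return std::numeric_limits<s64>::max();
    }
    return timeout_ns + 2;
}

int main() {
    std::cout << ConvertTimeout(100) << ' ' << ConvertTimeout(-1) << ' '
              << ConvertTimeout(std::numeric_limits<s64>::max()) << '\n';
}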
|
||||
|
||||
static ResultCode WaitForAddress32(Core::System& system, u32 address, Svc::ArbitrationType arb_type,
|
||||
s32 value, u32 timeout_ns_low, u32 timeout_ns_high) {
|
||||
const auto timeout = static_cast<s64>(timeout_ns_low | (u64{timeout_ns_high} << 32));
|
||||
return WaitForAddress(system, address, arb_type, value, timeout);
|
||||
static ResultCode WaitForAddress32(Core::System& system, u32 address, u32 type, s32 value,
|
||||
u32 timeout_low, u32 timeout_high) {
|
||||
const auto timeout = static_cast<s64>(timeout_low | (u64{timeout_high} << 32));
|
||||
return WaitForAddress(system, address, type, value, timeout);
|
||||
}
|
||||
|
||||
// Signals to an address (via Address Arbiter)
|
||||
static ResultCode SignalToAddress(Core::System& system, VAddr address, Svc::SignalType signal_type,
|
||||
s32 value, s32 count) {
|
||||
LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, signal_type=0x{:X}, value=0x{:X}, count=0x{:X}",
|
||||
address, signal_type, value, count);
|
||||
static ResultCode SignalToAddress(Core::System& system, VAddr address, u32 type, s32 value,
|
||||
s32 num_to_wake) {
|
||||
LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, type=0x{:X}, value=0x{:X}, num_to_wake=0x{:X}",
|
||||
address, type, value, num_to_wake);
|
||||
|
||||
// Validate input.
|
||||
R_UNLESS(!Memory::IsKernelAddress(address), Svc::ResultInvalidCurrentMemory);
|
||||
R_UNLESS(Common::IsAligned(address, sizeof(s32)), Svc::ResultInvalidAddress);
|
||||
R_UNLESS(IsValidSignalType(signal_type), Svc::ResultInvalidEnumValue);
|
||||
// If the passed address is a kernel virtual address, return invalid memory state.
|
||||
if (Core::Memory::IsKernelVirtualAddress(address)) {
|
||||
LOG_ERROR(Kernel_SVC, "Address is a kernel virtual address, address={:016X}", address);
|
||||
return ERR_INVALID_ADDRESS_STATE;
|
||||
}
|
||||
|
||||
return system.Kernel().CurrentProcess()->SignalAddressArbiter(address, signal_type, value,
|
||||
count);
|
||||
// If the address is not properly aligned to 4 bytes, return invalid address.
|
||||
if (!Common::IsWordAligned(address)) {
|
||||
LOG_ERROR(Kernel_SVC, "Address is not word aligned, address={:016X}", address);
|
||||
return ERR_INVALID_ADDRESS;
|
||||
}
|
||||
|
||||
const auto signal_type = static_cast<AddressArbiter::SignalType>(type);
|
||||
auto& address_arbiter = system.Kernel().CurrentProcess()->GetAddressArbiter();
|
||||
return address_arbiter.SignalToAddress(address, signal_type, value, num_to_wake);
|
||||
}
|
||||
|
||||
static ResultCode SignalToAddress32(Core::System& system, u32 address, Svc::SignalType signal_type,
|
||||
s32 value, s32 count) {
|
||||
return SignalToAddress(system, address, signal_type, value, count);
|
||||
static ResultCode SignalToAddress32(Core::System& system, u32 address, u32 type, s32 value,
|
||||
s32 num_to_wake) {
|
||||
return SignalToAddress(system, address, type, value, num_to_wake);
|
||||
}
|
||||
|
||||
static void KernelDebug([[maybe_unused]] Core::System& system,
|
||||
|
||||
@@ -1,14 +0,0 @@
|
||||
// Copyright 2020 yuzu emulator team
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "common/common_types.h"
|
||||
|
||||
namespace Kernel::Svc {
|
||||
|
||||
constexpr s32 ArgumentHandleCountMax = 0x40;
|
||||
constexpr u32 HandleWaitMask{1u << 30};
|
||||
|
||||
} // namespace Kernel::Svc
|
||||
@@ -1,20 +0,0 @@
|
||||
// Copyright 2020 yuzu emulator team
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "core/hle/result.h"
|
||||
|
||||
namespace Kernel::Svc {
|
||||
|
||||
constexpr ResultCode ResultTerminationRequested{ErrorModule::Kernel, 59};
|
||||
constexpr ResultCode ResultInvalidAddress{ErrorModule::Kernel, 102};
|
||||
constexpr ResultCode ResultInvalidCurrentMemory{ErrorModule::Kernel, 106};
|
||||
constexpr ResultCode ResultInvalidHandle{ErrorModule::Kernel, 114};
|
||||
constexpr ResultCode ResultTimedOut{ErrorModule::Kernel, 117};
|
||||
constexpr ResultCode ResultCancelled{ErrorModule::Kernel, 118};
|
||||
constexpr ResultCode ResultInvalidEnumValue{ErrorModule::Kernel, 120};
|
||||
constexpr ResultCode ResultInvalidState{ErrorModule::Kernel, 125};
|
||||
|
||||
} // namespace Kernel::Svc
|
||||
@@ -23,8 +23,8 @@ enum class MemoryState : u32 {
|
||||
Ipc = 0x0A,
|
||||
Stack = 0x0B,
|
||||
ThreadLocal = 0x0C,
|
||||
Transferred = 0x0D,
|
||||
SharedTransferred = 0x0E,
|
||||
Transfered = 0x0D,
|
||||
SharedTransfered = 0x0E,
|
||||
SharedCode = 0x0F,
|
||||
Inaccessible = 0x10,
|
||||
NonSecureIpc = 0x11,
|
||||
@@ -65,16 +65,4 @@ struct MemoryInfo {
|
||||
u32 padding{};
|
||||
};
|
||||
|
||||
enum class SignalType : u32 {
|
||||
Signal = 0,
|
||||
SignalAndIncrementIfEqual = 1,
|
||||
SignalAndModifyByWaitingCountIfEqual = 2,
|
||||
};
|
||||
|
||||
enum class ArbitrationType : u32 {
|
||||
WaitIfLessThan = 0,
|
||||
DecrementAndWaitIfLessThan = 1,
|
||||
WaitIfEqual = 2,
|
||||
};
|
||||
|
||||
} // namespace Kernel::Svc
|
||||
|
||||
@@ -7,7 +7,6 @@
|
||||
#include "common/common_types.h"
|
||||
#include "core/arm/arm_interface.h"
|
||||
#include "core/core.h"
|
||||
#include "core/hle/kernel/svc_types.h"
|
||||
#include "core/hle/result.h"
|
||||
|
||||
namespace Kernel {
|
||||
@@ -216,10 +215,9 @@ void SvcWrap64(Core::System& system) {
|
||||
func(system, static_cast<u32>(Param(system, 0)), Param(system, 1), Param(system, 2)).raw);
|
||||
}
|
||||
|
||||
// Used by WaitSynchronization
|
||||
template <ResultCode func(Core::System&, s32*, u64, u64, s64)>
|
||||
template <ResultCode func(Core::System&, u32*, u64, u64, s64)>
|
||||
void SvcWrap64(Core::System& system) {
|
||||
s32 param_1 = 0;
|
||||
u32 param_1 = 0;
|
||||
const u32 retval = func(system, ¶m_1, Param(system, 1), static_cast<u32>(Param(system, 2)),
|
||||
static_cast<s64>(Param(system, 3)))
|
||||
.raw;
|
||||
@@ -278,22 +276,18 @@ void SvcWrap64(Core::System& system) {
|
||||
FuncReturn(system, retval);
|
||||
}
|
||||
|
||||
// Used by WaitForAddress
|
||||
template <ResultCode func(Core::System&, u64, Svc::ArbitrationType, s32, s64)>
|
||||
template <ResultCode func(Core::System&, u64, u32, s32, s64)>
|
||||
void SvcWrap64(Core::System& system) {
|
||||
FuncReturn(system,
|
||||
func(system, Param(system, 0), static_cast<Svc::ArbitrationType>(Param(system, 1)),
|
||||
static_cast<s32>(Param(system, 2)), static_cast<s64>(Param(system, 3)))
|
||||
.raw);
|
||||
FuncReturn(system, func(system, Param(system, 0), static_cast<u32>(Param(system, 1)),
|
||||
static_cast<s32>(Param(system, 2)), static_cast<s64>(Param(system, 3)))
|
||||
.raw);
|
||||
}
|
||||
|
||||
// Used by SignalToAddress
|
||||
template <ResultCode func(Core::System&, u64, Svc::SignalType, s32, s32)>
|
||||
template <ResultCode func(Core::System&, u64, u32, s32, s32)>
|
||||
void SvcWrap64(Core::System& system) {
|
||||
FuncReturn(system,
|
||||
func(system, Param(system, 0), static_cast<Svc::SignalType>(Param(system, 1)),
|
||||
static_cast<s32>(Param(system, 2)), static_cast<s32>(Param(system, 3)))
|
||||
.raw);
|
||||
FuncReturn(system, func(system, Param(system, 0), static_cast<u32>(Param(system, 1)),
|
||||
static_cast<s32>(Param(system, 2)), static_cast<s32>(Param(system, 3)))
|
||||
.raw);
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
@@ -509,23 +503,22 @@ void SvcWrap32(Core::System& system) {
|
||||
}
|
||||
|
||||
// Used by WaitForAddress32
|
||||
template <ResultCode func(Core::System&, u32, Svc::ArbitrationType, s32, u32, u32)>
|
||||
template <ResultCode func(Core::System&, u32, u32, s32, u32, u32)>
|
||||
void SvcWrap32(Core::System& system) {
|
||||
const u32 retval = func(system, static_cast<u32>(Param(system, 0)),
|
||||
static_cast<Svc::ArbitrationType>(Param(system, 1)),
|
||||
static_cast<s32>(Param(system, 2)), static_cast<u32>(Param(system, 3)),
|
||||
static_cast<u32>(Param(system, 4)))
|
||||
static_cast<u32>(Param(system, 1)), static_cast<s32>(Param(system, 2)),
|
||||
static_cast<u32>(Param(system, 3)), static_cast<u32>(Param(system, 4)))
|
||||
.raw;
|
||||
FuncReturn(system, retval);
|
||||
}
|
||||
|
||||
// Used by SignalToAddress32
|
||||
template <ResultCode func(Core::System&, u32, Svc::SignalType, s32, s32)>
|
||||
template <ResultCode func(Core::System&, u32, u32, s32, s32)>
|
||||
void SvcWrap32(Core::System& system) {
|
||||
const u32 retval = func(system, static_cast<u32>(Param(system, 0)),
|
||||
static_cast<Svc::SignalType>(Param(system, 1)),
|
||||
static_cast<s32>(Param(system, 2)), static_cast<s32>(Param(system, 3)))
|
||||
.raw;
|
||||
const u32 retval =
|
||||
func(system, static_cast<u32>(Param(system, 0)), static_cast<u32>(Param(system, 1)),
|
||||
static_cast<s32>(Param(system, 2)), static_cast<s32>(Param(system, 3)))
|
||||
.raw;
|
||||
FuncReturn(system, retval);
|
||||
}
|
||||
|
||||
@@ -546,9 +539,9 @@ void SvcWrap32(Core::System& system) {
|
||||
}
|
||||
|
||||
// Used by WaitSynchronization32
|
||||
template <ResultCode func(Core::System&, u32, u32, s32, u32, s32*)>
|
||||
template <ResultCode func(Core::System&, u32, u32, s32, u32, Handle*)>
|
||||
void SvcWrap32(Core::System& system) {
|
||||
s32 param_1 = 0;
|
||||
u32 param_1 = 0;
|
||||
const u32 retval = func(system, Param32(system, 0), Param32(system, 1), Param32(system, 2),
|
||||
Param32(system, 3), ¶m_1)
|
||||
.raw;
|
||||
|
||||
src/core/hle/kernel/synchronization.cpp (new file, 116 lines)
@@ -0,0 +1,116 @@
|
||||
// Copyright 2020 yuzu Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include "core/core.h"
|
||||
#include "core/hle/kernel/errors.h"
|
||||
#include "core/hle/kernel/handle_table.h"
|
||||
#include "core/hle/kernel/k_scheduler.h"
|
||||
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
|
||||
#include "core/hle/kernel/kernel.h"
|
||||
#include "core/hle/kernel/synchronization.h"
|
||||
#include "core/hle/kernel/synchronization_object.h"
|
||||
#include "core/hle/kernel/thread.h"
|
||||
#include "core/hle/kernel/time_manager.h"
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
Synchronization::Synchronization(Core::System& system) : system{system} {}
|
||||
|
||||
void Synchronization::SignalObject(SynchronizationObject& obj) const {
|
||||
auto& kernel = system.Kernel();
|
||||
KScopedSchedulerLock lock(kernel);
|
||||
if (obj.IsSignaled()) {
|
||||
for (auto thread : obj.GetWaitingThreads()) {
|
||||
if (thread->GetSchedulingStatus() == ThreadSchedStatus::Paused) {
|
||||
if (thread->GetStatus() != ThreadStatus::WaitHLEEvent) {
|
||||
ASSERT(thread->GetStatus() == ThreadStatus::WaitSynch);
|
||||
ASSERT(thread->IsWaitingSync());
|
||||
}
|
||||
thread->SetSynchronizationResults(&obj, RESULT_SUCCESS);
|
||||
thread->ResumeFromWait();
|
||||
}
|
||||
}
|
||||
obj.ClearWaitingThreads();
|
||||
}
|
||||
}
|
||||
|
||||
std::pair<ResultCode, Handle> Synchronization::WaitFor(
|
||||
std::vector<std::shared_ptr<SynchronizationObject>>& sync_objects, s64 nano_seconds) {
|
||||
auto& kernel = system.Kernel();
|
||||
auto* const thread = kernel.CurrentScheduler()->GetCurrentThread();
|
||||
Handle event_handle = InvalidHandle;
|
||||
{
|
||||
KScopedSchedulerLockAndSleep lock(kernel, event_handle, thread, nano_seconds);
|
||||
const auto itr =
|
||||
std::find_if(sync_objects.begin(), sync_objects.end(),
|
||||
[thread](const std::shared_ptr<SynchronizationObject>& object) {
|
||||
return object->IsSignaled();
|
||||
});
|
||||
|
||||
if (itr != sync_objects.end()) {
|
||||
// We found a ready object, acquire it and set the result value
|
||||
SynchronizationObject* object = itr->get();
|
||||
object->Acquire(thread);
|
||||
const u32 index = static_cast<s32>(std::distance(sync_objects.begin(), itr));
|
||||
lock.CancelSleep();
|
||||
return {RESULT_SUCCESS, index};
|
||||
}
|
||||
|
||||
if (nano_seconds == 0) {
|
||||
lock.CancelSleep();
|
||||
return {RESULT_TIMEOUT, InvalidHandle};
|
||||
}
|
||||
|
||||
if (thread->IsPendingTermination()) {
|
||||
lock.CancelSleep();
|
||||
return {ERR_THREAD_TERMINATING, InvalidHandle};
|
||||
}
|
||||
|
||||
if (thread->IsSyncCancelled()) {
|
||||
thread->SetSyncCancelled(false);
|
||||
lock.CancelSleep();
|
||||
return {ERR_SYNCHRONIZATION_CANCELED, InvalidHandle};
|
||||
}
|
||||
|
||||
for (auto& object : sync_objects) {
|
||||
object->AddWaitingThread(SharedFrom(thread));
|
||||
}
|
||||
|
||||
thread->SetSynchronizationObjects(&sync_objects);
|
||||
thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
|
||||
thread->SetStatus(ThreadStatus::WaitSynch);
|
||||
thread->SetWaitingSync(true);
|
||||
}
|
||||
thread->SetWaitingSync(false);
|
||||
|
||||
if (event_handle != InvalidHandle) {
|
||||
auto& time_manager = kernel.TimeManager();
|
||||
time_manager.UnscheduleTimeEvent(event_handle);
|
||||
}
|
||||
|
||||
{
|
||||
KScopedSchedulerLock lock(kernel);
|
||||
ResultCode signaling_result = thread->GetSignalingResult();
|
||||
SynchronizationObject* signaling_object = thread->GetSignalingObject();
|
||||
thread->SetSynchronizationObjects(nullptr);
|
||||
auto shared_thread = SharedFrom(thread);
|
||||
for (auto& obj : sync_objects) {
|
||||
obj->RemoveWaitingThread(shared_thread);
|
||||
}
|
||||
if (signaling_object != nullptr) {
|
||||
const auto itr = std::find_if(
|
||||
sync_objects.begin(), sync_objects.end(),
|
||||
[signaling_object](const std::shared_ptr<SynchronizationObject>& object) {
|
||||
return object.get() == signaling_object;
|
||||
});
|
||||
ASSERT(itr != sync_objects.end());
|
||||
signaling_object->Acquire(thread);
|
||||
const u32 index = static_cast<s32>(std::distance(sync_objects.begin(), itr));
|
||||
return {signaling_result, index};
|
||||
}
|
||||
return {signaling_result, -1};
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace Kernel
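WaitFor above first scans the object list for one that is already signaled, acquires it immediately, and only puts the thread to sleep when none is ready. A reduced sketch of that fast path with stand-in types (not the kernel's classes):

// Sketch only: SyncObject, Result and TryWaitAny are simplified stand-ins.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <memory>
#include <utility>
#include <vector>

struct SyncObject {
    bool signaled = false;
    void Acquire() { signaled = false; } // Consume the signal, as Acquire() does above.
};

enum class Result { Success, TimedOut };

std::pair<Result, std::int32_t> TryWaitAny(std::vector<std::shared_ptr<SyncObject>>& objects) {
    const auto it = std::find_if(objects.begin(), objects.end(),
                                 [](const auto& object) { return object->signaled; });
    if (it == objects.end()) {
        return {Result::TimedOut, -1}; // The real code would now put the thread to sleep.
    }
    (*it)->Acquire();
    return {Result::Success, static_cast<std::int32_t>(std::distance(objects.begin(), it))};
}

int main() {
    std::vector<std::shared_ptr<SyncObject>> objects{std::make_shared<SyncObject>(),
                                                     std::make_shared<SyncObject>()};
    objects[1]->signaled = true;
    const auto [result, index] = TryWaitAny(objects);
    std::cout << (result == Result::Success ? "signaled index " : "timed out ") << index << '\n';
}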
|
||||
src/core/hle/kernel/synchronization.h (new file, 44 lines)
@@ -0,0 +1,44 @@
|
||||
// Copyright 2020 yuzu Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <memory>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
#include "core/hle/kernel/object.h"
|
||||
#include "core/hle/result.h"
|
||||
|
||||
namespace Core {
|
||||
class System;
|
||||
} // namespace Core
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
class SynchronizationObject;
|
||||
|
||||
/**
|
||||
* The 'Synchronization' class is an interface for handling synchronization methods
|
||||
* used by Synchronization objects and synchronization SVCs. This centralizes the processing
* of such synchronization requests.
|
||||
*/
|
||||
class Synchronization {
|
||||
public:
|
||||
explicit Synchronization(Core::System& system);
|
||||
|
||||
/// Signals a synchronization object, waking up all its waiting threads
|
||||
void SignalObject(SynchronizationObject& obj) const;
|
||||
|
||||
/// Checks whether waiting on any of the sync_objects is actually necessary; if one is
/// already signaled, returns Success and the handle index of that object. Otherwise the
/// current thread is blocked until nano_seconds elapse or one of the objects is signaled.
|
||||
std::pair<ResultCode, Handle> WaitFor(
|
||||
std::vector<std::shared_ptr<SynchronizationObject>>& sync_objects, s64 nano_seconds);
|
||||
|
||||
private:
|
||||
Core::System& system;
|
||||
};
|
||||
} // namespace Kernel
|
||||
src/core/hle/kernel/synchronization_object.cpp (new file, 49 lines)
@@ -0,0 +1,49 @@
|
||||
// Copyright 2014 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include <algorithm>
|
||||
#include "common/assert.h"
|
||||
#include "common/common_types.h"
|
||||
#include "common/logging/log.h"
|
||||
#include "core/core.h"
|
||||
#include "core/hle/kernel/kernel.h"
|
||||
#include "core/hle/kernel/object.h"
|
||||
#include "core/hle/kernel/process.h"
|
||||
#include "core/hle/kernel/synchronization.h"
|
||||
#include "core/hle/kernel/synchronization_object.h"
|
||||
#include "core/hle/kernel/thread.h"
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
SynchronizationObject::SynchronizationObject(KernelCore& kernel) : Object{kernel} {}
|
||||
SynchronizationObject::~SynchronizationObject() = default;
|
||||
|
||||
void SynchronizationObject::Signal() {
|
||||
kernel.Synchronization().SignalObject(*this);
|
||||
}
|
||||
|
||||
void SynchronizationObject::AddWaitingThread(std::shared_ptr<Thread> thread) {
|
||||
auto itr = std::find(waiting_threads.begin(), waiting_threads.end(), thread);
|
||||
if (itr == waiting_threads.end())
|
||||
waiting_threads.push_back(std::move(thread));
|
||||
}
|
||||
|
||||
void SynchronizationObject::RemoveWaitingThread(std::shared_ptr<Thread> thread) {
|
||||
auto itr = std::find(waiting_threads.begin(), waiting_threads.end(), thread);
|
||||
// If a thread passed multiple handles to the same object,
|
||||
// the kernel might attempt to remove the thread from the object's
|
||||
// waiting threads list multiple times.
|
||||
if (itr != waiting_threads.end())
|
||||
waiting_threads.erase(itr);
|
||||
}
|
||||
|
||||
void SynchronizationObject::ClearWaitingThreads() {
|
||||
waiting_threads.clear();
|
||||
}
|
||||
|
||||
const std::vector<std::shared_ptr<Thread>>& SynchronizationObject::GetWaitingThreads() const {
|
||||
return waiting_threads;
|
||||
}
|
||||
|
||||
} // namespace Kernel
|
||||
src/core/hle/kernel/synchronization_object.h (new file, 77 lines)
@@ -0,0 +1,77 @@
|
||||
// Copyright 2014 Citra Emulator Project
|
||||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <atomic>
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
|
||||
#include "core/hle/kernel/object.h"
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
class KernelCore;
|
||||
class Synchronization;
|
||||
class Thread;
|
||||
|
||||
/// Class that represents a Kernel object that a thread can be waiting on
|
||||
class SynchronizationObject : public Object {
|
||||
public:
|
||||
explicit SynchronizationObject(KernelCore& kernel);
|
||||
~SynchronizationObject() override;
|
||||
|
||||
/**
|
||||
* Check if the specified thread should wait until the object is available
|
||||
* @param thread The thread about which we're deciding.
|
||||
* @return True if the current thread should wait due to this object being unavailable
|
||||
*/
|
||||
virtual bool ShouldWait(const Thread* thread) const = 0;
|
||||
|
||||
/// Acquire/lock the object for the specified thread if it is available
|
||||
virtual void Acquire(Thread* thread) = 0;
|
||||
|
||||
/// Signal this object
|
||||
virtual void Signal();
|
||||
|
||||
virtual bool IsSignaled() const {
|
||||
return is_signaled;
|
||||
}
|
||||
|
||||
/**
|
||||
* Add a thread to wait on this object
|
||||
* @param thread Pointer to thread to add
|
||||
*/
|
||||
void AddWaitingThread(std::shared_ptr<Thread> thread);
|
||||
|
||||
/**
|
||||
* Removes a thread from waiting on this object (e.g. if it was resumed already)
|
||||
* @param thread Pointer to thread to remove
|
||||
*/
|
||||
void RemoveWaitingThread(std::shared_ptr<Thread> thread);
|
||||
|
||||
/// Get a const reference to the waiting threads list for debug use
|
||||
const std::vector<std::shared_ptr<Thread>>& GetWaitingThreads() const;
|
||||
|
||||
void ClearWaitingThreads();
|
||||
|
||||
protected:
|
||||
std::atomic_bool is_signaled{}; // Tells if this sync object is signaled
|
||||
|
||||
private:
|
||||
/// Threads waiting for this object to become available
|
||||
std::vector<std::shared_ptr<Thread>> waiting_threads;
|
||||
};
|
||||
|
||||
// Specialization of DynamicObjectCast for SynchronizationObjects
|
||||
template <>
|
||||
inline std::shared_ptr<SynchronizationObject> DynamicObjectCast<SynchronizationObject>(
|
||||
std::shared_ptr<Object> object) {
|
||||
if (object != nullptr && object->IsWaitable()) {
|
||||
return std::static_pointer_cast<SynchronizationObject>(object);
|
||||
}
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
} // namespace Kernel
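The DynamicObjectCast specialization above avoids RTTI by gating a static_pointer_cast on a cheap runtime flag (IsWaitable). A small generic illustration of the same pattern; the class names here are invented for the example:

// Illustration of the checked-downcast pattern; Object/WaitableObject are example types.
#include <iostream>
#include <memory>

class Object {
public:
    virtual ~Object() = default;
    virtual bool IsWaitable() const { return false; }
};

class WaitableObject : public Object {
public:
    bool IsWaitable() const override { return true; }
};

std::shared_ptr<WaitableObject> AsWaitable(std::shared_ptr<Object> object) {
    // The flag makes the static cast safe without needing dynamic_cast.
    if (object != nullptr && object->IsWaitable()) {
        return std::static_pointer_cast<WaitableObject>(object);
    }
    return nullptr;
}

int main() {
    std::shared_ptr<Object> plain = std::make_shared<Object>();
    std::shared_ptr<Object> waitable = std::make_shared<WaitableObject>();
    std::cout << (AsWaitable(plain) == nullptr) << ' ' << (AsWaitable(waitable) != nullptr) << '\n';
}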
|
||||
@@ -17,11 +17,9 @@
|
||||
#include "core/hardware_properties.h"
|
||||
#include "core/hle/kernel/errors.h"
|
||||
#include "core/hle/kernel/handle_table.h"
|
||||
#include "core/hle/kernel/k_condition_variable.h"
|
||||
#include "core/hle/kernel/k_scheduler.h"
|
||||
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
|
||||
#include "core/hle/kernel/kernel.h"
|
||||
#include "core/hle/kernel/memory/memory_layout.h"
|
||||
#include "core/hle/kernel/object.h"
|
||||
#include "core/hle/kernel/process.h"
|
||||
#include "core/hle/kernel/thread.h"
|
||||
@@ -36,19 +34,26 @@
|
||||
|
||||
namespace Kernel {
|
||||
|
||||
bool Thread::IsSignaled() const {
|
||||
return signaled;
|
||||
bool Thread::ShouldWait(const Thread* thread) const {
|
||||
return status != ThreadStatus::Dead;
|
||||
}
|
||||
|
||||
Thread::Thread(KernelCore& kernel) : KSynchronizationObject{kernel} {}
|
||||
bool Thread::IsSignaled() const {
|
||||
return status == ThreadStatus::Dead;
|
||||
}
|
||||
|
||||
void Thread::Acquire(Thread* thread) {
|
||||
ASSERT_MSG(!ShouldWait(thread), "object unavailable!");
|
||||
}
|
||||
|
||||
Thread::Thread(KernelCore& kernel) : SynchronizationObject{kernel} {}
|
||||
Thread::~Thread() = default;
|
||||
|
||||
void Thread::Stop() {
|
||||
{
|
||||
KScopedSchedulerLock lock(kernel);
|
||||
SetState(ThreadState::Terminated);
|
||||
signaled = true;
|
||||
NotifyAvailable();
|
||||
SetStatus(ThreadStatus::Dead);
|
||||
Signal();
|
||||
kernel.GlobalHandleTable().Close(global_handle);
|
||||
|
||||
if (owner_process) {
|
||||
@@ -62,27 +67,59 @@ void Thread::Stop() {
|
||||
global_handle = 0;
|
||||
}
|
||||
|
||||
void Thread::Wakeup() {
|
||||
void Thread::ResumeFromWait() {
|
||||
KScopedSchedulerLock lock(kernel);
|
||||
SetState(ThreadState::Runnable);
|
||||
switch (status) {
|
||||
case ThreadStatus::Paused:
|
||||
case ThreadStatus::WaitSynch:
|
||||
case ThreadStatus::WaitHLEEvent:
|
||||
case ThreadStatus::WaitSleep:
|
||||
case ThreadStatus::WaitIPC:
|
||||
case ThreadStatus::WaitMutex:
|
||||
case ThreadStatus::WaitCondVar:
|
||||
case ThreadStatus::WaitArb:
|
||||
case ThreadStatus::Dormant:
|
||||
break;
|
||||
|
||||
case ThreadStatus::Ready:
|
||||
// The thread's wakeup callback must have already been cleared when the thread was first
|
||||
// awoken.
|
||||
ASSERT(hle_callback == nullptr);
|
||||
// If the thread is waiting on multiple wait objects, it might be awoken more than once
|
||||
// before actually resuming. We can ignore subsequent wakeups if the thread status has
|
||||
// already been set to ThreadStatus::Ready.
|
||||
return;
|
||||
case ThreadStatus::Dead:
|
||||
// This should never happen, as threads must complete before being stopped.
|
||||
DEBUG_ASSERT_MSG(false, "Thread with object id {} cannot be resumed because it's DEAD.",
|
||||
GetObjectId());
|
||||
return;
|
||||
}
|
||||
|
||||
SetStatus(ThreadStatus::Ready);
|
||||
}
|
||||
|
||||
void Thread::OnWakeUp() {
|
||||
KScopedSchedulerLock lock(kernel);
|
||||
SetStatus(ThreadStatus::Ready);
|
||||
}
|
||||
|
||||
ResultCode Thread::Start() {
|
||||
KScopedSchedulerLock lock(kernel);
|
||||
SetState(ThreadState::Runnable);
|
||||
SetStatus(ThreadStatus::Ready);
|
||||
return RESULT_SUCCESS;
|
||||
}
|
||||
|
||||
void Thread::CancelWait() {
|
||||
KScopedSchedulerLock lock(kernel);
|
||||
if (GetState() != ThreadState::Waiting || !is_cancellable) {
|
||||
if (GetSchedulingStatus() != ThreadSchedStatus::Paused || !is_waiting_on_sync) {
|
||||
is_sync_cancelled = true;
|
||||
return;
|
||||
}
|
||||
// TODO(Blinkhawk): Implement cancel of server session
|
||||
is_sync_cancelled = false;
|
||||
SetSynchronizationResults(nullptr, ERR_SYNCHRONIZATION_CANCELED);
|
||||
SetState(ThreadState::Runnable);
|
||||
SetStatus(ThreadStatus::Ready);
|
||||
}
|
||||
|
||||
static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, u32 stack_top,
|
||||
@@ -146,24 +183,25 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadTy
|
||||
std::shared_ptr<Thread> thread = std::make_shared<Thread>(kernel);
|
||||
|
||||
thread->thread_id = kernel.CreateNewThreadID();
|
||||
thread->thread_state = ThreadState::Initialized;
|
||||
thread->status = ThreadStatus::Dormant;
|
||||
thread->entry_point = entry_point;
|
||||
thread->stack_top = stack_top;
|
||||
thread->disable_count = 1;
|
||||
thread->tpidr_el0 = 0;
|
||||
thread->current_priority = priority;
|
||||
thread->base_priority = priority;
|
||||
thread->lock_owner = nullptr;
|
||||
thread->nominal_priority = thread->current_priority = priority;
|
||||
thread->schedule_count = -1;
|
||||
thread->last_scheduled_tick = 0;
|
||||
thread->processor_id = processor_id;
|
||||
thread->ideal_core = processor_id;
|
||||
thread->affinity_mask.SetAffinity(processor_id, true);
|
||||
thread->wait_objects = nullptr;
|
||||
thread->mutex_wait_address = 0;
|
||||
thread->condvar_wait_address = 0;
|
||||
thread->wait_handle = 0;
|
||||
thread->name = std::move(name);
|
||||
thread->global_handle = kernel.GlobalHandleTable().Create(thread).Unwrap();
|
||||
thread->owner_process = owner_process;
|
||||
thread->type = type_flags;
|
||||
thread->signaled = false;
|
||||
if ((type_flags & THREADTYPE_IDLE) == 0) {
|
||||
auto& scheduler = kernel.GlobalSchedulerContext();
|
||||
scheduler.AddThread(thread);
|
||||
@@ -188,185 +226,153 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadTy
|
||||
return MakeResult<std::shared_ptr<Thread>>(std::move(thread));
|
||||
}
|
||||
|
||||
void Thread::SetBasePriority(u32 priority) {
|
||||
void Thread::SetPriority(u32 priority) {
|
||||
KScopedSchedulerLock lock(kernel);
|
||||
ASSERT_MSG(priority <= THREADPRIO_LOWEST && priority >= THREADPRIO_HIGHEST,
|
||||
"Invalid priority value.");
|
||||
|
||||
KScopedSchedulerLock lock(kernel);
|
||||
|
||||
// Change our base priority.
|
||||
base_priority = priority;
|
||||
|
||||
// Perform a priority restoration.
|
||||
RestorePriority(kernel, this);
|
||||
nominal_priority = priority;
|
||||
UpdatePriority();
|
||||
}
|
||||
|
||||
void Thread::SetSynchronizationResults(KSynchronizationObject* object, ResultCode result) {
|
||||
void Thread::SetSynchronizationResults(SynchronizationObject* object, ResultCode result) {
|
||||
signaling_object = object;
|
||||
signaling_result = result;
|
||||
}
|
||||
|
||||
s32 Thread::GetSynchronizationObjectIndex(std::shared_ptr<SynchronizationObject> object) const {
|
||||
ASSERT_MSG(!wait_objects->empty(), "Thread is not waiting for anything");
|
||||
const auto match = std::find(wait_objects->rbegin(), wait_objects->rend(), object);
|
||||
return static_cast<s32>(std::distance(match, wait_objects->rend()) - 1);
|
||||
}
|
||||
|
||||
VAddr Thread::GetCommandBufferAddress() const {
|
||||
// Offset from the start of TLS at which the IPC command buffer begins.
|
||||
constexpr u64 command_header_offset = 0x80;
|
||||
return GetTLSAddress() + command_header_offset;
|
||||
}
|
||||
|
||||
void Thread::SetState(ThreadState state) {
|
||||
KScopedSchedulerLock sl(kernel);
|
||||
|
||||
// Clear debugging state
|
||||
SetMutexWaitAddressForDebugging({});
|
||||
SetWaitReasonForDebugging({});
|
||||
|
||||
const ThreadState old_state = thread_state;
|
||||
thread_state =
|
||||
static_cast<ThreadState>((old_state & ~ThreadState::Mask) | (state & ThreadState::Mask));
|
||||
if (thread_state != old_state) {
|
||||
KScheduler::OnThreadStateChanged(kernel, this, old_state);
|
||||
void Thread::SetStatus(ThreadStatus new_status) {
|
||||
if (new_status == status) {
|
||||
return;
|
||||
}
|
||||
|
||||
switch (new_status) {
|
||||
case ThreadStatus::Ready:
|
||||
SetSchedulingStatus(ThreadSchedStatus::Runnable);
|
||||
break;
|
||||
case ThreadStatus::Dormant:
|
||||
SetSchedulingStatus(ThreadSchedStatus::None);
|
||||
break;
|
||||
case ThreadStatus::Dead:
|
||||
SetSchedulingStatus(ThreadSchedStatus::Exited);
|
||||
break;
|
||||
default:
|
||||
SetSchedulingStatus(ThreadSchedStatus::Paused);
|
||||
break;
|
||||
}
|
||||
|
||||
status = new_status;
|
||||
}
|
||||
|
||||
void Thread::AddWaiterImpl(Thread* thread) {
|
||||
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
|
||||
|
||||
// Find the right spot to insert the waiter.
|
||||
auto it = waiter_list.begin();
|
||||
while (it != waiter_list.end()) {
|
||||
if (it->GetPriority() > thread->GetPriority()) {
|
||||
break;
|
||||
}
|
||||
it++;
|
||||
void Thread::AddMutexWaiter(std::shared_ptr<Thread> thread) {
|
||||
if (thread->lock_owner.get() == this) {
|
||||
// If the thread is already waiting for this thread to release the mutex, ensure that the
|
||||
// waiters list is consistent and return without doing anything.
|
||||
const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread);
|
||||
ASSERT(iter != wait_mutex_threads.end());
|
||||
return;
|
||||
}
|
||||
|
||||
// Keep track of how many kernel waiters we have.
|
||||
if (Memory::IsKernelAddressKey(thread->GetAddressKey())) {
|
||||
ASSERT((num_kernel_waiters++) >= 0);
|
||||
}
|
||||
// A thread can't wait on two different mutexes at the same time.
|
||||
ASSERT(thread->lock_owner == nullptr);
|
||||
|
||||
// Insert the waiter.
|
||||
waiter_list.insert(it, *thread);
|
||||
thread->SetLockOwner(this);
|
||||
// Ensure that the thread is not already in the list of mutex waiters
|
||||
const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread);
|
||||
ASSERT(iter == wait_mutex_threads.end());
|
||||
|
||||
// Keep the list in an ordered fashion
|
||||
const auto insertion_point = std::find_if(
|
||||
wait_mutex_threads.begin(), wait_mutex_threads.end(),
|
||||
[&thread](const auto& entry) { return entry->GetPriority() > thread->GetPriority(); });
|
||||
wait_mutex_threads.insert(insertion_point, thread);
|
||||
thread->lock_owner = SharedFrom(this);
|
||||
|
||||
UpdatePriority();
|
||||
}
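AddMutexWaiter keeps the waiter list sorted by priority so the highest-priority waiter (lowest numeric value) sits at the front, which UpdatePriority then reads. A compact sketch of that ordered insertion with std::find_if, using simplified stand-in types:

// Sketch: Waiter and InsertOrdered are stand-ins for the sorted waiter list above.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <memory>
#include <vector>

struct Waiter {
    std::uint32_t priority;
};

void InsertOrdered(std::vector<std::shared_ptr<Waiter>>& waiters, std::shared_ptr<Waiter> waiter) {
    // Insert before the first entry with a strictly worse (numerically larger) priority,
    // so equal priorities keep FIFO order and the best priority stays at the front.
    const auto insertion_point =
        std::find_if(waiters.begin(), waiters.end(), [&waiter](const auto& entry) {
            return entry->priority > waiter->priority;
        });
    waiters.insert(insertion_point, std::move(waiter));
}

int main() {
    std::vector<std::shared_ptr<Waiter>> waiters;
    for (const std::uint32_t priority : {30u, 10u, 44u, 10u}) {
        InsertOrdered(waiters, std::make_shared<Waiter>(Waiter{priority}));
    }
    for (const auto& waiter : waiters) {
        std::cout << waiter->priority << ' '; // Prints: 10 10 30 44
    }
    std::cout << '\n';
}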
|
||||
|
||||
void Thread::RemoveWaiterImpl(Thread* thread) {
|
||||
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
|
||||
void Thread::RemoveMutexWaiter(std::shared_ptr<Thread> thread) {
|
||||
ASSERT(thread->lock_owner.get() == this);
|
||||
|
||||
// Keep track of how many kernel waiters we have.
|
||||
if (Memory::IsKernelAddressKey(thread->GetAddressKey())) {
|
||||
ASSERT((num_kernel_waiters--) > 0);
|
||||
}
|
||||
// Ensure that the thread is in the list of mutex waiters
|
||||
const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread);
|
||||
ASSERT(iter != wait_mutex_threads.end());
|
||||
|
||||
// Remove the waiter.
|
||||
waiter_list.erase(waiter_list.iterator_to(*thread));
|
||||
thread->SetLockOwner(nullptr);
|
||||
wait_mutex_threads.erase(iter);
|
||||
|
||||
thread->lock_owner = nullptr;
|
||||
UpdatePriority();
|
||||
}
|
||||
|
||||
void Thread::RestorePriority(KernelCore& kernel, Thread* thread) {
|
||||
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
|
||||
|
||||
while (true) {
|
||||
// We want to inherit priority where possible.
|
||||
s32 new_priority = thread->GetBasePriority();
|
||||
if (thread->HasWaiters()) {
|
||||
new_priority = std::min(new_priority, thread->waiter_list.front().GetPriority());
|
||||
}
|
||||
|
||||
// If the priority we would inherit is not different from ours, don't do anything.
|
||||
if (new_priority == thread->GetPriority()) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Ensure we don't violate condition variable red black tree invariants.
|
||||
if (auto* cv_tree = thread->GetConditionVariableTree(); cv_tree != nullptr) {
|
||||
BeforeUpdatePriority(kernel, cv_tree, thread);
|
||||
}
|
||||
|
||||
// Change the priority.
|
||||
const s32 old_priority = thread->GetPriority();
|
||||
thread->SetPriority(new_priority);
|
||||
|
||||
// Restore the condition variable, if relevant.
|
||||
if (auto* cv_tree = thread->GetConditionVariableTree(); cv_tree != nullptr) {
|
||||
AfterUpdatePriority(kernel, cv_tree, thread);
|
||||
}
|
||||
|
||||
// Update the scheduler.
|
||||
KScheduler::OnThreadPriorityChanged(kernel, thread, old_priority);
|
||||
|
||||
// Keep the lock owner up to date.
|
||||
Thread* lock_owner = thread->GetLockOwner();
|
||||
if (lock_owner == nullptr) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Update the thread in the lock owner's sorted list, and continue inheriting.
|
||||
lock_owner->RemoveWaiterImpl(thread);
|
||||
lock_owner->AddWaiterImpl(thread);
|
||||
thread = lock_owner;
|
||||
}
|
||||
}
|
||||
|
||||
void Thread::AddWaiter(Thread* thread) {
|
||||
AddWaiterImpl(thread);
|
||||
RestorePriority(kernel, this);
|
||||
}
|
||||
|
||||
void Thread::RemoveWaiter(Thread* thread) {
|
||||
RemoveWaiterImpl(thread);
|
||||
RestorePriority(kernel, this);
|
||||
}
|
||||
|
||||
Thread* Thread::RemoveWaiterByKey(s32* out_num_waiters, VAddr key) {
|
||||
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
|
||||
|
||||
s32 num_waiters{};
|
||||
Thread* next_lock_owner{};
|
||||
auto it = waiter_list.begin();
|
||||
while (it != waiter_list.end()) {
|
||||
if (it->GetAddressKey() == key) {
|
||||
Thread* thread = std::addressof(*it);
|
||||
|
||||
// Keep track of how many kernel waiters we have.
|
||||
if (Memory::IsKernelAddressKey(thread->GetAddressKey())) {
|
||||
ASSERT((num_kernel_waiters--) > 0);
|
||||
}
|
||||
it = waiter_list.erase(it);
|
||||
|
||||
// Update the next lock owner.
|
||||
if (next_lock_owner == nullptr) {
|
||||
next_lock_owner = thread;
|
||||
next_lock_owner->SetLockOwner(nullptr);
|
||||
} else {
|
||||
next_lock_owner->AddWaiterImpl(thread);
|
||||
}
|
||||
num_waiters++;
|
||||
} else {
|
||||
it++;
|
||||
void Thread::UpdatePriority() {
|
||||
// If any of the threads waiting on the mutex have a higher priority
|
||||
// (taking into account priority inheritance), then this thread inherits
|
||||
// that thread's priority.
|
||||
u32 new_priority = nominal_priority;
|
||||
if (!wait_mutex_threads.empty()) {
|
||||
if (wait_mutex_threads.front()->current_priority < new_priority) {
|
||||
new_priority = wait_mutex_threads.front()->current_priority;
|
||||
}
|
||||
}
|
||||
|
||||
// Do priority updates, if we have a next owner.
|
||||
if (next_lock_owner) {
|
||||
RestorePriority(kernel, this);
|
||||
RestorePriority(kernel, next_lock_owner);
|
||||
if (new_priority == current_priority) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Return output.
|
||||
*out_num_waiters = num_waiters;
|
||||
return next_lock_owner;
|
||||
if (GetStatus() == ThreadStatus::WaitCondVar) {
|
||||
owner_process->RemoveConditionVariableThread(SharedFrom(this));
|
||||
}
|
||||
|
||||
SetCurrentPriority(new_priority);
|
||||
|
||||
if (GetStatus() == ThreadStatus::WaitCondVar) {
|
||||
owner_process->InsertConditionVariableThread(SharedFrom(this));
|
||||
}
|
||||
|
||||
if (!lock_owner) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Ensure that the thread is within the correct location in the waiting list.
|
||||
auto old_owner = lock_owner;
|
||||
lock_owner->RemoveMutexWaiter(SharedFrom(this));
|
||||
old_owner->AddMutexWaiter(SharedFrom(this));
|
||||
|
||||
// Recursively update the priority of the thread that depends on the priority of this one.
|
||||
lock_owner->UpdatePriority();
|
||||
}
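UpdatePriority above implements priority inheritance: a thread's effective priority is the best of its nominal priority and the priorities of the threads waiting on mutexes it owns, and any change propagates up the chain of lock owners. A minimal iterative stand-in for that walk; unlike the real code, which only checks the sorted list's front element, this sketch scans all waiters:

// Minimal stand-in for the priority-inheritance walk; Thread here is a simplified struct.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

struct Thread {
    std::uint32_t nominal_priority;
    std::uint32_t current_priority;
    std::vector<Thread*> waiters; // Threads blocked on a mutex this thread owns.
    Thread* lock_owner = nullptr; // Owner of the mutex this thread is blocked on, if any.
};

void UpdatePriority(Thread* thread) {
    while (thread != nullptr) {
        std::uint32_t new_priority = thread->nominal_priority;
        for (const Thread* waiter : thread->waiters) {
            new_priority = std::min(new_priority, waiter->current_priority);
        }
        if (new_priority == thread->current_priority) {
            return; // Nothing changed, so nothing further to propagate.
        }
        thread->current_priority = new_priority;
        thread = thread->lock_owner; // Continue inheriting up the chain.
    }
}

int main() {
    Thread owner{30, 30, {}, nullptr};
    Thread waiter{10, 10, {}, &owner};
    owner.waiters.push_back(&waiter);
    UpdatePriority(&owner);
    std::cout << owner.current_priority << '\n'; // Prints 10: inherited from the waiter.
}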
|
||||
|
||||
bool Thread::AllSynchronizationObjectsReady() const {
|
||||
return std::none_of(wait_objects->begin(), wait_objects->end(),
|
||||
[this](const std::shared_ptr<SynchronizationObject>& object) {
|
||||
return object->ShouldWait(this);
|
||||
});
|
||||
}
|
||||
|
||||
bool Thread::InvokeHLECallback(std::shared_ptr<Thread> thread) {
|
||||
ASSERT(hle_callback);
|
||||
return hle_callback(std::move(thread));
|
||||
}
|
||||
|
||||
ResultCode Thread::SetActivity(ThreadActivity value) {
|
||||
KScopedSchedulerLock lock(kernel);
|
||||
|
||||
auto sched_status = GetState();
|
||||
auto sched_status = GetSchedulingStatus();
|
||||
|
||||
if (sched_status != ThreadState::Runnable && sched_status != ThreadState::Waiting) {
|
||||
if (sched_status != ThreadSchedStatus::Runnable && sched_status != ThreadSchedStatus::Paused) {
|
||||
return ERR_INVALID_STATE;
|
||||
}
|
||||
|
||||
if (IsTerminationRequested()) {
|
||||
if (IsPendingTermination()) {
|
||||
return RESULT_SUCCESS;
|
||||
}
|
||||
|
||||
@@ -388,8 +394,7 @@ ResultCode Thread::Sleep(s64 nanoseconds) {
|
||||
Handle event_handle{};
|
||||
{
|
||||
KScopedSchedulerLockAndSleep lock(kernel, event_handle, this, nanoseconds);
|
||||
SetState(ThreadState::Waiting);
|
||||
SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Sleep);
|
||||
SetStatus(ThreadStatus::WaitSleep);
|
||||
}
|
||||
|
||||
if (event_handle != InvalidHandle) {
|
||||
@@ -400,21 +405,34 @@ ResultCode Thread::Sleep(s64 nanoseconds) {
|
||||
}
|
||||
|
||||
void Thread::AddSchedulingFlag(ThreadSchedFlags flag) {
|
||||
const auto old_state = GetRawState();
|
||||
const u32 old_state = scheduling_state;
|
||||
pausing_state |= static_cast<u32>(flag);
|
||||
const auto base_scheduling = GetState();
|
||||
thread_state = base_scheduling | static_cast<ThreadState>(pausing_state);
|
||||
const u32 base_scheduling = static_cast<u32>(GetSchedulingStatus());
|
||||
scheduling_state = base_scheduling | pausing_state;
|
||||
KScheduler::OnThreadStateChanged(kernel, this, old_state);
|
||||
}
|
||||
|
||||
void Thread::RemoveSchedulingFlag(ThreadSchedFlags flag) {
|
||||
const auto old_state = GetRawState();
|
||||
const u32 old_state = scheduling_state;
|
||||
pausing_state &= ~static_cast<u32>(flag);
|
||||
const auto base_scheduling = GetState();
|
||||
thread_state = base_scheduling | static_cast<ThreadState>(pausing_state);
|
||||
const u32 base_scheduling = static_cast<u32>(GetSchedulingStatus());
|
||||
scheduling_state = base_scheduling | pausing_state;
|
||||
KScheduler::OnThreadStateChanged(kernel, this, old_state);
|
||||
}
|
||||
|
||||
void Thread::SetSchedulingStatus(ThreadSchedStatus new_status) {
|
||||
const u32 old_state = scheduling_state;
|
||||
scheduling_state = (scheduling_state & static_cast<u32>(ThreadSchedMasks::HighMask)) |
|
||||
static_cast<u32>(new_status);
|
||||
KScheduler::OnThreadStateChanged(kernel, this, old_state);
|
||||
}
|
||||
|
||||
void Thread::SetCurrentPriority(u32 new_priority) {
|
||||
const u32 old_priority = std::exchange(current_priority, new_priority);
|
||||
KScheduler::OnThreadPriorityChanged(kernel, this, kernel.CurrentScheduler()->GetCurrentThread(),
|
||||
old_priority);
|
||||
}
|
||||
|
||||
ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
|
||||
KScopedSchedulerLock lock(kernel);
|
||||
const auto HighestSetCore = [](u64 mask, u32 max_cores) {
|
||||
|
||||
@@ -6,21 +6,16 @@

#include <array>
#include <functional>
#include <span>
#include <string>
#include <utility>
#include <vector>

#include <boost/intrusive/list.hpp>

#include "common/common_types.h"
#include "common/intrusive_red_black_tree.h"
#include "common/spin_lock.h"
#include "core/arm/arm_interface.h"
#include "core/hle/kernel/k_affinity_mask.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/svc_common.h"
#include "core/hle/kernel/synchronization_object.h"
#include "core/hle/result.h"

namespace Common {
@@ -78,24 +73,19 @@ enum ThreadProcessorId : s32 {
(1 << THREADPROCESSORID_2) | (1 << THREADPROCESSORID_3)
};

enum class ThreadState : u16 {
Initialized = 0,
Waiting = 1,
Runnable = 2,
Terminated = 3,

SuspendShift = 4,
Mask = (1 << SuspendShift) - 1,

ProcessSuspended = (1 << (0 + SuspendShift)),
ThreadSuspended = (1 << (1 + SuspendShift)),
DebugSuspended = (1 << (2 + SuspendShift)),
BacktraceSuspended = (1 << (3 + SuspendShift)),
InitSuspended = (1 << (4 + SuspendShift)),

SuspendFlagMask = ((1 << 5) - 1) << SuspendShift,
enum class ThreadStatus {
Ready, ///< Ready to run
Paused, ///< Paused by SetThreadActivity or debug
WaitHLEEvent, ///< Waiting for hle event to finish
WaitSleep, ///< Waiting due to a SleepThread SVC
WaitIPC, ///< Waiting for the reply from an IPC request
WaitSynch, ///< Waiting due to WaitSynchronization
WaitMutex, ///< Waiting due to an ArbitrateLock svc
WaitCondVar, ///< Waiting due to an WaitProcessWideKey svc
WaitArb, ///< Waiting due to a SignalToAddress/WaitForAddress svc
Dormant, ///< Created but not yet made ready
Dead ///< Run to completion, or forcefully terminated
};
DECLARE_ENUM_FLAG_OPERATORS(ThreadState);

enum class ThreadWakeupReason {
Signal, // The thread was woken up by WakeupAllWaitingThreads due to an object signal.
@@ -107,6 +97,13 @@ enum class ThreadActivity : u32 {
Paused = 1,
};

enum class ThreadSchedStatus : u32 {
None = 0,
Paused = 1,
Runnable = 2,
Exited = 3,
};

enum class ThreadSchedFlags : u32 {
ProcessPauseFlag = 1 << 4,
ThreadPauseFlag = 1 << 5,
@@ -114,20 +111,13 @@ enum class ThreadSchedFlags : u32 {
KernelInitPauseFlag = 1 << 8,
};

enum class ThreadWaitReasonForDebugging : u32 {
None, ///< Thread is not waiting
Sleep, ///< Thread is waiting due to a SleepThread SVC
IPC, ///< Thread is waiting for the reply from an IPC request
Synchronization, ///< Thread is waiting due to a WaitSynchronization SVC
ConditionVar, ///< Thread is waiting due to a WaitProcessWideKey SVC
Arbitration, ///< Thread is waiting due to a SignalToAddress/WaitForAddress SVC
Suspended, ///< Thread is waiting due to process suspension
enum class ThreadSchedMasks : u32 {
LowMask = 0x000f,
HighMask = 0xfff0,
ForcePauseMask = 0x0070,
};

class Thread final : public KSynchronizationObject, public boost::intrusive::list_base_hook<> {
friend class KScheduler;
friend class Process;

class Thread final : public SynchronizationObject {
public:
explicit Thread(KernelCore& kernel);
~Thread() override;
@@ -137,6 +127,10 @@ public:
using ThreadContext32 = Core::ARM_Interface::ThreadContext32;
using ThreadContext64 = Core::ARM_Interface::ThreadContext64;

using ThreadSynchronizationObjects = std::vector<std::shared_ptr<SynchronizationObject>>;

using HLECallback = std::function<bool(std::shared_ptr<Thread> thread)>;

/**
* Creates and returns a new thread. The new thread is immediately scheduled
* @param system The instance of the whole system
@@ -192,54 +186,59 @@ public:
return HANDLE_TYPE;
}

bool ShouldWait(const Thread* thread) const override;
void Acquire(Thread* thread) override;
bool IsSignaled() const override;

/**
* Gets the thread's current priority
* @return The current thread's priority
*/
[[nodiscard]] s32 GetPriority() const {
u32 GetPriority() const {
return current_priority;
}

/**
* Sets the thread's current priority.
* @param priority The new priority.
*/
void SetPriority(s32 priority) {
current_priority = priority;
}

/**
* Gets the thread's nominal priority.
* @return The current thread's nominal priority.
*/
[[nodiscard]] s32 GetBasePriority() const {
return base_priority;
u32 GetNominalPriority() const {
return nominal_priority;
}

/**
* Sets the thread's nominal priority.
* @param priority The new priority.
* Sets the thread's current priority
* @param priority The new priority
*/
void SetBasePriority(u32 priority);
void SetPriority(u32 priority);

/// Adds a thread to the list of threads that are waiting for a lock held by this thread.
void AddMutexWaiter(std::shared_ptr<Thread> thread);

/// Removes a thread from the list of threads that are waiting for a lock held by this thread.
void RemoveMutexWaiter(std::shared_ptr<Thread> thread);

/// Recalculates the current priority taking into account priority inheritance.
void UpdatePriority();

/// Changes the core that the thread is running or scheduled to run on.
[[nodiscard]] ResultCode SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask);
ResultCode SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask);

/**
* Gets the thread's thread ID
* @return The thread's ID
*/
[[nodiscard]] u64 GetThreadID() const {
u64 GetThreadID() const {
return thread_id;
}

/// Resumes a thread from waiting
void Wakeup();
void ResumeFromWait();

void OnWakeUp();

ResultCode Start();

virtual bool IsSignaled() const override;

/// Cancels a waiting operation that this thread may or may not be within.
///
/// When the thread is within a waiting state, this will set the thread's
@@ -248,21 +247,30 @@ public:
///
void CancelWait();

void SetSynchronizationResults(KSynchronizationObject* object, ResultCode result);
void SetSynchronizationResults(SynchronizationObject* object, ResultCode result);

void SetSyncedObject(KSynchronizationObject* object, ResultCode result) {
SetSynchronizationResults(object, result);
}

ResultCode GetWaitResult(KSynchronizationObject** out) const {
*out = signaling_object;
return signaling_result;
SynchronizationObject* GetSignalingObject() const {
return signaling_object;
}

ResultCode GetSignalingResult() const {
return signaling_result;
}

/**
* Retrieves the index that this particular object occupies in the list of objects
* that the thread passed to WaitSynchronization, starting the search from the last element.
*
* It is used to set the output index of WaitSynchronization when the thread is awakened.
*
* When a thread wakes up due to an object signal, the kernel will use the index of the last
* matching object in the wait objects list in case of having multiple instances of the same
* object in the list.
*
* @param object Object to query the index of.
*/
s32 GetSynchronizationObjectIndex(std::shared_ptr<SynchronizationObject> object) const;

/**
* Stops a thread, invalidating it from further use
*/
@@ -333,22 +341,18 @@ public:

std::shared_ptr<Common::Fiber>& GetHostContext();

ThreadState GetState() const {
return thread_state & ThreadState::Mask;
ThreadStatus GetStatus() const {
return status;
}

ThreadState GetRawState() const {
return thread_state;
}

void SetState(ThreadState state);
void SetStatus(ThreadStatus new_status);

s64 GetLastScheduledTick() const {
return last_scheduled_tick;
return this->last_scheduled_tick;
}

void SetLastScheduledTick(s64 tick) {
last_scheduled_tick = tick;
this->last_scheduled_tick = tick;
}

u64 GetTotalCPUTimeTicks() const {
@@ -383,18 +387,98 @@ public:
return owner_process;
}

const ThreadSynchronizationObjects& GetSynchronizationObjects() const {
return *wait_objects;
}

void SetSynchronizationObjects(ThreadSynchronizationObjects* objects) {
wait_objects = objects;
}

void ClearSynchronizationObjects() {
for (const auto& waiting_object : *wait_objects) {
waiting_object->RemoveWaitingThread(SharedFrom(this));
}
wait_objects->clear();
}

/// Determines whether all the objects this thread is waiting on are ready.
bool AllSynchronizationObjectsReady() const;

const MutexWaitingThreads& GetMutexWaitingThreads() const {
return wait_mutex_threads;
}

Thread* GetLockOwner() const {
return lock_owner;
return lock_owner.get();
}

void SetLockOwner(Thread* owner) {
lock_owner = owner;
void SetLockOwner(std::shared_ptr<Thread> owner) {
lock_owner = std::move(owner);
}

VAddr GetCondVarWaitAddress() const {
return condvar_wait_address;
}

void SetCondVarWaitAddress(VAddr address) {
condvar_wait_address = address;
}

VAddr GetMutexWaitAddress() const {
return mutex_wait_address;
}

void SetMutexWaitAddress(VAddr address) {
mutex_wait_address = address;
}

Handle GetWaitHandle() const {
return wait_handle;
}

void SetWaitHandle(Handle handle) {
wait_handle = handle;
}

VAddr GetArbiterWaitAddress() const {
return arb_wait_address;
}

void SetArbiterWaitAddress(VAddr address) {
arb_wait_address = address;
}

bool HasHLECallback() const {
return hle_callback != nullptr;
}

void SetHLECallback(HLECallback callback) {
hle_callback = std::move(callback);
}

void SetHLETimeEvent(Handle time_event) {
hle_time_event = time_event;
}

void SetHLESyncObject(SynchronizationObject* object) {
hle_object = object;
}

Handle GetHLETimeEvent() const {
return hle_time_event;
}

SynchronizationObject* GetHLESyncObject() const {
return hle_object;
}

void InvalidateHLECallback() {
SetHLECallback(nullptr);
}

bool InvokeHLECallback(std::shared_ptr<Thread> thread);

u32 GetIdealCore() const {
return ideal_core;
}
@@ -409,11 +493,20 @@ public:
ResultCode Sleep(s64 nanoseconds);

s64 GetYieldScheduleCount() const {
return schedule_count;
return this->schedule_count;
}

void SetYieldScheduleCount(s64 count) {
schedule_count = count;
this->schedule_count = count;
}

ThreadSchedStatus GetSchedulingStatus() const {
return static_cast<ThreadSchedStatus>(scheduling_state &
static_cast<u32>(ThreadSchedMasks::LowMask));
}

bool IsRunnable() const {
return scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable);
}

bool IsRunning() const {
@@ -424,32 +517,36 @@ public:
is_running = value;
}

bool IsWaitCancelled() const {
bool IsSyncCancelled() const {
return is_sync_cancelled;
}

void ClearWaitCancelled() {
is_sync_cancelled = false;
void SetSyncCancelled(bool value) {
is_sync_cancelled = value;
}

Handle GetGlobalHandle() const {
return global_handle;
}

bool IsCancellable() const {
return is_cancellable;
bool IsWaitingForArbitration() const {
return waiting_for_arbitration;
}

void SetCancellable() {
is_cancellable = true;
void WaitForArbitration(bool set) {
waiting_for_arbitration = set;
}

void ClearCancellable() {
is_cancellable = false;
bool IsWaitingSync() const {
return is_waiting_on_sync;
}

bool IsTerminationRequested() const {
return will_be_terminated || GetRawState() == ThreadState::Terminated;
void SetWaitingSync(bool is_waiting) {
is_waiting_on_sync = is_waiting;
}

bool IsPendingTermination() const {
return will_be_terminated || GetSchedulingStatus() == ThreadSchedStatus::Exited;
}

bool IsPaused() const {
@@ -481,21 +578,21 @@ public:
constexpr QueueEntry() = default;

constexpr void Initialize() {
prev = nullptr;
next = nullptr;
this->prev = nullptr;
this->next = nullptr;
}

constexpr Thread* GetPrev() const {
return prev;
return this->prev;
}
constexpr Thread* GetNext() const {
return next;
return this->next;
}
constexpr void SetPrev(Thread* thread) {
prev = thread;
this->prev = thread;
}
constexpr void SetNext(Thread* thread) {
next = thread;
this->next = thread;
}

private:
@@ -504,11 +601,11 @@ public:
};

QueueEntry& GetPriorityQueueEntry(s32 core) {
return per_core_priority_queue_entry[core];
return this->per_core_priority_queue_entry[core];
}

const QueueEntry& GetPriorityQueueEntry(s32 core) const {
return per_core_priority_queue_entry[core];
return this->per_core_priority_queue_entry[core];
}

s32 GetDisableDispatchCount() const {
@@ -525,170 +622,24 @@ public:
disable_count--;
}

void SetWaitReasonForDebugging(ThreadWaitReasonForDebugging reason) {
wait_reason_for_debugging = reason;
}

[[nodiscard]] ThreadWaitReasonForDebugging GetWaitReasonForDebugging() const {
return wait_reason_for_debugging;
}

void SetWaitObjectsForDebugging(const std::span<KSynchronizationObject*>& objects) {
wait_objects_for_debugging.clear();
wait_objects_for_debugging.reserve(objects.size());
for (const auto& object : objects) {
wait_objects_for_debugging.emplace_back(object);
}
}

[[nodiscard]] const std::vector<KSynchronizationObject*>& GetWaitObjectsForDebugging() const {
return wait_objects_for_debugging;
}

void SetMutexWaitAddressForDebugging(VAddr address) {
mutex_wait_address_for_debugging = address;
}

[[nodiscard]] VAddr GetMutexWaitAddressForDebugging() const {
return mutex_wait_address_for_debugging;
}

void AddWaiter(Thread* thread);

void RemoveWaiter(Thread* thread);

[[nodiscard]] Thread* RemoveWaiterByKey(s32* out_num_waiters, VAddr key);

[[nodiscard]] VAddr GetAddressKey() const {
return address_key;
}

[[nodiscard]] u32 GetAddressKeyValue() const {
return address_key_value;
}

void SetAddressKey(VAddr key) {
address_key = key;
}

void SetAddressKey(VAddr key, u32 val) {
address_key = key;
address_key_value = val;
}

private:
static constexpr size_t PriorityInheritanceCountMax = 10;
union SyncObjectBuffer {
std::array<KSynchronizationObject*, Svc::ArgumentHandleCountMax> sync_objects{};
std::array<Handle,
Svc::ArgumentHandleCountMax*(sizeof(KSynchronizationObject*) / sizeof(Handle))>
handles;
constexpr SyncObjectBuffer() {}
};
static_assert(sizeof(SyncObjectBuffer::sync_objects) == sizeof(SyncObjectBuffer::handles));
friend class GlobalSchedulerContext;
friend class KScheduler;
friend class Process;

struct ConditionVariableComparator {
struct LightCompareType {
u64 cv_key{};
s32 priority{};

[[nodiscard]] constexpr u64 GetConditionVariableKey() const {
return cv_key;
}

[[nodiscard]] constexpr s32 GetPriority() const {
return priority;
}
};

template <typename T>
requires(
std::same_as<T, Thread> ||
std::same_as<T, LightCompareType>) static constexpr int Compare(const T& lhs,
const Thread& rhs) {
const uintptr_t l_key = lhs.GetConditionVariableKey();
const uintptr_t r_key = rhs.GetConditionVariableKey();

if (l_key < r_key) {
// Sort first by key
return -1;
} else if (l_key == r_key && lhs.GetPriority() < rhs.GetPriority()) {
// And then by priority.
return -1;
} else {
return 1;
}
}
};

Common::IntrusiveRedBlackTreeNode condvar_arbiter_tree_node{};

using ConditionVariableThreadTreeTraits =
Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<&Thread::condvar_arbiter_tree_node>;
using ConditionVariableThreadTree =
ConditionVariableThreadTreeTraits::TreeType<ConditionVariableComparator>;

public:
using ConditionVariableThreadTreeType = ConditionVariableThreadTree;

[[nodiscard]] uintptr_t GetConditionVariableKey() const {
return condvar_key;
}

[[nodiscard]] uintptr_t GetAddressArbiterKey() const {
return condvar_key;
}

void SetConditionVariable(ConditionVariableThreadTree* tree, VAddr address, uintptr_t cv_key,
u32 value) {
condvar_tree = tree;
condvar_key = cv_key;
address_key = address;
address_key_value = value;
}

void ClearConditionVariable() {
condvar_tree = nullptr;
}

[[nodiscard]] bool IsWaitingForConditionVariable() const {
return condvar_tree != nullptr;
}

void SetAddressArbiter(ConditionVariableThreadTree* tree, uintptr_t address) {
condvar_tree = tree;
condvar_key = address;
}

void ClearAddressArbiter() {
condvar_tree = nullptr;
}

[[nodiscard]] bool IsWaitingForAddressArbiter() const {
return condvar_tree != nullptr;
}

[[nodiscard]] ConditionVariableThreadTree* GetConditionVariableTree() const {
return condvar_tree;
}

[[nodiscard]] bool HasWaiters() const {
return !waiter_list.empty();
}

private:
void SetSchedulingStatus(ThreadSchedStatus new_status);
void AddSchedulingFlag(ThreadSchedFlags flag);
void RemoveSchedulingFlag(ThreadSchedFlags flag);
void AddWaiterImpl(Thread* thread);
void RemoveWaiterImpl(Thread* thread);
static void RestorePriority(KernelCore& kernel, Thread* thread);

void SetCurrentPriority(u32 new_priority);

Common::SpinLock context_guard{};
ThreadContext32 context_32{};
ThreadContext64 context_64{};
std::shared_ptr<Common::Fiber> host_context{};

ThreadState thread_state = ThreadState::Initialized;
ThreadStatus status = ThreadStatus::Dormant;
u32 scheduling_state = 0;

u64 thread_id = 0;

@@ -701,11 +652,11 @@ private:
/// Nominal thread priority, as set by the emulated application.
/// The nominal priority is the thread priority without priority
/// inheritance taken into account.
s32 base_priority{};
u32 nominal_priority = 0;

/// Current thread priority. This may change over the course of the
/// thread's lifetime in order to facilitate priority inheritance.
s32 current_priority{};
u32 current_priority = 0;

u64 total_cpu_time_ticks = 0; ///< Total CPU running ticks.
s64 schedule_count{};
@@ -720,27 +671,37 @@ private:
Process* owner_process;

/// Objects that the thread is waiting on, in the same order as they were
/// passed to WaitSynchronization. This is used for debugging only.
std::vector<KSynchronizationObject*> wait_objects_for_debugging;
/// passed to WaitSynchronization.
ThreadSynchronizationObjects* wait_objects;

/// The current mutex wait address. This is used for debugging only.
VAddr mutex_wait_address_for_debugging{};

/// The reason the thread is waiting. This is used for debugging only.
ThreadWaitReasonForDebugging wait_reason_for_debugging{};

KSynchronizationObject* signaling_object;
SynchronizationObject* signaling_object;
ResultCode signaling_result{RESULT_SUCCESS};

/// List of threads that are waiting for a mutex that is held by this thread.
MutexWaitingThreads wait_mutex_threads;

/// Thread that owns the lock that this thread is waiting for.
Thread* lock_owner{};
std::shared_ptr<Thread> lock_owner;

/// If waiting on a ConditionVariable, this is the ConditionVariable address
VAddr condvar_wait_address = 0;
/// If waiting on a Mutex, this is the mutex address
VAddr mutex_wait_address = 0;
/// The handle used to wait for the mutex.
Handle wait_handle = 0;

/// If waiting for an AddressArbiter, this is the address being waited on.
VAddr arb_wait_address{0};
bool waiting_for_arbitration{};

/// Handle used as userdata to reference this object when inserting into the CoreTiming queue.
Handle global_handle = 0;

/// Callback for HLE Events
HLECallback hle_callback;
Handle hle_time_event;
SynchronizationObject* hle_object;

KScheduler* scheduler = nullptr;

std::array<QueueEntry, Core::Hardware::NUM_CPU_CORES> per_core_priority_queue_entry{};
@@ -753,7 +714,7 @@ private:

u32 pausing_state = 0;
bool is_running = false;
bool is_cancellable = false;
bool is_waiting_on_sync = false;
bool is_sync_cancelled = false;

bool is_continuous_on_svc = false;
@@ -764,18 +725,6 @@ private:

bool was_running = false;

bool signaled{};

ConditionVariableThreadTree* condvar_tree{};
uintptr_t condvar_key{};
VAddr address_key{};
u32 address_key_value{};
s32 num_kernel_waiters{};

using WaiterList = boost::intrusive::list<Thread>;
WaiterList waiter_list{};
WaiterList pinned_waiter_list{};

std::string name;
};

@@ -18,10 +18,12 @@ TimeManager::TimeManager(Core::System& system_) : system{system_} {
time_manager_event_type = Core::Timing::CreateEvent(
"Kernel::TimeManagerCallback",
[this](std::uintptr_t thread_handle, std::chrono::nanoseconds) {
const KScopedSchedulerLock lock(system.Kernel());
const auto proper_handle = static_cast<Handle>(thread_handle);

std::shared_ptr<Thread> thread;
{
std::lock_guard lock{mutex};
const auto proper_handle = static_cast<Handle>(thread_handle);
if (cancelled_events[proper_handle]) {
return;
}
@@ -30,7 +32,7 @@ TimeManager::TimeManager(Core::System& system_) : system{system_} {

if (thread) {
// Thread can be null if process has exited
thread->Wakeup();
thread->OnWakeUp();
}
});
}
@@ -40,7 +42,8 @@ void TimeManager::ScheduleTimeEvent(Handle& event_handle, Thread* timetask, s64
event_handle = timetask->GetGlobalHandle();
if (nanoseconds > 0) {
ASSERT(timetask);
ASSERT(timetask->GetState() != ThreadState::Runnable);
ASSERT(timetask->GetStatus() != ThreadStatus::Ready);
ASSERT(timetask->GetStatus() != ThreadStatus::WaitMutex);
system.CoreTiming().ScheduleEvent(std::chrono::nanoseconds{nanoseconds},
time_manager_event_type, event_handle);
} else {

@@ -560,14 +560,14 @@ void ISelfController::GetAccumulatedSuspendedTickChangedEvent(Kernel::HLERequest

AppletMessageQueue::AppletMessageQueue(Kernel::KernelCore& kernel) {
on_new_message =
Kernel::WritableEvent::CreateEventPair(kernel, "AMMessageQueue:OnMessageReceived");
Kernel::WritableEvent::CreateEventPair(kernel, "AMMessageQueue:OnMessageRecieved");
on_operation_mode_changed =
Kernel::WritableEvent::CreateEventPair(kernel, "AMMessageQueue:OperationModeChanged");
}

AppletMessageQueue::~AppletMessageQueue() = default;

const std::shared_ptr<Kernel::ReadableEvent>& AppletMessageQueue::GetMessageReceiveEvent() const {
const std::shared_ptr<Kernel::ReadableEvent>& AppletMessageQueue::GetMesssageRecieveEvent() const {
return on_new_message.readable;
}

@@ -675,7 +675,7 @@ void ICommonStateGetter::GetEventHandle(Kernel::HLERequestContext& ctx) {

IPC::ResponseBuilder rb{ctx, 2, 1};
rb.Push(RESULT_SUCCESS);
rb.PushCopyObjects(msg_queue->GetMessageReceiveEvent());
rb.PushCopyObjects(msg_queue->GetMesssageRecieveEvent());
}

void ICommonStateGetter::ReceiveMessage(Kernel::HLERequestContext& ctx) {

@@ -55,7 +55,7 @@ public:
explicit AppletMessageQueue(Kernel::KernelCore& kernel);
~AppletMessageQueue();

const std::shared_ptr<Kernel::ReadableEvent>& GetMessageReceiveEvent() const;
const std::shared_ptr<Kernel::ReadableEvent>& GetMesssageRecieveEvent() const;
const std::shared_ptr<Kernel::ReadableEvent>& GetOperationModeChangedEvent() const;
void PushMessage(AppletMessage msg);
AppletMessage PopMessage();

@@ -4,6 +4,7 @@

#include <algorithm>
#include <cstring>
#include <span>

#include "common/assert.h"
#include "common/logging/log.h"
@@ -24,7 +25,7 @@ namespace Service::AM::Applets {

static Core::Frontend::ControllerParameters ConvertToFrontendParameters(
ControllerSupportArgPrivate private_arg, ControllerSupportArgHeader header, bool enable_text,
std::vector<IdentificationColor> identification_colors, std::vector<ExplainText> text) {
std::span<IdentificationColor> identification_colors, std::span<ExplainText> text) {
HID::Controller_NPad::NpadStyleSet npad_style_set;
npad_style_set.raw = private_arg.style_set;

@@ -169,24 +170,18 @@ void Controller::Execute() {
case ControllerAppletVersion::Version3:
case ControllerAppletVersion::Version4:
case ControllerAppletVersion::Version5:
return ConvertToFrontendParameters(
controller_private_arg, controller_user_arg_old.header,
controller_user_arg_old.enable_explain_text,
std::vector<IdentificationColor>(
controller_user_arg_old.identification_colors.begin(),
controller_user_arg_old.identification_colors.end()),
std::vector<ExplainText>(controller_user_arg_old.explain_text.begin(),
controller_user_arg_old.explain_text.end()));
return ConvertToFrontendParameters(controller_private_arg,
controller_user_arg_old.header,
controller_user_arg_old.enable_explain_text,
controller_user_arg_old.identification_colors,
controller_user_arg_old.explain_text);
case ControllerAppletVersion::Version7:
default:
return ConvertToFrontendParameters(
controller_private_arg, controller_user_arg_new.header,
controller_user_arg_new.enable_explain_text,
std::vector<IdentificationColor>(
controller_user_arg_new.identification_colors.begin(),
controller_user_arg_new.identification_colors.end()),
std::vector<ExplainText>(controller_user_arg_new.explain_text.begin(),
controller_user_arg_new.explain_text.end()));
return ConvertToFrontendParameters(controller_private_arg,
controller_user_arg_new.header,
controller_user_arg_new.enable_explain_text,
controller_user_arg_new.identification_colors,
controller_user_arg_new.explain_text);
}
}();

@@ -56,7 +56,7 @@ APM::APM(Core::System& system_, std::shared_ptr<Module> apm_, Controller& contro
static const FunctionInfo functions[] = {
{0, &APM::OpenSession, "OpenSession"},
{1, &APM::GetPerformanceMode, "GetPerformanceMode"},
{6, &APM::IsCpuOverclockEnabled, "IsCpuOverclockEnabled"},
{6, nullptr, "IsCpuOverclockEnabled"},
};
RegisterHandlers(functions);
}
@@ -78,14 +78,6 @@ void APM::GetPerformanceMode(Kernel::HLERequestContext& ctx) {
rb.PushEnum(controller.GetCurrentPerformanceMode());
}

void APM::IsCpuOverclockEnabled(Kernel::HLERequestContext& ctx) {
LOG_WARNING(Service_APM, "(STUBBED) called");

IPC::ResponseBuilder rb{ctx, 3};
rb.Push(RESULT_SUCCESS);
rb.Push(false);
}

APM_Sys::APM_Sys(Core::System& system_, Controller& controller_)
: ServiceFramework{system_, "apm:sys"}, controller{controller_} {
// clang-format off

@@ -20,7 +20,6 @@ public:
private:
void OpenSession(Kernel::HLERequestContext& ctx);
void GetPerformanceMode(Kernel::HLERequestContext& ctx);
void IsCpuOverclockEnabled(Kernel::HLERequestContext& ctx);

std::shared_ptr<Module> apm;
Controller& controller;

@@ -70,10 +70,8 @@ public:
Kernel::WritableEvent::CreateEventPair(system.Kernel(), "IAudioOutBufferReleased");

stream = audio_core.OpenStream(system.CoreTiming(), audio_params.sample_rate,
audio_params.channel_count, std::move(unique_name), [this] {
const auto guard = LockService();
buffer_event.writable->Signal();
});
audio_params.channel_count, std::move(unique_name),
[this] { buffer_event.writable->Signal(); });
}

private:

@@ -49,16 +49,16 @@ public:

system_event =
Kernel::WritableEvent::CreateEventPair(system.Kernel(), "IAudioRenderer:SystemEvent");
renderer = std::make_unique<AudioCore::AudioRenderer>(
system.CoreTiming(), system.Memory(), audren_params,
[this]() {
const auto guard = LockService();
system_event.writable->Signal();
},
instance_number);
renderer = std::make_unique<AudioCore::AudioRenderer>(system.CoreTiming(), system.Memory(),
audren_params, system_event.writable,
instance_number);
}

private:
void UpdateAudioCallback() {
system_event.writable->Signal();
}

void GetSampleRate(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_Audio, "called");

@@ -78,13 +78,11 @@ IAppletResource::IAppletResource(Core::System& system_)
pad_update_event = Core::Timing::CreateEvent(
"HID::UpdatePadCallback",
[this](std::uintptr_t user_data, std::chrono::nanoseconds ns_late) {
const auto guard = LockService();
UpdateControllers(user_data, ns_late);
});
motion_update_event = Core::Timing::CreateEvent(
"HID::MotionPadCallback",
[this](std::uintptr_t user_data, std::chrono::nanoseconds ns_late) {
const auto guard = LockService();
UpdateMotion(user_data, ns_late);
});

Some files were not shown because too many files have changed in this diff.