Compare commits


1 Commit

Author: Kyle Kienapfel
SHA1: de406d2728
Message: Draft: trying to tweak ZL/ZR triggering
This is related to Issue 9096
Date: 2022-10-22 17:30:13 -07:00
301 changed files with 3985 additions and 16486 deletions

View File

@@ -133,13 +133,13 @@ if (NOT ENABLE_GENERIC)
if (MSVC)
detect_architecture("_M_AMD64" x86_64)
detect_architecture("_M_IX86" x86)
detect_architecture("_M_ARM" arm)
detect_architecture("_M_ARM64" arm64)
detect_architecture("_M_ARM" ARM)
detect_architecture("_M_ARM64" ARM64)
else()
detect_architecture("__x86_64__" x86_64)
detect_architecture("__i386__" x86)
detect_architecture("__arm__" arm)
detect_architecture("__aarch64__" arm64)
detect_architecture("__arm__" ARM)
detect_architecture("__aarch64__" ARM64)
endif()
endif()
@@ -218,11 +218,11 @@ if(ENABLE_QT)
set(QT_VERSION 5.15)
# Check for system Qt on Linux, fallback to bundled Qt
if (UNIX AND NOT APPLE)
if (${CMAKE_SYSTEM_NAME} STREQUAL "Linux")
if (NOT YUZU_USE_BUNDLED_QT)
find_package(Qt5 ${QT_VERSION} COMPONENTS Widgets DBus Multimedia)
endif()
if (${CMAKE_SYSTEM_NAME} STREQUAL "Linux" AND (NOT Qt5_FOUND OR YUZU_USE_BUNDLED_QT))
if (NOT Qt5_FOUND OR YUZU_USE_BUNDLED_QT)
# Check for dependencies, then enable bundled Qt download
# Check that the system GLIBCXX version is compatible
@@ -323,7 +323,7 @@ if(ENABLE_QT)
set(YUZU_QT_NO_CMAKE_SYSTEM_PATH "NO_CMAKE_SYSTEM_PATH")
endif()
if (UNIX AND NOT APPLE AND YUZU_USE_BUNDLED_QT)
if ((${CMAKE_SYSTEM_NAME} STREQUAL "Linux") AND YUZU_USE_BUNDLED_QT)
find_package(Qt5 ${QT_VERSION} REQUIRED COMPONENTS Widgets Concurrent Multimedia DBus ${QT_PREFIX_HINT} ${YUZU_QT_NO_CMAKE_SYSTEM_PATH})
else()
find_package(Qt5 ${QT_VERSION} REQUIRED COMPONENTS Widgets Concurrent Multimedia ${QT_PREFIX_HINT} ${YUZU_QT_NO_CMAKE_SYSTEM_PATH})
@@ -541,9 +541,9 @@ add_definitions(-DBOOST_ERROR_CODE_HEADER_ONLY
# Adjustments for MSVC + Ninja
if (MSVC AND CMAKE_GENERATOR STREQUAL "Ninja")
add_compile_options(
/wd4464 # relative include path contains '..'
/wd4711 # function 'function' selected for automatic inline expansion
/wd4820 # 'bytes' bytes padding added after construct 'member_name'
/wd4464 # relative include path contains '..'
/wd4820 # 'identifier1': '4' bytes padding added after data member 'identifier2'
)
endif()

21 binary files changed (before/after image thumbnails omitted; contents not shown).

dist/languages/uk.ts (vendored): 7321 lines changed; file diff suppressed because it is too large.

4 binary files changed (before/after image thumbnails omitted; contents not shown).

View File

@@ -7,14 +7,15 @@ include(DownloadExternals)
# xbyak
if (ARCHITECTURE_x86 OR ARCHITECTURE_x86_64)
add_subdirectory(xbyak)
add_library(xbyak INTERFACE)
file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/xbyak/include)
file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/xbyak/xbyak DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/xbyak/include)
target_include_directories(xbyak SYSTEM INTERFACE ${CMAKE_CURRENT_BINARY_DIR}/xbyak/include)
target_compile_definitions(xbyak INTERFACE XBYAK_NO_OP_NAMES)
endif()
# Dynarmic
if (ARCHITECTURE_x86_64 OR ARCHITECTURE_arm64)
if (ARCHITECTURE_arm64)
set(DYNARMIC_FRONTENDS "A32")
endif()
if (ARCHITECTURE_x86_64)
set(DYNARMIC_NO_BUNDLED_FMT ON)
set(DYNARMIC_IGNORE_ASSERTS ON CACHE BOOL "" FORCE)
add_subdirectory(dynarmic)

View File

@@ -58,11 +58,13 @@ if (MSVC)
# Warnings
/W3
/WX
/we4018 # 'expression': signed/unsigned mismatch
/we4062 # Enumerator 'identifier' in a switch of enum 'enumeration' is not handled
/we4101 # 'identifier': unreferenced local variable
/we4189 # 'identifier': local variable is initialized but not referenced
/we4265 # 'class': class has virtual functions, but destructor is not virtual
/we4267 # 'var': conversion from 'size_t' to 'type', possible loss of data
/we4305 # 'context': truncation from 'type1' to 'type2'
/we4388 # 'expression': signed/unsigned mismatch
/we4389 # 'operator': signed/unsigned mismatch
/we4456 # Declaration of 'identifier' hides previous local declaration
@@ -73,13 +75,10 @@ if (MSVC)
/we4547 # 'operator': operator before comma has no effect; expected operator with side-effect
/we4549 # 'operator1': operator before comma has no effect; did you intend 'operator2'?
/we4555 # Expression has no effect; expected expression with side-effect
/we4826 # Conversion from 'type1' to 'type2' is sign-extended. This may cause unexpected runtime behavior.
/we4715 # 'function': not all control paths return a value
/we4834 # Discarding return value of function with 'nodiscard' attribute
/we5038 # data member 'member1' will be initialized after data member 'member2'
/we5233 # explicit lambda capture 'identifier' is not used
/we5245 # 'function': unreferenced function with internal linkage has been removed
/wd4100 # 'identifier': unreferenced formal parameter
/wd4324 # 'struct_name': structure was padded due to __declspec(align())
)
if (USE_CCACHE)
@@ -100,18 +99,24 @@ if (MSVC)
set(CMAKE_EXE_LINKER_FLAGS_RELEASE "/DEBUG /MANIFEST:NO /INCREMENTAL:NO /OPT:REF,ICF" CACHE STRING "" FORCE)
else()
add_compile_options(
-Werror=all
-Werror=extra
-Wall
-Werror=array-bounds
-Werror=implicit-fallthrough
-Werror=missing-declarations
-Werror=missing-field-initializers
-Werror=reorder
-Werror=shadow
-Werror=unused
-Werror=sign-compare
-Werror=switch
-Werror=uninitialized
-Werror=unused-function
-Werror=unused-result
-Werror=unused-variable
-Wextra
-Wmissing-declarations
-Wno-attributes
-Wno-invalid-offsetof
-Wno-unused-parameter
$<$<CXX_COMPILER_ID:Clang>:-Wno-braced-scalar-init>
$<$<CXX_COMPILER_ID:Clang>:-Wno-unused-private-field>
)
if (ARCHITECTURE_x86_64)

View File

@@ -206,18 +206,27 @@ if (MSVC)
/we4244 # 'conversion': conversion from 'type1' to 'type2', possible loss of data
/we4245 # 'conversion': conversion from 'type1' to 'type2', signed/unsigned mismatch
/we4254 # 'operator': conversion from 'type1:field_bits' to 'type2:field_bits', possible loss of data
/we4800 # Implicit conversion from 'type' to bool. Possible information loss
/we4456 # Declaration of 'identifier' hides previous local declaration
/we4457 # Declaration of 'identifier' hides function parameter
/we4458 # Declaration of 'identifier' hides class member
/we4459 # Declaration of 'identifier' hides global declaration
)
else()
target_compile_options(audio_core PRIVATE
-Werror=conversion
-Werror=ignored-qualifiers
-Werror=shadow
-Werror=unused-variable
$<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-parameter>
$<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-variable>
-Wno-sign-conversion
)
endif()
target_link_libraries(audio_core PUBLIC common core)
if (ARCHITECTURE_x86_64 OR ARCHITECTURE_arm64)
if (ARCHITECTURE_x86_64)
target_link_libraries(audio_core PRIVATE dynarmic)
endif()

View File

@@ -56,7 +56,7 @@ Result System::IsConfigValid(const std::string_view device_name,
return ResultSuccess;
}
Result System::Initialize(std::string device_name, const AudioInParameter& in_params,
Result System::Initialize(std::string& device_name, const AudioInParameter& in_params,
const u32 handle_, const u64 applet_resource_user_id_) {
auto result{IsConfigValid(device_name, in_params)};
if (result.IsError()) {

View File

@@ -97,7 +97,7 @@ public:
* @param applet_resource_user_id - Unused.
* @return Result code.
*/
Result Initialize(std::string device_name, const AudioInParameter& in_params, u32 handle,
Result Initialize(std::string& device_name, const AudioInParameter& in_params, u32 handle,
u64 applet_resource_user_id);
/**

View File

@@ -49,8 +49,8 @@ Result System::IsConfigValid(std::string_view device_name,
return Service::Audio::ERR_INVALID_CHANNEL_COUNT;
}
Result System::Initialize(std::string device_name, const AudioOutParameter& in_params, u32 handle_,
u64 applet_resource_user_id_) {
Result System::Initialize(std::string& device_name, const AudioOutParameter& in_params, u32 handle_,
u64& applet_resource_user_id_) {
auto result = IsConfigValid(device_name, in_params);
if (result.IsError()) {
return result;

View File

@@ -88,8 +88,8 @@ public:
* @param applet_resource_user_id - Unused.
* @return Result code.
*/
Result Initialize(std::string device_name, const AudioOutParameter& in_params, u32 handle,
u64 applet_resource_user_id);
Result Initialize(std::string& device_name, const AudioOutParameter& in_params, u32 handle,
u64& applet_resource_user_id);
/**
* Start this system.

View File

@@ -91,7 +91,7 @@ Result InfoUpdater::UpdateVoices(VoiceContext& voice_context,
voice_info.Initialize();
for (u32 channel = 0; channel < in_param.channel_count; channel++) {
*voice_states[channel] = {};
std::memset(voice_states[channel], 0, sizeof(VoiceState));
}
}

View File

@@ -94,7 +94,7 @@ void BiquadFilterCommand::Dump([[maybe_unused]] const ADSP::CommandListProcessor
void BiquadFilterCommand::Process(const ADSP::CommandListProcessor& processor) {
auto state_{reinterpret_cast<VoiceState::BiquadFilterState*>(state)};
if (needs_init) {
*state_ = {};
std::memset(state_, 0, sizeof(VoiceState::BiquadFilterState));
}
auto input_buffer{
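
A note on the change above: for a trivially copyable struct, value-initialization and std::memset produce the same zeroed bytes, so the two reset idioms are interchangeable. A minimal sketch (the field list here is illustrative; the real BiquadFilterState lives in the audio_core headers):

#include <cstring>
#include <type_traits>

struct BiquadFilterState {
    float s0, s1, s2, s3; // filter history; stands in for the real layout
};
static_assert(std::is_trivially_copyable_v<BiquadFilterState>);

void ResetAssign(BiquadFilterState* state) {
    *state = {}; // value-initialization zero-initializes every member
}

void ResetMemset(BiquadFilterState* state) {
    std::memset(state, 0, sizeof(*state)); // byte-wise zeroing; same result here
}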

View File

@@ -30,7 +30,7 @@ void MultiTapBiquadFilterCommand::Process(const ADSP::CommandListProcessor& proc
for (u32 i = 0; i < filter_tap_count; i++) {
auto state{reinterpret_cast<VoiceState::BiquadFilterState*>(states[i])};
if (needs_init[i]) {
*state = {};
std::memset(state, 0, sizeof(VoiceState::BiquadFilterState));
}
ApplyBiquadFilterFloat(output_buffer, input_buffer, biquads[i].b, biquads[i].a, *state,

View File

@@ -26,7 +26,6 @@ void PerformanceManager::CreateImpl(const size_t version) {
impl = std::make_unique<
PerformanceManagerImpl<PerformanceVersion::Version1, PerformanceFrameHeaderVersion1,
PerformanceEntryVersion1, PerformanceDetailVersion1>>();
break;
}
}

View File

@@ -98,8 +98,9 @@ System::System(Core::System& core_, Kernel::KEvent* adsp_rendered_event_)
: core{core_}, adsp{core.AudioCore().GetADSP()}, adsp_rendered_event{adsp_rendered_event_} {}
Result System::Initialize(const AudioRendererParameterInternal& params,
Kernel::KTransferMemory* transfer_memory, u64 transfer_memory_size,
u32 process_handle_, u64 applet_resource_user_id_, s32 session_id_) {
Kernel::KTransferMemory* transfer_memory, const u64 transfer_memory_size,
const u32 process_handle_, const u64 applet_resource_user_id_,
const s32 session_id_) {
if (!CheckValidRevision(params.revision)) {
return Service::Audio::ERR_INVALID_REVISION;
}
@@ -353,8 +354,6 @@ Result System::Initialize(const AudioRendererParameterInternal& params,
render_time_limit_percent = 100;
drop_voice = params.voice_drop_enabled && params.execution_mode == ExecutionMode::Auto;
drop_voice_param = 1.0f;
num_voices_dropped = 0;
allocator.Align(0x40);
command_workbuffer_size = allocator.GetRemainingSize();
@@ -548,7 +547,7 @@ u32 System::GetRenderingTimeLimit() const {
return render_time_limit_percent;
}
void System::SetRenderingTimeLimit(u32 limit) {
void System::SetRenderingTimeLimit(const u32 limit) {
render_time_limit_percent = limit;
}
@@ -636,7 +635,7 @@ void System::SendCommandToDsp() {
}
u64 System::GenerateCommand(std::span<u8> in_command_buffer,
[[maybe_unused]] u64 command_buffer_size_) {
[[maybe_unused]] const u64 command_buffer_size_) {
PoolMapper::ClearUseState(memory_pool_workbuffer, memory_pool_count);
const auto start_time{core.CoreTiming().GetClockTicks()};
@@ -694,8 +693,7 @@ u64 System::GenerateCommand(std::span<u8> in_command_buffer,
voice_context.SortInfo();
const auto start_estimated_time{drop_voice_param *
static_cast<f32>(command_buffer.estimated_process_time)};
const auto start_estimated_time{command_buffer.estimated_process_time};
command_generator.GenerateVoiceCommands();
command_generator.GenerateSubMixCommands();
@@ -714,16 +712,11 @@ u64 System::GenerateCommand(std::span<u8> in_command_buffer,
render_context.behavior->IsAudioRendererProcessingTimeLimit70PercentSupported();
time_limit_percent = 70.0f;
}
const auto end_estimated_time{drop_voice_param *
static_cast<f32>(command_buffer.estimated_process_time)};
const auto estimated_time{start_estimated_time - end_estimated_time};
const auto time_limit{static_cast<u32>(
estimated_time + (((time_limit_percent / 100.0f) * 2'880'000.0) *
(static_cast<f32>(render_time_limit_percent) / 100.0f)))};
num_voices_dropped =
DropVoices(command_buffer, static_cast<u32>(start_estimated_time), time_limit);
static_cast<f32>(start_estimated_time - command_buffer.estimated_process_time) +
(((time_limit_percent / 100.0f) * 2'880'000.0) *
(static_cast<f32>(render_time_limit_percent) / 100.0f)))};
num_voices_dropped = DropVoices(command_buffer, start_estimated_time, time_limit);
}
command_list_header->buffer_size = command_buffer.size;
@@ -744,33 +737,24 @@ u64 System::GenerateCommand(std::span<u8> in_command_buffer,
return command_buffer.size;
}
f32 System::GetVoiceDropParameter() const {
return drop_voice_param;
}
void System::SetVoiceDropParameter(f32 voice_drop_) {
drop_voice_param = voice_drop_;
}
u32 System::DropVoices(CommandBuffer& command_buffer, u32 estimated_process_time, u32 time_limit) {
u32 System::DropVoices(CommandBuffer& command_buffer, const u32 estimated_process_time,
const u32 time_limit) {
u32 i{0};
auto command_list{command_buffer.command_list.data() + sizeof(CommandListHeader)};
ICommand* cmd{nullptr};
ICommand* cmd{};
// Find a first valid voice to drop
while (i < command_buffer.count) {
for (; i < command_buffer.count; i++) {
cmd = reinterpret_cast<ICommand*>(command_list);
if (cmd->type == CommandId::Performance ||
cmd->type == CommandId::DataSourcePcmInt16Version1 ||
cmd->type == CommandId::DataSourcePcmInt16Version2 ||
cmd->type == CommandId::DataSourcePcmFloatVersion1 ||
cmd->type == CommandId::DataSourcePcmFloatVersion2 ||
cmd->type == CommandId::DataSourceAdpcmVersion1 ||
cmd->type == CommandId::DataSourceAdpcmVersion2) {
if (cmd->type != CommandId::Performance &&
cmd->type != CommandId::DataSourcePcmInt16Version1 &&
cmd->type != CommandId::DataSourcePcmInt16Version2 &&
cmd->type != CommandId::DataSourcePcmFloatVersion1 &&
cmd->type != CommandId::DataSourcePcmFloatVersion2 &&
cmd->type != CommandId::DataSourceAdpcmVersion1 &&
cmd->type != CommandId::DataSourceAdpcmVersion2) {
break;
}
command_list += cmd->size;
i++;
}
if (cmd == nullptr || command_buffer.count == 0 || i >= command_buffer.count) {
@@ -783,7 +767,6 @@ u32 System::DropVoices(CommandBuffer& command_buffer, u32 estimated_process_time
const auto node_id_type{cmd->node_id >> 28};
const auto node_id_base{cmd->node_id & 0xFFF};
// If the new estimated process time falls below the limit, we're done dropping.
if (estimated_process_time <= time_limit) {
break;
}
@@ -792,7 +775,6 @@ u32 System::DropVoices(CommandBuffer& command_buffer, u32 estimated_process_time
break;
}
// Don't drop voices marked with the highest priority.
auto& voice_info{voice_context.GetInfo(node_id_base)};
if (voice_info.priority == HighestVoicePriority) {
break;
@@ -801,23 +783,18 @@ u32 System::DropVoices(CommandBuffer& command_buffer, u32 estimated_process_time
voices_dropped++;
voice_info.voice_dropped = true;
// First iteration should drop the voice, and then iterate through all of the commands tied
// to the voice. We don't need reverb on a voice which we've just removed, for example.
// Depops can't be removed otherwise we'll introduce audio popping, and we don't
// remove perf commands. Lower the estimated time for each command dropped.
while (i < command_buffer.count && cmd->node_id == node_id) {
if (cmd->type == CommandId::DepopPrepare) {
cmd->enabled = true;
} else if (cmd->enabled && cmd->type != CommandId::Performance) {
cmd->enabled = false;
estimated_process_time -= static_cast<u32>(
drop_voice_param * static_cast<f32>(cmd->estimated_process_time));
if (i < command_buffer.count) {
while (cmd->node_id == node_id) {
if (cmd->type == CommandId::DepopPrepare) {
cmd->enabled = true;
} else if (cmd->type == CommandId::Performance || !cmd->enabled) {
cmd->enabled = false;
}
i++;
command_list += cmd->size;
cmd = reinterpret_cast<ICommand*>(command_list);
}
command_list += cmd->size;
cmd = reinterpret_cast<ICommand*>(command_list);
i++;
}
i++;
}
return voices_dropped;
}
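
Context for the loop rewrites above: the renderer's command list is a packed byte stream of variable-size records, and both versions of DropVoices advance through it the same way. A stripped-down sketch of that traversal (ICommand reduced to the fields used here; the real type carries more state):

#include <cstdint>

struct ICommand {
    std::uint32_t type;
    std::uint32_t size; // total size of this record in bytes
    std::uint32_t node_id;
    bool enabled;
};

// Visit each command in a packed buffer holding `count` records.
void WalkCommands(std::uint8_t* command_list, std::uint32_t count) {
    for (std::uint32_t i = 0; i < count; i++) {
        auto* cmd = reinterpret_cast<ICommand*>(command_list);
        // ... inspect cmd->type / cmd->enabled here ...
        command_list += cmd->size; // advance by the record's own length
    }
}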

View File

@@ -196,20 +196,6 @@ public:
*/
u32 DropVoices(CommandBuffer& command_buffer, u32 estimated_process_time, u32 time_limit);
/**
* Get the current voice drop parameter.
*
* @return The current voice drop.
*/
f32 GetVoiceDropParameter() const;
/**
* Set the voice drop parameter.
*
* @param The new voice drop.
*/
void SetVoiceDropParameter(f32 voice_drop);
private:
/// Core system
Core::System& core;
@@ -315,8 +301,6 @@ private:
u32 num_voices_dropped{};
/// Tick that rendering started
u64 render_start_tick{};
/// Parameter to control the threshold for dropping voices if the audio graph gets too large
f32 drop_voice_param{1.0f};
};
} // namespace AudioRenderer

View File

@@ -74,8 +74,8 @@ void VoiceContext::SortInfo() {
}
std::ranges::sort(sorted_voice_info, [](const VoiceInfo* a, const VoiceInfo* b) {
return a->priority != b->priority ? a->priority > b->priority
: a->sort_order > b->sort_order;
return a->priority != b->priority ? a->priority < b->priority
: a->sort_order < b->sort_order;
});
}
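
The direction of the comparator decides whether high-priority voices sort to the front, which is the whole difference between the two versions above. A self-contained illustration (VoiceInfo reduced to the two keys compared):

#include <algorithm>
#include <vector>

struct VoiceInfo {
    int priority;
    int sort_order;
};

// With '>' the container ends up in descending order (highest priority first);
// flipping both comparisons to '<' reverses it to ascending.
void SortDescending(std::vector<VoiceInfo*>& voices) {
    std::ranges::sort(voices, [](const VoiceInfo* a, const VoiceInfo* b) {
        return a->priority != b->priority ? a->priority > b->priority
                                          : a->sort_order > b->sort_order;
    });
}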

View File

@@ -156,13 +156,12 @@ if (MSVC)
)
target_compile_options(common PRIVATE
/W4
/we4242 # 'identifier': conversion from 'type1' to 'type2', possible loss of data
/we4254 # 'operator': conversion from 'type1:field_bits' to 'type2:field_bits', possible loss of data
/we4800 # Implicit conversion from 'type' to bool. Possible information loss
/WX
)
else()
target_compile_options(common PRIVATE
-Werror
$<$<CXX_COMPILER_ID:Clang>:-fsized-deallocation>
)
endif()
@@ -170,11 +169,7 @@ endif()
create_target_directory_groups(common)
target_link_libraries(common PUBLIC ${Boost_LIBRARIES} fmt::fmt microprofile Threads::Threads)
if (TARGET lz4::lz4)
target_link_libraries(common PRIVATE lz4::lz4)
else()
target_link_libraries(common PRIVATE LZ4::lz4_shared)
endif()
target_link_libraries(common PRIVATE lz4::lz4)
if (TARGET zstd::zstd)
target_link_libraries(common PRIVATE zstd::zstd)
else()

View File

@@ -156,7 +156,6 @@ AE_FORCEINLINE void compiler_fence(memory_order order) AE_NO_TSAN {
break;
default:
assert(false);
break;
}
}

View File

@@ -141,6 +141,10 @@ public:
constexpr BitField(BitField&&) noexcept = default;
constexpr BitField& operator=(BitField&&) noexcept = default;
[[nodiscard]] constexpr operator T() const {
return Value();
}
constexpr void Assign(const T& value) {
#ifdef _MSC_VER
storage = static_cast<StorageType>((storage & ~mask) | FormatValue(value));
@@ -158,17 +162,6 @@ public:
return ExtractValue(storage);
}
template <typename ConvertedToType>
[[nodiscard]] constexpr ConvertedToType As() const {
static_assert(!std::is_same_v<T, ConvertedToType>,
"Unnecessary cast. Use Value() instead.");
return static_cast<ConvertedToType>(Value());
}
[[nodiscard]] constexpr operator T() const {
return Value();
}
[[nodiscard]] constexpr explicit operator bool() const {
return Value() != 0;
}
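
For orientation, the implicit operator T() being moved above is what lets a BitField read back as its underlying type. A usage sketch, assuming the usual BitField<Position, Bits, Storage> signature from common/bit_field.h (ControlReg is a made-up register type):

#include <cstdint>

using u32 = std::uint32_t;

union ControlReg {
    u32 raw;
    BitField<0, 4, u32> mode;   // bits 0-3
    BitField<4, 1, u32> enable; // bit 4
};

void Example() {
    ControlReg reg{};
    reg.enable.Assign(1);
    u32 m = reg.mode;      // implicit operator T() returns Value()
    if (reg.enable) {      // explicit operator bool()
        reg.mode.Assign(m + 1);
    }
}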

View File

@@ -21,6 +21,11 @@ constexpr size_t hardware_interference_size = std::hardware_destructive_interfer
constexpr size_t hardware_interference_size = 64;
#endif
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4324)
#endif
template <typename T, size_t capacity = 0x400>
class MPSCQueue {
public:
@@ -155,4 +160,8 @@ private:
static_assert(std::is_nothrow_destructible_v<T>, "T must be nothrow destructible");
};
#ifdef _MSC_VER
#pragma warning(pop)
#endif
} // namespace Common
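
MSVC's C4324 fires when alignas forces padding into a structure, which is exactly what the queue's cache-line-aligned slots do; the pragma pair added above silences it only for this header. A minimal reproduction of the pattern being suppressed:

#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4324) // structure was padded due to alignment specifier
#endif

struct alignas(64) Slot {
    int value; // 4 bytes of payload padded out to 64 to avoid false sharing
};

#ifdef _MSC_VER
#pragma warning(pop)
#endif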

View File

@@ -31,10 +31,8 @@
#ifndef _MSC_VER
#if defined(ARCHITECTURE_x86_64)
#ifdef ARCHITECTURE_x86_64
#define Crash() __asm__ __volatile__("int $3")
#elif defined(ARCHITECTURE_arm64)
#define Crash() __asm__ __volatile__("brk #0")
#else
#define Crash() exit(1)
#endif

View File

@@ -3,14 +3,24 @@
#pragma once
#include <iterator>
#include <type_traits>
namespace Common {
// Check if type satisfies the ContiguousContainer named requirement.
// Check if type is like an STL container
template <typename T>
concept IsContiguousContainer = std::contiguous_iterator<typename T::iterator>;
concept IsSTLContainer = requires(T t) {
typename T::value_type;
typename T::iterator;
typename T::const_iterator;
// TODO(ogniK): Replace below is std::same_as<void> when MSVC supports it.
t.begin();
t.end();
t.cbegin();
t.cend();
t.data();
t.size();
};
// TODO: Replace with std::derived_from when the <concepts> header
// is available on all supported platforms.
@@ -24,12 +34,4 @@ concept DerivedFrom = requires {
template <typename From, typename To>
concept ConvertibleTo = std::is_convertible_v<From, To>;
// No equivalents in the stdlib
template <typename T>
concept IsArithmetic = std::is_arithmetic_v<T>;
template <typename T>
concept IsIntegral = std::is_integral_v<T>;
} // namespace Common
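
Either concept exists to steer an if constexpr dispatch (see the Read/Write changes in the file utility header further down). A compact, self-contained illustration of that pattern using the contiguous-iterator form:

#include <cstddef>
#include <iterator>
#include <vector>

template <typename T>
concept IsContiguousContainer = std::contiguous_iterator<typename T::iterator>;

template <typename T>
std::size_t ByteSize(const T& data) {
    if constexpr (IsContiguousContainer<T>) {
        return data.size() * sizeof(typename T::value_type); // container path
    } else {
        return sizeof(T); // plain trivially copyable object path
    }
}

static_assert(IsContiguousContainer<std::vector<int>>);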

View File

@@ -4,7 +4,14 @@
// From: https://github.com/eteran/cpp-utilities/blob/master/fixed/include/cpp-utilities/fixed.h
// See also: http://stackoverflow.com/questions/79677/whats-the-best-way-to-do-fixed-point-math
#pragma once
#ifndef FIXED_H_
#define FIXED_H_
#if __cplusplus >= 201402L
#define CONSTEXPR14 constexpr
#else
#define CONSTEXPR14
#endif
#include <cstddef> // for size_t
#include <cstdint>
@@ -12,8 +19,6 @@
#include <ostream>
#include <type_traits>
#include <common/concepts.h>
namespace Common {
template <size_t I, size_t F>
@@ -52,8 +57,8 @@ struct type_from_size<64> {
static constexpr size_t size = 64;
using value_type = int64_t;
using unsigned_type = std::make_unsigned_t<value_type>;
using signed_type = std::make_signed_t<value_type>;
using unsigned_type = std::make_unsigned<value_type>::type;
using signed_type = std::make_signed<value_type>::type;
using next_size = type_from_size<128>;
};
@@ -63,8 +68,8 @@ struct type_from_size<32> {
static constexpr size_t size = 32;
using value_type = int32_t;
using unsigned_type = std::make_unsigned_t<value_type>;
using signed_type = std::make_signed_t<value_type>;
using unsigned_type = std::make_unsigned<value_type>::type;
using signed_type = std::make_signed<value_type>::type;
using next_size = type_from_size<64>;
};
@@ -74,8 +79,8 @@ struct type_from_size<16> {
static constexpr size_t size = 16;
using value_type = int16_t;
using unsigned_type = std::make_unsigned_t<value_type>;
using signed_type = std::make_signed_t<value_type>;
using unsigned_type = std::make_unsigned<value_type>::type;
using signed_type = std::make_signed<value_type>::type;
using next_size = type_from_size<32>;
};
@@ -85,8 +90,8 @@ struct type_from_size<8> {
static constexpr size_t size = 8;
using value_type = int8_t;
using unsigned_type = std::make_unsigned_t<value_type>;
using signed_type = std::make_signed_t<value_type>;
using unsigned_type = std::make_unsigned<value_type>::type;
using signed_type = std::make_signed<value_type>::type;
using next_size = type_from_size<16>;
};
@@ -101,9 +106,9 @@ constexpr B next_to_base(N rhs) {
struct divide_by_zero : std::exception {};
template <size_t I, size_t F>
constexpr FixedPoint<I, F> divide(
CONSTEXPR14 FixedPoint<I, F> divide(
FixedPoint<I, F> numerator, FixedPoint<I, F> denominator, FixedPoint<I, F>& remainder,
std::enable_if_t<type_from_size<I + F>::next_size::is_specialized>* = nullptr) {
typename std::enable_if<type_from_size<I + F>::next_size::is_specialized>::type* = nullptr) {
using next_type = typename FixedPoint<I, F>::next_type;
using base_type = typename FixedPoint<I, F>::base_type;
@@ -121,9 +126,9 @@ constexpr FixedPoint<I, F> divide(
}
template <size_t I, size_t F>
constexpr FixedPoint<I, F> divide(
CONSTEXPR14 FixedPoint<I, F> divide(
FixedPoint<I, F> numerator, FixedPoint<I, F> denominator, FixedPoint<I, F>& remainder,
std::enable_if_t<!type_from_size<I + F>::next_size::is_specialized>* = nullptr) {
typename std::enable_if<!type_from_size<I + F>::next_size::is_specialized>::type* = nullptr) {
using unsigned_type = typename FixedPoint<I, F>::unsigned_type;
@@ -191,9 +196,9 @@ constexpr FixedPoint<I, F> divide(
// this is the usual implementation of multiplication
template <size_t I, size_t F>
constexpr FixedPoint<I, F> multiply(
CONSTEXPR14 FixedPoint<I, F> multiply(
FixedPoint<I, F> lhs, FixedPoint<I, F> rhs,
std::enable_if_t<type_from_size<I + F>::next_size::is_specialized>* = nullptr) {
typename std::enable_if<type_from_size<I + F>::next_size::is_specialized>::type* = nullptr) {
using next_type = typename FixedPoint<I, F>::next_type;
using base_type = typename FixedPoint<I, F>::base_type;
@@ -210,9 +215,9 @@ constexpr FixedPoint<I, F> multiply(
// it is slightly slower, but is more robust since it doesn't
// require and upgraded type
template <size_t I, size_t F>
constexpr FixedPoint<I, F> multiply(
CONSTEXPR14 FixedPoint<I, F> multiply(
FixedPoint<I, F> lhs, FixedPoint<I, F> rhs,
std::enable_if_t<!type_from_size<I + F>::next_size::is_specialized>* = nullptr) {
typename std::enable_if<!type_from_size<I + F>::next_size::is_specialized>::type* = nullptr) {
using base_type = typename FixedPoint<I, F>::base_type;
@@ -267,20 +272,19 @@ public:
static constexpr base_type one = base_type(1) << fractional_bits;
public: // constructors
constexpr FixedPoint() = default;
FixedPoint() = default;
FixedPoint(const FixedPoint&) = default;
FixedPoint(FixedPoint&&) = default;
FixedPoint& operator=(const FixedPoint&) = default;
constexpr FixedPoint(const FixedPoint&) = default;
constexpr FixedPoint& operator=(const FixedPoint&) = default;
constexpr FixedPoint(FixedPoint&&) noexcept = default;
constexpr FixedPoint& operator=(FixedPoint&&) noexcept = default;
template <IsArithmetic Number>
constexpr FixedPoint(Number n) : data_(static_cast<base_type>(n * one)) {}
template <class Number>
constexpr FixedPoint(
Number n, typename std::enable_if<std::is_arithmetic<Number>::value>::type* = nullptr)
: data_(static_cast<base_type>(n * one)) {}
public: // conversion
template <size_t I2, size_t F2>
constexpr explicit FixedPoint(FixedPoint<I2, F2> other) {
CONSTEXPR14 explicit FixedPoint(FixedPoint<I2, F2> other) {
static_assert(I2 <= I && F2 <= F, "Scaling conversion can only upgrade types");
using T = FixedPoint<I2, F2>;
@@ -304,14 +308,36 @@ public:
}
public: // comparison operators
friend constexpr auto operator<=>(FixedPoint lhs, FixedPoint rhs) = default;
constexpr bool operator==(FixedPoint rhs) const {
return data_ == rhs.data_;
}
constexpr bool operator!=(FixedPoint rhs) const {
return data_ != rhs.data_;
}
constexpr bool operator<(FixedPoint rhs) const {
return data_ < rhs.data_;
}
constexpr bool operator>(FixedPoint rhs) const {
return data_ > rhs.data_;
}
constexpr bool operator<=(FixedPoint rhs) const {
return data_ <= rhs.data_;
}
constexpr bool operator>=(FixedPoint rhs) const {
return data_ >= rhs.data_;
}
public: // unary operators
[[nodiscard]] constexpr bool operator!() const {
constexpr bool operator!() const {
return !data_;
}
[[nodiscard]] constexpr FixedPoint operator~() const {
constexpr FixedPoint operator~() const {
// NOTE(eteran): this will often appear to "just negate" the value
// that is not an error, it is because -x == (~x+1)
// and that "+1" is adding an infinitesimally small fraction to the
@@ -319,87 +345,89 @@ public: // unary operators
return FixedPoint::from_base(~data_);
}
[[nodiscard]] constexpr FixedPoint operator-() const {
constexpr FixedPoint operator-() const {
return FixedPoint::from_base(-data_);
}
[[nodiscard]] constexpr FixedPoint operator+() const {
constexpr FixedPoint operator+() const {
return FixedPoint::from_base(+data_);
}
constexpr FixedPoint& operator++() {
CONSTEXPR14 FixedPoint& operator++() {
data_ += one;
return *this;
}
constexpr FixedPoint& operator--() {
CONSTEXPR14 FixedPoint& operator--() {
data_ -= one;
return *this;
}
constexpr FixedPoint operator++(int) {
CONSTEXPR14 FixedPoint operator++(int) {
FixedPoint tmp(*this);
data_ += one;
return tmp;
}
constexpr FixedPoint operator--(int) {
CONSTEXPR14 FixedPoint operator--(int) {
FixedPoint tmp(*this);
data_ -= one;
return tmp;
}
public: // basic math operators
constexpr FixedPoint& operator+=(FixedPoint n) {
CONSTEXPR14 FixedPoint& operator+=(FixedPoint n) {
data_ += n.data_;
return *this;
}
constexpr FixedPoint& operator-=(FixedPoint n) {
CONSTEXPR14 FixedPoint& operator-=(FixedPoint n) {
data_ -= n.data_;
return *this;
}
constexpr FixedPoint& operator*=(FixedPoint n) {
CONSTEXPR14 FixedPoint& operator*=(FixedPoint n) {
return assign(detail::multiply(*this, n));
}
constexpr FixedPoint& operator/=(FixedPoint n) {
CONSTEXPR14 FixedPoint& operator/=(FixedPoint n) {
FixedPoint temp;
return assign(detail::divide(*this, n, temp));
}
private:
constexpr FixedPoint& assign(FixedPoint rhs) {
CONSTEXPR14 FixedPoint& assign(FixedPoint rhs) {
data_ = rhs.data_;
return *this;
}
public: // binary math operators, effects underlying bit pattern since these
// don't really typically make sense for non-integer values
constexpr FixedPoint& operator&=(FixedPoint n) {
CONSTEXPR14 FixedPoint& operator&=(FixedPoint n) {
data_ &= n.data_;
return *this;
}
constexpr FixedPoint& operator|=(FixedPoint n) {
CONSTEXPR14 FixedPoint& operator|=(FixedPoint n) {
data_ |= n.data_;
return *this;
}
constexpr FixedPoint& operator^=(FixedPoint n) {
CONSTEXPR14 FixedPoint& operator^=(FixedPoint n) {
data_ ^= n.data_;
return *this;
}
template <IsIntegral Integer>
constexpr FixedPoint& operator>>=(Integer n) {
template <class Integer,
class = typename std::enable_if<std::is_integral<Integer>::value>::type>
CONSTEXPR14 FixedPoint& operator>>=(Integer n) {
data_ >>= n;
return *this;
}
template <IsIntegral Integer>
constexpr FixedPoint& operator<<=(Integer n) {
template <class Integer,
class = typename std::enable_if<std::is_integral<Integer>::value>::type>
CONSTEXPR14 FixedPoint& operator<<=(Integer n) {
data_ <<= n;
return *this;
}
@@ -409,42 +437,42 @@ public: // conversion to basic types
data_ += (data_ & fractional_mask) >> 1;
}
[[nodiscard]] constexpr int to_int() {
constexpr int to_int() {
round_up();
return static_cast<int>((data_ & integer_mask) >> fractional_bits);
}
[[nodiscard]] constexpr unsigned int to_uint() {
constexpr unsigned int to_uint() const {
round_up();
return static_cast<unsigned int>((data_ & integer_mask) >> fractional_bits);
}
[[nodiscard]] constexpr int64_t to_long() {
constexpr int64_t to_long() {
round_up();
return static_cast<int64_t>((data_ & integer_mask) >> fractional_bits);
}
[[nodiscard]] constexpr int to_int_floor() const {
constexpr int to_int_floor() const {
return static_cast<int>((data_ & integer_mask) >> fractional_bits);
}
[[nodiscard]] constexpr int64_t to_long_floor() const {
constexpr int64_t to_long_floor() {
return static_cast<int64_t>((data_ & integer_mask) >> fractional_bits);
}
[[nodiscard]] constexpr unsigned int to_uint_floor() const {
constexpr unsigned int to_uint_floor() const {
return static_cast<unsigned int>((data_ & integer_mask) >> fractional_bits);
}
[[nodiscard]] constexpr float to_float() const {
constexpr float to_float() const {
return static_cast<float>(data_) / FixedPoint::one;
}
[[nodiscard]] constexpr double to_double() const {
constexpr double to_double() const {
return static_cast<double>(data_) / FixedPoint::one;
}
[[nodiscard]] constexpr base_type to_raw() const {
constexpr base_type to_raw() const {
return data_;
}
@@ -452,27 +480,27 @@ public: // conversion to basic types
data_ &= fractional_mask;
}
[[nodiscard]] constexpr base_type get_frac() const {
constexpr base_type get_frac() const {
return data_ & fractional_mask;
}
public:
constexpr void swap(FixedPoint& rhs) noexcept {
CONSTEXPR14 void swap(FixedPoint& rhs) {
using std::swap;
swap(data_, rhs.data_);
}
public:
base_type data_{};
base_type data_;
};
// if we have the same fractional portion, but differing integer portions, we trivially upgrade the
// smaller type
template <size_t I1, size_t I2, size_t F>
constexpr std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>> operator+(
FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
CONSTEXPR14 typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type
operator+(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
using T = std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>;
using T = typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type;
const T l = T::from_base(lhs.to_raw());
const T r = T::from_base(rhs.to_raw());
@@ -480,10 +508,10 @@ constexpr std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>> ope
}
template <size_t I1, size_t I2, size_t F>
constexpr std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>> operator-(
FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
CONSTEXPR14 typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type
operator-(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
using T = std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>;
using T = typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type;
const T l = T::from_base(lhs.to_raw());
const T r = T::from_base(rhs.to_raw());
@@ -491,10 +519,10 @@ constexpr std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>> ope
}
template <size_t I1, size_t I2, size_t F>
constexpr std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>> operator*(
FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
CONSTEXPR14 typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type
operator*(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
using T = std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>;
using T = typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type;
const T l = T::from_base(lhs.to_raw());
const T r = T::from_base(rhs.to_raw());
@@ -502,10 +530,10 @@ constexpr std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>> ope
}
template <size_t I1, size_t I2, size_t F>
constexpr std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>> operator/(
FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
CONSTEXPR14 typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type
operator/(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
using T = std::conditional_t<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>;
using T = typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type;
const T l = T::from_base(lhs.to_raw());
const T r = T::from_base(rhs.to_raw());
@@ -520,133 +548,159 @@ std::ostream& operator<<(std::ostream& os, FixedPoint<I, F> f) {
// basic math operators
template <size_t I, size_t F>
constexpr FixedPoint<I, F> operator+(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
CONSTEXPR14 FixedPoint<I, F> operator+(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
lhs += rhs;
return lhs;
}
template <size_t I, size_t F>
constexpr FixedPoint<I, F> operator-(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
CONSTEXPR14 FixedPoint<I, F> operator-(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
lhs -= rhs;
return lhs;
}
template <size_t I, size_t F>
constexpr FixedPoint<I, F> operator*(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
CONSTEXPR14 FixedPoint<I, F> operator*(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
lhs *= rhs;
return lhs;
}
template <size_t I, size_t F>
constexpr FixedPoint<I, F> operator/(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
CONSTEXPR14 FixedPoint<I, F> operator/(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
lhs /= rhs;
return lhs;
}
template <size_t I, size_t F, IsArithmetic Number>
constexpr FixedPoint<I, F> operator+(FixedPoint<I, F> lhs, Number rhs) {
template <size_t I, size_t F, class Number,
class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
CONSTEXPR14 FixedPoint<I, F> operator+(FixedPoint<I, F> lhs, Number rhs) {
lhs += FixedPoint<I, F>(rhs);
return lhs;
}
template <size_t I, size_t F, IsArithmetic Number>
constexpr FixedPoint<I, F> operator-(FixedPoint<I, F> lhs, Number rhs) {
template <size_t I, size_t F, class Number,
class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
CONSTEXPR14 FixedPoint<I, F> operator-(FixedPoint<I, F> lhs, Number rhs) {
lhs -= FixedPoint<I, F>(rhs);
return lhs;
}
template <size_t I, size_t F, IsArithmetic Number>
constexpr FixedPoint<I, F> operator*(FixedPoint<I, F> lhs, Number rhs) {
template <size_t I, size_t F, class Number,
class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
CONSTEXPR14 FixedPoint<I, F> operator*(FixedPoint<I, F> lhs, Number rhs) {
lhs *= FixedPoint<I, F>(rhs);
return lhs;
}
template <size_t I, size_t F, IsArithmetic Number>
constexpr FixedPoint<I, F> operator/(FixedPoint<I, F> lhs, Number rhs) {
template <size_t I, size_t F, class Number,
class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
CONSTEXPR14 FixedPoint<I, F> operator/(FixedPoint<I, F> lhs, Number rhs) {
lhs /= FixedPoint<I, F>(rhs);
return lhs;
}
template <size_t I, size_t F, IsArithmetic Number>
constexpr FixedPoint<I, F> operator+(Number lhs, FixedPoint<I, F> rhs) {
template <size_t I, size_t F, class Number,
class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
CONSTEXPR14 FixedPoint<I, F> operator+(Number lhs, FixedPoint<I, F> rhs) {
FixedPoint<I, F> tmp(lhs);
tmp += rhs;
return tmp;
}
template <size_t I, size_t F, IsArithmetic Number>
constexpr FixedPoint<I, F> operator-(Number lhs, FixedPoint<I, F> rhs) {
template <size_t I, size_t F, class Number,
class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
CONSTEXPR14 FixedPoint<I, F> operator-(Number lhs, FixedPoint<I, F> rhs) {
FixedPoint<I, F> tmp(lhs);
tmp -= rhs;
return tmp;
}
template <size_t I, size_t F, IsArithmetic Number>
constexpr FixedPoint<I, F> operator*(Number lhs, FixedPoint<I, F> rhs) {
template <size_t I, size_t F, class Number,
class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
CONSTEXPR14 FixedPoint<I, F> operator*(Number lhs, FixedPoint<I, F> rhs) {
FixedPoint<I, F> tmp(lhs);
tmp *= rhs;
return tmp;
}
template <size_t I, size_t F, IsArithmetic Number>
constexpr FixedPoint<I, F> operator/(Number lhs, FixedPoint<I, F> rhs) {
template <size_t I, size_t F, class Number,
class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
CONSTEXPR14 FixedPoint<I, F> operator/(Number lhs, FixedPoint<I, F> rhs) {
FixedPoint<I, F> tmp(lhs);
tmp /= rhs;
return tmp;
}
// shift operators
template <size_t I, size_t F, IsIntegral Integer>
constexpr FixedPoint<I, F> operator<<(FixedPoint<I, F> lhs, Integer rhs) {
template <size_t I, size_t F, class Integer,
class = typename std::enable_if<std::is_integral<Integer>::value>::type>
CONSTEXPR14 FixedPoint<I, F> operator<<(FixedPoint<I, F> lhs, Integer rhs) {
lhs <<= rhs;
return lhs;
}
template <size_t I, size_t F, IsIntegral Integer>
constexpr FixedPoint<I, F> operator>>(FixedPoint<I, F> lhs, Integer rhs) {
template <size_t I, size_t F, class Integer,
class = typename std::enable_if<std::is_integral<Integer>::value>::type>
CONSTEXPR14 FixedPoint<I, F> operator>>(FixedPoint<I, F> lhs, Integer rhs) {
lhs >>= rhs;
return lhs;
}
// comparison operators
template <size_t I, size_t F, IsArithmetic Number>
template <size_t I, size_t F, class Number,
class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
constexpr bool operator>(FixedPoint<I, F> lhs, Number rhs) {
return lhs > FixedPoint<I, F>(rhs);
}
template <size_t I, size_t F, IsArithmetic Number>
template <size_t I, size_t F, class Number,
class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
constexpr bool operator<(FixedPoint<I, F> lhs, Number rhs) {
return lhs < FixedPoint<I, F>(rhs);
}
template <size_t I, size_t F, IsArithmetic Number>
template <size_t I, size_t F, class Number,
class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
constexpr bool operator>=(FixedPoint<I, F> lhs, Number rhs) {
return lhs >= FixedPoint<I, F>(rhs);
}
template <size_t I, size_t F, IsArithmetic Number>
template <size_t I, size_t F, class Number,
class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
constexpr bool operator<=(FixedPoint<I, F> lhs, Number rhs) {
return lhs <= FixedPoint<I, F>(rhs);
}
template <size_t I, size_t F, IsArithmetic Number>
template <size_t I, size_t F, class Number,
class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
constexpr bool operator==(FixedPoint<I, F> lhs, Number rhs) {
return lhs == FixedPoint<I, F>(rhs);
}
template <size_t I, size_t F, IsArithmetic Number>
template <size_t I, size_t F, class Number,
class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
constexpr bool operator!=(FixedPoint<I, F> lhs, Number rhs) {
return lhs != FixedPoint<I, F>(rhs);
}
template <size_t I, size_t F, IsArithmetic Number>
template <size_t I, size_t F, class Number,
class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
constexpr bool operator>(Number lhs, FixedPoint<I, F> rhs) {
return FixedPoint<I, F>(lhs) > rhs;
}
template <size_t I, size_t F, IsArithmetic Number>
template <size_t I, size_t F, class Number,
class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
constexpr bool operator<(Number lhs, FixedPoint<I, F> rhs) {
return FixedPoint<I, F>(lhs) < rhs;
}
template <size_t I, size_t F, IsArithmetic Number>
template <size_t I, size_t F, class Number,
class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
constexpr bool operator>=(Number lhs, FixedPoint<I, F> rhs) {
return FixedPoint<I, F>(lhs) >= rhs;
}
template <size_t I, size_t F, IsArithmetic Number>
template <size_t I, size_t F, class Number,
class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
constexpr bool operator<=(Number lhs, FixedPoint<I, F> rhs) {
return FixedPoint<I, F>(lhs) <= rhs;
}
template <size_t I, size_t F, IsArithmetic Number>
template <size_t I, size_t F, class Number,
class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
constexpr bool operator==(Number lhs, FixedPoint<I, F> rhs) {
return FixedPoint<I, F>(lhs) == rhs;
}
template <size_t I, size_t F, IsArithmetic Number>
template <size_t I, size_t F, class Number,
class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
constexpr bool operator!=(Number lhs, FixedPoint<I, F> rhs) {
return FixedPoint<I, F>(lhs) != rhs;
}
} // namespace Common
#undef CONSTEXPR14
#endif
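
The arithmetic this header implements: a Q-format value stores x * 2^F, so multiplication needs one widening step and a shift back down. A standalone 16.16 example of the scheme behind FixedPoint<I, F>:

#include <cstdint>

// 16.16 fixed point: a stored value v represents v / 2^16.
using fixed32 = std::int32_t;
constexpr int F = 16;

constexpr fixed32 FromDouble(double d) {
    return static_cast<fixed32>(d * (1 << F));
}
constexpr double ToDouble(fixed32 v) {
    return static_cast<double>(v) / (1 << F);
}

// Multiply in the next-larger type, then shift back by F
// (the "upgraded type" path from detail::multiply above).
constexpr fixed32 Mul(fixed32 a, fixed32 b) {
    return static_cast<fixed32>((static_cast<std::int64_t>(a) * b) >> F);
}

static_assert(ToDouble(Mul(FromDouble(1.5), FromDouble(2.0))) == 3.0);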

View File

@@ -209,8 +209,8 @@ public:
/**
* Helper function which deduces the value type of a contiguous STL container used in ReadSpan.
* If T is not a contiguous container as defined by the concept IsContiguousContainer, this
* calls ReadObject and T must be a trivially copyable object.
* If T is not a contiguous STL container as defined by the concept IsSTLContainer, this calls
* ReadObject and T must be a trivially copyable object.
*
* See ReadSpan for more details if T is a contiguous container.
* See ReadObject for more details if T is a trivially copyable object.
@@ -223,7 +223,7 @@ public:
*/
template <typename T>
[[nodiscard]] size_t Read(T& data) const {
if constexpr (IsContiguousContainer<T>) {
if constexpr (IsSTLContainer<T>) {
using ContiguousType = typename T::value_type;
static_assert(std::is_trivially_copyable_v<ContiguousType>,
"Data type must be trivially copyable.");
@@ -235,8 +235,8 @@ public:
/**
* Helper function which deduces the value type of a contiguous STL container used in WriteSpan.
* If T is not a contiguous STL container as defined by the concept IsContiguousContainer, this
* calls WriteObject and T must be a trivially copyable object.
* If T is not a contiguous STL container as defined by the concept IsSTLContainer, this calls
* WriteObject and T must be a trivially copyable object.
*
* See WriteSpan for more details if T is a contiguous container.
* See WriteObject for more details if T is a trivially copyable object.
@@ -249,7 +249,7 @@ public:
*/
template <typename T>
[[nodiscard]] size_t Write(const T& data) const {
if constexpr (IsContiguousContainer<T>) {
if constexpr (IsSTLContainer<T>) {
using ContiguousType = typename T::value_type;
static_assert(std::is_trivially_copyable_v<ContiguousType>,
"Data type must be trivially copyable.");

View File

@@ -359,12 +359,6 @@ public:
}
});
long page_size = sysconf(_SC_PAGESIZE);
if (page_size != 0x1000) {
LOG_CRITICAL(HW_Memory, "page size {:#x} is incompatible with 4K paging", page_size);
throw std::bad_alloc{};
}
// Backing memory initialization
#if defined(__FreeBSD__) && __FreeBSD__ < 13
// XXX Drop after FreeBSD 12.* reaches EOL on 2024-06-30
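
For reference, the guard removed above probed the host's page size and rejected anything other than 4 KiB, since the emulated address space assumes 0x1000-byte paging. A minimal sketch of that probe (POSIX sysconf, as in the original):

#include <unistd.h>
#include <cstdio>

int main() {
    const long page_size = sysconf(_SC_PAGESIZE);
    std::printf("host page size: %ld bytes\n", page_size);
    return page_size == 0x1000 ? 0 : 1; // non-zero exit on incompatible hosts
}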

View File

@@ -100,6 +100,7 @@ enum class CameraError {
enum class VibrationAmplificationType {
Linear,
Exponential,
Test,
};
// Analog properties for calibration
@@ -324,10 +325,6 @@ public:
return VibrationError::NotSupported;
}
virtual bool IsVibrationEnabled() {
return false;
}
virtual PollingError SetPollingMode([[maybe_unused]] PollingMode polling_mode) {
return PollingError::NotSupported;
}

View File

@@ -151,7 +151,6 @@ void UpdateRescalingInfo() {
ASSERT(false);
info.up_scale = 1;
info.down_shift = 0;
break;
}
info.up_factor = static_cast<f32>(info.up_scale) / (1U << info.down_shift);
info.down_factor = static_cast<f32>(1U << info.down_shift) / info.up_scale;

View File

@@ -190,13 +190,8 @@ add_library(core STATIC
hle/kernel/k_code_memory.h
hle/kernel/k_condition_variable.cpp
hle/kernel/k_condition_variable.h
hle/kernel/k_debug.h
hle/kernel/k_dynamic_page_manager.h
hle/kernel/k_dynamic_resource_manager.h
hle/kernel/k_dynamic_slab_heap.h
hle/kernel/k_event.cpp
hle/kernel/k_event.h
hle/kernel/k_event_info.h
hle/kernel/k_handle_table.cpp
hle/kernel/k_handle_table.h
hle/kernel/k_interrupt_manager.cpp
@@ -224,8 +219,6 @@ add_library(core STATIC
hle/kernel/k_page_group.h
hle/kernel/k_page_table.cpp
hle/kernel/k_page_table.h
hle/kernel/k_page_table_manager.h
hle/kernel/k_page_table_slab_heap.h
hle/kernel/k_port.cpp
hle/kernel/k_port.h
hle/kernel/k_priority_queue.h
@@ -247,8 +240,6 @@ add_library(core STATIC
hle/kernel/k_server_session.h
hle/kernel/k_session.cpp
hle/kernel/k_session.h
hle/kernel/k_session_request.cpp
hle/kernel/k_session_request.h
hle/kernel/k_shared_memory.cpp
hle/kernel/k_shared_memory.h
hle/kernel/k_shared_memory_info.h
@@ -258,8 +249,6 @@ add_library(core STATIC
hle/kernel/k_synchronization_object.cpp
hle/kernel/k_synchronization_object.h
hle/kernel/k_system_control.h
hle/kernel/k_system_resource.cpp
hle/kernel/k_system_resource.h
hle/kernel/k_thread.cpp
hle/kernel/k_thread.h
hle/kernel/k_thread_local_page.cpp
@@ -497,6 +486,10 @@ add_library(core STATIC
hle/service/hid/irsensor/processor_base.h
hle/service/hid/irsensor/tera_plugin_processor.cpp
hle/service/hid/irsensor/tera_plugin_processor.h
hle/service/jit/jit_context.cpp
hle/service/jit/jit_context.h
hle/service/jit/jit.cpp
hle/service/jit/jit.h
hle/service/lbl/lbl.cpp
hle/service/lbl/lbl.h
hle/service/ldn/lan_discovery.cpp
@@ -776,15 +769,19 @@ if (MSVC)
/we4244 # 'conversion': conversion from 'type1' to 'type2', possible loss of data
/we4245 # 'conversion': conversion from 'type1' to 'type2', signed/unsigned mismatch
/we4254 # 'operator': conversion from 'type1:field_bits' to 'type2:field_bits', possible loss of data
/we4800 # Implicit conversion from 'type' to bool. Possible information loss
)
else()
target_compile_options(core PRIVATE
-Werror=conversion
-Werror=ignored-qualifiers
-Wno-sign-conversion
$<$<CXX_COMPILER_ID:GNU>:-Werror=class-memaccess>
$<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-parameter>
$<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-variable>
$<$<CXX_COMPILER_ID:Clang>:-fsized-deallocation>
-Wno-sign-conversion
)
endif()
@@ -801,18 +798,14 @@ if (ENABLE_WEB_SERVICE)
target_link_libraries(core PRIVATE web_service)
endif()
if (ARCHITECTURE_x86_64 OR ARCHITECTURE_arm64)
if (ARCHITECTURE_x86_64)
target_sources(core PRIVATE
arm/dynarmic/arm_dynarmic_64.cpp
arm/dynarmic/arm_dynarmic_64.h
arm/dynarmic/arm_dynarmic_32.cpp
arm/dynarmic/arm_dynarmic_32.h
arm/dynarmic/arm_dynarmic_64.cpp
arm/dynarmic/arm_dynarmic_64.h
arm/dynarmic/arm_dynarmic_cp15.cpp
arm/dynarmic/arm_dynarmic_cp15.h
hle/service/jit/jit_context.cpp
hle/service/jit/jit_context.h
hle/service/jit/jit.cpp
hle/service/jit/jit.h
)
target_link_libraries(core PRIVATE dynarmic)
endif()

View File

@@ -134,14 +134,6 @@ void ARM_Interface::Run() {
}
system.ExitDynarmicProfile();
// If the thread is scheduled for termination, exit the thread.
if (current_thread->HasDpc()) {
if (current_thread->IsTerminationRequested()) {
current_thread->Exit();
UNREACHABLE();
}
}
// Notify the debugger and go to sleep if a breakpoint was hit,
// or if the thread is unable to continue for any reason.
if (Has(hr, breakpoint) || Has(hr, no_execute)) {

View File

@@ -301,11 +301,6 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable*
}
}
#ifdef ARCHITECTURE_arm64
// TODO: remove when fixed in dynarmic
config.optimizations &= ~Dynarmic::OptimizationFlag::BlockLinking;
#endif
return std::make_unique<Dynarmic::A32::Jit>(config);
}
@@ -455,7 +450,7 @@ std::vector<ARM_Interface::BacktraceEntry> ARM_Dynarmic_32::GetBacktrace(Core::S
// Frame records are two words long:
// fp+0 : pointer to previous frame record
// fp+4 : value of lr for frame
for (size_t i = 0; i < 256; i++) {
while (true) {
out.push_back({"", 0, lr, 0, ""});
if (!fp || (fp % 4 != 0) || !memory.IsValidVirtualAddressRange(fp, 8)) {
break;

View File

@@ -517,7 +517,7 @@ std::vector<ARM_Interface::BacktraceEntry> ARM_Dynarmic_64::GetBacktrace(Core::S
// Frame records are two words long:
// fp+0 : pointer to previous frame record
// fp+8 : value of lr for frame
for (size_t i = 0; i < 256; i++) {
while (true) {
out.push_back({"", 0, lr, 0, ""});
if (!fp || (fp % 4 != 0) || !memory.IsValidVirtualAddressRange(fp, 16)) {
break;

View File

@@ -52,16 +52,12 @@ CallbackOrAccessOneWord DynarmicCP15::CompileSendOneWord(bool two, unsigned opc1
case 4:
// CP15_DATA_SYNC_BARRIER
return Callback{
[](void*, std::uint32_t, std::uint32_t) -> std::uint64_t {
#if defined(_MSC_VER) && defined(ARCHITECTURE_x86_64)
[](Dynarmic::A32::Jit*, void*, std::uint32_t, std::uint32_t) -> std::uint64_t {
#ifdef _MSC_VER
_mm_mfence();
_mm_lfence();
#elif defined(ARCHITECTURE_x86_64)
asm volatile("mfence\n\tlfence\n\t" : : : "memory");
#elif defined(ARCHITECTURE_arm64)
asm volatile("dsb sy\n\t" : : : "memory");
#else
#error Unsupported architecture
asm volatile("mfence\n\tlfence\n\t" : : : "memory");
#endif
return 0;
},
@@ -70,15 +66,11 @@ CallbackOrAccessOneWord DynarmicCP15::CompileSendOneWord(bool two, unsigned opc1
case 5:
// CP15_DATA_MEMORY_BARRIER
return Callback{
[](void*, std::uint32_t, std::uint32_t) -> std::uint64_t {
#if defined(_MSC_VER) && defined(ARCHITECTURE_x86_64)
[](Dynarmic::A32::Jit*, void*, std::uint32_t, std::uint32_t) -> std::uint64_t {
#ifdef _MSC_VER
_mm_mfence();
#elif defined(ARCHITECTURE_x86_64)
asm volatile("mfence\n\t" : : : "memory");
#elif defined(ARCHITECTURE_arm64)
asm volatile("dmb sy\n\t" : : : "memory");
#else
#error Unsupported architecture
asm volatile("mfence\n\t" : : : "memory");
#endif
return 0;
},
@@ -123,7 +115,7 @@ CallbackOrAccessOneWord DynarmicCP15::CompileGetOneWord(bool two, unsigned opc1,
CallbackOrAccessTwoWords DynarmicCP15::CompileGetTwoWords(bool two, unsigned opc, CoprocReg CRm) {
if (!two && opc == 0 && CRm == CoprocReg::C14) {
// CNTPCT
const auto callback = [](void* arg, u32, u32) -> u64 {
const auto callback = [](Dynarmic::A32::Jit*, void* arg, u32, u32) -> u64 {
const auto& parent_arg = *static_cast<ARM_Dynarmic_32*>(arg);
return parent_arg.system.CoreTiming().GetClockTicks();
};
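
For orientation, the callbacks above boil down to per-architecture barrier selection; the arm64 branches are what the removed lines provided. A hedged, self-contained sketch of that dispatch (the ARCHITECTURE_* macros are assumed to come from the build system, as elsewhere in the tree):

#if defined(_MSC_VER) && defined(ARCHITECTURE_x86_64)
#include <emmintrin.h>
#endif

inline void DataSyncBarrier() {
#if defined(_MSC_VER) && defined(ARCHITECTURE_x86_64)
    _mm_mfence();
    _mm_lfence();
#elif defined(ARCHITECTURE_x86_64)
    asm volatile("mfence\n\tlfence\n\t" ::: "memory");
#elif defined(ARCHITECTURE_arm64)
    asm volatile("dsb sy\n\t" ::: "memory");
#endif
}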

View File

@@ -1,7 +1,7 @@
// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#if defined(ARCHITECTURE_x86_64) || defined(ARCHITECTURE_arm64)
#ifdef ARCHITECTURE_x86_64
#include "core/arm/dynarmic/arm_exclusive_monitor.h"
#endif
#include "core/arm/exclusive_monitor.h"
@@ -13,7 +13,7 @@ ExclusiveMonitor::~ExclusiveMonitor() = default;
std::unique_ptr<Core::ExclusiveMonitor> MakeExclusiveMonitor(Memory::Memory& memory,
std::size_t num_cores) {
#if defined(ARCHITECTURE_x86_64) || defined(ARCHITECTURE_arm64)
#ifdef ARCHITECTURE_x86_64
return std::make_unique<Core::DynarmicExclusiveMonitor>(memory, num_cores);
#else
// TODO(merry): Passthrough exclusive monitor

View File

@@ -133,56 +133,6 @@ struct System::Impl {
: kernel{system}, fs_controller{system}, memory{system}, hid_core{}, room_network{},
cpu_manager{system}, reporter{system}, applet_manager{system}, time_manager{system} {}
void Initialize(System& system) {
device_memory = std::make_unique<Core::DeviceMemory>();
is_multicore = Settings::values.use_multi_core.GetValue();
extended_memory_layout = Settings::values.use_extended_memory_layout.GetValue();
core_timing.SetMulticore(is_multicore);
core_timing.Initialize([&system]() { system.RegisterHostThread(); });
const auto posix_time = std::chrono::system_clock::now().time_since_epoch();
const auto current_time =
std::chrono::duration_cast<std::chrono::seconds>(posix_time).count();
Settings::values.custom_rtc_differential =
Settings::values.custom_rtc.value_or(current_time) - current_time;
// Create a default fs if one doesn't already exist.
if (virtual_filesystem == nullptr) {
virtual_filesystem = std::make_shared<FileSys::RealVfsFilesystem>();
}
if (content_provider == nullptr) {
content_provider = std::make_unique<FileSys::ContentProviderUnion>();
}
// Create default implementations of applets if one is not provided.
applet_manager.SetDefaultAppletsIfMissing();
is_async_gpu = Settings::values.use_asynchronous_gpu_emulation.GetValue();
kernel.SetMulticore(is_multicore);
cpu_manager.SetMulticore(is_multicore);
cpu_manager.SetAsyncGpu(is_async_gpu);
}
void ReinitializeIfNecessary(System& system) {
const bool must_reinitialize =
is_multicore != Settings::values.use_multi_core.GetValue() ||
extended_memory_layout != Settings::values.use_extended_memory_layout.GetValue();
if (!must_reinitialize) {
return;
}
LOG_DEBUG(Kernel, "Re-initializing");
is_multicore = Settings::values.use_multi_core.GetValue();
extended_memory_layout = Settings::values.use_extended_memory_layout.GetValue();
Initialize(system);
}
SystemResultStatus Run() {
std::unique_lock<std::mutex> lk(suspend_guard);
status = SystemResultStatus::Success;
@@ -228,14 +178,37 @@ struct System::Impl {
debugger = std::make_unique<Debugger>(system, port);
}
SystemResultStatus SetupForMainProcess(System& system, Frontend::EmuWindow& emu_window) {
SystemResultStatus Init(System& system, Frontend::EmuWindow& emu_window) {
LOG_DEBUG(Core, "initialized OK");
// Setting changes may require a full system reinitialization (e.g., disabling multicore).
ReinitializeIfNecessary(system);
device_memory = std::make_unique<Core::DeviceMemory>();
is_multicore = Settings::values.use_multi_core.GetValue();
is_async_gpu = Settings::values.use_asynchronous_gpu_emulation.GetValue();
kernel.SetMulticore(is_multicore);
cpu_manager.SetMulticore(is_multicore);
cpu_manager.SetAsyncGpu(is_async_gpu);
core_timing.SetMulticore(is_multicore);
kernel.Initialize();
cpu_manager.Initialize();
core_timing.Initialize([&system]() { system.RegisterHostThread(); });
const auto posix_time = std::chrono::system_clock::now().time_since_epoch();
const auto current_time =
std::chrono::duration_cast<std::chrono::seconds>(posix_time).count();
Settings::values.custom_rtc_differential =
Settings::values.custom_rtc.value_or(current_time) - current_time;
// Create a default fs if one doesn't already exist.
if (virtual_filesystem == nullptr)
virtual_filesystem = std::make_shared<FileSys::RealVfsFilesystem>();
if (content_provider == nullptr)
content_provider = std::make_unique<FileSys::ContentProviderUnion>();
/// Create default implementations of applets if one is not provided.
applet_manager.SetDefaultAppletsIfMissing();
/// Reset all glue registrations
arp_manager.ResetAll();
@@ -280,11 +253,11 @@ struct System::Impl {
return SystemResultStatus::ErrorGetLoader;
}
SystemResultStatus init_result{SetupForMainProcess(system, emu_window)};
SystemResultStatus init_result{Init(system, emu_window)};
if (init_result != SystemResultStatus::Success) {
LOG_CRITICAL(Core, "Failed to initialize system (Error {})!",
static_cast<int>(init_result));
ShutdownMainProcess();
Shutdown();
return init_result;
}
@@ -303,7 +276,7 @@ struct System::Impl {
const auto [load_result, load_parameters] = app_loader->Load(*main_process, system);
if (load_result != Loader::ResultStatus::Success) {
LOG_CRITICAL(Core, "Failed to load ROM (Error {})!", load_result);
ShutdownMainProcess();
Shutdown();
return static_cast<SystemResultStatus>(
static_cast<u32>(SystemResultStatus::ErrorLoader) + static_cast<u32>(load_result));
@@ -362,7 +335,7 @@ struct System::Impl {
return status;
}
void ShutdownMainProcess() {
void Shutdown() {
SetShuttingDown(true);
// Log last frame performance stats if game was loaded
@@ -390,14 +363,13 @@ struct System::Impl {
kernel.ShutdownCores();
cpu_manager.Shutdown();
debugger.reset();
services->KillNVNFlinger();
kernel.CloseServices();
services.reset();
service_manager.reset();
cheat_engine.reset();
telemetry_session.reset();
time_manager.Shutdown();
core_timing.ClearPendingEvents();
core_timing.Shutdown();
app_loader.reset();
audio_core.reset();
gpu_core.reset();
@@ -405,6 +377,7 @@ struct System::Impl {
perf_stats.reset();
kernel.Shutdown();
memory.Reset();
applet_manager.ClearAll();
if (auto room_member = room_network.GetRoomMember().lock()) {
Network::GameInfo game_info{};
@@ -527,7 +500,6 @@ struct System::Impl {
bool is_multicore{};
bool is_async_gpu{};
bool extended_memory_layout{};
ExecuteProgramCallback execute_program_callback;
ExitCallback exit_callback;
@@ -548,10 +520,6 @@ const CpuManager& System::GetCpuManager() const {
return impl->cpu_manager;
}
void System::Initialize() {
impl->Initialize(*this);
}
SystemResultStatus System::Run() {
return impl->Run();
}
@@ -572,8 +540,8 @@ void System::InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size) {
impl->kernel.InvalidateCpuInstructionCacheRange(addr, size);
}
void System::ShutdownMainProcess() {
impl->ShutdownMainProcess();
void System::Shutdown() {
impl->Shutdown();
}
bool System::IsShuttingDown() const {

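One detail worth noting in the `Init` hunk above is the custom-RTC handling: what gets stored is a differential against host time, so a user-set clock keeps its offset rather than a fixed timestamp. A standalone sketch of that computation (simplified names):

#include <chrono>
#include <optional>

// Sketch of the custom-RTC differential logic above (illustrative, not the
// emulator's settings plumbing).
long long ComputeRtcDifferential(std::optional<long long> custom_rtc) {
    const auto now = std::chrono::system_clock::now().time_since_epoch();
    const auto current_time =
        std::chrono::duration_cast<std::chrono::seconds>(now).count();
    // If a custom RTC is set, store its offset from host time; otherwise zero.
    return custom_rtc.value_or(current_time) - current_time;
}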
View File

@@ -142,12 +142,6 @@ public:
System(System&&) = delete;
System& operator=(System&&) = delete;
/**
* Initializes the system
* This function will initialize core functionality used for system emulation
*/
void Initialize();
/**
* Run the OS and Application
* This function will start emulation and run the relevant devices
@@ -172,8 +166,8 @@ public:
void InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size);
/// Shutdown the main emulated process.
void ShutdownMainProcess();
/// Shutdown the emulated system.
void Shutdown();
/// Check if the core is shutting down.
[[nodiscard]] bool IsShuttingDown() const;

View File

@@ -40,9 +40,7 @@ struct CoreTiming::Event {
CoreTiming::CoreTiming()
: clock{Common::CreateBestMatchingClock(Hardware::BASE_CLOCK_RATE, Hardware::CNTFREQ)} {}
CoreTiming::~CoreTiming() {
Reset();
}
CoreTiming::~CoreTiming() = default;
void CoreTiming::ThreadEntry(CoreTiming& instance) {
constexpr char name[] = "HostTiming";
@@ -55,7 +53,6 @@ void CoreTiming::ThreadEntry(CoreTiming& instance) {
}
void CoreTiming::Initialize(std::function<void()>&& on_thread_init_) {
Reset();
on_thread_init = std::move(on_thread_init_);
event_fifo_id = 0;
shutting_down = false;
@@ -68,8 +65,17 @@ void CoreTiming::Initialize(std::function<void()>&& on_thread_init_) {
}
}
void CoreTiming::ClearPendingEvents() {
event_queue.clear();
void CoreTiming::Shutdown() {
paused = true;
shutting_down = true;
pause_event.Set();
event.Set();
if (timer_thread) {
timer_thread->join();
}
ClearPendingEvents();
timer_thread.reset();
has_started = false;
}
void CoreTiming::Pause(bool is_paused) {
@@ -190,6 +196,10 @@ u64 CoreTiming::GetClockTicks() const {
return CpuCyclesToClockCycles(ticks);
}
void CoreTiming::ClearPendingEvents() {
event_queue.clear();
}
void CoreTiming::RemoveEvent(const std::shared_ptr<EventType>& event_type) {
std::scoped_lock lock{basic_lock};
@@ -297,18 +307,6 @@ void CoreTiming::ThreadLoop() {
}
}
void CoreTiming::Reset() {
paused = true;
shutting_down = true;
pause_event.Set();
event.Set();
if (timer_thread) {
timer_thread->join();
}
timer_thread.reset();
has_started = false;
}
std::chrono::nanoseconds CoreTiming::GetGlobalTimeNs() const {
if (is_multicore) {
return clock->GetTimeNS();

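The `Shutdown` body above follows the usual stop-signal-then-join pattern for a worker thread: set the flags, wake the loop, join, then clear state. A self-contained sketch (simplified; the emulator's event types are omitted):

#include <atomic>
#include <optional>
#include <thread>

struct TimerHost {
    std::atomic<bool> shutting_down{false};
    std::optional<std::thread> timer_thread;

    void Shutdown() {
        shutting_down.store(true, std::memory_order_release); // ask the loop to exit
        // A real implementation also signals any events the loop blocks on.
        if (timer_thread) {
            timer_thread->join(); // wait for the loop to observe the flag
            timer_thread.reset();
        }
    }
};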
View File

@@ -61,14 +61,19 @@ public:
/// required to end slice - 1 and start slice 0 before the first cycle of code is executed.
void Initialize(std::function<void()>&& on_thread_init_);
/// Clear all pending events. This should ONLY be done on exit.
void ClearPendingEvents();
/// Tears down all timing related functionality.
void Shutdown();
/// Sets if emulation is multicore or single core, must be set before Initialize
void SetMulticore(bool is_multicore_) {
is_multicore = is_multicore_;
}
/// Check if it's using host timing.
bool IsHostTiming() const {
return is_multicore;
}
/// Pauses/Unpauses the execution of the timer thread.
void Pause(bool is_paused);
@@ -131,11 +136,12 @@ public:
private:
struct Event;
/// Clear all pending events. This should ONLY be done on exit.
void ClearPendingEvents();
static void ThreadEntry(CoreTiming& instance);
void ThreadLoop();
void Reset();
std::unique_ptr<Common::WallClock> clock;
s64 global_timer = 0;

View File

@@ -31,14 +31,12 @@ public:
DramMemoryMap::Base;
}
template <typename T>
T* GetPointer(PAddr addr) {
return reinterpret_cast<T*>(buffer.BackingBasePointer() + (addr - DramMemoryMap::Base));
u8* GetPointer(PAddr addr) {
return buffer.BackingBasePointer() + (addr - DramMemoryMap::Base);
}
template <typename T>
const T* GetPointer(PAddr addr) const {
return reinterpret_cast<const T*>(buffer.BackingBasePointer() + (addr - DramMemoryMap::Base));
const u8* GetPointer(PAddr addr) const {
return buffer.BackingBasePointer() + (addr - DramMemoryMap::Base);
}
Common::HostMemory buffer;

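The hunk above swaps the typed `GetPointer<T>` accessors for raw `u8*` ones; the template form being replaced is just offset arithmetic plus a cast. A minimal sketch (Dram is a hypothetical stand-in for the emulator's DeviceMemory):

#include <cstdint>
#include <vector>

struct Dram {
    static constexpr std::uintptr_t Base = 0x80000000;
    std::vector<std::uint8_t> buffer;

    template <typename T>
    T* GetPointer(std::uintptr_t addr) {
        // Translate a guest physical address into a typed host pointer.
        return reinterpret_cast<T*>(buffer.data() + (addr - Base));
    }

    template <typename T>
    const T* GetPointer(std::uintptr_t addr) const {
        return reinterpret_cast<const T*>(buffer.data() + (addr - Base));
    }
};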
View File

@@ -232,8 +232,8 @@ const std::vector<std::shared_ptr<NCA>>& XCI::GetNCAs() const {
std::shared_ptr<NCA> XCI::GetNCAByType(NCAContentType type) const {
const auto program_id = secure_partition->GetProgramTitleID();
const auto iter =
std::find_if(ncas.begin(), ncas.end(), [type, program_id](const std::shared_ptr<NCA>& nca) {
const auto iter = std::find_if(
ncas.begin(), ncas.end(), [this, type, program_id](const std::shared_ptr<NCA>& nca) {
return nca->GetType() == type && nca->GetTitleId() == program_id;
});
return iter == ncas.end() ? nullptr : *iter;

View File

@@ -1,7 +1,6 @@
// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/settings.h"
#include "common/string_util.h"
#include "common/swap.h"
#include "core/file_sys/control_metadata.h"
@@ -38,27 +37,6 @@ std::string LanguageEntry::GetDeveloperName() const {
developer_name.size());
}
constexpr std::array<Language, 18> language_to_codes = {{
Language::Japanese,
Language::AmericanEnglish,
Language::French,
Language::German,
Language::Italian,
Language::Spanish,
Language::Chinese,
Language::Korean,
Language::Dutch,
Language::Portuguese,
Language::Russian,
Language::Taiwanese,
Language::BritishEnglish,
Language::CanadianFrench,
Language::LatinAmericanSpanish,
Language::Chinese,
Language::Taiwanese,
Language::BrazilianPortuguese,
}};
NACP::NACP() = default;
NACP::NACP(VirtualFile file) {
@@ -67,13 +45,9 @@ NACP::NACP(VirtualFile file) {
NACP::~NACP() = default;
const LanguageEntry& NACP::GetLanguageEntry() const {
Language language = language_to_codes[Settings::values.language_index.GetValue()];
{
const auto& language_entry = raw.language_entries.at(static_cast<u8>(language));
if (!language_entry.GetApplicationName().empty())
return language_entry;
const LanguageEntry& NACP::GetLanguageEntry(Language language) const {
if (language != Language::Default) {
return raw.language_entries.at(static_cast<u8>(language));
}
for (const auto& language_entry : raw.language_entries) {
@@ -81,15 +55,16 @@ const LanguageEntry& NACP::GetLanguageEntry() const {
return language_entry;
}
return raw.language_entries.at(static_cast<u8>(Language::AmericanEnglish));
// Fallback to English
return GetLanguageEntry(Language::AmericanEnglish);
}
std::string NACP::GetApplicationName() const {
return GetLanguageEntry().GetApplicationName();
std::string NACP::GetApplicationName(Language language) const {
return GetLanguageEntry(language).GetApplicationName();
}
std::string NACP::GetDeveloperName() const {
return GetLanguageEntry().GetDeveloperName();
std::string NACP::GetDeveloperName(Language language) const {
return GetLanguageEntry(language).GetDeveloperName();
}
u64 NACP::GetTitleId() const {

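The language-entry lookup above resolves in stages: an explicitly requested language first, then the first populated entry, then American English as the last resort. A compact sketch of that fallback policy (simplified types; index 1 stands in for AmericanEnglish per the removed table):

#include <array>
#include <cstddef>
#include <string>

struct Entry {
    std::string application_name;
};

// Illustrative fallback policy, not the emulator's exact control flow.
const Entry& PickLanguageEntry(const std::array<Entry, 16>& entries, std::size_t preferred) {
    if (!entries[preferred].application_name.empty()) {
        return entries[preferred]; // preferred language has data
    }
    for (const auto& entry : entries) {
        if (!entry.application_name.empty()) {
            return entry; // otherwise, first populated entry
        }
    }
    return entries[1]; // final fallback: American English
}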
View File

@@ -101,9 +101,9 @@ public:
explicit NACP(VirtualFile file);
~NACP();
const LanguageEntry& GetLanguageEntry() const;
std::string GetApplicationName() const;
std::string GetDeveloperName() const;
const LanguageEntry& GetLanguageEntry(Language language = Language::Default) const;
std::string GetApplicationName(Language language = Language::Default) const;
std::string GetDeveloperName(Language language = Language::Default) const;
u64 GetTitleId() const;
u64 GetDLCBaseTitleId() const;
std::string GetVersionString() const;

View File

@@ -127,7 +127,7 @@ void ProgramMetadata::LoadManual(bool is_64_bit, ProgramAddressSpaceType address
}
bool ProgramMetadata::Is64BitProgram() const {
return npdm_header.has_64_bit_instructions.As<bool>();
return npdm_header.has_64_bit_instructions;
}
ProgramAddressSpaceType ProgramMetadata::GetAddressSpaceType() const {

View File

@@ -5,7 +5,6 @@
#include "common/assert.h"
#include "common/common_types.h"
#include "common/logging/log.h"
#include "common/uuid.h"
#include "core/core.h"
#include "core/file_sys/savedata_factory.h"
#include "core/file_sys/vfs.h"
@@ -60,36 +59,6 @@ bool ShouldSaveDataBeAutomaticallyCreated(SaveDataSpaceId space, const SaveDataA
attr.title_id == 0 && attr.save_id == 0);
}
std::string GetFutureSaveDataPath(SaveDataSpaceId space_id, SaveDataType type, u64 title_id,
u128 user_id) {
// Only detect nand user saves.
const auto space_id_path = [space_id]() -> std::string_view {
switch (space_id) {
case SaveDataSpaceId::NandUser:
return "/user/save";
default:
return "";
}
}();
if (space_id_path.empty()) {
return "";
}
Common::UUID uuid;
std::memcpy(uuid.uuid.data(), user_id.data(), sizeof(Common::UUID));
// Only detect account/device saves from the future location.
switch (type) {
case SaveDataType::SaveData:
return fmt::format("{}/account/{}/{:016X}/1", space_id_path, uuid.RawString(), title_id);
case SaveDataType::DeviceSaveData:
return fmt::format("{}/device/{:016X}/1", space_id_path, title_id);
default:
return "";
}
}
} // Anonymous namespace
std::string SaveDataAttribute::DebugInfo() const {
@@ -113,7 +82,7 @@ ResultVal<VirtualDir> SaveDataFactory::Create(SaveDataSpaceId space,
PrintSaveDataAttributeWarnings(meta);
const auto save_directory =
GetFullPath(system, dir, space, meta.type, meta.title_id, meta.user_id, meta.save_id);
GetFullPath(system, space, meta.type, meta.title_id, meta.user_id, meta.save_id);
auto out = dir->CreateDirectoryRelative(save_directory);
@@ -130,7 +99,7 @@ ResultVal<VirtualDir> SaveDataFactory::Open(SaveDataSpaceId space,
const SaveDataAttribute& meta) const {
const auto save_directory =
GetFullPath(system, dir, space, meta.type, meta.title_id, meta.user_id, meta.save_id);
GetFullPath(system, space, meta.type, meta.title_id, meta.user_id, meta.save_id);
auto out = dir->GetDirectoryRelative(save_directory);
@@ -165,9 +134,9 @@ std::string SaveDataFactory::GetSaveDataSpaceIdPath(SaveDataSpaceId space) {
}
}
std::string SaveDataFactory::GetFullPath(Core::System& system, VirtualDir dir,
SaveDataSpaceId space, SaveDataType type, u64 title_id,
u128 user_id, u64 save_id) {
std::string SaveDataFactory::GetFullPath(Core::System& system, SaveDataSpaceId space,
SaveDataType type, u64 title_id, u128 user_id,
u64 save_id) {
// According to switchbrew, if a save is of type SaveData and the title id field is 0, it should
// be interpreted as the title id of the current process.
if (type == SaveDataType::SaveData || type == SaveDataType::DeviceSaveData) {
@@ -176,17 +145,6 @@ std::string SaveDataFactory::GetFullPath(Core::System& system, VirtualDir dir,
}
}
// For compat with a future impl.
if (std::string future_path =
GetFutureSaveDataPath(space, type, title_id & ~(0xFFULL), user_id);
!future_path.empty()) {
// Check if this location exists, and prefer it over the old.
if (const auto future_dir = dir->GetDirectoryRelative(future_path); future_dir != nullptr) {
LOG_INFO(Service_FS, "Using save at new location: {}", future_path);
return future_path;
}
}
std::string out = GetSaveDataSpaceIdPath(space);
switch (type) {
@@ -209,8 +167,7 @@ std::string SaveDataFactory::GetFullPath(Core::System& system, VirtualDir dir,
SaveDataSize SaveDataFactory::ReadSaveDataSize(SaveDataType type, u64 title_id,
u128 user_id) const {
const auto path =
GetFullPath(system, dir, SaveDataSpaceId::NandUser, type, title_id, user_id, 0);
const auto path = GetFullPath(system, SaveDataSpaceId::NandUser, type, title_id, user_id, 0);
const auto relative_dir = GetOrCreateDirectoryRelative(dir, path);
const auto size_file = relative_dir->GetFile(SAVE_DATA_SIZE_FILENAME);
@@ -228,8 +185,7 @@ SaveDataSize SaveDataFactory::ReadSaveDataSize(SaveDataType type, u64 title_id,
void SaveDataFactory::WriteSaveDataSize(SaveDataType type, u64 title_id, u128 user_id,
SaveDataSize new_value) const {
const auto path =
GetFullPath(system, dir, SaveDataSpaceId::NandUser, type, title_id, user_id, 0);
const auto path = GetFullPath(system, SaveDataSpaceId::NandUser, type, title_id, user_id, 0);
const auto relative_dir = GetOrCreateDirectoryRelative(dir, path);
const auto size_file = relative_dir->CreateFile(SAVE_DATA_SIZE_FILENAME);

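The removed `GetFutureSaveDataPath` above built paths like `/user/save/account/<uuid>/<title>/1`. A minimal sketch of the two format patterns it used (fmt assumed available, names simplified):

#include <fmt/format.h>
#include <string>
#include <string_view>

// Illustrative only; mirrors the two removed format calls.
std::string AccountSavePath(std::string_view space, std::string_view uuid,
                            unsigned long long title_id) {
    return fmt::format("{}/account/{}/{:016X}/1", space, uuid, title_id);
}

std::string DeviceSavePath(std::string_view space, unsigned long long title_id) {
    return fmt::format("{}/device/{:016X}/1", space, title_id);
}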
View File

@@ -95,8 +95,8 @@ public:
VirtualDir GetSaveDataSpaceDirectory(SaveDataSpaceId space) const;
static std::string GetSaveDataSpaceIdPath(SaveDataSpaceId space);
static std::string GetFullPath(Core::System& system, VirtualDir dir, SaveDataSpaceId space,
SaveDataType type, u64 title_id, u128 user_id, u64 save_id);
static std::string GetFullPath(Core::System& system, SaveDataSpaceId space, SaveDataType type,
u64 title_id, u128 user_id, u64 save_id);
SaveDataSize ReadSaveDataSize(SaveDataType type, u64 title_id, u128 user_id) const;
void WriteSaveDataSize(SaveDataType type, u64 title_id, u128 user_id,

View File

@@ -970,7 +970,14 @@ bool EmulatedController::SetVibration(std::size_t device_index, VibrationValue v
Common::Input::VibrationError::None;
}
bool EmulatedController::IsVibrationEnabled(std::size_t device_index) {
bool EmulatedController::TestVibration(std::size_t device_index) {
if (device_index >= output_devices.size()) {
return false;
}
if (!output_devices[device_index]) {
return false;
}
const auto player_index = NpadIdTypeToIndex(npad_id_type);
const auto& player = Settings::values.players.GetValue()[player_index];
@@ -978,15 +985,31 @@ bool EmulatedController::IsVibrationEnabled(std::size_t device_index) {
return false;
}
if (device_index >= output_devices.size()) {
return false;
}
const Common::Input::VibrationStatus test_vibration = {
.low_amplitude = 0.001f,
.low_frequency = DEFAULT_VIBRATION_VALUE.low_frequency,
.high_amplitude = 0.001f,
.high_frequency = DEFAULT_VIBRATION_VALUE.high_frequency,
.type = Common::Input::VibrationAmplificationType::Test,
};
if (!output_devices[device_index]) {
return false;
}
const Common::Input::VibrationStatus zero_vibration = {
.low_amplitude = DEFAULT_VIBRATION_VALUE.low_amplitude,
.low_frequency = DEFAULT_VIBRATION_VALUE.low_frequency,
.high_amplitude = DEFAULT_VIBRATION_VALUE.high_amplitude,
.high_frequency = DEFAULT_VIBRATION_VALUE.high_frequency,
.type = Common::Input::VibrationAmplificationType::Test,
};
return output_devices[device_index]->IsVibrationEnabled();
// Send a slight vibration to test for rumble support
output_devices[device_index]->SetVibration(test_vibration);
// Wait for about 15ms to ensure the controller is ready for the stop command
std::this_thread::sleep_for(std::chrono::milliseconds(15));
// Stop any vibration and return the result
return output_devices[device_index]->SetVibration(zero_vibration) ==
Common::Input::VibrationError::None;
}
bool EmulatedController::SetPollingMode(Common::Input::PollingMode polling_mode) {
@@ -1025,7 +1048,6 @@ bool EmulatedController::HasNfc() const {
case NpadStyleIndex::JoyconRight:
case NpadStyleIndex::JoyconDual:
case NpadStyleIndex::ProController:
case NpadStyleIndex::Handheld:
break;
default:
return false;
@@ -1136,27 +1158,27 @@ bool EmulatedController::IsControllerSupported(bool use_temporary_value) const {
const auto type = is_configuring && use_temporary_value ? tmp_npad_type : npad_type;
switch (type) {
case NpadStyleIndex::ProController:
return supported_style_tag.fullkey.As<bool>();
return supported_style_tag.fullkey;
case NpadStyleIndex::Handheld:
return supported_style_tag.handheld.As<bool>();
return supported_style_tag.handheld;
case NpadStyleIndex::JoyconDual:
return supported_style_tag.joycon_dual.As<bool>();
return supported_style_tag.joycon_dual;
case NpadStyleIndex::JoyconLeft:
return supported_style_tag.joycon_left.As<bool>();
return supported_style_tag.joycon_left;
case NpadStyleIndex::JoyconRight:
return supported_style_tag.joycon_right.As<bool>();
return supported_style_tag.joycon_right;
case NpadStyleIndex::GameCube:
return supported_style_tag.gamecube.As<bool>();
return supported_style_tag.gamecube;
case NpadStyleIndex::Pokeball:
return supported_style_tag.palma.As<bool>();
return supported_style_tag.palma;
case NpadStyleIndex::NES:
return supported_style_tag.lark.As<bool>();
return supported_style_tag.lark;
case NpadStyleIndex::SNES:
return supported_style_tag.lucia.As<bool>();
return supported_style_tag.lucia;
case NpadStyleIndex::N64:
return supported_style_tag.lagoon.As<bool>();
return supported_style_tag.lagoon;
case NpadStyleIndex::SegaGenesis:
return supported_style_tag.lager.As<bool>();
return supported_style_tag.lager;
default:
return false;
}
@@ -1212,6 +1234,12 @@ bool EmulatedController::IsConnected(bool get_temporary_value) const {
return is_connected;
}
bool EmulatedController::IsVibrationEnabled() const {
const auto player_index = NpadIdTypeToIndex(npad_id_type);
const auto& player = Settings::values.players.GetValue()[player_index];
return player.vibration_enabled;
}
NpadIdType EmulatedController::GetNpadIdType() const {
std::scoped_lock lock{mutex};
return npad_id_type;

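The `TestVibration` body above probes for rumble support by sending a barely perceptible pulse, waiting briefly, then sending a stop command and reporting whether the device accepted it. A self-contained sketch (hypothetical minimal device interface; the emulator's real one differs):

#include <chrono>
#include <thread>

struct Vibration {
    float low_amplitude, low_frequency, high_amplitude, high_frequency;
};

struct OutputDevice {
    virtual ~OutputDevice() = default;
    virtual bool SetVibration(const Vibration& v) = 0;
};

bool ProbeRumble(OutputDevice& dev) {
    // Send a barely perceptible pulse to see if the device reacts.
    if (!dev.SetVibration({0.001f, 160.0f, 0.001f, 320.0f})) {
        return false;
    }
    // Give the controller ~15 ms before issuing the stop command.
    std::this_thread::sleep_for(std::chrono::milliseconds(15));
    // Stop the pulse; acceptance of the stop implies rumble support.
    return dev.SetVibration({0.0f, 160.0f, 0.0f, 320.0f});
}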
View File

@@ -206,6 +206,9 @@ public:
*/
bool IsConnected(bool get_temporary_value = false) const;
/// Returns true if vibration is enabled
bool IsVibrationEnabled() const;
/// Removes all callbacks created from input devices
void UnloadInput();
@@ -336,7 +339,7 @@ public:
* Sends a small vibration to the output device
* @return true if SetVibration was successful
*/
bool IsVibrationEnabled(std::size_t device_index);
bool TestVibration(std::size_t device_index);
/**
* Sets the desired data to be polled from a controller

View File

@@ -86,13 +86,13 @@ public:
u32 num_domain_objects{};
const bool always_move_handles{
(static_cast<u32>(flags) & static_cast<u32>(Flags::AlwaysMoveHandles)) != 0};
if (!ctx.GetManager()->IsDomain() || always_move_handles) {
if (!ctx.Session()->IsDomain() || always_move_handles) {
num_handles_to_move = num_objects_to_move;
} else {
num_domain_objects = num_objects_to_move;
}
if (ctx.GetManager()->IsDomain()) {
if (ctx.Session()->IsDomain()) {
raw_data_size +=
static_cast<u32>(sizeof(DomainMessageHeader) / sizeof(u32) + num_domain_objects);
ctx.write_size += num_domain_objects;
@@ -125,7 +125,7 @@ public:
if (!ctx.IsTipc()) {
AlignWithPadding();
if (ctx.GetManager()->IsDomain() && ctx.HasDomainMessageHeader()) {
if (ctx.Session()->IsDomain() && ctx.HasDomainMessageHeader()) {
IPC::DomainMessageHeader domain_header{};
domain_header.num_objects = num_domain_objects;
PushRaw(domain_header);
@@ -145,18 +145,18 @@ public:
template <class T>
void PushIpcInterface(std::shared_ptr<T> iface) {
if (context->GetManager()->IsDomain()) {
if (context->Session()->IsDomain()) {
context->AddDomainObject(std::move(iface));
} else {
kernel.CurrentProcess()->GetResourceLimit()->Reserve(
Kernel::LimitableResource::SessionCountMax, 1);
Kernel::LimitableResource::Sessions, 1);
auto* session = Kernel::KSession::Create(kernel);
session->Initialize(nullptr, iface->GetServiceName());
iface->RegisterSession(&session->GetServerSession(),
std::make_shared<Kernel::SessionRequestManager>(kernel));
session->Initialize(nullptr, iface->GetServiceName(),
std::make_shared<Kernel::SessionRequestManager>(kernel));
context->AddMoveObject(&session->GetClientSession());
iface->ClientConnected(&session->GetServerSession());
}
}
@@ -386,7 +386,7 @@ public:
template <class T>
std::weak_ptr<T> PopIpcInterface() {
ASSERT(context->GetManager()->IsDomain());
ASSERT(context->Session()->IsDomain());
ASSERT(context->GetDomainMessageHeader().input_object_count > 0);
return context->GetDomainHandler<T>(Pop<u32>() - 1);
}
@@ -405,7 +405,7 @@ inline s32 RequestParser::Pop() {
}
// Ignore the -Wclass-memaccess warning on memcpy for non-trivially default constructible objects.
#if defined(__GNUC__) && !defined(__clang__) && !defined(__INTEL_COMPILER)
#if defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wclass-memaccess"
#endif
@@ -416,7 +416,7 @@ void RequestParser::PopRaw(T& value) {
std::memcpy(&value, cmdbuf + index, sizeof(T));
index += (sizeof(T) + 3) / 4; // round up to word length
}
#if defined(__GNUC__) && !defined(__clang__) && !defined(__INTEL_COMPILER)
#if defined(__GNUC__)
#pragma GCC diagnostic pop
#endif

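For context, the `PopRaw` hunk above copies raw words out of the IPC command buffer and advances the cursor rounded up to 32-bit words. Standalone, the idea is (sketch; `T` assumed trivially copyable):

#include <cstddef>
#include <cstdint>
#include <cstring>

// Illustrative version of the word-aligned raw pop above.
template <typename T>
T PopRaw(const std::uint32_t* cmdbuf, std::size_t& index) {
    T value{};
    std::memcpy(&value, cmdbuf + index, sizeof(T));
    index += (sizeof(T) + 3) / 4; // advance, rounded up to word length
    return value;
}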
View File

@@ -8,10 +8,6 @@
namespace Kernel::Board::Nintendo::Nx {
class KSystemControl {
public:
// This can be overridden as needed.
static constexpr size_t SecureAppletMemorySize = 4 * 1024 * 1024; // 4_MB
public:
class Init {
public:

View File

@@ -49,26 +49,4 @@ bool GlobalSchedulerContext::IsLocked() const {
return scheduler_lock.IsLockedByCurrentThread();
}
void GlobalSchedulerContext::RegisterDummyThreadForWakeup(KThread* thread) {
ASSERT(IsLocked());
woken_dummy_threads.insert(thread);
}
void GlobalSchedulerContext::UnregisterDummyThreadForWakeup(KThread* thread) {
ASSERT(IsLocked());
woken_dummy_threads.erase(thread);
}
void GlobalSchedulerContext::WakeupWaitingDummyThreads() {
ASSERT(IsLocked());
for (auto* thread : woken_dummy_threads) {
thread->DummyThreadEndWait();
}
woken_dummy_threads.clear();
}
} // namespace Kernel

View File

@@ -4,7 +4,6 @@
#pragma once
#include <atomic>
#include <set>
#include <vector>
#include "common/common_types.h"
@@ -59,10 +58,6 @@ public:
/// Returns true if the global scheduler lock is acquired
bool IsLocked() const;
void UnregisterDummyThreadForWakeup(KThread* thread);
void RegisterDummyThreadForWakeup(KThread* thread);
void WakeupWaitingDummyThreads();
[[nodiscard]] LockType& SchedulerLock() {
return scheduler_lock;
}
@@ -81,9 +76,6 @@ private:
KSchedulerPriorityQueue priority_queue;
LockType scheduler_lock;
/// Lists dummy threads pending wakeup on lock release
std::set<KThread*> woken_dummy_threads;
/// Lists all thread ids that aren't deleted/etc.
std::vector<KThread*> thread_list;
std::mutex global_list_guard;

View File

@@ -16,39 +16,27 @@
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_handle_table.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_server_port.h"
#include "core/hle/kernel/k_server_session.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/service_thread.h"
#include "core/memory.h"
namespace Kernel {
SessionRequestHandler::SessionRequestHandler(KernelCore& kernel_, const char* service_name_,
ServiceThreadType thread_type)
: kernel{kernel_}, service_thread{thread_type == ServiceThreadType::CreateNew
? kernel.CreateServiceThread(service_name_)
: kernel.GetDefaultServiceThread()} {}
: kernel{kernel_} {
if (thread_type == ServiceThreadType::CreateNew) {
service_thread = kernel.CreateServiceThread(service_name_);
} else {
service_thread = kernel.GetDefaultServiceThread();
}
}
SessionRequestHandler::~SessionRequestHandler() {
kernel.ReleaseServiceThread(service_thread);
}
void SessionRequestHandler::AcceptSession(KServerPort* server_port) {
auto* server_session = server_port->AcceptSession();
ASSERT(server_session != nullptr);
RegisterSession(server_session, std::make_shared<SessionRequestManager>(kernel));
}
void SessionRequestHandler::RegisterSession(KServerSession* server_session,
std::shared_ptr<SessionRequestManager> manager) {
manager->SetSessionHandler(shared_from_this());
service_thread.RegisterServerSession(server_session, manager);
server_session->Close();
}
SessionRequestManager::SessionRequestManager(KernelCore& kernel_) : kernel{kernel_} {}
SessionRequestManager::~SessionRequestManager() = default;
@@ -68,77 +56,15 @@ bool SessionRequestManager::HasSessionRequestHandler(const HLERequestContext& co
}
}
Result SessionRequestManager::CompleteSyncRequest(KServerSession* server_session,
HLERequestContext& context) {
Result result = ResultSuccess;
void SessionRequestHandler::ClientConnected(KServerSession* session) {
session->ClientConnected(shared_from_this());
// If the session has been converted to a domain, handle the domain request
if (this->HasSessionRequestHandler(context)) {
if (IsDomain() && context.HasDomainMessageHeader()) {
result = HandleDomainSyncRequest(server_session, context);
// If there is no domain header, the regular session handler is used
} else if (this->HasSessionHandler()) {
// If this manager has an associated HLE handler, forward the request to it.
result = this->SessionHandler().HandleSyncRequest(*server_session, context);
}
} else {
ASSERT_MSG(false, "Session handler is invalid, stubbing response!");
IPC::ResponseBuilder rb(context, 2);
rb.Push(ResultSuccess);
}
if (convert_to_domain) {
ASSERT_MSG(!IsDomain(), "ServerSession is already a domain instance.");
this->ConvertToDomain();
convert_to_domain = false;
}
return result;
// Ensure our server session is tracked globally.
kernel.RegisterServerObject(session);
}
Result SessionRequestManager::HandleDomainSyncRequest(KServerSession* server_session,
HLERequestContext& context) {
if (!context.HasDomainMessageHeader()) {
return ResultSuccess;
}
// Set domain handlers in HLE context, used for domain objects (IPC interfaces) as inputs
ASSERT(context.GetManager().get() == this);
// If there is a DomainMessageHeader, then this is CommandType "Request"
const auto& domain_message_header = context.GetDomainMessageHeader();
const u32 object_id{domain_message_header.object_id};
switch (domain_message_header.command) {
case IPC::DomainMessageHeader::CommandType::SendMessage:
if (object_id > this->DomainHandlerCount()) {
LOG_CRITICAL(IPC,
"object_id {} is too big! This probably means a recent service call "
"needed to return a new interface!",
object_id);
ASSERT(false);
return ResultSuccess; // Ignore error if asserts are off
}
if (auto strong_ptr = this->DomainHandler(object_id - 1).lock()) {
return strong_ptr->HandleSyncRequest(*server_session, context);
} else {
ASSERT(false);
return ResultSuccess;
}
case IPC::DomainMessageHeader::CommandType::CloseVirtualHandle: {
LOG_DEBUG(IPC, "CloseVirtualHandle, object_id=0x{:08X}", object_id);
this->CloseDomainHandler(object_id - 1);
IPC::ResponseBuilder rb{context, 2};
rb.Push(ResultSuccess);
return ResultSuccess;
}
}
LOG_CRITICAL(IPC, "Unknown domain command={}", domain_message_header.command.Value());
ASSERT(false);
return ResultSuccess;
void SessionRequestHandler::ClientDisconnected(KServerSession* session) {
session->ClientDisconnected();
}
HLERequestContext::HLERequestContext(KernelCore& kernel_, Core::Memory::Memory& memory_,
@@ -200,7 +126,7 @@ void HLERequestContext::ParseCommandBuffer(const KHandleTable& handle_table, u32
// Padding to align to 16 bytes
rp.AlignWithPadding();
if (GetManager()->IsDomain() &&
if (Session()->IsDomain() &&
((command_header->type == IPC::CommandType::Request ||
command_header->type == IPC::CommandType::RequestWithContext) ||
!incoming)) {
@@ -209,7 +135,7 @@ void HLERequestContext::ParseCommandBuffer(const KHandleTable& handle_table, u32
if (incoming || domain_message_header) {
domain_message_header = rp.PopRaw<IPC::DomainMessageHeader>();
} else {
if (GetManager()->IsDomain()) {
if (Session()->IsDomain()) {
LOG_WARNING(IPC, "Domain request has no DomainMessageHeader!");
}
}
@@ -302,11 +228,12 @@ Result HLERequestContext::WriteToOutgoingCommandBuffer(KThread& requesting_threa
// Write the domain objects to the command buffer, these go after the raw untranslated data.
// TODO(Subv): This completely ignores C buffers.
if (GetManager()->IsDomain()) {
if (Session()->IsDomain()) {
current_offset = domain_offset - static_cast<u32>(outgoing_domain_objects.size());
for (auto& object : outgoing_domain_objects) {
GetManager()->AppendDomainHandler(std::move(object));
cmd_buf[current_offset++] = static_cast<u32_le>(GetManager()->DomainHandlerCount());
for (const auto& object : outgoing_domain_objects) {
server_session->AppendDomainHandler(object);
cmd_buf[current_offset++] =
static_cast<u32_le>(server_session->NumDomainRequestHandlers());
}
}

View File

@@ -45,13 +45,11 @@ class KAutoObject;
class KernelCore;
class KEvent;
class KHandleTable;
class KServerPort;
class KProcess;
class KServerSession;
class KThread;
class KReadableEvent;
class KSession;
class SessionRequestManager;
class ServiceThread;
enum class ThreadWakeupReason;
@@ -78,17 +76,27 @@ public:
virtual Result HandleSyncRequest(Kernel::KServerSession& session,
Kernel::HLERequestContext& context) = 0;
void AcceptSession(KServerPort* server_port);
void RegisterSession(KServerSession* server_session,
std::shared_ptr<SessionRequestManager> manager);
/**
* Signals that a client has just connected to this HLE handler and keeps the
* associated ServerSession alive for the duration of the connection.
* @param server_session Owning pointer to the ServerSession associated with the connection.
*/
void ClientConnected(KServerSession* session);
ServiceThread& GetServiceThread() const {
/**
* Signals that a client has just disconnected from this HLE handler and releases the
* associated ServerSession.
* @param server_session ServerSession associated with the connection.
*/
void ClientDisconnected(KServerSession* session);
std::weak_ptr<ServiceThread> GetServiceThread() const {
return service_thread;
}
protected:
KernelCore& kernel;
ServiceThread& service_thread;
std::weak_ptr<ServiceThread> service_thread;
};
using SessionRequestHandlerWeakPtr = std::weak_ptr<SessionRequestHandler>;
@@ -113,10 +121,6 @@ public:
is_domain = true;
}
void ConvertToDomainOnRequestEnd() {
convert_to_domain = true;
}
std::size_t DomainHandlerCount() const {
return domain_handlers.size();
}
@@ -154,17 +158,13 @@ public:
session_handler = std::move(handler);
}
ServiceThread& GetServiceThread() const {
std::weak_ptr<ServiceThread> GetServiceThread() const {
return session_handler->GetServiceThread();
}
bool HasSessionRequestHandler(const HLERequestContext& context) const;
Result HandleDomainSyncRequest(KServerSession* server_session, HLERequestContext& context);
Result CompleteSyncRequest(KServerSession* server_session, HLERequestContext& context);
private:
bool convert_to_domain{};
bool is_domain{};
SessionRequestHandlerPtr session_handler;
std::vector<SessionRequestHandlerPtr> domain_handlers;
@@ -295,7 +295,7 @@ public:
*/
template <typename T, typename = std::enable_if_t<!std::is_pointer_v<T>>>
std::size_t WriteBuffer(const T& data, std::size_t buffer_index = 0) const {
if constexpr (Common::IsContiguousContainer<T>) {
if constexpr (Common::IsSTLContainer<T>) {
using ContiguousType = typename T::value_type;
static_assert(std::is_trivially_copyable_v<ContiguousType>,
"Container to WriteBuffer must contain trivially copyable objects");
@@ -341,11 +341,11 @@ public:
template <typename T>
std::shared_ptr<T> GetDomainHandler(std::size_t index) const {
return std::static_pointer_cast<T>(GetManager()->DomainHandler(index).lock());
return std::static_pointer_cast<T>(manager.lock()->DomainHandler(index).lock());
}
void SetSessionRequestManager(std::weak_ptr<SessionRequestManager> manager_) {
manager = manager_;
manager = std::move(manager_);
}
std::string Description() const;
@@ -354,10 +354,6 @@ public:
return *thread;
}
std::shared_ptr<SessionRequestManager> GetManager() const {
return manager.lock();
}
private:
friend class IPC::ResponseBuilder;
@@ -391,7 +387,7 @@ private:
u32 handles_offset{};
u32 domain_offset{};
std::weak_ptr<SessionRequestManager> manager{};
std::weak_ptr<SessionRequestManager> manager;
KernelCore& kernel;
Core::Memory::Memory& memory;

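The `WriteBuffer` change above toggles the compile-time container trait (`IsContiguousContainer` vs `IsSTLContainer`); the underlying check maps cleanly onto a C++20 concept. A sketch with illustrative names (not the emulator's traits):

#include <concepts>
#include <cstddef>
#include <ranges>

// Hypothetical concept standing in for the trait used above.
template <typename T>
concept ContiguousContainer = std::ranges::contiguous_range<T>;

template <typename T>
std::size_t BufferByteSize(const T& data) {
    if constexpr (ContiguousContainer<T>) {
        // Contiguous containers are written element-wise from their backing store.
        return std::ranges::size(data) * sizeof(std::ranges::range_value_t<T>);
    } else {
        return sizeof(T); // single trivially copyable object
    }
}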
View File

@@ -10,9 +10,7 @@
#include "core/hardware_properties.h"
#include "core/hle/kernel/init/init_slab_setup.h"
#include "core/hle/kernel/k_code_memory.h"
#include "core/hle/kernel/k_debug.h"
#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_event_info.h"
#include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_memory_manager.h"
#include "core/hle/kernel/k_page_buffer.h"
@@ -20,11 +18,9 @@
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_session.h"
#include "core/hle/kernel/k_session_request.h"
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/kernel/k_shared_memory_info.h"
#include "core/hle/kernel/k_system_control.h"
#include "core/hle/kernel/k_system_resource.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_thread_local_page.h"
#include "core/hle/kernel/k_transfer_memory.h"
@@ -38,7 +34,6 @@ namespace Kernel::Init {
HANDLER(KThread, (SLAB_COUNT(KThread)), ##__VA_ARGS__) \
HANDLER(KEvent, (SLAB_COUNT(KEvent)), ##__VA_ARGS__) \
HANDLER(KPort, (SLAB_COUNT(KPort)), ##__VA_ARGS__) \
HANDLER(KSessionRequest, (SLAB_COUNT(KSession) * 2), ##__VA_ARGS__) \
HANDLER(KSharedMemory, (SLAB_COUNT(KSharedMemory)), ##__VA_ARGS__) \
HANDLER(KSharedMemoryInfo, (SLAB_COUNT(KSharedMemory) * 8), ##__VA_ARGS__) \
HANDLER(KTransferMemory, (SLAB_COUNT(KTransferMemory)), ##__VA_ARGS__) \
@@ -47,10 +42,7 @@ namespace Kernel::Init {
HANDLER(KThreadLocalPage, \
(SLAB_COUNT(KProcess) + (SLAB_COUNT(KProcess) + SLAB_COUNT(KThread)) / 8), \
##__VA_ARGS__) \
HANDLER(KResourceLimit, (SLAB_COUNT(KResourceLimit)), ##__VA_ARGS__) \
HANDLER(KEventInfo, (SLAB_COUNT(KThread) + SLAB_COUNT(KDebug)), ##__VA_ARGS__) \
HANDLER(KDebug, (SLAB_COUNT(KDebug)), ##__VA_ARGS__) \
HANDLER(KSecureSystemResource, (SLAB_COUNT(KProcess)), ##__VA_ARGS__)
HANDLER(KResourceLimit, (SLAB_COUNT(KResourceLimit)), ##__VA_ARGS__)
namespace {
@@ -79,20 +71,8 @@ constexpr size_t SlabCountKResourceLimit = 5;
constexpr size_t SlabCountKDebug = Core::Hardware::NUM_CPU_CORES;
constexpr size_t SlabCountKIoPool = 1;
constexpr size_t SlabCountKIoRegion = 6;
constexpr size_t SlabcountKSessionRequestMappings = 40;
constexpr size_t SlabCountExtraKThread = (1024 + 256 + 256) - SlabCountKThread;
namespace test {
static_assert(KernelPageBufferHeapSize ==
2 * PageSize + (SlabCountKProcess + SlabCountKThread +
(SlabCountKProcess + SlabCountKThread) / 8) *
PageSize);
static_assert(KernelPageBufferAdditionalSize ==
(SlabCountExtraKThread + (SlabCountExtraKThread / 8)) * PageSize);
} // namespace test
constexpr size_t SlabCountExtraKThread = 160;
/// Helper function to translate from the slab virtual address to the reserved location in physical
/// memory.
@@ -114,8 +94,8 @@ VAddr InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout, VAd
// TODO(bunnei): Fix this once we support the kernel virtual memory layout.
if (size > 0) {
void* backing_kernel_memory{system.DeviceMemory().GetPointer<void>(
TranslateSlabAddrToPhysical(memory_layout, start))};
void* backing_kernel_memory{
system.DeviceMemory().GetPointer(TranslateSlabAddrToPhysical(memory_layout, start))};
const KMemoryRegion* region = memory_layout.FindVirtual(start + size - 1);
ASSERT(region != nullptr);
@@ -127,7 +107,7 @@ VAddr InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout, VAd
}
size_t CalculateSlabHeapGapSize() {
constexpr size_t KernelSlabHeapGapSize = 2_MiB - 320_KiB;
constexpr size_t KernelSlabHeapGapSize = 2_MiB - 296_KiB;
static_assert(KernelSlabHeapGapSize <= KernelSlabHeapGapsSizeMax);
return KernelSlabHeapGapSize;
}
@@ -152,7 +132,6 @@ KSlabResourceCounts KSlabResourceCounts::CreateDefault() {
.num_KDebug = SlabCountKDebug,
.num_KIoPool = SlabCountKIoPool,
.num_KIoRegion = SlabCountKIoRegion,
.num_KSessionRequestMappings = SlabcountKSessionRequestMappings,
};
}
@@ -183,6 +162,29 @@ size_t CalculateTotalSlabHeapSize(const KernelCore& kernel) {
return size;
}
void InitializeKPageBufferSlabHeap(Core::System& system) {
auto& kernel = system.Kernel();
const auto& counts = kernel.SlabResourceCounts();
const size_t num_pages =
counts.num_KProcess + counts.num_KThread + (counts.num_KProcess + counts.num_KThread) / 8;
const size_t slab_size = num_pages * PageSize;
// Reserve memory from the system resource limit.
ASSERT(kernel.GetSystemResourceLimit()->Reserve(LimitableResource::PhysicalMemory, slab_size));
// Allocate memory for the slab.
constexpr auto AllocateOption = KMemoryManager::EncodeOption(
KMemoryManager::Pool::System, KMemoryManager::Direction::FromFront);
const PAddr slab_address =
kernel.MemoryManager().AllocateAndOpenContinuous(num_pages, 1, AllocateOption);
ASSERT(slab_address != 0);
// Initialize the slabheap.
KPageBuffer::InitializeSlabHeap(kernel, system.DeviceMemory().GetPointer(slab_address),
slab_size);
}
void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
auto& kernel = system.Kernel();
@@ -243,7 +245,6 @@ void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
// If we somehow get an invalid type, abort.
default:
ASSERT_MSG(false, "Unknown slab type: {}", slab_types[i]);
break;
}
// If we've hit the end of a gap, free it.
@@ -255,30 +256,3 @@ void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
}
} // namespace Kernel::Init
namespace Kernel {
void KPageBufferSlabHeap::Initialize(Core::System& system) {
auto& kernel = system.Kernel();
const auto& counts = kernel.SlabResourceCounts();
const size_t num_pages =
counts.num_KProcess + counts.num_KThread + (counts.num_KProcess + counts.num_KThread) / 8;
const size_t slab_size = num_pages * PageSize;
// Reserve memory from the system resource limit.
ASSERT(
kernel.GetSystemResourceLimit()->Reserve(LimitableResource::PhysicalMemoryMax, slab_size));
// Allocate memory for the slab.
constexpr auto AllocateOption = KMemoryManager::EncodeOption(
KMemoryManager::Pool::System, KMemoryManager::Direction::FromFront);
const PAddr slab_address =
kernel.MemoryManager().AllocateAndOpenContinuous(num_pages, 1, AllocateOption);
ASSERT(slab_address != 0);
// Initialize the slabheap.
KPageBuffer::InitializeSlabHeap(kernel, system.DeviceMemory().GetPointer<void>(slab_address),
slab_size);
}
} // namespace Kernel

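The page-buffer slab sizing in the hunks above is one page per process and per thread, plus a one-eighth margin — the rule the removed `static_assert`s in the `test` namespace pin down. As arithmetic (counts here are illustrative, not the kernel's):

#include <cstddef>

// Sketch of the sizing rule used by the slab setup above.
constexpr std::size_t PageBufferPages(std::size_t num_process, std::size_t num_thread) {
    const std::size_t base = num_process + num_thread;
    return base + base / 8; // 12.5% headroom
}

static_assert(PageBufferPages(80, 800) == 990); // 880 + 880/8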
View File

@@ -33,11 +33,11 @@ struct KSlabResourceCounts {
size_t num_KDebug;
size_t num_KIoPool;
size_t num_KIoRegion;
size_t num_KSessionRequestMappings;
};
void InitializeSlabResourceCounts(KernelCore& kernel);
size_t CalculateTotalSlabHeapSize(const KernelCore& kernel);
void InitializeKPageBufferSlabHeap(Core::System& system);
void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout);
} // namespace Kernel::Init

View File

@@ -16,7 +16,6 @@
#include "core/hle/kernel/k_session.h"
#include "core/hle/kernel/k_shared_memory.h"
#include "core/hle/kernel/k_synchronization_object.h"
#include "core/hle/kernel/k_system_resource.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_transfer_memory.h"
@@ -120,6 +119,4 @@ static_assert(std::is_final_v<KTransferMemory> && std::is_base_of_v<KAutoObject,
// static_assert(std::is_final_v<KCodeMemory> &&
// std::is_base_of_v<KAutoObject, KCodeMemory>);
static_assert(std::is_base_of_v<KAutoObject, KSystemResource>);
} // namespace Kernel

View File

@@ -10,8 +10,6 @@ namespace Kernel {
class KAutoObject;
class KSystemResource;
class KClassTokenGenerator {
public:
using TokenBaseType = u16;
@@ -60,7 +58,7 @@ private:
if constexpr (std::is_same<T, KAutoObject>::value) {
static_assert(T::ObjectType == ObjectType::KAutoObject);
return 0;
} else if constexpr (!std::is_final<T>::value && !std::same_as<T, KSystemResource>) {
} else if constexpr (!std::is_final<T>::value) {
static_assert(ObjectType::BaseClassesStart <= T::ObjectType &&
T::ObjectType < ObjectType::BaseClassesEnd);
constexpr auto ClassIndex = static_cast<TokenBaseType>(T::ObjectType) -
@@ -110,8 +108,6 @@ public:
KSessionRequest,
KCodeMemory,
KSystemResource,
// NOTE: True order for these has not been determined yet.
KAlpha,
KBeta,

View File

@@ -58,10 +58,11 @@ bool KClientPort::IsSignaled() const {
return num_sessions < max_sessions;
}
Result KClientPort::CreateSession(KClientSession** out) {
Result KClientPort::CreateSession(KClientSession** out,
std::shared_ptr<SessionRequestManager> session_manager) {
// Reserve a new session from the resource limit.
KScopedResourceReservation session_reservation(kernel.CurrentProcess()->GetResourceLimit(),
LimitableResource::SessionCountMax);
LimitableResource::Sessions);
R_UNLESS(session_reservation.Succeeded(), ResultLimitReached);
// Update the session counts.
@@ -103,7 +104,7 @@ Result KClientPort::CreateSession(KClientSession** out) {
}
// Initialize the session.
session->Initialize(this, parent->GetName());
session->Initialize(this, parent->GetName(), session_manager);
// Commit the session reservation.
session_reservation.Commit();

View File

@@ -52,7 +52,8 @@ public:
void Destroy() override;
bool IsSignaled() const override;
Result CreateSession(KClientSession** out);
Result CreateSession(KClientSession** out,
std::shared_ptr<SessionRequestManager> session_manager = nullptr);
private:
std::atomic<s32> num_sessions{};

View File

@@ -1,7 +1,6 @@
// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/scope_exit.h"
#include "core/hle/kernel/hle_ipc.h"
#include "core/hle/kernel/k_client_session.h"
#include "core/hle/kernel/k_server_session.h"
@@ -11,8 +10,6 @@
namespace Kernel {
static constexpr u32 MessageBufferSize = 0x100;
KClientSession::KClientSession(KernelCore& kernel_)
: KAutoObjectWithSlabHeapAndContainer{kernel_} {}
KClientSession::~KClientSession() = default;
@@ -25,16 +22,8 @@ void KClientSession::Destroy() {
void KClientSession::OnServerClosed() {}
Result KClientSession::SendSyncRequest() {
// Create a session request.
KSessionRequest* request = KSessionRequest::Create(kernel);
R_UNLESS(request != nullptr, ResultOutOfResource);
SCOPE_EXIT({ request->Close(); });
// Initialize the request.
request->Initialize(nullptr, GetCurrentThread(kernel).GetTLSAddress(), MessageBufferSize);
// Send the request.
return parent->GetServerSession().OnRequest(request);
// Signal the server session that new data is available
return parent->GetServerSession().OnRequest();
}
} // namespace Kernel

View File

@@ -34,7 +34,7 @@ Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, si
// Clear the memory.
for (const auto& block : m_page_group.Nodes()) {
std::memset(device_memory.GetPointer<void>(block.GetAddress()), 0xFF, block.GetSize());
std::memset(device_memory.GetPointer(block.GetAddress()), 0xFF, block.GetSize());
}
// Set remaining tracking members.

View File

@@ -1,20 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/slab_helpers.h"
namespace Kernel {
class KDebug final : public KAutoObjectWithSlabHeapAndContainer<KDebug, KAutoObjectWithList> {
KERNEL_AUTOOBJECT_TRAITS(KDebug, KAutoObject);
public:
explicit KDebug(KernelCore& kernel_) : KAutoObjectWithSlabHeapAndContainer{kernel_} {}
static void PostDestroy([[maybe_unused]] uintptr_t arg) {}
};
} // namespace Kernel

View File

@@ -1,169 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <vector>
#include "common/alignment.h"
#include "common/common_types.h"
#include "core/hle/kernel/k_page_bitmap.h"
#include "core/hle/kernel/k_spin_lock.h"
#include "core/hle/kernel/memory_types.h"
#include "core/hle/kernel/svc_results.h"
namespace Kernel {
class KDynamicPageManager {
public:
class PageBuffer {
private:
u8 m_buffer[PageSize];
};
static_assert(sizeof(PageBuffer) == PageSize);
public:
KDynamicPageManager() = default;
template <typename T>
T* GetPointer(VAddr addr) {
return reinterpret_cast<T*>(m_backing_memory.data() + (addr - m_address));
}
template <typename T>
const T* GetPointer(VAddr addr) const {
return reinterpret_cast<const T*>(m_backing_memory.data() + (addr - m_address));
}
Result Initialize(VAddr memory, size_t size, size_t align) {
// We need to have positive size.
R_UNLESS(size > 0, ResultOutOfMemory);
m_backing_memory.resize(size);
// Set addresses.
m_address = memory;
m_aligned_address = Common::AlignDown(memory, align);
// Calculate extents.
const size_t managed_size = m_address + size - m_aligned_address;
const size_t overhead_size = Common::AlignUp(
KPageBitmap::CalculateManagementOverheadSize(managed_size / sizeof(PageBuffer)),
sizeof(PageBuffer));
R_UNLESS(overhead_size < size, ResultOutOfMemory);
// Set tracking fields.
m_size = Common::AlignDown(size - overhead_size, sizeof(PageBuffer));
m_count = m_size / sizeof(PageBuffer);
// Clear the management region.
u64* management_ptr = GetPointer<u64>(m_address + size - overhead_size);
std::memset(management_ptr, 0, overhead_size);
// Initialize the bitmap.
const size_t allocatable_region_size =
(m_address + size - overhead_size) - m_aligned_address;
ASSERT(allocatable_region_size >= sizeof(PageBuffer));
m_page_bitmap.Initialize(management_ptr, allocatable_region_size / sizeof(PageBuffer));
// Free the pages to the bitmap.
for (size_t i = 0; i < m_count; i++) {
// Ensure the freed page is all-zero.
std::memset(GetPointer<PageBuffer>(m_address) + i, 0, PageSize);
// Set the bit for the free page.
m_page_bitmap.SetBit((m_address + (i * sizeof(PageBuffer)) - m_aligned_address) /
sizeof(PageBuffer));
}
R_SUCCEED();
}
VAddr GetAddress() const {
return m_address;
}
size_t GetSize() const {
return m_size;
}
size_t GetUsed() const {
return m_used;
}
size_t GetPeak() const {
return m_peak;
}
size_t GetCount() const {
return m_count;
}
PageBuffer* Allocate() {
// Take the lock.
// TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
KScopedSpinLock lk(m_lock);
// Find a random free block.
s64 soffset = m_page_bitmap.FindFreeBlock(true);
if (soffset < 0) [[unlikely]] {
return nullptr;
}
const size_t offset = static_cast<size_t>(soffset);
// Update our tracking.
m_page_bitmap.ClearBit(offset);
m_peak = std::max(m_peak, (++m_used));
return GetPointer<PageBuffer>(m_aligned_address) + offset;
}
PageBuffer* Allocate(size_t count) {
// Take the lock.
// TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
KScopedSpinLock lk(m_lock);
// Find a random free block.
s64 soffset = m_page_bitmap.FindFreeRange(count);
if (soffset < 0) [[unlikely]] {
return nullptr;
}
const size_t offset = static_cast<size_t>(soffset);
// Update our tracking.
m_page_bitmap.ClearRange(offset, count);
m_used += count;
m_peak = std::max(m_peak, m_used);
return GetPointer<PageBuffer>(m_aligned_address) + offset;
}
void Free(PageBuffer* pb) {
// Ensure all pages in the heap are zero.
std::memset(pb, 0, PageSize);
// Take the lock.
// TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
KScopedSpinLock lk(m_lock);
// Set the bit for the free page.
size_t offset = (reinterpret_cast<uintptr_t>(pb) - m_aligned_address) / sizeof(PageBuffer);
m_page_bitmap.SetBit(offset);
// Decrement our used count.
--m_used;
}
private:
KSpinLock m_lock;
KPageBitmap m_page_bitmap;
size_t m_used{};
size_t m_peak{};
size_t m_count{};
VAddr m_address{};
VAddr m_aligned_address{};
size_t m_size{};
// TODO(bunnei): Back by host memory until we emulate kernel virtual address space.
std::vector<u8> m_backing_memory;
};
} // namespace Kernel

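The page manager above is essentially a bitmap allocator: a set bit means the page is free, allocation clears a bit, and freeing sets it again. A tiny model of that core (std::bitset standing in for KPageBitmap; no locking or zeroing):

#include <bitset>
#include <cstddef>
#include <optional>

template <std::size_t NumPages>
struct BitmapPageAllocator {
    std::bitset<NumPages> free_pages;

    void FreeAll() {
        free_pages.set(); // every page starts free
    }

    std::optional<std::size_t> Allocate() {
        for (std::size_t i = 0; i < NumPages; ++i) {
            if (free_pages.test(i)) {
                free_pages.reset(i); // mark the page in use
                return i;
            }
        }
        return std::nullopt; // out of pages
    }

    void Free(std::size_t index) {
        free_pages.set(index); // return the page to the pool
    }
};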
View File

@@ -1,61 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include "common/common_funcs.h"
#include "core/hle/kernel/k_dynamic_slab_heap.h"
#include "core/hle/kernel/k_memory_block.h"
#include "core/hle/kernel/k_page_group.h"
namespace Kernel {
template <typename T, bool ClearNode = false>
class KDynamicResourceManager {
YUZU_NON_COPYABLE(KDynamicResourceManager);
YUZU_NON_MOVEABLE(KDynamicResourceManager);
public:
using DynamicSlabType = KDynamicSlabHeap<T, ClearNode>;
public:
constexpr KDynamicResourceManager() = default;
constexpr size_t GetSize() const {
return m_slab_heap->GetSize();
}
constexpr size_t GetUsed() const {
return m_slab_heap->GetUsed();
}
constexpr size_t GetPeak() const {
return m_slab_heap->GetPeak();
}
constexpr size_t GetCount() const {
return m_slab_heap->GetCount();
}
void Initialize(KDynamicPageManager* page_allocator, DynamicSlabType* slab_heap) {
m_page_allocator = page_allocator;
m_slab_heap = slab_heap;
}
T* Allocate() const {
return m_slab_heap->Allocate(m_page_allocator);
}
void Free(T* t) const {
m_slab_heap->Free(t);
}
private:
KDynamicPageManager* m_page_allocator{};
DynamicSlabType* m_slab_heap{};
};
class KBlockInfoManager : public KDynamicResourceManager<KBlockInfo> {};
class KMemoryBlockSlabManager : public KDynamicResourceManager<KMemoryBlock> {};
using KBlockInfoSlabHeap = typename KBlockInfoManager::DynamicSlabType;
using KMemoryBlockSlabHeap = typename KMemoryBlockSlabManager::DynamicSlabType;
} // namespace Kernel

View File

@@ -1,122 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <atomic>
#include "common/common_funcs.h"
#include "core/hle/kernel/k_dynamic_page_manager.h"
#include "core/hle/kernel/k_slab_heap.h"
namespace Kernel {
template <typename T, bool ClearNode = false>
class KDynamicSlabHeap : protected impl::KSlabHeapImpl {
YUZU_NON_COPYABLE(KDynamicSlabHeap);
YUZU_NON_MOVEABLE(KDynamicSlabHeap);
public:
constexpr KDynamicSlabHeap() = default;
constexpr VAddr GetAddress() const {
return m_address;
}
constexpr size_t GetSize() const {
return m_size;
}
constexpr size_t GetUsed() const {
return m_used.load();
}
constexpr size_t GetPeak() const {
return m_peak.load();
}
constexpr size_t GetCount() const {
return m_count.load();
}
constexpr bool IsInRange(VAddr addr) const {
return this->GetAddress() <= addr && addr <= this->GetAddress() + this->GetSize() - 1;
}
void Initialize(KDynamicPageManager* page_allocator, size_t num_objects) {
ASSERT(page_allocator != nullptr);
// Initialize members.
m_address = page_allocator->GetAddress();
m_size = page_allocator->GetSize();
// Initialize the base allocator.
KSlabHeapImpl::Initialize();
// Allocate until we have the correct number of objects.
while (m_count.load() < num_objects) {
auto* allocated = reinterpret_cast<T*>(page_allocator->Allocate());
ASSERT(allocated != nullptr);
for (size_t i = 0; i < sizeof(PageBuffer) / sizeof(T); i++) {
KSlabHeapImpl::Free(allocated + i);
}
m_count += sizeof(PageBuffer) / sizeof(T);
}
}
T* Allocate(KDynamicPageManager* page_allocator) {
T* allocated = static_cast<T*>(KSlabHeapImpl::Allocate());
// If we successfully allocated and we should clear the node, do so.
if constexpr (ClearNode) {
if (allocated != nullptr) [[likely]] {
reinterpret_cast<KSlabHeapImpl::Node*>(allocated)->next = nullptr;
}
}
// If we fail to allocate, try to get a new page from our next allocator.
if (allocated == nullptr) [[unlikely]] {
if (page_allocator != nullptr) {
allocated = reinterpret_cast<T*>(page_allocator->Allocate());
if (allocated != nullptr) {
// If we succeeded in getting a page, free the rest to our slab.
for (size_t i = 1; i < sizeof(PageBuffer) / sizeof(T); i++) {
KSlabHeapImpl::Free(allocated + i);
}
m_count += sizeof(PageBuffer) / sizeof(T);
}
}
}
if (allocated != nullptr) [[likely]] {
// Construct the object.
std::construct_at(allocated);
// Update our tracking.
const size_t used = ++m_used;
size_t peak = m_peak.load();
while (peak < used) {
if (m_peak.compare_exchange_weak(peak, used, std::memory_order_relaxed)) {
break;
}
}
}
return allocated;
}
void Free(T* t) {
KSlabHeapImpl::Free(t);
--m_used;
}
private:
using PageBuffer = KDynamicPageManager::PageBuffer;
private:
std::atomic<size_t> m_used{};
std::atomic<size_t> m_peak{};
std::atomic<size_t> m_count{};
VAddr m_address{};
size_t m_size{};
};
} // namespace Kernel

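One pattern worth isolating from `Allocate` above is the lock-free peak tracking: the peak is bumped with a compare-exchange loop so concurrent allocators never lose an update. A standalone sketch:

#include <atomic>
#include <cstddef>

// Illustrative version of the peak-update loop above.
inline void UpdatePeak(std::atomic<std::size_t>& peak, std::size_t used) {
    std::size_t observed = peak.load(std::memory_order_relaxed);
    while (observed < used &&
           !peak.compare_exchange_weak(observed, used, std::memory_order_relaxed)) {
        // compare_exchange_weak refreshes 'observed' on failure; retry until
        // the stored peak is at least 'used'.
    }
}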
View File

@@ -50,7 +50,7 @@ Result KEvent::Clear() {
void KEvent::PostDestroy(uintptr_t arg) {
// Release the event count resource the owner process holds.
KProcess* owner = reinterpret_cast<KProcess*>(arg);
owner->GetResourceLimit()->Release(LimitableResource::EventCountMax, 1);
owner->GetResourceLimit()->Release(LimitableResource::Events, 1);
owner->Close();
}

View File

@@ -1,64 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
#include <boost/intrusive/list.hpp>
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/kernel/svc_types.h"
namespace Kernel {
class KEventInfo : public KSlabAllocated<KEventInfo>, public boost::intrusive::list_base_hook<> {
public:
struct InfoCreateThread {
u32 thread_id{};
uintptr_t tls_address{};
};
struct InfoExitProcess {
Svc::ProcessExitReason reason{};
};
struct InfoExitThread {
Svc::ThreadExitReason reason{};
};
struct InfoException {
Svc::DebugException exception_type{};
s32 exception_data_count{};
uintptr_t exception_address{};
std::array<uintptr_t, 4> exception_data{};
};
struct InfoSystemCall {
s64 tick{};
s32 id{};
};
public:
KEventInfo() = default;
~KEventInfo() = default;
public:
Svc::DebugEvent event{};
u32 thread_id{};
u32 flags{};
bool is_attached{};
bool continue_flag{};
bool ignore_continue{};
bool close_once{};
union {
InfoCreateThread create_thread;
InfoExitProcess exit_process;
InfoExitThread exit_thread;
InfoException exception;
InfoSystemCall system_call;
} info{};
KThread* debug_thread{};
};
} // namespace Kernel

View File

@@ -5,11 +5,14 @@
namespace Kernel {
KHandleTable::KHandleTable(KernelCore& kernel_) : kernel{kernel_} {}
KHandleTable::~KHandleTable() = default;
Result KHandleTable::Finalize() {
// Get the table and clear our record of it.
u16 saved_table_size = 0;
{
KScopedDisableDispatch dd{m_kernel};
KScopedDisableDispatch dd(kernel);
KScopedSpinLock lk(m_lock);
std::swap(m_table_size, saved_table_size);
@@ -22,28 +25,28 @@ Result KHandleTable::Finalize() {
}
}
R_SUCCEED();
return ResultSuccess;
}
bool KHandleTable::Remove(Handle handle) {
// Don't allow removal of a pseudo-handle.
if (Svc::IsPseudoHandle(handle)) [[unlikely]] {
if (Svc::IsPseudoHandle(handle)) {
return false;
}
// Handles must not have reserved bits set.
const auto handle_pack = HandlePack(handle);
if (handle_pack.reserved != 0) [[unlikely]] {
if (handle_pack.reserved != 0) {
return false;
}
// Find the object and free the entry.
KAutoObject* obj = nullptr;
{
KScopedDisableDispatch dd{m_kernel};
KScopedDisableDispatch dd(kernel);
KScopedSpinLock lk(m_lock);
if (this->IsValidHandle(handle)) [[likely]] {
if (this->IsValidHandle(handle)) {
const auto index = handle_pack.index;
obj = m_objects[index];
@@ -54,13 +57,13 @@ bool KHandleTable::Remove(Handle handle) {
}
// Close the object.
m_kernel.UnregisterInUseObject(obj);
kernel.UnregisterInUseObject(obj);
obj->Close();
return true;
}
Result KHandleTable::Add(Handle* out_handle, KAutoObject* obj) {
KScopedDisableDispatch dd{m_kernel};
KScopedDisableDispatch dd(kernel);
KScopedSpinLock lk(m_lock);
// Never exceed our capacity.
@@ -79,22 +82,22 @@ Result KHandleTable::Add(Handle* out_handle, KAutoObject* obj) {
*out_handle = EncodeHandle(static_cast<u16>(index), linear_id);
}
R_SUCCEED();
return ResultSuccess;
}
Result KHandleTable::Reserve(Handle* out_handle) {
KScopedDisableDispatch dd{m_kernel};
KScopedDisableDispatch dd(kernel);
KScopedSpinLock lk(m_lock);
// Never exceed our capacity.
R_UNLESS(m_count < m_table_size, ResultOutOfHandles);
*out_handle = EncodeHandle(static_cast<u16>(this->AllocateEntry()), this->AllocateLinearId());
R_SUCCEED();
return ResultSuccess;
}
void KHandleTable::Unreserve(Handle handle) {
KScopedDisableDispatch dd{m_kernel};
KScopedDisableDispatch dd(kernel);
KScopedSpinLock lk(m_lock);
// Unpack the handle.
@@ -105,7 +108,7 @@ void KHandleTable::Unreserve(Handle handle) {
ASSERT(reserved == 0);
ASSERT(linear_id != 0);
if (index < m_table_size) [[likely]] {
if (index < m_table_size) {
// NOTE: This code does not check the linear id.
ASSERT(m_objects[index] == nullptr);
this->FreeEntry(index);
@@ -113,7 +116,7 @@ void KHandleTable::Unreserve(Handle handle) {
}
void KHandleTable::Register(Handle handle, KAutoObject* obj) {
KScopedDisableDispatch dd{m_kernel};
KScopedDisableDispatch dd(kernel);
KScopedSpinLock lk(m_lock);
// Unpack the handle.
@@ -124,7 +127,7 @@ void KHandleTable::Register(Handle handle, KAutoObject* obj) {
ASSERT(reserved == 0);
ASSERT(linear_id != 0);
if (index < m_table_size) [[likely]] {
if (index < m_table_size) {
// Set the entry.
ASSERT(m_objects[index] == nullptr);

View File

@@ -21,38 +21,33 @@ namespace Kernel {
class KernelCore;
class KHandleTable {
public:
YUZU_NON_COPYABLE(KHandleTable);
YUZU_NON_MOVEABLE(KHandleTable);
public:
static constexpr size_t MaxTableSize = 1024;
public:
explicit KHandleTable(KernelCore& kernel) : m_kernel(kernel) {}
explicit KHandleTable(KernelCore& kernel_);
~KHandleTable();
Result Initialize(s32 size) {
// Check that the table size is valid.
R_UNLESS(size <= static_cast<s32>(MaxTableSize), ResultOutOfMemory);
// Lock.
KScopedDisableDispatch dd{m_kernel};
KScopedSpinLock lk(m_lock);
// Initialize all fields.
m_max_count = 0;
m_table_size = static_cast<s16>((size <= 0) ? MaxTableSize : size);
m_table_size = static_cast<u16>((size <= 0) ? MaxTableSize : size);
m_next_linear_id = MinLinearId;
m_count = 0;
m_free_head_index = -1;
// Free all entries.
for (s32 i = 0; i < static_cast<s32>(m_table_size); ++i) {
for (s16 i = 0; i < static_cast<s16>(m_table_size); ++i) {
m_objects[i] = nullptr;
m_entry_infos[i].next_free_index = static_cast<s16>(i - 1);
m_entry_infos[i].next_free_index = i - 1;
m_free_head_index = i;
}
R_SUCCEED();
return ResultSuccess;
}
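Note how Initialize builds the free list above: entry i points at entry i - 1, so after the loop the head is the highest index and -1 terminates the chain. A standalone sketch of the same construction for a 4-entry table:

#include <array>
#include <cstdint>

struct FreeList {
    std::array<std::int16_t, 4> next_free{};
    std::int16_t head = -1;

    constexpr FreeList() {
        for (std::int16_t i = 0; i < 4; ++i) {
            next_free[i] = static_cast<std::int16_t>(i - 1); // chain to previous entry
            head = i;                                        // head ends at the top index
        }
    }
};

static_assert(FreeList{}.head == 3);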
size_t GetTableSize() const {
@@ -71,13 +66,13 @@ public:
template <typename T = KAutoObject>
KScopedAutoObject<T> GetObjectWithoutPseudoHandle(Handle handle) const {
// Lock and look up in table.
KScopedDisableDispatch dd{m_kernel};
KScopedDisableDispatch dd(kernel);
KScopedSpinLock lk(m_lock);
if constexpr (std::is_same_v<T, KAutoObject>) {
return this->GetObjectImpl(handle);
} else {
if (auto* obj = this->GetObjectImpl(handle); obj != nullptr) [[likely]] {
if (auto* obj = this->GetObjectImpl(handle); obj != nullptr) {
return obj->DynamicCast<T*>();
} else {
return nullptr;
@@ -90,13 +85,13 @@ public:
// Handle pseudo-handles.
if constexpr (std::derived_from<KProcess, T>) {
if (handle == Svc::PseudoHandle::CurrentProcess) {
auto* const cur_process = m_kernel.CurrentProcess();
auto* const cur_process = kernel.CurrentProcess();
ASSERT(cur_process != nullptr);
return cur_process;
}
} else if constexpr (std::derived_from<KThread, T>) {
if (handle == Svc::PseudoHandle::CurrentThread) {
auto* const cur_thread = GetCurrentThreadPointer(m_kernel);
auto* const cur_thread = GetCurrentThreadPointer(kernel);
ASSERT(cur_thread != nullptr);
return cur_thread;
}
@@ -105,37 +100,6 @@ public:
return this->template GetObjectWithoutPseudoHandle<T>(handle);
}
KScopedAutoObject<KAutoObject> GetObjectForIpcWithoutPseudoHandle(Handle handle) const {
// Lock and look up in table.
KScopedDisableDispatch dd{m_kernel};
KScopedSpinLock lk(m_lock);
return this->GetObjectImpl(handle);
}
KScopedAutoObject<KAutoObject> GetObjectForIpc(Handle handle, KThread* cur_thread) const {
// Handle pseudo-handles.
ASSERT(cur_thread != nullptr);
if (handle == Svc::PseudoHandle::CurrentProcess) {
auto* const cur_process =
static_cast<KAutoObject*>(static_cast<void*>(cur_thread->GetOwnerProcess()));
ASSERT(cur_process != nullptr);
return cur_process;
}
if (handle == Svc::PseudoHandle::CurrentThread) {
return static_cast<KAutoObject*>(cur_thread);
}
return GetObjectForIpcWithoutPseudoHandle(handle);
}
KScopedAutoObject<KAutoObject> GetObjectByIndex(Handle* out_handle, size_t index) const {
KScopedDisableDispatch dd{m_kernel};
KScopedSpinLock lk(m_lock);
return this->GetObjectByIndexImpl(out_handle, index);
}
Result Reserve(Handle* out_handle);
void Unreserve(Handle handle);
@@ -148,7 +112,7 @@ public:
size_t num_opened;
{
// Lock the table.
KScopedDisableDispatch dd{m_kernel};
KScopedDisableDispatch dd(kernel);
KScopedSpinLock lk(m_lock);
for (num_opened = 0; num_opened < num_handles; num_opened++) {
// Get the current handle.
@@ -156,13 +120,13 @@ public:
// Get the object for the current handle.
KAutoObject* cur_object = this->GetObjectImpl(cur_handle);
if (cur_object == nullptr) [[unlikely]] {
if (cur_object == nullptr) {
break;
}
// Cast the current object to the desired type.
T* cur_t = cur_object->DynamicCast<T*>();
if (cur_t == nullptr) [[unlikely]] {
if (cur_t == nullptr) {
break;
}
@@ -173,7 +137,7 @@ public:
}
// If we converted every object, succeed.
if (num_opened == num_handles) [[likely]] {
if (num_opened == num_handles) {
return true;
}
@@ -227,21 +191,21 @@ private:
ASSERT(reserved == 0);
// Validate our indexing information.
if (raw_value == 0) [[unlikely]] {
if (raw_value == 0) {
return false;
}
if (linear_id == 0) [[unlikely]] {
if (linear_id == 0) {
return false;
}
if (index >= m_table_size) [[unlikely]] {
if (index >= m_table_size) {
return false;
}
// Check that there's an object, and our serial id is correct.
if (m_objects[index] == nullptr) [[unlikely]] {
if (m_objects[index] == nullptr) {
return false;
}
if (m_entry_infos[index].GetLinearId() != linear_id) [[unlikely]] {
if (m_entry_infos[index].GetLinearId() != linear_id) {
return false;
}
@@ -251,11 +215,11 @@ private:
KAutoObject* GetObjectImpl(Handle handle) const {
// Handles must not have reserved bits set.
const auto handle_pack = HandlePack(handle);
if (handle_pack.reserved != 0) [[unlikely]] {
if (handle_pack.reserved != 0) {
return nullptr;
}
if (this->IsValidHandle(handle)) [[likely]] {
if (this->IsValidHandle(handle)) {
return m_objects[handle_pack.index];
} else {
return nullptr;
@@ -263,8 +227,9 @@ private:
}
KAutoObject* GetObjectByIndexImpl(Handle* out_handle, size_t index) const {
// Index must be in bounds.
if (index >= m_table_size) [[unlikely]] {
if (index >= m_table_size) {
return nullptr;
}
@@ -279,15 +244,18 @@ private:
private:
union HandlePack {
constexpr HandlePack() = default;
constexpr HandlePack(Handle handle) : raw{static_cast<u32>(handle)} {}
HandlePack() = default;
HandlePack(Handle handle) : raw{static_cast<u32>(handle)} {}
u32 raw{};
u32 raw;
BitField<0, 15, u32> index;
BitField<15, 15, u32> linear_id;
BitField<30, 2, u32> reserved;
};
static constexpr u16 MinLinearId = 1;
static constexpr u16 MaxLinearId = 0x7FFF;
static constexpr Handle EncodeHandle(u16 index, u16 linear_id) {
HandlePack handle{};
handle.index.Assign(index);
@@ -296,10 +264,6 @@ private:
return handle.raw;
}
private:
static constexpr u16 MinLinearId = 1;
static constexpr u16 MaxLinearId = 0x7FFF;
union EntryInfo {
u16 linear_id;
s16 next_free_index;
@@ -307,21 +271,21 @@ private:
constexpr u16 GetLinearId() const {
return linear_id;
}
constexpr s32 GetNextFreeIndex() const {
constexpr s16 GetNextFreeIndex() const {
return next_free_index;
}
};
private:
KernelCore& m_kernel;
std::array<EntryInfo, MaxTableSize> m_entry_infos{};
std::array<KAutoObject*, MaxTableSize> m_objects{};
mutable KSpinLock m_lock;
s32 m_free_head_index{};
s32 m_free_head_index{-1};
u16 m_table_size{};
u16 m_max_count{};
u16 m_next_linear_id{};
u16 m_next_linear_id{MinLinearId};
u16 m_count{};
mutable KSpinLock m_lock;
KernelCore& kernel;
};
} // namespace Kernel
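The HandlePack union above packs a 15-bit table index (bits 0..14), a 15-bit linear id (bits 15..29), and 2 reserved bits (30..31) into one 32-bit handle. A plain-integer restatement of EncodeHandle without the BitField helper, as a sanity check:

#include <cstdint>

// Same bit layout as EncodeHandle above, spelled out with shifts and masks.
constexpr std::uint32_t EncodeHandleBits(std::uint16_t index, std::uint16_t linear_id) {
    return (static_cast<std::uint32_t>(index) & 0x7FFFu) |
           ((static_cast<std::uint32_t>(linear_id) & 0x7FFFu) << 15);
}

static_assert(EncodeHandleBits(3, 1) == ((1u << 15) | 3u));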

View File

@@ -11,34 +11,29 @@
namespace Kernel::KInterruptManager {
void HandleInterrupt(KernelCore& kernel, s32 core_id) {
auto* process = kernel.CurrentProcess();
if (!process) {
return;
}
// Acknowledge the interrupt.
kernel.PhysicalCore(core_id).ClearInterrupt();
auto& current_thread = GetCurrentThread(kernel);
if (auto* process = kernel.CurrentProcess(); process) {
// If the user disable count is set, we may need to pin the current thread.
if (current_thread.GetUserDisableCount() && !process->GetPinnedThread(core_id)) {
KScopedSchedulerLock sl{kernel};
// If the user disable count is set, we may need to pin the current thread.
if (current_thread.GetUserDisableCount() && !process->GetPinnedThread(core_id)) {
KScopedSchedulerLock sl{kernel};
// Pin the current thread.
process->PinCurrentThread(core_id);
// Pin the current thread.
process->PinCurrentThread(core_id);
// Set the interrupt flag for the thread.
GetCurrentThread(kernel).SetInterruptFlag();
}
// Set the interrupt flag for the thread.
GetCurrentThread(kernel).SetInterruptFlag();
}
// Request interrupt scheduling.
kernel.CurrentScheduler()->RequestScheduleOnInterrupt();
}
void SendInterProcessorInterrupt(KernelCore& kernel, u64 core_mask) {
for (std::size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; ++core_id) {
if (core_mask & (1ULL << core_id)) {
kernel.PhysicalCore(core_id).Interrupt();
}
}
}
} // namespace Kernel::KInterruptManager
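SendInterProcessorInterrupt treats core_mask as one bit per core, so a mask of 0b1001 interrupts cores 0 and 3. A minimal sketch of the same mask walk (NUM_CORES and OnInterrupt stand in for the kernel's Core::Hardware::NUM_CPU_CORES and PhysicalCore::Interrupt):

#include <cstddef>
#include <cstdint>

constexpr std::size_t NUM_CORES = 4;

// Invoke the callback once per set bit, low core ids first.
void ForEachMaskedCore(std::uint64_t core_mask, void (*OnInterrupt)(std::size_t)) {
    for (std::size_t core_id = 0; core_id < NUM_CORES; ++core_id) {
        if (core_mask & (1ULL << core_id)) {
            OnInterrupt(core_id);
        }
    }
}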

View File

@@ -11,8 +11,6 @@ class KernelCore;
namespace KInterruptManager {
void HandleInterrupt(KernelCore& kernel, s32 core_id);
void SendInterProcessorInterrupt(KernelCore& kernel, u64 core_mask);
} // namespace KInterruptManager
}
} // namespace Kernel

View File

@@ -16,7 +16,6 @@ class KLinkedListNode : public boost::intrusive::list_base_hook<>,
public KSlabAllocated<KLinkedListNode> {
public:
explicit KLinkedListNode(KernelCore&) {}
KLinkedListNode() = default;
void Initialize(void* it) {

View File

@@ -6,7 +6,6 @@
#include "common/alignment.h"
#include "common/assert.h"
#include "common/common_types.h"
#include "common/intrusive_red_black_tree.h"
#include "core/hle/kernel/memory_types.h"
#include "core/hle/kernel/svc_types.h"
@@ -35,32 +34,26 @@ enum class KMemoryState : u32 {
FlagCanMapProcess = (1 << 23),
FlagCanChangeAttribute = (1 << 24),
FlagCanCodeMemory = (1 << 25),
FlagLinearMapped = (1 << 26),
FlagsData = FlagCanReprotect | FlagCanUseIpc | FlagCanUseNonDeviceIpc | FlagCanUseNonSecureIpc |
FlagMapped | FlagCanAlias | FlagCanTransfer | FlagCanQueryPhysical |
FlagCanDeviceMap | FlagCanAlignedDeviceMap | FlagCanIpcUserBuffer |
FlagReferenceCounted | FlagCanChangeAttribute | FlagLinearMapped,
FlagReferenceCounted | FlagCanChangeAttribute,
FlagsCode = FlagCanDebug | FlagCanUseIpc | FlagCanUseNonDeviceIpc | FlagCanUseNonSecureIpc |
FlagMapped | FlagCode | FlagCanQueryPhysical | FlagCanDeviceMap |
FlagCanAlignedDeviceMap | FlagReferenceCounted | FlagLinearMapped,
FlagCanAlignedDeviceMap | FlagReferenceCounted,
FlagsMisc = FlagMapped | FlagReferenceCounted | FlagCanQueryPhysical | FlagCanDeviceMap |
FlagLinearMapped,
FlagsMisc = FlagMapped | FlagReferenceCounted | FlagCanQueryPhysical | FlagCanDeviceMap,
Free = static_cast<u32>(Svc::MemoryState::Free),
Io = static_cast<u32>(Svc::MemoryState::Io) | FlagMapped | FlagCanDeviceMap |
FlagCanAlignedDeviceMap,
Io = static_cast<u32>(Svc::MemoryState::Io) | FlagMapped,
Static = static_cast<u32>(Svc::MemoryState::Static) | FlagMapped | FlagCanQueryPhysical,
Code = static_cast<u32>(Svc::MemoryState::Code) | FlagsCode | FlagCanMapProcess,
CodeData = static_cast<u32>(Svc::MemoryState::CodeData) | FlagsData | FlagCanMapProcess |
FlagCanCodeMemory,
Shared = static_cast<u32>(Svc::MemoryState::Shared) | FlagMapped | FlagReferenceCounted,
Normal = static_cast<u32>(Svc::MemoryState::Normal) | FlagsData | FlagCanCodeMemory,
Shared = static_cast<u32>(Svc::MemoryState::Shared) | FlagMapped | FlagReferenceCounted |
FlagLinearMapped,
// Alias was removed after 1.0.0.
AliasCode = static_cast<u32>(Svc::MemoryState::AliasCode) | FlagsCode | FlagCanMapProcess |
FlagCanCodeAlias,
@@ -73,18 +66,18 @@ enum class KMemoryState : u32 {
Stack = static_cast<u32>(Svc::MemoryState::Stack) | FlagsMisc | FlagCanAlignedDeviceMap |
FlagCanUseIpc | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
ThreadLocal = static_cast<u32>(Svc::MemoryState::ThreadLocal) | FlagMapped | FlagLinearMapped,
ThreadLocal =
static_cast<u32>(Svc::MemoryState::ThreadLocal) | FlagMapped | FlagReferenceCounted,
Transfered = static_cast<u32>(Svc::MemoryState::Transfered) | FlagsMisc |
Transfered = static_cast<u32>(Svc::MemoryState::Transferred) | FlagsMisc |
FlagCanAlignedDeviceMap | FlagCanChangeAttribute | FlagCanUseIpc |
FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
SharedTransfered = static_cast<u32>(Svc::MemoryState::SharedTransfered) | FlagsMisc |
SharedTransfered = static_cast<u32>(Svc::MemoryState::SharedTransferred) | FlagsMisc |
FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
SharedCode = static_cast<u32>(Svc::MemoryState::SharedCode) | FlagMapped |
FlagReferenceCounted | FlagLinearMapped | FlagCanUseNonSecureIpc |
FlagCanUseNonDeviceIpc,
FlagReferenceCounted | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
Inaccessible = static_cast<u32>(Svc::MemoryState::Inaccessible),
@@ -97,69 +90,69 @@ enum class KMemoryState : u32 {
Kernel = static_cast<u32>(Svc::MemoryState::Kernel) | FlagMapped,
GeneratedCode = static_cast<u32>(Svc::MemoryState::GeneratedCode) | FlagMapped |
FlagReferenceCounted | FlagCanDebug | FlagLinearMapped,
CodeOut = static_cast<u32>(Svc::MemoryState::CodeOut) | FlagMapped | FlagReferenceCounted |
FlagLinearMapped,
FlagReferenceCounted | FlagCanDebug,
CodeOut = static_cast<u32>(Svc::MemoryState::CodeOut) | FlagMapped | FlagReferenceCounted,
Coverage = static_cast<u32>(Svc::MemoryState::Coverage) | FlagMapped,
Insecure = static_cast<u32>(Svc::MemoryState::Insecure) | FlagMapped | FlagReferenceCounted |
FlagLinearMapped | FlagCanChangeAttribute | FlagCanDeviceMap |
FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
};
DECLARE_ENUM_FLAG_OPERATORS(KMemoryState);
static_assert(static_cast<u32>(KMemoryState::Free) == 0x00000000);
static_assert(static_cast<u32>(KMemoryState::Io) == 0x00182001);
static_assert(static_cast<u32>(KMemoryState::Io) == 0x00002001);
static_assert(static_cast<u32>(KMemoryState::Static) == 0x00042002);
static_assert(static_cast<u32>(KMemoryState::Code) == 0x04DC7E03);
static_assert(static_cast<u32>(KMemoryState::CodeData) == 0x07FEBD04);
static_assert(static_cast<u32>(KMemoryState::Normal) == 0x077EBD05);
static_assert(static_cast<u32>(KMemoryState::Shared) == 0x04402006);
static_assert(static_cast<u32>(KMemoryState::AliasCode) == 0x04DD7E08);
static_assert(static_cast<u32>(KMemoryState::AliasCodeData) == 0x07FFBD09);
static_assert(static_cast<u32>(KMemoryState::Ipc) == 0x045C3C0A);
static_assert(static_cast<u32>(KMemoryState::Stack) == 0x045C3C0B);
static_assert(static_cast<u32>(KMemoryState::ThreadLocal) == 0x0400200C);
static_assert(static_cast<u32>(KMemoryState::Transfered) == 0x055C3C0D);
static_assert(static_cast<u32>(KMemoryState::SharedTransfered) == 0x045C380E);
static_assert(static_cast<u32>(KMemoryState::SharedCode) == 0x0440380F);
static_assert(static_cast<u32>(KMemoryState::Code) == 0x00DC7E03);
static_assert(static_cast<u32>(KMemoryState::CodeData) == 0x03FEBD04);
static_assert(static_cast<u32>(KMemoryState::Normal) == 0x037EBD05);
static_assert(static_cast<u32>(KMemoryState::Shared) == 0x00402006);
static_assert(static_cast<u32>(KMemoryState::AliasCode) == 0x00DD7E08);
static_assert(static_cast<u32>(KMemoryState::AliasCodeData) == 0x03FFBD09);
static_assert(static_cast<u32>(KMemoryState::Ipc) == 0x005C3C0A);
static_assert(static_cast<u32>(KMemoryState::Stack) == 0x005C3C0B);
static_assert(static_cast<u32>(KMemoryState::ThreadLocal) == 0x0040200C);
static_assert(static_cast<u32>(KMemoryState::Transfered) == 0x015C3C0D);
static_assert(static_cast<u32>(KMemoryState::SharedTransfered) == 0x005C380E);
static_assert(static_cast<u32>(KMemoryState::SharedCode) == 0x0040380F);
static_assert(static_cast<u32>(KMemoryState::Inaccessible) == 0x00000010);
static_assert(static_cast<u32>(KMemoryState::NonSecureIpc) == 0x045C3811);
static_assert(static_cast<u32>(KMemoryState::NonDeviceIpc) == 0x044C2812);
static_assert(static_cast<u32>(KMemoryState::NonSecureIpc) == 0x005C3811);
static_assert(static_cast<u32>(KMemoryState::NonDeviceIpc) == 0x004C2812);
static_assert(static_cast<u32>(KMemoryState::Kernel) == 0x00002013);
static_assert(static_cast<u32>(KMemoryState::GeneratedCode) == 0x04402214);
static_assert(static_cast<u32>(KMemoryState::CodeOut) == 0x04402015);
static_assert(static_cast<u32>(KMemoryState::GeneratedCode) == 0x00402214);
static_assert(static_cast<u32>(KMemoryState::CodeOut) == 0x00402015);
static_assert(static_cast<u32>(KMemoryState::Coverage) == 0x00002016);
static_assert(static_cast<u32>(KMemoryState::Insecure) == 0x05583817);
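Each asserted value above is just the Svc state id in the low bits plus the flag bits. As a spot check of the two Io variants (assuming the flag positions defined earlier in this header, which this hunk does not show: FlagMapped = 1 << 13, FlagCanDeviceMap = 1 << 19, FlagCanAlignedDeviceMap = 1 << 20):

// Left-hand Io: id 0x01 | FlagMapped | FlagCanDeviceMap | FlagCanAlignedDeviceMap.
static_assert((0x01u | (1u << 13) | (1u << 19) | (1u << 20)) == 0x00182001u);
// Right-hand Io: id 0x01 | FlagMapped only.
static_assert((0x01u | (1u << 13)) == 0x00002001u);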
enum class KMemoryPermission : u8 {
None = 0,
All = static_cast<u8>(~None),
Read = 1 << 0,
Write = 1 << 1,
Execute = 1 << 2,
ReadAndWrite = Read | Write,
ReadAndExecute = Read | Execute,
UserMask = static_cast<u8>(Svc::MemoryPermission::Read | Svc::MemoryPermission::Write |
Svc::MemoryPermission::Execute),
KernelShift = 3,
KernelRead = static_cast<u8>(Svc::MemoryPermission::Read) << KernelShift,
KernelWrite = static_cast<u8>(Svc::MemoryPermission::Write) << KernelShift,
KernelExecute = static_cast<u8>(Svc::MemoryPermission::Execute) << KernelShift,
KernelRead = Read << KernelShift,
KernelWrite = Write << KernelShift,
KernelExecute = Execute << KernelShift,
NotMapped = (1 << (2 * KernelShift)),
KernelReadWrite = KernelRead | KernelWrite,
KernelReadExecute = KernelRead | KernelExecute,
UserRead = static_cast<u8>(Svc::MemoryPermission::Read) | KernelRead,
UserWrite = static_cast<u8>(Svc::MemoryPermission::Write) | KernelWrite,
UserExecute = static_cast<u8>(Svc::MemoryPermission::Execute),
UserRead = Read | KernelRead,
UserWrite = Write | KernelWrite,
UserExecute = Execute,
UserReadWrite = UserRead | UserWrite,
UserReadExecute = UserRead | UserExecute,
UserMask = static_cast<u8>(Svc::MemoryPermission::Read | Svc::MemoryPermission::Write |
Svc::MemoryPermission::Execute),
IpcLockChangeMask = NotMapped | UserReadWrite,
IpcLockChangeMask = NotMapped | UserReadWrite
};
DECLARE_ENUM_FLAG_OPERATORS(KMemoryPermission);
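The permission encoding above keeps the user R/W/X bits in positions 0..2 and mirrors them into kernel positions via KernelShift = 3, with NotMapped one shift further up. Spot-checking the arithmetic:

// Read = 1 << 0 and KernelShift = 3 are both defined above.
static_assert(((1u << 0) << 3) == 0x08u);                // KernelRead
static_assert(((1u << 0) | ((1u << 0) << 3)) == 0x09u);  // UserRead = Read | KernelRead
static_assert((1u << (2 * 3)) == 0x40u);                 // NotMapped = 1 << (2 * KernelShift)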
@@ -175,8 +168,9 @@ constexpr KMemoryPermission ConvertToKMemoryPermission(Svc::MemoryPermission per
enum class KMemoryAttribute : u8 {
None = 0x00,
All = 0xFF,
UserMask = All,
Mask = 0x7F,
All = Mask,
DontCareMask = 0x80,
Locked = static_cast<u8>(Svc::MemoryAttribute::Locked),
IpcLocked = static_cast<u8>(Svc::MemoryAttribute::IpcLocked),
@@ -184,114 +178,76 @@ enum class KMemoryAttribute : u8 {
Uncached = static_cast<u8>(Svc::MemoryAttribute::Uncached),
SetMask = Uncached,
IpcAndDeviceMapped = IpcLocked | DeviceShared,
LockedAndIpcLocked = Locked | IpcLocked,
DeviceSharedAndUncached = DeviceShared | Uncached
};
DECLARE_ENUM_FLAG_OPERATORS(KMemoryAttribute);
enum class KMemoryBlockDisableMergeAttribute : u8 {
None = 0,
Normal = (1u << 0),
DeviceLeft = (1u << 1),
IpcLeft = (1u << 2),
Locked = (1u << 3),
DeviceRight = (1u << 4),
AllLeft = Normal | DeviceLeft | IpcLeft | Locked,
AllRight = DeviceRight,
};
DECLARE_ENUM_FLAG_OPERATORS(KMemoryBlockDisableMergeAttribute);
static_assert((static_cast<u8>(KMemoryAttribute::Mask) &
static_cast<u8>(KMemoryAttribute::DontCareMask)) == 0);
struct KMemoryInfo {
uintptr_t m_address;
size_t m_size;
KMemoryState m_state;
u16 m_device_disable_merge_left_count;
u16 m_device_disable_merge_right_count;
u16 m_ipc_lock_count;
u16 m_device_use_count;
u16 m_ipc_disable_merge_count;
KMemoryPermission m_permission;
KMemoryAttribute m_attribute;
KMemoryPermission m_original_permission;
KMemoryBlockDisableMergeAttribute m_disable_merge_attribute;
VAddr addr{};
std::size_t size{};
KMemoryState state{};
KMemoryPermission perm{};
KMemoryAttribute attribute{};
KMemoryPermission original_perm{};
u16 ipc_lock_count{};
u16 device_use_count{};
constexpr Svc::MemoryInfo GetSvcMemoryInfo() const {
return {
.base_address = m_address,
.size = m_size,
.state = static_cast<Svc::MemoryState>(m_state & KMemoryState::Mask),
.attribute =
static_cast<Svc::MemoryAttribute>(m_attribute & KMemoryAttribute::UserMask),
.permission =
static_cast<Svc::MemoryPermission>(m_permission & KMemoryPermission::UserMask),
.ipc_count = m_ipc_lock_count,
.device_count = m_device_use_count,
.padding = {},
addr,
size,
static_cast<Svc::MemoryState>(state & KMemoryState::Mask),
static_cast<Svc::MemoryAttribute>(attribute & KMemoryAttribute::Mask),
static_cast<Svc::MemoryPermission>(perm & KMemoryPermission::UserMask),
ipc_lock_count,
device_use_count,
};
}
constexpr uintptr_t GetAddress() const {
return m_address;
constexpr VAddr GetAddress() const {
return addr;
}
constexpr size_t GetSize() const {
return m_size;
constexpr std::size_t GetSize() const {
return size;
}
constexpr size_t GetNumPages() const {
return this->GetSize() / PageSize;
constexpr std::size_t GetNumPages() const {
return GetSize() / PageSize;
}
constexpr uintptr_t GetEndAddress() const {
return this->GetAddress() + this->GetSize();
constexpr VAddr GetEndAddress() const {
return GetAddress() + GetSize();
}
constexpr uintptr_t GetLastAddress() const {
return this->GetEndAddress() - 1;
constexpr VAddr GetLastAddress() const {
return GetEndAddress() - 1;
}
constexpr u16 GetIpcLockCount() const {
return m_ipc_lock_count;
}
constexpr u16 GetIpcDisableMergeCount() const {
return m_ipc_disable_merge_count;
}
constexpr KMemoryState GetState() const {
return m_state;
return state;
}
constexpr KMemoryPermission GetPermission() const {
return m_permission;
}
constexpr KMemoryPermission GetOriginalPermission() const {
return m_original_permission;
}
constexpr KMemoryAttribute GetAttribute() const {
return m_attribute;
return attribute;
}
constexpr KMemoryBlockDisableMergeAttribute GetDisableMergeAttribute() const {
return m_disable_merge_attribute;
constexpr KMemoryPermission GetPermission() const {
return perm;
}
};
class KMemoryBlock : public Common::IntrusiveRedBlackTreeBaseNode<KMemoryBlock> {
class KMemoryBlock final {
friend class KMemoryBlockManager;
private:
u16 m_device_disable_merge_left_count;
u16 m_device_disable_merge_right_count;
VAddr m_address;
size_t m_num_pages;
KMemoryState m_memory_state;
u16 m_ipc_lock_count;
u16 m_device_use_count;
u16 m_ipc_disable_merge_count;
KMemoryPermission m_permission;
KMemoryPermission m_original_permission;
KMemoryAttribute m_attribute;
KMemoryBlockDisableMergeAttribute m_disable_merge_attribute;
VAddr addr{};
std::size_t num_pages{};
KMemoryState state{KMemoryState::None};
u16 ipc_lock_count{};
u16 device_use_count{};
KMemoryPermission perm{KMemoryPermission::None};
KMemoryPermission original_perm{KMemoryPermission::None};
KMemoryAttribute attribute{KMemoryAttribute::None};
public:
static constexpr int Compare(const KMemoryBlock& lhs, const KMemoryBlock& rhs) {
@@ -305,361 +261,113 @@ public:
}
public:
constexpr KMemoryBlock() = default;
constexpr KMemoryBlock(VAddr addr_, std::size_t num_pages_, KMemoryState state_,
KMemoryPermission perm_, KMemoryAttribute attribute_)
: addr{addr_}, num_pages(num_pages_), state{state_}, perm{perm_}, attribute{attribute_} {}
constexpr VAddr GetAddress() const {
return m_address;
return addr;
}
constexpr size_t GetNumPages() const {
return m_num_pages;
constexpr std::size_t GetNumPages() const {
return num_pages;
}
constexpr size_t GetSize() const {
return this->GetNumPages() * PageSize;
constexpr std::size_t GetSize() const {
return GetNumPages() * PageSize;
}
constexpr VAddr GetEndAddress() const {
return this->GetAddress() + this->GetSize();
return GetAddress() + GetSize();
}
constexpr VAddr GetLastAddress() const {
return this->GetEndAddress() - 1;
}
constexpr u16 GetIpcLockCount() const {
return m_ipc_lock_count;
}
constexpr u16 GetIpcDisableMergeCount() const {
return m_ipc_disable_merge_count;
}
constexpr KMemoryPermission GetPermission() const {
return m_permission;
}
constexpr KMemoryPermission GetOriginalPermission() const {
return m_original_permission;
}
constexpr KMemoryAttribute GetAttribute() const {
return m_attribute;
return GetEndAddress() - 1;
}
constexpr KMemoryInfo GetMemoryInfo() const {
return {
.m_address = this->GetAddress(),
.m_size = this->GetSize(),
.m_state = m_memory_state,
.m_device_disable_merge_left_count = m_device_disable_merge_left_count,
.m_device_disable_merge_right_count = m_device_disable_merge_right_count,
.m_ipc_lock_count = m_ipc_lock_count,
.m_device_use_count = m_device_use_count,
.m_ipc_disable_merge_count = m_ipc_disable_merge_count,
.m_permission = m_permission,
.m_attribute = m_attribute,
.m_original_permission = m_original_permission,
.m_disable_merge_attribute = m_disable_merge_attribute,
GetAddress(), GetSize(), state, perm,
attribute, original_perm, ipc_lock_count, device_use_count,
};
}
public:
explicit KMemoryBlock() = default;
constexpr KMemoryBlock(VAddr addr, size_t np, KMemoryState ms, KMemoryPermission p,
KMemoryAttribute attr)
: Common::IntrusiveRedBlackTreeBaseNode<KMemoryBlock>(),
m_device_disable_merge_left_count(), m_device_disable_merge_right_count(),
m_address(addr), m_num_pages(np), m_memory_state(ms), m_ipc_lock_count(0),
m_device_use_count(0), m_ipc_disable_merge_count(), m_permission(p),
m_original_permission(KMemoryPermission::None), m_attribute(attr),
m_disable_merge_attribute() {}
constexpr void Initialize(VAddr addr, size_t np, KMemoryState ms, KMemoryPermission p,
KMemoryAttribute attr) {
m_device_disable_merge_left_count = 0;
m_device_disable_merge_right_count = 0;
m_address = addr;
m_num_pages = np;
m_memory_state = ms;
m_ipc_lock_count = 0;
m_device_use_count = 0;
m_permission = p;
m_original_permission = KMemoryPermission::None;
m_attribute = attr;
m_disable_merge_attribute = KMemoryBlockDisableMergeAttribute::None;
void ShareToDevice(KMemoryPermission /*new_perm*/) {
ASSERT((attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared ||
device_use_count == 0);
attribute |= KMemoryAttribute::DeviceShared;
const u16 new_use_count{++device_use_count};
ASSERT(new_use_count > 0);
}
void UnshareToDevice(KMemoryPermission /*new_perm*/) {
ASSERT((attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared);
const u16 prev_use_count{device_use_count--};
ASSERT(prev_use_count > 0);
if (prev_use_count == 1) {
attribute &= ~KMemoryAttribute::DeviceShared;
}
}
private:
constexpr bool HasProperties(KMemoryState s, KMemoryPermission p, KMemoryAttribute a) const {
constexpr auto AttributeIgnoreMask =
KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared;
return m_memory_state == s && m_permission == p &&
(m_attribute | AttributeIgnoreMask) == (a | AttributeIgnoreMask);
constexpr KMemoryAttribute AttributeIgnoreMask{KMemoryAttribute::DontCareMask |
KMemoryAttribute::IpcLocked |
KMemoryAttribute::DeviceShared};
return state == s && perm == p &&
(attribute | AttributeIgnoreMask) == (a | AttributeIgnoreMask);
}
constexpr bool HasSameProperties(const KMemoryBlock& rhs) const {
return m_memory_state == rhs.m_memory_state && m_permission == rhs.m_permission &&
m_original_permission == rhs.m_original_permission &&
m_attribute == rhs.m_attribute && m_ipc_lock_count == rhs.m_ipc_lock_count &&
m_device_use_count == rhs.m_device_use_count;
return state == rhs.state && perm == rhs.perm && original_perm == rhs.original_perm &&
attribute == rhs.attribute && ipc_lock_count == rhs.ipc_lock_count &&
device_use_count == rhs.device_use_count;
}
constexpr bool CanMergeWith(const KMemoryBlock& rhs) const {
return this->HasSameProperties(rhs) &&
(m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllRight) ==
KMemoryBlockDisableMergeAttribute::None &&
(rhs.m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllLeft) ==
KMemoryBlockDisableMergeAttribute::None;
constexpr bool Contains(VAddr start) const {
return GetAddress() <= start && start <= GetEndAddress();
}
constexpr bool Contains(VAddr addr) const {
return this->GetAddress() <= addr && addr <= this->GetEndAddress();
constexpr void Add(std::size_t count) {
ASSERT(count > 0);
ASSERT(GetAddress() + count * PageSize - 1 < GetEndAddress() + count * PageSize - 1);
num_pages += count;
}
constexpr void Add(const KMemoryBlock& added_block) {
ASSERT(added_block.GetNumPages() > 0);
ASSERT(this->GetAddress() + added_block.GetSize() - 1 <
this->GetEndAddress() + added_block.GetSize() - 1);
constexpr void Update(KMemoryState new_state, KMemoryPermission new_perm,
KMemoryAttribute new_attribute) {
ASSERT(original_perm == KMemoryPermission::None);
ASSERT((attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::None);
m_num_pages += added_block.GetNumPages();
m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
m_disable_merge_attribute | added_block.m_disable_merge_attribute);
m_device_disable_merge_right_count = added_block.m_device_disable_merge_right_count;
state = new_state;
perm = new_perm;
attribute = static_cast<KMemoryAttribute>(
new_attribute |
(attribute & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)));
}
constexpr void Update(KMemoryState s, KMemoryPermission p, KMemoryAttribute a,
bool set_disable_merge_attr, u8 set_mask, u8 clear_mask) {
ASSERT(m_original_permission == KMemoryPermission::None);
ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::None);
constexpr KMemoryBlock Split(VAddr split_addr) {
ASSERT(GetAddress() < split_addr);
ASSERT(Contains(split_addr));
ASSERT(Common::IsAligned(split_addr, PageSize));
m_memory_state = s;
m_permission = p;
m_attribute = static_cast<KMemoryAttribute>(
a | (m_attribute & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)));
KMemoryBlock block;
block.addr = addr;
block.num_pages = (split_addr - GetAddress()) / PageSize;
block.state = state;
block.ipc_lock_count = ipc_lock_count;
block.device_use_count = device_use_count;
block.perm = perm;
block.original_perm = original_perm;
block.attribute = attribute;
if (set_disable_merge_attr && set_mask != 0) {
m_disable_merge_attribute = m_disable_merge_attribute |
static_cast<KMemoryBlockDisableMergeAttribute>(set_mask);
}
if (clear_mask != 0) {
m_disable_merge_attribute = m_disable_merge_attribute &
static_cast<KMemoryBlockDisableMergeAttribute>(~clear_mask);
}
}
addr = split_addr;
num_pages -= block.num_pages;
constexpr void Split(KMemoryBlock* block, VAddr addr) {
ASSERT(this->GetAddress() < addr);
ASSERT(this->Contains(addr));
ASSERT(Common::IsAligned(addr, PageSize));
block->m_address = m_address;
block->m_num_pages = (addr - this->GetAddress()) / PageSize;
block->m_memory_state = m_memory_state;
block->m_ipc_lock_count = m_ipc_lock_count;
block->m_device_use_count = m_device_use_count;
block->m_permission = m_permission;
block->m_original_permission = m_original_permission;
block->m_attribute = m_attribute;
block->m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllLeft);
block->m_ipc_disable_merge_count = m_ipc_disable_merge_count;
block->m_device_disable_merge_left_count = m_device_disable_merge_left_count;
block->m_device_disable_merge_right_count = 0;
m_address = addr;
m_num_pages -= block->m_num_pages;
m_ipc_disable_merge_count = 0;
m_device_disable_merge_left_count = 0;
m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllRight);
}
constexpr void UpdateDeviceDisableMergeStateForShareLeft(
[[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) {
// New permission/right aren't used.
if (left) {
m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::DeviceLeft);
const u16 new_device_disable_merge_left_count = ++m_device_disable_merge_left_count;
ASSERT(new_device_disable_merge_left_count > 0);
}
}
constexpr void UpdateDeviceDisableMergeStateForShareRight(
[[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) {
// New permission/left aren't used.
if (right) {
m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::DeviceRight);
const u16 new_device_disable_merge_right_count = ++m_device_disable_merge_right_count;
ASSERT(new_device_disable_merge_right_count > 0);
}
}
constexpr void UpdateDeviceDisableMergeStateForShare(KMemoryPermission new_perm, bool left,
bool right) {
this->UpdateDeviceDisableMergeStateForShareLeft(new_perm, left, right);
this->UpdateDeviceDisableMergeStateForShareRight(new_perm, left, right);
}
constexpr void ShareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left,
bool right) {
// New permission isn't used.
// We must either be device-shared or have a zero device-use count.
ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared ||
m_device_use_count == 0);
// Share.
const u16 new_count = ++m_device_use_count;
ASSERT(new_count > 0);
m_attribute = static_cast<KMemoryAttribute>(m_attribute | KMemoryAttribute::DeviceShared);
this->UpdateDeviceDisableMergeStateForShare(new_perm, left, right);
}
constexpr void UpdateDeviceDisableMergeStateForUnshareLeft(
[[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) {
// New permission/right aren't used.
if (left) {
if (!m_device_disable_merge_left_count) {
return;
}
--m_device_disable_merge_left_count;
}
m_device_disable_merge_left_count =
std::min(m_device_disable_merge_left_count, m_device_use_count);
if (m_device_disable_merge_left_count == 0) {
m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
m_disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute::DeviceLeft);
}
}
constexpr void UpdateDeviceDisableMergeStateForUnshareRight(
[[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) {
// New permission/left aren't used.
if (right) {
const u16 old_device_disable_merge_right_count = m_device_disable_merge_right_count--;
ASSERT(old_device_disable_merge_right_count > 0);
if (old_device_disable_merge_right_count == 1) {
m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
m_disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute::DeviceRight);
}
}
}
constexpr void UpdateDeviceDisableMergeStateForUnshare(KMemoryPermission new_perm, bool left,
bool right) {
this->UpdateDeviceDisableMergeStateForUnshareLeft(new_perm, left, right);
this->UpdateDeviceDisableMergeStateForUnshareRight(new_perm, left, right);
}
constexpr void UnshareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left,
bool right) {
// New permission isn't used.
// We must be shared.
ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared);
// Unshare.
const u16 old_count = m_device_use_count--;
ASSERT(old_count > 0);
if (old_count == 1) {
m_attribute =
static_cast<KMemoryAttribute>(m_attribute & ~KMemoryAttribute::DeviceShared);
}
this->UpdateDeviceDisableMergeStateForUnshare(new_perm, left, right);
}
constexpr void UnshareToDeviceRight([[maybe_unused]] KMemoryPermission new_perm, bool left,
bool right) {
// New permission isn't used.
// We must be shared.
ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared);
// Unshare.
const u16 old_count = m_device_use_count--;
ASSERT(old_count > 0);
if (old_count == 1) {
m_attribute =
static_cast<KMemoryAttribute>(m_attribute & ~KMemoryAttribute::DeviceShared);
}
this->UpdateDeviceDisableMergeStateForUnshareRight(new_perm, left, right);
}
constexpr void LockForIpc(KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) {
// We must either be locked or have a zero lock count.
ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::IpcLocked ||
m_ipc_lock_count == 0);
// Lock.
const u16 new_lock_count = ++m_ipc_lock_count;
ASSERT(new_lock_count > 0);
// If this is our first lock, update our permissions.
if (new_lock_count == 1) {
ASSERT(m_original_permission == KMemoryPermission::None);
ASSERT((m_permission | new_perm | KMemoryPermission::NotMapped) ==
(m_permission | KMemoryPermission::NotMapped));
ASSERT((m_permission & KMemoryPermission::UserExecute) !=
KMemoryPermission::UserExecute ||
(new_perm == KMemoryPermission::UserRead));
m_original_permission = m_permission;
m_permission = static_cast<KMemoryPermission>(
(new_perm & KMemoryPermission::IpcLockChangeMask) |
(m_original_permission & ~KMemoryPermission::IpcLockChangeMask));
}
m_attribute = static_cast<KMemoryAttribute>(m_attribute | KMemoryAttribute::IpcLocked);
if (left) {
m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::IpcLeft);
const u16 new_ipc_disable_merge_count = ++m_ipc_disable_merge_count;
ASSERT(new_ipc_disable_merge_count > 0);
}
}
constexpr void UnlockForIpc([[maybe_unused]] KMemoryPermission new_perm, bool left,
[[maybe_unused]] bool right) {
// New permission isn't used.
// We must be locked.
ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::IpcLocked);
// Unlock.
const u16 old_lock_count = m_ipc_lock_count--;
ASSERT(old_lock_count > 0);
// If this is our last unlock, update our permissions.
if (old_lock_count == 1) {
ASSERT(m_original_permission != KMemoryPermission::None);
m_permission = m_original_permission;
m_original_permission = KMemoryPermission::None;
m_attribute = static_cast<KMemoryAttribute>(m_attribute & ~KMemoryAttribute::IpcLocked);
}
if (left) {
const u16 old_ipc_disable_merge_count = m_ipc_disable_merge_count--;
ASSERT(old_ipc_disable_merge_count > 0);
if (old_ipc_disable_merge_count == 1) {
m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
m_disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute::IpcLeft);
}
}
}
constexpr KMemoryBlockDisableMergeAttribute GetDisableMergeAttribute() const {
return m_disable_merge_attribute;
return block;
}
};
static_assert(std::is_trivially_destructible<KMemoryBlock>::value);
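The right-hand Split above returns a block covering [GetAddress(), split_addr) and shrinks *this to the remainder. An arithmetic sketch under an assumed 4 KiB PageSize: a block at 0x10000 with 8 pages split at 0x12000 yields a 2-page low block and leaves 6 pages at 0x12000.

#include <cstddef>
#include <cstdint>

constexpr std::size_t PageSize = 0x1000; // assumption for the example

struct Range {
    std::uintptr_t addr;
    std::size_t num_pages;
};

// Same address arithmetic as KMemoryBlock::Split, minus the property copies.
constexpr Range SplitRange(Range& r, std::uintptr_t split_addr) {
    Range low{r.addr, (split_addr - r.addr) / PageSize};
    r.addr = split_addr;
    r.num_pages -= low.num_pages;
    return low;
}

constexpr std::size_t LowPagesAfterSplit() {
    Range r{0x10000, 8};
    return SplitRange(r, 0x12000).num_pages;
}
static_assert(LowPagesAfterSplit() == 2);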

View File

@@ -2,336 +2,221 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/hle/kernel/k_memory_block_manager.h"
#include "core/hle/kernel/memory_types.h"
namespace Kernel {
KMemoryBlockManager::KMemoryBlockManager() = default;
Result KMemoryBlockManager::Initialize(VAddr st, VAddr nd, KMemoryBlockSlabManager* slab_manager) {
// Allocate a block to encapsulate the address space, insert it into the tree.
KMemoryBlock* start_block = slab_manager->Allocate();
R_UNLESS(start_block != nullptr, ResultOutOfResource);
// Set our start and end.
m_start_address = st;
m_end_address = nd;
ASSERT(Common::IsAligned(m_start_address, PageSize));
ASSERT(Common::IsAligned(m_end_address, PageSize));
// Initialize and insert the block.
start_block->Initialize(m_start_address, (m_end_address - m_start_address) / PageSize,
KMemoryState::Free, KMemoryPermission::None, KMemoryAttribute::None);
m_memory_block_tree.insert(*start_block);
R_SUCCEED();
KMemoryBlockManager::KMemoryBlockManager(VAddr start_addr_, VAddr end_addr_)
: start_addr{start_addr_}, end_addr{end_addr_} {
const u64 num_pages{(end_addr - start_addr) / PageSize};
memory_block_tree.emplace_back(start_addr, num_pages, KMemoryState::Free,
KMemoryPermission::None, KMemoryAttribute::None);
}
void KMemoryBlockManager::Finalize(KMemoryBlockSlabManager* slab_manager,
HostUnmapCallback&& host_unmap_callback) {
// Erase every block until we have none left.
auto it = m_memory_block_tree.begin();
while (it != m_memory_block_tree.end()) {
KMemoryBlock* block = std::addressof(*it);
it = m_memory_block_tree.erase(it);
slab_manager->Free(block);
host_unmap_callback(block->GetAddress(), block->GetSize());
KMemoryBlockManager::iterator KMemoryBlockManager::FindIterator(VAddr addr) {
auto node{memory_block_tree.begin()};
while (node != end()) {
const VAddr node_end_addr{node->GetNumPages() * PageSize + node->GetAddress()};
if (node->GetAddress() <= addr && node_end_addr - 1 >= addr) {
return node;
}
node = std::next(node);
}
return end();
}
VAddr KMemoryBlockManager::FindFreeArea(VAddr region_start, std::size_t region_num_pages,
std::size_t num_pages, std::size_t align,
std::size_t offset, std::size_t guard_pages) {
if (num_pages == 0) {
return {};
}
ASSERT(m_memory_block_tree.empty());
}
const VAddr region_end{region_start + region_num_pages * PageSize};
const VAddr region_last{region_end - 1};
for (auto it{FindIterator(region_start)}; it != memory_block_tree.cend(); it++) {
const auto info{it->GetMemoryInfo()};
if (region_last < info.GetAddress()) {
break;
}
VAddr KMemoryBlockManager::FindFreeArea(VAddr region_start, size_t region_num_pages,
size_t num_pages, size_t alignment, size_t offset,
size_t guard_pages) const {
if (num_pages > 0) {
const VAddr region_end = region_start + region_num_pages * PageSize;
const VAddr region_last = region_end - 1;
for (const_iterator it = this->FindIterator(region_start); it != m_memory_block_tree.cend();
it++) {
const KMemoryInfo info = it->GetMemoryInfo();
if (region_last < info.GetAddress()) {
break;
}
if (info.m_state != KMemoryState::Free) {
continue;
}
if (info.state != KMemoryState::Free) {
continue;
}
VAddr area = (info.GetAddress() <= region_start) ? region_start : info.GetAddress();
area += guard_pages * PageSize;
VAddr area{(info.GetAddress() <= region_start) ? region_start : info.GetAddress()};
area += guard_pages * PageSize;
const VAddr offset_area = Common::AlignDown(area, alignment) + offset;
area = (area <= offset_area) ? offset_area : offset_area + alignment;
const VAddr offset_area{Common::AlignDown(area, align) + offset};
area = (area <= offset_area) ? offset_area : offset_area + align;
const VAddr area_end = area + num_pages * PageSize + guard_pages * PageSize;
const VAddr area_last = area_end - 1;
const VAddr area_end{area + num_pages * PageSize + guard_pages * PageSize};
const VAddr area_last{area_end - 1};
if (info.GetAddress() <= area && area < area_last && area_last <= region_last &&
area_last <= info.GetLastAddress()) {
return area;
}
if (info.GetAddress() <= area && area < area_last && area_last <= region_last &&
area_last <= info.GetLastAddress()) {
return area;
}
}
return {};
}
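The candidate computation in the loop above aligns the cursor down, adds the requested offset, and bumps by one alignment if that landed below the cursor. Worked through once (AlignDown restated locally for a power-of-two alignment):

#include <cstdint>

constexpr std::uint64_t AlignDown(std::uint64_t v, std::uint64_t a) {
    return v & ~(a - 1);
}

// area = 0x13000, align = 0x4000, offset = 0x1000:
// AlignDown -> 0x10000, offset_area = 0x11000 < area, so bump to 0x15000.
constexpr std::uint64_t Candidate(std::uint64_t area, std::uint64_t align, std::uint64_t off) {
    const std::uint64_t offset_area = AlignDown(area, align) + off;
    return (area <= offset_area) ? offset_area : offset_area + align;
}

static_assert(Candidate(0x13000, 0x4000, 0x1000) == 0x15000);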
void KMemoryBlockManager::CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator* allocator,
VAddr address, size_t num_pages) {
// Find the iterator now that we've updated.
iterator it = this->FindIterator(address);
if (address != m_start_address) {
it--;
}
void KMemoryBlockManager::Update(VAddr addr, std::size_t num_pages, KMemoryState prev_state,
KMemoryPermission prev_perm, KMemoryAttribute prev_attribute,
KMemoryState state, KMemoryPermission perm,
KMemoryAttribute attribute) {
const VAddr update_end_addr{addr + num_pages * PageSize};
iterator node{memory_block_tree.begin()};
// Coalesce blocks that we can.
while (true) {
iterator prev = it++;
if (it == m_memory_block_tree.end()) {
prev_attribute |= KMemoryAttribute::IpcAndDeviceMapped;
while (node != memory_block_tree.end()) {
KMemoryBlock* block{&(*node)};
iterator next_node{std::next(node)};
const VAddr cur_addr{block->GetAddress()};
const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};
if (addr < cur_end_addr && cur_addr < update_end_addr) {
if (!block->HasProperties(prev_state, prev_perm, prev_attribute)) {
node = next_node;
continue;
}
iterator new_node{node};
if (addr > cur_addr) {
memory_block_tree.insert(node, block->Split(addr));
}
if (update_end_addr < cur_end_addr) {
new_node = memory_block_tree.insert(node, block->Split(update_end_addr));
}
new_node->Update(state, perm, attribute);
MergeAdjacent(new_node, next_node);
}
if (cur_end_addr - 1 >= update_end_addr - 1) {
break;
}
if (prev->CanMergeWith(*it)) {
KMemoryBlock* block = std::addressof(*it);
m_memory_block_tree.erase(it);
prev->Add(*block);
allocator->Free(block);
it = prev;
node = next_node;
}
}
void KMemoryBlockManager::Update(VAddr addr, std::size_t num_pages, KMemoryState state,
KMemoryPermission perm, KMemoryAttribute attribute) {
const VAddr update_end_addr{addr + num_pages * PageSize};
iterator node{memory_block_tree.begin()};
while (node != memory_block_tree.end()) {
KMemoryBlock* block{&(*node)};
iterator next_node{std::next(node)};
const VAddr cur_addr{block->GetAddress()};
const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};
if (addr < cur_end_addr && cur_addr < update_end_addr) {
iterator new_node{node};
if (addr > cur_addr) {
memory_block_tree.insert(node, block->Split(addr));
}
if (update_end_addr < cur_end_addr) {
new_node = memory_block_tree.insert(node, block->Split(update_end_addr));
}
new_node->Update(state, perm, attribute);
MergeAdjacent(new_node, next_node);
}
if (address + num_pages * PageSize < it->GetMemoryInfo().GetEndAddress()) {
if (cur_end_addr - 1 >= update_end_addr - 1) {
break;
}
node = next_node;
}
}
void KMemoryBlockManager::Update(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
size_t num_pages, KMemoryState state, KMemoryPermission perm,
KMemoryAttribute attr,
KMemoryBlockDisableMergeAttribute set_disable_attr,
KMemoryBlockDisableMergeAttribute clear_disable_attr) {
// Ensure for auditing that we never end up with an invalid tree.
KScopedMemoryBlockManagerAuditor auditor(this);
ASSERT(Common::IsAligned(address, PageSize));
ASSERT((attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) ==
KMemoryAttribute::None);
VAddr cur_address = address;
size_t remaining_pages = num_pages;
iterator it = this->FindIterator(address);
while (remaining_pages > 0) {
const size_t remaining_size = remaining_pages * PageSize;
KMemoryInfo cur_info = it->GetMemoryInfo();
if (it->HasProperties(state, perm, attr)) {
// If we already have the right properties, just advance.
if (cur_address + remaining_size < cur_info.GetEndAddress()) {
remaining_pages = 0;
cur_address += remaining_size;
} else {
remaining_pages =
(cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize;
cur_address = cur_info.GetEndAddress();
}
} else {
// If we need to, create a new block before and insert it.
if (cur_info.GetAddress() != cur_address) {
KMemoryBlock* new_block = allocator->Allocate();
it->Split(new_block, cur_address);
it = m_memory_block_tree.insert(*new_block);
it++;
cur_info = it->GetMemoryInfo();
cur_address = cur_info.GetAddress();
}
// If we need to, create a new block after and insert it.
if (cur_info.GetSize() > remaining_size) {
KMemoryBlock* new_block = allocator->Allocate();
it->Split(new_block, cur_address + remaining_size);
it = m_memory_block_tree.insert(*new_block);
cur_info = it->GetMemoryInfo();
}
// Update block state.
it->Update(state, perm, attr, cur_address == address, static_cast<u8>(set_disable_attr),
static_cast<u8>(clear_disable_attr));
cur_address += cur_info.GetSize();
remaining_pages -= cur_info.GetNumPages();
}
it++;
}
this->CoalesceForUpdate(allocator, address, num_pages);
}
void KMemoryBlockManager::UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator,
VAddr address, size_t num_pages, KMemoryState test_state,
KMemoryPermission test_perm, KMemoryAttribute test_attr,
KMemoryState state, KMemoryPermission perm,
KMemoryAttribute attr) {
// Ensure for auditing that we never end up with an invalid tree.
KScopedMemoryBlockManagerAuditor auditor(this);
ASSERT(Common::IsAligned(address, PageSize));
ASSERT((attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) ==
KMemoryAttribute::None);
VAddr cur_address = address;
size_t remaining_pages = num_pages;
iterator it = this->FindIterator(address);
while (remaining_pages > 0) {
const size_t remaining_size = remaining_pages * PageSize;
KMemoryInfo cur_info = it->GetMemoryInfo();
if (it->HasProperties(test_state, test_perm, test_attr) &&
!it->HasProperties(state, perm, attr)) {
// If we need to, create a new block before and insert it.
if (cur_info.GetAddress() != cur_address) {
KMemoryBlock* new_block = allocator->Allocate();
it->Split(new_block, cur_address);
it = m_memory_block_tree.insert(*new_block);
it++;
cur_info = it->GetMemoryInfo();
cur_address = cur_info.GetAddress();
}
// If we need to, create a new block after and insert it.
if (cur_info.GetSize() > remaining_size) {
KMemoryBlock* new_block = allocator->Allocate();
it->Split(new_block, cur_address + remaining_size);
it = m_memory_block_tree.insert(*new_block);
cur_info = it->GetMemoryInfo();
}
// Update block state.
it->Update(state, perm, attr, false, 0, 0);
cur_address += cur_info.GetSize();
remaining_pages -= cur_info.GetNumPages();
} else {
// If we already have the right properties, just advance.
if (cur_address + remaining_size < cur_info.GetEndAddress()) {
remaining_pages = 0;
cur_address += remaining_size;
} else {
remaining_pages =
(cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize;
cur_address = cur_info.GetEndAddress();
}
}
it++;
}
this->CoalesceForUpdate(allocator, address, num_pages);
}
void KMemoryBlockManager::UpdateLock(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
size_t num_pages, MemoryBlockLockFunction lock_func,
void KMemoryBlockManager::UpdateLock(VAddr addr, std::size_t num_pages, LockFunc&& lock_func,
KMemoryPermission perm) {
// Ensure for auditing that we never end up with an invalid tree.
KScopedMemoryBlockManagerAuditor auditor(this);
ASSERT(Common::IsAligned(address, PageSize));
const VAddr update_end_addr{addr + num_pages * PageSize};
iterator node{memory_block_tree.begin()};
VAddr cur_address = address;
size_t remaining_pages = num_pages;
iterator it = this->FindIterator(address);
while (node != memory_block_tree.end()) {
KMemoryBlock* block{&(*node)};
iterator next_node{std::next(node)};
const VAddr cur_addr{block->GetAddress()};
const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};
const VAddr end_address = address + (num_pages * PageSize);
if (addr < cur_end_addr && cur_addr < update_end_addr) {
iterator new_node{node};
while (remaining_pages > 0) {
const size_t remaining_size = remaining_pages * PageSize;
KMemoryInfo cur_info = it->GetMemoryInfo();
if (addr > cur_addr) {
memory_block_tree.insert(node, block->Split(addr));
}
// If we need to, create a new block before and insert it.
if (cur_info.m_address != cur_address) {
KMemoryBlock* new_block = allocator->Allocate();
if (update_end_addr < cur_end_addr) {
new_node = memory_block_tree.insert(node, block->Split(update_end_addr));
}
it->Split(new_block, cur_address);
it = m_memory_block_tree.insert(*new_block);
it++;
lock_func(new_node, perm);
cur_info = it->GetMemoryInfo();
cur_address = cur_info.GetAddress();
MergeAdjacent(new_node, next_node);
}
if (cur_info.GetSize() > remaining_size) {
// If we need to, create a new block after and insert it.
KMemoryBlock* new_block = allocator->Allocate();
it->Split(new_block, cur_address + remaining_size);
it = m_memory_block_tree.insert(*new_block);
cur_info = it->GetMemoryInfo();
if (cur_end_addr - 1 >= update_end_addr - 1) {
break;
}
// Call the locked update function.
(std::addressof(*it)->*lock_func)(perm, cur_info.GetAddress() == address,
cur_info.GetEndAddress() == end_address);
cur_address += cur_info.GetSize();
remaining_pages -= cur_info.GetNumPages();
it++;
node = next_node;
}
this->CoalesceForUpdate(allocator, address, num_pages);
}
// Debug.
bool KMemoryBlockManager::CheckState() const {
// Loop over every block, ensuring that we are sorted and coalesced.
auto it = m_memory_block_tree.cbegin();
auto prev = it++;
while (it != m_memory_block_tree.cend()) {
const KMemoryInfo prev_info = prev->GetMemoryInfo();
const KMemoryInfo cur_info = it->GetMemoryInfo();
void KMemoryBlockManager::IterateForRange(VAddr start, VAddr end, IterateFunc&& func) {
const_iterator it{FindIterator(start)};
KMemoryInfo info{};
do {
info = it->GetMemoryInfo();
func(info);
it = std::next(it);
} while (info.addr + info.size - 1 < end - 1 && it != cend());
}
// Sequential blocks which can be merged should be merged.
if (prev->CanMergeWith(*it)) {
return false;
void KMemoryBlockManager::MergeAdjacent(iterator it, iterator& next_it) {
KMemoryBlock* block{&(*it)};
auto EraseIt = [&](const iterator it_to_erase) {
if (next_it == it_to_erase) {
next_it = std::next(next_it);
}
memory_block_tree.erase(it_to_erase);
};
// Sequential blocks should be sequential.
if (prev_info.GetEndAddress() != cur_info.GetAddress()) {
return false;
}
if (it != memory_block_tree.begin()) {
KMemoryBlock* prev{&(*std::prev(it))};
// If the block is ipc locked, it must have a count.
if ((cur_info.m_attribute & KMemoryAttribute::IpcLocked) != KMemoryAttribute::None &&
cur_info.m_ipc_lock_count == 0) {
return false;
}
if (block->HasSameProperties(*prev)) {
const iterator prev_it{std::prev(it)};
// If the block is device shared, it must have a count.
if ((cur_info.m_attribute & KMemoryAttribute::DeviceShared) != KMemoryAttribute::None &&
cur_info.m_device_use_count == 0) {
return false;
}
prev->Add(block->GetNumPages());
EraseIt(it);
// Advance the iterator.
prev = it++;
}
// Our loop can miss checking the last block, so check it explicitly.
if (prev != m_memory_block_tree.cend()) {
const KMemoryInfo prev_info = prev->GetMemoryInfo();
// If the block is ipc locked, it must have a count.
if ((prev_info.m_attribute & KMemoryAttribute::IpcLocked) != KMemoryAttribute::None &&
prev_info.m_ipc_lock_count == 0) {
return false;
}
// If the block is device shared, it must have a count.
if ((prev_info.m_attribute & KMemoryAttribute::DeviceShared) != KMemoryAttribute::None &&
prev_info.m_device_use_count == 0) {
return false;
it = prev_it;
block = prev;
}
}
return true;
if (it != cend()) {
const KMemoryBlock* const next{&(*std::next(it))};
if (block->HasSameProperties(*next)) {
block->Add(next->GetNumPages());
EraseIt(std::next(it));
}
}
}
} // namespace Kernel
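MergeAdjacent above absorbs equal-property neighbors on both sides of the updated block. A container-level sketch of the same idea on a sorted std::list (adjacency of consecutive entries is assumed here, as it is implicit in the kernel's sorted block container):

#include <cstddef>
#include <cstdint>
#include <list>

struct Span {
    std::uintptr_t addr;
    std::size_t pages;
    int props; // stands in for state/perm/attribute
};

// Merge *it with its previous and next neighbors when properties match.
void MergeNeighbors(std::list<Span>& spans, std::list<Span>::iterator it) {
    if (it != spans.begin()) {
        auto prev = std::prev(it);
        if (prev->props == it->props) {
            prev->pages += it->pages;
            spans.erase(it);
            it = prev;
        }
    }
    if (auto next = std::next(it); next != spans.end() && next->props == it->props) {
        it->pages += next->pages;
        spans.erase(next);
    }
}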

Some files were not shown because too many files have changed in this diff.