Compare commits
121 Commits
| SHA1 |
|---|
| f7ecc59f92 |
| 756f013d8a |
| 9c94faaa2b |
| 6775a6ee02 |
| 1ab052952d |
| b2772bcb0d |
| ff4fbaf152 |
| 986336b0d8 |
| 2ddecb9631 |
| 0730dc6c44 |
| 083d913eab |
| fbf0a9c976 |
| 11edba4974 |
| 8e84381038 |
| e446f368d7 |
| 668eb5b8da |
| 8bdc51b620 |
| 9efd95cda5 |
| 9c96d40586 |
| 40efd2ab56 |
| cdc846677c |
| 981bc8aa1c |
| e1bce50d8b |
| 27c33ab73f |
| d2cfe25b07 |
| 530fe24768 |
| 237934b736 |
| ea5dd02db9 |
| fbbf532d42 |
| c2c7386dfd |
| 2fbadc7e1f |
| 0661f5ccd1 |
| 3b30f5d823 |
| d8fc3f403b |
| 8da1a4ea22 |
| 8802646730 |
| 1476ffd865 |
| 568d523746 |
| 7017f04ee8 |
| 4b508655a4 |
| 49f6deecb8 |
| f09d192aac |
| 9971cd1d55 |
| c4f5615c6b |
| 50a59487eb |
| 950db851ea |
| 09da9da6fb |
| 6892a0942f |
| f38ae8e953 |
| cfb9672093 |
| 462c430c8b |
| 5a2dff87bf |
| 7a8a7545f2 |
| abe2ad7aac |
| 877e8991c7 |
| 032e5b983c |
| 41d99aa89d |
| ac3927074b |
| c41a4baf06 |
| 6adaa0d5e2 |
| fb49ec19c1 |
| 197d756560 |
| 8c56481249 |
| 6ff4bf9b1c |
| dba86ee007 |
| 407dc917f1 |
| 15d573194c |
| f28ca5361f |
| 306840a580 |
| 3d4c113037 |
| 230d118252 |
| b9b1318bea |
| 43d909949e |
| 00d401d639 |
| 0e7e98e24e |
| 0eb3fa05e5 |
| 889454f9bf |
| 8bcaa8c2e4 |
| c95baf92ce |
| a7651168dd |
| 075a3d1172 |
| 6d76a54d37 |
| a04061e6ae |
| 7187732454 |
| 5031f5b8b0 |
| da83afdeaf |
| 026fe2e4f4 |
| 0c7149d222 |
| 05f26e1337 |
| 4c678cfbc8 |
| 8870fae674 |
| 8348c41eab |
| 638044820d |
| 1f952f6ac9 |
| c352381ce9 |
| 9775a73d1a |
| 088c434d65 |
| 9863db9db4 |
| 6bfb4c8f71 |
| ac6cbb7134 |
| 641783df8f |
| c0b9e93b77 |
| 9368e17a92 |
| 91fd4e30f2 |
| 57f1d8ef8d |
| d1b53c8d82 |
| 7322c99e5f |
| 467adc1acd |
| 0483dfae1a |
| 8d1f5bfbd2 |
| fdf90c6d75 |
| 097c25b164 |
| d24ab14126 |
| 3f261f22c9 |
| b27aa2ccca |
| 44f10c8dee |
| 4e42ba54e5 |
| e090a1c6bd |
| e8af3f29d2 |
| c8ad039612 |
| 44518b225c |
@@ -210,7 +210,7 @@ set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR}/bin)
 # =======================================================================
 
 # Enforce the search mode of non-required packages for better and shorter failure messages
-find_package(Boost 1.73.0 REQUIRED context)
+find_package(Boost 1.79.0 REQUIRED context)
 find_package(enet 1.3 MODULE)
 find_package(fmt 9 REQUIRED)
 find_package(inih 52 MODULE COMPONENTS INIReader)
@@ -222,7 +222,7 @@ find_package(ZLIB 1.2 REQUIRED)
 find_package(zstd 1.5 REQUIRED)
 
 if (NOT YUZU_USE_EXTERNAL_VULKAN_HEADERS)
-    find_package(Vulkan 1.3.238 REQUIRED)
+    find_package(Vulkan 1.3.246 REQUIRED)
 endif()
 
 if (ENABLE_LIBUSB)
Submodule externals/Vulkan-Headers updated: 00671c64ba...63af1cf1ee
Submodule externals/dynarmic updated: 165621a872...c08c5a9362
Submodule externals/vcpkg updated: 9b22b40c6c...a7b6122f6b
@@ -93,7 +93,7 @@ void DeviceSession::AppendBuffers(std::span<const AudioBuffer> buffers) const {
             stream->AppendBuffer(new_buffer, samples);
         } else {
             std::vector<s16> samples(buffer.size / sizeof(s16));
-            system.Memory().ReadBlockUnsafe(buffer.samples, samples.data(), buffer.size);
+            system.ApplicationMemory().ReadBlockUnsafe(buffer.samples, samples.data(), buffer.size);
             stream->AppendBuffer(new_buffer, samples);
         }
     }
@@ -102,7 +102,7 @@ void DeviceSession::AppendBuffers(std::span<const AudioBuffer> buffers) const {
 void DeviceSession::ReleaseBuffer(const AudioBuffer& buffer) const {
     if (type == Sink::StreamType::In) {
         auto samples{stream->ReleaseBuffer(buffer.size / sizeof(s16))};
-        system.Memory().WriteBlockUnsafe(buffer.samples, samples.data(), buffer.size);
+        system.ApplicationMemory().WriteBlockUnsafe(buffer.samples, samples.data(), buffer.size);
     }
 }
 
@@ -121,8 +121,7 @@ u64 DeviceSession::GetPlayedSampleCount() const {
 }
 
 std::optional<std::chrono::nanoseconds> DeviceSession::ThreadFunc() {
-    // Add 5ms of samples at a 48K sample rate.
-    played_sample_count += 48'000 * INCREMENT_TIME / 1s;
+    played_sample_count = stream->GetExpectedPlayedSampleCount();
     if (type == Sink::StreamType::Out) {
        system.AudioCore().GetAudioManager().SetEvent(Event::Type::AudioOutManager, true);
     } else {
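Note: the removed increment credited a fixed 48,000 samples/s x 5 ms = 240 samples per tick whether or not the backend actually consumed them; querying the stream instead keeps the reported count in step with real playback.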
@@ -13,7 +13,7 @@
 namespace AudioCore::AudioRenderer::ADSP {
 
 ADSP::ADSP(Core::System& system_, Sink::Sink& sink_)
-    : system{system_}, memory{system.Memory()}, sink{sink_} {}
+    : system{system_}, memory{system.ApplicationMemory()}, sink{sink_} {}
 
 ADSP::~ADSP() {
     ClearCommandBuffers();
@@ -189,6 +189,8 @@ void AudioRenderer::ThreadFunc() {
                 max_time = std::min(command_buffer.time_limit, max_time);
                 command_list_processor.SetProcessTimeMax(max_time);
 
+                streams[index]->WaitFreeSpace();
+
                 // Process the command list
                 {
                     MICROPROFILE_SCOPE(Audio_Renderer);
@@ -10,6 +10,7 @@
 #include "audio_core/renderer/adsp/command_buffer.h"
 #include "audio_core/renderer/adsp/command_list_processor.h"
 #include "common/common_types.h"
+#include "common/polyfill_thread.h"
 #include "common/reader_writer_queue.h"
 #include "common/thread.h"
 
@@ -17,7 +17,7 @@ namespace AudioCore::AudioRenderer::ADSP {
 void CommandListProcessor::Initialize(Core::System& system_, CpuAddr buffer, u64 size,
                                       Sink::SinkStream* stream_) {
     system = &system_;
-    memory = &system->Memory();
+    memory = &system->ApplicationMemory();
     stream = stream_;
     header = reinterpret_cast<CommandListHeader*>(buffer);
     commands = reinterpret_cast<u8*>(buffer + sizeof(CommandListHeader));
@@ -127,8 +127,7 @@ Result System::Initialize(const AudioRendererParameterInternal& params,
     render_device = params.rendering_device;
     execution_mode = params.execution_mode;
 
-    core.Memory().ZeroBlock(*core.ApplicationProcess(), transfer_memory->GetSourceAddress(),
-                            transfer_memory_size);
+    core.ApplicationMemory().ZeroBlock(transfer_memory->GetSourceAddress(), transfer_memory_size);
 
     // Note: We're not actually using the transfer memory because it's a pain to code for.
     // Allocate the memory normally instead and hope the game doesn't try to read anything back
@@ -15,14 +15,9 @@ MICROPROFILE_DEFINE(Audio_RenderSystemManager, "Audio", "Render System Manager",
              MP_RGB(60, 19, 97));
 
 namespace AudioCore::AudioRenderer {
-constexpr std::chrono::nanoseconds RENDER_TIME{5'000'000UL};
 
 SystemManager::SystemManager(Core::System& core_)
-    : core{core_}, adsp{core.AudioCore().GetADSP()}, mailbox{adsp.GetRenderMailbox()},
-      thread_event{Core::Timing::CreateEvent(
-          "AudioRendererSystemManager", [this](std::uintptr_t, s64 time, std::chrono::nanoseconds) {
-              return ThreadFunc2(time);
-          })} {}
+    : core{core_}, adsp{core.AudioCore().GetADSP()}, mailbox{adsp.GetRenderMailbox()} {}
 
 SystemManager::~SystemManager() {
     Stop();
@@ -33,8 +28,6 @@ bool SystemManager::InitializeUnsafe() {
         if (adsp.Start()) {
             active = true;
             thread = std::jthread([this](std::stop_token stop_token) { ThreadFunc(); });
-            core.CoreTiming().ScheduleLoopingEvent(std::chrono::nanoseconds(0), RENDER_TIME,
-                                                   thread_event);
         }
     }
 
@@ -45,7 +38,6 @@ void SystemManager::Stop() {
     if (!active) {
         return;
     }
-    core.CoreTiming().UnscheduleEvent(thread_event, {});
     active = false;
     update.store(true);
     update.notify_all();
@@ -111,16 +103,7 @@ void SystemManager::ThreadFunc() {
 
         adsp.Signal();
         adsp.Wait();
-
-        update.wait(false);
-        update.store(false);
     }
 }
 
-std::optional<std::chrono::nanoseconds> SystemManager::ThreadFunc2(s64 time) {
-    update.store(true);
-    update.notify_all();
-    return std::nullopt;
-}
-
 } // namespace AudioCore::AudioRenderer
@@ -68,11 +68,6 @@ private:
      */
     void ThreadFunc();
 
-    /**
-     * Signalling core timing thread to run ThreadFunc.
-     */
-    std::optional<std::chrono::nanoseconds> ThreadFunc2(s64 time);
-
     enum class StreamState {
         Filling,
         Steady,
@@ -95,8 +90,6 @@ private:
     ADSP::ADSP& adsp;
     /// AudioRenderer mailbox for communication
     ADSP::AudioRenderer_Mailbox* mailbox{};
-    /// Core timing event to signal main thread
-    std::shared_ptr<Core::Timing::EventType> thread_event;
     /// Atomic for main thread to wait on
     std::atomic<bool> update{};
 };
@@ -101,8 +101,6 @@ public:
     ~CubebSinkStream() override {
         LOG_DEBUG(Service_Audio, "Destructing cubeb stream {}", name);
 
-        Unstall();
-
         if (!ctx) {
             return;
         }
@@ -143,8 +141,6 @@ public:
      * Stop the sink stream.
      */
     void Stop() override {
-        Unstall();
-
         if (!ctx || paused) {
             return;
         }
@@ -88,7 +88,6 @@ public:
      * Finalize the sink stream.
      */
     void Finalize() override {
-        Unstall();
         if (device == 0) {
             return;
         }
@@ -116,7 +115,6 @@ public:
      * Stop the sink stream.
      */
     void Stop() override {
-        Unstall();
         if (device == 0 || paused) {
             return;
         }
@@ -14,6 +14,8 @@
 #include "common/fixed_point.h"
 #include "common/settings.h"
 #include "core/core.h"
+#include "core/core_timing.h"
+#include "core/core_timing_util.h"
 
 namespace AudioCore::Sink {
 
@@ -149,10 +151,6 @@ void SinkStream::ProcessAudioIn(std::span<const s16> input_buffer, std::size_t n
         return;
     }
 
-    if (queued_buffers > max_queue_size) {
-        Stall();
-    }
-
     while (frames_written < num_frames) {
         // If the playing buffer has been consumed or has no frames, we need a new one
         if (playing_buffer.consumed || playing_buffer.frames == 0) {
@@ -187,10 +185,6 @@ void SinkStream::ProcessAudioIn(std::span<const s16> input_buffer, std::size_t n
     }
 
     std::memcpy(&last_frame[0], &input_buffer[(frames_written - 1) * frame_size], frame_size_bytes);
-
-    if (queued_buffers <= max_queue_size) {
-        Unstall();
-    }
 }
 
 void SinkStream::ProcessAudioOutAndRender(std::span<s16> output_buffer, std::size_t num_frames) {
@@ -198,10 +192,15 @@ void SinkStream::ProcessAudioOutAndRender(std::span<s16> output_buffer, std::siz
     const std::size_t frame_size = num_channels;
     const std::size_t frame_size_bytes = frame_size * sizeof(s16);
     size_t frames_written{0};
+    size_t actual_frames_written{0};
 
     // If we're paused or going to shut down, we don't want to consume buffers as coretiming is
     // paused and we'll desync, so just play silence.
     if (system.IsPaused() || system.IsShuttingDown()) {
+        if (system.IsShuttingDown()) {
+            release_cv.notify_one();
+        }
+
         static constexpr std::array<s16, 6> silence{};
         for (size_t i = frames_written; i < num_frames; i++) {
             std::memcpy(&output_buffer[i * frame_size], &silence[0], frame_size_bytes);
@@ -209,20 +208,6 @@ void SinkStream::ProcessAudioOutAndRender(std::span<s16> output_buffer, std::siz
         return;
     }
 
-    // Due to many frames being queued up with nvdec (5 frames or so?), a lot of buffers also get
-    // queued up (30+) but not all at once, which causes constant stalling here, so just let the
-    // video play out without attempting to stall.
-    // Can hopefully remove this later with a more complete NVDEC implementation.
-    const auto nvdec_active{system.AudioCore().IsNVDECActive()};
-
-    // Core timing cannot be paused in single-core mode, so Stall ends up being called over and over
-    // and never recovers to a normal state, so just skip attempting to sync things on single-core.
-    if (system.IsMulticore() && !nvdec_active && queued_buffers > max_queue_size) {
-        Stall();
-    } else if (system.IsMulticore() && queued_buffers <= max_queue_size) {
-        Unstall();
-    }
-
     while (frames_written < num_frames) {
         // If the playing buffer has been consumed or has no frames, we need a new one
         if (playing_buffer.consumed || playing_buffer.frames == 0) {
@@ -237,6 +222,10 @@ void SinkStream::ProcessAudioOutAndRender(std::span<s16> output_buffer, std::siz
             }
             // Successfully dequeued a new buffer.
             queued_buffers--;
+
+            { std::unique_lock lk{release_mutex}; }
+
+            release_cv.notify_one();
         }
 
         // Get the minimum frames available between the currently playing buffer, and the
@@ -248,6 +237,7 @@ void SinkStream::ProcessAudioOutAndRender(std::span<s16> output_buffer, std::siz
                     frames_available * frame_size);
 
         frames_written += frames_available;
+        actual_frames_written += frames_available;
         playing_buffer.frames_played += frames_available;
 
         // If that's all the frames in the current buffer, add its samples and mark it as
@@ -260,26 +250,29 @@ void SinkStream::ProcessAudioOutAndRender(std::span<s16> output_buffer, std::siz
     std::memcpy(&last_frame[0], &output_buffer[(frames_written - 1) * frame_size],
                 frame_size_bytes);
 
-    if (system.IsMulticore() && queued_buffers <= max_queue_size) {
-        Unstall();
+    {
+        std::scoped_lock lk{sample_count_lock};
+        last_sample_count_update_time =
+            Core::Timing::CyclesToUs(system.CoreTiming().GetClockTicks());
+        min_played_sample_count = max_played_sample_count;
+        max_played_sample_count += actual_frames_written;
     }
 }
 
-void SinkStream::Stall() {
-    std::scoped_lock lk{stall_guard};
-    if (stalled_lock) {
-        return;
-    }
-    stalled_lock = system.StallApplication();
+u64 SinkStream::GetExpectedPlayedSampleCount() {
+    std::scoped_lock lk{sample_count_lock};
+    auto cur_time{Core::Timing::CyclesToUs(system.CoreTiming().GetClockTicks())};
+    auto time_delta{cur_time - last_sample_count_update_time};
+    auto exp_played_sample_count{min_played_sample_count +
+                                 (TargetSampleRate * time_delta) / std::chrono::seconds{1}};
+
+    return std::min<u64>(exp_played_sample_count, max_played_sample_count);
 }
 
-void SinkStream::Unstall() {
-    std::scoped_lock lk{stall_guard};
-    if (!stalled_lock) {
-        return;
-    }
-    system.UnstallApplication();
-    stalled_lock.unlock();
+void SinkStream::WaitFreeSpace() {
+    std::unique_lock lk{release_mutex};
+    release_cv.wait(
+        lk, [this]() { return queued_buffers < max_queue_size || system.IsShuttingDown(); });
 }
 
 } // namespace AudioCore::Sink
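Note: GetExpectedPlayedSampleCount() linearly interpolates between the two counters snapshotted at the last backend callback, clamped so the estimate never runs past what was actually handed to the backend. A self-contained sketch of the same math (names are illustrative, not yuzu API):

    #include <algorithm>
    #include <chrono>
    #include <cstdint>

    struct SampleTracker {
        std::uint64_t min_count{};               // samples certainly played at the last update
        std::uint64_t max_count{};               // min_count + frames handed to the backend
        std::chrono::microseconds last_update{}; // emulated time of the last update

        // Advance from min_count at the nominal rate, but never past max_count.
        std::uint64_t Expected(std::chrono::microseconds now, std::uint64_t sample_rate) const {
            const auto dt = now - last_update;
            const std::uint64_t estimate = min_count + (sample_rate * dt) / std::chrono::seconds{1};
            return std::min(estimate, max_count);
        }
    };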
@@ -5,6 +5,7 @@
 
 #include <array>
 #include <atomic>
+#include <chrono>
 #include <memory>
 #include <mutex>
 #include <span>
@@ -14,6 +15,7 @@
 #include "common/common_types.h"
 #include "common/reader_writer_queue.h"
 #include "common/ring_buffer.h"
+#include "common/thread.h"
 
 namespace Core {
 class System;
@@ -53,9 +55,7 @@ struct SinkBuffer {
 class SinkStream {
 public:
     explicit SinkStream(Core::System& system_, StreamType type_) : system{system_}, type{type_} {}
-    virtual ~SinkStream() {
-        Unstall();
-    }
+    virtual ~SinkStream() {}
 
     /**
      * Finalize the sink stream.
@@ -201,14 +201,16 @@ public:
     void ProcessAudioOutAndRender(std::span<s16> output_buffer, std::size_t num_frames);
 
     /**
-     * Stall core processes if the audio thread falls too far behind.
+     * Get the total number of samples expected to have been played by this stream.
+     *
+     * @return The number of samples.
      */
-    void Stall();
+    u64 GetExpectedPlayedSampleCount();
 
     /**
-     * Unstall core processes.
+     * Waits for free space in the sample ring buffer
      */
-    void Unstall();
+    void WaitFreeSpace();
 
 protected:
     /// Core system
@@ -237,12 +239,21 @@ private:
     std::atomic<u32> queued_buffers{};
     /// The ring size for audio out buffers (usually 4, rarely 2 or 8)
     u32 max_queue_size{};
+    /// Locks access to sample count tracking info
+    std::mutex sample_count_lock;
+    /// Minimum number of total samples that have been played since the last callback
+    u64 min_played_sample_count{};
+    /// Maximum number of total samples that can be played since the last callback
+    u64 max_played_sample_count{};
+    /// The time the two above tracking variables were last written to
+    std::chrono::microseconds last_sample_count_update_time{};
     /// Set by the audio render/in/out system which uses this stream
     f32 system_volume{1.0f};
     /// Set via IAudioDevice service calls
     f32 device_volume{1.0f};
-    std::mutex stall_guard;
-    std::unique_lock<std::mutex> stalled_lock;
+    /// Signalled when ring buffer entries are consumed
+    std::condition_variable release_cv;
+    std::mutex release_mutex;
 };
 
 using SinkStreamPtr = std::unique_ptr<SinkStream>;
@@ -38,6 +38,7 @@ add_library(common STATIC
     common_precompiled_headers.h
     common_types.h
     concepts.h
+    container_hash.h
     demangle.cpp
     demangle.h
     div_ceil.h
@@ -132,6 +133,7 @@ add_library(common STATIC
     time_zone.h
     tiny_mt.h
     tree.h
+    typed_address.h
     uint128.h
     unique_function.h
     uuid.cpp
@@ -158,6 +160,8 @@ if(ARCHITECTURE_x86_64)
         PRIVATE
             x64/cpu_detect.cpp
             x64/cpu_detect.h
+            x64/cpu_wait.cpp
+            x64/cpu_wait.h
             x64/native_clock.cpp
             x64/native_clock.h
             x64/xbyak_abi.h
@@ -1,158 +1,249 @@
-// SPDX-FileCopyrightText: Copyright (c) 2020 Erik Rigtorp <erik@rigtorp.se>
-// SPDX-License-Identifier: MIT
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
 
 #pragma once
 
 #include <atomic>
-#include <bit>
 #include <condition_variable>
-#include <memory>
+#include <cstddef>
 #include <mutex>
 #include <new>
 #include <stop_token>
 #include <type_traits>
 #include <utility>
 
 #include "common/polyfill_thread.h"
 
 namespace Common {
 
-#if defined(__cpp_lib_hardware_interference_size)
-constexpr size_t hardware_interference_size = std::hardware_destructive_interference_size;
-#else
-constexpr size_t hardware_interference_size = 64;
-#endif
-
-template <typename T, size_t capacity = 0x400>
-class MPSCQueue {
-public:
-    explicit MPSCQueue() : allocator{std::allocator<Slot<T>>()} {
-        // Allocate one extra slot to prevent false sharing on the last slot
-        slots = allocator.allocate(capacity + 1);
-        // Allocators are not required to honor alignment for over-aligned types
-        // (see http://eel.is/c++draft/allocator.requirements#10) so we verify
-        // alignment here
-        if (reinterpret_cast<uintptr_t>(slots) % alignof(Slot<T>) != 0) {
-            allocator.deallocate(slots, capacity + 1);
-            throw std::bad_alloc();
-        }
-        for (size_t i = 0; i < capacity; ++i) {
-            std::construct_at(&slots[i]);
-        }
-        static_assert(std::has_single_bit(capacity), "capacity must be an integer power of 2");
-        static_assert(alignof(Slot<T>) == hardware_interference_size,
-                      "Slot must be aligned to cache line boundary to prevent false sharing");
-        static_assert(sizeof(Slot<T>) % hardware_interference_size == 0,
-                      "Slot size must be a multiple of cache line size to prevent "
-                      "false sharing between adjacent slots");
-        static_assert(sizeof(MPSCQueue) % hardware_interference_size == 0,
-                      "Queue size must be a multiple of cache line size to "
-                      "prevent false sharing between adjacent queues");
-    }
-
-    ~MPSCQueue() noexcept {
-        for (size_t i = 0; i < capacity; ++i) {
-            std::destroy_at(&slots[i]);
-        }
-        allocator.deallocate(slots, capacity + 1);
-    }
-
-    // The queue must be both non-copyable and non-movable
-    MPSCQueue(const MPSCQueue&) = delete;
-    MPSCQueue& operator=(const MPSCQueue&) = delete;
-
-    MPSCQueue(MPSCQueue&&) = delete;
-    MPSCQueue& operator=(MPSCQueue&&) = delete;
-
-    void Push(const T& v) noexcept {
-        static_assert(std::is_nothrow_copy_constructible_v<T>,
-                      "T must be nothrow copy constructible");
-        emplace(v);
-    }
-
-    template <typename P, typename = std::enable_if_t<std::is_nothrow_constructible_v<T, P&&>>>
-    void Push(P&& v) noexcept {
-        emplace(std::forward<P>(v));
-    }
-
-    void Pop(T& v, std::stop_token stop) noexcept {
-        auto const tail = tail_.fetch_add(1);
-        auto& slot = slots[idx(tail)];
-        if (!slot.turn.test()) {
-            std::unique_lock lock{cv_mutex};
-            cv.wait(lock, stop, [&slot] { return slot.turn.test(); });
-        }
-        v = slot.move();
-        slot.destroy();
-        slot.turn.clear();
-        slot.turn.notify_one();
-    }
-
-private:
-    template <typename U = T>
-    struct Slot {
-        ~Slot() noexcept {
-            if (turn.test()) {
-                destroy();
-            }
-        }
-
-        template <typename... Args>
-        void construct(Args&&... args) noexcept {
-            static_assert(std::is_nothrow_constructible_v<U, Args&&...>,
-                          "T must be nothrow constructible with Args&&...");
-            std::construct_at(reinterpret_cast<U*>(&storage), std::forward<Args>(args)...);
-        }
-
-        void destroy() noexcept {
-            static_assert(std::is_nothrow_destructible_v<U>, "T must be nothrow destructible");
-            std::destroy_at(reinterpret_cast<U*>(&storage));
-        }
-
-        U&& move() noexcept {
-            return reinterpret_cast<U&&>(storage);
-        }
-
-        // Align to avoid false sharing between adjacent slots
-        alignas(hardware_interference_size) std::atomic_flag turn{};
-        struct aligned_store {
-            struct type {
-                alignas(U) unsigned char data[sizeof(U)];
-            };
-        };
-        typename aligned_store::type storage;
-    };
-
-    template <typename... Args>
-    void emplace(Args&&... args) noexcept {
-        static_assert(std::is_nothrow_constructible_v<T, Args&&...>,
-                      "T must be nothrow constructible with Args&&...");
-        auto const head = head_.fetch_add(1);
-        auto& slot = slots[idx(head)];
-        slot.turn.wait(true);
-        slot.construct(std::forward<Args>(args)...);
-        slot.turn.test_and_set();
-        cv.notify_one();
-    }
-
-    constexpr size_t idx(size_t i) const noexcept {
-        return i & mask;
-    }
-
-    static constexpr size_t mask = capacity - 1;
-
-    // Align to avoid false sharing between head_ and tail_
-    alignas(hardware_interference_size) std::atomic<size_t> head_{0};
-    alignas(hardware_interference_size) std::atomic<size_t> tail_{0};
-
-    std::mutex cv_mutex;
-    std::condition_variable_any cv;
-
-    Slot<T>* slots;
-    [[no_unique_address]] std::allocator<Slot<T>> allocator;
-
-    static_assert(std::is_nothrow_copy_assignable_v<T> || std::is_nothrow_move_assignable_v<T>,
-                  "T must be nothrow copy or move assignable");
-
-    static_assert(std::is_nothrow_destructible_v<T>, "T must be nothrow destructible");
-};
+namespace detail {
+constexpr size_t DefaultCapacity = 0x1000;
+} // namespace detail
+
+template <typename T, size_t Capacity = detail::DefaultCapacity>
+class SPSCQueue {
+    static_assert((Capacity & (Capacity - 1)) == 0, "Capacity must be a power of two.");
+
+public:
+    template <typename... Args>
+    bool TryEmplace(Args&&... args) {
+        return Emplace<PushMode::Try>(std::forward<Args>(args)...);
+    }
+
+    template <typename... Args>
+    void EmplaceWait(Args&&... args) {
+        Emplace<PushMode::Wait>(std::forward<Args>(args)...);
+    }
+
+    bool TryPop(T& t) {
+        return Pop<PopMode::Try>(t);
+    }
+
+    void PopWait(T& t) {
+        Pop<PopMode::Wait>(t);
+    }
+
+    void PopWait(T& t, std::stop_token stop_token) {
+        Pop<PopMode::WaitWithStopToken>(t, stop_token);
+    }
+
+    T PopWait() {
+        T t;
+        Pop<PopMode::Wait>(t);
+        return t;
+    }
+
+    T PopWait(std::stop_token stop_token) {
+        T t;
+        Pop<PopMode::WaitWithStopToken>(t, stop_token);
+        return t;
+    }
+
+private:
+    enum class PushMode {
+        Try,
+        Wait,
+        Count,
+    };
+
+    enum class PopMode {
+        Try,
+        Wait,
+        WaitWithStopToken,
+        Count,
+    };
+
+    template <PushMode Mode, typename... Args>
+    bool Emplace(Args&&... args) {
+        const size_t write_index = m_write_index.load(std::memory_order::relaxed);
+
+        if constexpr (Mode == PushMode::Try) {
+            // Check if we have free slots to write to.
+            if ((write_index - m_read_index.load(std::memory_order::acquire)) == Capacity) {
+                return false;
+            }
+        } else if constexpr (Mode == PushMode::Wait) {
+            // Wait until we have free slots to write to.
+            std::unique_lock lock{producer_cv_mutex};
+            producer_cv.wait(lock, [this, write_index] {
+                return (write_index - m_read_index.load(std::memory_order::acquire)) < Capacity;
+            });
+        } else {
+            static_assert(Mode < PushMode::Count, "Invalid PushMode.");
+        }
+
+        // Determine the position to write to.
+        const size_t pos = write_index % Capacity;
+
+        // Emplace into the queue.
+        std::construct_at(std::addressof(m_data[pos]), std::forward<Args>(args)...);
+
+        // Increment the write index.
+        ++m_write_index;
+
+        // Notify the consumer that we have pushed into the queue.
+        std::scoped_lock lock{consumer_cv_mutex};
+        consumer_cv.notify_one();
+
+        return true;
+    }
+
+    template <PopMode Mode>
+    bool Pop(T& t, [[maybe_unused]] std::stop_token stop_token = {}) {
+        const size_t read_index = m_read_index.load(std::memory_order::relaxed);
+
+        if constexpr (Mode == PopMode::Try) {
+            // Check if the queue is empty.
+            if (read_index == m_write_index.load(std::memory_order::acquire)) {
+                return false;
+            }
+        } else if constexpr (Mode == PopMode::Wait) {
+            // Wait until the queue is not empty.
+            std::unique_lock lock{consumer_cv_mutex};
+            consumer_cv.wait(lock, [this, read_index] {
+                return read_index != m_write_index.load(std::memory_order::acquire);
+            });
+        } else if constexpr (Mode == PopMode::WaitWithStopToken) {
+            // Wait until the queue is not empty.
+            std::unique_lock lock{consumer_cv_mutex};
+            Common::CondvarWait(consumer_cv, lock, stop_token, [this, read_index] {
+                return read_index != m_write_index.load(std::memory_order::acquire);
+            });
+            if (stop_token.stop_requested()) {
+                return false;
+            }
+        } else {
+            static_assert(Mode < PopMode::Count, "Invalid PopMode.");
+        }
+
+        // Determine the position to read from.
+        const size_t pos = read_index % Capacity;
+
+        // Pop the data off the queue, moving it.
+        t = std::move(m_data[pos]);
+
+        // Increment the read index.
+        ++m_read_index;
+
+        // Notify the producer that we have popped off the queue.
+        std::scoped_lock lock{producer_cv_mutex};
+        producer_cv.notify_one();
+
+        return true;
+    }
+
+    alignas(128) std::atomic_size_t m_read_index{0};
+    alignas(128) std::atomic_size_t m_write_index{0};
+
+    std::array<T, Capacity> m_data;
+
+    std::condition_variable_any producer_cv;
+    std::mutex producer_cv_mutex;
+    std::condition_variable_any consumer_cv;
+    std::mutex consumer_cv_mutex;
+};
+
+template <typename T, size_t Capacity = detail::DefaultCapacity>
+class MPSCQueue {
+public:
+    template <typename... Args>
+    bool TryEmplace(Args&&... args) {
+        std::scoped_lock lock{write_mutex};
+        return spsc_queue.TryEmplace(std::forward<Args>(args)...);
+    }
+
+    template <typename... Args>
+    void EmplaceWait(Args&&... args) {
+        std::scoped_lock lock{write_mutex};
+        spsc_queue.EmplaceWait(std::forward<Args>(args)...);
+    }
+
+    bool TryPop(T& t) {
+        return spsc_queue.TryPop(t);
+    }
+
+    void PopWait(T& t) {
+        spsc_queue.PopWait(t);
+    }
+
+    void PopWait(T& t, std::stop_token stop_token) {
+        spsc_queue.PopWait(t, stop_token);
+    }
+
+    T PopWait() {
+        return spsc_queue.PopWait();
+    }
+
+    T PopWait(std::stop_token stop_token) {
+        return spsc_queue.PopWait(stop_token);
+    }
+
+private:
+    SPSCQueue<T, Capacity> spsc_queue;
+    std::mutex write_mutex;
+};
+
+template <typename T, size_t Capacity = detail::DefaultCapacity>
+class MPMCQueue {
+public:
+    template <typename... Args>
+    bool TryEmplace(Args&&... args) {
+        std::scoped_lock lock{write_mutex};
+        return spsc_queue.TryEmplace(std::forward<Args>(args)...);
+    }
+
+    template <typename... Args>
+    void EmplaceWait(Args&&... args) {
+        std::scoped_lock lock{write_mutex};
+        spsc_queue.EmplaceWait(std::forward<Args>(args)...);
+    }
+
+    bool TryPop(T& t) {
+        std::scoped_lock lock{read_mutex};
+        return spsc_queue.TryPop(t);
+    }
+
+    void PopWait(T& t) {
+        std::scoped_lock lock{read_mutex};
+        spsc_queue.PopWait(t);
+    }
+
+    void PopWait(T& t, std::stop_token stop_token) {
+        std::scoped_lock lock{read_mutex};
+        spsc_queue.PopWait(t, stop_token);
+    }
+
+    T PopWait() {
+        std::scoped_lock lock{read_mutex};
+        return spsc_queue.PopWait();
+    }
+
+    T PopWait(std::stop_token stop_token) {
+        std::scoped_lock lock{read_mutex};
+        return spsc_queue.PopWait(stop_token);
+    }
+
+private:
+    SPSCQueue<T, Capacity> spsc_queue;
+    std::mutex write_mutex;
+    std::mutex read_mutex;
+};
 
 } // namespace Common
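Note: a minimal usage sketch of the refactored queue interface (the consumer loop is illustrative, not from this diff):

    #include <stop_token>

    #include "common/bounded_threadsafe_queue.h"

    Common::MPSCQueue<int> queue;

    // Producer side (any thread): blocks while the ring is full instead of dropping.
    void Produce(int value) {
        queue.EmplaceWait(value);
    }

    // Consumer side: sleeps on the condition variable and wakes early on stop request.
    void Consume(std::stop_token stop) {
        int value{};
        while (!stop.stop_requested()) {
            queue.PopWait(value, stop);
        }
    }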
src/common/container_hash.h (new file, 92 lines)
@@ -0,0 +1,92 @@
+// SPDX-FileCopyrightText: 2005-2014 Daniel James
+// SPDX-FileCopyrightText: 2016 Austin Appleby
+// SPDX-License-Identifier: BSL-1.0
+
+#include <array>
+#include <climits>
+#include <cstdint>
+#include <limits>
+#include <type_traits>
+#include <vector>
+
+namespace Common {
+
+namespace detail {
+
+template <typename T>
+    requires std::is_unsigned_v<T>
+inline std::size_t HashValue(T val) {
+    const unsigned int size_t_bits = std::numeric_limits<std::size_t>::digits;
+    const unsigned int length =
+        (std::numeric_limits<T>::digits - 1) / static_cast<unsigned int>(size_t_bits);
+
+    std::size_t seed = 0;
+
+    for (unsigned int i = length * size_t_bits; i > 0; i -= size_t_bits) {
+        seed ^= static_cast<size_t>(val >> i) + (seed << 6) + (seed >> 2);
+    }
+
+    seed ^= static_cast<size_t>(val) + (seed << 6) + (seed >> 2);
+
+    return seed;
+}
+
+template <size_t Bits>
+struct HashCombineImpl {
+    template <typename T>
+    static inline T fn(T seed, T value) {
+        seed ^= value + 0x9e3779b9 + (seed << 6) + (seed >> 2);
+        return seed;
+    }
+};
+
+template <>
+struct HashCombineImpl<64> {
+    static inline std::uint64_t fn(std::uint64_t h, std::uint64_t k) {
+        const std::uint64_t m = (std::uint64_t(0xc6a4a793) << 32) + 0x5bd1e995;
+        const int r = 47;
+
+        k *= m;
+        k ^= k >> r;
+        k *= m;
+
+        h ^= k;
+        h *= m;
+
+        // Completely arbitrary number, to prevent 0's
+        // from hashing to 0.
+        h += 0xe6546b64;
+
+        return h;
+    }
+};
+
+} // namespace detail
+
+template <typename T>
+inline void HashCombine(std::size_t& seed, const T& v) {
+    seed = detail::HashCombineImpl<sizeof(std::size_t) * CHAR_BIT>::fn(seed, detail::HashValue(v));
+}
+
+template <typename It>
+inline std::size_t HashRange(It first, It last) {
+    std::size_t seed = 0;
+
+    for (; first != last; ++first) {
+        HashCombine<typename std::iterator_traits<It>::value_type>(seed, *first);
+    }
+
+    return seed;
+}
+
+template <typename T, size_t Size>
+std::size_t HashValue(const std::array<T, Size>& v) {
+    return HashRange(v.cbegin(), v.cend());
+}
+
+template <typename T, typename Allocator>
+std::size_t HashValue(const std::vector<T, Allocator>& v) {
+    return HashRange(v.cbegin(), v.cend());
+}
+
+} // namespace Common
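Note: a quick usage sketch of the new helpers (illustrative):

    #include <cstdint>
    #include <vector>

    #include "common/container_hash.h"

    // Order-dependent digest of a whole range, usable as an unordered_map hash.
    std::size_t DigestOf(const std::vector<std::uint32_t>& key) {
        return Common::HashValue(key);
    }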
@@ -28,7 +28,7 @@
 #ifdef _WIN32
 #include "common/string_util.h"
 #endif
-#include "common/threadsafe_queue.h"
+#include "common/bounded_threadsafe_queue.h"
 
 namespace Common::Log {
 
@@ -204,11 +204,11 @@ public:
 
     void PushEntry(Class log_class, Level log_level, const char* filename, unsigned int line_num,
                    const char* function, std::string&& message) {
-        if (!filter.CheckMessage(log_class, log_level))
+        if (!filter.CheckMessage(log_class, log_level)) {
             return;
-        const Entry& entry =
-            CreateEntry(log_class, log_level, filename, line_num, function, std::move(message));
-        message_queue.Push(entry);
+        }
+
+        message_queue.EmplaceWait(
+            CreateEntry(log_class, log_level, filename, line_num, function, std::move(message)));
     }
 
 private:
@@ -225,7 +225,7 @@ private:
             ForEachBackend([&entry](Backend& backend) { backend.Write(entry); });
         };
         while (!stop_token.stop_requested()) {
-            entry = message_queue.PopWait(stop_token);
+            message_queue.PopWait(entry, stop_token);
             if (entry.filename != nullptr) {
                 write_logs();
             }
@@ -233,7 +233,7 @@ private:
             // Drain the logging queue. Only writes out up to MAX_LOGS_TO_WRITE to prevent a
             // case where a system is repeatedly spamming logs even on close.
             int max_logs_to_write = filter.IsDebug() ? INT_MAX : 100;
-            while (max_logs_to_write-- && message_queue.Pop(entry)) {
+            while (max_logs_to_write-- && message_queue.TryPop(entry)) {
                 write_logs();
             }
         });
@@ -273,7 +273,7 @@ private:
     ColorConsoleBackend color_console_backend{};
     FileBackend file_backend;
 
-    MPSCQueue<Entry, true> message_queue{};
+    MPSCQueue<Entry> message_queue{};
     std::chrono::steady_clock::time_point time_origin{std::chrono::steady_clock::now()};
     std::jthread backend_thread;
 };
@@ -38,12 +38,12 @@ public:
         Map(address, address_end, null_value);
     }
 
-    [[nodiscard]] size_t GetContinousSizeFrom(KeyTBase address) const {
+    [[nodiscard]] size_t GetContinuousSizeFrom(KeyTBase address) const {
         const KeyT new_address = static_cast<KeyT>(address);
         if (new_address < 0) {
             return 0;
         }
-        return ContinousSizeInternal(new_address);
+        return ContinuousSizeInternal(new_address);
     }
 
     [[nodiscard]] ValueT GetValueAt(KeyT address) const {
@@ -59,7 +59,7 @@ private:
     using IteratorType = typename MapType::iterator;
     using ConstIteratorType = typename MapType::const_iterator;
 
-    size_t ContinousSizeInternal(KeyT address) const {
+    size_t ContinuousSizeInternal(KeyT address) const {
         const auto it = GetFirstElementBeforeOrOn(address);
         if (it == container.end() || it->second == null_value) {
             return 0;
@@ -125,18 +125,18 @@ std::string ReplaceAll(std::string result, const std::string& src, const std::st
     return result;
 }
 
-std::string UTF16ToUTF8(const std::u16string& input) {
+std::string UTF16ToUTF8(std::u16string_view input) {
     std::wstring_convert<std::codecvt_utf8_utf16<char16_t>, char16_t> convert;
-    return convert.to_bytes(input);
+    return convert.to_bytes(input.data(), input.data() + input.size());
 }
 
-std::u16string UTF8ToUTF16(const std::string& input) {
+std::u16string UTF8ToUTF16(std::string_view input) {
     std::wstring_convert<std::codecvt_utf8_utf16<char16_t>, char16_t> convert;
-    return convert.from_bytes(input);
+    return convert.from_bytes(input.data(), input.data() + input.size());
 }
 
 #ifdef _WIN32
-static std::wstring CPToUTF16(u32 code_page, const std::string& input) {
+static std::wstring CPToUTF16(u32 code_page, std::string_view input) {
     const auto size =
         MultiByteToWideChar(code_page, 0, input.data(), static_cast<int>(input.size()), nullptr, 0);
@@ -154,7 +154,7 @@ static std::wstring CPToUTF16(u32 code_page, const std::string& input) {
     return output;
 }
 
-std::string UTF16ToUTF8(const std::wstring& input) {
+std::string UTF16ToUTF8(std::wstring_view input) {
     const auto size = WideCharToMultiByte(CP_UTF8, 0, input.data(), static_cast<int>(input.size()),
                                           nullptr, 0, nullptr, nullptr);
     if (size == 0) {
@@ -172,7 +172,7 @@ std::string UTF16ToUTF8(const std::wstring& input) {
     return output;
 }
 
-std::wstring UTF8ToUTF16W(const std::string& input) {
+std::wstring UTF8ToUTF16W(std::string_view input) {
     return CPToUTF16(CP_UTF8, input);
 }
 
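Note: taking std::string_view/std::u16string_view lets callers pass literals and substrings without materializing a temporary std::string — e.g. Common::UTF8ToUTF16("yuzu") now converts straight from the literal. The explicit (data, data + size) form is needed because std::wstring_convert has no string_view overloads.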
@@ -36,12 +36,12 @@ bool SplitPath(const std::string& full_path, std::string* _pPath, std::string* _
 [[nodiscard]] std::string ReplaceAll(std::string result, const std::string& src,
                                      const std::string& dest);
 
-[[nodiscard]] std::string UTF16ToUTF8(const std::u16string& input);
-[[nodiscard]] std::u16string UTF8ToUTF16(const std::string& input);
+[[nodiscard]] std::string UTF16ToUTF8(std::u16string_view input);
+[[nodiscard]] std::u16string UTF8ToUTF16(std::string_view input);
 
 #ifdef _WIN32
-[[nodiscard]] std::string UTF16ToUTF8(const std::wstring& input);
-[[nodiscard]] std::wstring UTF8ToUTF16W(const std::string& str);
+[[nodiscard]] std::string UTF16ToUTF8(std::wstring_view input);
+[[nodiscard]] std::wstring UTF8ToUTF16W(std::string_view str);
 
 #endif
@@ -97,6 +97,7 @@ void AppendCPUInfo(FieldCollection& fc) {
     add_field("CPU_Extension_x64_PCLMULQDQ", caps.pclmulqdq);
     add_field("CPU_Extension_x64_POPCNT", caps.popcnt);
     add_field("CPU_Extension_x64_SHA", caps.sha);
+    add_field("CPU_Extension_x64_WAITPKG", caps.waitpkg);
 #else
     fc.AddField(FieldType::UserSystem, "CPU_Model", "Other");
 #endif
src/common/typed_address.h (new file, 320 lines)
@@ -0,0 +1,320 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <compare>
+#include <type_traits>
+#include <fmt/format.h>
+
+#include "common/common_types.h"
+
+namespace Common {
+
+template <bool Virtual, typename T>
+class TypedAddress {
+public:
+    // Constructors.
+    constexpr inline TypedAddress() : m_address(0) {}
+    constexpr inline TypedAddress(uint64_t a) : m_address(a) {}
+
+    template <typename U>
+    constexpr inline explicit TypedAddress(const U* ptr)
+        : m_address(reinterpret_cast<uint64_t>(ptr)) {}
+
+    // Copy constructor.
+    constexpr inline TypedAddress(const TypedAddress& rhs) = default;
+
+    // Assignment operator.
+    constexpr inline TypedAddress& operator=(const TypedAddress& rhs) = default;
+
+    // Arithmetic operators.
+    template <typename I>
+    constexpr inline TypedAddress operator+(I rhs) const {
+        static_assert(std::is_integral_v<I>);
+        return m_address + rhs;
+    }
+
+    constexpr inline TypedAddress operator+(TypedAddress rhs) const {
+        return m_address + rhs.m_address;
+    }
+
+    constexpr inline TypedAddress operator++() {
+        return ++m_address;
+    }
+
+    constexpr inline TypedAddress operator++(int) {
+        return m_address++;
+    }
+
+    template <typename I>
+    constexpr inline TypedAddress operator-(I rhs) const {
+        static_assert(std::is_integral_v<I>);
+        return m_address - rhs;
+    }
+
+    constexpr inline ptrdiff_t operator-(TypedAddress rhs) const {
+        return m_address - rhs.m_address;
+    }
+
+    constexpr inline TypedAddress operator--() {
+        return --m_address;
+    }
+
+    constexpr inline TypedAddress operator--(int) {
+        return m_address--;
+    }
+
+    template <typename I>
+    constexpr inline TypedAddress operator+=(I rhs) {
+        static_assert(std::is_integral_v<I>);
+        m_address += rhs;
+        return *this;
+    }
+
+    template <typename I>
+    constexpr inline TypedAddress operator-=(I rhs) {
+        static_assert(std::is_integral_v<I>);
+        m_address -= rhs;
+        return *this;
+    }
+
+    // Logical operators.
+    constexpr inline uint64_t operator&(uint64_t mask) const {
+        return m_address & mask;
+    }
+
+    constexpr inline uint64_t operator|(uint64_t mask) const {
+        return m_address | mask;
+    }
+
+    template <typename I>
+    constexpr inline TypedAddress operator|=(I rhs) {
+        static_assert(std::is_integral_v<I>);
+        m_address |= rhs;
+        return *this;
+    }
+
+    constexpr inline uint64_t operator<<(int shift) const {
+        return m_address << shift;
+    }
+
+    constexpr inline uint64_t operator>>(int shift) const {
+        return m_address >> shift;
+    }
+
+    template <typename U>
+    constexpr inline size_t operator/(U size) const {
+        return m_address / size;
+    }
+
+    constexpr explicit operator bool() const {
+        return m_address != 0;
+    }
+
+    // constexpr inline uint64_t operator%(U align) const { return m_address % align; }
+
+    // Comparison operators.
+    constexpr bool operator==(const TypedAddress&) const = default;
+    constexpr bool operator!=(const TypedAddress&) const = default;
+    constexpr auto operator<=>(const TypedAddress&) const = default;
+
+    // For convenience, also define comparison operators versus uint64_t.
+    constexpr inline bool operator==(uint64_t rhs) const {
+        return m_address == rhs;
+    }
+
+    constexpr inline bool operator!=(uint64_t rhs) const {
+        return m_address != rhs;
+    }
+
+    // Allow getting the address explicitly, for use in accessors.
+    constexpr inline uint64_t GetValue() const {
+        return m_address;
+    }
+
+private:
+    uint64_t m_address{};
+};
+
+struct PhysicalAddressTag {};
+struct VirtualAddressTag {};
+struct ProcessAddressTag {};
+
+using PhysicalAddress = TypedAddress<false, PhysicalAddressTag>;
+using VirtualAddress = TypedAddress<true, VirtualAddressTag>;
+using ProcessAddress = TypedAddress<true, ProcessAddressTag>;
+
+// Define accessors.
+template <typename T>
+concept IsTypedAddress = std::same_as<T, PhysicalAddress> || std::same_as<T, VirtualAddress> ||
+                         std::same_as<T, ProcessAddress>;
+
+template <typename T>
+constexpr inline T Null = [] {
+    if constexpr (std::is_same<T, uint64_t>::value) {
+        return 0;
+    } else {
+        static_assert(std::is_same<T, PhysicalAddress>::value ||
+                      std::is_same<T, VirtualAddress>::value ||
+                      std::is_same<T, ProcessAddress>::value);
+        return T(0);
+    }
+}();
+
+// Basic type validations.
+static_assert(sizeof(PhysicalAddress) == sizeof(uint64_t));
+static_assert(sizeof(VirtualAddress) == sizeof(uint64_t));
+static_assert(sizeof(ProcessAddress) == sizeof(uint64_t));
+
+static_assert(std::is_trivially_copyable_v<PhysicalAddress>);
+static_assert(std::is_trivially_copyable_v<VirtualAddress>);
+static_assert(std::is_trivially_copyable_v<ProcessAddress>);
+
+static_assert(std::is_trivially_copy_constructible_v<PhysicalAddress>);
+static_assert(std::is_trivially_copy_constructible_v<VirtualAddress>);
+static_assert(std::is_trivially_copy_constructible_v<ProcessAddress>);
+
+static_assert(std::is_trivially_move_constructible_v<PhysicalAddress>);
+static_assert(std::is_trivially_move_constructible_v<VirtualAddress>);
+static_assert(std::is_trivially_move_constructible_v<ProcessAddress>);
+
+static_assert(std::is_trivially_copy_assignable_v<PhysicalAddress>);
+static_assert(std::is_trivially_copy_assignable_v<VirtualAddress>);
+static_assert(std::is_trivially_copy_assignable_v<ProcessAddress>);
+
+static_assert(std::is_trivially_move_assignable_v<PhysicalAddress>);
+static_assert(std::is_trivially_move_assignable_v<VirtualAddress>);
+static_assert(std::is_trivially_move_assignable_v<ProcessAddress>);
+
+static_assert(std::is_trivially_destructible_v<PhysicalAddress>);
+static_assert(std::is_trivially_destructible_v<VirtualAddress>);
+static_assert(std::is_trivially_destructible_v<ProcessAddress>);
+
+static_assert(Null<uint64_t> == 0);
+static_assert(Null<PhysicalAddress> == Null<uint64_t>);
+static_assert(Null<VirtualAddress> == Null<uint64_t>);
+static_assert(Null<ProcessAddress> == Null<uint64_t>);
+
+// Constructor/assignment validations.
+static_assert([] {
+    const PhysicalAddress a(5);
+    PhysicalAddress b(a);
+    return b;
+}() == PhysicalAddress(5));
+static_assert([] {
+    const PhysicalAddress a(5);
+    PhysicalAddress b(10);
+    b = a;
+    return b;
+}() == PhysicalAddress(5));
+
+// Arithmetic validations.
+static_assert(PhysicalAddress(10) + 5 == PhysicalAddress(15));
+static_assert(PhysicalAddress(10) - 5 == PhysicalAddress(5));
+static_assert([] {
+    PhysicalAddress v(10);
+    v += 5;
+    return v;
+}() == PhysicalAddress(15));
+static_assert([] {
+    PhysicalAddress v(10);
+    v -= 5;
+    return v;
+}() == PhysicalAddress(5));
+static_assert(PhysicalAddress(10)++ == PhysicalAddress(10));
+static_assert(++PhysicalAddress(10) == PhysicalAddress(11));
+static_assert(PhysicalAddress(10)-- == PhysicalAddress(10));
+static_assert(--PhysicalAddress(10) == PhysicalAddress(9));
+
+// Logical validations.
+static_assert((PhysicalAddress(0b11111111) >> 1) == 0b01111111);
+static_assert((PhysicalAddress(0b10101010) >> 1) == 0b01010101);
+static_assert((PhysicalAddress(0b11111111) << 1) == 0b111111110);
+static_assert((PhysicalAddress(0b01010101) << 1) == 0b10101010);
+static_assert((PhysicalAddress(0b11111111) & 0b01010101) == 0b01010101);
+static_assert((PhysicalAddress(0b11111111) & 0b10101010) == 0b10101010);
+static_assert((PhysicalAddress(0b01010101) & 0b10101010) == 0b00000000);
+static_assert((PhysicalAddress(0b00000000) | 0b01010101) == 0b01010101);
+static_assert((PhysicalAddress(0b11111111) | 0b01010101) == 0b11111111);
+static_assert((PhysicalAddress(0b10101010) | 0b01010101) == 0b11111111);
+
+// Comparisons.
+static_assert(PhysicalAddress(0) == PhysicalAddress(0));
+static_assert(PhysicalAddress(0) != PhysicalAddress(1));
+static_assert(PhysicalAddress(0) < PhysicalAddress(1));
+static_assert(PhysicalAddress(0) <= PhysicalAddress(1));
+static_assert(PhysicalAddress(1) > PhysicalAddress(0));
+static_assert(PhysicalAddress(1) >= PhysicalAddress(0));
+
+static_assert(!(PhysicalAddress(0) == PhysicalAddress(1)));
+static_assert(!(PhysicalAddress(0) != PhysicalAddress(0)));
+static_assert(!(PhysicalAddress(1) < PhysicalAddress(0)));
+static_assert(!(PhysicalAddress(1) <= PhysicalAddress(0)));
+static_assert(!(PhysicalAddress(0) > PhysicalAddress(1)));
+static_assert(!(PhysicalAddress(0) >= PhysicalAddress(1)));
+
+} // namespace Common
+
+template <bool Virtual, typename T>
+constexpr inline uint64_t GetInteger(Common::TypedAddress<Virtual, T> address) {
+    return address.GetValue();
+}
+
+template <>
+struct fmt::formatter<Common::PhysicalAddress> {
+    constexpr auto parse(fmt::format_parse_context& ctx) {
+        return ctx.begin();
+    }
+    template <typename FormatContext>
+    auto format(const Common::PhysicalAddress& addr, FormatContext& ctx) {
+        return fmt::format_to(ctx.out(), "{:#x}", static_cast<u64>(addr.GetValue()));
+    }
+};
+
+template <>
+struct fmt::formatter<Common::ProcessAddress> {
+    constexpr auto parse(fmt::format_parse_context& ctx) {
+        return ctx.begin();
+    }
+    template <typename FormatContext>
+    auto format(const Common::ProcessAddress& addr, FormatContext& ctx) {
+        return fmt::format_to(ctx.out(), "{:#x}", static_cast<u64>(addr.GetValue()));
+    }
+};
+
+template <>
+struct fmt::formatter<Common::VirtualAddress> {
+    constexpr auto parse(fmt::format_parse_context& ctx) {
+        return ctx.begin();
+    }
+    template <typename FormatContext>
+    auto format(const Common::VirtualAddress& addr, FormatContext& ctx) {
+        return fmt::format_to(ctx.out(), "{:#x}", static_cast<u64>(addr.GetValue()));
+    }
+};
+
+namespace std {
+
+template <>
+struct hash<Common::PhysicalAddress> {
+    size_t operator()(const Common::PhysicalAddress& k) const noexcept {
+        return k.GetValue();
+    }
+};
+
+template <>
+struct hash<Common::ProcessAddress> {
+    size_t operator()(const Common::ProcessAddress& k) const noexcept {
+        return k.GetValue();
+    }
+};
+
+template <>
+struct hash<Common::VirtualAddress> {
+    size_t operator()(const Common::VirtualAddress& k) const noexcept {
+        return k.GetValue();
+    }
+};
+
+} // namespace std
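Note: a small illustration of what the tag types buy — mixing address spaces becomes a compile error while the representation stays a plain u64 (sketch; Touch is hypothetical):

    #include "common/typed_address.h"

    void Touch(Common::ProcessAddress address); // hypothetical consumer

    void Example() {
        const Common::ProcessAddress proc{0x8000'0000};
        const Common::PhysicalAddress phys{0x1000};

        Touch(proc);   // OK
        // Touch(phys); // error: PhysicalAddress and ProcessAddress are distinct types

        const uint64_t raw = GetInteger(proc); // explicit escape hatch to a raw integer
    }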
@@ -144,6 +144,7 @@ static CPUCaps Detect() {
         caps.bmi2 = Common::Bit<8>(cpu_id[1]);
         caps.sha = Common::Bit<29>(cpu_id[1]);
 
+        caps.waitpkg = Common::Bit<5>(cpu_id[2]);
         caps.gfni = Common::Bit<8>(cpu_id[2]);
 
         __cpuidex(cpu_id, 0x00000007, 0x00000001);
@@ -67,6 +67,7 @@ struct CPUCaps {
     bool pclmulqdq : 1;
     bool popcnt : 1;
     bool sha : 1;
+    bool waitpkg : 1;
 };
 
 /**
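Note: WAITPKG (the UMONITOR/UMWAIT/TPAUSE instruction group) is reported in CPUID leaf 7, sub-leaf 0, ECX bit 5, which is exactly what Common::Bit<5>(cpu_id[2]) reads above.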
src/common/x64/cpu_wait.cpp (new file, 69 lines)
@@ -0,0 +1,69 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <thread>
+
+#ifdef _MSC_VER
+#include <intrin.h>
+#endif
+
+#include "common/x64/cpu_detect.h"
+#include "common/x64/cpu_wait.h"
+
+namespace Common::X64 {
+
+#ifdef _MSC_VER
+__forceinline static u64 FencedRDTSC() {
+    _mm_lfence();
+    _ReadWriteBarrier();
+    const u64 result = __rdtsc();
+    _mm_lfence();
+    _ReadWriteBarrier();
+    return result;
+}
+
+__forceinline static void TPAUSE() {
+    // 100,000 cycles is a reasonable amount of time to wait to save on CPU resources.
+    // For reference:
+    // At 1 GHz, 100K cycles is 100us
+    // At 2 GHz, 100K cycles is 50us
+    // At 4 GHz, 100K cycles is 25us
+    static constexpr auto PauseCycles = 100'000;
+    _tpause(0, FencedRDTSC() + PauseCycles);
+}
+#else
+static u64 FencedRDTSC() {
+    u64 eax;
+    u64 edx;
+    asm volatile("lfence\n\t"
+                 "rdtsc\n\t"
+                 "lfence\n\t"
+                 : "=a"(eax), "=d"(edx));
+    return (edx << 32) | eax;
+}
+
+static void TPAUSE() {
+    // 100,000 cycles is a reasonable amount of time to wait to save on CPU resources.
+    // For reference:
+    // At 1 GHz, 100K cycles is 100us
+    // At 2 GHz, 100K cycles is 50us
+    // At 4 GHz, 100K cycles is 25us
+    static constexpr auto PauseCycles = 100'000;
+    const auto tsc = FencedRDTSC() + PauseCycles;
+    const auto eax = static_cast<u32>(tsc & 0xFFFFFFFF);
+    const auto edx = static_cast<u32>(tsc >> 32);
+    asm volatile("tpause %0" : : "r"(0), "d"(edx), "a"(eax));
+}
+#endif
+
+void MicroSleep() {
+    static const bool has_waitpkg = GetCPUCaps().waitpkg;
+
+    if (has_waitpkg) {
+        TPAUSE();
+    } else {
+        std::this_thread::yield();
+    }
+}
+
+} // namespace Common::X64
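Note: a sketch of the intended call pattern — a spin-wait that naps in roughly 100K-cycle slices on WAITPKG hardware and degrades to a scheduler yield elsewhere (the loop is illustrative, not from this diff):

    #include <atomic>

    #include "common/x64/cpu_wait.h"

    // Spin until `ready` flips, without burning a full core the whole time.
    void SpinWait(const std::atomic<bool>& ready) {
        while (!ready.load(std::memory_order_acquire)) {
            Common::X64::MicroSleep();
        }
    }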
src/common/x64/cpu_wait.h (new file, 10 lines)
@@ -0,0 +1,10 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+namespace Common::X64 {
+
+void MicroSleep();
+
+} // namespace Common::X64
@@ -27,16 +27,13 @@ __forceinline static u64 FencedRDTSC() {
 }
 #else
 static u64 FencedRDTSC() {
-    u64 result;
+    u64 eax;
+    u64 edx;
     asm volatile("lfence\n\t"
                  "rdtsc\n\t"
-                 "shl $32, %%rdx\n\t"
-                 "or %%rdx, %0\n\t"
-                 "lfence"
-                 : "=a"(result)
-                 :
-                 : "rdx", "memory", "cc");
-    return result;
+                 "lfence\n\t"
+                 : "=a"(eax), "=d"(edx));
+    return (edx << 32) | eax;
 }
 #endif
 
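Note: the rewritten constraints bind both rdtsc result halves directly ("=a" for EAX, "=d" for EDX), so the (edx << 32) | eax combine happens in C++ and the hand-written shl/or sequence, with its rdx/memory/cc clobbers, is no longer needed.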
@@ -33,7 +33,7 @@ std::vector<u8> CompressDataZSTDDefault(const u8* source, std::size_t source_siz
 
 std::vector<u8> DecompressDataZSTD(std::span<const u8> compressed) {
     const std::size_t decompressed_size =
-        ZSTD_getDecompressedSize(compressed.data(), compressed.size());
+        ZSTD_getFrameContentSize(compressed.data(), compressed.size());
     std::vector<u8> decompressed(decompressed_size);
 
     const std::size_t uncompressed_result_size = ZSTD_decompress(
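Note: ZSTD_getFrameContentSize() supersedes the deprecated ZSTD_getDecompressedSize(), but it can also return the sentinels ZSTD_CONTENTSIZE_UNKNOWN and ZSTD_CONTENTSIZE_ERROR, which a defensive caller should reject before sizing the output buffer (sketch, not part of this diff):

    #include <zstd.h>

    // Returns 0 for any frame whose decompressed size cannot be trusted.
    size_t SafeContentSize(const void* src, size_t src_size) {
        const unsigned long long size = ZSTD_getFrameContentSize(src, src_size);
        if (size == ZSTD_CONTENTSIZE_UNKNOWN || size == ZSTD_CONTENTSIZE_ERROR) {
            return 0;
        }
        return static_cast<size_t>(size);
    }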
@@ -158,6 +158,7 @@ add_library(core STATIC
     hid/motion_input.h
     hle/api_version.h
     hle/ipc.h
+    hle/kernel/board/nintendo/nx/k_memory_layout.cpp
     hle/kernel/board/nintendo/nx/k_memory_layout.h
     hle/kernel/board/nintendo/nx/k_system_control.cpp
     hle/kernel/board/nintendo/nx/k_system_control.h
@@ -211,12 +212,10 @@ add_library(core STATIC
     hle/kernel/k_light_condition_variable.h
     hle/kernel/k_light_lock.cpp
     hle/kernel/k_light_lock.h
-    hle/kernel/k_linked_list.h
     hle/kernel/k_memory_block.h
     hle/kernel/k_memory_block_manager.cpp
     hle/kernel/k_memory_block_manager.h
     hle/kernel/k_memory_layout.cpp
-    hle/kernel/k_memory_layout.board.nintendo_nx.cpp
     hle/kernel/k_memory_layout.h
     hle/kernel/k_memory_manager.cpp
     hle/kernel/k_memory_manager.h
@@ -279,6 +278,7 @@ add_library(core STATIC
     hle/kernel/k_trace.h
     hle/kernel/k_transfer_memory.cpp
     hle/kernel/k_transfer_memory.h
+    hle/kernel/k_typed_address.h
     hle/kernel/k_worker_task.h
     hle/kernel/k_worker_task_manager.cpp
     hle/kernel/k_worker_task_manager.h
@@ -44,7 +44,7 @@ void ARM_Interface::SymbolicateBacktrace(Core::System& system, std::vector<Backt
    std::map<std::string, Symbols::Symbols> symbols;
    for (const auto& module : modules) {
        symbols.insert_or_assign(
            module.second, Symbols::GetSymbols(module.first, system.Memory(),
            module.second, Symbols::GetSymbols(module.first, system.ApplicationMemory(),
                                               system.ApplicationProcess()->Is64BitProcess()));
    }

@@ -168,21 +168,21 @@ void ARM_Interface::LoadWatchpointArray(const WatchpointArray& wp) {
}

const Kernel::DebugWatchpoint* ARM_Interface::MatchingWatchpoint(
    VAddr addr, u64 size, Kernel::DebugWatchpointType access_type) const {
    u64 addr, u64 size, Kernel::DebugWatchpointType access_type) const {
    if (!watchpoints) {
        return nullptr;
    }

    const VAddr start_address{addr};
    const VAddr end_address{addr + size};
    const u64 start_address{addr};
    const u64 end_address{addr + size};

    for (size_t i = 0; i < Core::Hardware::NUM_WATCHPOINTS; i++) {
        const auto& watch{(*watchpoints)[i]};

        if (end_address <= watch.start_address) {
        if (end_address <= GetInteger(watch.start_address)) {
            continue;
        }
        if (start_address >= watch.end_address) {
        if (start_address >= GetInteger(watch.end_address)) {
            continue;
        }
        if ((access_type & watch.type) == Kernel::DebugWatchpointType::None) {

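The two `continue` branches implement a standard half-open interval overlap test: ranges [start1, end1) and [start2, end2) intersect exactly when neither ends at or before the other begins. Stated directly (illustrative restatement, not part of the diff):

// Sketch: the overlap predicate the loop above evaluates by negation.
constexpr bool Overlaps(u64 start1, u64 end1, u64 start2, u64 end2) {
    return start1 < end2 && start2 < end1;
}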
@@ -78,7 +78,7 @@ public:
     * @param addr Start address of the cache range to clear
     * @param size Size of the cache range to clear, starting at addr
     */
    virtual void InvalidateCacheRange(VAddr addr, std::size_t size) = 0;
    virtual void InvalidateCacheRange(u64 addr, std::size_t size) = 0;

    /**
     * Notifies CPU emulation that the current page table has changed.
@@ -149,9 +149,9 @@
     */
    virtual void SetPSTATE(u32 pstate) = 0;

    virtual VAddr GetTlsAddress() const = 0;
    virtual u64 GetTlsAddress() const = 0;

    virtual void SetTlsAddress(VAddr address) = 0;
    virtual void SetTlsAddress(u64 address) = 0;

    /**
     * Gets the value within the TPIDR_EL0 (read/write software thread ID) register.
@@ -214,7 +214,7 @@ protected:

    static void SymbolicateBacktrace(Core::System& system, std::vector<BacktraceEntry>& out);
    const Kernel::DebugWatchpoint* MatchingWatchpoint(
        VAddr addr, u64 size, Kernel::DebugWatchpointType access_type) const;
        u64 addr, u64 size, Kernel::DebugWatchpointType access_type) const;

    virtual Dynarmic::HaltReason RunJit() = 0;
    virtual Dynarmic::HaltReason StepJit() = 0;

@@ -5,7 +5,6 @@
#include <memory>
#include <dynarmic/interface/A32/a32.h>
#include <dynarmic/interface/A32/config.h>
#include <dynarmic/interface/A32/context.h>
#include "common/assert.h"
#include "common/literals.h"
#include "common/logging/log.h"
@@ -28,8 +27,8 @@ using namespace Common::Literals;
class DynarmicCallbacks32 : public Dynarmic::A32::UserCallbacks {
public:
    explicit DynarmicCallbacks32(ARM_Dynarmic_32& parent_)
        : parent{parent_},
          memory(parent.system.Memory()), debugger_enabled{parent.system.DebuggerEnabled()},
        : parent{parent_}, memory(parent.system.ApplicationMemory()),
          debugger_enabled{parent.system.DebuggerEnabled()},
          check_memory_access{debugger_enabled ||
                              !Settings::values.cpuopt_ignore_memory_aborts.GetValue()} {}

@@ -155,7 +154,7 @@ public:
        return std::max<s64>(parent.system.CoreTiming().GetDowncount(), 0);
    }

    bool CheckMemoryAccess(VAddr addr, u64 size, Kernel::DebugWatchpointType type) {
    bool CheckMemoryAccess(u64 addr, u64 size, Kernel::DebugWatchpointType type) {
        if (!check_memory_access) {
            return true;
        }
@@ -397,7 +396,7 @@ u64 ARM_Dynarmic_32::GetTlsAddress() const {
    return cp15->uro;
}

void ARM_Dynarmic_32::SetTlsAddress(VAddr address) {
void ARM_Dynarmic_32::SetTlsAddress(u64 address) {
    cp15->uro = static_cast<u32>(address);
}

@@ -410,21 +409,19 @@ void ARM_Dynarmic_32::SetTPIDR_EL0(u64 value) {
}

void ARM_Dynarmic_32::SaveContext(ThreadContext32& ctx) {
    Dynarmic::A32::Context context;
    jit.load()->SaveContext(context);
    ctx.cpu_registers = context.Regs();
    ctx.extension_registers = context.ExtRegs();
    ctx.cpsr = context.Cpsr();
    ctx.fpscr = context.Fpscr();
    Dynarmic::A32::Jit* j = jit.load();
    ctx.cpu_registers = j->Regs();
    ctx.extension_registers = j->ExtRegs();
    ctx.cpsr = j->Cpsr();
    ctx.fpscr = j->Fpscr();
}

void ARM_Dynarmic_32::LoadContext(const ThreadContext32& ctx) {
    Dynarmic::A32::Context context;
    context.Regs() = ctx.cpu_registers;
    context.ExtRegs() = ctx.extension_registers;
    context.SetCpsr(ctx.cpsr);
    context.SetFpscr(ctx.fpscr);
    jit.load()->LoadContext(context);
    Dynarmic::A32::Jit* j = jit.load();
    j->Regs() = ctx.cpu_registers;
    j->ExtRegs() = ctx.extension_registers;
    j->SetCpsr(ctx.cpsr);
    j->SetFpscr(ctx.fpscr);
}

void ARM_Dynarmic_32::SignalInterrupt() {
@@ -439,7 +436,7 @@ void ARM_Dynarmic_32::ClearInstructionCache() {
    jit.load()->ClearCache();
}

void ARM_Dynarmic_32::InvalidateCacheRange(VAddr addr, std::size_t size) {
void ARM_Dynarmic_32::InvalidateCacheRange(u64 addr, std::size_t size) {
    jit.load()->InvalidateCacheRange(static_cast<u32>(addr), size);
}

@@ -468,7 +465,7 @@ void ARM_Dynarmic_32::PageTableChanged(Common::PageTable& page_table,
std::vector<ARM_Interface::BacktraceEntry> ARM_Dynarmic_32::GetBacktrace(Core::System& system,
                                                                         u64 fp, u64 lr, u64 pc) {
    std::vector<BacktraceEntry> out;
    auto& memory = system.Memory();
    auto& memory = system.ApplicationMemory();

    out.push_back({"", 0, pc, 0, ""});


@@ -41,8 +41,8 @@
    void SetVectorReg(int index, u128 value) override;
    u32 GetPSTATE() const override;
    void SetPSTATE(u32 pstate) override;
    VAddr GetTlsAddress() const override;
    void SetTlsAddress(VAddr address) override;
    u64 GetTlsAddress() const override;
    void SetTlsAddress(u64 address) override;
    void SetTPIDR_EL0(u64 value) override;
    u64 GetTPIDR_EL0() const override;

@@ -60,7 +60,7 @@ public:
    void ClearExclusiveState() override;

    void ClearInstructionCache() override;
    void InvalidateCacheRange(VAddr addr, std::size_t size) override;
    void InvalidateCacheRange(u64 addr, std::size_t size) override;
    void PageTableChanged(Common::PageTable& new_page_table,
                          std::size_t new_address_space_size_in_bits) override;


@@ -28,8 +28,8 @@ using namespace Common::Literals;
class DynarmicCallbacks64 : public Dynarmic::A64::UserCallbacks {
public:
    explicit DynarmicCallbacks64(ARM_Dynarmic_64& parent_)
        : parent{parent_},
          memory(parent.system.Memory()), debugger_enabled{parent.system.DebuggerEnabled()},
        : parent{parent_}, memory(parent.system.ApplicationMemory()),
          debugger_enabled{parent.system.DebuggerEnabled()},
          check_memory_access{debugger_enabled ||
                              !Settings::values.cpuopt_ignore_memory_aborts.GetValue()} {}

@@ -117,7 +117,7 @@ public:
    }

    void InstructionCacheOperationRaised(Dynarmic::A64::InstructionCacheOperation op,
                                         VAddr value) override {
                                         u64 value) override {
        switch (op) {
        case Dynarmic::A64::InstructionCacheOperation::InvalidateByVAToPoU: {
            static constexpr u64 ICACHE_LINE_SIZE = 64;
@@ -199,7 +199,7 @@ public:
        return parent.system.CoreTiming().GetClockTicks();
    }

    bool CheckMemoryAccess(VAddr addr, u64 size, Kernel::DebugWatchpointType type) {
    bool CheckMemoryAccess(u64 addr, u64 size, Kernel::DebugWatchpointType type) {
        if (!check_memory_access) {
            return true;
        }
@@ -452,7 +452,7 @@ u64 ARM_Dynarmic_64::GetTlsAddress() const {
    return cb->tpidrro_el0;
}

void ARM_Dynarmic_64::SetTlsAddress(VAddr address) {
void ARM_Dynarmic_64::SetTlsAddress(u64 address) {
    cb->tpidrro_el0 = address;
}

@@ -500,7 +500,7 @@ void ARM_Dynarmic_64::ClearInstructionCache() {
    jit.load()->ClearCache();
}

void ARM_Dynarmic_64::InvalidateCacheRange(VAddr addr, std::size_t size) {
void ARM_Dynarmic_64::InvalidateCacheRange(u64 addr, std::size_t size) {
    jit.load()->InvalidateCacheRange(addr, size);
}

@@ -529,7 +529,7 @@ void ARM_Dynarmic_64::PageTableChanged(Common::PageTable& page_table,
std::vector<ARM_Interface::BacktraceEntry> ARM_Dynarmic_64::GetBacktrace(Core::System& system,
                                                                         u64 fp, u64 lr, u64 pc) {
    std::vector<BacktraceEntry> out;
    auto& memory = system.Memory();
    auto& memory = system.ApplicationMemory();

    out.push_back({"", 0, pc, 0, ""});


@@ -38,8 +38,8 @@
    void SetVectorReg(int index, u128 value) override;
    u32 GetPSTATE() const override;
    void SetPSTATE(u32 pstate) override;
    VAddr GetTlsAddress() const override;
    void SetTlsAddress(VAddr address) override;
    u64 GetTlsAddress() const override;
    void SetTlsAddress(u64 address) override;
    void SetTPIDR_EL0(u64 value) override;
    u64 GetTPIDR_EL0() const override;

@@ -53,7 +53,7 @@ public:
    void ClearExclusiveState() override;

    void ClearInstructionCache() override;
    void InvalidateCacheRange(VAddr addr, std::size_t size) override;
    void InvalidateCacheRange(u64 addr, std::size_t size) override;
    void PageTableChanged(Common::PageTable& new_page_table,
                          std::size_t new_address_space_size_in_bits) override;


@@ -293,6 +293,7 @@ struct System::Impl {
        ASSERT(Kernel::KProcess::Initialize(main_process, system, "main",
                                            Kernel::KProcess::ProcessType::Userland, resource_limit)
                   .IsSuccess());
        kernel.MakeApplicationProcess(main_process);
        const auto [load_result, load_parameters] = app_loader->Load(*main_process, system);
        if (load_result != Loader::ResultStatus::Success) {
            LOG_CRITICAL(Core, "Failed to load ROM (Error {})!", load_result);
@@ -302,7 +303,6 @@ struct System::Impl {
                static_cast<u32>(SystemResultStatus::ErrorLoader) + static_cast<u32>(load_result));
        }
        AddGlueRegistrationForProcess(*app_loader, *main_process);
        kernel.MakeApplicationProcess(main_process);
        kernel.InitializeCores();

        // Initialize cheat engine
@@ -434,7 +434,7 @@ struct System::Impl {
        }

        Service::Glue::ApplicationLaunchProperty launch{};
        launch.title_id = process.GetProgramID();
        launch.title_id = process.GetProgramId();

        FileSys::PatchManager pm{launch.title_id, fs_controller, *content_provider};
        launch.version = pm.GetGameVersion().value_or(0);
@@ -564,7 +564,7 @@ void System::InvalidateCpuInstructionCaches() {
    impl->kernel.InvalidateAllInstructionCaches();
}

void System::InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size) {
void System::InvalidateCpuInstructionCacheRange(u64 addr, std::size_t size) {
    impl->kernel.InvalidateCpuInstructionCacheRange(addr, size);
}

@@ -681,11 +681,11 @@ const ExclusiveMonitor& System::Monitor() const {
    return impl->kernel.GetExclusiveMonitor();
}

Memory::Memory& System::Memory() {
Memory::Memory& System::ApplicationMemory() {
    return impl->memory;
}

const Core::Memory::Memory& System::Memory() const {
const Core::Memory::Memory& System::ApplicationMemory() const {
    return impl->memory;
}

@@ -762,7 +762,7 @@ const Core::SpeedLimiter& System::SpeedLimiter() const {
}

u64 System::GetApplicationProcessProgramID() const {
    return impl->kernel.ApplicationProcess()->GetProgramID();
    return impl->kernel.ApplicationProcess()->GetProgramId();
}

Loader::ResultStatus System::GetGameName(std::string& out) const {
@@ -794,7 +794,7 @@ FileSys::VirtualFilesystem System::GetFilesystem() const {
}

void System::RegisterCheatList(const std::vector<Memory::CheatEntry>& list,
                               const std::array<u8, 32>& build_id, VAddr main_region_begin,
                               const std::array<u8, 32>& build_id, u64 main_region_begin,
                               u64 main_region_size) {
    impl->cheat_engine = std::make_unique<Memory::CheatEngine>(*this, list, build_id);
    impl->cheat_engine->SetMainMemoryParameters(main_region_begin, main_region_size);

@@ -172,7 +172,7 @@ public:
     */
    void InvalidateCpuInstructionCaches();

    void InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size);
    void InvalidateCpuInstructionCacheRange(u64 addr, std::size_t size);

    /// Shutdown the main emulated process.
    void ShutdownMainProcess();
@@ -256,10 +256,10 @@ public:
    [[nodiscard]] const ExclusiveMonitor& Monitor() const;

    /// Gets a mutable reference to the system memory instance.
    [[nodiscard]] Core::Memory::Memory& Memory();
    [[nodiscard]] Core::Memory::Memory& ApplicationMemory();

    /// Gets a constant reference to the system memory instance.
    [[nodiscard]] const Core::Memory::Memory& Memory() const;
    [[nodiscard]] const Core::Memory::Memory& ApplicationMemory() const;

    /// Gets a mutable reference to the GPU interface
    [[nodiscard]] Tegra::GPU& GPU();
@@ -353,7 +353,7 @@ public:
    [[nodiscard]] FileSys::VirtualFilesystem GetFilesystem() const;

    void RegisterCheatList(const std::vector<Memory::CheatEntry>& list,
                           const std::array<u8, 0x20>& build_id, VAddr main_region_begin,
                           const std::array<u8, 0x20>& build_id, u64 main_region_begin,
                           u64 main_region_size);

    void SetAppletFrontendSet(Service::AM::Applets::AppletFrontendSet&& set);

@@ -10,6 +10,10 @@
#include "common/windows/timer_resolution.h"
#endif

#ifdef ARCHITECTURE_x86_64
#include "common/x64/cpu_wait.h"
#endif

#include "common/microprofile.h"
#include "core/core_timing.h"
#include "core/core_timing_util.h"
@@ -269,7 +273,11 @@ void CoreTiming::ThreadLoop() {
        if (wait_time >= timer_resolution_ns) {
            Common::Windows::SleepForOneTick();
        } else {
#ifdef ARCHITECTURE_x86_64
            Common::X64::MicroSleep();
#else
            std::this_thread::yield();
#endif
        }
    }


@@ -118,14 +118,14 @@ void GDBStub::Watchpoint(Kernel::KThread* thread, const Kernel::DebugWatchpoint&

    switch (watch.type) {
    case Kernel::DebugWatchpointType::Read:
        SendReply(fmt::format("{}rwatch:{:x};", status, watch.start_address));
        SendReply(fmt::format("{}rwatch:{:x};", status, GetInteger(watch.start_address)));
        break;
    case Kernel::DebugWatchpointType::Write:
        SendReply(fmt::format("{}watch:{:x};", status, watch.start_address));
        SendReply(fmt::format("{}watch:{:x};", status, GetInteger(watch.start_address)));
        break;
    case Kernel::DebugWatchpointType::ReadOrWrite:
    default:
        SendReply(fmt::format("{}awatch:{:x};", status, watch.start_address));
        SendReply(fmt::format("{}awatch:{:x};", status, GetInteger(watch.start_address)));
        break;
    }
}
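These replies are GDB Remote Serial Protocol stop-reply packets of the form `T<signal><reason>:<value>;`, where the reason is `watch`, `rwatch`, or `awatch` and the value is the data address that tripped the watchpoint. For example, a write watchpoint hit at 0xdeadbeef under SIGTRAP would serialize as below (sketch with illustrative values, not part of the diff):

// Sketch: a SIGTRAP (0x05) stop reply for a write watchpoint at 0xdeadbeef,
// producing the packet body "T05watch:deadbeef;".
const std::string reply = fmt::format("T{:02x}watch:{:x};", 5, u64{0xdeadbeef});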
@@ -261,9 +261,9 @@ void GDBStub::ExecuteCommand(std::string_view packet, std::vector<DebuggerAction
    const size_t addr{static_cast<size_t>(strtoll(command.data(), nullptr, 16))};
    const size_t size{static_cast<size_t>(strtoll(command.data() + sep, nullptr, 16))};

    if (system.Memory().IsValidVirtualAddressRange(addr, size)) {
    if (system.ApplicationMemory().IsValidVirtualAddressRange(addr, size)) {
        std::vector<u8> mem(size);
        system.Memory().ReadBlock(addr, mem.data(), size);
        system.ApplicationMemory().ReadBlock(addr, mem.data(), size);

        SendReply(Common::HexToString(mem));
    } else {
@@ -281,8 +281,8 @@ void GDBStub::ExecuteCommand(std::string_view packet, std::vector<DebuggerAction
    const auto mem_substr{std::string_view(command).substr(mem_sep)};
    const auto mem{Common::HexStringToVector(mem_substr, false)};

    if (system.Memory().IsValidVirtualAddressRange(addr, size)) {
        system.Memory().WriteBlock(addr, mem.data(), size);
    if (system.ApplicationMemory().IsValidVirtualAddressRange(addr, size)) {
        system.ApplicationMemory().WriteBlock(addr, mem.data(), size);
        system.InvalidateCpuInstructionCacheRange(addr, size);
        SendReply(GDB_STUB_REPLY_OK);
    } else {
@@ -325,7 +325,7 @@ void GDBStub::HandleBreakpointInsert(std::string_view command) {
    const size_t addr{static_cast<size_t>(strtoll(command.data() + addr_sep, nullptr, 16))};
    const size_t size{static_cast<size_t>(strtoll(command.data() + size_sep, nullptr, 16))};

    if (!system.Memory().IsValidVirtualAddressRange(addr, size)) {
    if (!system.ApplicationMemory().IsValidVirtualAddressRange(addr, size)) {
        SendReply(GDB_STUB_REPLY_ERR);
        return;
    }
@@ -334,22 +334,22 @@ void GDBStub::HandleBreakpointInsert(std::string_view command) {

    switch (type) {
    case BreakpointType::Software:
        replaced_instructions[addr] = system.Memory().Read32(addr);
        system.Memory().Write32(addr, arch->BreakpointInstruction());
        replaced_instructions[addr] = system.ApplicationMemory().Read32(addr);
        system.ApplicationMemory().Write32(addr, arch->BreakpointInstruction());
        system.InvalidateCpuInstructionCacheRange(addr, sizeof(u32));
        success = true;
        break;
    case BreakpointType::WriteWatch:
        success = system.ApplicationProcess()->InsertWatchpoint(system, addr, size,
        success = system.ApplicationProcess()->InsertWatchpoint(addr, size,
                                                                Kernel::DebugWatchpointType::Write);
        break;
    case BreakpointType::ReadWatch:
        success = system.ApplicationProcess()->InsertWatchpoint(system, addr, size,
        success = system.ApplicationProcess()->InsertWatchpoint(addr, size,
                                                                Kernel::DebugWatchpointType::Read);
        break;
    case BreakpointType::AccessWatch:
        success = system.ApplicationProcess()->InsertWatchpoint(
            system, addr, size, Kernel::DebugWatchpointType::ReadOrWrite);
            addr, size, Kernel::DebugWatchpointType::ReadOrWrite);
        break;
    case BreakpointType::Hardware:
    default:
@@ -372,7 +372,7 @@ void GDBStub::HandleBreakpointRemove(std::string_view command) {
    const size_t addr{static_cast<size_t>(strtoll(command.data() + addr_sep, nullptr, 16))};
    const size_t size{static_cast<size_t>(strtoll(command.data() + size_sep, nullptr, 16))};

    if (!system.Memory().IsValidVirtualAddressRange(addr, size)) {
    if (!system.ApplicationMemory().IsValidVirtualAddressRange(addr, size)) {
        SendReply(GDB_STUB_REPLY_ERR);
        return;
    }
@@ -383,7 +383,7 @@ void GDBStub::HandleBreakpointRemove(std::string_view command) {
    case BreakpointType::Software: {
        const auto orig_insn{replaced_instructions.find(addr)};
        if (orig_insn != replaced_instructions.end()) {
            system.Memory().Write32(addr, orig_insn->second);
            system.ApplicationMemory().Write32(addr, orig_insn->second);
            system.InvalidateCpuInstructionCacheRange(addr, sizeof(u32));
            replaced_instructions.erase(addr);
            success = true;
@@ -391,16 +391,16 @@ void GDBStub::HandleBreakpointRemove(std::string_view command) {
        break;
    }
    case BreakpointType::WriteWatch:
        success = system.ApplicationProcess()->RemoveWatchpoint(system, addr, size,
        success = system.ApplicationProcess()->RemoveWatchpoint(addr, size,
                                                                Kernel::DebugWatchpointType::Write);
        break;
    case BreakpointType::ReadWatch:
        success = system.ApplicationProcess()->RemoveWatchpoint(system, addr, size,
        success = system.ApplicationProcess()->RemoveWatchpoint(addr, size,
                                                                Kernel::DebugWatchpointType::Read);
        break;
    case BreakpointType::AccessWatch:
        success = system.ApplicationProcess()->RemoveWatchpoint(
            system, addr, size, Kernel::DebugWatchpointType::ReadOrWrite);
            addr, size, Kernel::DebugWatchpointType::ReadOrWrite);
        break;
    case BreakpointType::Hardware:
    default:
|
||||
static std::optional<std::string> GetNameFromThreadType32(Core::Memory::Memory& memory,
|
||||
const Kernel::KThread* thread) {
|
||||
// Read thread type from TLS
|
||||
const VAddr tls_thread_type{memory.Read32(thread->GetTLSAddress() + 0x1fc)};
|
||||
const VAddr tls_thread_type{memory.Read32(thread->GetTlsAddress() + 0x1fc)};
|
||||
const VAddr argument_thread_type{thread->GetArgument()};
|
||||
|
||||
if (argument_thread_type && tls_thread_type != argument_thread_type) {
|
||||
@@ -452,7 +452,7 @@ static std::optional<std::string> GetNameFromThreadType32(Core::Memory::Memory&
|
||||
static std::optional<std::string> GetNameFromThreadType64(Core::Memory::Memory& memory,
|
||||
const Kernel::KThread* thread) {
|
||||
// Read thread type from TLS
|
||||
const VAddr tls_thread_type{memory.Read64(thread->GetTLSAddress() + 0x1f8)};
|
||||
const VAddr tls_thread_type{memory.Read64(thread->GetTlsAddress() + 0x1f8)};
|
||||
const VAddr argument_thread_type{thread->GetArgument()};
|
||||
|
||||
if (argument_thread_type && tls_thread_type != argument_thread_type) {
|
||||
@@ -483,9 +483,9 @@ static std::optional<std::string> GetNameFromThreadType64(Core::Memory::Memory&
|
||||
static std::optional<std::string> GetThreadName(Core::System& system,
|
||||
const Kernel::KThread* thread) {
|
||||
if (system.ApplicationProcess()->Is64BitProcess()) {
|
||||
return GetNameFromThreadType64(system.Memory(), thread);
|
||||
return GetNameFromThreadType64(system.ApplicationMemory(), thread);
|
||||
} else {
|
||||
return GetNameFromThreadType32(system.Memory(), thread);
|
||||
return GetNameFromThreadType32(system.ApplicationMemory(), thread);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -554,8 +554,9 @@ void GDBStub::HandleQuery(std::string_view command) {
        if (main != modules.end()) {
            SendReply(fmt::format("TextSeg={:x}", main->first));
        } else {
            SendReply(fmt::format("TextSeg={:x}",
                                  system.ApplicationProcess()->PageTable().GetCodeRegionStart()));
            SendReply(fmt::format(
                "TextSeg={:x}",
                GetInteger(system.ApplicationProcess()->PageTable().GetCodeRegionStart())));
        }
    } else if (command.starts_with("Xfer:libraries:read::")) {
        Loader::AppLoader::Modules modules;
@@ -576,7 +577,7 @@ void GDBStub::HandleQuery(std::string_view command) {
        const auto& threads = system.ApplicationProcess()->GetThreadList();
        std::vector<std::string> thread_ids;
        for (const auto& thread : threads) {
            thread_ids.push_back(fmt::format("{:x}", thread->GetThreadID()));
            thread_ids.push_back(fmt::format("{:x}", thread->GetThreadId()));
        }
        SendReply(fmt::format("m{}", fmt::join(thread_ids, ",")));
    } else if (command.starts_with("sThreadInfo")) {
@@ -591,11 +592,11 @@ void GDBStub::HandleQuery(std::string_view command) {
        for (const auto* thread : threads) {
            auto thread_name{GetThreadName(system, thread)};
            if (!thread_name) {
                thread_name = fmt::format("Thread {:d}", thread->GetThreadID());
                thread_name = fmt::format("Thread {:d}", thread->GetThreadId());
            }

            buffer += fmt::format(R"(<thread id="{:x}" core="{:d}" name="{}">{}</thread>)",
                                  thread->GetThreadID(), thread->GetActiveCore(),
                                  thread->GetThreadId(), thread->GetActiveCore(),
                                  EscapeXML(*thread_name), GetThreadState(thread));
        }

@@ -756,18 +757,21 @@ void GDBStub::HandleRcmd(const std::vector<u8>& command) {

        reply = fmt::format("Process: {:#x} ({})\n"
                            "Program Id: {:#018x}\n",
                            process->GetProcessID(), process->GetName(), process->GetProgramID());
        reply +=
            fmt::format("Layout:\n"
                        " Alias: {:#012x} - {:#012x}\n"
                        " Heap: {:#012x} - {:#012x}\n"
                        " Aslr: {:#012x} - {:#012x}\n"
                        " Stack: {:#012x} - {:#012x}\n"
                        "Modules:\n",
                        page_table.GetAliasRegionStart(), page_table.GetAliasRegionEnd(),
                        page_table.GetHeapRegionStart(), page_table.GetHeapRegionEnd(),
                        page_table.GetAliasCodeRegionStart(), page_table.GetAliasCodeRegionEnd(),
                        page_table.GetStackRegionStart(), page_table.GetStackRegionEnd());
                            process->GetProcessId(), process->GetName(), process->GetProgramId());
        reply += fmt::format("Layout:\n"
                             " Alias: {:#012x} - {:#012x}\n"
                             " Heap: {:#012x} - {:#012x}\n"
                             " Aslr: {:#012x} - {:#012x}\n"
                             " Stack: {:#012x} - {:#012x}\n"
                             "Modules:\n",
                             GetInteger(page_table.GetAliasRegionStart()),
                             GetInteger(page_table.GetAliasRegionEnd()),
                             GetInteger(page_table.GetHeapRegionStart()),
                             GetInteger(page_table.GetHeapRegionEnd()),
                             GetInteger(page_table.GetAliasCodeRegionStart()),
                             GetInteger(page_table.GetAliasCodeRegionEnd()),
                             GetInteger(page_table.GetStackRegionStart()),
                             GetInteger(page_table.GetStackRegionEnd()));

        for (const auto& [vaddr, name] : modules) {
            reply += fmt::format(" {:#012x} - {:#012x} {}\n", vaddr,
@@ -819,7 +823,7 @@ void GDBStub::HandleRcmd(const std::vector<u8>& command) {
Kernel::KThread* GDBStub::GetThreadByID(u64 thread_id) {
    const auto& threads{system.ApplicationProcess()->GetThreadList()};
    for (auto* thread : threads) {
        if (thread->GetThreadID() == thread_id) {
        if (thread->GetThreadId() == thread_id) {
            return thread;
        }
    }

@@ -259,7 +259,7 @@ void GDBStubA64::WriteRegisters(Kernel::KThread* thread, std::string_view regist
std::string GDBStubA64::ThreadStatus(const Kernel::KThread* thread, u8 signal) const {
    return fmt::format("T{:02x}{:02x}:{};{:02x}:{};{:02x}:{};thread:{:x};", signal, PC_REGISTER,
                       RegRead(thread, PC_REGISTER), SP_REGISTER, RegRead(thread, SP_REGISTER),
                       LR_REGISTER, RegRead(thread, LR_REGISTER), thread->GetThreadID());
                       LR_REGISTER, RegRead(thread, LR_REGISTER), thread->GetThreadId());
}

u32 GDBStubA64::BreakpointInstruction() const {
@@ -469,7 +469,7 @@ void GDBStubA32::WriteRegisters(Kernel::KThread* thread, std::string_view regist
std::string GDBStubA32::ThreadStatus(const Kernel::KThread* thread, u8 signal) const {
    return fmt::format("T{:02x}{:02x}:{};{:02x}:{};{:02x}:{};thread:{:x};", signal, PC_REGISTER,
                       RegRead(thread, PC_REGISTER), SP_REGISTER, RegRead(thread, SP_REGISTER),
                       LR_REGISTER, RegRead(thread, LR_REGISTER), thread->GetThreadID());
                       LR_REGISTER, RegRead(thread, LR_REGISTER), thread->GetThreadId());
}

u32 GDBStubA32::BreakpointInstruction() const {

@@ -3,8 +3,8 @@

#pragma once

#include "common/common_types.h"
#include "common/host_memory.h"
#include "common/typed_address.h"

namespace Core {

@@ -25,20 +25,22 @@ public:
    DeviceMemory(const DeviceMemory&) = delete;

    template <typename T>
    PAddr GetPhysicalAddr(const T* ptr) const {
    Common::PhysicalAddress GetPhysicalAddr(const T* ptr) const {
        return (reinterpret_cast<uintptr_t>(ptr) -
                reinterpret_cast<uintptr_t>(buffer.BackingBasePointer())) +
               DramMemoryMap::Base;
    }

    template <typename T>
    T* GetPointer(PAddr addr) {
        return reinterpret_cast<T*>(buffer.BackingBasePointer() + (addr - DramMemoryMap::Base));
    T* GetPointer(Common::PhysicalAddress addr) {
        return reinterpret_cast<T*>(buffer.BackingBasePointer() +
                                    (GetInteger(addr) - DramMemoryMap::Base));
    }

    template <typename T>
    const T* GetPointer(PAddr addr) const {
        return reinterpret_cast<T*>(buffer.BackingBasePointer() + (addr - DramMemoryMap::Base));
    const T* GetPointer(Common::PhysicalAddress addr) const {
        return reinterpret_cast<T*>(buffer.BackingBasePointer() +
                                    (GetInteger(addr) - DramMemoryMap::Base));
    }

    Common::HostMemory buffer;

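The GetInteger(...) calls threaded through this changeset come from the new typed address wrappers (Common::PhysicalAddress, KVirtualAddress, KProcessAddress, and so on): strong typedefs around u64 that keep physical, virtual, and process addresses from being mixed accidentally. A stripped-down sketch of the idea (the real implementation in common/typed_address.h is a template with arithmetic operators; this is illustrative only):

// Sketch of the strong-typedef pattern behind the typed address types.
class PhysicalAddress {
public:
    constexpr explicit PhysicalAddress(u64 value) : m_value{value} {}
    constexpr u64 GetValue() const { return m_value; }
private:
    u64 m_value;
};

// Mirrors the GetInteger(...) helper used throughout the diff: the one
// sanctioned escape hatch back to a raw integer.
constexpr u64 GetInteger(PhysicalAddress addr) {
    return addr.GetValue();
}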
14
src/core/frontend/applets/applet.h
Normal file
@@ -0,0 +1,14 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

namespace Core::Frontend {

class Applet {
public:
    virtual ~Applet() = default;
    virtual void Close() const = 0;
};

} // namespace Core::Frontend
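With this base class, every frontend applet exposes a uniform Close() hook so the emulator can tear down whatever UI an applet opened. A hypothetical frontend implementation might look like the sketch below (QtDialogApplet and its behavior are invented for illustration):

// Hypothetical frontend applet (illustrative only).
class QtDialogApplet final : public Core::Frontend::Applet {
public:
    void Close() const override {
        // Dismiss the dialog this applet opened, if any.
    }
};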
@@ -10,6 +10,8 @@ namespace Core::Frontend {

CabinetApplet::~CabinetApplet() = default;

void DefaultCabinetApplet::Close() const {}

void DefaultCabinetApplet::ShowCabinetApplet(
    const CabinetCallback& callback, const CabinetParameters& parameters,
    std::shared_ptr<Service::NFP::NfpDevice> nfp_device) const {

@@ -4,6 +4,7 @@
#pragma once

#include <functional>
#include "core/frontend/applets/applet.h"
#include "core/hle/service/nfp/nfp_types.h"

namespace Service::NFP {
@@ -20,7 +21,7 @@ struct CabinetParameters {

using CabinetCallback = std::function<void(bool, const std::string&)>;

class CabinetApplet {
class CabinetApplet : public Applet {
public:
    virtual ~CabinetApplet();
    virtual void ShowCabinetApplet(const CabinetCallback& callback,
@@ -30,6 +31,7 @@ public:

class DefaultCabinetApplet final : public CabinetApplet {
public:
    void Close() const override;
    void ShowCabinetApplet(const CabinetCallback& callback, const CabinetParameters& parameters,
                           std::shared_ptr<Service::NFP::NfpDevice> nfp_device) const override;
};

@@ -16,6 +16,8 @@ DefaultControllerApplet::DefaultControllerApplet(HID::HIDCore& hid_core_) : hid_

DefaultControllerApplet::~DefaultControllerApplet() = default;

void DefaultControllerApplet::Close() const {}

void DefaultControllerApplet::ReconfigureControllers(ReconfigureCallback callback,
                                                     const ControllerParameters& parameters) const {
    LOG_INFO(Service_HID, "called, deducing the best configuration based on the given parameters!");
@@ -69,7 +71,7 @@ void DefaultControllerApplet::ReconfigureControllers(ReconfigureCallback callbac
        }
    }

    callback();
    callback(true);
}

} // namespace Core::Frontend

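ReconfigureCallback now carries a bool so the frontend can distinguish a completed configuration (true, as the default applet reports) from a user cancel (false). A hypothetical call site (illustrative only; the lambda body is invented):

// Sketch: the bool tells the caller whether the user confirmed or canceled.
controller_applet->ReconfigureControllers(
    [](bool is_accepted) {
        if (!is_accepted) {
            // User backed out; leave the previous configuration in place.
        }
    },
    parameters);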
@@ -7,6 +7,7 @@
#include <vector>

#include "common/common_types.h"
#include "core/frontend/applets/applet.h"

namespace Core::HID {
class HIDCore;
@@ -34,9 +35,9 @@ struct ControllerParameters {
    bool allow_gamecube_controller{};
};

class ControllerApplet {
class ControllerApplet : public Applet {
public:
    using ReconfigureCallback = std::function<void()>;
    using ReconfigureCallback = std::function<void(bool)>;

    virtual ~ControllerApplet();

@@ -49,6 +50,7 @@ public:
    explicit DefaultControllerApplet(HID::HIDCore& hid_core_);
    ~DefaultControllerApplet() override;

    void Close() const override;
    void ReconfigureControllers(ReconfigureCallback callback,
                                const ControllerParameters& parameters) const override;


@@ -8,6 +8,8 @@ namespace Core::Frontend {

ErrorApplet::~ErrorApplet() = default;

void DefaultErrorApplet::Close() const {}

void DefaultErrorApplet::ShowError(Result error, FinishedCallback finished) const {
    LOG_CRITICAL(Service_Fatal, "Application requested error display: {:04}-{:04} (raw={:08X})",
                 error.module.Value(), error.description.Value(), error.raw);

@@ -6,11 +6,12 @@
#include <chrono>
#include <functional>

#include "core/frontend/applets/applet.h"
#include "core/hle/result.h"

namespace Core::Frontend {

class ErrorApplet {
class ErrorApplet : public Applet {
public:
    using FinishedCallback = std::function<void()>;

@@ -28,6 +29,7 @@ public:

class DefaultErrorApplet final : public ErrorApplet {
public:
    void Close() const override;
    void ShowError(Result error, FinishedCallback finished) const override;
    void ShowErrorWithTimestamp(Result error, std::chrono::seconds time,
                                FinishedCallback finished) const override;

@@ -10,6 +10,8 @@ ParentalControlsApplet::~ParentalControlsApplet() = default;

DefaultParentalControlsApplet::~DefaultParentalControlsApplet() = default;

void DefaultParentalControlsApplet::Close() const {}

void DefaultParentalControlsApplet::VerifyPIN(std::function<void(bool)> finished,
                                              bool suspend_future_verification_temporarily) {
    LOG_INFO(Service_AM,
@@ -39,6 +41,8 @@ PhotoViewerApplet::~PhotoViewerApplet() = default;

DefaultPhotoViewerApplet::~DefaultPhotoViewerApplet() = default;

void DefaultPhotoViewerApplet::Close() const {}

void DefaultPhotoViewerApplet::ShowPhotosForApplication(u64 title_id,
                                                        std::function<void()> finished) const {
    LOG_INFO(Service_AM,

@@ -6,9 +6,11 @@
#include <functional>
#include "common/common_types.h"

#include "core/frontend/applets/applet.h"

namespace Core::Frontend {

class ParentalControlsApplet {
class ParentalControlsApplet : public Applet {
public:
    virtual ~ParentalControlsApplet();

@@ -33,6 +35,7 @@ class DefaultParentalControlsApplet final : public ParentalControlsApplet {
public:
    ~DefaultParentalControlsApplet() override;

    void Close() const override;
    void VerifyPIN(std::function<void(bool)> finished,
                   bool suspend_future_verification_temporarily) override;
    void VerifyPINForSettings(std::function<void(bool)> finished) override;
@@ -40,7 +43,7 @@ public:
    void ChangePIN(std::function<void()> finished) override;
};

class PhotoViewerApplet {
class PhotoViewerApplet : public Applet {
public:
    virtual ~PhotoViewerApplet();

@@ -52,6 +55,7 @@ class DefaultPhotoViewerApplet final : public PhotoViewerApplet {
public:
    ~DefaultPhotoViewerApplet() override;

    void Close() const override;
    void ShowPhotosForApplication(u64 title_id, std::function<void()> finished) const override;
    void ShowAllPhotos(std::function<void()> finished) const override;
};

@@ -8,6 +8,8 @@ namespace Core::Frontend {

MiiEditApplet::~MiiEditApplet() = default;

void DefaultMiiEditApplet::Close() const {}

void DefaultMiiEditApplet::ShowMiiEdit(const MiiEditCallback& callback) const {
    LOG_WARNING(Service_AM, "(STUBBED) called");


@@ -5,9 +5,11 @@

#include <functional>

#include "core/frontend/applets/applet.h"

namespace Core::Frontend {

class MiiEditApplet {
class MiiEditApplet : public Applet {
public:
    using MiiEditCallback = std::function<void()>;

@@ -18,6 +20,7 @@ public:

class DefaultMiiEditApplet final : public MiiEditApplet {
public:
    void Close() const override;
    void ShowMiiEdit(const MiiEditCallback& callback) const override;
};


@@ -9,7 +9,10 @@ namespace Core::Frontend {

ProfileSelectApplet::~ProfileSelectApplet() = default;

void DefaultProfileSelectApplet::SelectProfile(SelectProfileCallback callback) const {
void DefaultProfileSelectApplet::Close() const {}

void DefaultProfileSelectApplet::SelectProfile(SelectProfileCallback callback,
                                               const ProfileSelectParameters& parameters) const {
    Service::Account::ProfileManager manager;
    callback(manager.GetUser(Settings::values.current_user.GetValue()).value_or(Common::UUID{}));
    LOG_INFO(Service_ACC, "called, selecting current user instead of prompting...");

@@ -5,22 +5,35 @@

#include <functional>
#include <optional>

#include "common/uuid.h"
#include "core/frontend/applets/applet.h"
#include "core/hle/service/am/applets/applet_profile_select.h"

namespace Core::Frontend {

class ProfileSelectApplet {
struct ProfileSelectParameters {
    Service::AM::Applets::UiMode mode;
    std::array<Common::UUID, 8> invalid_uid_list;
    Service::AM::Applets::UiSettingsDisplayOptions display_options;
    Service::AM::Applets::UserSelectionPurpose purpose;
};

class ProfileSelectApplet : public Applet {
public:
    using SelectProfileCallback = std::function<void(std::optional<Common::UUID>)>;

    virtual ~ProfileSelectApplet();

    virtual void SelectProfile(SelectProfileCallback callback) const = 0;
    virtual void SelectProfile(SelectProfileCallback callback,
                               const ProfileSelectParameters& parameters) const = 0;
};

class DefaultProfileSelectApplet final : public ProfileSelectApplet {
public:
    void SelectProfile(SelectProfileCallback callback) const override;
    void Close() const override;
    void SelectProfile(SelectProfileCallback callback,
                       const ProfileSelectParameters& parameters) const override;
};

} // namespace Core::Frontend

@@ -13,6 +13,8 @@ SoftwareKeyboardApplet::~SoftwareKeyboardApplet() = default;

DefaultSoftwareKeyboardApplet::~DefaultSoftwareKeyboardApplet() = default;

void DefaultSoftwareKeyboardApplet::Close() const {}

void DefaultSoftwareKeyboardApplet::InitializeKeyboard(
    bool is_inline, KeyboardInitializeParameters initialize_parameters,
    SubmitNormalCallback submit_normal_callback_, SubmitInlineCallback submit_inline_callback_) {

@@ -7,6 +7,7 @@

#include "common/common_types.h"

#include "core/frontend/applets/applet.h"
#include "core/hle/service/am/applets/applet_software_keyboard_types.h"

namespace Core::Frontend {
@@ -52,7 +53,7 @@ struct InlineTextParameters {
    s32 cursor_position;
};

class SoftwareKeyboardApplet {
class SoftwareKeyboardApplet : public Applet {
public:
    using SubmitInlineCallback =
        std::function<void(Service::AM::Applets::SwkbdReplyType, std::u16string, s32)>;
@@ -84,6 +85,8 @@ class DefaultSoftwareKeyboardApplet final : public SoftwareKeyboardApplet {
public:
    ~DefaultSoftwareKeyboardApplet() override;

    void Close() const override;

    void InitializeKeyboard(bool is_inline, KeyboardInitializeParameters initialize_parameters,
                            SubmitNormalCallback submit_normal_callback_,
                            SubmitInlineCallback submit_inline_callback_) override;

@@ -10,6 +10,8 @@ WebBrowserApplet::~WebBrowserApplet() = default;

DefaultWebBrowserApplet::~DefaultWebBrowserApplet() = default;

void DefaultWebBrowserApplet::Close() const {}

void DefaultWebBrowserApplet::OpenLocalWebPage(const std::string& local_url,
                                               ExtractROMFSCallback extract_romfs_callback,
                                               OpenWebPageCallback callback) const {

@@ -5,11 +5,12 @@

#include <functional>

#include "core/frontend/applets/applet.h"
#include "core/hle/service/am/applets/applet_web_browser_types.h"

namespace Core::Frontend {

class WebBrowserApplet {
class WebBrowserApplet : public Applet {
public:
    using ExtractROMFSCallback = std::function<void()>;
    using OpenWebPageCallback =
@@ -29,6 +30,8 @@ class DefaultWebBrowserApplet final : public WebBrowserApplet {
public:
    ~DefaultWebBrowserApplet() override;

    void Close() const override;

    void OpenLocalWebPage(const std::string& local_url, ExtractROMFSCallback extract_romfs_callback,
                          OpenWebPageCallback callback) const override;


@@ -76,22 +76,24 @@ void SetupDevicePhysicalMemoryRegions(KMemoryLayout& memory_layout) {

void SetupDramPhysicalMemoryRegions(KMemoryLayout& memory_layout) {
    const size_t intended_memory_size = KSystemControl::Init::GetIntendedMemorySize();
    const PAddr physical_memory_base_address =
    const KPhysicalAddress physical_memory_base_address =
        KSystemControl::Init::GetKernelPhysicalBaseAddress(DramPhysicalAddress);

    // Insert blocks into the tree.
    ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
        physical_memory_base_address, intended_memory_size, KMemoryRegionType_Dram));
        GetInteger(physical_memory_base_address), intended_memory_size, KMemoryRegionType_Dram));
    ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
        physical_memory_base_address, ReservedEarlyDramSize, KMemoryRegionType_DramReservedEarly));
        GetInteger(physical_memory_base_address), ReservedEarlyDramSize,
        KMemoryRegionType_DramReservedEarly));

    // Insert the KTrace block at the end of Dram, if KTrace is enabled.
    static_assert(!IsKTraceEnabled || KTraceBufferSize > 0);
    if constexpr (IsKTraceEnabled) {
        const PAddr ktrace_buffer_phys_addr =
        const KPhysicalAddress ktrace_buffer_phys_addr =
            physical_memory_base_address + intended_memory_size - KTraceBufferSize;
        ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
            ktrace_buffer_phys_addr, KTraceBufferSize, KMemoryRegionType_KernelTraceBuffer));
            GetInteger(ktrace_buffer_phys_addr), KTraceBufferSize,
            KMemoryRegionType_KernelTraceBuffer));
    }
}

@@ -3,10 +3,10 @@

#pragma once

#include "common/common_types.h"
#include "core/hle/kernel/k_typed_address.h"

namespace Kernel {

constexpr inline PAddr MainMemoryAddress = 0x80000000;
constexpr inline KPhysicalAddress MainMemoryAddress = 0x80000000;

} // namespace Kernel

@@ -61,7 +61,7 @@ size_t KSystemControl::Init::GetIntendedMemorySize() {
    }
}

PAddr KSystemControl::Init::GetKernelPhysicalBaseAddress(u64 base_address) {
KPhysicalAddress KSystemControl::Init::GetKernelPhysicalBaseAddress(KPhysicalAddress base_address) {
    const size_t real_dram_size = KSystemControl::Init::GetRealMemorySize();
    const size_t intended_dram_size = KSystemControl::Init::GetIntendedMemorySize();
    if (intended_dram_size * 2 < real_dram_size) {

@@ -3,7 +3,7 @@

#pragma once

#include "common/common_types.h"
#include "core/hle/kernel/k_typed_address.h"

namespace Kernel::Board::Nintendo::Nx {

@@ -18,7 +18,7 @@ public:
    // Initialization.
    static std::size_t GetRealMemorySize();
    static std::size_t GetIntendedMemorySize();
    static PAddr GetKernelPhysicalBaseAddress(u64 base_address);
    static KPhysicalAddress GetKernelPhysicalBaseAddress(KPhysicalAddress base_address);
    static bool ShouldIncreaseThreadResourceLimit();
    static std::size_t GetApplicationPoolSize();
    static std::size_t GetAppletPoolSize();

@@ -5,7 +5,7 @@

#include <cstddef>

#include "common/common_types.h"
#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/kernel/physical_memory.h"

namespace Kernel {
@@ -36,7 +36,7 @@ struct CodeSet final {
    std::size_t offset = 0;

    /// The address to map this segment to.
    VAddr addr = 0;
    KProcessAddress addr = 0;

    /// The size of this segment in bytes.
    u32 size = 0;
@@ -82,7 +82,7 @@ struct CodeSet final {
    std::array<Segment, 3> segments;

    /// The entry point address for this code set.
    VAddr entrypoint = 0;
    KProcessAddress entrypoint = 0;
};

} // namespace Kernel

@@ -12,20 +12,19 @@

namespace Kernel {

GlobalSchedulerContext::GlobalSchedulerContext(KernelCore& kernel_)
    : kernel{kernel_}, scheduler_lock{kernel_} {}
GlobalSchedulerContext::GlobalSchedulerContext(KernelCore& kernel)
    : m_kernel{kernel}, m_scheduler_lock{kernel} {}

GlobalSchedulerContext::~GlobalSchedulerContext() = default;

void GlobalSchedulerContext::AddThread(KThread* thread) {
    std::scoped_lock lock{global_list_guard};
    thread_list.push_back(thread);
    std::scoped_lock lock{m_global_list_guard};
    m_thread_list.push_back(thread);
}

void GlobalSchedulerContext::RemoveThread(KThread* thread) {
    std::scoped_lock lock{global_list_guard};
    thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
                      thread_list.end());
    std::scoped_lock lock{m_global_list_guard};
    std::erase(m_thread_list, thread);
}

void GlobalSchedulerContext::PreemptThreads() {
@@ -38,37 +37,37 @@ void GlobalSchedulerContext::PreemptThreads() {
        63,
    };

    ASSERT(IsLocked());
    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
    for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
        const u32 priority = preemption_priorities[core_id];
        KScheduler::RotateScheduledQueue(kernel, core_id, priority);
        KScheduler::RotateScheduledQueue(m_kernel, core_id, priority);
    }
}

bool GlobalSchedulerContext::IsLocked() const {
    return scheduler_lock.IsLockedByCurrentThread();
    return m_scheduler_lock.IsLockedByCurrentThread();
}

void GlobalSchedulerContext::RegisterDummyThreadForWakeup(KThread* thread) {
    ASSERT(IsLocked());
    ASSERT(this->IsLocked());

    woken_dummy_threads.insert(thread);
    m_woken_dummy_threads.insert(thread);
}

void GlobalSchedulerContext::UnregisterDummyThreadForWakeup(KThread* thread) {
    ASSERT(IsLocked());
    ASSERT(this->IsLocked());

    woken_dummy_threads.erase(thread);
    m_woken_dummy_threads.erase(thread);
}

void GlobalSchedulerContext::WakeupWaitingDummyThreads() {
    ASSERT(IsLocked());
    ASSERT(this->IsLocked());

    for (auto* thread : woken_dummy_threads) {
    for (auto* thread : m_woken_dummy_threads) {
        thread->DummyThreadEndWait();
    }

    woken_dummy_threads.clear();
    m_woken_dummy_threads.clear();
}

} // namespace Kernel

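Alongside the m_ member renames, RemoveThread swaps the classic erase-remove idiom for C++20 std::erase, which performs the same compaction in one call and returns the number of removed elements. Side by side (sketch; only one of the two forms would actually run):

// Pre-C++20: shift matching elements to the end, then erase the tail.
// m_thread_list.erase(std::remove(m_thread_list.begin(), m_thread_list.end(), thread),
//                     m_thread_list.end());

// C++20: one call, same effect.
std::erase(m_thread_list, thread);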
@@ -33,7 +33,7 @@ class GlobalSchedulerContext final {
public:
    using LockType = KAbstractSchedulerLock<KScheduler>;

    explicit GlobalSchedulerContext(KernelCore& kernel_);
    explicit GlobalSchedulerContext(KernelCore& kernel);
    ~GlobalSchedulerContext();

    /// Adds a new thread to the scheduler
@@ -43,8 +43,9 @@ public:
    void RemoveThread(KThread* thread);

    /// Returns a list of all threads managed by the scheduler
    [[nodiscard]] const std::vector<KThread*>& GetThreadList() const {
        return thread_list;
    /// This is only safe to iterate while holding the scheduler lock
    const std::vector<KThread*>& GetThreadList() const {
        return m_thread_list;
    }

    /**
@@ -63,30 +64,26 @@ public:
    void RegisterDummyThreadForWakeup(KThread* thread);
    void WakeupWaitingDummyThreads();

    [[nodiscard]] LockType& SchedulerLock() {
        return scheduler_lock;
    }

    [[nodiscard]] const LockType& SchedulerLock() const {
        return scheduler_lock;
    LockType& SchedulerLock() {
        return m_scheduler_lock;
    }

private:
    friend class KScopedSchedulerLock;
    friend class KScopedSchedulerLockAndSleep;

    KernelCore& kernel;
    KernelCore& m_kernel;

    std::atomic_bool scheduler_update_needed{};
    KSchedulerPriorityQueue priority_queue;
    LockType scheduler_lock;
    std::atomic_bool m_scheduler_update_needed{};
    KSchedulerPriorityQueue m_priority_queue;
    LockType m_scheduler_lock;

    /// Lists dummy threads pending wakeup on lock release
    std::set<KThread*> woken_dummy_threads;
    std::set<KThread*> m_woken_dummy_threads;

    /// Lists all thread ids that aren't deleted/etc.
    std::vector<KThread*> thread_list;
    std::mutex global_list_guard;
    std::vector<KThread*> m_thread_list;
    std::mutex m_global_list_guard;
};

} // namespace Kernel

@@ -4,7 +4,6 @@
#include "common/alignment.h"
#include "common/assert.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "core/core.h"
#include "core/device_memory.h"
#include "core/hardware_properties.h"
@@ -30,6 +29,7 @@
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_thread_local_page.h"
#include "core/hle/kernel/k_transfer_memory.h"
#include "core/hle/kernel/k_typed_address.h"

namespace Kernel::Init {

@@ -104,17 +104,18 @@ static_assert(KernelPageBufferAdditionalSize ==

/// Helper function to translate from the slab virtual address to the reserved location in physical
/// memory.
static PAddr TranslateSlabAddrToPhysical(KMemoryLayout& memory_layout, VAddr slab_addr) {
    slab_addr -= memory_layout.GetSlabRegionAddress();
    return slab_addr + Core::DramMemoryMap::SlabHeapBase;
static KPhysicalAddress TranslateSlabAddrToPhysical(KMemoryLayout& memory_layout,
                                                    KVirtualAddress slab_addr) {
    slab_addr -= GetInteger(memory_layout.GetSlabRegionAddress());
    return GetInteger(slab_addr) + Core::DramMemoryMap::SlabHeapBase;
}

template <typename T>
VAddr InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout, VAddr address,
                         size_t num_objects) {
KVirtualAddress InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout,
                                   KVirtualAddress address, size_t num_objects) {

    const size_t size = Common::AlignUp(sizeof(T) * num_objects, alignof(void*));
    VAddr start = Common::AlignUp(address, alignof(T));
    KVirtualAddress start = Common::AlignUp(GetInteger(address), alignof(T));

    // This should use the virtual memory address passed in, but currently, we do not setup the
    // kernel virtual memory layout. Instead, we simply map these at a region of physical memory
@@ -195,7 +196,7 @@ void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
    auto& kernel = system.Kernel();

    // Get the start of the slab region, since that's where we'll be working.
    VAddr address = memory_layout.GetSlabRegionAddress();
    KVirtualAddress address = memory_layout.GetSlabRegionAddress();

    // Initialize slab type array to be in sorted order.
    std::array<KSlabType, KSlabType_Count> slab_types;
@@ -228,7 +229,7 @@ void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
    }

    // Track the gaps, so that we can free them to the unused slab tree.
    VAddr gap_start = address;
    KVirtualAddress gap_start = address;
    size_t gap_size = 0;

    for (size_t i = 0; i < slab_gaps.size(); i++) {
@@ -280,7 +281,7 @@ void KPageBufferSlabHeap::Initialize(Core::System& system) {
    // Allocate memory for the slab.
    constexpr auto AllocateOption = KMemoryManager::EncodeOption(
        KMemoryManager::Pool::System, KMemoryManager::Direction::FromFront);
    const PAddr slab_address =
    const KPhysicalAddress slab_address =
        kernel.MemoryManager().AllocateAndOpenContinuous(num_pages, 1, AllocateOption);
    ASSERT(slab_address != 0);


@@ -14,7 +14,7 @@ using namespace Common::Literals;

constexpr std::size_t InitialProcessBinarySizeMax = 12_MiB;

static inline PAddr GetInitialProcessBinaryPhysicalAddress() {
static inline KPhysicalAddress GetInitialProcessBinaryPhysicalAddress() {
    return Kernel::Board::Nintendo::Nx::KSystemControl::Init::GetKernelPhysicalBaseAddress(
        MainMemoryAddress);
}

@@ -8,24 +8,25 @@
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_thread_queue.h"
#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/svc_results.h"
#include "core/memory.h"

namespace Kernel {

KAddressArbiter::KAddressArbiter(Core::System& system_)
    : system{system_}, kernel{system.Kernel()} {}
KAddressArbiter::KAddressArbiter(Core::System& system)
    : m_system{system}, m_kernel{system.Kernel()} {}
KAddressArbiter::~KAddressArbiter() = default;

namespace {

bool ReadFromUser(Core::System& system, s32* out, VAddr address) {
    *out = system.Memory().Read32(address);
bool ReadFromUser(KernelCore& kernel, s32* out, KProcessAddress address) {
    *out = GetCurrentMemory(kernel).Read32(GetInteger(address));
    return true;
}

bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 value) {
bool DecrementIfLessThan(Core::System& system, s32* out, KProcessAddress address, s32 value) {
    auto& monitor = system.Monitor();
    const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();

@@ -34,22 +35,30 @@ bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 valu

    // TODO(bunnei): We should call CanAccessAtomic(..) here.

    // Load the value from the address.
    const s32 current_value = static_cast<s32>(monitor.ExclusiveRead32(current_core, address));
    s32 current_value{};

    // Compare it to the desired one.
    if (current_value < value) {
        // If less than, we want to try to decrement.
        const s32 decrement_value = current_value - 1;
    while (true) {
        // Load the value from the address.
        current_value =
            static_cast<s32>(monitor.ExclusiveRead32(current_core, GetInteger(address)));

        // Compare it to the desired one.
        if (current_value < value) {
            // If less than, we want to try to decrement.
            const s32 decrement_value = current_value - 1;

            // Decrement and try to store.
            if (monitor.ExclusiveWrite32(current_core, GetInteger(address),
                                         static_cast<u32>(decrement_value))) {
                break;
            }

        // Decrement and try to store.
        if (!monitor.ExclusiveWrite32(current_core, address, static_cast<u32>(decrement_value))) {
            // If we failed to store, try again.
            DecrementIfLessThan(system, out, address, value);
        } else {
            // Otherwise, clear our exclusive hold and finish
            monitor.ClearExclusive(current_core);
            break;
        }
    } else {
        // Otherwise, clear our exclusive hold and finish
        monitor.ClearExclusive(current_core);
    }

    // We're done.
@@ -57,7 +66,8 @@ bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 valu
|
||||
return true;
|
||||
}
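The rework above drops DecrementIfLessThan's recurse-on-failure retry in favor of an explicit loop around the exclusive read/write pair: an exclusive store can fail when another core touches the line between the read and the write, so the read-compare-store sequence simply repeats. A minimal sketch of the same shape, using std::atomic's compare-exchange as a stand-in for the emulator's exclusive monitor (the names below are illustrative, not yuzu API):

#include <atomic>
#include <cstdint>

// Sketch: decrement *addr if it is below `value`, retrying on contention.
// compare_exchange_weak plays the role of ExclusiveRead32/ExclusiveWrite32.
bool DecrementIfLessThanSketch(std::atomic<int32_t>* addr, int32_t* out, int32_t value) {
    int32_t current = addr->load(std::memory_order_acquire);
    while (current < value) {
        // On failure, `current` is refreshed and the comparison re-runs,
        // replacing the old recursive call with a plain loop iteration.
        if (addr->compare_exchange_weak(current, current - 1, std::memory_order_acq_rel)) {
            break;
        }
    }
    *out = current;
    return true;
}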

bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32 new_value) {
bool UpdateIfEqual(Core::System& system, s32* out, KProcessAddress address, s32 value,
s32 new_value) {
auto& monitor = system.Monitor();
const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();

@@ -66,21 +76,29 @@ bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32

// TODO(bunnei): We should call CanAccessAtomic(..) here.

s32 current_value{};

// Load the value from the address.
const s32 current_value = static_cast<s32>(monitor.ExclusiveRead32(current_core, address));
while (true) {
current_value =
static_cast<s32>(monitor.ExclusiveRead32(current_core, GetInteger(address)));

// Compare it to the desired one.
if (current_value == value) {
// If equal, we want to try to write the new value.
// Compare it to the desired one.
if (current_value == value) {
// If equal, we want to try to write the new value.

// Try to store.
if (monitor.ExclusiveWrite32(current_core, GetInteger(address),
static_cast<u32>(new_value))) {
break;
}

// Try to store.
if (!monitor.ExclusiveWrite32(current_core, address, static_cast<u32>(new_value))) {
// If we failed to store, try again.
UpdateIfEqual(system, out, address, value, new_value);
} else {
// Otherwise, clear our exclusive hold and finish.
monitor.ClearExclusive(current_core);
break;
}
} else {
// Otherwise, clear our exclusive hold and finish.
monitor.ClearExclusive(current_core);
}

// We're done.
@@ -90,8 +108,8 @@ bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32

class ThreadQueueImplForKAddressArbiter final : public KThreadQueue {
public:
explicit ThreadQueueImplForKAddressArbiter(KernelCore& kernel_, KAddressArbiter::ThreadTree* t)
: KThreadQueue(kernel_), m_tree(t) {}
explicit ThreadQueueImplForKAddressArbiter(KernelCore& kernel, KAddressArbiter::ThreadTree* t)
: KThreadQueue(kernel), m_tree(t) {}

void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
// If the thread is waiting on an address arbiter, remove it from the tree.
@@ -105,19 +123,19 @@ public:
}

private:
KAddressArbiter::ThreadTree* m_tree;
KAddressArbiter::ThreadTree* m_tree{};
};

} // namespace

Result KAddressArbiter::Signal(VAddr addr, s32 count) {
Result KAddressArbiter::Signal(uint64_t addr, s32 count) {
// Perform signaling.
s32 num_waiters{};
{
KScopedSchedulerLock sl(kernel);
KScopedSchedulerLock sl(m_kernel);

auto it = thread_tree.nfind_key({addr, -1});
while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
auto it = m_tree.nfind_key({addr, -1});
while ((it != m_tree.end()) && (count <= 0 || num_waiters < count) &&
(it->GetAddressArbiterKey() == addr)) {
// End the thread's wait.
KThread* target_thread = std::addressof(*it);
@@ -126,31 +144,27 @@ Result KAddressArbiter::Signal(VAddr addr, s32 count) {
ASSERT(target_thread->IsWaitingForAddressArbiter());
target_thread->ClearAddressArbiter();

it = thread_tree.erase(it);
it = m_tree.erase(it);
++num_waiters;
}
}
return ResultSuccess;
R_SUCCEED();
}

Result KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count) {
Result KAddressArbiter::SignalAndIncrementIfEqual(uint64_t addr, s32 value, s32 count) {
// Perform signaling.
s32 num_waiters{};
{
KScopedSchedulerLock sl(kernel);
KScopedSchedulerLock sl(m_kernel);

// Check the userspace value.
s32 user_value{};
if (!UpdateIfEqual(system, &user_value, addr, value, value + 1)) {
LOG_ERROR(Kernel, "Invalid current memory!");
return ResultInvalidCurrentMemory;
}
if (user_value != value) {
return ResultInvalidState;
}
R_UNLESS(UpdateIfEqual(m_system, std::addressof(user_value), addr, value, value + 1),
ResultInvalidCurrentMemory);
R_UNLESS(user_value == value, ResultInvalidState);

auto it = thread_tree.nfind_key({addr, -1});
while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
auto it = m_tree.nfind_key({addr, -1});
while ((it != m_tree.end()) && (count <= 0 || num_waiters < count) &&
(it->GetAddressArbiterKey() == addr)) {
// End the thread's wait.
KThread* target_thread = std::addressof(*it);
@@ -159,33 +173,33 @@ Result KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 cou
ASSERT(target_thread->IsWaitingForAddressArbiter());
target_thread->ClearAddressArbiter();

it = thread_tree.erase(it);
it = m_tree.erase(it);
++num_waiters;
}
}
return ResultSuccess;
R_SUCCEED();
}

Result KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count) {
Result KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(uint64_t addr, s32 value, s32 count) {
// Perform signaling.
s32 num_waiters{};
{
[[maybe_unused]] const KScopedSchedulerLock sl(kernel);
KScopedSchedulerLock sl(m_kernel);

auto it = thread_tree.nfind_key({addr, -1});
auto it = m_tree.nfind_key({addr, -1});
// Determine the updated value.
s32 new_value{};
if (count <= 0) {
if (it != thread_tree.end() && it->GetAddressArbiterKey() == addr) {
if (it != m_tree.end() && it->GetAddressArbiterKey() == addr) {
new_value = value - 2;
} else {
new_value = value + 1;
}
} else {
if (it != thread_tree.end() && it->GetAddressArbiterKey() == addr) {
if (it != m_tree.end() && it->GetAddressArbiterKey() == addr) {
auto tmp_it = it;
s32 tmp_num_waiters{};
while (++tmp_it != thread_tree.end() && tmp_it->GetAddressArbiterKey() == addr) {
while (++tmp_it != m_tree.end() && tmp_it->GetAddressArbiterKey() == addr) {
if (tmp_num_waiters++ >= count) {
break;
}
@@ -205,20 +219,15 @@ Result KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 val
s32 user_value{};
bool succeeded{};
if (value != new_value) {
succeeded = UpdateIfEqual(system, &user_value, addr, value, new_value);
succeeded = UpdateIfEqual(m_system, std::addressof(user_value), addr, value, new_value);
} else {
succeeded = ReadFromUser(system, &user_value, addr);
succeeded = ReadFromUser(m_kernel, std::addressof(user_value), addr);
}

if (!succeeded) {
LOG_ERROR(Kernel, "Invalid current memory!");
return ResultInvalidCurrentMemory;
}
if (user_value != value) {
return ResultInvalidState;
}
R_UNLESS(succeeded, ResultInvalidCurrentMemory);
R_UNLESS(user_value == value, ResultInvalidState);

while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
while ((it != m_tree.end()) && (count <= 0 || num_waiters < count) &&
(it->GetAddressArbiterKey() == addr)) {
// End the thread's wait.
KThread* target_thread = std::addressof(*it);
@@ -227,57 +236,57 @@ Result KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 val
ASSERT(target_thread->IsWaitingForAddressArbiter());
target_thread->ClearAddressArbiter();

it = thread_tree.erase(it);
it = m_tree.erase(it);
++num_waiters;
}
}
return ResultSuccess;
R_SUCCEED();
}

Result KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout) {
Result KAddressArbiter::WaitIfLessThan(uint64_t addr, s32 value, bool decrement, s64 timeout) {
// Prepare to wait.
KThread* cur_thread = GetCurrentThreadPointer(kernel);
KThread* cur_thread = GetCurrentThreadPointer(m_kernel);
KHardwareTimer* timer{};
ThreadQueueImplForKAddressArbiter wait_queue(kernel, std::addressof(thread_tree));
ThreadQueueImplForKAddressArbiter wait_queue(m_kernel, std::addressof(m_tree));

{
KScopedSchedulerLockAndSleep slp{kernel, std::addressof(timer), cur_thread, timeout};
KScopedSchedulerLockAndSleep slp{m_kernel, std::addressof(timer), cur_thread, timeout};

// Check that the thread isn't terminating.
if (cur_thread->IsTerminationRequested()) {
slp.CancelSleep();
return ResultTerminationRequested;
R_THROW(ResultTerminationRequested);
}

// Read the value from userspace.
s32 user_value{};
bool succeeded{};
if (decrement) {
succeeded = DecrementIfLessThan(system, &user_value, addr, value);
succeeded = DecrementIfLessThan(m_system, std::addressof(user_value), addr, value);
} else {
succeeded = ReadFromUser(system, &user_value, addr);
succeeded = ReadFromUser(m_kernel, std::addressof(user_value), addr);
}

if (!succeeded) {
slp.CancelSleep();
return ResultInvalidCurrentMemory;
R_THROW(ResultInvalidCurrentMemory);
}

// Check that the value is less than the specified one.
if (user_value >= value) {
slp.CancelSleep();
return ResultInvalidState;
R_THROW(ResultInvalidState);
}

// Check that the timeout is non-zero.
if (timeout == 0) {
slp.CancelSleep();
return ResultTimedOut;
R_THROW(ResultTimedOut);
}

// Set the arbiter.
cur_thread->SetAddressArbiter(&thread_tree, addr);
thread_tree.insert(*cur_thread);
cur_thread->SetAddressArbiter(std::addressof(m_tree), addr);
m_tree.insert(*cur_thread);

// Wait for the thread to finish.
wait_queue.SetHardwareTimer(timer);
@@ -289,43 +298,43 @@ Result KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s6
return cur_thread->GetWaitResult();
}

Result KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
Result KAddressArbiter::WaitIfEqual(uint64_t addr, s32 value, s64 timeout) {
// Prepare to wait.
KThread* cur_thread = GetCurrentThreadPointer(kernel);
KThread* cur_thread = GetCurrentThreadPointer(m_kernel);
KHardwareTimer* timer{};
ThreadQueueImplForKAddressArbiter wait_queue(kernel, std::addressof(thread_tree));
ThreadQueueImplForKAddressArbiter wait_queue(m_kernel, std::addressof(m_tree));

{
KScopedSchedulerLockAndSleep slp{kernel, std::addressof(timer), cur_thread, timeout};
KScopedSchedulerLockAndSleep slp{m_kernel, std::addressof(timer), cur_thread, timeout};

// Check that the thread isn't terminating.
if (cur_thread->IsTerminationRequested()) {
slp.CancelSleep();
return ResultTerminationRequested;
R_THROW(ResultTerminationRequested);
}

// Read the value from userspace.
s32 user_value{};
if (!ReadFromUser(system, &user_value, addr)) {
if (!ReadFromUser(m_kernel, std::addressof(user_value), addr)) {
slp.CancelSleep();
return ResultInvalidCurrentMemory;
R_THROW(ResultInvalidCurrentMemory);
}

// Check that the value is equal.
if (value != user_value) {
slp.CancelSleep();
return ResultInvalidState;
R_THROW(ResultInvalidState);
}

// Check that the timeout is non-zero.
if (timeout == 0) {
slp.CancelSleep();
return ResultTimedOut;
R_THROW(ResultTimedOut);
}

// Set the arbiter.
cur_thread->SetAddressArbiter(&thread_tree, addr);
thread_tree.insert(*cur_thread);
cur_thread->SetAddressArbiter(std::addressof(m_tree), addr);
m_tree.insert(*cur_thread);

// Wait for the thread to finish.
wait_queue.SetHardwareTimer(timer);

@@ -22,47 +22,46 @@ class KAddressArbiter {
public:
using ThreadTree = KConditionVariable::ThreadTree;

explicit KAddressArbiter(Core::System& system_);
explicit KAddressArbiter(Core::System& system);
~KAddressArbiter();

[[nodiscard]] Result SignalToAddress(VAddr addr, Svc::SignalType type, s32 value, s32 count) {
Result SignalToAddress(uint64_t addr, Svc::SignalType type, s32 value, s32 count) {
switch (type) {
case Svc::SignalType::Signal:
return Signal(addr, count);
R_RETURN(this->Signal(addr, count));
case Svc::SignalType::SignalAndIncrementIfEqual:
return SignalAndIncrementIfEqual(addr, value, count);
R_RETURN(this->SignalAndIncrementIfEqual(addr, value, count));
case Svc::SignalType::SignalAndModifyByWaitingCountIfEqual:
return SignalAndModifyByWaitingCountIfEqual(addr, value, count);
R_RETURN(this->SignalAndModifyByWaitingCountIfEqual(addr, value, count));
default:
UNREACHABLE();
}
ASSERT(false);
return ResultUnknown;
}

[[nodiscard]] Result WaitForAddress(VAddr addr, Svc::ArbitrationType type, s32 value,
s64 timeout) {
Result WaitForAddress(uint64_t addr, Svc::ArbitrationType type, s32 value, s64 timeout) {
switch (type) {
case Svc::ArbitrationType::WaitIfLessThan:
return WaitIfLessThan(addr, value, false, timeout);
R_RETURN(WaitIfLessThan(addr, value, false, timeout));
case Svc::ArbitrationType::DecrementAndWaitIfLessThan:
return WaitIfLessThan(addr, value, true, timeout);
R_RETURN(WaitIfLessThan(addr, value, true, timeout));
case Svc::ArbitrationType::WaitIfEqual:
return WaitIfEqual(addr, value, timeout);
R_RETURN(WaitIfEqual(addr, value, timeout));
default:
UNREACHABLE();
}
ASSERT(false);
return ResultUnknown;
}

private:
[[nodiscard]] Result Signal(VAddr addr, s32 count);
[[nodiscard]] Result SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count);
[[nodiscard]] Result SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count);
[[nodiscard]] Result WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout);
[[nodiscard]] Result WaitIfEqual(VAddr addr, s32 value, s64 timeout);
Result Signal(uint64_t addr, s32 count);
Result SignalAndIncrementIfEqual(uint64_t addr, s32 value, s32 count);
Result SignalAndModifyByWaitingCountIfEqual(uint64_t addr, s32 value, s32 count);
Result WaitIfLessThan(uint64_t addr, s32 value, bool decrement, s64 timeout);
Result WaitIfEqual(uint64_t addr, s32 value, s64 timeout);

ThreadTree thread_tree;

Core::System& system;
KernelCore& kernel;
private:
ThreadTree m_tree;
Core::System& m_system;
KernelCore& m_kernel;
};

} // namespace Kernel
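A pattern that repeats through this series: bare `return ResultSuccess;` / `return ResultX;` statements become `R_SUCCEED()`, `R_THROW(...)`, and `R_RETURN(...)`, and if-then-return checks collapse into `R_UNLESS(...)`. Roughly, and ignoring the failure-handler plumbing the real macros in core/hle/result.h carry, they behave like this simplified sketch:

// Simplified stand-ins; the real macros also cooperate with ON_RESULT_FAILURE.
#define R_SUCCEED() return ResultSuccess
#define R_THROW(res) return (res)
#define R_RETURN(expr) return (expr)
#define R_UNLESS(cond, res) \
    do {                    \
        if (!(cond)) {      \
            return (res);   \
        }                   \
    } while (0)

// So the rewritten SignalAndIncrementIfEqual check reads as:
//   R_UNLESS(UpdateIfEqual(...), ResultInvalidCurrentMemory);
//   R_UNLESS(user_value == value, ResultInvalidState);
//   R_SUCCEED();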

@@ -13,40 +13,40 @@ class KAffinityMask {
public:
constexpr KAffinityMask() = default;

[[nodiscard]] constexpr u64 GetAffinityMask() const {
return this->mask;
constexpr u64 GetAffinityMask() const {
return m_mask;
}

constexpr void SetAffinityMask(u64 new_mask) {
ASSERT((new_mask & ~AllowedAffinityMask) == 0);
this->mask = new_mask;
m_mask = new_mask;
}

[[nodiscard]] constexpr bool GetAffinity(s32 core) const {
return (this->mask & GetCoreBit(core)) != 0;
constexpr bool GetAffinity(s32 core) const {
return (m_mask & GetCoreBit(core)) != 0;
}

constexpr void SetAffinity(s32 core, bool set) {
if (set) {
this->mask |= GetCoreBit(core);
m_mask |= GetCoreBit(core);
} else {
this->mask &= ~GetCoreBit(core);
m_mask &= ~GetCoreBit(core);
}
}

constexpr void SetAll() {
this->mask = AllowedAffinityMask;
m_mask = AllowedAffinityMask;
}

private:
[[nodiscard]] static constexpr u64 GetCoreBit(s32 core) {
static constexpr u64 GetCoreBit(s32 core) {
ASSERT(0 <= core && core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
return (1ULL << core);
}

static constexpr u64 AllowedAffinityMask = (1ULL << Core::Hardware::NUM_CPU_CORES) - 1;

u64 mask{};
u64 m_mask{};
};

} // namespace Kernel

@@ -12,11 +12,11 @@ KAutoObject* KAutoObject::Create(KAutoObject* obj) {
}

void KAutoObject::RegisterWithKernel() {
kernel.RegisterKernelObject(this);
m_kernel.RegisterKernelObject(this);
}

void KAutoObject::UnregisterWithKernel() {
kernel.UnregisterKernelObject(this);
m_kernel.UnregisterKernelObject(this);
}

} // namespace Kernel

@@ -80,7 +80,7 @@ private:
KERNEL_AUTOOBJECT_TRAITS_IMPL(KAutoObject, KAutoObject, const);

public:
explicit KAutoObject(KernelCore& kernel_) : kernel(kernel_) {
explicit KAutoObject(KernelCore& kernel) : m_kernel(kernel) {
RegisterWithKernel();
}
virtual ~KAutoObject() = default;
@@ -164,17 +164,12 @@ public:
}
}

const std::string& GetName() const {
return name;
}

private:
void RegisterWithKernel();
void UnregisterWithKernel();

protected:
KernelCore& kernel;
std::string name;
KernelCore& m_kernel;

private:
std::atomic<u32> m_ref_count{};
@@ -184,7 +179,7 @@ class KAutoObjectWithListContainer;

class KAutoObjectWithList : public KAutoObject, public boost::intrusive::set_base_hook<> {
public:
explicit KAutoObjectWithList(KernelCore& kernel_) : KAutoObject(kernel_) {}
explicit KAutoObjectWithList(KernelCore& kernel) : KAutoObject(kernel) {}

static int Compare(const KAutoObjectWithList& lhs, const KAutoObjectWithList& rhs) {
const u64 lid = lhs.GetId();
@@ -200,7 +195,7 @@ public:
}

friend bool operator<(const KAutoObjectWithList& left, const KAutoObjectWithList& right) {
return &left < &right;
return KAutoObjectWithList::Compare(left, right) < 0;
}

public:
@@ -208,10 +203,6 @@ public:
return reinterpret_cast<u64>(this);
}

virtual const std::string& GetName() const {
return name;
}

private:
friend class KAutoObjectWithListContainer;
};

@@ -11,7 +11,7 @@

namespace Kernel {

Result KCapabilities::InitializeForKIP(std::span<const u32> kern_caps, KPageTable* page_table) {
Result KCapabilities::InitializeForKip(std::span<const u32> kern_caps, KPageTable* page_table) {
// We're initializing an initial process.
m_svc_access_flags.reset();
m_irq_access_flags.reset();

@@ -22,7 +22,7 @@ class KCapabilities {
public:
constexpr explicit KCapabilities() = default;

Result InitializeForKIP(std::span<const u32> kern_caps, KPageTable* page_table);
Result InitializeForKip(std::span<const u32> kern_caps, KPageTable* page_table);
Result InitializeForUser(std::span<const u32> user_caps, KPageTable* page_table);

static Result CheckCapabilities(KernelCore& kernel, std::span<const u32> user_caps);

@@ -11,26 +11,21 @@

namespace Kernel {

KClientPort::KClientPort(KernelCore& kernel_) : KSynchronizationObject{kernel_} {}
KClientPort::KClientPort(KernelCore& kernel) : KSynchronizationObject{kernel} {}
KClientPort::~KClientPort() = default;

void KClientPort::Initialize(KPort* parent_port_, s32 max_sessions_, std::string&& name_) {
void KClientPort::Initialize(KPort* parent, s32 max_sessions) {
// Set member variables.
num_sessions = 0;
peak_sessions = 0;
parent = parent_port_;
max_sessions = max_sessions_;
name = std::move(name_);
m_num_sessions = 0;
m_peak_sessions = 0;
m_parent = parent;
m_max_sessions = max_sessions;
}

void KClientPort::OnSessionFinalized() {
KScopedSchedulerLock sl{kernel};
KScopedSchedulerLock sl{m_kernel};

// This might happen if a session was improperly used with this port.
ASSERT_MSG(num_sessions > 0, "num_sessions is invalid");

const auto prev = num_sessions--;
if (prev == max_sessions) {
if (const auto prev = m_num_sessions--; prev == m_max_sessions) {
this->NotifyAvailable();
}
}
@@ -47,81 +42,81 @@ bool KClientPort::IsServerClosed() const {

void KClientPort::Destroy() {
// Note with our parent that we're closed.
parent->OnClientClosed();
m_parent->OnClientClosed();

// Close our reference to our parent.
parent->Close();
m_parent->Close();
}

bool KClientPort::IsSignaled() const {
return num_sessions < max_sessions;
return m_num_sessions.load() < m_max_sessions;
}

Result KClientPort::CreateSession(KClientSession** out) {
// Declare the session we're going to allocate.
KSession* session{};

// Reserve a new session from the resource limit.
//! FIXME: we are reserving this from the wrong resource limit!
KScopedResourceReservation session_reservation(kernel.ApplicationProcess()->GetResourceLimit(),
LimitableResource::SessionCountMax);
KScopedResourceReservation session_reservation(
m_kernel.ApplicationProcess()->GetResourceLimit(), LimitableResource::SessionCountMax);
R_UNLESS(session_reservation.Succeeded(), ResultLimitReached);

// Allocate a session normally.
session = KSession::Create(m_kernel);

// Check that we successfully created a session.
R_UNLESS(session != nullptr, ResultOutOfResource);

// Update the session counts.
{
ON_RESULT_FAILURE {
session->Close();
};

// Atomically increment the number of sessions.
s32 new_sessions{};
{
const auto max = max_sessions;
auto cur_sessions = num_sessions.load(std::memory_order_acquire);
const auto max = m_max_sessions;
auto cur_sessions = m_num_sessions.load(std::memory_order_acquire);
do {
R_UNLESS(cur_sessions < max, ResultOutOfSessions);
new_sessions = cur_sessions + 1;
} while (!num_sessions.compare_exchange_weak(cur_sessions, new_sessions,
std::memory_order_relaxed));
} while (!m_num_sessions.compare_exchange_weak(cur_sessions, new_sessions,
std::memory_order_relaxed));
}

// Atomically update the peak session tracking.
{
auto peak = peak_sessions.load(std::memory_order_acquire);
auto peak = m_peak_sessions.load(std::memory_order_acquire);
do {
if (peak >= new_sessions) {
break;
}
} while (!peak_sessions.compare_exchange_weak(peak, new_sessions,
std::memory_order_relaxed));
} while (!m_peak_sessions.compare_exchange_weak(peak, new_sessions,
std::memory_order_relaxed));
}
}

// Create a new session.
KSession* session = KSession::Create(kernel);
if (session == nullptr) {
// Decrement the session count.
const auto prev = num_sessions--;
if (prev == max_sessions) {
this->NotifyAvailable();
}

return ResultOutOfResource;
}

// Initialize the session.
session->Initialize(this, parent->GetName());
session->Initialize(this, m_parent->GetName());

// Commit the session reservation.
session_reservation.Commit();

// Register the session.
KSession::Register(kernel, session);
auto session_guard = SCOPE_GUARD({
KSession::Register(m_kernel, session);
ON_RESULT_FAILURE {
session->GetClientSession().Close();
session->GetServerSession().Close();
});
};

// Enqueue the session with our parent.
R_TRY(parent->EnqueueSession(std::addressof(session->GetServerSession())));
R_TRY(m_parent->EnqueueSession(std::addressof(session->GetServerSession())));

// We succeeded, so set the output.
session_guard.Cancel();
*out = std::addressof(session->GetClientSession());
return ResultSuccess;
R_SUCCEED();
}

} // namespace Kernel
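CreateSession's reordering also changes how the counters are kept honest: the session count is raised with a compare-exchange loop that refuses to pass m_max_sessions, and the recorded peak only ever moves up. A self-contained sketch of that lock-free pattern (function and variable names here are invented for illustration):

#include <atomic>
#include <cstdint>

// Sketch of the increment-with-ceiling and monotonic-peak updates.
bool TryOpenSession(std::atomic<int32_t>& num, std::atomic<int32_t>& peak, int32_t max) {
    int32_t cur = num.load(std::memory_order_acquire);
    int32_t next = 0;
    do {
        if (cur >= max) {
            return false; // maps to ResultOutOfSessions above
        }
        next = cur + 1;
    } while (!num.compare_exchange_weak(cur, next, std::memory_order_relaxed));

    // Only advance the recorded peak if we actually exceeded it.
    int32_t p = peak.load(std::memory_order_acquire);
    while (p < next && !peak.compare_exchange_weak(p, next, std::memory_order_relaxed)) {
    }
    return true;
}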

@@ -4,7 +4,6 @@
#pragma once

#include <memory>
#include <string>

#include "common/common_types.h"
#include "core/hle/kernel/k_synchronization_object.h"
@@ -20,28 +19,28 @@ class KClientPort final : public KSynchronizationObject {
KERNEL_AUTOOBJECT_TRAITS(KClientPort, KSynchronizationObject);

public:
explicit KClientPort(KernelCore& kernel_);
explicit KClientPort(KernelCore& kernel);
~KClientPort() override;

void Initialize(KPort* parent_, s32 max_sessions_, std::string&& name_);
void Initialize(KPort* parent, s32 max_sessions);
void OnSessionFinalized();
void OnServerClosed();

const KPort* GetParent() const {
return parent;
return m_parent;
}
KPort* GetParent() {
return parent;
return m_parent;
}

s32 GetNumSessions() const {
return num_sessions;
return m_num_sessions;
}
s32 GetPeakSessions() const {
return peak_sessions;
return m_peak_sessions;
}
s32 GetMaxSessions() const {
return max_sessions;
return m_max_sessions;
}

bool IsLight() const;
@@ -54,10 +53,10 @@ public:
Result CreateSession(KClientSession** out);

private:
std::atomic<s32> num_sessions{};
std::atomic<s32> peak_sessions{};
s32 max_sessions{};
KPort* parent{};
std::atomic<s32> m_num_sessions{};
std::atomic<s32> m_peak_sessions{};
s32 m_max_sessions{};
KPort* m_parent{};
};

} // namespace Kernel

@@ -12,28 +12,28 @@ namespace Kernel {

static constexpr u32 MessageBufferSize = 0x100;

KClientSession::KClientSession(KernelCore& kernel_)
: KAutoObjectWithSlabHeapAndContainer{kernel_} {}
KClientSession::KClientSession(KernelCore& kernel) : KAutoObjectWithSlabHeapAndContainer{kernel} {}
KClientSession::~KClientSession() = default;

void KClientSession::Destroy() {
parent->OnClientClosed();
parent->Close();
m_parent->OnClientClosed();
m_parent->Close();
}

void KClientSession::OnServerClosed() {}

Result KClientSession::SendSyncRequest() {
// Create a session request.
KSessionRequest* request = KSessionRequest::Create(kernel);
KSessionRequest* request = KSessionRequest::Create(m_kernel);
R_UNLESS(request != nullptr, ResultOutOfResource);
SCOPE_EXIT({ request->Close(); });

// Initialize the request.
request->Initialize(nullptr, GetCurrentThread(kernel).GetTLSAddress(), MessageBufferSize);
request->Initialize(nullptr, GetInteger(GetCurrentThread(m_kernel).GetTlsAddress()),
MessageBufferSize);

// Send the request.
return parent->GetServerSession().OnRequest(request);
R_RETURN(m_parent->GetServerSession().OnRequest(request));
}

} // namespace Kernel

@@ -30,20 +30,19 @@ class KClientSession final
KERNEL_AUTOOBJECT_TRAITS(KClientSession, KAutoObject);

public:
explicit KClientSession(KernelCore& kernel_);
explicit KClientSession(KernelCore& kernel);
~KClientSession() override;

void Initialize(KSession* parent_session_, std::string&& name_) {
void Initialize(KSession* parent) {
// Set member variables.
parent = parent_session_;
name = std::move(name_);
m_parent = parent;
}

void Destroy() override;
static void PostDestroy([[maybe_unused]] uintptr_t arg) {}
static void PostDestroy(uintptr_t arg) {}

KSession* GetParent() const {
return parent;
return m_parent;
}

Result SendSyncRequest();
@@ -51,7 +50,7 @@ public:
void OnServerClosed();

private:
KSession* parent{};
KSession* m_parent{};
};

} // namespace Kernel

@@ -16,18 +16,19 @@

namespace Kernel {

KCodeMemory::KCodeMemory(KernelCore& kernel_)
: KAutoObjectWithSlabHeapAndContainer{kernel_}, m_lock(kernel_) {}
KCodeMemory::KCodeMemory(KernelCore& kernel)
: KAutoObjectWithSlabHeapAndContainer{kernel}, m_lock(kernel) {}

Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, size_t size) {
Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, KProcessAddress addr,
size_t size) {
// Set members.
m_owner = GetCurrentProcessPointer(kernel);
m_owner = GetCurrentProcessPointer(m_kernel);

// Get the owner page table.
auto& page_table = m_owner->PageTable();

// Construct the page group.
m_page_group.emplace(kernel, page_table.GetBlockInfoManager());
m_page_group.emplace(m_kernel, page_table.GetBlockInfoManager());

// Lock the memory.
R_TRY(page_table.LockForCodeMemory(std::addressof(*m_page_group), addr, size))
@@ -45,7 +46,7 @@ Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, si
m_is_mapped = false;

// We succeeded.
return ResultSuccess;
R_SUCCEED();
}

void KCodeMemory::Finalize() {
@@ -63,7 +64,7 @@ void KCodeMemory::Finalize() {
m_owner->Close();
}

Result KCodeMemory::Map(VAddr address, size_t size) {
Result KCodeMemory::Map(KProcessAddress address, size_t size) {
// Validate the size.
R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);

@@ -74,16 +75,16 @@ Result KCodeMemory::Map(VAddr address, size_t size) {
R_UNLESS(!m_is_mapped, ResultInvalidState);

// Map the memory.
R_TRY(GetCurrentProcess(kernel).PageTable().MapPageGroup(
R_TRY(GetCurrentProcess(m_kernel).PageTable().MapPageGroup(
address, *m_page_group, KMemoryState::CodeOut, KMemoryPermission::UserReadWrite));

// Mark ourselves as mapped.
m_is_mapped = true;

return ResultSuccess;
R_SUCCEED();
}

Result KCodeMemory::Unmap(VAddr address, size_t size) {
Result KCodeMemory::Unmap(KProcessAddress address, size_t size) {
// Validate the size.
R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);

@@ -91,16 +92,16 @@ Result KCodeMemory::Unmap(VAddr address, size_t size) {
KScopedLightLock lk(m_lock);

// Unmap the memory.
R_TRY(GetCurrentProcess(kernel).PageTable().UnmapPageGroup(address, *m_page_group,
KMemoryState::CodeOut));
R_TRY(GetCurrentProcess(m_kernel).PageTable().UnmapPageGroup(address, *m_page_group,
KMemoryState::CodeOut));

// Mark ourselves as unmapped.
m_is_mapped = false;

return ResultSuccess;
R_SUCCEED();
}

Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm) {
Result KCodeMemory::MapToOwner(KProcessAddress address, size_t size, Svc::MemoryPermission perm) {
// Validate the size.
R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);

@@ -131,10 +132,10 @@ Result KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission
// Mark ourselves as mapped.
m_is_owner_mapped = true;

return ResultSuccess;
R_SUCCEED();
}

Result KCodeMemory::UnmapFromOwner(VAddr address, size_t size) {
Result KCodeMemory::UnmapFromOwner(KProcessAddress address, size_t size) {
// Validate the size.
R_UNLESS(m_page_group->GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);

@@ -147,7 +148,7 @@ Result KCodeMemory::UnmapFromOwner(VAddr address, size_t size) {
// Mark ourselves as unmapped.
m_is_owner_mapped = false;

return ResultSuccess;
R_SUCCEED();
}

} // namespace Kernel
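The VAddr-to-KProcessAddress/KVirtualAddress/KPhysicalAddress conversions running through all of these files swap raw integer aliases for distinct wrapper types, with GetInteger() as the explicit escape hatch wherever a plain integer is still needed (AlignUp, Read32, reinterpret_cast arithmetic). A bare-bones sketch of the idea; the real k_typed_address.h also supplies arithmetic and comparison operators:

#include <cstdint>

// Minimal typed-address sketch: one distinct type per address space, so a
// process address can no longer be passed where a physical one is expected.
template <typename Tag>
class TypedAddress {
public:
    constexpr TypedAddress() = default;
    constexpr explicit TypedAddress(uint64_t value) : m_value(value) {}

    // Explicit conversion back to an integer, mirroring GetInteger() above.
    friend constexpr uint64_t GetInteger(TypedAddress addr) {
        return addr.m_value;
    }

private:
    uint64_t m_value{};
};

using ProcessAddress = TypedAddress<struct ProcessTag>;   // cf. KProcessAddress
using PhysicalAddress = TypedAddress<struct PhysicalTag>; // cf. KPhysicalAddress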

@@ -5,12 +5,12 @@

#include <optional>

#include "common/common_types.h"
#include "core/device_memory.h"
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/k_light_lock.h"
#include "core/hle/kernel/k_page_group.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/kernel/svc_types.h"
#include "core/hle/result.h"
@@ -29,25 +29,25 @@ class KCodeMemory final
KERNEL_AUTOOBJECT_TRAITS(KCodeMemory, KAutoObject);

public:
explicit KCodeMemory(KernelCore& kernel_);
explicit KCodeMemory(KernelCore& kernel);

Result Initialize(Core::DeviceMemory& device_memory, VAddr address, size_t size);
Result Initialize(Core::DeviceMemory& device_memory, KProcessAddress address, size_t size);
void Finalize() override;

Result Map(VAddr address, size_t size);
Result Unmap(VAddr address, size_t size);
Result MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm);
Result UnmapFromOwner(VAddr address, size_t size);
Result Map(KProcessAddress address, size_t size);
Result Unmap(KProcessAddress address, size_t size);
Result MapToOwner(KProcessAddress address, size_t size, Svc::MemoryPermission perm);
Result UnmapFromOwner(KProcessAddress address, size_t size);

bool IsInitialized() const override {
return m_is_initialized;
}
static void PostDestroy([[maybe_unused]] uintptr_t arg) {}
static void PostDestroy(uintptr_t arg) {}

KProcess* GetOwner() const override {
return m_owner;
}
VAddr GetSourceAddress() const {
KProcessAddress GetSourceAddress() const {
return m_address;
}
size_t GetSize() const {
@@ -57,7 +57,7 @@ public:
private:
std::optional<KPageGroup> m_page_group{};
KProcess* m_owner{};
VAddr m_address{};
KProcessAddress m_address{};
KLightLock m_lock;
bool m_is_initialized{};
bool m_is_owner_mapped{};

@@ -4,7 +4,6 @@
#include "core/arm/exclusive_monitor.h"
#include "core/core.h"
#include "core/hle/kernel/k_condition_variable.h"
#include "core/hle/kernel/k_linked_list.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
@@ -19,36 +18,41 @@ namespace Kernel {

namespace {

bool ReadFromUser(Core::System& system, u32* out, VAddr address) {
*out = system.Memory().Read32(address);
bool ReadFromUser(KernelCore& kernel, u32* out, KProcessAddress address) {
*out = GetCurrentMemory(kernel).Read32(GetInteger(address));
return true;
}

bool WriteToUser(Core::System& system, VAddr address, const u32* p) {
system.Memory().Write32(address, *p);
bool WriteToUser(KernelCore& kernel, KProcessAddress address, const u32* p) {
GetCurrentMemory(kernel).Write32(GetInteger(address), *p);
return true;
}

bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero,
bool UpdateLockAtomic(Core::System& system, u32* out, KProcessAddress address, u32 if_zero,
u32 new_orr_mask) {
auto& monitor = system.Monitor();
const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();

// Load the value from the address.
const auto expected = monitor.ExclusiveRead32(current_core, address);
u32 expected{};

// Orr in the new mask.
u32 value = expected | new_orr_mask;
while (true) {
// Load the value from the address.
expected = monitor.ExclusiveRead32(current_core, GetInteger(address));

// If the value is zero, use the if_zero value, otherwise use the newly orr'd value.
if (!expected) {
value = if_zero;
}
// Orr in the new mask.
u32 value = expected | new_orr_mask;

// If the value is zero, use the if_zero value, otherwise use the newly orr'd value.
if (!expected) {
value = if_zero;
}

// Try to store.
if (monitor.ExclusiveWrite32(current_core, GetInteger(address), value)) {
break;
}

// Try to store.
if (!monitor.ExclusiveWrite32(current_core, address, value)) {
// If we failed to store, try again.
return UpdateLockAtomic(system, out, address, if_zero, new_orr_mask);
}

// We're done.
@@ -58,8 +62,8 @@ bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero

class ThreadQueueImplForKConditionVariableWaitForAddress final : public KThreadQueue {
public:
explicit ThreadQueueImplForKConditionVariableWaitForAddress(KernelCore& kernel_)
: KThreadQueue(kernel_) {}
explicit ThreadQueueImplForKConditionVariableWaitForAddress(KernelCore& kernel)
: KThreadQueue(kernel) {}

void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
// Remove the thread as a waiter from its owner.
@@ -76,8 +80,8 @@ private:

public:
explicit ThreadQueueImplForKConditionVariableWaitConditionVariable(
KernelCore& kernel_, KConditionVariable::ThreadTree* t)
: KThreadQueue(kernel_), m_tree(t) {}
KernelCore& kernel, KConditionVariable::ThreadTree* t)
: KThreadQueue(kernel), m_tree(t) {}

void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
// Remove the thread as a waiter from its owner.
@@ -98,17 +102,17 @@ public:

} // namespace

KConditionVariable::KConditionVariable(Core::System& system_)
: system{system_}, kernel{system.Kernel()} {}
KConditionVariable::KConditionVariable(Core::System& system)
: m_system{system}, m_kernel{system.Kernel()} {}

KConditionVariable::~KConditionVariable() = default;

Result KConditionVariable::SignalToAddress(VAddr addr) {
KThread* owner_thread = GetCurrentThreadPointer(kernel);
Result KConditionVariable::SignalToAddress(KProcessAddress addr) {
KThread* owner_thread = GetCurrentThreadPointer(m_kernel);

// Signal the address.
{
KScopedSchedulerLock sl(kernel);
KScopedSchedulerLock sl(m_kernel);

// Remove waiter thread.
bool has_waiters{};
@@ -129,7 +133,7 @@ Result KConditionVariable::SignalToAddress(VAddr addr) {

// Write the value to userspace.
Result result{ResultSuccess};
if (WriteToUser(system, addr, std::addressof(next_value))) [[likely]] {
if (WriteToUser(m_kernel, addr, std::addressof(next_value))) [[likely]] {
result = ResultSuccess;
} else {
result = ResultInvalidCurrentMemory;
@@ -144,27 +148,28 @@ Result KConditionVariable::SignalToAddress(VAddr addr) {
}
}

Result KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value) {
KThread* cur_thread = GetCurrentThreadPointer(kernel);
ThreadQueueImplForKConditionVariableWaitForAddress wait_queue(kernel);
Result KConditionVariable::WaitForAddress(Handle handle, KProcessAddress addr, u32 value) {
KThread* cur_thread = GetCurrentThreadPointer(m_kernel);
ThreadQueueImplForKConditionVariableWaitForAddress wait_queue(m_kernel);

// Wait for the address.
KThread* owner_thread{};
{
KScopedSchedulerLock sl(kernel);
KScopedSchedulerLock sl(m_kernel);

// Check if the thread should terminate.
R_UNLESS(!cur_thread->IsTerminationRequested(), ResultTerminationRequested);

// Read the tag from userspace.
u32 test_tag{};
R_UNLESS(ReadFromUser(system, std::addressof(test_tag), addr), ResultInvalidCurrentMemory);
R_UNLESS(ReadFromUser(m_kernel, std::addressof(test_tag), addr),
ResultInvalidCurrentMemory);

// If the tag isn't the handle (with wait mask), we're done.
R_SUCCEED_IF(test_tag != (handle | Svc::HandleWaitMask));

// Get the lock owner thread.
owner_thread = GetCurrentProcess(kernel)
owner_thread = GetCurrentProcess(m_kernel)
.GetHandleTable()
.GetObjectWithoutPseudoHandle<KThread>(handle)
.ReleasePointerUnsafe();
@@ -177,22 +182,21 @@ Result KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value)
// Begin waiting.
cur_thread->BeginWait(std::addressof(wait_queue));
cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
cur_thread->SetMutexWaitAddressForDebugging(addr);
}

// Close our reference to the owner thread, now that the wait is over.
owner_thread->Close();

// Get the wait result.
return cur_thread->GetWaitResult();
R_RETURN(cur_thread->GetWaitResult());
}

void KConditionVariable::SignalImpl(KThread* thread) {
// Check pre-conditions.
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));

// Update the tag.
VAddr address = thread->GetAddressKey();
KProcessAddress address = thread->GetAddressKey();
u32 own_tag = thread->GetAddressKeyValue();

u32 prev_tag{};
@@ -204,7 +208,7 @@ void KConditionVariable::SignalImpl(KThread* thread) {
// TODO(bunnei): We should call CanAccessAtomic(..) here.
can_access = true;
if (can_access) [[likely]] {
UpdateLockAtomic(system, std::addressof(prev_tag), address, own_tag,
UpdateLockAtomic(m_system, std::addressof(prev_tag), address, own_tag,
Svc::HandleWaitMask);
}
}
@@ -215,7 +219,7 @@ void KConditionVariable::SignalImpl(KThread* thread) {
thread->EndWait(ResultSuccess);
} else {
// Get the previous owner.
KThread* owner_thread = GetCurrentProcess(kernel)
KThread* owner_thread = GetCurrentProcess(m_kernel)
.GetHandleTable()
.GetObjectWithoutPseudoHandle<KThread>(
static_cast<Handle>(prev_tag & ~Svc::HandleWaitMask))
@@ -240,14 +244,14 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {
// Perform signaling.
s32 num_waiters{};
{
KScopedSchedulerLock sl(kernel);
KScopedSchedulerLock sl(m_kernel);

auto it = thread_tree.nfind_key({cv_key, -1});
while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
auto it = m_tree.nfind_key({cv_key, -1});
while ((it != m_tree.end()) && (count <= 0 || num_waiters < count) &&
(it->GetConditionVariableKey() == cv_key)) {
KThread* target_thread = std::addressof(*it);

it = thread_tree.erase(it);
it = m_tree.erase(it);
target_thread->ClearConditionVariable();

this->SignalImpl(target_thread);
@@ -256,27 +260,27 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {
}

// If we have no waiters, clear the has waiter flag.
if (it == thread_tree.end() || it->GetConditionVariableKey() != cv_key) {
if (it == m_tree.end() || it->GetConditionVariableKey() != cv_key) {
const u32 has_waiter_flag{};
WriteToUser(system, cv_key, std::addressof(has_waiter_flag));
WriteToUser(m_kernel, cv_key, std::addressof(has_waiter_flag));
}
}
}

Result KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
Result KConditionVariable::Wait(KProcessAddress addr, u64 key, u32 value, s64 timeout) {
// Prepare to wait.
KThread* cur_thread = GetCurrentThreadPointer(kernel);
KThread* cur_thread = GetCurrentThreadPointer(m_kernel);
KHardwareTimer* timer{};
ThreadQueueImplForKConditionVariableWaitConditionVariable wait_queue(
kernel, std::addressof(thread_tree));
ThreadQueueImplForKConditionVariableWaitConditionVariable wait_queue(m_kernel,
std::addressof(m_tree));

{
KScopedSchedulerLockAndSleep slp(kernel, std::addressof(timer), cur_thread, timeout);
KScopedSchedulerLockAndSleep slp(m_kernel, std::addressof(timer), cur_thread, timeout);

// Check that the thread isn't terminating.
if (cur_thread->IsTerminationRequested()) {
slp.CancelSleep();
return ResultTerminationRequested;
R_THROW(ResultTerminationRequested);
}

// Update the value and process for the next owner.
@@ -302,14 +306,14 @@ Result KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
// Write to the cv key.
{
const u32 has_waiter_flag = 1;
WriteToUser(system, key, std::addressof(has_waiter_flag));
// TODO(bunnei): We should call DataMemoryBarrier(..) here.
WriteToUser(m_kernel, key, std::addressof(has_waiter_flag));
std::atomic_thread_fence(std::memory_order_seq_cst);
}

// Write the value to userspace.
if (!WriteToUser(system, addr, std::addressof(next_value))) {
if (!WriteToUser(m_kernel, addr, std::addressof(next_value))) {
slp.CancelSleep();
return ResultInvalidCurrentMemory;
R_THROW(ResultInvalidCurrentMemory);
}
}

@@ -317,18 +321,17 @@ Result KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
R_UNLESS(timeout != 0, ResultTimedOut);

// Update condition variable tracking.
cur_thread->SetConditionVariable(std::addressof(thread_tree), addr, key, value);
thread_tree.insert(*cur_thread);
cur_thread->SetConditionVariable(std::addressof(m_tree), addr, key, value);
m_tree.insert(*cur_thread);

// Begin waiting.
wait_queue.SetHardwareTimer(timer);
cur_thread->BeginWait(std::addressof(wait_queue));
cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
cur_thread->SetMutexWaitAddressForDebugging(addr);
}

// Get the wait result.
return cur_thread->GetWaitResult();
R_RETURN(cur_thread->GetWaitResult());
}

} // namespace Kernel
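One detail of KConditionVariable::Wait worth noting: the old TODO about emitting a DataMemoryBarrier after publishing the has-waiter flag is now serviced by std::atomic_thread_fence(std::memory_order_seq_cst), keeping the flag store ordered before the tag store that follows it. A sketch of the intended ordering, with illustrative stand-ins for the two guest words:

#include <atomic>
#include <cstdint>

// Illustrative only: cv_flag and mutex_tag stand in for the two words of
// emulated guest memory written in Wait() above.
void PublishWaiter(uint32_t* cv_flag, uint32_t* mutex_tag, uint32_t next_value) {
    *cv_flag = 1;
    // Full fence standing in for the hardware DMB the old TODO asked for:
    // the flag write must become visible before the tag update.
    std::atomic_thread_fence(std::memory_order_seq_cst);
    *mutex_tag = next_value;
}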

@@ -4,10 +4,10 @@
#pragma once

#include "common/assert.h"
#include "common/common_types.h"

#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_thread.h"
#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/result.h"

@@ -21,36 +21,36 @@ class KConditionVariable {
public:
using ThreadTree = typename KThread::ConditionVariableThreadTreeType;

explicit KConditionVariable(Core::System& system_);
explicit KConditionVariable(Core::System& system);
~KConditionVariable();

// Arbitration
[[nodiscard]] Result SignalToAddress(VAddr addr);
[[nodiscard]] Result WaitForAddress(Handle handle, VAddr addr, u32 value);
Result SignalToAddress(KProcessAddress addr);
Result WaitForAddress(Handle handle, KProcessAddress addr, u32 value);

// Condition variable
void Signal(u64 cv_key, s32 count);
[[nodiscard]] Result Wait(VAddr addr, u64 key, u32 value, s64 timeout);
Result Wait(KProcessAddress addr, u64 key, u32 value, s64 timeout);

private:
void SignalImpl(KThread* thread);

ThreadTree thread_tree;

Core::System& system;
KernelCore& kernel;
private:
Core::System& m_system;
KernelCore& m_kernel;
ThreadTree m_tree{};
};

inline void BeforeUpdatePriority(const KernelCore& kernel, KConditionVariable::ThreadTree* tree,
inline void BeforeUpdatePriority(KernelCore& kernel, KConditionVariable::ThreadTree* tree,
KThread* thread) {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));

tree->erase(tree->iterator_to(*thread));
}

inline void AfterUpdatePriority(const KernelCore& kernel, KConditionVariable::ThreadTree* tree,
inline void AfterUpdatePriority(KernelCore& kernel, KConditionVariable::ThreadTree* tree,
KThread* thread) {
ASSERT(kernel.GlobalSchedulerContext().IsLocked());
ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));

tree->insert(*thread);
}

@@ -12,9 +12,9 @@ class KDebug final : public KAutoObjectWithSlabHeapAndContainer<KDebug, KAutoObj
KERNEL_AUTOOBJECT_TRAITS(KDebug, KAutoObject);

public:
explicit KDebug(KernelCore& kernel_) : KAutoObjectWithSlabHeapAndContainer{kernel_} {}
explicit KDebug(KernelCore& kernel) : KAutoObjectWithSlabHeapAndContainer{kernel} {}

static void PostDestroy([[maybe_unused]] uintptr_t arg) {}
static void PostDestroy(uintptr_t arg) {}
};

} // namespace Kernel

@@ -9,8 +9,8 @@

namespace Kernel {

KDeviceAddressSpace::KDeviceAddressSpace(KernelCore& kernel_)
: KAutoObjectWithSlabHeapAndContainer(kernel_), m_lock(kernel_), m_is_initialized(false) {}
KDeviceAddressSpace::KDeviceAddressSpace(KernelCore& kernel)
: KAutoObjectWithSlabHeapAndContainer(kernel), m_lock(kernel), m_is_initialized(false) {}
KDeviceAddressSpace::~KDeviceAddressSpace() = default;

void KDeviceAddressSpace::Initialize() {
@@ -54,8 +54,8 @@ Result KDeviceAddressSpace::Detach(Svc::DeviceName device_name) {
R_SUCCEED();
}

Result KDeviceAddressSpace::Map(KPageTable* page_table, VAddr process_address, size_t size,
u64 device_address, u32 option, bool is_aligned) {
Result KDeviceAddressSpace::Map(KPageTable* page_table, KProcessAddress process_address,
size_t size, u64 device_address, u32 option, bool is_aligned) {
// Check that the address falls within the space.
R_UNLESS((m_space_address <= device_address &&
device_address + size - 1 <= m_space_address + m_space_size - 1),
@@ -113,8 +113,8 @@ Result KDeviceAddressSpace::Map(KPageTable* page_table, VAddr process_address, s
R_SUCCEED();
}

Result KDeviceAddressSpace::Unmap(KPageTable* page_table, VAddr process_address, size_t size,
u64 device_address) {
Result KDeviceAddressSpace::Unmap(KPageTable* page_table, KProcessAddress process_address,
size_t size, u64 device_address) {
// Check that the address falls within the space.
R_UNLESS((m_space_address <= device_address &&
device_address + size - 1 <= m_space_address + m_space_size - 1),

@@ -5,8 +5,8 @@

#include <string>

#include "common/common_types.h"
#include "core/hle/kernel/k_page_table.h"
#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/kernel/slab_helpers.h"
#include "core/hle/result.h"

@@ -31,23 +31,24 @@ public:
Result Attach(Svc::DeviceName device_name);
Result Detach(Svc::DeviceName device_name);

Result MapByForce(KPageTable* page_table, VAddr process_address, size_t size,
Result MapByForce(KPageTable* page_table, KProcessAddress process_address, size_t size,
u64 device_address, u32 option) {
R_RETURN(this->Map(page_table, process_address, size, device_address, option, false));
}

Result MapAligned(KPageTable* page_table, VAddr process_address, size_t size,
Result MapAligned(KPageTable* page_table, KProcessAddress process_address, size_t size,
u64 device_address, u32 option) {
R_RETURN(this->Map(page_table, process_address, size, device_address, option, true));
}

Result Unmap(KPageTable* page_table, VAddr process_address, size_t size, u64 device_address);
Result Unmap(KPageTable* page_table, KProcessAddress process_address, size_t size,
u64 device_address);

static void Initialize();

private:
Result Map(KPageTable* page_table, VAddr process_address, size_t size, u64 device_address,
u32 option, bool is_aligned);
Result Map(KPageTable* page_table, KProcessAddress process_address, size_t size,
u64 device_address, u32 option, bool is_aligned);

private:
KLightLock m_lock;

@@ -6,9 +6,9 @@
#include <vector>

#include "common/alignment.h"
#include "common/common_types.h"
#include "core/hle/kernel/k_page_bitmap.h"
#include "core/hle/kernel/k_spin_lock.h"
#include "core/hle/kernel/k_typed_address.h"
#include "core/hle/kernel/memory_types.h"
#include "core/hle/kernel/svc_results.h"

@@ -26,23 +26,23 @@ public:
KDynamicPageManager() = default;

template <typename T>
T* GetPointer(VAddr addr) {
T* GetPointer(KVirtualAddress addr) {
return reinterpret_cast<T*>(m_backing_memory.data() + (addr - m_address));
}

template <typename T>
const T* GetPointer(VAddr addr) const {
const T* GetPointer(KVirtualAddress addr) const {
return reinterpret_cast<T*>(m_backing_memory.data() + (addr - m_address));
}

Result Initialize(VAddr memory, size_t size, size_t align) {
Result Initialize(KVirtualAddress memory, size_t size, size_t align) {
// We need to have positive size.
R_UNLESS(size > 0, ResultOutOfMemory);
m_backing_memory.resize(size);

// Set addresses.
m_address = memory;
m_aligned_address = Common::AlignDown(memory, align);
m_aligned_address = Common::AlignDown(GetInteger(memory), align);

// Calculate extents.
const size_t managed_size = m_address + size - m_aligned_address;
@@ -79,7 +79,7 @@ public:
R_SUCCEED();
}

VAddr GetAddress() const {
KVirtualAddress GetAddress() const {
return m_address;
}
size_t GetSize() const {
@@ -145,7 +145,8 @@ public:
KScopedSpinLock lk(m_lock);

// Set the bit for the free page.
size_t offset = (reinterpret_cast<uintptr_t>(pb) - m_aligned_address) / sizeof(PageBuffer);
size_t offset =
(reinterpret_cast<uint64_t>(pb) - GetInteger(m_aligned_address)) / sizeof(PageBuffer);
m_page_bitmap.SetBit(offset);

// Decrement our used count.
@@ -158,8 +159,8 @@ private:
size_t m_used{};
size_t m_peak{};
size_t m_count{};
VAddr m_address{};
VAddr m_aligned_address{};
KVirtualAddress m_address{};
KVirtualAddress m_aligned_address{};
size_t m_size{};

// TODO(bunnei): Back by host memory until we emulate kernel virtual address space.
|
||||
|
||||
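Note: with typed addresses, Initialize() has to unwrap the value before Common::AlignDown, and Free() measures the byte distance from the aligned base to compute a bitmap slot. A small worked check of that arithmetic (illustrative constants; Common::AlignDown itself requires a power-of-two alignment):

#include <cstdint>

constexpr uint64_t AlignDown(uint64_t value, uint64_t align) {
    return value & ~(align - 1); // power-of-two alignment only
}

int main() {
    constexpr uint64_t page_size = 0x1000;
    constexpr uint64_t base = 0x80000123;                     // unaligned backing address
    constexpr uint64_t aligned = AlignDown(base, page_size);  // 0x80000000

    // A page buffer at 0x80003000 lands in bitmap slot 3, matching
    // (reinterpret_cast<u64>(pb) - GetInteger(m_aligned_address)) / sizeof(PageBuffer)
    // for a page-sized PageBuffer.
    static_assert((0x80003000 - aligned) / page_size == 3);
    return 0;
}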
src/core/hle/kernel/k_dynamic_slab_heap.h
@@ -19,7 +19,7 @@ class KDynamicSlabHeap : protected impl::KSlabHeapImpl {
 public:
     constexpr KDynamicSlabHeap() = default;
 
-    constexpr VAddr GetAddress() const {
+    constexpr KVirtualAddress GetAddress() const {
         return m_address;
     }
     constexpr size_t GetSize() const {
@@ -35,7 +35,7 @@ public:
         return m_count.load();
     }
 
-    constexpr bool IsInRange(VAddr addr) const {
+    constexpr bool IsInRange(KVirtualAddress addr) const {
         return this->GetAddress() <= addr && addr <= this->GetAddress() + this->GetSize() - 1;
     }
 
@@ -115,7 +115,7 @@ private:
     std::atomic<size_t> m_used{};
     std::atomic<size_t> m_peak{};
     std::atomic<size_t> m_count{};
-    VAddr m_address{};
+    KVirtualAddress m_address{};
     size_t m_size{};
 };
 
src/core/hle/kernel/k_event.cpp
@@ -7,8 +7,8 @@
 
 namespace Kernel {
 
-KEvent::KEvent(KernelCore& kernel_)
-    : KAutoObjectWithSlabHeapAndContainer{kernel_}, m_readable_event{kernel_} {}
+KEvent::KEvent(KernelCore& kernel)
+    : KAutoObjectWithSlabHeapAndContainer{kernel}, m_readable_event{kernel} {}
 
 KEvent::~KEvent() = default;
 
@@ -36,7 +36,7 @@ void KEvent::Finalize() {
 }
 
 Result KEvent::Signal() {
-    KScopedSchedulerLock sl{kernel};
+    KScopedSchedulerLock sl{m_kernel};
 
     R_SUCCEED_IF(m_readable_event_destroyed);
 
@@ -44,7 +44,7 @@ Result KEvent::Signal() {
 }
 
 Result KEvent::Clear() {
-    KScopedSchedulerLock sl{kernel};
+    KScopedSchedulerLock sl{m_kernel};
 
     R_SUCCEED_IF(m_readable_event_destroyed);
 
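Note: beyond the kernel -> m_kernel rename, Signal() and Clear() lean on KScopedSchedulerLock being a scope-bound guard, so the early return in R_SUCCEED_IF still releases the scheduler lock. A generic sketch of that RAII shape (std::mutex is used purely for illustration; the real guard locks the kernel's global scheduler state):

#include <mutex>

template <typename Lock>
class ScopedLock {
public:
    explicit ScopedLock(Lock& lock) : m_lock(lock) {
        m_lock.lock(); // acquire on construction
    }
    ~ScopedLock() {
        m_lock.unlock(); // release on every path out of the scope
    }
    ScopedLock(const ScopedLock&) = delete;
    ScopedLock& operator=(const ScopedLock&) = delete;

private:
    Lock& m_lock;
};

std::mutex g_lock;

bool Signal(bool already_destroyed) {
    ScopedLock sl{g_lock};
    if (already_destroyed) {
        return true; // early return, analogous to R_SUCCEED_IF; still unlocks
    }
    return false;
}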
src/core/hle/kernel/k_event.h
@@ -16,7 +16,7 @@ class KEvent final : public KAutoObjectWithSlabHeapAndContainer<KEvent, KAutoObj
     KERNEL_AUTOOBJECT_TRAITS(KEvent, KAutoObject);
 
 public:
-    explicit KEvent(KernelCore& kernel_);
+    explicit KEvent(KernelCore& kernel);
     ~KEvent() override;
 
     void Initialize(KProcess* owner);
src/core/hle/kernel/k_light_condition_variable.cpp
@@ -13,9 +13,9 @@ namespace {
 
 class ThreadQueueImplForKLightConditionVariable final : public KThreadQueue {
 public:
-    ThreadQueueImplForKLightConditionVariable(KernelCore& kernel_, KThread::WaiterList* wl,
+    ThreadQueueImplForKLightConditionVariable(KernelCore& kernel, KThread::WaiterList* wl,
                                               bool term)
-        : KThreadQueue(kernel_), m_wait_list(wl), m_allow_terminating_thread(term) {}
+        : KThreadQueue(kernel), m_wait_list(wl), m_allow_terminating_thread(term) {}
 
     void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
         // Only process waits if we're allowed to.
@@ -39,15 +39,15 @@ private:
 
 void KLightConditionVariable::Wait(KLightLock* lock, s64 timeout, bool allow_terminating_thread) {
     // Create thread queue.
-    KThread* owner = GetCurrentThreadPointer(kernel);
+    KThread* owner = GetCurrentThreadPointer(m_kernel);
     KHardwareTimer* timer{};
 
-    ThreadQueueImplForKLightConditionVariable wait_queue(kernel, std::addressof(wait_list),
+    ThreadQueueImplForKLightConditionVariable wait_queue(m_kernel, std::addressof(m_wait_list),
                                                          allow_terminating_thread);
 
     // Sleep the thread.
     {
-        KScopedSchedulerLockAndSleep lk(kernel, std::addressof(timer), owner, timeout);
+        KScopedSchedulerLockAndSleep lk(m_kernel, std::addressof(timer), owner, timeout);
 
         if (!allow_terminating_thread && owner->IsTerminationRequested()) {
             lk.CancelSleep();
@@ -57,7 +57,7 @@ void KLightConditionVariable::Wait(KLightLock* lock, s64 timeout, bool allow_ter
         lock->Unlock();
 
         // Add the thread to the queue.
-        wait_list.push_back(*owner);
+        m_wait_list.push_back(*owner);
 
         // Begin waiting.
         wait_queue.SetHardwareTimer(timer);
@@ -69,10 +69,10 @@ void KLightConditionVariable::Wait(KLightLock* lock, s64 timeout, bool allow_ter
 }
 
 void KLightConditionVariable::Broadcast() {
-    KScopedSchedulerLock lk(kernel);
+    KScopedSchedulerLock lk(m_kernel);
 
     // Signal all threads.
-    for (auto it = wait_list.begin(); it != wait_list.end(); it = wait_list.erase(it)) {
+    for (auto it = m_wait_list.begin(); it != m_wait_list.end(); it = m_wait_list.erase(it)) {
         it->EndWait(ResultSuccess);
     }
 }
src/core/hle/kernel/k_light_condition_variable.h
@@ -13,13 +13,13 @@ class KLightLock;
 
 class KLightConditionVariable {
 public:
-    explicit KLightConditionVariable(KernelCore& kernel_) : kernel{kernel_} {}
+    explicit KLightConditionVariable(KernelCore& kernel) : m_kernel{kernel} {}
 
     void Wait(KLightLock* lock, s64 timeout = -1, bool allow_terminating_thread = true);
     void Broadcast();
 
 private:
-    KernelCore& kernel;
-    KThread::WaiterList wait_list{};
+    KernelCore& m_kernel;
+    KThread::WaiterList m_wait_list{};
 };
 } // namespace Kernel
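Note: Wait() follows the classic condition-variable protocol: under the scheduler lock it releases the caller's KLightLock, appends the current thread to m_wait_list, sleeps with an optional timeout, and re-acquires the lock afterwards, while Broadcast() ends every queued wait. For comparison, the same shape with the standard library (a general analogue, not yuzu code):

#include <chrono>
#include <condition_variable>
#include <mutex>

std::mutex m;
std::condition_variable cv;
bool signaled = false;

void Waiter() {
    std::unique_lock lk(m);
    // wait_for atomically unlocks, sleeps (bounded), and relocks,
    // mirroring Wait()'s unlock -> enqueue -> sleep -> relock sequence.
    cv.wait_for(lk, std::chrono::milliseconds(100), [] { return signaled; });
}

void Broadcaster() {
    {
        std::lock_guard lk(m);
        signaled = true;
    }
    cv.notify_all(); // analogous to Broadcast() ending every wait
}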
src/core/hle/kernel/k_light_lock.cpp
@@ -13,7 +13,7 @@ namespace {
 
 class ThreadQueueImplForKLightLock final : public KThreadQueue {
 public:
-    explicit ThreadQueueImplForKLightLock(KernelCore& kernel_) : KThreadQueue(kernel_) {}
+    explicit ThreadQueueImplForKLightLock(KernelCore& kernel) : KThreadQueue(kernel) {}
 
     void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
         // Remove the thread as a waiter from its owner.
@@ -29,13 +29,13 @@ public:
 } // namespace
 
 void KLightLock::Lock() {
-    const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(kernel));
+    const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(m_kernel));
 
     while (true) {
-        uintptr_t old_tag = tag.load(std::memory_order_relaxed);
+        uintptr_t old_tag = m_tag.load(std::memory_order_relaxed);
 
-        while (!tag.compare_exchange_weak(old_tag, (old_tag == 0) ? cur_thread : (old_tag | 1),
-                                          std::memory_order_acquire)) {
+        while (!m_tag.compare_exchange_weak(old_tag, (old_tag == 0) ? cur_thread : (old_tag | 1),
+                                            std::memory_order_acquire)) {
         }
 
         if (old_tag == 0 || this->LockSlowPath(old_tag | 1, cur_thread)) {
@@ -45,30 +45,30 @@ void KLightLock::Lock() {
 }
 
 void KLightLock::Unlock() {
-    const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(kernel));
+    const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(m_kernel));
 
     uintptr_t expected = cur_thread;
-    if (!tag.compare_exchange_strong(expected, 0, std::memory_order_release)) {
+    if (!m_tag.compare_exchange_strong(expected, 0, std::memory_order_release)) {
         this->UnlockSlowPath(cur_thread);
     }
 }
 
 bool KLightLock::LockSlowPath(uintptr_t _owner, uintptr_t _cur_thread) {
     KThread* cur_thread = reinterpret_cast<KThread*>(_cur_thread);
-    ThreadQueueImplForKLightLock wait_queue(kernel);
+    ThreadQueueImplForKLightLock wait_queue(m_kernel);
 
     // Pend the current thread waiting on the owner thread.
     {
-        KScopedSchedulerLock sl{kernel};
+        KScopedSchedulerLock sl{m_kernel};
 
         // Ensure we actually have locking to do.
-        if (tag.load(std::memory_order_relaxed) != _owner) {
+        if (m_tag.load(std::memory_order_relaxed) != _owner) {
             return false;
         }
 
         // Add the current thread as a waiter on the owner.
         KThread* owner_thread = reinterpret_cast<KThread*>(_owner & ~1ULL);
-        cur_thread->SetKernelAddressKey(reinterpret_cast<uintptr_t>(std::addressof(tag)));
+        cur_thread->SetKernelAddressKey(reinterpret_cast<uintptr_t>(std::addressof(m_tag)));
         owner_thread->AddWaiter(cur_thread);
 
         // Begin waiting to hold the lock.
@@ -87,12 +87,12 @@ void KLightLock::UnlockSlowPath(uintptr_t _cur_thread) {
 
     // Unlock.
     {
-        KScopedSchedulerLock sl(kernel);
+        KScopedSchedulerLock sl(m_kernel);
 
         // Get the next owner.
         bool has_waiters;
         KThread* next_owner = owner_thread->RemoveKernelWaiterByKey(
-            std::addressof(has_waiters), reinterpret_cast<uintptr_t>(std::addressof(tag)));
+            std::addressof(has_waiters), reinterpret_cast<uintptr_t>(std::addressof(m_tag)));
 
         // Pass the lock to the next owner.
         uintptr_t next_tag = 0;
@@ -114,12 +114,13 @@ void KLightLock::UnlockSlowPath(uintptr_t _cur_thread) {
         }
 
         // Write the new tag value.
-        tag.store(next_tag, std::memory_order_release);
+        m_tag.store(next_tag, std::memory_order_release);
     }
 }
 
 bool KLightLock::IsLockedByCurrentThread() const {
-    return (tag | 1ULL) == (reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(kernel)) | 1ULL);
+    return (m_tag.load() | 1ULL) ==
+           (reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(m_kernel)) | 1ULL);
 }
 
 } // namespace Kernel
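Note: the m_tag rename makes the lock's encoding easier to see. The atomic word is zero when free, otherwise the owner's KThread pointer with bit 0 set once another thread has contended, which is why the slow paths mask with ~1ULL and IsLockedByCurrentThread() forces bit 0 on both sides of the comparison. A reduced model of that tag discipline follows (it spins where the real lock parks waiters through the scheduler, and its unlock hand-off is simplified; thread ids must be nonzero and at least 2-byte aligned):

#include <atomic>
#include <cstdint>

class TaggedSpinLock {
public:
    void Lock(uintptr_t cur_thread) {
        for (;;) {
            uintptr_t expected = 0;
            // Fast path: CAS free (0) -> owned (our thread pointer).
            if (m_tag.compare_exchange_weak(expected, cur_thread, std::memory_order_acquire)) {
                return;
            }
            // Contended: publish that fact by setting the low bit, then retry.
            if (expected != 0) {
                m_tag.compare_exchange_weak(expected, expected | 1, std::memory_order_relaxed);
            }
        }
    }

    void Unlock(uintptr_t cur_thread) {
        uintptr_t expected = cur_thread;
        // Succeeds only if nobody set the contention bit while we held the lock.
        if (!m_tag.compare_exchange_strong(expected, 0, std::memory_order_release)) {
            m_tag.store(0, std::memory_order_release); // simplified; real code hands off to a waiter
        }
    }

    bool IsLockedBy(uintptr_t cur_thread) const {
        // OR in bit 0 so the answer ignores the contention flag.
        return (m_tag.load() | 1ULL) == (cur_thread | 1ULL);
    }

private:
    std::atomic<uintptr_t> m_tag{};
};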
src/core/hle/kernel/k_light_lock.h
@@ -13,7 +13,7 @@ class KernelCore;
 
 class KLightLock {
 public:
-    explicit KLightLock(KernelCore& kernel_) : kernel{kernel_} {}
+    explicit KLightLock(KernelCore& kernel) : m_kernel{kernel} {}
 
     void Lock();
 
@@ -24,14 +24,14 @@ public:
     void UnlockSlowPath(uintptr_t cur_thread);
 
     bool IsLocked() const {
-        return tag != 0;
+        return m_tag.load() != 0;
     }
 
     bool IsLockedByCurrentThread() const;
 
 private:
-    std::atomic<uintptr_t> tag{};
-    KernelCore& kernel;
+    std::atomic<uintptr_t> m_tag{};
+    KernelCore& m_kernel;
 };
 
 using KScopedLightLock = KScopedLock<KLightLock>;
src/core/hle/kernel/k_linked_list.h (deleted)
@@ -1,238 +0,0 @@
-// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#pragma once
-
-#include <boost/intrusive/list.hpp>
-
-#include "common/assert.h"
-#include "core/hle/kernel/slab_helpers.h"
-
-namespace Kernel {
-
-class KernelCore;
-
-class KLinkedListNode : public boost::intrusive::list_base_hook<>,
-                        public KSlabAllocated<KLinkedListNode> {
-
-public:
-    explicit KLinkedListNode(KernelCore&) {}
-    KLinkedListNode() = default;
-
-    void Initialize(void* it) {
-        m_item = it;
-    }
-
-    void* GetItem() const {
-        return m_item;
-    }
-
-private:
-    void* m_item = nullptr;
-};
-
-template <typename T>
-class KLinkedList : private boost::intrusive::list<KLinkedListNode> {
-private:
-    using BaseList = boost::intrusive::list<KLinkedListNode>;
-
-public:
-    template <bool Const>
-    class Iterator;
-
-    using value_type = T;
-    using size_type = size_t;
-    using difference_type = ptrdiff_t;
-    using pointer = value_type*;
-    using const_pointer = const value_type*;
-    using reference = value_type&;
-    using const_reference = const value_type&;
-    using iterator = Iterator<false>;
-    using const_iterator = Iterator<true>;
-    using reverse_iterator = std::reverse_iterator<iterator>;
-    using const_reverse_iterator = std::reverse_iterator<const_iterator>;
-
-    template <bool Const>
-    class Iterator {
-    private:
-        using BaseIterator = BaseList::iterator;
-        friend class KLinkedList;
-
-    public:
-        using iterator_category = std::bidirectional_iterator_tag;
-        using value_type = typename KLinkedList::value_type;
-        using difference_type = typename KLinkedList::difference_type;
-        using pointer = std::conditional_t<Const, KLinkedList::const_pointer, KLinkedList::pointer>;
-        using reference =
-            std::conditional_t<Const, KLinkedList::const_reference, KLinkedList::reference>;
-
-    public:
-        explicit Iterator(BaseIterator it) : m_base_it(it) {}
-
-        pointer GetItem() const {
-            return static_cast<pointer>(m_base_it->GetItem());
-        }
-
-        bool operator==(const Iterator& rhs) const {
-            return m_base_it == rhs.m_base_it;
-        }
-
-        bool operator!=(const Iterator& rhs) const {
-            return !(*this == rhs);
-        }
-
-        pointer operator->() const {
-            return this->GetItem();
-        }
-
-        reference operator*() const {
-            return *this->GetItem();
-        }
-
-        Iterator& operator++() {
-            ++m_base_it;
-            return *this;
-        }
-
-        Iterator& operator--() {
-            --m_base_it;
-            return *this;
-        }
-
-        Iterator operator++(int) {
-            const Iterator it{*this};
-            ++(*this);
-            return it;
-        }
-
-        Iterator operator--(int) {
-            const Iterator it{*this};
-            --(*this);
-            return it;
-        }
-
-        operator Iterator<true>() const {
-            return Iterator<true>(m_base_it);
-        }
-
-    private:
-        BaseIterator m_base_it;
-    };
-
-public:
-    constexpr KLinkedList(KernelCore& kernel_) : BaseList(), kernel{kernel_} {}
-
-    ~KLinkedList() {
-        // Erase all elements.
-        for (auto it = begin(); it != end(); it = erase(it)) {
-        }
-
-        // Ensure we succeeded.
-        ASSERT(this->empty());
-    }
-
-    // Iterator accessors.
-    iterator begin() {
-        return iterator(BaseList::begin());
-    }
-
-    const_iterator begin() const {
-        return const_iterator(BaseList::begin());
-    }
-
-    iterator end() {
-        return iterator(BaseList::end());
-    }
-
-    const_iterator end() const {
-        return const_iterator(BaseList::end());
-    }
-
-    const_iterator cbegin() const {
-        return this->begin();
-    }
-
-    const_iterator cend() const {
-        return this->end();
-    }
-
-    reverse_iterator rbegin() {
-        return reverse_iterator(this->end());
-    }
-
-    const_reverse_iterator rbegin() const {
-        return const_reverse_iterator(this->end());
-    }
-
-    reverse_iterator rend() {
-        return reverse_iterator(this->begin());
-    }
-
-    const_reverse_iterator rend() const {
-        return const_reverse_iterator(this->begin());
-    }
-
-    const_reverse_iterator crbegin() const {
-        return this->rbegin();
-    }
-
-    const_reverse_iterator crend() const {
-        return this->rend();
-    }
-
-    // Content management.
-    using BaseList::empty;
-    using BaseList::size;
-
-    reference back() {
-        return *(--this->end());
-    }
-
-    const_reference back() const {
-        return *(--this->end());
-    }
-
-    reference front() {
-        return *this->begin();
-    }
-
-    const_reference front() const {
-        return *this->begin();
-    }
-
-    iterator insert(const_iterator pos, reference ref) {
-        KLinkedListNode* new_node = KLinkedListNode::Allocate(kernel);
-        ASSERT(new_node != nullptr);
-        new_node->Initialize(std::addressof(ref));
-        return iterator(BaseList::insert(pos.m_base_it, *new_node));
-    }
-
-    void push_back(reference ref) {
-        this->insert(this->end(), ref);
-    }
-
-    void push_front(reference ref) {
-        this->insert(this->begin(), ref);
-    }
-
-    void pop_back() {
-        this->erase(--this->end());
-    }
-
-    void pop_front() {
-        this->erase(this->begin());
-    }
-
-    iterator erase(const iterator pos) {
-        KLinkedListNode* freed_node = std::addressof(*pos.m_base_it);
-        iterator ret = iterator(BaseList::erase(pos.m_base_it));
-        KLinkedListNode::Free(kernel, freed_node);
-
-        return ret;
-    }
-
-private:
-    KernelCore& kernel;
-};
-
-} // namespace Kernel
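Note: the deleted KLinkedList was a thin adapter: a boost::intrusive::list of slab-allocated KLinkedListNode objects, each holding a void* to the real element, so list membership never touched the element's own layout. The diff only shows the removal, but the underlying boost.Intrusive mechanism is worth seeing; a minimal example without the slab layer:

#include <boost/intrusive/list.hpp>
#include <cassert>

// The element embeds its own link hook, so insertion allocates nothing.
struct Item : boost::intrusive::list_base_hook<> {
    explicit Item(int v) : value(v) {}
    int value;
};

int main() {
    Item a{1};
    Item b{2};

    boost::intrusive::list<Item> list;
    list.push_back(a); // links a's embedded hook in place
    list.push_back(b);
    assert(list.front().value == 1);

    list.clear(); // unlinks only; a and b keep living on the stack
    return 0;
}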
src/core/hle/kernel/k_memory_block.h
@@ -5,8 +5,8 @@
 
 #include "common/alignment.h"
 #include "common/assert.h"
-#include "common/common_types.h"
 #include "common/intrusive_red_black_tree.h"
+#include "core/hle/kernel/k_typed_address.h"
 #include "core/hle/kernel/memory_types.h"
 #include "core/hle/kernel/svc_types.h"
 
@@ -282,7 +282,7 @@ class KMemoryBlock : public Common::IntrusiveRedBlackTreeBaseNode<KMemoryBlock>
private:
     u16 m_device_disable_merge_left_count{};
     u16 m_device_disable_merge_right_count{};
-    VAddr m_address{};
+    KProcessAddress m_address{};
     size_t m_num_pages{};
     KMemoryState m_memory_state{KMemoryState::None};
     u16 m_ipc_lock_count{};
@@ -306,7 +306,7 @@ public:
     }
 
 public:
-    constexpr VAddr GetAddress() const {
+    constexpr KProcessAddress GetAddress() const {
         return m_address;
     }
 
@@ -318,11 +318,11 @@ public:
         return this->GetNumPages() * PageSize;
     }
 
-    constexpr VAddr GetEndAddress() const {
+    constexpr KProcessAddress GetEndAddress() const {
         return this->GetAddress() + this->GetSize();
     }
 
-    constexpr VAddr GetLastAddress() const {
+    constexpr KProcessAddress GetLastAddress() const {
         return this->GetEndAddress() - 1;
     }
 
@@ -348,7 +348,7 @@ public:
 
     constexpr KMemoryInfo GetMemoryInfo() const {
         return {
-            .m_address = this->GetAddress(),
+            .m_address = GetInteger(this->GetAddress()),
             .m_size = this->GetSize(),
             .m_state = m_memory_state,
             .m_device_disable_merge_left_count = m_device_disable_merge_left_count,
@@ -366,12 +366,12 @@ public:
 public:
     explicit KMemoryBlock() = default;
 
-    constexpr KMemoryBlock(VAddr addr, size_t np, KMemoryState ms, KMemoryPermission p,
+    constexpr KMemoryBlock(KProcessAddress addr, size_t np, KMemoryState ms, KMemoryPermission p,
                            KMemoryAttribute attr)
         : Common::IntrusiveRedBlackTreeBaseNode<KMemoryBlock>(), m_address(addr), m_num_pages(np),
           m_memory_state(ms), m_permission(p), m_attribute(attr) {}
 
-    constexpr void Initialize(VAddr addr, size_t np, KMemoryState ms, KMemoryPermission p,
+    constexpr void Initialize(KProcessAddress addr, size_t np, KMemoryState ms, KMemoryPermission p,
                               KMemoryAttribute attr) {
         m_device_disable_merge_left_count = 0;
         m_device_disable_merge_right_count = 0;
@@ -408,7 +408,7 @@ public:
             KMemoryBlockDisableMergeAttribute::None;
     }
 
-    constexpr bool Contains(VAddr addr) const {
+    constexpr bool Contains(KProcessAddress addr) const {
         return this->GetAddress() <= addr && addr <= this->GetEndAddress();
     }
 
@@ -443,10 +443,10 @@ public:
         }
     }
 
-    constexpr void Split(KMemoryBlock* block, VAddr addr) {
+    constexpr void Split(KMemoryBlock* block, KProcessAddress addr) {
         ASSERT(this->GetAddress() < addr);
         ASSERT(this->Contains(addr));
-        ASSERT(Common::IsAligned(addr, PageSize));
+        ASSERT(Common::IsAligned(GetInteger(addr), PageSize));
 
         block->m_address = m_address;
         block->m_num_pages = (addr - this->GetAddress()) / PageSize;
@@ -471,8 +471,8 @@ public:
             m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllRight);
     }
 
-    constexpr void UpdateDeviceDisableMergeStateForShareLeft(
-        [[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) {
+    constexpr void UpdateDeviceDisableMergeStateForShareLeft(KMemoryPermission new_perm, bool left,
+                                                             bool right) {
         // New permission/right aren't used.
         if (left) {
             m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
@@ -482,8 +482,8 @@ public:
         }
     }
 
-    constexpr void UpdateDeviceDisableMergeStateForShareRight(
-        [[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) {
+    constexpr void UpdateDeviceDisableMergeStateForShareRight(KMemoryPermission new_perm, bool left,
+                                                              bool right) {
         // New permission/left aren't used.
         if (right) {
             m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
@@ -499,8 +499,7 @@ public:
         this->UpdateDeviceDisableMergeStateForShareRight(new_perm, left, right);
     }
 
-    constexpr void ShareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left,
-                                 bool right) {
+    constexpr void ShareToDevice(KMemoryPermission new_perm, bool left, bool right) {
         // New permission isn't used.
 
         // We must either be shared or have a zero lock count.
@@ -516,8 +515,8 @@ public:
         this->UpdateDeviceDisableMergeStateForShare(new_perm, left, right);
     }
 
-    constexpr void UpdateDeviceDisableMergeStateForUnshareLeft(
-        [[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) {
+    constexpr void UpdateDeviceDisableMergeStateForUnshareLeft(KMemoryPermission new_perm,
+                                                               bool left, bool right) {
         // New permission/right aren't used.
 
        if (left) {
@@ -536,8 +535,8 @@ public:
         }
     }
 
-    constexpr void UpdateDeviceDisableMergeStateForUnshareRight(
-        [[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) {
+    constexpr void UpdateDeviceDisableMergeStateForUnshareRight(KMemoryPermission new_perm,
+                                                                bool left, bool right) {
         // New permission/left aren't used.
 
         if (right) {
@@ -556,8 +555,7 @@ public:
         this->UpdateDeviceDisableMergeStateForUnshareRight(new_perm, left, right);
     }
 
-    constexpr void UnshareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left,
-                                   bool right) {
+    constexpr void UnshareToDevice(KMemoryPermission new_perm, bool left, bool right) {
         // New permission isn't used.
 
         // We must be shared.
@@ -575,8 +573,7 @@ public:
         this->UpdateDeviceDisableMergeStateForUnshare(new_perm, left, right);
     }
 
-    constexpr void UnshareToDeviceRight([[maybe_unused]] KMemoryPermission new_perm, bool left,
-                                        bool right) {
+    constexpr void UnshareToDeviceRight(KMemoryPermission new_perm, bool left, bool right) {
         // New permission isn't used.
 
         // We must be shared.
@@ -594,7 +591,7 @@ public:
         this->UpdateDeviceDisableMergeStateForUnshareRight(new_perm, left, right);
     }
 
-    constexpr void LockForIpc(KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) {
+    constexpr void LockForIpc(KMemoryPermission new_perm, bool left, bool right) {
         // We must either be locked or have a zero lock count.
         ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::IpcLocked ||
               m_ipc_lock_count == 0);
@@ -626,8 +623,7 @@ public:
         }
     }
 
-    constexpr void UnlockForIpc([[maybe_unused]] KMemoryPermission new_perm, bool left,
-                                [[maybe_unused]] bool right) {
+    constexpr void UnlockForIpc(KMemoryPermission new_perm, bool left, bool right) {
         // New permission isn't used.
 
         // We must be locked.
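Note: Split() carves one block in two at addr: the new block keeps the original base and (addr - base) / PageSize pages, the remainder stays in *this, and the alignment assert now unwraps the typed address via GetInteger() first. A worked numeric check of that arithmetic (illustrative values):

#include <cstddef>
#include <cstdint>

int main() {
    constexpr uint64_t page_size = 0x1000;
    constexpr uint64_t base = 0x10000; // block covers [0x10000, 0x18000), 8 pages
    constexpr size_t num_pages = 8;
    constexpr uint64_t split = 0x13000; // page-aligned, strictly inside the block

    constexpr size_t front_pages = (split - base) / page_size; // pages kept by the new block
    constexpr size_t back_pages = num_pages - front_pages;     // pages left in *this
    static_assert(front_pages == 3 && back_pages == 5);
    static_assert(base + front_pages * page_size == split); // the two halves abut at addr
    return 0;
}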
src/core/hle/kernel/k_memory_block_manager.cpp
@@ -7,7 +7,8 @@ namespace Kernel {
 
 KMemoryBlockManager::KMemoryBlockManager() = default;
 
-Result KMemoryBlockManager::Initialize(VAddr st, VAddr nd, KMemoryBlockSlabManager* slab_manager) {
+Result KMemoryBlockManager::Initialize(KProcessAddress st, KProcessAddress nd,
+                                       KMemoryBlockSlabManager* slab_manager) {
     // Allocate a block to encapsulate the address space, insert it into the tree.
     KMemoryBlock* start_block = slab_manager->Allocate();
     R_UNLESS(start_block != nullptr, ResultOutOfResource);
@@ -15,8 +16,8 @@ Result KMemoryBlockManager::Initialize(VAddr st, VAddr nd, KMemoryBlockSlabManag
     // Set our start and end.
     m_start_address = st;
     m_end_address = nd;
-    ASSERT(Common::IsAligned(m_start_address, PageSize));
-    ASSERT(Common::IsAligned(m_end_address, PageSize));
+    ASSERT(Common::IsAligned(GetInteger(m_start_address), PageSize));
+    ASSERT(Common::IsAligned(GetInteger(m_end_address), PageSize));
 
     // Initialize and insert the block.
     start_block->Initialize(m_start_address, (m_end_address - m_start_address) / PageSize,
@@ -40,12 +41,13 @@ void KMemoryBlockManager::Finalize(KMemoryBlockSlabManager* slab_manager,
     ASSERT(m_memory_block_tree.empty());
 }
 
-VAddr KMemoryBlockManager::FindFreeArea(VAddr region_start, size_t region_num_pages,
-                                        size_t num_pages, size_t alignment, size_t offset,
-                                        size_t guard_pages) const {
+KProcessAddress KMemoryBlockManager::FindFreeArea(KProcessAddress region_start,
+                                                  size_t region_num_pages, size_t num_pages,
+                                                  size_t alignment, size_t offset,
+                                                  size_t guard_pages) const {
     if (num_pages > 0) {
-        const VAddr region_end = region_start + region_num_pages * PageSize;
-        const VAddr region_last = region_end - 1;
+        const KProcessAddress region_end = region_start + region_num_pages * PageSize;
+        const KProcessAddress region_last = region_end - 1;
         for (const_iterator it = this->FindIterator(region_start); it != m_memory_block_tree.cend();
              it++) {
             const KMemoryInfo info = it->GetMemoryInfo();
@@ -56,17 +58,19 @@ VAddr KMemoryBlockManager::FindFreeArea(VAddr region_start, size_t region_num_pa
                 continue;
             }
 
-            VAddr area = (info.GetAddress() <= region_start) ? region_start : info.GetAddress();
+            KProcessAddress area =
+                (info.GetAddress() <= GetInteger(region_start)) ? region_start : info.GetAddress();
             area += guard_pages * PageSize;
 
-            const VAddr offset_area = Common::AlignDown(area, alignment) + offset;
+            const KProcessAddress offset_area =
+                Common::AlignDown(GetInteger(area), alignment) + offset;
             area = (area <= offset_area) ? offset_area : offset_area + alignment;
 
-            const VAddr area_end = area + num_pages * PageSize + guard_pages * PageSize;
-            const VAddr area_last = area_end - 1;
+            const KProcessAddress area_end = area + num_pages * PageSize + guard_pages * PageSize;
+            const KProcessAddress area_last = area_end - 1;
 
-            if (info.GetAddress() <= area && area < area_last && area_last <= region_last &&
-                area_last <= info.GetLastAddress()) {
+            if (info.GetAddress() <= GetInteger(area) && area < area_last &&
+                area_last <= region_last && area_last <= info.GetLastAddress()) {
                 return area;
             }
         }
@@ -76,7 +80,7 @@ VAddr KMemoryBlockManager::FindFreeArea(VAddr region_start, size_t region_num_pa
 }
 
 void KMemoryBlockManager::CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator* allocator,
-                                            VAddr address, size_t num_pages) {
+                                            KProcessAddress address, size_t num_pages) {
     // Find the iterator now that we've updated.
     iterator it = this->FindIterator(address);
     if (address != m_start_address) {
@@ -104,18 +108,18 @@ void KMemoryBlockManager::CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator*
     }
 }
 
-void KMemoryBlockManager::Update(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
-                                 size_t num_pages, KMemoryState state, KMemoryPermission perm,
-                                 KMemoryAttribute attr,
+void KMemoryBlockManager::Update(KMemoryBlockManagerUpdateAllocator* allocator,
+                                 KProcessAddress address, size_t num_pages, KMemoryState state,
+                                 KMemoryPermission perm, KMemoryAttribute attr,
                                  KMemoryBlockDisableMergeAttribute set_disable_attr,
                                  KMemoryBlockDisableMergeAttribute clear_disable_attr) {
     // Ensure for auditing that we never end up with an invalid tree.
     KScopedMemoryBlockManagerAuditor auditor(this);
-    ASSERT(Common::IsAligned(address, PageSize));
+    ASSERT(Common::IsAligned(GetInteger(address), PageSize));
     ASSERT((attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) ==
            KMemoryAttribute::None);
 
-    VAddr cur_address = address;
+    KProcessAddress cur_address = address;
     size_t remaining_pages = num_pages;
     iterator it = this->FindIterator(address);
 
@@ -168,17 +172,17 @@ void KMemoryBlockManager::Update(KMemoryBlockManagerUpdateAllocator* allocator,
 }
 
 void KMemoryBlockManager::UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator,
-                                        VAddr address, size_t num_pages, KMemoryState test_state,
-                                        KMemoryPermission test_perm, KMemoryAttribute test_attr,
-                                        KMemoryState state, KMemoryPermission perm,
-                                        KMemoryAttribute attr) {
+                                        KProcessAddress address, size_t num_pages,
+                                        KMemoryState test_state, KMemoryPermission test_perm,
+                                        KMemoryAttribute test_attr, KMemoryState state,
+                                        KMemoryPermission perm, KMemoryAttribute attr) {
     // Ensure for auditing that we never end up with an invalid tree.
     KScopedMemoryBlockManagerAuditor auditor(this);
-    ASSERT(Common::IsAligned(address, PageSize));
+    ASSERT(Common::IsAligned(GetInteger(address), PageSize));
     ASSERT((attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) ==
            KMemoryAttribute::None);
 
-    VAddr cur_address = address;
+    KProcessAddress cur_address = address;
     size_t remaining_pages = num_pages;
     iterator it = this->FindIterator(address);
 
@@ -230,18 +234,18 @@ void KMemoryBlockManager::UpdateIfMatch(KMemoryBlockManagerUpdateAllo
     this->CoalesceForUpdate(allocator, address, num_pages);
 }
 
-void KMemoryBlockManager::UpdateLock(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
-                                     size_t num_pages, MemoryBlockLockFunction lock_func,
-                                     KMemoryPermission perm) {
+void KMemoryBlockManager::UpdateLock(KMemoryBlockManagerUpdateAllocator* allocator,
+                                     KProcessAddress address, size_t num_pages,
+                                     MemoryBlockLockFunction lock_func, KMemoryPermission perm) {
     // Ensure for auditing that we never end up with an invalid tree.
     KScopedMemoryBlockManagerAuditor auditor(this);
-    ASSERT(Common::IsAligned(address, PageSize));
+    ASSERT(Common::IsAligned(GetInteger(address), PageSize));
 
-    VAddr cur_address = address;
+    KProcessAddress cur_address = address;
     size_t remaining_pages = num_pages;
     iterator it = this->FindIterator(address);
 
-    const VAddr end_address = address + (num_pages * PageSize);
+    const KProcessAddress end_address = address + (num_pages * PageSize);
 
     while (remaining_pages > 0) {
         const size_t remaining_size = remaining_pages * PageSize;
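Note: FindFreeArea's candidate math is unchanged by the retype: skip the guard pages, align the candidate down and add the requested offset, and if that stepped backwards past the candidate, advance by one alignment unit. A numeric trace of that adjustment (illustrative values; AlignDown assumes a power-of-two alignment):

#include <cstdint>

constexpr uint64_t AlignDown(uint64_t value, uint64_t align) {
    return value & ~(align - 1);
}

int main() {
    constexpr uint64_t alignment = 0x10000;
    constexpr uint64_t offset = 0x2000;
    constexpr uint64_t area = 0x23000; // candidate after guard pages

    // AlignDown(area) + offset = 0x22000, which is below the candidate,
    // so the search advances one alignment step, exactly as in
    // area = (area <= offset_area) ? offset_area : offset_area + alignment;
    constexpr uint64_t offset_area = AlignDown(area, alignment) + offset;
    constexpr uint64_t chosen = (area <= offset_area) ? offset_area : offset_area + alignment;
    static_assert(offset_area == 0x22000);
    static_assert(chosen == 0x32000);
    return 0;
}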
src/core/hle/kernel/k_memory_block_manager.h
@@ -7,9 +7,9 @@
 #include <functional>
 
 #include "common/common_funcs.h"
-#include "common/common_types.h"
 #include "core/hle/kernel/k_dynamic_resource_manager.h"
 #include "core/hle/kernel/k_memory_block.h"
+#include "core/hle/kernel/k_typed_address.h"
 
 namespace Kernel {
 
@@ -85,9 +85,10 @@ public:
 public:
     KMemoryBlockManager();
 
-    using HostUnmapCallback = std::function<void(VAddr, u64)>;
+    using HostUnmapCallback = std::function<void(Common::ProcessAddress, u64)>;
 
-    Result Initialize(VAddr st, VAddr nd, KMemoryBlockSlabManager* slab_manager);
+    Result Initialize(KProcessAddress st, KProcessAddress nd,
+                      KMemoryBlockSlabManager* slab_manager);
     void Finalize(KMemoryBlockSlabManager* slab_manager, HostUnmapCallback&& host_unmap_callback);
 
     iterator end() {
@@ -100,27 +101,28 @@ public:
         return m_memory_block_tree.cend();
     }
 
-    VAddr FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages,
-                       size_t alignment, size_t offset, size_t guard_pages) const;
+    KProcessAddress FindFreeArea(KProcessAddress region_start, size_t region_num_pages,
+                                 size_t num_pages, size_t alignment, size_t offset,
+                                 size_t guard_pages) const;
 
-    void Update(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, size_t num_pages,
-                KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr,
+    void Update(KMemoryBlockManagerUpdateAllocator* allocator, KProcessAddress address,
+                size_t num_pages, KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr,
                 KMemoryBlockDisableMergeAttribute set_disable_attr,
                 KMemoryBlockDisableMergeAttribute clear_disable_attr);
-    void UpdateLock(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, size_t num_pages,
-                    MemoryBlockLockFunction lock_func, KMemoryPermission perm);
+    void UpdateLock(KMemoryBlockManagerUpdateAllocator* allocator, KProcessAddress address,
+                    size_t num_pages, MemoryBlockLockFunction lock_func, KMemoryPermission perm);
 
-    void UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
+    void UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator, KProcessAddress address,
                        size_t num_pages, KMemoryState test_state, KMemoryPermission test_perm,
                       KMemoryAttribute test_attr, KMemoryState state, KMemoryPermission perm,
                        KMemoryAttribute attr);
 
-    iterator FindIterator(VAddr address) const {
+    iterator FindIterator(KProcessAddress address) const {
         return m_memory_block_tree.find(KMemoryBlock(
             address, 1, KMemoryState::Free, KMemoryPermission::None, KMemoryAttribute::None));
     }
 
-    const KMemoryBlock* FindBlock(VAddr address) const {
+    const KMemoryBlock* FindBlock(KProcessAddress address) const {
         if (const_iterator it = this->FindIterator(address); it != m_memory_block_tree.end()) {
             return std::addressof(*it);
         }
@@ -132,12 +134,12 @@ public:
     bool CheckState() const;
 
 private:
-    void CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
+    void CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator* allocator, KProcessAddress address,
                            size_t num_pages);
 
     MemoryBlockTree m_memory_block_tree;
-    VAddr m_start_address{};
-    VAddr m_end_address{};
+    KProcessAddress m_start_address{};
+    KProcessAddress m_end_address{};
 };
 
 class KScopedMemoryBlockManagerAuditor {
Some files were not shown because too many files have changed in this diff.