gpu: Move flush and invalidate to GPU thread.

This commit is contained in:
bunnei
2019-01-12 01:28:16 -05:00
parent 9799dcdb7f
commit 0bad8394e6
11 changed files with 71 additions and 41 deletions

View File

@@ -46,7 +46,7 @@ void KeplerMemory::ProcessData(u32 data) {
// We have to invalidate the destination region to evict any outdated surfaces from the cache.
// We do this before actually writing the new data because the destination address might contain
// a dirty surface that will have to be written back to memory.
rasterizer.InvalidateRegion(dest_address, sizeof(u32));
Core::System::GetInstance().GPU().InvalidateRegion(dest_address, sizeof(u32));
Memory::Write32(dest_address, data);
Core::System::GetInstance().GPU().Maxwell3D().dirty_flags.OnMemoryWrite();

View File

@@ -87,12 +87,12 @@ void MaxwellDMA::HandleCopy() {
const auto FlushAndInvalidate = [&](u32 src_size, u64 dst_size) {
// TODO(Subv): For now, manually flush the regions until we implement GPU-accelerated
// copying.
rasterizer.FlushRegion(source_cpu, src_size);
Core::System::GetInstance().GPU().FlushRegion(source_cpu, src_size);
// We have to invalidate the destination region to evict any outdated surfaces from the
// cache. We do this before actually writing the new data because the destination address
// might contain a dirty surface that will have to be written back to memory.
rasterizer.InvalidateRegion(dest_cpu, dst_size);
Core::System::GetInstance().GPU().InvalidateRegion(dest_cpu, dst_size);
};
if (regs.exec.is_dst_linear && !regs.exec.is_src_linear) {

View File

@@ -86,11 +86,19 @@ void GPU::SwapBuffers(
}
}
void GPU::WaitUntilIdle(std::function<void()> callback) {
void GPU::FlushRegion(VAddr addr, u64 size) {
if (Settings::values.use_asynchronous_gpu_emulation) {
gpu_thread->WaitUntilIdle(std::move(callback));
gpu_thread->FlushRegion(addr, size);
} else {
callback();
renderer.Rasterizer().FlushRegion(addr, size);
}
}
// Routes a cache-invalidate request for [addr, addr + size) to the correct backend:
// when asynchronous GPU emulation is enabled the request is forwarded to the GPU
// thread object (which queues it), otherwise it is performed synchronously on the
// caller's thread via the renderer's rasterizer.
void GPU::InvalidateRegion(VAddr addr, u64 size) {
if (Settings::values.use_asynchronous_gpu_emulation) {
gpu_thread->InvalidateRegion(addr, size);
} else {
renderer.Rasterizer().InvalidateRegion(addr, size);
}
}

View File

@@ -5,7 +5,6 @@
#pragma once
#include <array>
#include <functional>
#include <memory>
#include <vector>
#include "common/common_types.h"
@@ -165,8 +164,11 @@ public:
void SwapBuffers(
std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer);
/// Waits the caller until the thread is idle, and then calls the callback
void WaitUntilIdle(std::function<void()> callback);
/// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
void FlushRegion(VAddr addr, u64 size);
/// Notify rasterizer that any caches of the specified region should be invalidated
void InvalidateRegion(VAddr addr, u64 size);
private:
std::unique_ptr<Tegra::DmaPusher> dma_pusher;

View File

@@ -39,10 +39,26 @@ static void RunThread(VideoCore::RendererBase& renderer, Tegra::DmaPusher& dma_p
if (is_dma_pending) {
// Process pending DMA pushbuffer commands
std::lock_guard<std::recursive_mutex> lock{state.running_mutex};
std::lock_guard<std::mutex> lock{state.running_mutex};
dma_pusher.DispatchCalls();
}
{
// Cache management
std::lock_guard<std::recursive_mutex> lock{state.cache_mutex};
for (const auto& region : state.flush_regions) {
renderer.Rasterizer().FlushRegion(region.addr, region.size);
}
for (const auto& region : state.invalidate_regions) {
renderer.Rasterizer().InvalidateRegion(region.addr, region.size);
}
state.flush_regions.clear();
state.invalidate_regions.clear();
}
if (is_swapbuffers_pending) {
// Process pending SwapBuffers
renderer.SwapBuffers(state.pending_swapbuffers_config);
@@ -106,10 +122,14 @@ void GPUThread::SwapBuffers(
}
}
void GPUThread::WaitUntilIdle(std::function<void()> callback) {
// Needs to be a recursive mutex, as this can be called by the GPU thread
std::unique_lock<std::recursive_mutex> lock{state.running_mutex};
callback();
// Queues a flush request for [addr, addr + size). The entry is appended under
// cache_mutex and later drained by the GPU thread's run loop, which calls
// Rasterizer().FlushRegion() for each queued region before clearing the list.
// NOTE(review): cache_mutex is recursive — presumably because this path can also
// be reached from the GPU thread itself; confirm against the run-loop callers.
void GPUThread::FlushRegion(VAddr addr, u64 size) {
std::lock_guard<std::recursive_mutex> lock{state.cache_mutex};
state.flush_regions.push_back({addr, size});
}
// Queues an invalidate request for [addr, addr + size). The entry is appended
// under cache_mutex and later drained by the GPU thread's run loop, which calls
// Rasterizer().InvalidateRegion() for each queued region before clearing the list.
void GPUThread::InvalidateRegion(VAddr addr, u64 size) {
std::lock_guard<std::recursive_mutex> lock{state.cache_mutex};
state.invalidate_regions.push_back({addr, size});
}
} // namespace VideoCore

View File

@@ -5,7 +5,6 @@
#pragma once
#include <condition_variable>
#include <functional>
#include <memory>
#include <mutex>
#include <optional>
@@ -29,7 +28,16 @@ struct GPUThreadState final {
std::condition_variable signal_condition;
std::condition_variable running_condition;
std::mutex signal_mutex;
std::recursive_mutex running_mutex;
std::mutex running_mutex;
std::recursive_mutex cache_mutex;
struct MemoryRegion final {
const VAddr addr;
const u64 size;
};
std::vector<MemoryRegion> flush_regions;
std::vector<MemoryRegion> invalidate_regions;
};
class GPUThread final {
@@ -44,8 +52,11 @@ public:
void SwapBuffers(
std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer);
/// Waits the caller until the thread is idle, and then calls the callback
void WaitUntilIdle(std::function<void()> callback);
/// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
void FlushRegion(VAddr addr, u64 size);
/// Notify rasterizer that any caches of the specified region should be invalidated
void InvalidateRegion(VAddr addr, u64 size);
private:
GPUThreadState state;

View File

@@ -30,10 +30,6 @@ public:
/// Notify rasterizer that any caches of the specified region should be invalidated
virtual void InvalidateRegion(VAddr addr, u64 size) = 0;
/// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
/// and invalidated
virtual void FlushAndInvalidateRegion(VAddr addr, u64 size) = 0;
/// Attempt to use a faster method to perform a surface copy
virtual bool AccelerateSurfaceCopy(const Tegra::Engines::Fermi2D::Regs::Surface& src,
const Tegra::Engines::Fermi2D::Regs::Surface& dst) {

View File

@@ -751,24 +751,16 @@ void RasterizerOpenGL::FlushRegion(VAddr addr, u64 size) {
if (Settings::values.use_accurate_gpu_emulation) {
// Only flush if use_accurate_gpu_emulation is enabled, as it incurs a performance hit
Core::System::GetInstance().GPU().WaitUntilIdle(
[this, addr, size]() { res_cache.FlushRegion(addr, size); });
res_cache.FlushRegion(addr, size);
}
}
void RasterizerOpenGL::InvalidateRegion(VAddr addr, u64 size) {
MICROPROFILE_SCOPE(OpenGL_CacheManagement);
Core::System::GetInstance().GPU().WaitUntilIdle([this, addr, size]() {
res_cache.InvalidateRegion(addr, size);
shader_cache.InvalidateRegion(addr, size);
global_cache.InvalidateRegion(addr, size);
buffer_cache.InvalidateRegion(addr, size);
});
}
void RasterizerOpenGL::FlushAndInvalidateRegion(VAddr addr, u64 size) {
FlushRegion(addr, size);
InvalidateRegion(addr, size);
res_cache.InvalidateRegion(addr, size);
shader_cache.InvalidateRegion(addr, size);
global_cache.InvalidateRegion(addr, size);
buffer_cache.InvalidateRegion(addr, size);
}
bool RasterizerOpenGL::AccelerateSurfaceCopy(const Tegra::Engines::Fermi2D::Regs::Surface& src,

View File

@@ -53,7 +53,6 @@ public:
void FlushAll() override;
void FlushRegion(VAddr addr, u64 size) override;
void InvalidateRegion(VAddr addr, u64 size) override;
void FlushAndInvalidateRegion(VAddr addr, u64 size) override;
bool AccelerateSurfaceCopy(const Tegra::Engines::Fermi2D::Regs::Surface& src,
const Tegra::Engines::Fermi2D::Regs::Surface& dst) override;
bool AccelerateFill(const void* config) override;