gpu: Move flush and invalidate to GPU thread.
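
This change replaces direct rasterizer flush/invalidate calls with new GPU::FlushRegion and GPU::InvalidateRegion entry points: with synchronous emulation they forward straight to the rasterizer, and with asynchronous emulation they queue the region so the GPU thread can service it. As a rough orientation before the hunks, here is a minimal, self-contained sketch of that pattern; the types below are simplified stand-ins, not the actual yuzu classes:

// Minimal sketch of the flush/invalidate routing this commit introduces.
// Rasterizer, GpuThread and the free FlushRegion below are simplified
// stand-ins, not the actual yuzu classes.
#include <cstdint>
#include <mutex>
#include <vector>

using u64 = std::uint64_t;
using VAddr = u64;

struct Rasterizer {
    void FlushRegion(VAddr, u64) { /* write dirty cached surfaces back to memory */ }
    void InvalidateRegion(VAddr, u64) { /* evict cached surfaces for the region */ }
};

struct GpuThread {
    struct MemoryRegion {
        VAddr addr;
        u64 size;
    };

    // Producer side: called from the CPU thread, records the request and returns.
    void FlushRegion(VAddr addr, u64 size) {
        std::lock_guard<std::mutex> lock{cache_mutex};
        flush_regions.push_back({addr, size});
    }

    // Consumer side: run on the GPU thread, applies and clears queued requests.
    void DrainCacheRequests(Rasterizer& rasterizer) {
        std::lock_guard<std::mutex> lock{cache_mutex};
        for (const auto& region : flush_regions) {
            rasterizer.FlushRegion(region.addr, region.size);
        }
        flush_regions.clear();
    }

    std::mutex cache_mutex;
    std::vector<MemoryRegion> flush_regions;
};

// Stand-in for GPU::FlushRegion: queue in asynchronous mode, call straight
// through to the rasterizer in synchronous mode.
void FlushRegion(bool use_asynchronous_gpu_emulation, GpuThread& gpu_thread,
                 Rasterizer& rasterizer, VAddr addr, u64 size) {
    if (use_asynchronous_gpu_emulation) {
        gpu_thread.FlushRegion(addr, size);
    } else {
        rasterizer.FlushRegion(addr, size);
    }
}

int main() {
    Rasterizer rasterizer;
    GpuThread gpu_thread;
    FlushRegion(true, gpu_thread, rasterizer, 0x1000, 0x100);  // queued for the GPU thread
    gpu_thread.DrainCacheRequests(rasterizer);                 // serviced on the GPU thread
    FlushRegion(false, gpu_thread, rasterizer, 0x1000, 0x100); // serviced immediately
    return 0;
}

The drain step in the sketch corresponds to the new "Cache management" block added to the GPU thread's run loop further down.
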
@@ -178,7 +178,8 @@ u32 nvhost_as_gpu::UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& ou
     auto& gpu = system_instance.GPU();
     auto cpu_addr = gpu.MemoryManager().GpuToCpuAddress(params.offset);
     ASSERT(cpu_addr);
-    system_instance.Renderer().Rasterizer().FlushAndInvalidateRegion(*cpu_addr, itr->second.size);
+    gpu.FlushRegion(*cpu_addr, itr->second.size);
+    gpu.InvalidateRegion(*cpu_addr, itr->second.size);

     params.offset = gpu.MemoryManager().UnmapBuffer(params.offset, itr->second.size);

@@ -351,16 +351,17 @@ void RasterizerFlushVirtualRegion(VAddr start, u64 size, FlushMode mode) {
         const VAddr overlap_end = std::min(end, region_end);
         const VAddr overlap_size = overlap_end - overlap_start;

-        auto& rasterizer = system_instance.Renderer().Rasterizer();
+        auto& gpu = system_instance.GPU();
         switch (mode) {
         case FlushMode::Flush:
-            rasterizer.FlushRegion(overlap_start, overlap_size);
+            gpu.FlushRegion(overlap_start, overlap_size);
            break;
         case FlushMode::Invalidate:
-            rasterizer.InvalidateRegion(overlap_start, overlap_size);
+            gpu.InvalidateRegion(overlap_start, overlap_size);
            break;
         case FlushMode::FlushAndInvalidate:
-            rasterizer.FlushAndInvalidateRegion(overlap_start, overlap_size);
+            gpu.FlushRegion(overlap_start, overlap_size);
+            gpu.InvalidateRegion(overlap_start, overlap_size);
            break;
         }
     };

@@ -46,7 +46,7 @@ void KeplerMemory::ProcessData(u32 data) {
     // We have to invalidate the destination region to evict any outdated surfaces from the cache.
     // We do this before actually writing the new data because the destination address might contain
     // a dirty surface that will have to be written back to memory.
-    rasterizer.InvalidateRegion(dest_address, sizeof(u32));
+    Core::System::GetInstance().GPU().InvalidateRegion(dest_address, sizeof(u32));

     Memory::Write32(dest_address, data);
     Core::System::GetInstance().GPU().Maxwell3D().dirty_flags.OnMemoryWrite();

@@ -87,12 +87,12 @@ void MaxwellDMA::HandleCopy() {
     const auto FlushAndInvalidate = [&](u32 src_size, u64 dst_size) {
         // TODO(Subv): For now, manually flush the regions until we implement GPU-accelerated
         // copying.
-        rasterizer.FlushRegion(source_cpu, src_size);
+        Core::System::GetInstance().GPU().FlushRegion(source_cpu, src_size);

         // We have to invalidate the destination region to evict any outdated surfaces from the
         // cache. We do this before actually writing the new data because the destination address
         // might contain a dirty surface that will have to be written back to memory.
-        rasterizer.InvalidateRegion(dest_cpu, dst_size);
+        Core::System::GetInstance().GPU().InvalidateRegion(dest_cpu, dst_size);
     };

     if (regs.exec.is_dst_linear && !regs.exec.is_src_linear) {

@@ -86,11 +86,19 @@ void GPU::SwapBuffers(
     }
 }

-void GPU::WaitUntilIdle(std::function<void()> callback) {
+void GPU::FlushRegion(VAddr addr, u64 size) {
     if (Settings::values.use_asynchronous_gpu_emulation) {
-        gpu_thread->WaitUntilIdle(std::move(callback));
+        gpu_thread->FlushRegion(addr, size);
     } else {
-        callback();
+        renderer.Rasterizer().FlushRegion(addr, size);
+    }
+}
+
+void GPU::InvalidateRegion(VAddr addr, u64 size) {
+    if (Settings::values.use_asynchronous_gpu_emulation) {
+        gpu_thread->InvalidateRegion(addr, size);
+    } else {
+        renderer.Rasterizer().InvalidateRegion(addr, size);
     }
 }

@@ -5,7 +5,6 @@
 #pragma once

 #include <array>
-#include <functional>
 #include <memory>
 #include <vector>
 #include "common/common_types.h"

@@ -165,8 +164,11 @@ public:
     void SwapBuffers(
         std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer);

-    /// Waits the caller until the thread is idle, and then calls the callback
-    void WaitUntilIdle(std::function<void()> callback);
+    /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
+    void FlushRegion(VAddr addr, u64 size);
+
+    /// Notify rasterizer that any caches of the specified region should be invalidated
+    void InvalidateRegion(VAddr addr, u64 size);

 private:
     std::unique_ptr<Tegra::DmaPusher> dma_pusher;

@@ -39,10 +39,26 @@ static void RunThread(VideoCore::RendererBase& renderer, Tegra::DmaPusher& dma_p

         if (is_dma_pending) {
             // Process pending DMA pushbuffer commands
-            std::lock_guard<std::recursive_mutex> lock{state.running_mutex};
+            std::lock_guard<std::mutex> lock{state.running_mutex};
             dma_pusher.DispatchCalls();
         }

+        {
+            // Cache management
+            std::lock_guard<std::recursive_mutex> lock{state.cache_mutex};
+
+            for (const auto& region : state.flush_regions) {
+                renderer.Rasterizer().FlushRegion(region.addr, region.size);
+            }
+
+            for (const auto& region : state.invalidate_regions) {
+                renderer.Rasterizer().InvalidateRegion(region.addr, region.size);
+            }
+
+            state.flush_regions.clear();
+            state.invalidate_regions.clear();
+        }
+
         if (is_swapbuffers_pending) {
             // Process pending SwapBuffers
             renderer.SwapBuffers(state.pending_swapbuffers_config);

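Note the behavioral shift in the asynchronous path: FlushRegion and InvalidateRegion now record the request under cache_mutex and return immediately, and the run loop above services it on its next pass, whereas the removed WaitUntilIdle blocked the caller until the GPU thread went idle. Queued flushes are drained before queued invalidations, which keeps the flush-before-invalidate ordering that a single FlushAndInvalidateRegion call used to provide.
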
@@ -106,10 +122,14 @@ void GPUThread::SwapBuffers(
     }
 }

-void GPUThread::WaitUntilIdle(std::function<void()> callback) {
-    // Needs to be a recursive mutex, as this can be called by the GPU thread
-    std::unique_lock<std::recursive_mutex> lock{state.running_mutex};
-    callback();
+void GPUThread::FlushRegion(VAddr addr, u64 size) {
+    std::lock_guard<std::recursive_mutex> lock{state.cache_mutex};
+    state.flush_regions.push_back({addr, size});
+}
+
+void GPUThread::InvalidateRegion(VAddr addr, u64 size) {
+    std::lock_guard<std::recursive_mutex> lock{state.cache_mutex};
+    state.invalidate_regions.push_back({addr, size});
 }

 } // namespace VideoCore

@@ -5,7 +5,6 @@
 #pragma once

 #include <condition_variable>
-#include <functional>
 #include <memory>
 #include <mutex>
 #include <optional>

@@ -29,7 +28,16 @@ struct GPUThreadState final {
     std::condition_variable signal_condition;
     std::condition_variable running_condition;
     std::mutex signal_mutex;
-    std::recursive_mutex running_mutex;
+    std::mutex running_mutex;
+    std::recursive_mutex cache_mutex;
+
+    struct MemoryRegion final {
+        const VAddr addr;
+        const u64 size;
+    };
+
+    std::vector<MemoryRegion> flush_regions;
+    std::vector<MemoryRegion> invalidate_regions;
 };

 class GPUThread final {

@@ -44,8 +52,11 @@ public:
     void SwapBuffers(
         std::optional<std::reference_wrapper<const Tegra::FramebufferConfig>> framebuffer);

-    /// Waits the caller until the thread is idle, and then calls the callback
-    void WaitUntilIdle(std::function<void()> callback);
+    /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
+    void FlushRegion(VAddr addr, u64 size);
+
+    /// Notify rasterizer that any caches of the specified region should be invalidated
+    void InvalidateRegion(VAddr addr, u64 size);

 private:
     GPUThreadState state;

@@ -30,10 +30,6 @@ public:
     /// Notify rasterizer that any caches of the specified region should be invalidated
     virtual void InvalidateRegion(VAddr addr, u64 size) = 0;

-    /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
-    /// and invalidated
-    virtual void FlushAndInvalidateRegion(VAddr addr, u64 size) = 0;
-
     /// Attempt to use a faster method to perform a surface copy
     virtual bool AccelerateSurfaceCopy(const Tegra::Engines::Fermi2D::Regs::Surface& src,
                                        const Tegra::Engines::Fermi2D::Regs::Surface& dst) {

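With FlushAndInvalidateRegion dropped from the rasterizer interface here (and from the OpenGL rasterizer below), the call sites that used it, such as UnmapBuffer and RasterizerFlushVirtualRegion in the first two hunks, now issue FlushRegion followed by InvalidateRegion.
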
@@ -751,24 +751,16 @@ void RasterizerOpenGL::FlushRegion(VAddr addr, u64 size) {

     if (Settings::values.use_accurate_gpu_emulation) {
         // Only flush if use_accurate_gpu_emulation is enabled, as it incurs a performance hit
-        Core::System::GetInstance().GPU().WaitUntilIdle(
-            [this, addr, size]() { res_cache.FlushRegion(addr, size); });
+        res_cache.FlushRegion(addr, size);
     }
 }

 void RasterizerOpenGL::InvalidateRegion(VAddr addr, u64 size) {
     MICROPROFILE_SCOPE(OpenGL_CacheManagement);
-    Core::System::GetInstance().GPU().WaitUntilIdle([this, addr, size]() {
-        res_cache.InvalidateRegion(addr, size);
-        shader_cache.InvalidateRegion(addr, size);
-        global_cache.InvalidateRegion(addr, size);
-        buffer_cache.InvalidateRegion(addr, size);
-    });
-}
-
-void RasterizerOpenGL::FlushAndInvalidateRegion(VAddr addr, u64 size) {
-    FlushRegion(addr, size);
-    InvalidateRegion(addr, size);
+    res_cache.InvalidateRegion(addr, size);
+    shader_cache.InvalidateRegion(addr, size);
+    global_cache.InvalidateRegion(addr, size);
+    buffer_cache.InvalidateRegion(addr, size);
 }

 bool RasterizerOpenGL::AccelerateSurfaceCopy(const Tegra::Engines::Fermi2D::Regs::Surface& src,

@@ -53,7 +53,6 @@ public:
     void FlushAll() override;
     void FlushRegion(VAddr addr, u64 size) override;
    void InvalidateRegion(VAddr addr, u64 size) override;
-    void FlushAndInvalidateRegion(VAddr addr, u64 size) override;
     bool AccelerateSurfaceCopy(const Tegra::Engines::Fermi2D::Regs::Surface& src,
                                const Tegra::Engines::Fermi2D::Regs::Surface& dst) override;
     bool AccelerateFill(const void* config) override;