// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/assert.h"
#include "common/microprofile.h"
#include "common/scope_exit.h"
#include "common/settings.h"
#include "common/thread.h"
#include "core/core.h"
#include "core/frontend/emu_window.h"
#include "video_core/dma_pusher.h"
#include "video_core/gpu.h"
#include "video_core/gpu_thread.h"
#include "video_core/renderer_base.h"
namespace VideoCommon::GPUThread {
/// Runs the GPU thread
2021-09-16 00:04:40 -04:00
static void RunThread(std::stop_token stop_token, Core::System& system,
VideoCore::RendererBase& renderer, Core::Frontend::GraphicsContext& context,
Tegra::DmaPusher& dma_pusher, SynchState& state) {
2020-12-28 10:15:37 -05:00
std::string name = "yuzu:GPU";
MicroProfileOnThreadCreate(name.c_str());
2020-12-29 20:38:14 -05:00
SCOPE_EXIT({ MicroProfileOnThreadExit(); });
2020-12-28 10:15:37 -05:00
Common::SetCurrentThreadName(name.c_str());
Common::SetCurrentThreadPriority(Common::ThreadPriority::High);
system.RegisterHostThread();
auto current_context = context.Acquire();
2021-01-16 21:19:34 -05:00
VideoCore::RasterizerInterface* const rasterizer = renderer.ReadRasterizer();
2020-12-28 10:15:37 -05:00
2021-09-16 00:04:40 -04:00
while (!stop_token.stop_requested()) {
2021-10-26 23:22:08 -04:00
CommandDataContainer next = state.queue.PopWait(stop_token);
2021-09-16 00:04:40 -04:00
if (stop_token.stop_requested()) {
break;
}
2020-12-28 10:15:37 -05:00
if (auto* submit_list = std::get_if<SubmitListCommand>(&next.data)) {
dma_pusher.Push(std::move(submit_list->entries));
dma_pusher.DispatchCalls();
} else if (const auto* data = std::get_if<SwapBuffersCommand>(&next.data)) {
renderer.SwapBuffers(data->framebuffer ? &*data->framebuffer : nullptr);
} else if (std::holds_alternative<OnCommandListEndCommand>(next.data)) {
2021-01-16 21:19:34 -05:00
rasterizer->ReleaseFences();
2020-12-28 10:15:37 -05:00
} else if (std::holds_alternative<GPUTickCommand>(next.data)) {
system.GPU().TickWork();
} else if (const auto* flush = std::get_if<FlushRegionCommand>(&next.data)) {
2021-01-16 21:19:34 -05:00
rasterizer->FlushRegion(flush->addr, flush->size);
2020-12-28 10:15:37 -05:00
} else if (const auto* invalidate = std::get_if<InvalidateRegionCommand>(&next.data)) {
2021-01-16 21:19:34 -05:00
rasterizer->OnCPUWrite(invalidate->addr, invalidate->size);
2020-12-28 10:15:37 -05:00
} else {
UNREACHABLE();
}
state.signaled_fence.store(next.fence);
2021-04-07 14:28:12 -04:00
if (next.block) {
// We have to lock the write_lock to ensure that the condition_variable wait not get a
// race between the check and the lock itself.
2022-04-09 05:30:20 -04:00
std::scoped_lock lk{state.write_lock};
2021-04-07 14:28:12 -04:00
state.cv.notify_all();
}
2020-12-28 10:15:37 -05:00
}
}
2020-12-29 20:38:14 -05:00
ThreadManager::ThreadManager(Core::System& system_, bool is_async_)
: system{system_}, is_async{is_async_} {}
2020-12-28 10:15:37 -05:00
2021-09-16 00:04:40 -04:00
ThreadManager::~ThreadManager() = default;
2020-12-28 10:15:37 -05:00
void ThreadManager::StartThread(VideoCore::RendererBase& renderer,
Core::Frontend::GraphicsContext& context,
2021-03-01 05:12:51 -05:00
Tegra::DmaPusher& dma_pusher) {
2021-01-16 21:19:34 -05:00
rasterizer = renderer.ReadRasterizer();
2021-09-16 00:04:40 -04:00
thread = std::jthread(RunThread, std::ref(system), std::ref(renderer), std::ref(context),
std::ref(dma_pusher), std::ref(state));
2020-12-28 10:15:37 -05:00
}
/// Enqueues a command list to be pushed through the DMA pusher on the GPU thread.
void ThreadManager::SubmitList(Tegra::CommandList&& entries) {
    PushCommand(SubmitListCommand{std::move(entries)});
}
/// Enqueues a buffer swap; a null framebuffer config is forwarded as an empty optional.
void ThreadManager::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) {
    std::optional<Tegra::FramebufferConfig> framebuffer_copy;
    if (framebuffer != nullptr) {
        framebuffer_copy = *framebuffer;
    }
    PushCommand(SwapBuffersCommand(std::move(framebuffer_copy)));
}
// Flushes the GPU-side caches for [addr, addr + size). In synchronous mode the
// flush command is executed inline via the blocking PushCommand path; in async
// mode it is only honored at the highest GPU accuracy level.
void ThreadManager::FlushRegion(VAddr addr, u64 size) {
    if (!is_async) {
        // Always flush with synchronous GPU mode
        PushCommand(FlushRegionCommand(addr, size));
        return;
    }
    if (!Settings::IsGPULevelExtreme()) {
        // Lower accuracy levels skip async flushes entirely.
        return;
    }
    // Register the flush request, then block on a GPU tick until the GPU thread
    // has advanced past the requested fence.
    auto& gpu = system.GPU();
    u64 fence = gpu.RequestFlush(addr, size);
    PushCommand(GPUTickCommand(), true);
    ASSERT(fence <= gpu.CurrentFlushRequestFence());
}
// Invalidation is performed immediately on the calling thread via the cached
// rasterizer rather than being queued to the GPU thread.
void ThreadManager::InvalidateRegion(VAddr addr, u64 size) {
    rasterizer->OnCPUWrite(addr, size);
}
void ThreadManager::FlushAndInvalidateRegion(VAddr addr, u64 size) {
    // Skip flush on async mode, as FlushAndInvalidateRegion is not used for anything too important
    rasterizer->OnCPUWrite(addr, size);
}
/// Notifies the GPU thread that the current command list has ended, so it can
/// release pending fences.
void ThreadManager::OnCommandListEnd() {
    PushCommand(OnCommandListEndCommand{});
}
2021-04-07 14:28:12 -04:00
u64 ThreadManager::PushCommand(CommandData&& command_data, bool block) {
2020-12-29 20:38:14 -05:00
if (!is_async) {
// In synchronous GPU mode, block the caller until the command has executed
2021-04-07 14:28:12 -04:00
block = true;
}
2021-04-07 17:06:52 -04:00
std::unique_lock lk(state.write_lock);
2021-04-07 14:28:12 -04:00
const u64 fence{++state.last_fence};
2021-10-26 23:22:08 -04:00
state.queue.Push(CommandDataContainer(std::move(command_data), fence, block));
2021-04-07 14:28:12 -04:00
if (block) {
2021-09-16 00:04:40 -04:00
state.cv.wait(lk, thread.get_stop_token(), [this, fence] {
return fence <= state.signaled_fence.load(std::memory_order_relaxed);
2021-04-07 14:28:12 -04:00
});
2020-12-29 20:38:14 -05:00
}
2020-12-28 10:15:37 -05:00
return fence;
}
} // namespace VideoCommon::GPUThread