Mirror of https://github.com/pineappleEA/pineapple-src.git (synced 2024-11-25 05:48:26 -05:00)

early-access version 4100

commit 8b3524268f
parent ddb8d1a149
@@ -322,6 +322,10 @@ if (ARCHITECTURE_x86 OR ARCHITECTURE_x86_64)
     find_package(xbyak 6 CONFIG)
 endif()

+if (ARCHITECTURE_arm64)
+    find_package(oaknut 2.0.1 CONFIG)
+endif()
+
 if (ARCHITECTURE_x86_64 OR ARCHITECTURE_arm64)
     find_package(dynarmic 6.4.0 CONFIG)
 endif()
@@ -1,7 +1,7 @@
 yuzu emulator early access
 =============

-This is the source code for early-access 4099.
+This is the source code for early-access 4100.

 ## Legal Notice

externals/CMakeLists.txt (vendored): 5 changes
@@ -14,16 +14,17 @@ set(BUILD_SHARED_LIBS OFF)
 # Skip install rules for all externals
 set_directory_properties(PROPERTIES EXCLUDE_FROM_ALL ON)

-# xbyak
+# Xbyak (also used by Dynarmic, so needs to be added first)
 if ((ARCHITECTURE_x86 OR ARCHITECTURE_x86_64) AND NOT TARGET xbyak::xbyak)
     add_subdirectory(xbyak)
 endif()

-# Dynarmic
+# Oaknut (also used by Dynarmic, so needs to be added first)
 if (ARCHITECTURE_arm64 AND NOT TARGET merry::oaknut)
     add_subdirectory(oaknut)
 endif()

+# Dynarmic
 if ((ARCHITECTURE_x86_64 OR ARCHITECTURE_arm64) AND NOT TARGET dynarmic::dynarmic)
     set(DYNARMIC_IGNORE_ASSERTS ON)
     add_subdirectory(dynarmic)
@@ -261,7 +261,7 @@ object NativeLibrary {
     /**
      * Begins emulation.
      */
-    external fun run(path: String?, programIndex: Int = 0)
+    external fun run(path: String?, programIndex: Int, frontendInitiated: Boolean)

     // Surface Handling
     external fun surfaceChanged(surf: Surface?)
@@ -927,7 +927,7 @@ class EmulationFragment : Fragment(), SurfaceHolder.Callback {
                 emulationThread.join()
                 emulationThread = Thread({
                     Log.debug("[EmulationFragment] Starting emulation thread.")
-                    NativeLibrary.run(gamePath, programIndex)
+                    NativeLibrary.run(gamePath, programIndex, false)
                 }, "NativeEmulation")
                 emulationThread.start()
             }
@@ -981,7 +981,7 @@ class EmulationFragment : Fragment(), SurfaceHolder.Callback {
             State.STOPPED -> {
                 emulationThread = Thread({
                     Log.debug("[EmulationFragment] Starting emulation thread.")
-                    NativeLibrary.run(gamePath, programIndex)
+                    NativeLibrary.run(gamePath, programIndex, true)
                 }, "NativeEmulation")
                 emulationThread.start()
             }
@@ -219,7 +219,8 @@ void EmulationSession::SetAppletId(int applet_id) {
 }

 Core::SystemResultStatus EmulationSession::InitializeEmulation(const std::string& filepath,
-                                                               const std::size_t program_index) {
+                                                               const std::size_t program_index,
+                                                               const bool frontend_initiated) {
     std::scoped_lock lock(m_mutex);

     // Create the render window.
@@ -251,6 +252,8 @@ Core::SystemResultStatus EmulationSession::InitializeEmulation(const std::string
     // Load the ROM.
     Service::AM::FrontendAppletParameters params{
         .applet_id = static_cast<Service::AM::AppletId>(m_applet_id),
+        .launch_type = frontend_initiated ? Service::AM::LaunchType::FrontendInitiated
+                                          : Service::AM::LaunchType::ApplicationInitiated,
         .program_index = static_cast<s32>(program_index),
     };
     m_load_result = m_system.Load(EmulationSession::GetInstance().Window(), filepath, params);
@@ -447,7 +450,8 @@ u64 EmulationSession::GetProgramId(JNIEnv* env, jstring jprogramId) {
 }

 static Core::SystemResultStatus RunEmulation(const std::string& filepath,
-                                             const size_t program_index = 0) {
+                                             const size_t program_index,
+                                             const bool frontend_initiated) {
     MicroProfileOnThreadCreate("EmuThread");
     SCOPE_EXIT({ MicroProfileShutdown(); });

@@ -460,7 +464,8 @@ static Core::SystemResultStatus RunEmulation(const std::string& filepath,

     SCOPE_EXIT({ EmulationSession::GetInstance().ShutdownEmulation(); });

-    jconst result = EmulationSession::GetInstance().InitializeEmulation(filepath, program_index);
+    jconst result = EmulationSession::GetInstance().InitializeEmulation(filepath, program_index,
+                                                                        frontend_initiated);
     if (result != Core::SystemResultStatus::Success) {
         return result;
     }
@@ -757,10 +762,12 @@ void Java_org_yuzu_yuzu_1emu_NativeLibrary_logSettings(JNIEnv* env, jobject jobj
 }

 void Java_org_yuzu_yuzu_1emu_NativeLibrary_run(JNIEnv* env, jobject jobj, jstring j_path,
-                                               jint j_program_index) {
+                                               jint j_program_index,
+                                               jboolean j_frontend_initiated) {
     const std::string path = GetJString(env, j_path);

-    const Core::SystemResultStatus result{RunEmulation(path, j_program_index)};
+    const Core::SystemResultStatus result{
+        RunEmulation(path, j_program_index, j_frontend_initiated)};
     if (result != Core::SystemResultStatus::Success) {
         env->CallStaticVoidMethod(IDCache::GetNativeLibraryClass(),
                                   IDCache::GetExitEmulationActivity(), static_cast<int>(result));
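The hunks above thread a single boolean from the Android frontend down to the applet launch parameters. A minimal standalone sketch of that pattern, using illustrative types rather than yuzu's real Service::AM definitions:

// Sketch only: LaunchType, FrontendAppletParameters and InitializeEmulation here
// are simplified stand-ins for the real yuzu types shown in the diff.
#include <cstdio>
#include <string>

enum class LaunchType { FrontendInitiated, ApplicationInitiated };

struct FrontendAppletParameters {
    LaunchType launch_type;
    int program_index;
};

static void InitializeEmulation(const std::string& path, int program_index,
                                bool frontend_initiated) {
    // The flag only selects the launch type; everything else is passed through.
    const FrontendAppletParameters params{
        .launch_type = frontend_initiated ? LaunchType::FrontendInitiated
                                          : LaunchType::ApplicationInitiated,
        .program_index = program_index,
    };
    std::printf("%s: launch_type=%d program_index=%d\n", path.c_str(),
                static_cast<int>(params.launch_type), params.program_index);
}

int main() {
    InitializeEmulation("game.nsp", 0, /*frontend_initiated=*/true);
    return 0;
}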
@@ -47,7 +47,8 @@ public:
     void InitializeSystem(bool reload);
     void SetAppletId(int applet_id);
     Core::SystemResultStatus InitializeEmulation(const std::string& filepath,
-                                                 const std::size_t program_index = 0);
+                                                 const std::size_t program_index,
+                                                 const bool frontend_initiated);

     bool IsHandheldOnly();
     void SetDeviceType([[maybe_unused]] int index, int type);
@@ -122,14 +122,14 @@ struct RequestLayout {
     u32 domain_interface_count;
 };

-template <ArgumentType Type1, ArgumentType Type2, typename MethodArguments, size_t PrevAlign = 1, size_t DataOffset = 0, size_t ArgIndex = 0>
-constexpr u32 GetArgumentRawDataSize() {
+template <typename MethodArguments, size_t PrevAlign = 1, size_t DataOffset = 0, size_t ArgIndex = 0>
+constexpr u32 GetInRawDataSize() {
     if constexpr (ArgIndex >= std::tuple_size_v<MethodArguments>) {
         return static_cast<u32>(DataOffset);
     } else {
         using ArgType = std::tuple_element_t<ArgIndex, MethodArguments>;

-        if constexpr (ArgumentTraits<ArgType>::Type == Type1 || ArgumentTraits<ArgType>::Type == Type2) {
+        if constexpr (ArgumentTraits<ArgType>::Type == ArgumentType::InData || ArgumentTraits<ArgType>::Type == ArgumentType::InProcessId) {
             constexpr size_t ArgAlign = alignof(ArgType);
             constexpr size_t ArgSize = sizeof(ArgType);

@@ -138,9 +138,33 @@ constexpr u32 GetArgumentRawDataSize() {
             constexpr size_t ArgOffset = Common::AlignUp(DataOffset, ArgAlign);
             constexpr size_t ArgEnd = ArgOffset + ArgSize;

-            return GetArgumentRawDataSize<Type1, Type2, MethodArguments, ArgAlign, ArgEnd, ArgIndex + 1>();
+            return GetInRawDataSize<MethodArguments, ArgAlign, ArgEnd, ArgIndex + 1>();
         } else {
-            return GetArgumentRawDataSize<Type1, Type2, MethodArguments, PrevAlign, DataOffset, ArgIndex + 1>();
+            return GetInRawDataSize<MethodArguments, PrevAlign, DataOffset, ArgIndex + 1>();
+        }
+    }
+}
+
+template <typename MethodArguments, size_t PrevAlign = 1, size_t DataOffset = 0, size_t ArgIndex = 0>
+constexpr u32 GetOutRawDataSize() {
+    if constexpr (ArgIndex >= std::tuple_size_v<MethodArguments>) {
+        return static_cast<u32>(DataOffset);
+    } else {
+        using ArgType = std::tuple_element_t<ArgIndex, MethodArguments>;
+
+        if constexpr (ArgumentTraits<ArgType>::Type == ArgumentType::OutData) {
+            using RawArgType = typename ArgType::Type;
+            constexpr size_t ArgAlign = alignof(RawArgType);
+            constexpr size_t ArgSize = sizeof(RawArgType);
+
+            static_assert(PrevAlign <= ArgAlign, "Output argument is not ordered by alignment");
+
+            constexpr size_t ArgOffset = Common::AlignUp(DataOffset, ArgAlign);
+            constexpr size_t ArgEnd = ArgOffset + ArgSize;
+
+            return GetOutRawDataSize<MethodArguments, ArgAlign, ArgEnd, ArgIndex + 1>();
+        } else {
+            return GetOutRawDataSize<MethodArguments, PrevAlign, DataOffset, ArgIndex + 1>();
         }
     }
 }
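GetInRawDataSize and GetOutRawDataSize both walk the method's argument tuple at compile time, aligning each raw-data member to its natural alignment and accumulating the end offset as the payload size. A self-contained sketch of that recursion, with simplified names rather than yuzu's ArgumentTraits machinery:

// Sketch only: no ArgumentTraits filtering, just the align-and-accumulate walk.
#include <cstddef>
#include <cstdint>
#include <tuple>

constexpr std::size_t AlignUp(std::size_t value, std::size_t align) {
    return (value + align - 1) / align * align;
}

// Walk the argument tuple, align every member, and return the final end offset.
template <typename Tuple, std::size_t DataOffset = 0, std::size_t ArgIndex = 0>
constexpr std::uint32_t GetRawDataSize() {
    if constexpr (ArgIndex >= std::tuple_size_v<Tuple>) {
        return static_cast<std::uint32_t>(DataOffset);
    } else {
        using ArgType = std::tuple_element_t<ArgIndex, Tuple>;
        constexpr std::size_t ArgOffset = AlignUp(DataOffset, alignof(ArgType));
        constexpr std::size_t ArgEnd = ArgOffset + sizeof(ArgType);
        return GetRawDataSize<Tuple, ArgEnd, ArgIndex + 1>();
    }
}

// u32 at 0..4, u64 aligned up to 8..16, u8 at 16..17 -> 17 bytes of raw data.
static_assert(GetRawDataSize<std::tuple<std::uint32_t, std::uint64_t, std::uint8_t>>() == 17);

int main() {}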
@@ -165,7 +189,7 @@ constexpr RequestLayout GetNonDomainReplyInLayout() {
     return RequestLayout{
         .copy_handle_count = GetArgumentTypeCount<ArgumentType::InCopyHandle, MethodArguments>(),
         .move_handle_count = 0,
-        .cmif_raw_data_size = GetArgumentRawDataSize<ArgumentType::InData, ArgumentType::InProcessId, MethodArguments>(),
+        .cmif_raw_data_size = GetInRawDataSize<MethodArguments>(),
         .domain_interface_count = 0,
     };
 }
@@ -175,7 +199,7 @@ constexpr RequestLayout GetDomainReplyInLayout() {
     return RequestLayout{
         .copy_handle_count = GetArgumentTypeCount<ArgumentType::InCopyHandle, MethodArguments>(),
         .move_handle_count = 0,
-        .cmif_raw_data_size = GetArgumentRawDataSize<ArgumentType::InData, ArgumentType::InProcessId, MethodArguments>(),
+        .cmif_raw_data_size = GetInRawDataSize<MethodArguments>(),
         .domain_interface_count = GetArgumentTypeCount<ArgumentType::InInterface, MethodArguments>(),
     };
 }
@@ -185,7 +209,7 @@ constexpr RequestLayout GetNonDomainReplyOutLayout() {
     return RequestLayout{
         .copy_handle_count = GetArgumentTypeCount<ArgumentType::OutCopyHandle, MethodArguments>(),
         .move_handle_count = GetArgumentTypeCount<ArgumentType::OutMoveHandle, MethodArguments>() + GetArgumentTypeCount<ArgumentType::OutInterface, MethodArguments>(),
-        .cmif_raw_data_size = GetArgumentRawDataSize<ArgumentType::OutData, ArgumentType::OutData, MethodArguments>(),
+        .cmif_raw_data_size = GetOutRawDataSize<MethodArguments>(),
         .domain_interface_count = 0,
     };
 }
@@ -195,7 +219,7 @@ constexpr RequestLayout GetDomainReplyOutLayout() {
     return RequestLayout{
         .copy_handle_count = GetArgumentTypeCount<ArgumentType::OutCopyHandle, MethodArguments>(),
         .move_handle_count = GetArgumentTypeCount<ArgumentType::OutMoveHandle, MethodArguments>(),
-        .cmif_raw_data_size = GetArgumentRawDataSize<ArgumentType::OutData, ArgumentType::OutData, MethodArguments>(),
+        .cmif_raw_data_size = GetOutRawDataSize<MethodArguments>(),
         .domain_interface_count = GetArgumentTypeCount<ArgumentType::OutInterface, MethodArguments>(),
     };
 }
@@ -337,13 +361,15 @@ void WriteOutArgument(bool is_domain, CallArguments& args, u8* raw_data, HLERequ
         using ArgType = std::tuple_element_t<ArgIndex, MethodArguments>;

         if constexpr (ArgumentTraits<ArgType>::Type == ArgumentType::OutData) {
-            constexpr size_t ArgAlign = alignof(ArgType);
-            constexpr size_t ArgSize = sizeof(ArgType);
+            using RawArgType = decltype(std::get<ArgIndex>(args).raw);
+            constexpr size_t ArgAlign = alignof(RawArgType);
+            constexpr size_t ArgSize = sizeof(RawArgType);

             static_assert(PrevAlign <= ArgAlign, "Output argument is not ordered by alignment");
             static_assert(!RawDataFinished, "All output interface arguments must appear after raw data");
             static_assert(!std::is_pointer_v<ArgType>, "Output raw data must not be a pointer");
-            static_assert(std::is_trivially_copyable_v<decltype(std::get<ArgIndex>(args).raw)>, "Output raw data must be trivially copyable");
+            static_assert(!std::is_pointer_v<RawArgType>, "Output raw data must not be a pointer");
+            static_assert(std::is_trivially_copyable_v<RawArgType>, "Output raw data must be trivially copyable");

             constexpr size_t ArgOffset = Common::AlignUp(DataOffset, ArgAlign);
             constexpr size_t ArgEnd = ArgOffset + ArgSize;
@@ -83,7 +83,9 @@ SessionId Container::OpenSession(Kernel::KProcess* process) {

         // Check if this memory block is heap.
         if (svc_mem_info.state == Kernel::Svc::MemoryState::Normal) {
-            if (svc_mem_info.size > region_size) {
+            if (region_start + region_size == svc_mem_info.base_address) {
+                region_size += svc_mem_info.size;
+            } else if (svc_mem_info.size > region_size) {
                 region_size = svc_mem_info.size;
                 region_start = svc_mem_info.base_address;
             }
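The new branch merges heap blocks that are contiguous in the address space instead of only remembering the single largest block. A rough standalone sketch of that selection logic (MemInfo and PickHeapRegion are illustrative stand-ins, not the real Kernel::Svc types):

#include <cstdint>
#include <utility>
#include <vector>

struct MemInfo {
    std::uint64_t base_address;
    std::uint64_t size;
    bool is_heap;
};

// Returns {start, size} of the selected heap region.
static std::pair<std::uint64_t, std::uint64_t> PickHeapRegion(const std::vector<MemInfo>& blocks) {
    std::uint64_t region_start = 0;
    std::uint64_t region_size = 0;
    for (const auto& info : blocks) {
        if (!info.is_heap) {
            continue;
        }
        if (region_start + region_size == info.base_address) {
            // Extend the current run when the next heap block is contiguous.
            region_size += info.size;
        } else if (info.size > region_size) {
            // Otherwise keep the largest block seen so far (the old behaviour).
            region_size = info.size;
            region_start = info.base_address;
        }
    }
    return {region_start, region_size};
}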
@@ -24,6 +24,7 @@ enum class Errno : u32 {
     CONNRESET = 104,
     NOTCONN = 107,
     TIMEDOUT = 110,
+    CONNREFUSED = 111,
     INPROGRESS = 115,
 };

@@ -25,6 +25,8 @@ Errno Translate(Network::Errno value) {
         return Errno::MFILE;
     case Network::Errno::PIPE:
         return Errno::PIPE;
+    case Network::Errno::CONNREFUSED:
+        return Errno::CONNREFUSED;
     case Network::Errno::NOTCONN:
         return Errno::NOTCONN;
     case Network::Errno::TIMEDOUT:
@@ -693,20 +693,23 @@ std::pair<SocketBase::AcceptResult, Errno> Socket::Accept() {
     sockaddr_in addr;
     socklen_t addrlen = sizeof(addr);

-    std::vector<WSAPOLLFD> host_pollfds{
-        WSAPOLLFD{fd, POLLIN, 0},
-        WSAPOLLFD{GetInterruptSocket(), POLLIN, 0},
-    };
+    const bool wait_for_accept = !is_non_blocking;
+    if (wait_for_accept) {
+        std::vector<WSAPOLLFD> host_pollfds{
+            WSAPOLLFD{fd, POLLIN, 0},
+            WSAPOLLFD{GetInterruptSocket(), POLLIN, 0},
+        };

         while (true) {
             const int pollres =
                 WSAPoll(host_pollfds.data(), static_cast<ULONG>(host_pollfds.size()), -1);
             if (host_pollfds[1].revents != 0) {
                 // Interrupt signaled before a client could be accepted, break
                 return {AcceptResult{}, Errno::AGAIN};
             }
             if (pollres > 0) {
                 break;
+            }
         }
     }

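This Accept change only enters the blocking poll loop when the socket is in blocking mode; a non-blocking socket falls through to the accept call immediately. A hedged, POSIX-flavoured sketch of the same control flow (the diff itself uses WSAPoll/WSAPOLLFD on Windows; the function and parameter names here are illustrative):

#include <poll.h>
#include <sys/socket.h>

// Block in poll() only when the caller expects blocking semantics.
static int AcceptWithOptionalWait(int fd, int interrupt_fd, bool is_non_blocking) {
    const bool wait_for_accept = !is_non_blocking;
    if (wait_for_accept) {
        pollfd fds[2]{{fd, POLLIN, 0}, {interrupt_fd, POLLIN, 0}};
        while (true) {
            const int pollres = poll(fds, 2, -1);
            if (fds[1].revents != 0) {
                return -1; // interrupted before a client connected
            }
            if (pollres > 0) {
                break; // the listening socket is readable, accept will not block
            }
        }
    }
    return accept(fd, nullptr, nullptr);
}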
@@ -913,6 +916,7 @@ Errno Socket::SetRcvTimeo(u32 value) {

 Errno Socket::SetNonBlock(bool enable) {
     if (EnableNonBlock(fd, enable)) {
+        is_non_blocking = enable;
         return Errno::SUCCESS;
     }
     return GetAndLogLastError();
@@ -166,6 +166,9 @@ public:
     bool IsOpened() const override;

     void HandleProxyPacket(const ProxyPacket& packet) override;
+
+private:
+    bool is_non_blocking = false;
 };

 std::pair<s32, Errno> Poll(std::vector<PollFD>& poll_fds, s32 timeout);
@@ -1431,7 +1431,8 @@ ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, DA
             }
         }
     };
-    ForEachSparseImageInRegion(gpu_addr, size_bytes, region_check_gpu);
+    ForEachSparseImageInRegion(channel_state->gpu_memory.GetID(), gpu_addr, size_bytes,
+                               region_check_gpu);

     bool can_rescale = info.rescaleable;
     bool any_rescaled = false;
@@ -1842,7 +1843,7 @@ void TextureCache<P>::ForEachImageInRegionGPU(size_t as_id, GPUVAddr gpu_addr, s
     if (!storage_id) {
         return;
     }
-    auto& gpu_page_table = gpu_page_table_storage[*storage_id];
+    auto& gpu_page_table = gpu_page_table_storage[*storage_id * 2];
     ForEachGPUPage(gpu_addr, size,
                    [this, &gpu_page_table, &images, gpu_addr, size, func](u64 page) {
                        const auto it = gpu_page_table.find(page);
@@ -1882,41 +1883,48 @@ void TextureCache<P>::ForEachImageInRegionGPU(size_t as_id, GPUVAddr gpu_addr, s

 template <class P>
 template <typename Func>
-void TextureCache<P>::ForEachSparseImageInRegion(GPUVAddr gpu_addr, size_t size, Func&& func) {
+void TextureCache<P>::ForEachSparseImageInRegion(size_t as_id, GPUVAddr gpu_addr, size_t size,
+                                                 Func&& func) {
     using FuncReturn = typename std::invoke_result<Func, ImageId, Image&>::type;
     static constexpr bool BOOL_BREAK = std::is_same_v<FuncReturn, bool>;
     boost::container::small_vector<ImageId, 8> images;
-    ForEachGPUPage(gpu_addr, size, [this, &images, gpu_addr, size, func](u64 page) {
-        const auto it = sparse_page_table.find(page);
-        if (it == sparse_page_table.end()) {
-            if constexpr (BOOL_BREAK) {
-                return false;
-            } else {
-                return;
-            }
-        }
-        for (const ImageId image_id : it->second) {
-            Image& image = slot_images[image_id];
-            if (True(image.flags & ImageFlagBits::Picked)) {
-                continue;
-            }
-            if (!image.OverlapsGPU(gpu_addr, size)) {
-                continue;
-            }
-            image.flags |= ImageFlagBits::Picked;
-            images.push_back(image_id);
-            if constexpr (BOOL_BREAK) {
-                if (func(image_id, image)) {
-                    return true;
-                }
-            } else {
-                func(image_id, image);
-            }
-        }
-        if constexpr (BOOL_BREAK) {
-            return false;
-        }
-    });
+    auto storage_id = getStorageID(as_id);
+    if (!storage_id) {
+        return;
+    }
+    auto& sparse_page_table = gpu_page_table_storage[*storage_id * 2 + 1];
+    ForEachGPUPage(gpu_addr, size,
+                   [this, &sparse_page_table, &images, gpu_addr, size, func](u64 page) {
+                       const auto it = sparse_page_table.find(page);
+                       if (it == sparse_page_table.end()) {
+                           if constexpr (BOOL_BREAK) {
+                               return false;
+                           } else {
+                               return;
+                           }
+                       }
+                       for (const ImageId image_id : it->second) {
+                           Image& image = slot_images[image_id];
+                           if (True(image.flags & ImageFlagBits::Picked)) {
+                               continue;
+                           }
+                           if (!image.OverlapsGPU(gpu_addr, size)) {
+                               continue;
+                           }
+                           image.flags |= ImageFlagBits::Picked;
+                           images.push_back(image_id);
+                           if constexpr (BOOL_BREAK) {
+                               if (func(image_id, image)) {
+                                   return true;
+                               }
+                           } else {
+                               func(image_id, image);
+                           }
+                       }
+                       if constexpr (BOOL_BREAK) {
+                           return false;
+                       }
+                   });
     for (const ImageId image_id : images) {
         slot_images[image_id].flags &= ~ImageFlagBits::Picked;
     }
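Even after the rewrite, ForEachSparseImageInRegion keeps the same de-duplication pattern: tag each image with a Picked flag while walking overlapping pages so it is visited only once, then clear the flags afterwards. A simplified standalone sketch of that pattern (the types are stand-ins, not the real Image/slot_images containers):

#include <cstddef>
#include <cstdint>
#include <vector>

struct Image {
    std::uint32_t flags = 0;
};

constexpr std::uint32_t PickedFlag = 1u << 0;

template <typename Func>
void ForEachUniqueImage(std::vector<Image>& images,
                        const std::vector<std::vector<std::size_t>>& pages, Func&& func) {
    std::vector<std::size_t> picked;
    for (const auto& page : pages) {
        for (const std::size_t image_id : page) {
            Image& image = images[image_id];
            if ((image.flags & PickedFlag) != 0) {
                continue; // already visited through another overlapping page
            }
            image.flags |= PickedFlag;
            picked.push_back(image_id);
            func(image_id, image);
        }
    }
    // Clear the markers so the next query starts from a clean state.
    for (const std::size_t image_id : picked) {
        images[image_id].flags &= ~PickedFlag;
    }
}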
@@ -1988,8 +1996,9 @@ void TextureCache<P>::RegisterImage(ImageId image_id) {
         sparse_maps.push_back(map_id);
     });
     sparse_views.emplace(image_id, std::move(sparse_maps));
-    ForEachGPUPage(image.gpu_addr, image.guest_size_bytes,
-                   [this, image_id](u64 page) { sparse_page_table[page].push_back(image_id); });
+    ForEachGPUPage(image.gpu_addr, image.guest_size_bytes, [this, image_id](u64 page) {
+        (*channel_state->sparse_page_table)[page].push_back(image_id);
+    });
 }

 template <class P>
@@ -2042,7 +2051,7 @@ void TextureCache<P>::UnregisterImage(ImageId image_id) {
         return;
     }
     ForEachGPUPage(image.gpu_addr, image.guest_size_bytes, [this, &clear_page_table](u64 page) {
-        clear_page_table(page, sparse_page_table);
+        clear_page_table(page, (*channel_state->sparse_page_table));
     });
     auto it = sparse_views.find(image_id);
     ASSERT(it != sparse_views.end());
@@ -2496,13 +2505,15 @@ void TextureCache<P>::CreateChannel(struct Tegra::Control::ChannelState& channel
     const auto it = channel_map.find(channel.bind_id);
     auto* this_state = &channel_storage[it->second];
     const auto& this_as_ref = address_spaces[channel.memory_manager->GetID()];
-    this_state->gpu_page_table = &gpu_page_table_storage[this_as_ref.storage_id];
+    this_state->gpu_page_table = &gpu_page_table_storage[this_as_ref.storage_id * 2];
+    this_state->sparse_page_table = &gpu_page_table_storage[this_as_ref.storage_id * 2 + 1];
 }

 /// Bind a channel for execution.
 template <class P>
 void TextureCache<P>::OnGPUASRegister([[maybe_unused]] size_t map_id) {
     gpu_page_table_storage.emplace_back();
+    gpu_page_table_storage.emplace_back();
 }

 } // namespace VideoCommon
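After this change each GPU address space owns two consecutive slots in gpu_page_table_storage: the regular page table at storage_id * 2 and the sparse page table at storage_id * 2 + 1, which is why OnGPUASRegister now pushes two entries. A small sketch of that indexing scheme (PageTableStorage and its members are illustrative, not yuzu's actual classes):

#include <cstddef>
#include <cstdint>
#include <unordered_map>
#include <vector>

using PageTable = std::unordered_map<std::uint64_t, std::vector<std::uint32_t>>;

struct PageTableStorage {
    std::vector<PageTable> tables;

    // One call per registered GPU address space: reserve a regular and a sparse table.
    std::size_t RegisterAddressSpace() {
        const std::size_t storage_id = tables.size() / 2;
        tables.emplace_back(); // regular GPU page table -> index storage_id * 2
        tables.emplace_back(); // sparse page table      -> index storage_id * 2 + 1
        return storage_id;
    }

    PageTable& Regular(std::size_t storage_id) { return tables[storage_id * 2]; }
    PageTable& Sparse(std::size_t storage_id) { return tables[storage_id * 2 + 1]; }
};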
@@ -86,6 +86,7 @@ public:
     std::unordered_map<TSCEntry, SamplerId> samplers;

     TextureCacheGPUMap* gpu_page_table;
+    TextureCacheGPUMap* sparse_page_table;
 };

 template <class P>
@@ -357,7 +358,7 @@ private:
     void ForEachImageInRegionGPU(size_t as_id, GPUVAddr gpu_addr, size_t size, Func&& func);

     template <typename Func>
-    void ForEachSparseImageInRegion(GPUVAddr gpu_addr, size_t size, Func&& func);
+    void ForEachSparseImageInRegion(size_t as_id, GPUVAddr gpu_addr, size_t size, Func&& func);

     /// Iterates over all the images in a region calling func
     template <typename Func>
@@ -431,7 +432,6 @@ private:
     std::unordered_map<RenderTargets, FramebufferId> framebuffers;

     std::unordered_map<u64, std::vector<ImageMapId>, Common::IdentityHash<u64>> page_table;
-    std::unordered_map<u64, std::vector<ImageId>, Common::IdentityHash<u64>> sparse_page_table;
     std::unordered_map<ImageId, boost::container::small_vector<ImageViewId, 16>> sparse_views;

     DAddr virtual_invalid_space{};