Fix a startup buffer-overflow crash in nvhost_ctrl_gpu with Luigi's Mansion 3 (LM3): widen IoctlGetGpuTime to match the kernel driver's 16-byte NVGPU_GPU_IOCTL_GET_GPU_TIME payload.
Running `Luigi's Mansion 3` results in the following crash at game startup: ``` $ gdb --args ./bin/yuzu (gdb) r # and load luigi's mansion 3 game ... *** buffer overflow detected ***: /home/user/repos/yuzu-mainline/build/bin/yuzu terminated Thread 27 "EmuThread" received signal SIGABRT, Aborted. [Switching to Thread 0x7fffcf7fe700 (LWP 2435)] __GI_raise (sig=sig@entry=6) at ../sysdeps/unix/sysv/linux/raise.c:50 50 ../sysdeps/unix/sysv/linux/raise.c: No such file or directory. (gdb) bt (gdb) bt #0 0x00007ffff62ef3eb in __GI_raise (sig=sig@entry=6) at ../sysdeps/unix/sysv/linux/raise.c:50 #1 0x00007ffff62ce899 in __GI_abort () at abort.c:79 #2 0x00007ffff633938e in __libc_message (action=action@entry=(do_abort | do_backtrace), fmt=fmt@entry=0x7ffff6462199 "*** %s ***: %s terminated\n") at ../sysdeps/posix/libc_fatal.c:181 #3 0x00007ffff63dbe57 in __GI___fortify_fail_abort (need_backtrace=need_backtrace@entry=true, msg=msg@entry=0x7ffff6462125 "buffer overflow detected") at fortify_fail.c:33 #4 0x00007ffff63dbe77 in __GI___fortify_fail (msg=msg@entry=0x7ffff6462125 "buffer overflow detected") at fortify_fail.c:44 #5 0x00007ffff63da6b6 in __GI___chk_fail () at chk_fail.c:28 #6 0x0000555555a00639 in memcpy (__len=<optimized out>, __src=<optimized out>, __dest=0x7fffcf7fd2f0) at /usr/include/x86_64-linux-gnu/bits/string_fortified.h:34 #7 0x0000555555a00639 in Service::Nvidia::Devices::nvhost_ctrl_gpu::GetGpuTime(std::vector<unsigned char, std::allocator<unsigned char> > const&, std::vector<unsigned char, std::allocator<unsigned char> >&) (this=this@entry=0x555557a689e0, input=std::vector of length 16, capacity 16 = {...}, output=std::vector of length 16, capacity 16 = {...}) at /home/user/repos/yuzu-mainline/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp:197 at ../sysdeps/posix/libc_fatal.c:181 at /home/user/repos/yuzu-mainline/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp:197 #8 0x0000555555a008b6 in 
Service::Nvidia::Devices::nvhost_ctrl_gpu::ioctl(Service::Nvidia::Devices::nvdevice::Ioctl, std::vector<unsigned char, std::allocator<unsigned char> > const&, std::vector<unsigned char, std::allocator<unsigned char> > const&, std::vector<unsigned char, std::allocator<unsigned char> >&, std::vector<unsigned char, std::allocator<unsigned char> >&, Service::Nvidia::IoctlCtrl&, Service::Nvidia::IoctlVersion) (this=0x555557a689e0, command=..., input=std::vector of length 16, capacity 16 = {...}, input2=..., output=std::vector of length 16, capacity 16 = {...}, output2=std::vector of length 0, capacity 0, ctrl=..., version=Service::Nvidia::IoctlVersion::Version1) at /home/user/repos/yuzu-mainline/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp:42 ``` I am running Ubuntu 19.10 with `nvidia-driver-440` driver here. Hopefully, this information will enable you to find the right fix!
This commit is contained in:
committed by
Dhiru Kholia
parent
34f8881d3e
commit
fd882d301c
@@ -160,8 +160,9 @@ private:
|
||||
|
||||
struct IoctlGetGpuTime {
|
||||
u64_le gpu_time;
|
||||
u64_le reserved;
|
||||
};
|
||||
static_assert(sizeof(IoctlGetGpuTime) == 8, "IoctlGetGpuTime is incorrect size");
|
||||
static_assert(sizeof(IoctlGetGpuTime) == 16, "IoctlGetGpuTime is incorrect size");
|
||||
|
||||
u32 GetCharacteristics(const std::vector<u8>& input, std::vector<u8>& output,
|
||||
std::vector<u8>& output2, IoctlVersion version);
|
||||
|
||||
Reference in New Issue
Block a user