From ef75e3cdd1a7678b1ee726d29a42a871bf39da33 Mon Sep 17 00:00:00 2001
From: RSDuck
Date: Tue, 5 Jan 2021 14:36:15 +0100
Subject: JIT A64: fixes

also update Switch code for latest libnx
---
 src/ARMJIT_A64/ARMJIT_Compiler.cpp  |  9 +++++---
 src/ARMJIT_A64/ARMJIT_LoadStore.cpp | 41 +++++++++++++++++--------------------
 2 files changed, 25 insertions(+), 25 deletions(-)

(limited to 'src/ARMJIT_A64')

diff --git a/src/ARMJIT_A64/ARMJIT_Compiler.cpp b/src/ARMJIT_A64/ARMJIT_Compiler.cpp
index 5fe3fe7..880a6fc 100644
--- a/src/ARMJIT_A64/ARMJIT_Compiler.cpp
+++ b/src/ARMJIT_A64/ARMJIT_Compiler.cpp
@@ -187,7 +187,8 @@ Compiler::Compiler()
     JitRWBase = aligned_alloc(0x1000, JitMemSize);
 
     JitRXStart = (u8*)&__start__ - JitMemSize - 0x1000;
-    JitRWStart = virtmemReserve(JitMemSize);
+    virtmemLock();
+    JitRWStart = virtmemFindAslr(JitMemSize, 0x1000);
     MemoryInfo info = {0};
     u32 pageInfo = {0};
     int i = 0;
@@ -214,6 +215,8 @@ Compiler::Compiler()
     succeded = R_SUCCEEDED(svcMapProcessMemory(JitRWStart, envGetOwnProcessHandle(), (u64)JitRXStart, JitMemSize));
     assert(succeded);
 
+    virtmemUnlock();
+
     SetCodeBase((u8*)JitRWStart, (u8*)JitRXStart);
     JitMemMainSize = JitMemSize;
 #else
@@ -426,7 +429,6 @@ Compiler::~Compiler()
     {
         bool succeded = R_SUCCEEDED(svcUnmapProcessMemory(JitRWStart, envGetOwnProcessHandle(), (u64)JitRXStart, JitMemSize));
         assert(succeded);
-        virtmemFree(JitRWStart, JitMemSize);
         succeded = R_SUCCEEDED(svcUnmapProcessCodeMemory(envGetOwnProcessHandle(), (u64)JitRXStart, (u64)JitRWBase, JitMemSize));
         assert(succeded);
         free(JitRWBase);
@@ -753,7 +755,8 @@ JitBlockEntry Compiler::CompileBlock(ARM* cpu, bool thumb, FetchedInstr instrs[]
             FixupBranch skipNop = B();
             SetJumpTarget(skipExecute);
 
-            Comp_AddCycles_C();
+            if (IrregularCycles)
+                Comp_AddCycles_C(true);
 
             Comp_BranchSpecialBehaviour(false);
 
diff --git a/src/ARMJIT_A64/ARMJIT_LoadStore.cpp b/src/ARMJIT_A64/ARMJIT_LoadStore.cpp
index 2c14dc6..3d30759 100644
--- a/src/ARMJIT_A64/ARMJIT_LoadStore.cpp
+++ b/src/ARMJIT_A64/ARMJIT_LoadStore.cpp
@@ -472,31 +472,24 @@ s32 Compiler::Comp_MemAccessBlock(int rn, BitSet16 regs, bool store, bool preinc
     bool compileFastPath = Config::JIT_FastMemory
         && store && !usermode && (CurInstr.Cond() < 0xE || ARMJIT_Memory::IsFastmemCompatible(expectedTarget));
 
-    if (decrement)
     {
-        s32 offset = -regsCount * 4 + (preinc ? 0 : 4);
+        s32 offset = decrement
+            ? -regsCount * 4 + (preinc ? 0 : 4)
+            : (preinc ? 4 : 0);
+
         if (offset)
-        {
             ADDI2R(W0, MapReg(rn), offset);
-            ANDI2R(W0, W0, ~3);
-        }
-        else
-        {
+        else if (compileFastPath)
             ANDI2R(W0, MapReg(rn), ~3);
-        }
-    }
-    else
-    {
-        ANDI2R(W0, MapReg(rn), ~3);
-        if (preinc)
-            ADD(W0, W0, 4);
+        else
+            MOV(W0, MapReg(rn));
     }
 
     u8* patchFunc;
     if (compileFastPath)
     {
         ptrdiff_t fastPathStart = GetCodeOffset();
-        ptrdiff_t loadStoreOffsets[16];
+        ptrdiff_t loadStoreOffsets[8];
 
         MOVP2R(X1, Num == 0 ? ARMJIT_Memory::FastMem9Start : ARMJIT_Memory::FastMem7Start);
         ADD(X1, X1, X0);
@@ -547,16 +540,19 @@ s32 Compiler::Comp_MemAccessBlock(int rn, BitSet16 regs, bool store, bool preinc
                     LoadReg(nextReg, second);
 
                 loadStoreOffsets[i++] = GetCodeOffset();
-
                 if (store)
+                {
                     STP(INDEX_SIGNED, first, second, X1, offset);
+                }
                 else
+                {
                     LDP(INDEX_SIGNED, first, second, X1, offset);
-
-                if (!(RegCache.LoadedRegs & (1 << reg)) && !store)
-                    SaveReg(reg, first);
-                if (!(RegCache.LoadedRegs & (1 << nextReg)) && !store)
-                    SaveReg(nextReg, second);
+
+                    if (!(RegCache.LoadedRegs & (1 << reg)))
+                        SaveReg(reg, first);
+                    if (!(RegCache.LoadedRegs & (1 << nextReg)))
+                        SaveReg(nextReg, second);
+                }
 
                 offset += 8;
             }
@@ -566,7 +562,8 @@ s32 Compiler::Comp_MemAccessBlock(int rn, BitSet16 regs, bool store, bool preinc
         SwapCodeRegion();
         patchFunc = (u8*)GetRXPtr();
         patch.PatchFunc = patchFunc;
-        for (i = 0; i < regsCount; i++)
+        u32 numLoadStores = i;
+        for (i = 0; i < numLoadStores; i++)
        {
             patch.PatchOffset = fastPathStart - loadStoreOffsets[i];
             LoadStorePatches[loadStoreOffsets[i]] = patch;
--
cgit v1.2.3
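
Note on the libnx change above: newer libnx replaces virtmemReserve()/virtmemFree() with an explicit
lock around address-space queries plus virtmemFindAslr(), which is what the ARMJIT_Compiler.cpp hunks
switch to. A minimal sketch of that pattern, assuming a current libnx toolchain (the helper name and
error handling are illustrative, not part of the patch):

    #include <switch.h>

    // Mirror an RX code region as RW using the newer libnx virtmem API.
    // rxStart/size correspond to JitRXStart/JitMemSize in the patch.
    static void* map_rw_mirror(void* rxStart, size_t size)
    {
        virtmemLock();                                  // serialize address-space lookups
        void* rwStart = virtmemFindAslr(size, 0x1000);  // pick a free ASLR region (replaces virtmemReserve)
        Result rc = svcMapProcessMemory(rwStart, envGetOwnProcessHandle(),
                                        (u64)rxStart, size);
        virtmemUnlock();                                // no matching virtmemFree() on the new API
        return R_SUCCEEDED(rc) ? rwStart : NULL;
    }

Since virtmemFindAslr() only picks an address and does not hold a reservation after virtmemUnlock(),
the destructor hunk correspondingly drops the virtmemFree() call.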