author     Jesse Talavera-Greenberg <jesse@jesse.tg>   2023-11-18 10:40:54 -0500
committer  GitHub <noreply@github.com>                 2023-11-18 16:40:54 +0100
commit     544fefa27f698f3a0d799a782dc03d3eb47561db (patch)
tree       b4907fca30677cc4e1befb02301392f172eed543 /src/ARMJIT_x64
parent     f2d7a290156b5aa62edc00644c55b00de73b6229 (diff)
Refactor the JIT to be object-oriented (#1879)
* Move TinyVector to a new file
- So it's less sensitive to #include ordering
* Forgot to include assert.h
* Refactor ARMJIT_Memory into an object
* Oops, forgot a declaration
* Refactor ARMJIT to be contained in an object
* Remove an unused function declaration
* Add a missing #include
* Remove a now-unused global
* Use ARMJIT_Memory's own memory access functions
* Fix some omissions in the ARM JIT
* Move libandroid to be a member of ARMJIT_Memory instead of a global
* Default-initialize most fields in ARMJIT_Compiler.h
* Define NOOP_IF_NO_JIT
* Finish refactoring the JIT to be object-oriented
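
The common thread in these commits: JIT state that used to live in translation-unit globals (the block cache, ARMJIT_Memory, flags like FastMemory) now lives in objects, and the x64 Compiler reaches it through an ARMJIT& reference taken at construction. Below is a minimal sketch of that pattern, not the actual melonDS code; only ARMJIT, Compiler, JIT, FastMemory, and ResetBlockCache are names that appear in the diff further down, the rest is illustrative.

    // Sketch only, assuming the names from the diff below.
    //
    // Before: free functions and globals, reachable from anywhere:
    //     bool FastMemory;
    //     void ResetBlockCache();
    //
    // After: the state is owned by an ARMJIT object, and each Compiler
    // holds a reference to the ARMJIT that created it.

    class ARMJIT;

    class Compiler
    {
    public:
        explicit Compiler(ARMJIT& jit) : JIT(jit) {}

        void CompileBlock(); // defined below, once ARMJIT is complete

    private:
        ARMJIT& JIT; // replaces implicit access to globals
    };

    class ARMJIT
    {
    public:
        bool FastMemory = false;   // was a global flag
        void ResetBlockCache() {}  // was a free function

        Compiler Comp { *this };   // hypothetical wiring, for illustration
    };

    void Compiler::CompileBlock()
    {
        // Calls that used to hit globals now go through the owning object,
        // mirroring JIT.ResetBlockCache() / JIT.FastMemory in the diff:
        if (!JIT.FastMemory)
            JIT.ResetBlockCache();
    }

One visible payoff in the diff: ARMJIT_Compiler.h drops its #include of ../ARMJIT.h in favor of a forward declaration, since the header now only needs the type name to declare the reference.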
Diffstat (limited to 'src/ARMJIT_x64')
-rw-r--r--   src/ARMJIT_x64/ARMJIT_ALU.cpp         1
-rw-r--r--   src/ARMJIT_x64/ARMJIT_Branch.cpp      1
-rw-r--r--   src/ARMJIT_x64/ARMJIT_Compiler.cpp    7
-rw-r--r--   src/ARMJIT_x64/ARMJIT_Compiler.h     58
-rw-r--r--   src/ARMJIT_x64/ARMJIT_LoadStore.cpp  31
5 files changed, 54 insertions, 44 deletions
diff --git a/src/ARMJIT_x64/ARMJIT_ALU.cpp b/src/ARMJIT_x64/ARMJIT_ALU.cpp
index 069dd53..bdc17e8 100644
--- a/src/ARMJIT_x64/ARMJIT_ALU.cpp
+++ b/src/ARMJIT_x64/ARMJIT_ALU.cpp
@@ -17,6 +17,7 @@
  */
 
 #include "ARMJIT_Compiler.h"
+#include "../ARM.h"
 
 using namespace Gen;
 
diff --git a/src/ARMJIT_x64/ARMJIT_Branch.cpp b/src/ARMJIT_x64/ARMJIT_Branch.cpp
index b36f5b7..ae7d1ae 100644
--- a/src/ARMJIT_x64/ARMJIT_Branch.cpp
+++ b/src/ARMJIT_x64/ARMJIT_Branch.cpp
@@ -17,6 +17,7 @@
  */
 
 #include "ARMJIT_Compiler.h"
+#include "../ARM.h"
 
 using namespace Gen;
 
diff --git a/src/ARMJIT_x64/ARMJIT_Compiler.cpp b/src/ARMJIT_x64/ARMJIT_Compiler.cpp
index 45a2751..5506db7 100644
--- a/src/ARMJIT_x64/ARMJIT_Compiler.cpp
+++ b/src/ARMJIT_x64/ARMJIT_Compiler.cpp
@@ -18,6 +18,7 @@
 
 #include "ARMJIT_Compiler.h"
 
+#include "../ARMJIT.h"
 #include "../ARMInterpreter.h"
 
 #include <assert.h>
@@ -232,7 +233,7 @@ void Compiler::A_Comp_MSR()
 */
 u8 CodeMemory[1024 * 1024 * 32];
 
-Compiler::Compiler()
+Compiler::Compiler(ARMJIT& jit) : XEmitter(), JIT(jit)
 {
     {
 #ifdef _WIN32
@@ -712,12 +713,12 @@ JitBlockEntry Compiler::CompileBlock(ARM* cpu, bool thumb, FetchedInstr instrs[]
     if (NearSize - (GetCodePtr() - NearStart) < 1024 * 32) // guess...
     {
         Log(LogLevel::Debug, "near reset\n");
-        ResetBlockCache();
+        JIT.ResetBlockCache();
     }
     if (FarSize - (FarCode - FarStart) < 1024 * 32) // guess...
     {
         Log(LogLevel::Debug, "far reset\n");
-        ResetBlockCache();
+        JIT.ResetBlockCache();
     }
 
     ConstantCycles = 0;
diff --git a/src/ARMJIT_x64/ARMJIT_Compiler.h b/src/ARMJIT_x64/ARMJIT_Compiler.h
index 680146f..84efb35 100644
--- a/src/ARMJIT_x64/ARMJIT_Compiler.h
+++ b/src/ARMJIT_x64/ARMJIT_Compiler.h
@@ -21,7 +21,6 @@
 
 #include "../dolphin/x64Emitter.h"
 
-#include "../ARMJIT.h"
 #include "../ARMJIT_Internal.h"
 #include "../ARMJIT_RegisterCache.h"
 
@@ -31,9 +30,11 @@
 
 #include <unordered_map>
 
+class ARMJIT_Memory;
+
 namespace ARMJIT
 {
-
+class ARMJIT;
 const Gen::X64Reg RCPU = Gen::RBP;
 const Gen::X64Reg RCPSR = Gen::R15;
 
@@ -79,7 +80,11 @@ struct Op2
 class Compiler : public Gen::XEmitter
 {
 public:
-    Compiler();
+#ifdef JIT_ENABLED
+    explicit Compiler(ARMJIT& jit);
+#else
+    explicit Compiler(ARMJIT& jit) : XEmitter(), JIT(jit) {}
+#endif
 
     void Reset();
 
@@ -238,42 +243,43 @@ public:
     void CreateMethod(const char* namefmt, void* start, ...);
 #endif
 
-    u8* FarCode;
-    u8* NearCode;
-    u32 FarSize;
-    u32 NearSize;
+    ARMJIT& JIT;
+    u8* FarCode {};
+    u8* NearCode {};
+    u32 FarSize {};
+    u32 NearSize {};
 
-    u8* NearStart;
-    u8* FarStart;
+    u8* NearStart {};
+    u8* FarStart {};
 
-    void* PatchedStoreFuncs[2][2][3][16];
-    void* PatchedLoadFuncs[2][2][3][2][16];
+    void* PatchedStoreFuncs[2][2][3][16] {};
+    void* PatchedLoadFuncs[2][2][3][2][16] {};
 
-    std::unordered_map<u8*, LoadStorePatch> LoadStorePatches;
+    std::unordered_map<u8*, LoadStorePatch> LoadStorePatches {};
 
-    u8* ResetStart;
-    u32 CodeMemSize;
+    u8* ResetStart {};
+    u32 CodeMemSize {};
 
-    bool Exit;
-    bool IrregularCycles;
+    bool Exit {};
+    bool IrregularCycles {};
 
-    void* ReadBanked;
-    void* WriteBanked;
+    void* ReadBanked {};
+    void* WriteBanked {};
 
     bool CPSRDirty = false;
 
-    FetchedInstr CurInstr;
+    FetchedInstr CurInstr {};
 
-    RegisterCache<Compiler, Gen::X64Reg> RegCache;
+    RegisterCache<Compiler, Gen::X64Reg> RegCache {};
 
-    bool Thumb;
-    u32 Num;
-    u32 R15;
-    u32 CodeRegion;
+    bool Thumb {};
+    u32 Num {};
+    u32 R15 {};
+    u32 CodeRegion {};
 
-    u32 ConstantCycles;
+    u32 ConstantCycles {};
 
-    ARM* CurCPU;
+    ARM* CurCPU {};
 };
 
 }
diff --git a/src/ARMJIT_x64/ARMJIT_LoadStore.cpp b/src/ARMJIT_x64/ARMJIT_LoadStore.cpp
index 718f1bc..1433429 100644
--- a/src/ARMJIT_x64/ARMJIT_LoadStore.cpp
+++ b/src/ARMJIT_x64/ARMJIT_LoadStore.cpp
@@ -17,6 +17,7 @@
  */
 
 #include "ARMJIT_Compiler.h"
+#include "../ARMJIT.h"
 
 using namespace Gen;
 
@@ -67,9 +68,9 @@ u8* Compiler::RewriteMemAccess(u8* pc)
 
 bool Compiler::Comp_MemLoadLiteral(int size, bool signExtend, int rd, u32 addr)
 {
-    u32 localAddr = LocaliseCodeAddress(Num, addr);
+    u32 localAddr = JIT.LocaliseCodeAddress(Num, addr);
 
-    int invalidLiteralIdx = InvalidLiterals.Find(localAddr);
+    int invalidLiteralIdx = JIT.InvalidLiterals.Find(localAddr);
     if (invalidLiteralIdx != -1)
     {
         return false;
@@ -117,7 +118,7 @@ void Compiler::Comp_MemAccess(int rd, int rn, const Op2& op2, int size, int flag
     if (size == 16)
         addressMask = ~1;
 
-    if (LiteralOptimizations && rn == 15 && rd != 15 && op2.IsImm && !(flags & (memop_Post|memop_Store|memop_Writeback)))
+    if (JIT.LiteralOptimizations && rn == 15 && rd != 15 && op2.IsImm && !(flags & (memop_Post|memop_Store|memop_Writeback)))
     {
         u32 addr = R15 + op2.Imm * ((flags & memop_SubtractOffset) ? -1 : 1);
 
@@ -134,7 +135,7 @@ void Compiler::Comp_MemAccess(int rd, int rn, const Op2& op2, int size, int flag
             Comp_AddCycles_CDI();
     }
 
-    bool addrIsStatic = LiteralOptimizations
+    bool addrIsStatic = JIT.LiteralOptimizations
         && RegCache.IsLiteral(rn) && op2.IsImm && !(flags & (memop_Writeback|memop_Post));
     u32 staticAddress;
     if (addrIsStatic)
@@ -195,10 +196,10 @@ void Compiler::Comp_MemAccess(int rd, int rn, const Op2& op2, int size, int flag
         MOV(32, rnMapped, R(finalAddr));
 
     u32 expectedTarget = Num == 0
-        ? ARMJIT_Memory::ClassifyAddress9(CurInstr.DataRegion)
-        : ARMJIT_Memory::ClassifyAddress7(CurInstr.DataRegion);
+        ? JIT.Memory.ClassifyAddress9(CurInstr.DataRegion)
+        : JIT.Memory.ClassifyAddress7(CurInstr.DataRegion);
 
-    if (ARMJIT::FastMemory && ((!Thumb && CurInstr.Cond() != 0xE) || ARMJIT_Memory::IsFastmemCompatible(expectedTarget)))
+    if (JIT.FastMemory && ((!Thumb && CurInstr.Cond() != 0xE) || JIT.Memory.IsFastmemCompatible(expectedTarget)))
     {
         if (rdMapped.IsImm())
         {
@@ -216,7 +217,7 @@ void Compiler::Comp_MemAccess(int rd, int rn, const Op2& op2, int size, int flag
 
         assert(patch.PatchFunc != NULL);
 
-        MOV(64, R(RSCRATCH), ImmPtr(Num == 0 ? ARMJIT_Memory::FastMem9Start : ARMJIT_Memory::FastMem7Start));
+        MOV(64, R(RSCRATCH), ImmPtr(Num == 0 ? JIT.Memory.FastMem9Start : JIT.Memory.FastMem7Start));
 
         X64Reg maskedAddr = RSCRATCH3;
         if (size > 8)
@@ -267,7 +268,7 @@ void Compiler::Comp_MemAccess(int rd, int rn, const Op2& op2, int size, int flag
 
         void* func = NULL;
         if (addrIsStatic)
-            func = ARMJIT_Memory::GetFuncForAddr(CurCPU, staticAddress, flags & memop_Store, size);
+            func = JIT.Memory.GetFuncForAddr(CurCPU, staticAddress, flags & memop_Store, size);
 
         if (func)
         {
@@ -421,16 +422,16 @@ s32 Compiler::Comp_MemAccessBlock(int rn, BitSet16 regs, bool store, bool preinc
     s32 offset = (regsCount * 4) * (decrement ? -1 : 1);
 
     int expectedTarget = Num == 0
-        ? ARMJIT_Memory::ClassifyAddress9(CurInstr.DataRegion)
-        : ARMJIT_Memory::ClassifyAddress7(CurInstr.DataRegion);
+        ? JIT.Memory.ClassifyAddress9(CurInstr.DataRegion)
+        : JIT.Memory.ClassifyAddress7(CurInstr.DataRegion);
 
     if (!store)
         Comp_AddCycles_CDI();
     else
         Comp_AddCycles_CD();
 
-    bool compileFastPath = FastMemory
-        && !usermode && (CurInstr.Cond() < 0xE || ARMJIT_Memory::IsFastmemCompatible(expectedTarget));
+    bool compileFastPath = JIT.FastMemory
+        && !usermode && (CurInstr.Cond() < 0xE || JIT.Memory.IsFastmemCompatible(expectedTarget));
 
     // we need to make sure that the stack stays aligned to 16 bytes
 #ifdef _WIN32
@@ -453,7 +454,7 @@ s32 Compiler::Comp_MemAccessBlock(int rn, BitSet16 regs, bool store, bool preinc
         u8* fastPathStart = GetWritableCodePtr();
         u8* loadStoreAddr[16];
 
-        MOV(64, R(RSCRATCH2), ImmPtr(Num == 0 ? ARMJIT_Memory::FastMem9Start : ARMJIT_Memory::FastMem7Start));
+        MOV(64, R(RSCRATCH2), ImmPtr(Num == 0 ? JIT.Memory.FastMem9Start : JIT.Memory.FastMem7Start));
         ADD(64, R(RSCRATCH2), R(RSCRATCH4));
 
         u32 offset = 0;
@@ -807,7 +808,7 @@ void Compiler::T_Comp_LoadPCRel()
 {
     u32 offset = (CurInstr.Instr & 0xFF) << 2;
     u32 addr = (R15 & ~0x2) + offset;
 
-    if (!LiteralOptimizations || !Comp_MemLoadLiteral(32, false, CurInstr.T_Reg(8), addr))
+    if (!JIT.LiteralOptimizations || !Comp_MemLoadLiteral(32, false, CurInstr.T_Reg(8), addr))
         Comp_MemAccess(CurInstr.T_Reg(8), 15, Op2(offset), 32, 0);
 }
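
A side note on the {} initializers added throughout ARMJIT_Compiler.h: a trailing pair of braces on a non-static data member is a C++11 default member initializer that value-initializes the field, so pointers start as nullptr, integers as 0, and bools as false in any constructor that does not set them itself. A self-contained illustration with a hypothetical struct, not melonDS code:

    #include <cassert>
    #include <cstdint>

    struct Example
    {
        std::uint8_t* FarCode {}; // value-initialized to nullptr
        std::uint32_t FarSize {}; // value-initialized to 0
        bool Exit {};             // value-initialized to false
    };

    int main()
    {
        Example e; // no hand-written constructor, yet every member is deterministic
        assert(e.FarCode == nullptr && e.FarSize == 0 && !e.Exit);
        return 0;
    }

This is why the commit can delete no explicit constructor body for these fields: the initializers run for every Compiler instance, which matters once several JIT objects can coexist.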