author    RSDuck <rsduck@users.noreply.github.com>  2020-06-14 21:04:25 +0200
committer RSDuck <rsduck@users.noreply.github.com>  2020-06-16 12:11:19 +0200
commit    e335a8ca7615c702cfa2dcdb71deb69468088fd8 (patch)
tree      c09dcec016d87e7d82a6aec377f8eb3fa9949026 /src/ARMJIT_x64
parent    fea9f95bba7475b2cd3b624a3ccd6cdee00a33f1 (diff)
first steps in bringing over the JIT refactor/fastmem
Diffstat (limited to 'src/ARMJIT_x64')
-rw-r--r--  src/ARMJIT_x64/ARMJIT_Compiler.cpp   92
-rw-r--r--  src/ARMJIT_x64/ARMJIT_Compiler.h     11
-rw-r--r--  src/ARMJIT_x64/ARMJIT_LoadStore.cpp  45
3 files changed, 40 insertions(+), 108 deletions(-)
diff --git a/src/ARMJIT_x64/ARMJIT_Compiler.cpp b/src/ARMJIT_x64/ARMJIT_Compiler.cpp
index fd3fb70..34c1c91 100644
--- a/src/ARMJIT_x64/ARMJIT_Compiler.cpp
+++ b/src/ARMJIT_x64/ARMJIT_Compiler.cpp
@@ -301,24 +301,6 @@ Compiler::Compiler()
RET();
}
- {
- CPSRDirty = true;
- BranchStub[0] = GetWritableCodePtr();
- SaveCPSR();
- MOV(64, R(ABI_PARAM1), R(RCPU));
- CALL((u8*)ARMJIT::LinkBlock<0>);
- LoadCPSR();
- JMP((u8*)ARM_Ret, true);
-
- CPSRDirty = true;
- BranchStub[1] = GetWritableCodePtr();
- SaveCPSR();
- MOV(64, R(ABI_PARAM1), R(RCPU));
- CALL((u8*)ARMJIT::LinkBlock<1>);
- LoadCPSR();
- JMP((u8*)ARM_Ret, true);
- }
-
// move the region forward to prevent overwriting the generated functions
CodeMemSize -= GetWritableCodePtr() - ResetStart;
ResetStart = GetWritableCodePtr();
@@ -520,6 +502,11 @@ void Compiler::Reset()
FarCode = FarStart;
}
+bool Compiler::IsJITFault(u64 addr)
+{
+ return addr >= (u64)CodeMemory && addr < (u64)CodeMemory + sizeof(CodeMemory);
+}
+
void Compiler::Comp_SpecialBranchBehaviour(bool taken)
{
if (taken && CurInstr.BranchFlags & branch_IdleBranch)
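
Note on the IsJITFault hook added above: it is the predicate a host fault handler needs to decide whether an access violation came from generated code. The handler itself is not part of this commit; a minimal POSIX sketch for Linux/x86-64, assuming a global `compiler` object and guessing that the s32 returned by RewriteMemAccess is a RIP adjustment (the function is still a stub in this commit), could look like:

    #include <csignal>
    #include <cstdint>
    #include <ucontext.h> // Linux/x86-64; REG_RIP may require _GNU_SOURCE

    extern Compiler* compiler; // assumption: the active JIT compiler instance

    static void FaultHandler(int sig, siginfo_t* info, void* rawContext)
    {
        ucontext_t* ctx = (ucontext_t*)rawContext;
        uint64_t pc = (uint64_t)ctx->uc_mcontext.gregs[REG_RIP]; // faulting host instruction

        if (compiler->IsJITFault(pc))
        {
            // fault came from JIT code: patch the offending memory access
            // and resume at the rewritten instruction (the return-value
            // semantics are a guess from the signature; the stub returns 0)
            ctx->uc_mcontext.gregs[REG_RIP] += compiler->RewriteMemAccess(pc);
            return;
        }

        signal(sig, SIG_DFL); // not ours: fall back to the default crash
    }
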
@@ -531,32 +518,11 @@ void Compiler::Comp_SpecialBranchBehaviour(bool taken)
RegCache.PrepareExit();
SUB(32, MDisp(RCPU, offsetof(ARM, Cycles)), Imm32(ConstantCycles));
-
- if (Config::JIT_BrancheOptimisations == 2 && !(CurInstr.BranchFlags & branch_IdleBranch)
- && (!taken || (CurInstr.BranchFlags & branch_StaticTarget)))
- {
- FixupBranch ret = J_CC(CC_S);
- CMP(32, MDisp(RCPU, offsetof(ARM, StopExecution)), Imm8(0));
- FixupBranch ret2 = J_CC(CC_NZ);
-
- u8* rewritePart = GetWritableCodePtr();
- NOP(5);
-
- MOV(32, R(ABI_PARAM2), Imm32(rewritePart - ResetStart));
- JMP((u8*)BranchStub[Num], true);
-
- SetJumpTarget(ret);
- SetJumpTarget(ret2);
- JMP((u8*)ARM_Ret, true);
- }
- else
- {
- JMP((u8*)&ARM_Ret, true);
- }
+ JMP((u8*)&ARM_Ret, true);
}
}
-JitBlockEntry Compiler::CompileBlock(u32 translatedAddr, ARM* cpu, bool thumb, FetchedInstr instrs[], int instrsCount)
+JitBlockEntry Compiler::CompileBlock(ARM* cpu, bool thumb, FetchedInstr instrs[], int instrsCount)
{
if (NearSize - (NearCode - NearStart) < 1024 * 32) // guess...
{
@@ -575,7 +541,7 @@ JitBlockEntry Compiler::CompileBlock(u32 translatedAddr, ARM* cpu, bool thumb, F
CodeRegion = instrs[0].Addr >> 24;
CurCPU = cpu;
// CPSR might have been modified in a previous block
- CPSRDirty = Config::JIT_BrancheOptimisations == 2;
+ CPSRDirty = false;
JitBlockEntry res = (JitBlockEntry)GetWritableCodePtr();
@@ -685,31 +651,7 @@ JitBlockEntry Compiler::CompileBlock(u32 translatedAddr, ARM* cpu, bool thumb, F
RegCache.Flush();
SUB(32, MDisp(RCPU, offsetof(ARM, Cycles)), Imm32(ConstantCycles));
-
- if (Config::JIT_BrancheOptimisations == 2
- && !(instrs[instrsCount - 1].BranchFlags & branch_IdleBranch)
- && (!instrs[instrsCount - 1].Info.Branches()
- || instrs[instrsCount - 1].BranchFlags & branch_FollowCondNotTaken
- || (instrs[instrsCount - 1].BranchFlags & branch_FollowCondTaken && instrs[instrsCount - 1].BranchFlags & branch_StaticTarget)))
- {
- FixupBranch ret = J_CC(CC_S);
- CMP(32, MDisp(RCPU, offsetof(ARM, StopExecution)), Imm8(0));
- FixupBranch ret2 = J_CC(CC_NZ);
-
- u8* rewritePart = GetWritableCodePtr();
- NOP(5);
-
- MOV(32, R(ABI_PARAM2), Imm32(rewritePart - ResetStart));
- JMP((u8*)BranchStub[Num], true);
-
- SetJumpTarget(ret);
- SetJumpTarget(ret2);
- JMP((u8*)ARM_Ret, true);
- }
- else
- {
- JMP((u8*)ARM_Ret, true);
- }
+ JMP((u8*)ARM_Ret, true);
/*FILE* codeout = fopen("codeout", "a");
fprintf(codeout, "beginning block argargarg__ %x!!!", instrs[0].Addr);
@@ -720,22 +662,6 @@ JitBlockEntry Compiler::CompileBlock(u32 translatedAddr, ARM* cpu, bool thumb, F
return res;
}
-void Compiler::LinkBlock(u32 offset, JitBlockEntry entry)
-{
- u8* curPtr = GetWritableCodePtr();
- SetCodePtr(ResetStart + offset);
- JMP((u8*)entry, true);
- SetCodePtr(curPtr);
-}
-
-void Compiler::UnlinkBlock(u32 offset)
-{
- u8* curPtr = GetWritableCodePtr();
- SetCodePtr(ResetStart + offset);
- NOP(5);
- SetCodePtr(curPtr);
-}
-
void Compiler::Comp_AddCycles_C(bool forceNonConstant)
{
s32 cycles = Num ?
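
The LinkBlock/UnlinkBlock pair removed above implemented direct block linking: each compiled exit left a 5-byte NOP as a patch site (the NOP(5) in the removed branch-stub code), which LinkBlock overwrote with a rel32 JMP straight to the successor block and UnlinkBlock restored. A byte-level sketch of that patching, with hypothetical helper names since melonDS did it through the emitter (SetCodePtr plus JMP/NOP):

    #include <cstdint>
    #include <cstring>

    // Byte-level equivalent of the removed LinkBlock/UnlinkBlock
    // (illustrative helpers, not melonDS functions).
    void PatchJump(uint8_t* site, uint8_t* target)
    {
        site[0] = 0xE9;                               // x86 JMP rel32 opcode
        int32_t rel = (int32_t)(target - (site + 5)); // rel32 counts from the next instruction
        std::memcpy(site + 1, &rel, sizeof(rel));
    }

    void UnpatchJump(uint8_t* site)
    {
        static const uint8_t nop5[] = { 0x0F, 0x1F, 0x44, 0x00, 0x00 }; // canonical 5-byte NOP
        std::memcpy(site, nop5, sizeof(nop5)); // back to the NOP(5) placeholder
    }
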
diff --git a/src/ARMJIT_x64/ARMJIT_Compiler.h b/src/ARMJIT_x64/ARMJIT_Compiler.h
index f2fc301..09ac257 100644
--- a/src/ARMJIT_x64/ARMJIT_Compiler.h
+++ b/src/ARMJIT_x64/ARMJIT_Compiler.h
@@ -52,10 +52,7 @@ public:
void Reset();
- void LinkBlock(u32 offset, JitBlockEntry entry);
- void UnlinkBlock(u32 offset);
-
- JitBlockEntry CompileBlock(u32 translatedAddr, ARM* cpu, bool thumb, FetchedInstr instrs[], int instrsCount);
+ JitBlockEntry CompileBlock(ARM* cpu, bool thumb, FetchedInstr instrs[], int instrsCount);
void LoadReg(int reg, Gen::X64Reg nativeReg);
void SaveReg(int reg, Gen::X64Reg nativeReg);
@@ -202,6 +199,10 @@ public:
SetCodePtr(FarCode);
}
+ bool IsJITFault(u64 addr);
+
+ s32 RewriteMemAccess(u64 pc);
+
u8* FarCode;
u8* NearCode;
u32 FarSize;
@@ -216,8 +217,6 @@ public:
bool Exit;
bool IrregularCycles;
- void* BranchStub[2];
-
void* ReadBanked;
void* WriteBanked;
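
With IsJITFault and RewriteMemAccess declared in the header, the missing piece (outside this commit) is registering a handler that calls them. Standard POSIX registration for the handler sketched earlier, wrapped in a hypothetical init hook:

    #include <csignal>

    // One-time registration of the FaultHandler sketch above;
    // an assumed init hook, not code from this commit.
    void InstallJITFaultHandler()
    {
        struct sigaction sa = {};
        sa.sa_sigaction = FaultHandler;
        sa.sa_flags = SA_SIGINFO;         // hand the ucontext to the handler
        sigemptyset(&sa.sa_mask);
        sigaction(SIGSEGV, &sa, nullptr); // fastmem faults arrive as SIGSEGV
    }
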
diff --git a/src/ARMJIT_x64/ARMJIT_LoadStore.cpp b/src/ARMJIT_x64/ARMJIT_LoadStore.cpp
index cf0bd23..0bf2f83 100644
--- a/src/ARMJIT_x64/ARMJIT_LoadStore.cpp
+++ b/src/ARMJIT_x64/ARMJIT_LoadStore.cpp
@@ -15,6 +15,11 @@ int squeezePointer(T* ptr)
return truncated;
}
+s32 Compiler::RewriteMemAccess(u64 pc)
+{
+ return 0;
+}
+
/*
According to DeSmuME and my own research, approx. 99% (seriously, that's an empirical number)
of all memory load and store instructions always access addresses in the same region as
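
RewriteMemAccess is only a stub returning 0 for now, but it is the anchor for the fastmem scheme this refactor works toward: emit a plain host load or store into a contiguous mapping of the guest address space, and only when that access faults patch it into a slow-path call. A conceptual sketch, where FastMemBase is an assumed host mapping rather than anything in melonDS:

    #include <cstdint>
    #include <cstring>

    extern uint8_t* FastMemBase; // assumption: host mapping mirroring the guest address space

    uint32_t FastRead32(uint32_t guestAddr)
    {
        uint32_t val;
        // one host load instead of region dispatch; unmapped guest regions
        // fault, and the handler then uses RewriteMemAccess(pc) to patch
        // this site into a slow-path call
        std::memcpy(&val, FastMemBase + guestAddr, sizeof(val));
        return val;
    }
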
@@ -27,14 +32,15 @@ int squeezePointer(T* ptr)
bool Compiler::Comp_MemLoadLiteral(int size, int rd, u32 addr)
{
- u32 translatedAddr = Num == 0 ? TranslateAddr9(addr) : TranslateAddr7(addr);
+ return false;
+ //u32 translatedAddr = Num == 0 ? TranslateAddr9(addr) : TranslateAddr7(addr);
- int invalidLiteralIdx = InvalidLiterals.Find(translatedAddr);
+ /*int invalidLiteralIdx = InvalidLiterals.Find(translatedAddr);
if (invalidLiteralIdx != -1)
{
InvalidLiterals.Remove(invalidLiteralIdx);
return false;
- }
+ }*/
u32 val;
// make sure arm7 bios is accessible
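
Comp_MemLoadLiteral is short-circuited to return false because it relied on TranslateAddr9/TranslateAddr7 and the InvalidLiterals list, which this refactor removes. When enabled, the idea is that a PC-relative load from a constant address can be resolved while compiling and emitted as an immediate. A simplified sketch (hypothetical name, 32-bit case only, no invalidation bookkeeping):

    // Hypothetical simplified form of the disabled fast path (32-bit only;
    // the real function also handles 16/8-bit loads and literal invalidation).
    bool Compiler::Comp_MemLoadLiteral32(int rd, u32 addr)
    {
        u32 val;
        CurCPU->DataRead32(addr & ~0x3, &val); // resolve the constant at compile time
        MOV(32, MapReg(rd), Imm32(val));       // emit it as an immediate, no runtime load
        return true;
    }
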
@@ -95,7 +101,7 @@ void Compiler::Comp_MemAccess(int rd, int rn, const ComplexOperand& op2, int siz
staticAddress = RegCache.LiteralValues[rn] + op2.Imm * ((flags & memop_SubtractOffset) ? -1 : 1);
OpArg rdMapped = MapReg(rd);
- if (!addrIsStatic)
+ if (true)
{
OpArg rnMapped = MapReg(rn);
if (Thumb && rn == 15)
@@ -145,7 +151,7 @@ void Compiler::Comp_MemAccess(int rd, int rn, const ComplexOperand& op2, int siz
MOV(32, rnMapped, R(finalAddr));
}
- int expectedTarget = Num == 0
+ /*int expectedTarget = Num == 0
? ClassifyAddress9(addrIsStatic ? staticAddress : CurInstr.DataRegion)
: ClassifyAddress7(addrIsStatic ? staticAddress : CurInstr.DataRegion);
if (CurInstr.Cond() < 0xE)
@@ -184,8 +190,8 @@ void Compiler::Comp_MemAccess(int rd, int rn, const ComplexOperand& op2, int siz
if (addrIsStatic && compileSlowPath)
MOV(32, R(RSCRATCH3), Imm32(staticAddress));
-
- if (compileFastPath)
+*/
+ /*if (compileFastPath)
{
FixupBranch slowPath;
if (compileSlowPath)
@@ -357,15 +363,16 @@ void Compiler::Comp_MemAccess(int rd, int rn, const ComplexOperand& op2, int siz
SetJumpTarget(slowPath);
}
}
-
- if (compileSlowPath)
+*/
+ if (true)
{
PushRegs(false);
if (Num == 0)
{
- MOV(32, R(ABI_PARAM2), R(RSCRATCH3));
- MOV(64, R(ABI_PARAM1), R(RCPU));
+ MOV(64, R(ABI_PARAM2), R(RCPU));
+ if (ABI_PARAM1 != RSCRATCH3)
+ MOV(32, R(ABI_PARAM1), R(RSCRATCH3));
if (flags & memop_Store)
{
MOV(32, R(ABI_PARAM3), rdMapped);
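
The reordered moves above also imply a new calling convention for the slow-path memory helpers: the guest address now goes in ABI_PARAM1 and the CPU pointer in ABI_PARAM2, and the new guard skips a self-move when RSCRATCH3 already is the first parameter register (as can happen on Win64, where ABI_PARAM1 is RCX). The helper signatures this implies are an inference from the register setup, not something shown in this diff:

    // Hypothetical declarations inferred from the argument registers above:
    u32  SlowRead32 (u32 addr, ARM* cpu);
    void SlowWrite32(u32 addr, ARM* cpu, u32 val);
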
@@ -423,13 +430,13 @@ void Compiler::Comp_MemAccess(int rd, int rn, const ComplexOperand& op2, int siz
MOVZX(32, size, rdMapped.GetSimpleReg(), R(RSCRATCH));
}
}
-
+/*
if (compileFastPath && compileSlowPath)
{
FixupBranch ret = J(true);
SwitchToNearCode();
SetJumpTarget(ret);
- }
+ }*/
if (!(flags & memop_Store) && rd == 15)
{
@@ -458,7 +465,7 @@ s32 Compiler::Comp_MemAccessBlock(int rn, BitSet16 regs, bool store, bool preinc
u32 stackAlloc = ((regsCount + 1) & ~1) * 8;
#endif
u32 allocOffset = stackAlloc - regsCount * 8;
-
+/*
int expectedTarget = Num == 0
? ClassifyAddress9(CurInstr.DataRegion)
: ClassifyAddress7(CurInstr.DataRegion);
@@ -479,7 +486,7 @@ s32 Compiler::Comp_MemAccessBlock(int rn, BitSet16 regs, bool store, bool preinc
default:
break;
}
-
+*/
if (!store)
Comp_AddCycles_CDI();
else
@@ -492,7 +499,7 @@ s32 Compiler::Comp_MemAccessBlock(int rn, BitSet16 regs, bool store, bool preinc
}
else
MOV(32, R(RSCRATCH4), MapReg(rn));
-
+/*
if (compileFastPath)
{
assert(!usermode);
@@ -570,7 +577,7 @@ s32 Compiler::Comp_MemAccessBlock(int rn, BitSet16 regs, bool store, bool preinc
SwitchToFarCode();
SetJumpTarget(slowPath);
- }
+ }*/
if (!store)
{
@@ -696,13 +703,13 @@ s32 Compiler::Comp_MemAccessBlock(int rn, BitSet16 regs, bool store, bool preinc
PopRegs(false);
}
-
+/*
if (compileFastPath)
{
FixupBranch ret = J(true);
SwitchToNearCode();
SetJumpTarget(ret);
- }
+ }*/
if (!store && regs[15])
{