path: root/src/ARMJIT_x64
author     RSDuck <rsduck@users.noreply.github.com>    2019-08-17 16:50:48 +0200
committer  RSDuck <rsduck@users.noreply.github.com>    2020-04-26 13:04:59 +0200
commit     3001d9492c6e7e83e82843a4b9c6186b0b58f5e5 (patch)
tree       87ba6724548a3711ba59776e01e01e9102b920e8 /src/ARMJIT_x64
parent     ec21172cd9932805f02d84f41599c7a23e3b23f5 (diff)
abandon pipelining on jit
Fixes Golden Sun: Dark Dawn. This makes the CPU state incompatible between the interpreter and the JIT, which is why switching the CPU mode requires a restart (not requiring one would be stupid anyway) and why the pipeline is manually filled when making a save state.
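
For context: with the JIT no longer emulating the two-stage prefetch, the NextInstr[0]/NextInstr[1] fields that the diff below stops writing have to be refilled from memory before the interpreter (or a save state) can use the CPU state again. Below is a minimal sketch of such a refill for the ARM9, using only names visible in this diff (ARMv5, R[15], NextInstr, CodeRead32); the function name FillPipeline9 is hypothetical and not part of this commit, and the real refill logic presumably lives outside src/ARMJIT_x64, so it is not shown in this diff.

#include "ARM.h" // melonDS header declaring ARMv5

// Hypothetical helper, not part of this commit: refill the two-entry
// prefetch pipeline from the current PC so a save state taken while the
// JIT is active still holds a state the interpreter can resume from.
// Only the ARM-mode ARM9 case is sketched; Thumb would mirror the
// halfword fetches removed from Comp_JumpTo below.
void FillPipeline9(ARMv5* cpu)
{
    // In ARM mode R[15] runs two instructions ahead, so the next
    // instruction to execute sits at R[15]-4 and the one after at R[15].
    cpu->NextInstr[0] = cpu->CodeRead32(cpu->R[15] - 4, true);
    cpu->NextInstr[1] = cpu->CodeRead32(cpu->R[15], false);
}
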
Diffstat (limited to 'src/ARMJIT_x64')
-rw-r--r--   src/ARMJIT_x64/ARMJIT_Branch.cpp      39
-rw-r--r--   src/ARMJIT_x64/ARMJIT_Compiler.cpp     5
-rw-r--r--   src/ARMJIT_x64/ARMJIT_LoadStore.cpp    5
3 files changed, 17 insertions, 32 deletions
diff --git a/src/ARMJIT_x64/ARMJIT_Branch.cpp b/src/ARMJIT_x64/ARMJIT_Branch.cpp
index 9d4c1e2..30b18d7 100644
--- a/src/ARMJIT_x64/ARMJIT_Branch.cpp
+++ b/src/ARMJIT_x64/ARMJIT_Branch.cpp
@@ -4,6 +4,14 @@ using namespace Gen;
namespace ARMJIT
{
+
+template <typename T>
+int squeezePointer(T* ptr)
+{
+ int truncated = (int)((u64)ptr);
+ assert((T*)((u64)truncated) == ptr);
+ return truncated;
+}
void Compiler::Comp_JumpTo(u32 addr, bool forceNonConstantCycles)
{
@@ -12,9 +20,7 @@ void Compiler::Comp_JumpTo(u32 addr, bool forceNonConstantCycles)
// we'll see how it works out
u32 newPC;
- u32 nextInstr[2];
u32 cycles = 0;
- bool setupRegion = false;
if (addr & 0x1 && !Thumb)
{
@@ -40,7 +46,7 @@ void Compiler::Comp_JumpTo(u32 addr, bool forceNonConstantCycles)
MOV(32, MDisp(RCPU, offsetof(ARMv5, RegionCodeCycles)), Imm32(regionCodeCycles));
- setupRegion = newregion != oldregion;
+ bool setupRegion = newregion != oldregion;
if (setupRegion)
cpu9->SetupCodeMem(addr);
@@ -53,15 +59,14 @@ void Compiler::Comp_JumpTo(u32 addr, bool forceNonConstantCycles)
// doesn't matter if we put garbage in the MSbs there
if (addr & 0x2)
{
- nextInstr[0] = cpu9->CodeRead32(addr-2, true) >> 16;
+ cpu9->CodeRead32(addr-2, true);
cycles += cpu9->CodeCycles;
- nextInstr[1] = cpu9->CodeRead32(addr+2, false);
+ cpu9->CodeRead32(addr+2, false);
cycles += CurCPU->CodeCycles;
}
else
{
- nextInstr[0] = cpu9->CodeRead32(addr, true);
- nextInstr[1] = nextInstr[0] >> 16;
+ cpu9->CodeRead32(addr, true);
cycles += cpu9->CodeCycles;
}
}
@@ -70,12 +75,15 @@ void Compiler::Comp_JumpTo(u32 addr, bool forceNonConstantCycles)
addr &= ~0x3;
newPC = addr+4;
- nextInstr[0] = cpu9->CodeRead32(addr, true);
+ cpu9->CodeRead32(addr, true);
cycles += cpu9->CodeCycles;
- nextInstr[1] = cpu9->CodeRead32(addr+4, false);
+ cpu9->CodeRead32(addr+4, false);
cycles += cpu9->CodeCycles;
}
+ MOV(64, MDisp(RCPU, offsetof(ARM, CodeMem.Mem)), Imm32(squeezePointer(cpu9->CodeMem.Mem)));
+ MOV(32, MDisp(RCPU, offsetof(ARM, CodeMem.Mask)), Imm32(cpu9->CodeMem.Mask));
+
cpu9->RegionCodeCycles = compileTimeCodeCycles;
if (setupRegion)
cpu9->SetupCodeMem(R15);
@@ -102,8 +110,6 @@ void Compiler::Comp_JumpTo(u32 addr, bool forceNonConstantCycles)
u32 compileTimePC = CurCPU->R[15];
CurCPU->R[15] = newPC;
- nextInstr[0] = ((ARMv4*)CurCPU)->CodeRead16(addr);
- nextInstr[1] = ((ARMv4*)CurCPU)->CodeRead16(addr+2);
cycles += NDS::ARM7MemTimings[codeCycles][0] + NDS::ARM7MemTimings[codeCycles][1];
CurCPU->R[15] = compileTimePC;
@@ -116,8 +122,6 @@ void Compiler::Comp_JumpTo(u32 addr, bool forceNonConstantCycles)
u32 compileTimePC = CurCPU->R[15];
CurCPU->R[15] = newPC;
- nextInstr[0] = cpu7->CodeRead32(addr);
- nextInstr[1] = cpu7->CodeRead32(addr+4);
cycles += NDS::ARM7MemTimings[codeCycles][2] + NDS::ARM7MemTimings[codeCycles][3];
CurCPU->R[15] = compileTimePC;
@@ -128,19 +132,10 @@ void Compiler::Comp_JumpTo(u32 addr, bool forceNonConstantCycles)
}
MOV(32, MDisp(RCPU, offsetof(ARM, R[15])), Imm32(newPC));
- MOV(32, MDisp(RCPU, offsetof(ARM, NextInstr[0])), Imm32(nextInstr[0]));
- MOV(32, MDisp(RCPU, offsetof(ARM, NextInstr[1])), Imm32(nextInstr[1]));
if ((Thumb || CurInstr.Cond() >= 0xE) && !forceNonConstantCycles)
ConstantCycles += cycles;
else
ADD(32, MDisp(RCPU, offsetof(ARM, Cycles)), Imm8(cycles));
-
- if (setupRegion)
- {
- MOV(64, R(ABI_PARAM1), R(RCPU));
- MOV(32, R(ABI_PARAM2), Imm32(newPC));
- CALL((void*)&ARMv5::SetupCodeMem);
- }
}
void Compiler::Comp_JumpTo(Gen::X64Reg addr, bool restoreCPSR)
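
A note on the squeezePointer helper introduced above: it exists because the emitted MOV stores CodeMem.Mem as a 32-bit immediate, so the host pointer must survive truncation to 32 bits; the assert checks that casting the truncated value back yields the original pointer. A standalone sketch of the same idea follows (buffer and main are illustrative only, not part of the commit):

#include <cassert>
#include <cstdint>
#include <cstdio>

// Truncate a pointer to 32 bits so it can be emitted as an x86-64
// 32-bit immediate; the assert verifies the round trip is lossless.
template <typename T>
int squeezePointer(T* ptr)
{
    int truncated = (int)((uint64_t)ptr);
    assert((T*)((uint64_t)truncated) == ptr);
    return truncated;
}

int main()
{
    static int buffer[16]; // static storage normally ends up in the low
                           // 2 GB under the default x86-64 code model
    printf("%x\n", squeezePointer(buffer));
    return 0;
}

In the generated code this lets the block write CodeMem.Mem and CodeMem.Mask directly, replacing the runtime CALL to ARMv5::SetupCodeMem that the end of Comp_JumpTo used to emit.
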
diff --git a/src/ARMJIT_x64/ARMJIT_Compiler.cpp b/src/ARMJIT_x64/ARMJIT_Compiler.cpp
index 0fbcfda..ab13cb6 100644
--- a/src/ARMJIT_x64/ARMJIT_Compiler.cpp
+++ b/src/ARMJIT_x64/ARMJIT_Compiler.cpp
@@ -395,11 +395,6 @@ CompiledBlock Compiler::CompileBlock(ARM* cpu, FetchedInstr instrs[], int instrs
MOV(32, MDisp(RCPU, offsetof(ARM, R[15])), Imm32(R15));
MOV(32, MDisp(RCPU, offsetof(ARM, CodeCycles)), Imm32(CurInstr.CodeCycles));
MOV(32, MDisp(RCPU, offsetof(ARM, CurInstr)), Imm32(CurInstr.Instr));
- if (i == instrsCount - 1)
- {
- MOV(32, MDisp(RCPU, offsetof(ARM, NextInstr[0])), Imm32(CurInstr.NextInstr[0]));
- MOV(32, MDisp(RCPU, offsetof(ARM, NextInstr[1])), Imm32(CurInstr.NextInstr[1]));
- }
if (comp == NULL)
SaveCPSR();
diff --git a/src/ARMJIT_x64/ARMJIT_LoadStore.cpp b/src/ARMJIT_x64/ARMJIT_LoadStore.cpp
index 6386f8b..3b4cb7d 100644
--- a/src/ARMJIT_x64/ARMJIT_LoadStore.cpp
+++ b/src/ARMJIT_x64/ARMJIT_LoadStore.cpp
@@ -457,11 +457,6 @@ void Compiler::Comp_MemAccess(OpArg rd, bool signExtend, bool store, int size)
}
}
-void printStuff2(u32 a, u32 b)
-{
- printf("b %x %x\n", a, b);
-}
-
s32 Compiler::Comp_MemAccessBlock(int rn, BitSet16 regs, bool store, bool preinc, bool decrement, bool usermode)
{
int regsCount = regs.Count();