From c5c342c0091d9bf36500950a21585c5c98dd7d9d Mon Sep 17 00:00:00 2001 From: RSDuck Date: Sat, 22 Jun 2019 01:28:32 +0200 Subject: JIT: base all instructions are interpreted --- src/ARM_InstrInfo.cpp | 376 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 376 insertions(+) create mode 100644 src/ARM_InstrInfo.cpp (limited to 'src/ARM_InstrInfo.cpp') diff --git a/src/ARM_InstrInfo.cpp b/src/ARM_InstrInfo.cpp new file mode 100644 index 0000000..41c46e1 --- /dev/null +++ b/src/ARM_InstrInfo.cpp @@ -0,0 +1,376 @@ +#include "ARM_InstrInfo.h" + +#include + +namespace ARMInstrInfo +{ + +#define ak(x) ((x) << 13) + +enum { + A_Read0 = 1 << 0, + A_Read16 = 1 << 1, + A_Read8 = 1 << 2, + A_Read12 = 1 << 3, + + A_Write12 = 1 << 4, + A_Write16 = 1 << 5, + A_MemWriteback = 1 << 6, + + A_BranchAlways = 1 << 7, + + // for STRD/LDRD + A_Read12Double = 1 << 8, + A_Write12Double = 1 << 9, + + A_Link = 1 << 10, + + A_LDMSTM = 1 << 11, + + A_ARM9Only = 1 << 12, +}; + +#define A_BIOP A_Read16 +#define A_MONOOP 0 + +#define A_IMPLEMENT_ALU_OP(x,k) \ + const u32 A_##x##_IMM = A_Write12 | A_##k | ak(ak_##x##_IMM); \ + const u32 A_##x##_REG_LSL_IMM = A_Write12 | A_##k | A_Read0 | ak(ak_##x##_REG_LSL_IMM); \ + const u32 A_##x##_REG_LSR_IMM = A_Write12 | A_##k | A_Read0 | ak(ak_##x##_REG_LSR_IMM); \ + const u32 A_##x##_REG_ASR_IMM = A_Write12 | A_##k | A_Read0 | ak(ak_##x##_REG_ASR_IMM); \ + const u32 A_##x##_REG_ROR_IMM = A_Write12 | A_##k | A_Read0 | ak(ak_##x##_REG_ROR_IMM); \ + const u32 A_##x##_REG_LSL_REG = A_Write12 | A_##k | A_Read0 | A_Read8 | ak(ak_##x##_REG_LSL_REG); \ + const u32 A_##x##_REG_LSR_REG = A_Write12 | A_##k | A_Read0 | A_Read8 | ak(ak_##x##_REG_LSR_REG); \ + const u32 A_##x##_REG_ASR_REG = A_Write12 | A_##k | A_Read0 | A_Read8 | ak(ak_##x##_REG_ASR_REG); \ + const u32 A_##x##_REG_ROR_REG = A_Write12 | A_##k | A_Read0 | A_Read8 | ak(ak_##x##_REG_ROR_REG); \ + \ + const u32 A_##x##_IMM_S = A_Write12 | A_##k | ak(ak_##x##_IMM_S); \ + const u32 A_##x##_REG_LSL_IMM_S = A_Write12 | A_##k | A_Read0 | ak(ak_##x##_REG_LSL_IMM_S); \ + const u32 A_##x##_REG_LSR_IMM_S = A_Write12 | A_##k | A_Read0 | ak(ak_##x##_REG_LSR_IMM_S); \ + const u32 A_##x##_REG_ASR_IMM_S = A_Write12 | A_##k | A_Read0 | ak(ak_##x##_REG_ASR_IMM_S); \ + const u32 A_##x##_REG_ROR_IMM_S = A_Write12 | A_##k | A_Read0 | ak(ak_##x##_REG_ROR_IMM_S); \ + const u32 A_##x##_REG_LSL_REG_S = A_Write12 | A_##k | A_Read0 | A_Read8 | ak(ak_##x##_REG_LSL_REG_S); \ + const u32 A_##x##_REG_LSR_REG_S = A_Write12 | A_##k | A_Read0 | A_Read8 | ak(ak_##x##_REG_LSR_REG_S); \ + const u32 A_##x##_REG_ASR_REG_S = A_Write12 | A_##k | A_Read0 | A_Read8 | ak(ak_##x##_REG_ASR_REG_S); \ + const u32 A_##x##_REG_ROR_REG_S = A_Write12 | A_##k | A_Read0 | A_Read8 | ak(ak_##x##_REG_ROR_REG_S); + +A_IMPLEMENT_ALU_OP(AND,BIOP) +A_IMPLEMENT_ALU_OP(EOR,BIOP) +A_IMPLEMENT_ALU_OP(SUB,BIOP) +A_IMPLEMENT_ALU_OP(RSB,BIOP) +A_IMPLEMENT_ALU_OP(ADD,BIOP) +A_IMPLEMENT_ALU_OP(ADC,BIOP) +A_IMPLEMENT_ALU_OP(SBC,BIOP) +A_IMPLEMENT_ALU_OP(RSC,BIOP) +A_IMPLEMENT_ALU_OP(ORR,BIOP) +A_IMPLEMENT_ALU_OP(MOV,MONOOP) +A_IMPLEMENT_ALU_OP(BIC,BIOP) +A_IMPLEMENT_ALU_OP(MVN,MONOOP) + +const u32 A_MOV_REG_LSL_IMM_DBG = A_MOV_REG_LSL_IMM; + +#define A_IMPLEMENT_ALU_TEST(x) \ + const u32 A_##x##_IMM = A_Read16 | A_Read0 | ak(ak_##x##_IMM); \ + const u32 A_##x##_REG_LSL_IMM = A_Read16 | A_Read0 | ak(ak_##x##_REG_LSL_IMM); \ + const u32 A_##x##_REG_LSR_IMM = A_Read16 | A_Read0 | ak(ak_##x##_REG_LSR_IMM); \ + const u32 A_##x##_REG_ASR_IMM = A_Read16 | A_Read0 | ak(ak_##x##_REG_ASR_IMM); 
\ + const u32 A_##x##_REG_ROR_IMM = A_Read16 | A_Read0 | ak(ak_##x##_REG_ROR_IMM); \ + const u32 A_##x##_REG_LSL_REG = A_Read16 | A_Read0 | A_Read8 | ak(ak_##x##_REG_LSL_REG); \ + const u32 A_##x##_REG_LSR_REG = A_Read16 | A_Read0 | A_Read8 | ak(ak_##x##_REG_LSR_REG); \ + const u32 A_##x##_REG_ASR_REG = A_Read16 | A_Read0 | A_Read8 | ak(ak_##x##_REG_ASR_REG); \ + const u32 A_##x##_REG_ROR_REG = A_Read16 | A_Read0 | A_Read8 | ak(ak_##x##_REG_ROR_REG); + +A_IMPLEMENT_ALU_TEST(TST) +A_IMPLEMENT_ALU_TEST(TEQ) +A_IMPLEMENT_ALU_TEST(CMP) +A_IMPLEMENT_ALU_TEST(CMN) + +const u32 A_MUL = A_Write16 | A_Read0 | A_Read8 | ak(ak_MUL); +const u32 A_MLA = A_Write16 | A_Read0 | A_Read8 | A_Read12 | ak(ak_MLA); +const u32 A_UMULL = A_Write16 | A_Write12 | A_Read0 | A_Read8 | ak(ak_UMULL); +const u32 A_UMLAL = A_Write16 | A_Write12 | A_Read16 | A_Read12 | A_Read0 | A_Read8 | ak(ak_UMLAL); +const u32 A_SMULL = A_Write16 | A_Write12 | A_Read0 | A_Read8 | ak(ak_SMULL); +const u32 A_SMLAL = A_Write16 | A_Write12 | A_Read16 | A_Read12 | A_Read0 | A_Read8 | ak(ak_SMLAL); +const u32 A_SMLAxy = A_Write16 | A_Read0 | A_Read8 | A_Read12 | ak(ak_SMLALxy); +const u32 A_SMLAWy = A_Write16 | A_Read0 | A_Read8 | A_Read12 | ak(ak_SMLAWy); +const u32 A_SMULWy = A_Write16 | A_Read0 | A_Read8 | ak(ak_SMULWy); +const u32 A_SMLALxy = A_Write16 | A_Write12 | A_Read16 | A_Read12 | A_Read0 | A_Read8 | ak(ak_SMLALxy); +const u32 A_SMULxy = A_Write16 | A_Read0 | A_Read8 | ak(ak_SMULxy); + +const u32 A_CLZ = A_Write12 | A_Read0 | A_ARM9Only | ak(ak_CLZ); + +const u32 A_QADD = A_Write12 | A_Read0 | A_Read16 | A_ARM9Only | ak(ak_QADD); +const u32 A_QSUB = A_Write12 | A_Read0 | A_Read16 | A_ARM9Only | ak(ak_QSUB); +const u32 A_QDADD = A_Write12 | A_Read0 | A_Read16 | A_ARM9Only | ak(ak_QDADD); +const u32 A_QDSUB = A_Write12 | A_Read0 | A_Read16 | A_ARM9Only | ak(ak_QDSUB); + +#define A_LDR A_Write12 +#define A_STR A_Read12 + +#define A_IMPLEMENT_WB_LDRSTR(x,k) \ + const u32 A_##x##_IMM = A_##k | A_Read16 | A_MemWriteback | ak(ak_##x##_IMM); \ + const u32 A_##x##_REG_LSL = A_##k | A_Read16 | A_MemWriteback | A_Read0 | ak(ak_##x##_REG_LSL); \ + const u32 A_##x##_REG_LSR = A_##k | A_Read16 | A_MemWriteback | A_Read0 | ak(ak_##x##_REG_LSR); \ + const u32 A_##x##_REG_ASR = A_##k | A_Read16 | A_MemWriteback | A_Read0 | ak(ak_##x##_REG_ASR); \ + const u32 A_##x##_REG_ROR = A_##k | A_Read16 | A_MemWriteback | A_Read0 | ak(ak_##x##_REG_ROR); \ + \ + const u32 A_##x##_POST_IMM = A_##k | A_Read16 | A_Write16 | ak(ak_##x##_POST_IMM); \ + const u32 A_##x##_POST_REG_LSL = A_##k | A_Read16 | A_Write16 | A_Read0 | ak(ak_##x##_POST_REG_LSL); \ + const u32 A_##x##_POST_REG_LSR = A_##k | A_Read16 | A_Write16 | A_Read0 | ak(ak_##x##_POST_REG_LSR); \ + const u32 A_##x##_POST_REG_ASR = A_##k | A_Read16 | A_Write16 | A_Read0 | ak(ak_##x##_POST_REG_ASR); \ + const u32 A_##x##_POST_REG_ROR = A_##k | A_Read16 | A_Write16 | A_Read0 | ak(ak_##x##_POST_REG_ROR); + +A_IMPLEMENT_WB_LDRSTR(STR,STR) +A_IMPLEMENT_WB_LDRSTR(STRB,STR) +A_IMPLEMENT_WB_LDRSTR(LDR,LDR) +A_IMPLEMENT_WB_LDRSTR(LDRB,LDR) + +#define A_LDRD A_Write12Double +#define A_STRD A_Read12Double + +#define A_IMPLEMENT_HD_LDRSTR(x,k) \ + const u32 A_##x##_IMM = A_##k | A_Read16 | A_Write16 | ak(ak_##x##_IMM); \ + const u32 A_##x##_REG = A_##k | A_Read16 | A_Write16 | A_Read0 | ak(ak_##x##_REG); \ + const u32 A_##x##_POST_IMM = A_##k | A_Read16 | A_Write16 | ak(ak_##x##_POST_IMM); \ + const u32 A_##x##_POST_REG = A_##k | A_Read16 | A_Write16 | A_Read0 | ak(ak_##x##_POST_REG); + 
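+// How an entry is consumed (an illustrative sketch; every name used here is
+// defined in this file): the low bits of an entry carry the register-usage
+// flags and the instruction kind is packed above them (ak(x) == x << 13,
+// tk(x) == x << 16), so Decode() below can unpack both from a single u32:
+//
+//     u32 entry = A_LDR_IMM;             // A_Write12 | A_Read16 | A_MemWriteback | ak(ak_LDR_IMM)
+//     u32 kind = (entry >> 13) & 0x1FF;  // yields ak_LDR_IMM again
+//     bool writesRd = entry & A_Write12; // Rd (bits 12-15) is a destination
+//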
+A_IMPLEMENT_HD_LDRSTR(STRH,STR) +A_IMPLEMENT_HD_LDRSTR(LDRD,LDRD) +A_IMPLEMENT_HD_LDRSTR(STRD,STRD) +A_IMPLEMENT_HD_LDRSTR(LDRH,LDR) +A_IMPLEMENT_HD_LDRSTR(LDRSB,LDR) +A_IMPLEMENT_HD_LDRSTR(LDRSH,LDR) + +const u32 A_SWP = A_Write12 | A_Read16 | A_Read0 | ak(ak_SWP); +const u32 A_SWPB = A_Write12 | A_Read16 | A_Read0 | ak(ak_SWPB); + +const u32 A_LDM = A_Read16 | A_LDMSTM | ak(ak_LDM); +const u32 A_STM = A_Read16 | A_LDMSTM | ak(ak_STM); + +const u32 A_B = A_BranchAlways | ak(ak_B); +const u32 A_BL = A_BranchAlways | A_Link | ak(ak_BL); +const u32 A_BLX_IMM = A_BranchAlways | A_Link | ak(ak_BLX_IMM); +const u32 A_BX = A_BranchAlways | A_Read0 | ak(ak_BX); +const u32 A_BLX_REG = A_BranchAlways | A_Link | A_Read0 | ak(ak_BLX_REG); + +const u32 A_UNK = A_BranchAlways | A_Link | ak(ak_UNK); +const u32 A_MSR_IMM = A_ARM9Only | ak(ak_MSR_IMM); +const u32 A_MSR_REG = A_Read0 | A_ARM9Only | ak(ak_MSR_REG); +const u32 A_MRS = A_Write12 | A_ARM9Only | ak(ak_MRS); +const u32 A_MCR = A_Read12 | A_ARM9Only | ak(ak_MCR); +const u32 A_MRC = A_Write12 | A_ARM9Only | ak(ak_MRC); +const u32 A_SVC = A_BranchAlways | A_Link | ak(ak_SVC); + +// THUMB + +#define tk(x) ((x) << 16) + +enum { + T_Read0 = 1 << 0, + T_Read3 = 1 << 1, + T_Read6 = 1 << 2, + T_Read8 = 1 << 3, + + T_Write0 = 1 << 4, + T_Write8 = 1 << 5, + + T_ReadHi0 = 1 << 6, + T_ReadHi3 = 1 << 7, + T_WriteHi0 = 1 << 8, + + T_ReadR13 = 1 << 9, + T_WriteR13 = 1 << 10, + T_ReadR15 = 1 << 11, + + T_BranchAlways = 1 << 12, + T_ReadR14 = 1 << 13, + T_WriteR14 = 1 << 14, + + T_PopPC = 1 << 15 +}; + +const u32 T_LSL_IMM = T_Write0 | T_Read3 | tk(tk_LSL_IMM); +const u32 T_LSR_IMM = T_Write0 | T_Read3 | tk(tk_LSR_IMM); +const u32 T_ASR_IMM = T_Write0 | T_Read3 | tk(tk_ASR_IMM); + +const u32 T_ADD_REG_ = T_Write0 | T_Read3 | T_Read6 | tk(tk_ADD_REG_); +const u32 T_SUB_REG_ = T_Write0 | T_Read3 | T_Read6 | tk(tk_SUB_REG_); +const u32 T_ADD_IMM_ = T_Write0 | T_Read3 | tk(tk_ADD_IMM_); +const u32 T_SUB_IMM_ = T_Write0 | T_Read3 | tk(tk_SUB_IMM_); + +const u32 T_MOV_IMM = T_Write8 | tk(tk_MOV_IMM); +const u32 T_CMP_IMM = T_Write8 | tk(tk_CMP_IMM); +const u32 T_ADD_IMM = T_Write8 | T_Read8 | tk(tk_ADD_IMM); +const u32 T_SUB_IMM = T_Write8 | T_Read8 | tk(tk_SUB_IMM); + +const u32 T_AND_REG = T_Write0 | T_Read0 | T_Read3 | tk(tk_AND_REG); +const u32 T_EOR_REG = T_Write0 | T_Read0 | T_Read3 | tk(tk_EOR_REG); +const u32 T_LSL_REG = T_Write0 | T_Read0 | T_Read3 | tk(tk_LSL_REG); +const u32 T_LSR_REG = T_Write0 | T_Read0 | T_Read3 | tk(tk_LSR_REG); +const u32 T_ASR_REG = T_Write0 | T_Read0 | T_Read3 | tk(tk_ASR_REG); +const u32 T_ADC_REG = T_Write0 | T_Read0 | T_Read3 | tk(tk_ADC_REG); +const u32 T_SBC_REG = T_Write0 | T_Read0 | T_Read3 | tk(tk_SBC_REG); +const u32 T_ROR_REG = T_Write0 | T_Read0 | T_Read3 | tk(tk_ROR_REG); +const u32 T_TST_REG = T_Read0 | T_Read3 | tk(tk_TST_REG); +const u32 T_NEG_REG = T_Write0 | T_Read3 | tk(tk_NEG_REG); +const u32 T_CMP_REG = T_Read0 | T_Read3 | tk(tk_CMP_REG); +const u32 T_CMN_REG = T_Read0 | T_Read3 | tk(tk_CMN_REG); +const u32 T_ORR_REG = T_Write0 | T_Read0 | T_Read3 | tk(tk_ORR_REG); +const u32 T_MUL_REG = T_Write0 | T_Read0 | T_Read3 | tk(tk_MUL_REG); +const u32 T_BIC_REG = T_Write0 | T_Read0 | T_Read3 | tk(tk_BIC_REG); +const u32 T_MVN_REG = T_Write0 | T_Read3 | tk(tk_MVN_REG); + +const u32 T_ADD_HIREG = T_WriteHi0 | T_ReadHi0 | T_ReadHi3 | tk(tk_ADD_HIREG); +const u32 T_CMP_HIREG = T_ReadHi0 | T_ReadHi3 | tk(tk_CMP_HIREG); +const u32 T_MOV_HIREG = T_WriteHi0 | T_ReadHi3 | tk(tk_MOV_HIREG); + +const u32 T_ADD_PCREL = T_Write8 | 
T_ReadR15 | tk(tk_ADD_PCREL); +const u32 T_ADD_SPREL = T_Write8 | T_ReadR13 | tk(tk_ADD_SPREL); +const u32 T_ADD_SP = T_WriteR13 | tk(tk_ADD_SP); + +const u32 T_LDR_PCREL = T_Write8 | tk(tk_LDR_PCREL); + +const u32 T_STR_REG = T_Read0 | T_Read3 | T_Read6 | tk(tk_STR_REG); +const u32 T_STRB_REG = T_Read0 | T_Read3 | T_Read6 | tk(tk_STRB_REG); +const u32 T_LDR_REG = T_Write0 | T_Read3 | T_Read6 | tk(tk_LDR_REG); +const u32 T_LDRB_REG = T_Write0 | T_Read3 | T_Read6 | tk(tk_LDRB_REG); +const u32 T_STRH_REG = T_Read0 | T_Read3 | T_Read6 | tk(tk_STRH_REG); +const u32 T_LDRSB_REG = T_Write0 | T_Read3 | T_Read6 | tk(tk_LDRSB_REG); +const u32 T_LDRH_REG = T_Write0 | T_Read3 | T_Read6 | tk(tk_LDRH_REG); +const u32 T_LDRSH_REG = T_Write0 | T_Read3 | T_Read6 | tk(tk_LDRSH_REG); + +const u32 T_STR_IMM = T_Read0 | T_Read3 | tk(tk_STR_IMM); +const u32 T_LDR_IMM = T_Write0 | T_Read3 | tk(tk_LDR_IMM); +const u32 T_STRB_IMM = T_Read0 | T_Read3 | tk(tk_STRB_IMM); +const u32 T_LDRB_IMM = T_Write0 | T_Read3 | tk(tk_LDRB_IMM); +const u32 T_STRH_IMM = T_Read0 | T_Read3 | tk(tk_STRH_IMM); +const u32 T_LDRH_IMM = T_Write0 | T_Read3 | tk(tk_LDRH_IMM); + +const u32 T_STR_SPREL = T_Read8 | T_ReadR13 | tk(tk_STR_SPREL); +const u32 T_LDR_SPREL = T_Write8 | T_ReadR13 | tk(tk_LDR_SPREL); + +const u32 T_PUSH = T_ReadR15 | T_ReadR13 | T_WriteR13 | tk(tk_PUSH); +const u32 T_POP = T_PopPC | T_ReadR13 | T_WriteR13 | tk(tk_POP); + +const u32 T_LDMIA = T_Read8 | T_Write8 | tk(tk_LDMIA); +const u32 T_STMIA = T_Read8 | T_Write8 | tk(tk_STMIA); + +const u32 T_BCOND = T_BranchAlways | tk(tk_BCOND); +const u32 T_BX = T_BranchAlways | T_ReadHi3 | tk(tk_BX); +const u32 T_BLX_REG = T_BranchAlways | T_ReadR15 | T_WriteR14 | T_ReadHi3 | tk(tk_BLX_REG); +const u32 T_B = T_BranchAlways | tk(tk_B); +const u32 T_BL_LONG_1 = T_WriteR14 | T_ReadR15 | tk(tk_BL_LONG_1); +const u32 T_BL_LONG_2 = T_BranchAlways | T_ReadR14 | T_WriteR14 | T_ReadR15 | tk(tk_BL_LONG_2); + +const u32 T_UNK = T_BranchAlways | T_WriteR14 | tk(tk_UNK); +const u32 T_SVC = T_BranchAlways | T_WriteR14 | T_ReadR15 | tk(tk_SVC); + +#define INSTRFUNC_PROTO(x) u32 x +#include "ARM_InstrTable.h" +#undef INSTRFUNC_PROTO + +Info Decode(bool thumb, u32 num, u32 instr) +{ + Info res = {0}; + if (thumb) + { + u32 data = THUMBInstrTable[(instr >> 6) & 0x3FF]; + + if (data & T_Read0) + res.SrcRegs |= 1 << (instr & 0x7); + if (data & T_Read3) + res.SrcRegs |= 1 << ((instr >> 3) & 0x7); + if (data & T_Read6) + res.SrcRegs |= 1 << ((instr >> 6) & 0x7); + if (data & T_Read8) + res.SrcRegs |= 1 << ((instr >> 8) & 0x7); + + if (data & T_Write0) + res.DstRegs |= 1 << (instr & 0x7); + if (data & T_Write8) + res.DstRegs |= 1 << ((instr >> 8) & 0x7); + + if (data & T_ReadHi0) + res.SrcRegs |= 1 << ((instr & 0x7) | ((instr >> 4) & 0x8)); + if (data & T_ReadHi3) + res.SrcRegs |= 1 << ((instr >> 3) & 0xF); + if (data & T_WriteHi0) + res.DstRegs |= 1 << ((instr & 0x7) | ((instr >> 4) & 0x8)); + + if (data & T_ReadR13) + res.SrcRegs |= (1 << 13); + if (data & T_WriteR13) + res.DstRegs |= (1 << 13); + if (data & T_ReadR15) + res.SrcRegs |= (1 << 15); + + if (data & T_BranchAlways) + res.DstRegs |= (1 << 15); + + if (data & T_PopPC && instr & (1 << 8)) + res.DstRegs |= 1 << 15; + + res.Kind = (data >> 16) & 0x3F; + + return res; + } + else + { + u32 data = ARMInstrTable[((instr >> 4) & 0xF) | ((instr >> 16) & 0xFF0)]; + if ((instr & 0xFE000000) == 0xFA000000) + data = A_BLX_IMM; + + if (data & A_ARM9Only && num != 0) + data |= A_BranchAlways | A_Link; + + if (data & A_Read0) + res.SrcRegs |= 1 << 
(instr & 0xF); + if (data & A_Read16) + res.SrcRegs |= 1 << ((instr >> 16) & 0xF); + if (data & A_Read8) + res.SrcRegs |= 1 << ((instr >> 8) & 0xF); + if (data & A_Read12) + res.SrcRegs |= 1 << ((instr >> 12) & 0xF); + + if (data & A_Write12) + res.DstRegs |= 1 << ((instr >> 12) & 0xF); + if (data & A_Write16) + res.DstRegs |= 1 << ((instr >> 16) & 0xF); + + if (data & A_MemWriteback && instr & (1 << 21)) + res.DstRegs |= 1 << ((instr >> 16) & 0xF); + + if (data & A_BranchAlways) + res.DstRegs |= 1 << 15; + + if (data & A_Read12Double) + { + res.SrcRegs |= 1 << ((instr >> 12) & 0xF); + res.SrcRegs |= 1 << (((instr >> 12) & 0xF) + 1); + } + if (data & A_Write12Double) + { + res.DstRegs |= 1 << ((instr >> 12) & 0xF); + res.DstRegs |= 1 << (((instr >> 12) & 0xF) + 1); + } + + if (data & A_Link) + { + res.DstRegs |= 1 << 14; + res.SrcRegs |= 1 << 15; + } + + if (data & A_LDMSTM) + { + res.DstRegs |= instr & (!!(instr & (1 << 20)) << 15); + if (instr & (1 << 21)) + res.DstRegs |= 1 << ((instr >> 16) & 0xF); + } + + res.Kind = (data >> 13) & 0x1FF; + + return res; + } +} + +} -- cgit v1.2.3 From 2c44bf927c230efbbd1b27920de062ddcc631fcf Mon Sep 17 00:00:00 2001 From: RSDuck Date: Sat, 6 Jul 2019 01:48:42 +0200 Subject: JIT: most mem instructions working + branching --- src/ARM.cpp | 10 +- src/ARMJIT.cpp | 7 +- src/ARMJIT.h | 2 +- src/ARMJIT_RegCache.h | 2 +- src/ARMJIT_x64/ARMJIT_ALU.cpp | 322 ++++++++------- src/ARMJIT_x64/ARMJIT_Compiler.cpp | 145 ++++--- src/ARMJIT_x64/ARMJIT_Compiler.h | 42 +- src/ARMJIT_x64/ARMJIT_LoadStore.cpp | 805 +++++++++++++++--------------------- src/ARM_InstrInfo.cpp | 2 +- src/NDS.cpp | 2 + 10 files changed, 653 insertions(+), 686 deletions(-) (limited to 'src/ARM_InstrInfo.cpp') diff --git a/src/ARM.cpp b/src/ARM.cpp index 420257a..f7ca26d 100644 --- a/src/ARM.cpp +++ b/src/ARM.cpp @@ -522,8 +522,9 @@ void ARMv5::Execute() ARMJIT::CompiledBlock block = ARMJIT::LookUpBlock(0, R[15] - ((CPSR&0x20)?2:4)); if (block == NULL) - block = ARMJIT::CompileBlock(this); - Cycles += block(); + ARMJIT::CompileBlock(this); + else + Cycles += block(); // TODO optimize this shit!!! if (Halted) @@ -607,8 +608,9 @@ void ARMv4::Execute() ARMJIT::CompiledBlock block = ARMJIT::LookUpBlock(1, R[15] - ((CPSR&0x20)?2:4)); if (block == NULL) - block = ARMJIT::CompileBlock(this); - Cycles += block(); + ARMJIT::CompileBlock(this); + else + Cycles += block(); // TODO optimize this shit!!! if (Halted) diff --git a/src/ARMJIT.cpp b/src/ARMJIT.cpp index 4da781c..6afa967 100644 --- a/src/ARMJIT.cpp +++ b/src/ARMJIT.cpp @@ -121,12 +121,13 @@ void DeInit() delete compiler; } -CompiledBlock CompileBlock(ARM* cpu) +void CompileBlock(ARM* cpu) { bool thumb = cpu->CPSR & 0x20; FetchedInstr instrs[12]; int i = 0; + u32 r15Initial = cpu->R[15]; u32 r15 = cpu->R[15]; u32 nextInstr[2] = {cpu->NextInstr[0], cpu->NextInstr[1]}; //printf("block %x %d\n", r15, thumb); @@ -169,9 +170,7 @@ CompiledBlock CompileBlock(ARM* cpu) CompiledBlock block = compiler->CompileBlock(cpu, instrs, i); - InsertBlock(cpu->Num, cpu->R[15] - (thumb ? 2 : 4), block); - - return block; + InsertBlock(cpu->Num, r15Initial - (thumb ? 
2 : 4), block); } void ResetBlocks() diff --git a/src/ARMJIT.h b/src/ARMJIT.h index 45bb4ed..71188f9 100644 --- a/src/ARMJIT.h +++ b/src/ARMJIT.h @@ -109,7 +109,7 @@ inline void InsertBlock(u32 num, u32 addr, CompiledBlock func) void Init(); void DeInit(); -CompiledBlock CompileBlock(ARM* cpu); +void CompileBlock(ARM* cpu); void ResetBlocks(); diff --git a/src/ARMJIT_RegCache.h b/src/ARMJIT_RegCache.h index ea9fb30..556d27b 100644 --- a/src/ARMJIT_RegCache.h +++ b/src/ARMJIT_RegCache.h @@ -114,7 +114,7 @@ public: for (int reg : needToBeLoaded) LoadRegister(reg); } - DirtyRegs |= Instr.Info.DstRegs; + DirtyRegs |= Instr.Info.DstRegs & ~(1 << 15); } static const Reg NativeRegAllocOrder[]; diff --git a/src/ARMJIT_x64/ARMJIT_ALU.cpp b/src/ARMJIT_x64/ARMJIT_ALU.cpp index 6294e1d..c22751e 100644 --- a/src/ARMJIT_x64/ARMJIT_ALU.cpp +++ b/src/ARMJIT_x64/ARMJIT_ALU.cpp @@ -71,30 +71,30 @@ void Compiler::Comp_CmpOp(int op, Gen::OpArg rn, Gen::OpArg op2, bool carryUsed) { switch (op) { - case 0: // TST - if (rn.IsImm()) - { - MOV(32, R(RSCRATCH3), rn); - rn = R(RSCRATCH3); - } - TEST(32, rn, op2); - break; - case 1: // TEQ + case 0: // TST + if (rn.IsImm()) + { MOV(32, R(RSCRATCH3), rn); - XOR(32, R(RSCRATCH3), op2); - break; - case 2: // CMP - if (rn.IsImm()) - { - MOV(32, R(RSCRATCH3), rn); - rn = R(RSCRATCH3); - } - CMP(32, rn, op2); - break; - case 3: // CMN + rn = R(RSCRATCH3); + } + TEST(32, rn, op2); + break; + case 1: // TEQ + MOV(32, R(RSCRATCH3), rn); + XOR(32, R(RSCRATCH3), op2); + break; + case 2: // CMP + if (rn.IsImm()) + { MOV(32, R(RSCRATCH3), rn); - ADD(32, R(RSCRATCH3), op2); - break; + rn = R(RSCRATCH3); + } + CMP(32, rn, op2); + break; + case 3: // CMN + MOV(32, R(RSCRATCH3), rn); + ADD(32, R(RSCRATCH3), op2); + break; } Comp_RetriveFlags(op == 2, op >= 2, carryUsed); @@ -103,38 +103,38 @@ void Compiler::Comp_CmpOp(int op, Gen::OpArg rn, Gen::OpArg op2, bool carryUsed) // also calculates cycles OpArg Compiler::A_Comp_GetALUOp2(bool S, bool& carryUsed) { - if (CurrentInstr.Instr & (1 << 25)) + if (CurInstr.Instr & (1 << 25)) { Comp_AddCycles_C(); carryUsed = false; - return Imm32(ROR(CurrentInstr.Instr & 0xFF, (CurrentInstr.Instr >> 7) & 0x1E)); + return Imm32(ROR(CurInstr.Instr & 0xFF, (CurInstr.Instr >> 7) & 0x1E)); } else { - int op = (CurrentInstr.Instr >> 5) & 0x3; - if (CurrentInstr.Instr & (1 << 4)) + int op = (CurInstr.Instr >> 5) & 0x3; + if (CurInstr.Instr & (1 << 4)) { Comp_AddCycles_CI(1); - OpArg rm = MapReg(CurrentInstr.A_Reg(0)); - if (rm.IsImm() && CurrentInstr.A_Reg(0) == 15) + OpArg rm = MapReg(CurInstr.A_Reg(0)); + if (rm.IsImm() && CurInstr.A_Reg(0) == 15) rm = Imm32(rm.Imm32() + 4); - return Comp_RegShiftReg(op, MapReg(CurrentInstr.A_Reg(8)), rm, S, carryUsed); + return Comp_RegShiftReg(op, MapReg(CurInstr.A_Reg(8)), rm, S, carryUsed); } else { Comp_AddCycles_C(); - return Comp_RegShiftImm(op, (CurrentInstr.Instr >> 7) & 0x1F, - MapReg(CurrentInstr.A_Reg(0)), S, carryUsed); + return Comp_RegShiftImm(op, (CurInstr.Instr >> 7) & 0x1F, + MapReg(CurInstr.A_Reg(0)), S, carryUsed); } } } void Compiler::A_Comp_CmpOp() { - u32 op = (CurrentInstr.Instr >> 21) & 0xF; + u32 op = (CurInstr.Instr >> 21) & 0xF; bool carryUsed; - OpArg rn = MapReg(CurrentInstr.A_Reg(16)); + OpArg rn = MapReg(CurInstr.A_Reg(16)); OpArg op2 = A_Comp_GetALUOp2((1 << op) & 0xF303, carryUsed); Comp_CmpOp(op - 0x8, rn, op2, carryUsed); @@ -142,12 +142,12 @@ void Compiler::A_Comp_CmpOp() void Compiler::A_Comp_Arith() { - bool S = CurrentInstr.Instr & (1 << 20); - u32 op = (CurrentInstr.Instr >> 21) 
& 0xF; + bool S = CurInstr.Instr & (1 << 20); + u32 op = (CurInstr.Instr >> 21) & 0xF; bool carryUsed; - OpArg rn = MapReg(CurrentInstr.A_Reg(16)); - OpArg rd = MapReg(CurrentInstr.A_Reg(12)); + OpArg rn = MapReg(CurInstr.A_Reg(16)); + OpArg rd = MapReg(CurInstr.A_Reg(12)); OpArg op2 = A_Comp_GetALUOp2(S && (1 << op) & 0xF303, carryUsed); u32 sFlag = S ? opSetsFlags : 0; @@ -155,13 +155,13 @@ void Compiler::A_Comp_Arith() { case 0x0: // AND Comp_ArithTriOp(AND, rd, rn, op2, carryUsed, opSymmetric|sFlag); - return; + break; case 0x1: // EOR Comp_ArithTriOp(XOR, rd, rn, op2, carryUsed, opSymmetric|sFlag); - return; + break; case 0x2: // SUB Comp_ArithTriOp(SUB, rd, rn, op2, carryUsed, sFlag|opRetriveCV|opInvertCarry); - return; + break; case 0x3: // RSB if (op2.IsZero()) { @@ -173,41 +173,44 @@ void Compiler::A_Comp_Arith() } else Comp_ArithTriOpReverse(SUB, rd, rn, op2, carryUsed, sFlag|opRetriveCV|opInvertCarry); - return; + break; case 0x4: // ADD Comp_ArithTriOp(ADD, rd, rn, op2, carryUsed, opSymmetric|sFlag|opRetriveCV); - return; + break; case 0x5: // ADC Comp_ArithTriOp(ADC, rd, rn, op2, carryUsed, opSymmetric|sFlag|opRetriveCV|opSyncCarry); - return; + break; case 0x6: // SBC Comp_ArithTriOp(SBB, rd, rn, op2, carryUsed, opSymmetric|sFlag|opRetriveCV|opSyncCarry|opInvertCarry); - return; + break; case 0x7: // RSC Comp_ArithTriOpReverse(SBB, rd, rn, op2, carryUsed, sFlag|opRetriveCV|opInvertCarry|opSyncCarry); - return; + break; case 0xC: // ORR Comp_ArithTriOp(OR, rd, rn, op2, carryUsed, opSymmetric|sFlag); - return; + break; case 0xE: // BIC Comp_ArithTriOp(AND, rd, rn, op2, carryUsed, sFlag|opSymmetric|opInvertOp2); - return; + break; default: assert("unimplemented"); } + + if (CurInstr.A_Reg(12) == 15) + Comp_JumpTo(rd.GetSimpleReg(), S); } void Compiler::A_Comp_MovOp() { bool carryUsed; - bool S = CurrentInstr.Instr & (1 << 20); + bool S = CurInstr.Instr & (1 << 20); OpArg op2 = A_Comp_GetALUOp2(S, carryUsed); - OpArg rd = MapReg(CurrentInstr.A_Reg(12)); + OpArg rd = MapReg(CurInstr.A_Reg(12)); if (rd != op2) MOV(32, rd, op2); - if (((CurrentInstr.Instr >> 21) & 0xF) == 0xF) + if (((CurInstr.Instr >> 21) & 0xF) == 0xF) NOT(32, rd); if (S) @@ -215,6 +218,9 @@ void Compiler::A_Comp_MovOp() TEST(32, rd, rd); Comp_RetriveFlags(false, false, carryUsed); } + + if (CurInstr.A_Reg(12) == 15) + Comp_JumpTo(rd.GetSimpleReg(), S); } void Compiler::Comp_RetriveFlags(bool sign, bool retriveCV, bool carryUsed) @@ -230,7 +236,7 @@ void Compiler::Comp_RetriveFlags(bool sign, bool retriveCV, bool carryUsed) } if (carryUsed == 983298) - printf("etwas ist faul im lande daenemark %x\n", CurrentInstr.Instr); + printf("etwas ist faul im lande daenemark %x\n", CurInstr.Instr); SETcc(CC_S, R(RSCRATCH)); SETcc(CC_Z, R(RSCRATCH3)); @@ -324,61 +330,61 @@ OpArg Compiler::Comp_RegShiftImm(int op, int amount, OpArg rm, bool S, bool& car switch (op) { - case 0: // LSL - if (amount > 0) - { - MOV(32, R(RSCRATCH), rm); - SHL(32, R(RSCRATCH), Imm8(amount)); - if (S) - SETcc(CC_C, R(RSCRATCH2)); - - return R(RSCRATCH); - } - else - { - carryUsed = false; - return rm; - } - case 1: // LSR - if (amount > 0) - { - MOV(32, R(RSCRATCH), rm); - SHR(32, R(RSCRATCH), Imm8(amount)); - if (S) - SETcc(CC_C, R(RSCRATCH2)); - return R(RSCRATCH); - } - else - { - if (S) - { - MOV(32, R(RSCRATCH2), rm); - SHR(32, R(RSCRATCH2), Imm8(31)); - } - return Imm32(0); - } - case 2: // ASR + case 0: // LSL + if (amount > 0) + { MOV(32, R(RSCRATCH), rm); - SAR(32, R(RSCRATCH), Imm8(amount ? 
amount : 31)); + SHL(32, R(RSCRATCH), Imm8(amount)); if (S) - { - if (amount == 0) - BT(32, rm, Imm8(31)); SETcc(CC_C, R(RSCRATCH2)); - } + return R(RSCRATCH); - case 3: // ROR + } + else + { + carryUsed = false; + return rm; + } + case 1: // LSR + if (amount > 0) + { MOV(32, R(RSCRATCH), rm); - if (amount > 0) - ROR_(32, R(RSCRATCH), Imm8(amount)); - else - { - BT(32, R(RCPSR), Imm8(29)); - RCR(32, R(RSCRATCH), Imm8(1)); - } + SHR(32, R(RSCRATCH), Imm8(amount)); if (S) SETcc(CC_C, R(RSCRATCH2)); return R(RSCRATCH); + } + else + { + if (S) + { + MOV(32, R(RSCRATCH2), rm); + SHR(32, R(RSCRATCH2), Imm8(31)); + } + return Imm32(0); + } + case 2: // ASR + MOV(32, R(RSCRATCH), rm); + SAR(32, R(RSCRATCH), Imm8(amount ? amount : 31)); + if (S) + { + if (amount == 0) + BT(32, rm, Imm8(31)); + SETcc(CC_C, R(RSCRATCH2)); + } + return R(RSCRATCH); + case 3: // ROR + MOV(32, R(RSCRATCH), rm); + if (amount > 0) + ROR_(32, R(RSCRATCH), Imm8(amount)); + else + { + BT(32, R(RCPSR), Imm8(29)); + RCR(32, R(RSCRATCH), Imm8(1)); + } + if (S) + SETcc(CC_C, R(RSCRATCH2)); + return R(RSCRATCH); } assert(false); @@ -386,11 +392,11 @@ OpArg Compiler::Comp_RegShiftImm(int op, int amount, OpArg rm, bool S, bool& car void Compiler::T_Comp_ShiftImm() { - OpArg rd = MapReg(CurrentInstr.T_Reg(0)); - OpArg rs = MapReg(CurrentInstr.T_Reg(3)); + OpArg rd = MapReg(CurInstr.T_Reg(0)); + OpArg rs = MapReg(CurInstr.T_Reg(3)); - int op = (CurrentInstr.Instr >> 11) & 0x3; - int amount = (CurrentInstr.Instr >> 6) & 0x1F; + int op = (CurInstr.Instr >> 11) & 0x3; + int amount = (CurInstr.Instr >> 6) & 0x1F; Comp_AddCycles_C(); @@ -406,12 +412,12 @@ void Compiler::T_Comp_ShiftImm() void Compiler::T_Comp_AddSub_() { - OpArg rd = MapReg(CurrentInstr.T_Reg(0)); - OpArg rs = MapReg(CurrentInstr.T_Reg(3)); + OpArg rd = MapReg(CurInstr.T_Reg(0)); + OpArg rs = MapReg(CurInstr.T_Reg(3)); - int op = (CurrentInstr.Instr >> 9) & 0x3; + int op = (CurInstr.Instr >> 9) & 0x3; - OpArg rn = op >= 2 ? Imm32((CurrentInstr.Instr >> 6) & 0x7) : MapReg(CurrentInstr.T_Reg(6)); + OpArg rn = op >= 2 ? 
Imm32((CurInstr.Instr >> 6) & 0x7) : MapReg(CurInstr.T_Reg(6)); Comp_AddCycles_C(); @@ -423,38 +429,38 @@ void Compiler::T_Comp_AddSub_() void Compiler::T_Comp_ALU_Imm8() { - OpArg rd = MapReg(CurrentInstr.T_Reg(8)); + OpArg rd = MapReg(CurInstr.T_Reg(8)); - u32 op = (CurrentInstr.Instr >> 11) & 0x3; - OpArg imm = Imm32(CurrentInstr.Instr & 0xFF); + u32 op = (CurInstr.Instr >> 11) & 0x3; + OpArg imm = Imm32(CurInstr.Instr & 0xFF); Comp_AddCycles_C(); switch (op) { - case 0x0: - MOV(32, rd, imm); - TEST(32, rd, rd); - Comp_RetriveFlags(false, false, false); - return; - case 0x1: - Comp_CmpOp(2, rd, imm, false); - return; - case 0x2: - Comp_ArithTriOp(ADD, rd, rd, imm, false, opSetsFlags|opSymmetric|opRetriveCV); - return; - case 0x3: - Comp_ArithTriOp(SUB, rd, rd, imm, false, opSetsFlags|opInvertCarry|opRetriveCV); - return; + case 0x0: + MOV(32, rd, imm); + TEST(32, rd, rd); + Comp_RetriveFlags(false, false, false); + return; + case 0x1: + Comp_CmpOp(2, rd, imm, false); + return; + case 0x2: + Comp_ArithTriOp(ADD, rd, rd, imm, false, opSetsFlags|opSymmetric|opRetriveCV); + return; + case 0x3: + Comp_ArithTriOp(SUB, rd, rd, imm, false, opSetsFlags|opInvertCarry|opRetriveCV); + return; } } void Compiler::T_Comp_ALU() { - OpArg rd = MapReg(CurrentInstr.T_Reg(0)); - OpArg rs = MapReg(CurrentInstr.T_Reg(3)); + OpArg rd = MapReg(CurInstr.T_Reg(0)); + OpArg rs = MapReg(CurInstr.T_Reg(3)); - u32 op = (CurrentInstr.Instr >> 6) & 0xF; + u32 op = (CurInstr.Instr >> 6) & 0xF; if ((op >= 0x2 && op < 0x4) || op == 0x7) Comp_AddCycles_CI(1); @@ -522,28 +528,62 @@ void Compiler::T_Comp_ALU() void Compiler::T_Comp_ALU_HiReg() { - OpArg rd = MapReg(((CurrentInstr.Instr & 0x7) | ((CurrentInstr.Instr >> 4) & 0x8))); - OpArg rs = MapReg((CurrentInstr.Instr >> 3) & 0xF); + u32 rd = ((CurInstr.Instr & 0x7) | ((CurInstr.Instr >> 4) & 0x8)); + OpArg rdMapped = MapReg(rd); + OpArg rs = MapReg((CurInstr.Instr >> 3) & 0xF); - u32 op = (CurrentInstr.Instr >> 8) & 0x3; + u32 op = (CurInstr.Instr >> 8) & 0x3; Comp_AddCycles_C(); switch (op) { - case 0x0: // ADD - Comp_ArithTriOp(ADD, rd, rd, rs, false, opSymmetric|opRetriveCV); - return; - case 0x1: // CMP - Comp_CmpOp(2, rd, rs, false); - return; - case 0x2: // MOV - if (rd != rs) - MOV(32, rd, rs); - TEST(32, rd, rd); - Comp_RetriveFlags(false, false, false); - return; + case 0x0: // ADD + Comp_ArithTriOp(ADD, rdMapped, rdMapped, rs, false, opSymmetric|opRetriveCV); + break; + case 0x1: // CMP + Comp_CmpOp(2, rdMapped, rs, false); + return; // this is on purpose + case 0x2: // MOV + if (rdMapped != rs) + MOV(32, rdMapped, rs); + TEST(32, rdMapped, rdMapped); + Comp_RetriveFlags(false, false, false); + break; + } + + if (rd == 15) + { + OR(32, rdMapped, Imm8(1)); + Comp_JumpTo(rdMapped.GetSimpleReg()); } } +void Compiler::T_Comp_AddSP() +{ + Comp_AddCycles_C(); + + OpArg sp = MapReg(13); + OpArg offset = Imm32((CurInstr.Instr & 0x7F) << 2); + if (CurInstr.Instr & (1 << 7)) + SUB(32, sp, offset); + else + ADD(32, sp, offset); +} + +void Compiler::T_Comp_RelAddr() +{ + Comp_AddCycles_C(); + + OpArg rd = MapReg(CurInstr.T_Reg(8)); + u32 offset = (CurInstr.Instr & 0xFF) << 2; + if (CurInstr.Instr & (1 << 11)) + { + OpArg sp = MapReg(13); + LEA(32, rd.GetSimpleReg(), MDisp(sp.GetSimpleReg(), offset)); + } + else + MOV(32, rd, Imm32((R15 & ~2) + offset)); +} + } \ No newline at end of file diff --git a/src/ARMJIT_x64/ARMJIT_Compiler.cpp b/src/ARMJIT_x64/ARMJIT_Compiler.cpp index 9096397..b7358a2 100644 --- a/src/ARMJIT_x64/ARMJIT_Compiler.cpp +++ 
b/src/ARMJIT_x64/ARMJIT_Compiler.cpp
@@ -9,7 +9,7 @@ using namespace Gen;
 namespace ARMJIT
 {
 template <>
-const X64Reg RegCache<Compiler, 16>::NativeRegAllocOrder[] = 
+const X64Reg RegCache<Compiler, 16>::NativeRegAllocOrder[] =
 {
 #ifdef _WIN32
     RBX, RSI, RDI, R12, R13
@@ -18,7 +18,7 @@ const X64Reg RegCache<Compiler, 16>::NativeRegAllocOrder[] =
 #endif
 };
 template <>
-const int RegCache<Compiler, 16>::NativeRegsAvailable = 
+const int RegCache<Compiler, 16>::NativeRegsAvailable =
 #ifdef _WIN32
     5
 #else
@@ -30,24 +30,33 @@ Compiler::Compiler()
 {
     AllocCodeSpace(1024 * 1024 * 16);
 
-    for (int i = 0; i < 15; i++)
+    for (int i = 0; i < 3; i++)
     {
-        ReadMemFuncs9[i] = Gen_MemoryRoutine9(false, 32, 0x1000000 * i);
-        WriteMemFuncs9[i] = Gen_MemoryRoutine9(true, 32, 0x1000000 * i);
         for (int j = 0; j < 2; j++)
         {
-            ReadMemFuncs7[j][i] = Gen_MemoryRoutine7(false, 32, j, 0x1000000 * i);
-            WriteMemFuncs7[j][i] = Gen_MemoryRoutine7(true, 32, j, 0x1000000 * i);
+            MemoryFuncs9[i][j] = Gen_MemoryRoutine9(j, 8 << i);
+            MemoryFuncs7[i][j][0] = Gen_MemoryRoutine7(j, false, 8 << i);
+            MemoryFuncs7[i][j][1] = Gen_MemoryRoutine7(j, true, 8 << i);
         }
     }
-    ReadMemFuncs9[15] = Gen_MemoryRoutine9(false, 32, 0xFF000000);
-    WriteMemFuncs9[15] = Gen_MemoryRoutine9(true, 32, 0xFF000000);
-    ReadMemFuncs7[15][0] = ReadMemFuncs7[15][1] = Gen_MemoryRoutine7(false, 32, false, 0xFF000000);
-    WriteMemFuncs7[15][0] = WriteMemFuncs7[15][1] = Gen_MemoryRoutine7(true, 32, false, 0xFF000000);
 
     ResetStart = GetWritableCodePtr();
 }
 
+DataRegion Compiler::ClassifyAddress(u32 addr)
+{
+    if (Num == 0 && addr >= ((ARMv5*)CurCPU)->DTCMBase && addr < ((ARMv5*)CurCPU)->DTCMBase + ((ARMv5*)CurCPU)->DTCMSize)
+        return dataRegionDTCM;
+    switch (addr & 0xFF000000)
+    {
+    case 0x02000000: return dataRegionMainRAM;
+    case 0x03000000: return Num == 1 && (addr & 0xF00000) == 0x800000 ? dataRegionWRAM7 : dataRegionSWRAM;
+    case 0x04000000: return dataRegionIO;
+    case 0x06000000: return dataRegionVRAM;
+    }
+    return dataRegionGeneric;
+}
+
 void Compiler::LoadCPSR()
 {
     assert(!CPSRDirty);
@@ -92,6 +101,7 @@ CompiledBlock Compiler::CompileBlock(ARM* cpu, FetchedInstr instrs[], int instrs
     Num = cpu->Num;
     R15 = cpu->R[15];
     CodeRegion = cpu->CodeRegion;
+    CurCPU = cpu;
 
     ABI_PushRegistersAndAdjustStack({ABI_ALL_CALLEE_SAVED & ABI_ALL_GPRS}, 8, 16);
 
@@ -106,27 +116,32 @@ CompiledBlock Compiler::CompileBlock(ARM* cpu, FetchedInstr instrs[], int instrs
 
     for (int i = 0; i < instrsCount; i++)
     {
        R15 += Thumb ? 
2 : 4; - CurrentInstr = instrs[i]; - - CompileFunc comp = GetCompFunc(CurrentInstr.Info.Kind); + CurInstr = instrs[i]; - if (CurrentInstr.Info.Branches()) - comp = NULL; + CompileFunc comp = GetCompFunc(CurInstr.Info.Kind); if (comp == NULL || i == instrsCount - 1) { MOV(32, MDisp(RCPU, offsetof(ARM, R[15])), Imm32(R15)); - MOV(32, MDisp(RCPU, offsetof(ARM, CodeCycles)), Imm32(CurrentInstr.CodeCycles)); - MOV(32, MDisp(RCPU, offsetof(ARM, CurInstr)), Imm32(CurrentInstr.Instr)); + MOV(32, MDisp(RCPU, offsetof(ARM, CodeCycles)), Imm32(CurInstr.CodeCycles)); + MOV(32, MDisp(RCPU, offsetof(ARM, CurInstr)), Imm32(CurInstr.Instr)); if (i == instrsCount - 1) { - MOV(32, MDisp(RCPU, offsetof(ARM, NextInstr[0])), Imm32(CurrentInstr.NextInstr[0])); - MOV(32, MDisp(RCPU, offsetof(ARM, NextInstr[1])), Imm32(CurrentInstr.NextInstr[1])); + MOV(32, MDisp(RCPU, offsetof(ARM, NextInstr[0])), Imm32(CurInstr.NextInstr[0])); + MOV(32, MDisp(RCPU, offsetof(ARM, NextInstr[1])), Imm32(CurInstr.NextInstr[1])); } - SaveCPSR(); + if (comp == NULL || CurInstr.Info.Branches()) + SaveCPSR(); } + // run interpreter + cpu->CodeCycles = CurInstr.CodeCycles; + cpu->R[15] = R15; + cpu->CurInstr = CurInstr.Instr; + cpu->NextInstr[0] = CurInstr.NextInstr[0]; + cpu->NextInstr[1] = CurInstr.NextInstr[1]; + if (comp != NULL) RegCache.Prepare(i); else @@ -134,26 +149,33 @@ CompiledBlock Compiler::CompileBlock(ARM* cpu, FetchedInstr instrs[], int instrs if (Thumb) { + u32 icode = (CurInstr.Instr >> 6) & 0x3FF; if (comp == NULL) { MOV(64, R(ABI_PARAM1), R(RCPU)); - u32 icode = (CurrentInstr.Instr >> 6) & 0x3FF; ABI_CallFunction(ARMInterpreter::THUMBInstrTable[icode]); } else (this->*comp)(); + + ARMInterpreter::THUMBInstrTable[icode](cpu); } else { - u32 cond = CurrentInstr.Cond(); - if (CurrentInstr.Info.Kind == ARMInstrInfo::ak_BLX_IMM) + u32 cond = CurInstr.Cond(); + if (CurInstr.Info.Kind == ARMInstrInfo::ak_BLX_IMM) { MOV(64, R(ABI_PARAM1), R(RCPU)); ABI_CallFunction(ARMInterpreter::A_BLX_IMM); + + ARMInterpreter::A_BLX_IMM(cpu); } else if (cond == 0xF) + { Comp_AddCycles_C(); + cpu->AddCycles_C(); + } else { FixupBranch skipExecute; @@ -180,18 +202,18 @@ CompiledBlock Compiler::CompileBlock(ARM* cpu, FetchedInstr instrs[], int instrs } + u32 icode = ((CurInstr.Instr >> 4) & 0xF) | ((CurInstr.Instr >> 16) & 0xFF0); if (comp == NULL) { MOV(64, R(ABI_PARAM1), R(RCPU)); - u32 icode = ((CurrentInstr.Instr >> 4) & 0xF) | ((CurrentInstr.Instr >> 16) & 0xFF0); ABI_CallFunction(ARMInterpreter::ARMInstrTable[icode]); } else (this->*comp)(); FixupBranch skipFailed; - if (CurrentInstr.Cond() < 0xE) + if (CurInstr.Cond() < 0xE) { skipFailed = J(); SetJumpTarget(skipExecute); @@ -200,13 +222,17 @@ CompiledBlock Compiler::CompileBlock(ARM* cpu, FetchedInstr instrs[], int instrs SetJumpTarget(skipFailed); } + + if (cpu->CheckCondition(cond)) + ARMInterpreter::ARMInstrTable[icode](cpu); + else + cpu->AddCycles_C(); } } /* we don't need to collect the interpreted cycles, - since all functions only add to it, the dispatcher - takes care of it. + since cpu->Cycles is taken into account by the dispatcher. 
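+       (each instruction is additionally executed once by the interpreter
+       while it is being compiled, see the calls above, so a block's first
+       run happens during compilation and its cycles accumulate in
+       cpu->Cycles like any other interpreted code)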
*/ if (comp == NULL && i != instrsCount - 1) @@ -277,29 +303,29 @@ CompileFunc Compiler::GetCompFunc(int kind) // Mul NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, // ARMv5 stuff - NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, // STR A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, // STRB - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, // LDR A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, // LDRB - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, // STRH - NULL, NULL, NULL, NULL, + A_Comp_MemHalf, A_Comp_MemHalf, A_Comp_MemHalf, A_Comp_MemHalf, // LDRD NULL, NULL, NULL, NULL, // STRD NULL, NULL, NULL, NULL, // LDRH - NULL, NULL, NULL, NULL, + A_Comp_MemHalf, A_Comp_MemHalf, A_Comp_MemHalf, A_Comp_MemHalf, // LDRSB - NULL, NULL, NULL, NULL, + A_Comp_MemHalf, A_Comp_MemHalf, A_Comp_MemHalf, A_Comp_MemHalf, // LDRSH - NULL, NULL, NULL, NULL, + A_Comp_MemHalf, A_Comp_MemHalf, A_Comp_MemHalf, A_Comp_MemHalf, // swap - NULL, NULL, + NULL, NULL, // LDM/STM NULL, NULL, // Branch @@ -314,26 +340,26 @@ CompileFunc Compiler::GetCompFunc(int kind) // Three operand ADD/SUB T_Comp_AddSub_, T_Comp_AddSub_, T_Comp_AddSub_, T_Comp_AddSub_, // 8 bit imm - T_Comp_ALU_Imm8, T_Comp_ALU_Imm8, T_Comp_ALU_Imm8, T_Comp_ALU_Imm8, + T_Comp_ALU_Imm8, T_Comp_ALU_Imm8, T_Comp_ALU_Imm8, T_Comp_ALU_Imm8, // general ALU - T_Comp_ALU, T_Comp_ALU, T_Comp_ALU, T_Comp_ALU, T_Comp_ALU, T_Comp_ALU, T_Comp_ALU, T_Comp_ALU, - T_Comp_ALU, T_Comp_ALU, T_Comp_ALU, T_Comp_ALU, + T_Comp_ALU, T_Comp_ALU, T_Comp_ALU, T_Comp_ALU, + T_Comp_ALU, T_Comp_ALU, T_Comp_ALU, T_Comp_ALU, T_Comp_ALU, NULL, T_Comp_ALU, T_Comp_ALU, // hi reg T_Comp_ALU_HiReg, T_Comp_ALU_HiReg, T_Comp_ALU_HiReg, // pc/sp relative - NULL, NULL, NULL, + T_Comp_RelAddr, T_Comp_RelAddr, T_Comp_AddSP, // LDR pcrel - NULL, + NULL, // LDR/STR reg offset - T_Comp_MemReg, NULL, T_Comp_MemReg, NULL, - // LDR/STR sign extended, half - NULL, NULL, NULL, NULL, + T_Comp_MemReg, T_Comp_MemReg, T_Comp_MemReg, T_Comp_MemReg, + // LDR/STR sign extended, half + T_Comp_MemRegHalf, T_Comp_MemRegHalf, T_Comp_MemRegHalf, T_Comp_MemRegHalf, // LDR/STR imm offset - T_Comp_MemImm, T_Comp_MemImm, NULL, NULL, + T_Comp_MemImm, T_Comp_MemImm, T_Comp_MemImm, T_Comp_MemImm, // LDR/STR half imm offset - NULL, NULL, + T_Comp_MemImmHalf, T_Comp_MemImmHalf, // branch, etc. NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, @@ -346,10 +372,10 @@ CompileFunc Compiler::GetCompFunc(int kind) void Compiler::Comp_AddCycles_C() { s32 cycles = Num ? - NDS::ARM7MemTimings[CurrentInstr.CodeCycles][Thumb ? 1 : 3] - : ((R15 & 0x2) ? 0 : CurrentInstr.CodeCycles); + NDS::ARM7MemTimings[CurInstr.CodeCycles][Thumb ? 1 : 3] + : ((R15 & 0x2) ? 0 : CurInstr.CodeCycles); - if (CurrentInstr.Cond() < 0xE) + if (CurInstr.Cond() < 0xE) ADD(32, R(RCycles), Imm8(cycles)); else ConstantCycles += cycles; @@ -358,13 +384,26 @@ void Compiler::Comp_AddCycles_C() void Compiler::Comp_AddCycles_CI(u32 i) { s32 cycles = (Num ? - NDS::ARM7MemTimings[CurrentInstr.CodeCycles][Thumb ? 
0 : 2]
-        : ((R15 & 0x2) ? 0 : CurrentInstr.CodeCycles)) + i;
-
-    if (CurrentInstr.Cond() < 0xE)
+        NDS::ARM7MemTimings[CurInstr.CodeCycles][Thumb ? 0 : 2]
+        : ((R15 & 0x2) ? 0 : CurInstr.CodeCycles)) + i;
+
+    if (CurInstr.Cond() < 0xE)
         ADD(32, R(RCycles), Imm8(cycles));
     else
         ConstantCycles += cycles;
 }
 
+void Compiler::Comp_JumpTo(Gen::X64Reg addr, bool restoreCPSR)
+{
+    SaveCPSR();
+
+    MOV(64, R(ABI_PARAM1), R(RCPU));
+    MOV(32, R(ABI_PARAM2), R(addr));
+    MOV(32, R(ABI_PARAM3), Imm32(restoreCPSR));
+    if (Num == 0)
+        CALL((void*)&ARMv5::JumpTo);
+    else
+        CALL((void*)&ARMv4::JumpTo);
+}
+
 }
\ No newline at end of file
diff --git a/src/ARMJIT_x64/ARMJIT_Compiler.h b/src/ARMJIT_x64/ARMJIT_Compiler.h
index 7ab9b25..9395a29 100644
--- a/src/ARMJIT_x64/ARMJIT_Compiler.h
+++ b/src/ARMJIT_x64/ARMJIT_Compiler.h
@@ -6,6 +6,8 @@
 #include "../ARMJIT.h"
 #include "../ARMJIT_RegCache.h"
 
+#include 
+
 namespace ARMJIT
 {
 
@@ -21,6 +23,19 @@ class Compiler;
 
 typedef void (Compiler::*CompileFunc)();
 
+enum DataRegion
+{
+    dataRegionGeneric, // hey, that's me!
+    dataRegionMainRAM,
+    dataRegionSWRAM,
+    dataRegionVRAM,
+    dataRegionIO,
+    dataRegionExclusive,
+    dataRegionsCount,
+    dataRegionDTCM = dataRegionExclusive,
+    dataRegionWRAM7 = dataRegionExclusive,
+};
+
 class Compiler : public Gen::X64CodeBlock
 {
 public:
@@ -34,6 +49,8 @@ public:
 private:
     CompileFunc GetCompFunc(int kind);
 
+    void Comp_JumpTo(Gen::X64Reg addr, bool restoreCPSR = false);
+
     void Comp_AddCycles_C();
     void Comp_AddCycles_CI(u32 i);
 
@@ -47,11 +64,14 @@ private:
         opInvertOp2 = 1 << 5,
     };
 
+    DataRegion ClassifyAddress(u32 addr);
+
     void A_Comp_Arith();
     void A_Comp_MovOp();
     void A_Comp_CmpOp();
 
     void A_Comp_MemWB();
+    void A_Comp_MemHalf();
 
     void T_Comp_ShiftImm();
     void T_Comp_AddSub_();
@@ -59,8 +79,15 @@ private:
     void T_Comp_ALU();
     void T_Comp_ALU_HiReg();
 
+    void T_Comp_RelAddr();
+    void T_Comp_AddSP();
+
     void T_Comp_MemReg();
     void T_Comp_MemImm();
+    void T_Comp_MemRegHalf();
+    void T_Comp_MemImmHalf();
+
+    void Comp_MemAccess(Gen::OpArg rd, bool signExtend, bool store, int size);
 
     void Comp_ArithTriOp(void (Compiler::*op)(int, const Gen::OpArg&, const Gen::OpArg&),
         Gen::OpArg rd, Gen::OpArg rn, Gen::OpArg op2, bool carryUsed, int opFlags);
@@ -70,8 +97,8 @@ private:
 
     void Comp_RetriveFlags(bool sign, bool retriveCV, bool carryUsed);
 
-    void* Gen_MemoryRoutine9(bool store, int size, u32 region);
-    void* Gen_MemoryRoutine7(bool store, int size, bool mainRAMCode, u32 region);
+    void* Gen_MemoryRoutine9(bool store, int size);
+    void* Gen_MemoryRoutine7(bool store, bool codeMainRAM, int size);
 
     Gen::OpArg Comp_RegShiftImm(int op, int amount, Gen::OpArg rm, bool S, bool& carryUsed);
     Gen::OpArg Comp_RegShiftReg(int op, Gen::OpArg rs, Gen::OpArg rm, bool S, bool& carryUsed);
@@ -92,10 +119,12 @@ private:
     }
 
     void* ResetStart;
+    void* MemoryFuncs9[3][2];
+    void* MemoryFuncs7[3][2][2];
 
     bool CPSRDirty = false;
 
-    FetchedInstr CurrentInstr;
+    FetchedInstr CurInstr;
 
     RegCache<Compiler, 16> RegCache;
 
@@ -105,12 +134,9 @@ private:
 
     u32 CodeRegion;
 
     u32 ConstantCycles;
-};
 
-extern void* ReadMemFuncs9[16];
-extern void* ReadMemFuncs7[2][16];
-extern void* WriteMemFuncs9[16];
-extern void* WriteMemFuncs7[2][16];
+    ARM* CurCPU;
+};
 
 }
 
diff --git a/src/ARMJIT_x64/ARMJIT_LoadStore.cpp b/src/ARMJIT_x64/ARMJIT_LoadStore.cpp
index d534269..69746e2 100644
--- a/src/ARMJIT_x64/ARMJIT_LoadStore.cpp
+++ b/src/ARMJIT_x64/ARMJIT_LoadStore.cpp
@@ -5,7 +5,6 @@
 namespace NDS
 {
-#define MAIN_RAM_SIZE 0x400000
 extern u8* SWRAM_ARM9;
 extern u32 SWRAM_ARM9Mask;
 extern u8* SWRAM_ARM7;
 extern u32 SWRAM_ARM7Mask;
 extern u8* ARM7WRAM;
 extern u16 ARM7BIOSProt;
 }
 
 using namespace Gen;
 
 namespace ARMJIT
 {
-void* ReadMemFuncs9[16];
-void* ReadMemFuncs7[2][16];
-void* WriteMemFuncs9[16];
-void* WriteMemFuncs7[2][16];
-
 template <typename T>
 int squeezePointer(T* ptr)
 {
     int truncated = (int)((u64)ptr);
     return truncated;
 }
 
-u32 ReadVRAM9(u32 addr)
-{
-    switch (addr & 0x00E00000)
-    {
-    case 0x00000000: return GPU::ReadVRAM_ABG(addr);
-    case 0x00200000: return GPU::ReadVRAM_BBG(addr);
-    case 0x00400000: return GPU::ReadVRAM_AOBJ(addr);
-    case 0x00600000: return GPU::ReadVRAM_BOBJ(addr);
-    default: return GPU::ReadVRAM_LCDC(addr);
-    }
-}
-
-void WriteVRAM9(u32 addr, u32 val)
-{
-    switch (addr & 0x00E00000)
-    {
-    case 0x00000000: GPU::WriteVRAM_ABG(addr, val); return;
-    case 0x00200000: GPU::WriteVRAM_BBG(addr, val); return;
-    case 0x00400000: GPU::WriteVRAM_AOBJ(addr, val); return;
-    case 0x00600000: GPU::WriteVRAM_BOBJ(addr, val); return;
-    default: GPU::WriteVRAM_LCDC(addr, val); return;
-    }
-}
-
+/*
+    According to DeSmuME and my own research, approx. 99% (seriously, that's an empirical number)
+    of all memory load and store instructions always access addresses in the same region as
+    during their first execution.
+
+    I tried multiple optimisations, which would benefit from this behaviour
+    (having fast paths for the first region, …), though none of them yielded a measurable
+    improvement.
+*/
+
 /*
-    R11 - data to write (store only)
-    RSCRATCH2 - address
-    RSCRATCH3 - code cycles
+    address - ABI_PARAM1 (a.k.a. ECX = RSCRATCH3 on Windows)
+    store value - ABI_PARAM2 (a.k.a. RDX = RSCRATCH2 on Windows)
+    code cycles - ABI_PARAM3
 */
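+// In C terms each generated routine behaves roughly like
+//
+//     u32 MemRoutine9(u32 addr /* ABI_PARAM1 */, u32 val /* ABI_PARAM2 */, u32 codeCycles /* ABI_PARAM3 */);
+//
+// an illustrative sketch only ("MemRoutine9" is a made-up name, not a symbol
+// in this patch): stores consume val and tail-jump into NDS::ARM9Write8/16/32,
+// while loads return the value in RSCRATCH (EAX), rotated for unaligned
+// 32 bit reads.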
-void* Compiler::Gen_MemoryRoutine9(bool store, int size, u32 region)
+void* Compiler::Gen_MemoryRoutine9(bool store, int size)
 {
+    u32 addressMask = ~(size == 32 ? 3 : (size == 16 ? 1 : 0));
     AlignCode4();
-    void* res = (void*)GetWritableCodePtr();
+    void* res = GetWritableCodePtr();
 
-    if (!store)
-    {
-        MOV(32, R(RSCRATCH), R(RSCRATCH2));
-        AND(32, R(RSCRATCH), Imm8(0x3));
-        SHL(32, R(RSCRATCH), Imm8(3));
-        // enter the shadow realm!
-        MOV(32, MDisp(RSP, 8), R(RSCRATCH));
-    }
+    MOV(32, R(RSCRATCH), R(ABI_PARAM1));
+    SUB(32, R(RSCRATCH), MDisp(RCPU, offsetof(ARMv5, DTCMBase)));
+    CMP(32, R(RSCRATCH), MDisp(RCPU, offsetof(ARMv5, DTCMSize)));
+    FixupBranch insideDTCM = J_CC(CC_B);
 
-    // cycle counting!
-    // this is AddCycles_CDI
-    MOV(32, R(R10), R(RSCRATCH2));
-    SHR(32, R(R10), Imm8(12));
-    MOVZX(32, 8, R10, MComplex(RCPU, R10, SCALE_1, offsetof(ARMv5, MemTimings) + 2));
-    LEA(32, RSCRATCH, MComplex(RSCRATCH3, R10, SCALE_1, -6));
-    CMP(32, R(R10), R(RSCRATCH3));
-    CMOVcc(32, RSCRATCH3, R(R10), CC_G);
-    CMP(32, R(RSCRATCH), R(RSCRATCH3));
-    CMOVcc(32, RSCRATCH3, R(RSCRATCH), CC_G);
-    ADD(32, R(RCycles), R(RSCRATCH3));
-
-    if (!store)
-        XOR(32, R(RSCRATCH), R(RSCRATCH));
-    AND(32, R(RSCRATCH2), Imm32(~3));
+    CMP(32, R(ABI_PARAM1), MDisp(RCPU, offsetof(ARMv5, ITCMSize)));
+    FixupBranch insideITCM = J_CC(CC_B);
 
+    // cycle counting!
+    MOV(32, R(RSCRATCH), R(ABI_PARAM1));
+    SHR(32, R(RSCRATCH), Imm8(12));
+    MOVZX(32, 8, RSCRATCH, MComplex(RCPU, RSCRATCH, SCALE_1, offsetof(ARMv5, MemTimings) + (size == 32 ? 
2 : 0))); + LEA(32, ABI_PARAM4, MComplex(RSCRATCH, ABI_PARAM3, SCALE_1, -6)); + CMP(32, R(ABI_PARAM3), R(RSCRATCH)); + CMOVcc(32, RSCRATCH, R(ABI_PARAM3), CC_G); + CMP(32, R(ABI_PARAM4), R(RSCRATCH)); + CMOVcc(32, RSCRATCH, R(ABI_PARAM4), CC_G); + ADD(32, R(RCycles), R(RSCRATCH)); + + if (store) { - MOV(32, R(RSCRATCH3), R(RSCRATCH2)); - SUB(32, R(RSCRATCH2), MDisp(RCPU, offsetof(ARMv5, DTCMBase))); - CMP(32, R(RSCRATCH2), MDisp(RCPU, offsetof(ARMv5, DTCMSize))); - FixupBranch outsideDTCM = J_CC(CC_AE); - AND(32, R(RSCRATCH2), Imm32(0x3FFF)); - if (!store) + if (size > 8) + AND(32, R(ABI_PARAM1), Imm32(addressMask)); + switch (size) { - MOV(32, R(RSCRATCH), MComplex(RCPU, RSCRATCH2, SCALE_1, offsetof(ARMv5, DTCM))); - MOV(32, R(ECX), MDisp(RSP, 8)); - ROR_(32, R(RSCRATCH), R(ECX)); + case 32: JMP((u8*)NDS::ARM9Write32, true); break; + case 16: JMP((u8*)NDS::ARM9Write16, true); break; + case 8: JMP((u8*)NDS::ARM9Write8, true); break; } - else - MOV(32, MComplex(RCPU, RSCRATCH2, SCALE_1, offsetof(ARMv5, DTCM)), R(R11)); - RET(); - SetJumpTarget(outsideDTCM); - MOV(32, R(RSCRATCH2), R(RSCRATCH3)); } - - switch (region) + else { - case 0x00000000: - case 0x01000000: - { - CMP(32, R(RSCRATCH2), MDisp(RCPU, offsetof(ARMv5, ITCMSize))); - FixupBranch insideITCM = J_CC(CC_B); - RET(); - SetJumpTarget(insideITCM); - AND(32, R(RSCRATCH2), Imm32(0x7FFF)); - if (!store) - MOV(32, R(RSCRATCH), MComplex(RCPU, RSCRATCH2, SCALE_1, offsetof(ARMv5, ITCM))); - else - { - MOV(32, MComplex(RCPU, RSCRATCH2, SCALE_1, offsetof(ARMv5, ITCM)), R(R11)); - MOV(64, MScaled(RSCRATCH2, SCALE_4, squeezePointer(cache.ARM9_ITCM)), Imm32(0)); - MOV(64, MScaled(RSCRATCH2, SCALE_4, squeezePointer(cache.ARM9_ITCM) + 8), Imm32(0)); - } - } - break; - case 0x02000000: - AND(32, R(RSCRATCH2), Imm32(MAIN_RAM_SIZE - 1)); - if (!store) - MOV(32, R(RSCRATCH), MDisp(RSCRATCH2, squeezePointer(NDS::MainRAM))); - else - { - MOV(32, MDisp(RSCRATCH2, squeezePointer(NDS::MainRAM)), R(R11)); - MOV(64, MScaled(RSCRATCH2, SCALE_4, squeezePointer(cache.MainRAM)), Imm32(0)); - MOV(64, MScaled(RSCRATCH2, SCALE_4, squeezePointer(cache.MainRAM) + 8), Imm32(0)); - } - break; - case 0x03000000: - { - MOV(64, R(RSCRATCH3), M(&NDS::SWRAM_ARM9)); - TEST(64, R(RSCRATCH3), R(RSCRATCH3)); - FixupBranch notMapped = J_CC(CC_Z); - AND(32, R(RSCRATCH2), M(&NDS::SWRAM_ARM9Mask)); - if (!store) - MOV(32, R(RSCRATCH), MRegSum(RSCRATCH2, RSCRATCH3)); - else - { - MOV(32, MRegSum(RSCRATCH2, RSCRATCH3), R(R11)); - MOV(64, MScaled(RSCRATCH2, SCALE_4, squeezePointer(cache.SWRAM)), Imm32(0)); - MOV(64, MScaled(RSCRATCH2, SCALE_4, squeezePointer(cache.SWRAM) + 8), Imm32(0)); - } - SetJumpTarget(notMapped); - } - break; - case 0x04000000: - MOV(32, R(ABI_PARAM1), R(RSCRATCH2)); - if (!store) - { - ABI_PushRegistersAndAdjustStack({}, 8, 0); - ABI_CallFunction(NDS::ARM9IORead32); - ABI_PopRegistersAndAdjustStack({}, 8, 0); - } - else - { - MOV(32, R(ABI_PARAM2), R(R11)); - JMP((u8*)NDS::ARM9IOWrite32, true); - } - break; - case 0x05000000: - { - MOV(32, R(RSCRATCH), Imm32(1<<1)); - MOV(32, R(RSCRATCH3), Imm32(1<<9)); - TEST(32, R(RSCRATCH2), Imm32(0x400)); - CMOVcc(32, RSCRATCH, R(RSCRATCH3), CC_NZ); - TEST(16, R(RSCRATCH), M(&NDS::PowerControl9)); - FixupBranch available = J_CC(CC_NZ); - RET(); - SetJumpTarget(available); - AND(32, R(RSCRATCH2), Imm32(0x7FF)); - if (!store) - MOV(32, R(RSCRATCH), MDisp(RSCRATCH2, squeezePointer(GPU::Palette))); - else - MOV(32, MDisp(RSCRATCH2, squeezePointer(GPU::Palette)), R(R11)); - } - break; - case 0x06000000: - MOV(32, 
R(ABI_PARAM1), R(RSCRATCH2)); - if (!store) - { - ABI_PushRegistersAndAdjustStack({}, 8); - ABI_CallFunction(ReadVRAM9); - ABI_PopRegistersAndAdjustStack({}, 8); - } - else - { - MOV(32, R(ABI_PARAM2), R(R11)); - JMP((u8*)WriteVRAM9, true); - } - break; - case 0x07000000: + if (size == 32) { - MOV(32, R(RSCRATCH), Imm32(1<<1)); - MOV(32, R(RSCRATCH3), Imm32(1<<9)); - TEST(32, R(RSCRATCH2), Imm32(0x400)); - CMOVcc(32, RSCRATCH, R(RSCRATCH3), CC_NZ); - TEST(16, R(RSCRATCH), M(&NDS::PowerControl9)); - FixupBranch available = J_CC(CC_NZ); + ABI_PushRegistersAndAdjustStack({ABI_PARAM1}, 8); + AND(32, R(ABI_PARAM1), Imm32(addressMask)); + // everything's already in the appropriate register + ABI_CallFunction(NDS::ARM9Read32); + ABI_PopRegistersAndAdjustStack({ECX}, 8); + AND(32, R(ECX), Imm8(3)); + SHL(32, R(ECX), Imm8(3)); + ROR_(32, R(RSCRATCH), R(ECX)); RET(); - SetJumpTarget(available); - AND(32, R(RSCRATCH2), Imm32(0x7FF)); - if (!store) - MOV(32, R(RSCRATCH), MDisp(RSCRATCH2, squeezePointer(GPU::OAM))); - else - MOV(32, MDisp(RSCRATCH2, squeezePointer(GPU::OAM)), R(R11)); } - break; - case 0x08000000: - case 0x09000000: - case 0x0A000000: - if (!store) - MOV(32, R(RSCRATCH), Imm32(0xFFFFFFFF)); - break; - case 0xFF000000: - if (!store) - { - AND(32, R(RSCRATCH2), Imm32(0xFFF)); - MOV(32, R(RSCRATCH), MDisp(RSCRATCH2, squeezePointer(NDS::ARM9BIOS))); - } - break; - default: - MOV(32, R(ABI_PARAM1), R(RSCRATCH2)); - if (!store) + else if (size == 16) { - ABI_PushRegistersAndAdjustStack({}, 8, 0); - ABI_CallFunction(NDS::ARM9Read32); - ABI_PopRegistersAndAdjustStack({}, 8, 0); + AND(32, R(ABI_PARAM1), Imm32(addressMask)); + JMP((u8*)NDS::ARM9Read16, true); } else + JMP((u8*)NDS::ARM9Read8, true); + } + + SetJumpTarget(insideDTCM); + ADD(32, R(RCycles), R(ABI_PARAM3)); + AND(32, R(RSCRATCH), Imm32(0x3FFF & addressMask)); + if (store) + MOV(size, MComplex(RCPU, RSCRATCH, SCALE_1, offsetof(ARMv5, DTCM)), R(ABI_PARAM2)); + else + { + MOVZX(32, size, RSCRATCH, MComplex(RCPU, RSCRATCH, SCALE_1, offsetof(ARMv5, DTCM))); + if (size == 32) { - MOV(32, R(ABI_PARAM2), R(R11)); - JMP((u8*)NDS::ARM9Write32, true); + if (ABI_PARAM1 != ECX) + MOV(32, R(ECX), R(ABI_PARAM1)); + AND(32, R(ECX), Imm8(3)); + SHL(32, R(ECX), Imm8(3)); + ROR_(32, R(RSCRATCH), R(ECX)); } - break; } + RET(); - if (!store) + SetJumpTarget(insideITCM); + ADD(32, R(RCycles), R(ABI_PARAM3)); + MOV(32, R(ABI_PARAM3), R(ABI_PARAM1)); // free up ECX + AND(32, R(ABI_PARAM3), Imm32(0x7FFF & addressMask)); + if (store) { - MOV(32, R(ECX), MDisp(RSP, 8)); - ROR_(32, R(RSCRATCH), R(ECX)); + MOV(size, MComplex(RCPU, ABI_PARAM3, SCALE_1, offsetof(ARMv5, ITCM)), R(ABI_PARAM2)); + XOR(32, R(RSCRATCH), R(RSCRATCH)); + MOV(64, MScaled(ABI_PARAM3, SCALE_4, squeezePointer(cache.ARM9_ITCM)), R(RSCRATCH)); + if (size == 32) + MOV(64, MScaled(ABI_PARAM3, SCALE_4, squeezePointer(cache.ARM9_ITCM) + 8), R(RSCRATCH)); + } + else + { + MOVZX(32, size, RSCRATCH, MComplex(RCPU, ABI_PARAM3, SCALE_1, offsetof(ARMv5, ITCM))); + if (size == 32) + { + if (ABI_PARAM1 != ECX) + MOV(32, R(ECX), R(ABI_PARAM1)); + AND(32, R(ECX), Imm8(3)); + SHL(32, R(ECX), Imm8(3)); + ROR_(32, R(RSCRATCH), R(ECX)); + } } - RET(); + static_assert(RSCRATCH == EAX); + return res; } -void* Compiler::Gen_MemoryRoutine7(bool store, int size, bool mainRAMCode, u32 region) +void* Compiler::Gen_MemoryRoutine7(bool store, bool codeMainRAM, int size) { + u32 addressMask = ~(size == 32 ? 3 : (size == 16 ? 
1 : 0)); AlignCode4(); void* res = GetWritableCodePtr(); - if (!store) - { - MOV(32, R(RSCRATCH), R(RSCRATCH2)); - AND(32, R(RSCRATCH), Imm8(0x3)); - SHL(32, R(RSCRATCH), Imm8(3)); - // enter the shadow realm! - MOV(32, MDisp(RSP, 8), R(RSCRATCH)); - } - - // AddCycles_CDI - MOV(32, R(RSCRATCH), R(RSCRATCH2)); + MOV(32, R(RSCRATCH), R(ABI_PARAM1)); SHR(32, R(RSCRATCH), Imm8(15)); - MOVZX(32, 8, RSCRATCH, MDisp(RSCRATCH, squeezePointer(NDS::ARM7MemTimings + 2))); - if ((region == 0x02000000 && mainRAMCode) || (region != 0x02000000 && !mainRAMCode)) + MOVZX(32, 8, ABI_PARAM4, MDisp(RSCRATCH, (size == 32 ? 2 : 0) + squeezePointer(NDS::ARM7MemTimings))); + + MOV(32, R(RSCRATCH), R(ABI_PARAM1)); + AND(32, R(RSCRATCH), Imm32(0xFF000000)); + CMP(32, R(RSCRATCH), Imm32(0x02000000)); + FixupBranch outsideMainRAM = J_CC(CC_NE); + if (codeMainRAM) { - if (!store && region != 0x02000000) - LEA(32, RSCRATCH3, MComplex(RSCRATCH, RSCRATCH3, SCALE_1, 1)); - ADD(32, R(RCycles), R(RSCRATCH3)); + LEA(32, RSCRATCH, MRegSum(ABI_PARAM4, ABI_PARAM3)); + ADD(32, R(RCycles), R(RSCRATCH)); } else { if (!store) - ADD(32, R(region == 0x02000000 ? RSCRATCH2 : RSCRATCH), Imm8(1)); - LEA(32, R10, MComplex(RSCRATCH, RSCRATCH3, SCALE_1, -3)); - CMP(32, R(RSCRATCH3), R(RSCRATCH)); - CMOVcc(32, RSCRATCH, R(RSCRATCH3), CC_G); - CMP(32, R(R10), R(RSCRATCH)); - CMOVcc(32, RSCRATCH, R(R10), CC_G); + ADD(32, R(ABI_PARAM3), Imm8(1)); + LEA(32, RSCRATCH, MComplex(ABI_PARAM4, ABI_PARAM3, SCALE_1, -3)); + CMP(32, R(ABI_PARAM4), R(ABI_PARAM3)); + CMOVcc(32, ABI_PARAM3, R(ABI_PARAM4), CC_G); + CMP(32, R(ABI_PARAM3), R(RSCRATCH)); + CMOVcc(32, RSCRATCH, R(ABI_PARAM3), CC_G); ADD(32, R(RCycles), R(RSCRATCH)); } - - if (!store) + MOV(32, R(ABI_PARAM3), R(ABI_PARAM1)); + AND(32, R(ABI_PARAM3), Imm32((MAIN_RAM_SIZE - 1) & addressMask)); + if (store) + { + MOV(size, MDisp(ABI_PARAM3, squeezePointer(NDS::MainRAM)), R(ABI_PARAM2)); XOR(32, R(RSCRATCH), R(RSCRATCH)); - AND(32, R(RSCRATCH2), Imm32(~3)); + MOV(64, MScaled(ABI_PARAM3, SCALE_4, squeezePointer(cache.MainRAM)), R(RSCRATCH)); + if (size == 32) + MOV(64, MScaled(ABI_PARAM3, SCALE_4, squeezePointer(cache.MainRAM) + 8), R(RSCRATCH)); + } + else + { + MOVZX(32, size, RSCRATCH, MDisp(ABI_PARAM3, squeezePointer(NDS::MainRAM))); + if (size == 32) + { + if (ABI_PARAM1 != ECX) + MOV(32, R(ECX), R(ABI_PARAM1)); + AND(32, R(ECX), Imm8(3)); + SHL(32, R(ECX), Imm8(3)); + ROR_(32, R(RSCRATCH), R(ECX)); + } + } + RET(); - switch (region) + SetJumpTarget(outsideMainRAM); + if (codeMainRAM) + { + if (!store) + ADD(32, R(ABI_PARAM4), Imm8(1)); + LEA(32, RSCRATCH, MComplex(ABI_PARAM4, ABI_PARAM3, SCALE_1, -3)); + CMP(32, R(ABI_PARAM4), R(ABI_PARAM3)); + CMOVcc(32, ABI_PARAM3, R(ABI_PARAM4), CC_G); + CMP(32, R(ABI_PARAM3), R(RSCRATCH)); + CMOVcc(32, RSCRATCH, R(ABI_PARAM3), CC_G); + ADD(32, R(RCycles), R(RSCRATCH)); + } + else + { + LEA(32, RSCRATCH, MComplex(ABI_PARAM4, ABI_PARAM3, SCALE_1, store ? 
0 : 1)); + ADD(32, R(RCycles), R(RSCRATCH)); + } + if (store) + { + if (size > 8) + AND(32, R(ABI_PARAM1), Imm32(addressMask)); + switch (size) + { + case 32: JMP((u8*)NDS::ARM7Write32, true); break; + case 16: JMP((u8*)NDS::ARM7Write16, true); break; + case 8: JMP((u8*)NDS::ARM7Write8, true); break; + } + } + else { - case 0x00000000: - if (!store) { - CMP(32, R(RSCRATCH2), Imm32(0x4000)); - FixupBranch outsideBIOS1 = J_CC(CC_AE); - - MOV(32, R(RSCRATCH), MDisp(RCPU, offsetof(ARM, R[15]))); - CMP(32, R(RSCRATCH), Imm32(0x4000)); - FixupBranch outsideBIOS2 = J_CC(CC_AE); - MOV(32, R(RSCRATCH3), M(&NDS::ARM7BIOSProt)); - CMP(32, R(RSCRATCH2), R(RSCRATCH3)); - FixupBranch notDenied1 = J_CC(CC_AE); - CMP(32, R(RSCRATCH), R(RSCRATCH3)); - FixupBranch notDenied2 = J_CC(CC_B); - SetJumpTarget(outsideBIOS2); - MOV(32, R(RSCRATCH), Imm32(0xFFFFFFFF)); - RET(); - - SetJumpTarget(notDenied1); - SetJumpTarget(notDenied2); - MOV(32, R(RSCRATCH), MDisp(RSCRATCH2, squeezePointer(NDS::ARM7BIOS))); - MOV(32, R(ECX), MDisp(RSP, 8)); - ROR_(32, R(RSCRATCH), R(ECX)); - RET(); - - SetJumpTarget(outsideBIOS1); - } - break; - case 0x02000000: - AND(32, R(RSCRATCH2), Imm32(MAIN_RAM_SIZE - 1)); - if (!store) - MOV(32, R(RSCRATCH), MDisp(RSCRATCH2, squeezePointer(NDS::MainRAM))); - else - { - MOV(32, MDisp(RSCRATCH2, squeezePointer(NDS::MainRAM)), R(R11)); - MOV(64, MScaled(RSCRATCH2, SCALE_4, squeezePointer(cache.MainRAM)), Imm32(0)); - MOV(64, MScaled(RSCRATCH2, SCALE_4, squeezePointer(cache.MainRAM) + 8), Imm32(0)); - } - break; - case 0x03000000: - { - TEST(32, R(RSCRATCH2), Imm32(0x800000)); - FixupBranch region = J_CC(CC_NZ); - MOV(64, R(RSCRATCH), M(&NDS::SWRAM_ARM7)); - TEST(64, R(RSCRATCH), R(RSCRATCH)); - FixupBranch notMapped = J_CC(CC_Z); - AND(32, R(RSCRATCH2), M(&NDS::SWRAM_ARM7Mask)); - if (!store) - { - MOV(32, R(RSCRATCH), MRegSum(RSCRATCH, RSCRATCH2)); - MOV(32, R(ECX), MDisp(RSP, 8)); - ROR_(32, R(RSCRATCH), R(ECX)); - } - else - { - MOV(32, MRegSum(RSCRATCH, RSCRATCH2), R(R11)); - MOV(64, MScaled(RSCRATCH2, SCALE_4, squeezePointer(cache.SWRAM)), Imm32(0)); - MOV(64, MScaled(RSCRATCH2, SCALE_4, squeezePointer(cache.SWRAM) + 8), Imm32(0)); - } - RET(); - SetJumpTarget(region); - SetJumpTarget(notMapped); - AND(32, R(RSCRATCH2), Imm32(0xFFFF)); - if (!store) - MOV(32, R(RSCRATCH), MDisp(RSCRATCH2, squeezePointer(NDS::ARM7WRAM))); - else - { - MOV(32, MDisp(RSCRATCH2, squeezePointer(NDS::ARM7WRAM)), R(R11)); - MOV(64, MScaled(RSCRATCH2, SCALE_4, squeezePointer(cache.ARM7_WRAM)), Imm32(0)); - MOV(64, MScaled(RSCRATCH2, SCALE_4, squeezePointer(cache.ARM7_WRAM) + 8), Imm32(0)); - } - } - break; - case 0x04000000: - { - TEST(32, R(RSCRATCH2), Imm32(0x800000)); - FixupBranch region = J_CC(CC_NZ); - MOV(32, R(ABI_PARAM1), R(RSCRATCH2)); - if (!store) - { - ABI_PushRegistersAndAdjustStack({}, 8); - ABI_CallFunction(NDS::ARM7IORead32); - ABI_PopRegistersAndAdjustStack({}, 8); - - MOV(32, R(ECX), MDisp(RSP, 8)); - ROR_(32, R(RSCRATCH), R(ECX)); - RET(); - } - else - { - MOV(32, R(ABI_PARAM2), R(R11)); - JMP((u8*)NDS::ARM7IOWrite32, true); - } - SetJumpTarget(region); - - if (!store) - { - ABI_PushRegistersAndAdjustStack({RSCRATCH2}, 8); - MOV(32, R(ABI_PARAM1), R(RSCRATCH2)); - ABI_CallFunction(Wifi::Read); - ABI_PopRegistersAndAdjustStack({RSCRATCH2}, 8); - - ADD(32, R(RSCRATCH2), Imm8(2)); - ABI_PushRegistersAndAdjustStack({EAX}, 8); - MOV(32, R(ABI_PARAM1), R(RSCRATCH2)); - ABI_CallFunction(Wifi::Read); - MOV(32, R(RSCRATCH2), R(EAX)); - SHL(32, R(RSCRATCH2), Imm8(16)); - 
ABI_PopRegistersAndAdjustStack({EAX}, 8); - OR(32, R(EAX), R(RSCRATCH2)); - } - else - { - ABI_PushRegistersAndAdjustStack({RSCRATCH2, R11}, 8); - MOV(32, R(ABI_PARAM1), R(RSCRATCH2)); - MOVZX(32, 16, ABI_PARAM2, R(R11)); - ABI_CallFunction(Wifi::Write); - ABI_PopRegistersAndAdjustStack({RSCRATCH2, R11}, 8); - SHR(32, R(R11), Imm8(16)); - ADD(32, R(RSCRATCH2), Imm8(2)); - ABI_PushRegistersAndAdjustStack({RSCRATCH2, R11}, 8); - MOV(32, R(ABI_PARAM1), R(RSCRATCH2)); - MOVZX(32, 16, ABI_PARAM2, R(R11)); - ABI_CallFunction(Wifi::Write); - ABI_PopRegistersAndAdjustStack({RSCRATCH2, R11}, 8); - } - } - break; - case 0x06000000: - MOV(32, R(ABI_PARAM1), R(RSCRATCH2)); - if (!store) - { - ABI_PushRegistersAndAdjustStack({}, 8); - ABI_CallFunction(GPU::ReadVRAM_ARM7); - ABI_PopRegistersAndAdjustStack({}, 8); - } - else - { - AND(32, R(ABI_PARAM1), Imm32(0x40000 - 1)); - MOV(64, MScaled(ABI_PARAM1, SCALE_4, squeezePointer(cache.ARM7_WVRAM)), Imm32(0)); - MOV(64, MScaled(ABI_PARAM1, SCALE_4, squeezePointer(cache.ARM7_WVRAM) + 8), Imm32(0)); - MOV(32, R(ABI_PARAM2), R(R11)); - JMP((u8*)GPU::WriteVRAM_ARM7, true); - } - break; - case 0x08000000: - case 0x09000000: - case 0x0A000000: - if (!store) - MOV(32, R(RSCRATCH), Imm32(0xFFFFFFFF)); - break; - /*default: - ABI_PushRegistersAndAdjustStack({}, 8, 0); - MOV(32, R(ABI_PARAM1), R(RSCRATCH2)); + if (size == 32) + { + ABI_PushRegistersAndAdjustStack({ABI_PARAM1}, 8); + AND(32, R(ABI_PARAM1), Imm32(addressMask)); ABI_CallFunction(NDS::ARM7Read32); - ABI_PopRegistersAndAdjustStack({}, 8, 0); - break;*/ + ABI_PopRegistersAndAdjustStack({ECX}, 8); + AND(32, R(ECX), Imm8(3)); + SHL(32, R(ECX), Imm8(3)); + ROR_(32, R(RSCRATCH), R(ECX)); + RET(); + } + else if (size == 16) + { + AND(32, R(ABI_PARAM1), Imm32(addressMask)); + JMP((u8*)NDS::ARM7Read16, true); + } + else + JMP((u8*)NDS::ARM7Read8, true); } + return res; +} + +void Compiler::Comp_MemAccess(Gen::OpArg rd, bool signExtend, bool store, int size) +{ + if (store) + MOV(32, R(ABI_PARAM2), rd); + u32 cycles = Num + ? NDS::ARM7MemTimings[CurInstr.CodeCycles][Thumb ? 0 : 2] + : (R15 & 0x2 ? 0 : CurInstr.CodeCycles); + MOV(32, R(ABI_PARAM3), Imm32(cycles)); + CALL(Num == 0 + ? MemoryFuncs9[size >> 4][store] + : MemoryFuncs7[size >> 4][store][CodeRegion == 0x02]); + if (!store) { - MOV(32, R(ECX), MDisp(RSP, 8)); - ROR_(32, R(RSCRATCH), R(ECX)); + if (signExtend) + MOVSX(32, size, rd.GetSimpleReg(), R(RSCRATCH)); + else + MOVZX(32, size, rd.GetSimpleReg(), R(RSCRATCH)); } - - RET(); - - return res; } OpArg Compiler::A_Comp_GetMemWBOffset() { - if (!(CurrentInstr.Instr & (1 << 25))) - return Imm32(CurrentInstr.Instr & 0xFFF); + if (!(CurInstr.Instr & (1 << 25))) + { + u32 imm = CurInstr.Instr & 0xFFF; + return Imm32(imm); + } else { - int op = (CurrentInstr.Instr >> 5) & 0x3; - int amount = (CurrentInstr.Instr >> 7) & 0x1F; - OpArg rm = MapReg(CurrentInstr.A_Reg(0)); + int op = (CurInstr.Instr >> 5) & 0x3; + int amount = (CurInstr.Instr >> 7) & 0x1F; + OpArg rm = MapReg(CurInstr.A_Reg(0)); bool carryUsed; + return Comp_RegShiftImm(op, amount, rm, false, carryUsed); } } void Compiler::A_Comp_MemWB() -{ - OpArg rn = MapReg(CurrentInstr.A_Reg(16)); - OpArg rd = MapReg(CurrentInstr.A_Reg(12)); - bool load = CurrentInstr.Instr & (1 << 20); +{ + OpArg rn = MapReg(CurInstr.A_Reg(16)); + OpArg rd = MapReg(CurInstr.A_Reg(12)); + bool load = CurInstr.Instr & (1 << 20); + bool byte = CurInstr.Instr & (1 << 22); + int size = byte ? 
8 : 32; - MOV(32, R(RSCRATCH2), rn); - if (CurrentInstr.Instr & (1 << 24)) + if (CurInstr.Instr & (1 << 24)) { OpArg offset = A_Comp_GetMemWBOffset(); - if (CurrentInstr.Instr & (1 << 23)) - ADD(32, R(RSCRATCH2), offset); + if (CurInstr.Instr & (1 << 23)) + MOV_sum(32, ABI_PARAM1, rn, offset); else - SUB(32, R(RSCRATCH2), offset); + { + MOV(32, R(ABI_PARAM1), rn); + SUB(32, R(ABI_PARAM1), offset); + } - if (CurrentInstr.Instr & (1 << 21)) - MOV(32, rn, R(RSCRATCH2)); + if (CurInstr.Instr & (1 << 21)) + MOV(32, rn, R(ABI_PARAM1)); } - - u32 cycles = Num ? NDS::ARM7MemTimings[CurrentInstr.CodeCycles][2] : CurrentInstr.CodeCycles; - MOV(32, R(RSCRATCH3), Imm32(cycles)); - MOV(32, R(RSCRATCH), R(RSCRATCH2)); - SHR(32, R(RSCRATCH), Imm8(24)); - AND(32, R(RSCRATCH), Imm8(0xF)); - void** funcArray; - if (load) - funcArray = Num ? ReadMemFuncs7[CodeRegion == 0x02] : ReadMemFuncs9; else + MOV(32, R(ABI_PARAM1), rn); + + if (!(CurInstr.Instr & (1 << 24))) + { + OpArg offset = A_Comp_GetMemWBOffset(); + + if (CurInstr.Instr & (1 << 23)) + ADD(32, rn, offset); + else + SUB(32, rn, offset); + } + + Comp_MemAccess(rd, false, !load, byte ? 8 : 32); + if (load && CurInstr.A_Reg(12) == 15) + { + if (byte) + printf("!!! LDRB PC %08X\n", R15); + else + { + if (Num == 1) + AND(32, rd, Imm8(0xFE)); // immediate is sign extended + Comp_JumpTo(rd.GetSimpleReg()); + } + } +} + +void Compiler::A_Comp_MemHalf() +{ + OpArg rn = MapReg(CurInstr.A_Reg(16)); + OpArg rd = MapReg(CurInstr.A_Reg(12)); + + OpArg offset = CurInstr.Instr & (1 << 22) + ? Imm32(CurInstr.Instr & 0xF | ((CurInstr.Instr >> 4) & 0xF0)) + : MapReg(CurInstr.A_Reg(0)); + + if (CurInstr.Instr & (1 << 24)) { - funcArray = Num ? WriteMemFuncs7[CodeRegion == 0x02] : WriteMemFuncs9; - MOV(32, R(R11), rd); + if (CurInstr.Instr & (1 << 23)) + MOV_sum(32, ABI_PARAM1, rn, offset); + else + { + MOV(32, R(ABI_PARAM1), rn); + SUB(32, R(ABI_PARAM1), offset); + } + + if (CurInstr.Instr & (1 << 21)) + MOV(32, rn, R(ABI_PARAM1)); } - CALLptr(MScaled(RSCRATCH, SCALE_8, squeezePointer(funcArray))); + else + MOV(32, R(ABI_PARAM1), rn); - if (load) - MOV(32, R(RSCRATCH2), R(RSCRATCH)); + int op = (CurInstr.Instr >> 5) & 0x3; + bool load = CurInstr.Instr & (1 << 20); - if (!(CurrentInstr.Instr & (1 << 24))) + bool signExtend = false; + int size; + if (!load && op == 1) + size = 16; + else if (load) { - OpArg offset = A_Comp_GetMemWBOffset(); + size = op == 2 ? 8 : 16; + signExtend = op > 1; + } - if (CurrentInstr.Instr & (1 << 23)) + if (!(CurInstr.Instr & (1 << 24))) + { + if (CurInstr.Instr & (1 << 23)) ADD(32, rn, offset); else SUB(32, rn, offset); } - if (load) - MOV(32, rd, R(RSCRATCH2)); + Comp_MemAccess(rd, signExtend, !load, size); + + if (load && CurInstr.A_Reg(12) == 15) + printf("!!! MemHalf op PC %08X\n", R15);; } void Compiler::T_Comp_MemReg() { - OpArg rd = MapReg(CurrentInstr.T_Reg(0)); - OpArg rb = MapReg(CurrentInstr.T_Reg(3)); - OpArg ro = MapReg(CurrentInstr.T_Reg(6)); + OpArg rd = MapReg(CurInstr.T_Reg(0)); + OpArg rb = MapReg(CurInstr.T_Reg(3)); + OpArg ro = MapReg(CurInstr.T_Reg(6)); - int op = (CurrentInstr.Instr >> 10) & 0x3; + int op = (CurInstr.Instr >> 10) & 0x3; bool load = op & 0x2; - - MOV(32, R(RSCRATCH2), rb); - ADD(32, R(RSCRATCH2), ro); - - u32 cycles = Num ? NDS::ARM7MemTimings[CurrentInstr.CodeCycles][0] : (R15 & 0x2 ? 
0 : CurrentInstr.CodeCycles); - MOV(32, R(RSCRATCH3), Imm32(cycles)); - MOV(32, R(RSCRATCH), R(RSCRATCH2)); - SHR(32, R(RSCRATCH), Imm8(24)); - AND(32, R(RSCRATCH), Imm8(0xF)); - void** funcArray; - if (load) - funcArray = Num ? ReadMemFuncs7[CodeRegion == 0x02] : ReadMemFuncs9; - else - { - funcArray = Num ? WriteMemFuncs7[CodeRegion == 0x02] : WriteMemFuncs9; - MOV(32, R(R11), rd); - } - CALLptr(MScaled(RSCRATCH, SCALE_8, squeezePointer(funcArray))); + bool byte = op & 0x1; + + MOV_sum(32, ABI_PARAM1, rb, ro); - if (load) - MOV(32, rd, R(RSCRATCH)); + Comp_MemAccess(rd, false, !load, byte ? 8 : 32); } void Compiler::T_Comp_MemImm() { - // TODO: aufräumen!!! - OpArg rd = MapReg(CurrentInstr.T_Reg(0)); - OpArg rb = MapReg(CurrentInstr.T_Reg(3)); - - int op = (CurrentInstr.Instr >> 11) & 0x3; - u32 offset = ((CurrentInstr.Instr >> 6) & 0x1F) * 4; + OpArg rd = MapReg(CurInstr.T_Reg(0)); + OpArg rb = MapReg(CurInstr.T_Reg(3)); + + int op = (CurInstr.Instr >> 11) & 0x3; bool load = op & 0x1; + bool byte = op & 0x2; + u32 offset = ((CurInstr.Instr >> 6) & 0x1F) * (byte ? 1 : 4); - LEA(32, RSCRATCH2, MDisp(rb.GetSimpleReg(), offset)); - u32 cycles = Num ? NDS::ARM7MemTimings[CurrentInstr.CodeCycles][0] : (R15 & 0x2 ? 0 : CurrentInstr.CodeCycles); - MOV(32, R(RSCRATCH3), Imm32(cycles)); - MOV(32, R(RSCRATCH), R(RSCRATCH2)); - SHR(32, R(RSCRATCH), Imm8(24)); - AND(32, R(RSCRATCH), Imm8(0xF)); - void** funcArray; - if (load) - funcArray = Num ? ReadMemFuncs7[CodeRegion == 0x02] : ReadMemFuncs9; - else - { - funcArray = Num ? WriteMemFuncs7[CodeRegion == 0x02] : WriteMemFuncs9; - MOV(32, R(R11), rd); - } - CALLptr(MScaled(RSCRATCH, SCALE_8, squeezePointer(funcArray))); + LEA(32, ABI_PARAM1, MDisp(rb.GetSimpleReg(), offset)); + + Comp_MemAccess(rd, false, !load, byte ? 8 : 32); +} + +void Compiler::T_Comp_MemRegHalf() +{ + OpArg rd = MapReg(CurInstr.T_Reg(0)); + OpArg rb = MapReg(CurInstr.T_Reg(3)); + OpArg ro = MapReg(CurInstr.T_Reg(6)); + + int op = (CurInstr.Instr >> 10) & 0x3; + bool load = op != 0; + int size = op != 1 ? 
16 : 8; + bool signExtend = op & 1; + + MOV_sum(32, ABI_PARAM1, rb, ro); + + Comp_MemAccess(rd, signExtend, !load, size); +} + +void Compiler::T_Comp_MemImmHalf() +{ + OpArg rd = MapReg(CurInstr.T_Reg(0)); + OpArg rb = MapReg(CurInstr.T_Reg(3)); + + u32 offset = (CurInstr.Instr >> 5) & 0x3E; + bool load = CurInstr.Instr & (1 << 11); + + LEA(32, ABI_PARAM1, MDisp(rb.GetSimpleReg(), offset)); - if (load) - MOV(32, rd, R(RSCRATCH)); + Comp_MemAccess(rd, false, !load, 16); } } \ No newline at end of file diff --git a/src/ARM_InstrInfo.cpp b/src/ARM_InstrInfo.cpp index 41c46e1..32a9645 100644 --- a/src/ARM_InstrInfo.cpp +++ b/src/ARM_InstrInfo.cpp @@ -317,7 +317,7 @@ Info Decode(bool thumb, u32 num, u32 instr) else { u32 data = ARMInstrTable[((instr >> 4) & 0xF) | ((instr >> 16) & 0xFF0)]; - if ((instr & 0xFE000000) == 0xFA000000) + if (num == 0 && (instr & 0xFE000000) == 0xFA000000) data = A_BLX_IMM; if (data & A_ARM9Only && num != 0) diff --git a/src/NDS.cpp b/src/NDS.cpp index b8fd8cb..baa5e0d 100644 --- a/src/NDS.cpp +++ b/src/NDS.cpp @@ -524,6 +524,8 @@ void Reset() KeyCnt = 0; RCnt = 0; + ARMJIT::ResetBlocks(); + NDSCart::Reset(); GBACart::Reset(); GPU::Reset(); -- cgit v1.2.3 From ff9721111441e69b4a276a34c757476b625213c6 Mon Sep 17 00:00:00 2001 From: RSDuck Date: Wed, 10 Jul 2019 00:57:59 +0200 Subject: jit: thumb block transfer working also pc and sp relative loads and some refactoring --- src/ARMJIT_RegCache.h | 136 ---------- src/ARMJIT_RegisterCache.h | 136 ++++++++++ src/ARMJIT_x64/ARMJIT_Compiler.cpp | 82 ++++-- src/ARMJIT_x64/ARMJIT_Compiler.h | 19 +- src/ARMJIT_x64/ARMJIT_LoadStore.cpp | 515 +++++++++++++++++++++++++++++++----- src/ARM_InstrInfo.cpp | 46 ++-- 6 files changed, 682 insertions(+), 252 deletions(-) delete mode 100644 src/ARMJIT_RegCache.h create mode 100644 src/ARMJIT_RegisterCache.h (limited to 'src/ARM_InstrInfo.cpp') diff --git a/src/ARMJIT_RegCache.h b/src/ARMJIT_RegCache.h deleted file mode 100644 index 556d27b..0000000 --- a/src/ARMJIT_RegCache.h +++ /dev/null @@ -1,136 +0,0 @@ -#ifndef ARMJIT_REGCACHE_H -#define ARMJIT_REGCACHE_H - -#include "ARMJIT.h" - -// TODO: replace this in the future -#include "dolphin/BitSet.h" - -#include - -namespace ARMJIT -{ - -template -class RegCache -{ -public: - RegCache() - {} - - RegCache(T* compiler, FetchedInstr instrs[], int instrsCount) - : Compiler(compiler), Instrs(instrs), InstrsCount(instrsCount) - { - for (int i = 0; i < 16; i++) - Mapping[i] = (Reg)-1; - } - - void UnloadRegister(int reg) - { - assert(Mapping[reg] != -1); - - if (DirtyRegs & (1 << reg)) - Compiler->SaveReg(reg, Mapping[reg]); - - DirtyRegs &= ~(1 << reg); - LoadedRegs &= ~(1 << reg); - NativeRegsUsed &= ~(1 << (int)Mapping[reg]); - Mapping[reg] = (Reg)-1; - } - - void LoadRegister(int reg) - { - assert(Mapping[reg] == -1); - for (int i = 0; i < NativeRegsAvailable; i++) - { - Reg nativeReg = NativeRegAllocOrder[i]; - if (!(NativeRegsUsed & (1 << nativeReg))) - { - Mapping[reg] = nativeReg; - NativeRegsUsed |= 1 << (int)nativeReg; - LoadedRegs |= 1 << reg; - - Compiler->LoadReg(reg, nativeReg); - - return; - } - } - - assert("Welp!"); - } - - void Flush() - { - BitSet16 loadedSet(LoadedRegs); - for (int reg : loadedSet) - UnloadRegister(reg); - } - - void Prepare(int i) - { - u16 futureNeeded = 0; - int ranking[16]; - for (int j = 0; j < 16; j++) - ranking[j] = 0; - for (int j = i; j < InstrsCount; j++) - { - BitSet16 regsNeeded((Instrs[j].Info.SrcRegs & ~(1 << 15)) | Instrs[j].Info.DstRegs); - futureNeeded |= regsNeeded.m_val; - for (int reg : 
regsNeeded) - ranking[reg]++; - } - - // we'll unload all registers which are never used again - BitSet16 neverNeededAgain(LoadedRegs & ~futureNeeded); - for (int reg : neverNeededAgain) - UnloadRegister(reg); - - FetchedInstr Instr = Instrs[i]; - u16 necessaryRegs = (Instr.Info.SrcRegs & ~(1 << 15)) | Instr.Info.DstRegs; - BitSet16 needToBeLoaded(necessaryRegs & ~LoadedRegs); - if (needToBeLoaded != BitSet16(0)) - { - int neededCount = needToBeLoaded.Count(); - BitSet16 loadedSet(LoadedRegs); - while (loadedSet.Count() + neededCount > NativeRegsAvailable) - { - int leastReg = -1; - int rank = 1000; - for (int reg : loadedSet) - { - if (!((1 << reg) & necessaryRegs) && ranking[reg] < rank) - { - leastReg = reg; - rank = ranking[reg]; - } - } - - assert(leastReg != -1); - UnloadRegister(leastReg); - - loadedSet.m_val = LoadedRegs; - } - - for (int reg : needToBeLoaded) - LoadRegister(reg); - } - DirtyRegs |= Instr.Info.DstRegs & ~(1 << 15); - } - - static const Reg NativeRegAllocOrder[]; - static const int NativeRegsAvailable; - - Reg Mapping[16]; - u32 NativeRegsUsed = 0; - u16 LoadedRegs = 0; - u16 DirtyRegs = 0; - - T* Compiler; - - FetchedInstr* Instrs; - int InstrsCount; -}; - -} - -#endif \ No newline at end of file diff --git a/src/ARMJIT_RegisterCache.h b/src/ARMJIT_RegisterCache.h new file mode 100644 index 0000000..04c1eda --- /dev/null +++ b/src/ARMJIT_RegisterCache.h @@ -0,0 +1,136 @@ +#ifndef ARMJIT_REGCACHE_H +#define ARMJIT_REGCACHE_H + +#include "ARMJIT.h" + +// TODO: replace this in the future +#include "dolphin/BitSet.h" + +#include + +namespace ARMJIT +{ + +template +class RegisterCache +{ +public: + RegisterCache() + {} + + RegisterCache(T* compiler, FetchedInstr instrs[], int instrsCount) + : Compiler(compiler), Instrs(instrs), InstrsCount(instrsCount) + { + for (int i = 0; i < 16; i++) + Mapping[i] = (Reg)-1; + } + + void UnloadRegister(int reg) + { + assert(Mapping[reg] != -1); + + if (DirtyRegs & (1 << reg)) + Compiler->SaveReg(reg, Mapping[reg]); + + DirtyRegs &= ~(1 << reg); + LoadedRegs &= ~(1 << reg); + NativeRegsUsed &= ~(1 << (int)Mapping[reg]); + Mapping[reg] = (Reg)-1; + } + + void LoadRegister(int reg) + { + assert(Mapping[reg] == -1); + for (int i = 0; i < NativeRegsAvailable; i++) + { + Reg nativeReg = NativeRegAllocOrder[i]; + if (!(NativeRegsUsed & (1 << nativeReg))) + { + Mapping[reg] = nativeReg; + NativeRegsUsed |= 1 << (int)nativeReg; + LoadedRegs |= 1 << reg; + + Compiler->LoadReg(reg, nativeReg); + + return; + } + } + + assert("Welp!"); + } + + void Flush() + { + BitSet16 loadedSet(LoadedRegs); + for (int reg : loadedSet) + UnloadRegister(reg); + } + + void Prepare(int i) + { + u16 futureNeeded = 0; + int ranking[16]; + for (int j = 0; j < 16; j++) + ranking[j] = 0; + for (int j = i; j < InstrsCount; j++) + { + BitSet16 regsNeeded((Instrs[j].Info.SrcRegs & ~(1 << 15)) | Instrs[j].Info.DstRegs); + futureNeeded |= regsNeeded.m_val; + for (int reg : regsNeeded) + ranking[reg]++; + } + + // we'll unload all registers which are never used again + BitSet16 neverNeededAgain(LoadedRegs & ~futureNeeded); + for (int reg : neverNeededAgain) + UnloadRegister(reg); + + FetchedInstr Instr = Instrs[i]; + u16 necessaryRegs = (Instr.Info.SrcRegs & ~(1 << 15)) | Instr.Info.DstRegs; + BitSet16 needToBeLoaded(necessaryRegs & ~LoadedRegs); + if (needToBeLoaded != BitSet16(0)) + { + int neededCount = needToBeLoaded.Count(); + BitSet16 loadedSet(LoadedRegs); + while (loadedSet.Count() + neededCount > NativeRegsAvailable) + { + int leastReg = -1; + int rank = 1000; + for 
(int reg : loadedSet) + { + if (!((1 << reg) & necessaryRegs) && ranking[reg] < rank) + { + leastReg = reg; + rank = ranking[reg]; + } + } + + assert(leastReg != -1); + UnloadRegister(leastReg); + + loadedSet.m_val = LoadedRegs; + } + + for (int reg : needToBeLoaded) + LoadRegister(reg); + } + DirtyRegs |= Instr.Info.DstRegs & ~(1 << 15); + } + + static const Reg NativeRegAllocOrder[]; + static const int NativeRegsAvailable; + + Reg Mapping[16]; + u32 NativeRegsUsed = 0; + u16 LoadedRegs = 0; + u16 DirtyRegs = 0; + + T* Compiler; + + FetchedInstr* Instrs; + int InstrsCount; +}; + +} + +#endif \ No newline at end of file diff --git a/src/ARMJIT_x64/ARMJIT_Compiler.cpp b/src/ARMJIT_x64/ARMJIT_Compiler.cpp index b7358a2..4fe0c70 100644 --- a/src/ARMJIT_x64/ARMJIT_Compiler.cpp +++ b/src/ARMJIT_x64/ARMJIT_Compiler.cpp @@ -9,20 +9,20 @@ using namespace Gen; namespace ARMJIT { template <> -const X64Reg RegCache::NativeRegAllocOrder[] = +const X64Reg RegisterCache::NativeRegAllocOrder[] = { #ifdef _WIN32 - RBX, RSI, RDI, R12, R13 + RBX, RSI, RDI, R12, R13, R14 #else - RBX, R12, R13 + RBX, R12, R13, R14 // this is sad #endif }; template <> -const int RegCache::NativeRegsAvailable = +const int RegisterCache::NativeRegsAvailable = #ifdef _WIN32 - 5 + 6 #else - 3 + 4 #endif ; @@ -39,10 +39,47 @@ Compiler::Compiler() MemoryFuncs7[i][j][1] = Gen_MemoryRoutine7(j, true, 8 << i); } } + for (int i = 0; i < 2; i++) + for (int j = 0; j < 2; j++) + { + MemoryFuncsSeq9[i][j] = Gen_MemoryRoutineSeq9(i, j); + MemoryFuncsSeq7[i][j][0] = Gen_MemoryRoutineSeq7(i, j, false); + MemoryFuncsSeq7[i][j][1] = Gen_MemoryRoutineSeq7(i, j, true); + } ResetStart = GetWritableCodePtr(); } +void* Compiler::Gen_ChangeCPSRRoutine() +{ + void* res = (void*)GetWritableCodePtr(); + + MOV(32, R(RSCRATCH), R(RCPSR)); + AND(32, R(RSCRATCH), Imm8(0x1F)); + CMP(32, R(RSCRATCH), Imm8(0x11)); + FixupBranch fiq = J_CC(CC_E); + CMP(32, R(RSCRATCH), Imm8(0x12)); + FixupBranch irq = J_CC(CC_E); + CMP(32, R(RSCRATCH), Imm8(0x13)); + FixupBranch svc = J_CC(CC_E); + CMP(32, R(RSCRATCH), Imm8(0x17)); + FixupBranch abt = J_CC(CC_E); + CMP(32, R(RSCRATCH), Imm8(0x1B)); + FixupBranch und = J_CC(CC_E); + + SetJumpTarget(fiq); + + SetJumpTarget(irq); + + SetJumpTarget(svc); + + SetJumpTarget(abt); + + SetJumpTarget(und); + + return res; +} + DataRegion Compiler::ClassifyAddress(u32 addr) { if (Num == 0 && addr >= ((ARMv5*)CurCPU)->DTCMBase && addr < ((ARMv5*)CurCPU)->DTCMBase) @@ -106,12 +143,11 @@ CompiledBlock Compiler::CompileBlock(ARM* cpu, FetchedInstr instrs[], int instrs ABI_PushRegistersAndAdjustStack({ABI_ALL_CALLEE_SAVED & ABI_ALL_GPRS}, 8, 16); MOV(64, R(RCPU), ImmPtr(cpu)); - XOR(32, R(RCycles), R(RCycles)); LoadCPSR(); // TODO: this is ugly as a whole, do better - RegCache = ARMJIT::RegCache(this, instrs, instrsCount); + RegCache = RegisterCache(this, instrs, instrsCount); for (int i = 0; i < instrsCount; i++) { @@ -242,7 +278,7 @@ CompiledBlock Compiler::CompileBlock(ARM* cpu, FetchedInstr instrs[], int instrs RegCache.Flush(); SaveCPSR(); - LEA(32, RAX, MDisp(RCycles, ConstantCycles)); + MOV(32, R(RAX), Imm32(ConstantCycles)); ABI_PopRegistersAndAdjustStack({ABI_ALL_CALLEE_SAVED & ABI_ALL_GPRS}, 8, 16); RET(); @@ -306,18 +342,20 @@ CompileFunc Compiler::GetCompFunc(int kind) NULL, NULL, NULL, NULL, NULL, // STR A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, + //NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, // STRB + //NULL, NULL, 
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, // LDR + //NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, // LDRB + //NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, A_Comp_MemWB, // STRH A_Comp_MemHalf, A_Comp_MemHalf, A_Comp_MemHalf, A_Comp_MemHalf, - // LDRD - NULL, NULL, NULL, NULL, - // STRD - NULL, NULL, NULL, NULL, + // LDRD, STRD never used by anything so they stay interpreted (by anything I mean the 5 games I checked) + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, // LDRH A_Comp_MemHalf, A_Comp_MemHalf, A_Comp_MemHalf, A_Comp_MemHalf, // LDRSB @@ -360,10 +398,14 @@ CompileFunc Compiler::GetCompFunc(int kind) T_Comp_MemImm, T_Comp_MemImm, T_Comp_MemImm, T_Comp_MemImm, // LDR/STR half imm offset T_Comp_MemImmHalf, T_Comp_MemImmHalf, - // branch, etc. - NULL, NULL, NULL, NULL, NULL, NULL, - NULL, NULL, NULL, NULL, NULL, NULL, - NULL, NULL + // LDR/STR sp rel + NULL, NULL, + // PUSH/POP + NULL, NULL, + // LDMIA, STMIA + NULL, NULL, + NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL }; return Thumb ? T_Comp[kind] : A_Comp[kind]; @@ -376,7 +418,7 @@ void Compiler::Comp_AddCycles_C() : ((R15 & 0x2) ? 0 : CurInstr.CodeCycles); if (CurInstr.Cond() < 0xE) - ADD(32, R(RCycles), Imm8(cycles)); + ADD(32, MDisp(RCPU, offsetof(ARM, Cycles)), Imm8(cycles)); else ConstantCycles += cycles; } @@ -388,13 +430,15 @@ void Compiler::Comp_AddCycles_CI(u32 i) : ((R15 & 0x2) ? 
0 : CurInstr.CodeCycles)) + i; if (CurInstr.Cond() < 0xE) - ADD(32, R(RCycles), Imm8(cycles)); + ADD(32, MDisp(RCPU, offsetof(ARM, Cycles)), Imm8(cycles)); else ConstantCycles += cycles; } void Compiler::Comp_JumpTo(Gen::X64Reg addr, bool restoreCPSR) { + // potential bug: if a register that is still cached gets saved on a mode switch, + // the old value is stored SaveCPSR(); MOV(64, R(ABI_PARAM1), R(RCPU)); diff --git a/src/ARMJIT_x64/ARMJIT_Compiler.h b/src/ARMJIT_x64/ARMJIT_Compiler.h index 9395a29..a751737 100644 --- a/src/ARMJIT_x64/ARMJIT_Compiler.h +++ b/src/ARMJIT_x64/ARMJIT_Compiler.h @@ -4,7 +4,7 @@ #include "../dolphin/x64Emitter.h" #include "../ARMJIT.h" -#include "../ARMJIT_RegCache.h" +#include "../ARMJIT_RegisterCache.h" #include @@ -12,7 +12,6 @@ namespace ARMJIT { const Gen::X64Reg RCPU = Gen::RBP; -const Gen::X64Reg RCycles = Gen::R14; const Gen::X64Reg RCPSR = Gen::R15; const Gen::X64Reg RSCRATCH = Gen::EAX; @@ -72,6 +71,7 @@ private: void A_Comp_MemWB(); void A_Comp_MemHalf(); + void A_Comp_LDM_STM(); void T_Comp_ShiftImm(); void T_Comp_AddSub_(); @@ -86,8 +86,13 @@ private: void T_Comp_MemImm(); void T_Comp_MemRegHalf(); void T_Comp_MemImmHalf(); + void T_Comp_LoadPCRel(); + void T_Comp_MemSPRel(); + void T_Comp_PUSH_POP(); + void T_Comp_LDMIA_STMIA(); void Comp_MemAccess(Gen::OpArg rd, bool signExtend, bool store, int size); + s32 Comp_MemAccessBlock(Gen::OpArg rb, BitSet16 regs, bool store, bool preinc, bool decrement, bool usermode); void Comp_ArithTriOp(void (Compiler::*op)(int, const Gen::OpArg&, const Gen::OpArg&), Gen::OpArg rd, Gen::OpArg rn, Gen::OpArg op2, bool carryUsed, int opFlags); @@ -100,6 +105,11 @@ private: void* Gen_MemoryRoutine9(bool store, int size); void* Gen_MemoryRoutine7(bool store, bool codeMainRAM, int size); + void* Gen_MemoryRoutineSeq9(bool store, bool preinc); + void* Gen_MemoryRoutineSeq7(bool store, bool preinc, bool codeMainRAM); + + void* Gen_ChangeCPSRRoutine(); + Gen::OpArg Comp_RegShiftImm(int op, int amount, Gen::OpArg rm, bool S, bool& carryUsed); Gen::OpArg Comp_RegShiftReg(int op, Gen::OpArg rs, Gen::OpArg rm, bool S, bool& carryUsed); @@ -122,11 +132,14 @@ private: void* MemoryFuncs9[3][2]; void* MemoryFuncs7[3][2][2]; + void* MemoryFuncsSeq9[2][2]; + void* MemoryFuncsSeq7[2][2][2]; + bool CPSRDirty = false; FetchedInstr CurInstr; - RegCache RegCache; + RegisterCache RegCache; bool Thumb; u32 Num; diff --git a/src/ARMJIT_x64/ARMJIT_LoadStore.cpp b/src/ARMJIT_x64/ARMJIT_LoadStore.cpp index 69746e2..20e1893 100644 --- a/src/ARMJIT_x64/ARMJIT_LoadStore.cpp +++ b/src/ARMJIT_x64/ARMJIT_LoadStore.cpp @@ -3,16 +3,6 @@ #include "../GPU.h" #include "../Wifi.h" -namespace NDS -{ -extern u8* SWRAM_ARM9; -extern u32 SWRAM_ARM9Mask; -extern u8* SWRAM_ARM7; -extern u32 SWRAM_ARM7Mask; -extern u8 ARM7WRAM[]; -extern u16 ARM7BIOSProt; -} - using namespace Gen; namespace ARMJIT @@ -41,6 +31,49 @@ int squeezePointer(T* ptr) store value - ABI_PARAM2 (a.k.a. 
RDX = RSCRATCH2 on Windows) code cycles - ABI_PARAM3 */ + +#define CALC_CYCLES_9(numC, numD, scratch) \ + LEA(32, scratch, MComplex(numD, numC, SCALE_1, -6)); \ + CMP(32, R(numC), R(numD)); \ + CMOVcc(32, numD, R(numC), CC_G); \ + CMP(32, R(numD), R(scratch)); \ + CMOVcc(32, scratch, R(numD), CC_G); \ + ADD(32, MDisp(RCPU, offsetof(ARM, Cycles)), R(scratch)); +#define CALC_CYCLES_7_DATA_MAIN_RAM(numC, numD, scratch) \ + if (codeMainRAM) \ + { \ + LEA(32, scratch, MRegSum(numD, numC)); \ + ADD(32, MDisp(RCPU, offsetof(ARM, Cycles)), R(scratch)); \ + } \ + else \ + { \ + if (!store) \ + ADD(32, R(numC), Imm8(1)); \ + LEA(32, scratch, MComplex(numD, numC, SCALE_1, -3)); \ + CMP(32, R(numD), R(numC)); \ + CMOVcc(32, numC, R(numD), CC_G); \ + CMP(32, R(numC), R(scratch)); \ + CMOVcc(32, scratch, R(numC), CC_G); \ + ADD(32, MDisp(RCPU, offsetof(ARM, Cycles)), R(scratch)); \ + } +#define CALC_CYCLES_7_DATA_NON_MAIN_RAM(numC, numD, scratch) \ + if (codeMainRAM) \ + { \ + if (!store) \ + ADD(32, R(numD), Imm8(1)); \ + LEA(32, scratch, MComplex(numD, numC, SCALE_1, -3)); \ + CMP(32, R(numD), R(numC)); \ + CMOVcc(32, numC, R(numD), CC_G); \ + CMP(32, R(numC), R(scratch)); \ + CMOVcc(32, scratch, R(numC), CC_G); \ + ADD(32, MDisp(RCPU, offsetof(ARM, Cycles)), R(scratch)); \ + } \ + else \ + { \ + LEA(32, scratch, MComplex(numD, numC, SCALE_1, store ? 0 : 1)); \ + ADD(32, MDisp(RCPU, offsetof(ARM, Cycles)), R(scratch)); \ + } + void* Compiler::Gen_MemoryRoutine9(bool store, int size) { u32 addressMask = ~(size == 32 ? 3 : (size == 16 ? 1 : 0)); @@ -56,15 +89,10 @@ void* Compiler::Gen_MemoryRoutine9(bool store, int size) FixupBranch insideITCM = J_CC(CC_B); // cycle counting! - MOV(32, R(RSCRATCH), R(ABI_PARAM1)); - SHR(32, R(RSCRATCH), Imm8(12)); - MOVZX(32, 8, RSCRATCH, MComplex(RCPU, RSCRATCH, SCALE_1, offsetof(ARMv5, MemTimings) + (size == 32 ? 2 : 0))); - LEA(32, ABI_PARAM4, MComplex(RSCRATCH, ABI_PARAM3, SCALE_1, -6)); - CMP(32, R(ABI_PARAM3), R(RSCRATCH)); - CMOVcc(32, RSCRATCH, R(ABI_PARAM3), CC_G); - CMP(32, R(ABI_PARAM4), R(RSCRATCH)); - CMOVcc(32, RSCRATCH, R(ABI_PARAM4), CC_G); - ADD(32, R(RCycles), R(RSCRATCH)); + MOV(32, R(ABI_PARAM4), R(ABI_PARAM1)); + SHR(32, R(ABI_PARAM4), Imm8(12)); + MOVZX(32, 8, ABI_PARAM4, MComplex(RCPU, ABI_PARAM4, SCALE_4, offsetof(ARMv5, MemTimings) + (size == 32 ? 2 : 1))); + CALC_CYCLES_9(ABI_PARAM3, ABI_PARAM4, RSCRATCH) if (store) { @@ -101,7 +129,7 @@ void* Compiler::Gen_MemoryRoutine9(bool store, int size) } SetJumpTarget(insideDTCM); - ADD(32, R(RCycles), R(ABI_PARAM3)); + ADD(32, MDisp(RCPU, offsetof(ARM, Cycles)), R(ABI_PARAM3)); AND(32, R(RSCRATCH), Imm32(0x3FFF & addressMask)); if (store) MOV(size, MComplex(RCPU, RSCRATCH, SCALE_1, offsetof(ARMv5, DTCM)), R(ABI_PARAM2)); @@ -120,7 +148,7 @@ void* Compiler::Gen_MemoryRoutine9(bool store, int size) RET(); SetJumpTarget(insideITCM); - ADD(32, R(RCycles), R(ABI_PARAM3)); + ADD(32, MDisp(RCPU, offsetof(ARM, Cycles)), R(ABI_PARAM3)); MOV(32, R(ABI_PARAM3), R(ABI_PARAM1)); // free up ECX AND(32, R(ABI_PARAM3), Imm32(0x7FFF & addressMask)); if (store) @@ -158,28 +186,13 @@ void* Compiler::Gen_MemoryRoutine7(bool store, bool codeMainRAM, int size) MOV(32, R(RSCRATCH), R(ABI_PARAM1)); SHR(32, R(RSCRATCH), Imm8(15)); - MOVZX(32, 8, ABI_PARAM4, MDisp(RSCRATCH, (size == 32 ? 2 : 0) + squeezePointer(NDS::ARM7MemTimings))); + MOVZX(32, 8, ABI_PARAM4, MScaled(RSCRATCH, SCALE_4, (size == 32 ? 
2 : 0) + squeezePointer(NDS::ARM7MemTimings))); MOV(32, R(RSCRATCH), R(ABI_PARAM1)); AND(32, R(RSCRATCH), Imm32(0xFF000000)); CMP(32, R(RSCRATCH), Imm32(0x02000000)); FixupBranch outsideMainRAM = J_CC(CC_NE); - if (codeMainRAM) - { - LEA(32, RSCRATCH, MRegSum(ABI_PARAM4, ABI_PARAM3)); - ADD(32, R(RCycles), R(RSCRATCH)); - } - else - { - if (!store) - ADD(32, R(ABI_PARAM3), Imm8(1)); - LEA(32, RSCRATCH, MComplex(ABI_PARAM4, ABI_PARAM3, SCALE_1, -3)); - CMP(32, R(ABI_PARAM4), R(ABI_PARAM3)); - CMOVcc(32, ABI_PARAM3, R(ABI_PARAM4), CC_G); - CMP(32, R(ABI_PARAM3), R(RSCRATCH)); - CMOVcc(32, RSCRATCH, R(ABI_PARAM3), CC_G); - ADD(32, R(RCycles), R(RSCRATCH)); - } + CALC_CYCLES_7_DATA_MAIN_RAM(ABI_PARAM3, ABI_PARAM4, RSCRATCH) MOV(32, R(ABI_PARAM3), R(ABI_PARAM1)); AND(32, R(ABI_PARAM3), Imm32((MAIN_RAM_SIZE - 1) & addressMask)); if (store) @@ -205,22 +218,7 @@ void* Compiler::Gen_MemoryRoutine7(bool store, bool codeMainRAM, int size) RET(); SetJumpTarget(outsideMainRAM); - if (codeMainRAM) - { - if (!store) - ADD(32, R(ABI_PARAM4), Imm8(1)); - LEA(32, RSCRATCH, MComplex(ABI_PARAM4, ABI_PARAM3, SCALE_1, -3)); - CMP(32, R(ABI_PARAM4), R(ABI_PARAM3)); - CMOVcc(32, ABI_PARAM3, R(ABI_PARAM4), CC_G); - CMP(32, R(ABI_PARAM3), R(RSCRATCH)); - CMOVcc(32, RSCRATCH, R(ABI_PARAM3), CC_G); - ADD(32, R(RCycles), R(RSCRATCH)); - } - else - { - LEA(32, RSCRATCH, MComplex(ABI_PARAM4, ABI_PARAM3, SCALE_1, store ? 0 : 1)); - ADD(32, R(RCycles), R(RSCRATCH)); - } + CALC_CYCLES_7_DATA_NON_MAIN_RAM(ABI_PARAM3, ABI_PARAM4, RSCRATCH) if (store) { if (size > 8) @@ -257,7 +255,189 @@ void* Compiler::Gen_MemoryRoutine7(bool store, bool codeMainRAM, int size) return res; } -void Compiler::Comp_MemAccess(Gen::OpArg rd, bool signExtend, bool store, int size) +#define MEMORY_SEQ_WHILE_COND \ + if (!store) \ + MOV(32, currentElement, R(EAX));\ + if (!preinc) \ + ADD(32, R(ABI_PARAM1), Imm8(4)); \ + \ + SUB(32, R(ABI_PARAM3), Imm8(1)); \ + J_CC(CC_NZ, repeat); + +/* + ABI_PARAM1 address + ABI_PARAM2 address where registers are stored + ABI_PARAM3 how many values to read/write + ABI_PARAM4 code cycles + + Dolphin x64CodeEmitter is my favourite assembler + */ +void* Compiler::Gen_MemoryRoutineSeq9(bool store, bool preinc) +{ + const u8* zero = GetCodePtr(); + ADD(32, MDisp(RCPU, offsetof(ARM, Cycles)), R(ABI_PARAM4)); + RET(); + + void* res = (void*)GetWritableCodePtr(); + + TEST(32, R(ABI_PARAM3), R(ABI_PARAM3)); + J_CC(CC_Z, zero); + + PUSH(ABI_PARAM3); + PUSH(ABI_PARAM4); // we need you later + + const u8* repeat = GetCodePtr(); + + if (preinc) + ADD(32, R(ABI_PARAM1), Imm8(4)); + + MOV(32, R(RSCRATCH), R(ABI_PARAM1)); + SUB(32, R(RSCRATCH), MDisp(RCPU, offsetof(ARMv5, DTCMBase))); + CMP(32, R(RSCRATCH), MDisp(RCPU, offsetof(ARMv5, DTCMSize))); + FixupBranch insideDTCM = J_CC(CC_B); + + CMP(32, R(ABI_PARAM1), MDisp(RCPU, offsetof(ARMv5, ITCMSize))); + FixupBranch insideITCM = J_CC(CC_B); + + OpArg currentElement = MComplex(ABI_PARAM2, ABI_PARAM3, SCALE_8, -8); // wasting stack space like a gangster + + ABI_PushRegistersAndAdjustStack({ABI_PARAM1, ABI_PARAM2, ABI_PARAM3}, 8); + AND(32, R(ABI_PARAM1), Imm8(~3)); + if (store) + { + MOV(32, R(ABI_PARAM2), currentElement); + CALL((void*)NDS::ARM9Write32); + } + else + CALL((void*)NDS::ARM9Read32); + ABI_PopRegistersAndAdjustStack({ABI_PARAM1, ABI_PARAM2, ABI_PARAM3}, 8); + + MEMORY_SEQ_WHILE_COND + MOV(32, R(RSCRATCH), R(ABI_PARAM1)); + SHR(32, R(RSCRATCH), Imm8(12)); + MOVZX(32, 8, ABI_PARAM2, MComplex(RCPU, RSCRATCH, SCALE_4, 2 + offsetof(ARMv5, MemTimings))); + MOVZX(32, 8, 
RSCRATCH, MComplex(RCPU, RSCRATCH, SCALE_4, 3 + offsetof(ARMv5, MemTimings))); + + FixupBranch finishIt1 = J(); + + SetJumpTarget(insideDTCM); + AND(32, R(RSCRATCH), Imm32(0x3FFF & ~3)); + if (store) + { + MOV(32, R(ABI_PARAM4), currentElement); + MOV(32, MComplex(RCPU, RSCRATCH, SCALE_1, offsetof(ARMv5, DTCM)), R(ABI_PARAM4)); + } + else + MOV(32, R(RSCRATCH), MComplex(RCPU, RSCRATCH, SCALE_1, offsetof(ARMv5, DTCM))); + + MEMORY_SEQ_WHILE_COND + MOV(32, R(RSCRATCH), Imm32(1)); // sequential access time + MOV(32, R(ABI_PARAM2), Imm32(1)); // non sequential + FixupBranch finishIt2 = J(); + + SetJumpTarget(insideITCM); + MOV(32, R(RSCRATCH), R(ABI_PARAM1)); + AND(32, R(RSCRATCH), Imm32(0x7FFF & ~3)); + if (store) + { + MOV(32, R(ABI_PARAM4), currentElement); + MOV(32, MComplex(RCPU, RSCRATCH, SCALE_1, offsetof(ARMv5, ITCM)), R(ABI_PARAM4)); + XOR(32, R(ABI_PARAM4), R(ABI_PARAM4)); + MOV(64, MScaled(RSCRATCH, SCALE_4, squeezePointer(cache.ARM9_ITCM)), R(ABI_PARAM4)); + MOV(64, MScaled(RSCRATCH, SCALE_4, squeezePointer(cache.ARM9_ITCM) + 8), R(ABI_PARAM4)); + } + else + MOV(32, R(RSCRATCH), MComplex(RCPU, RSCRATCH, SCALE_1, offsetof(ARMv5, ITCM))); + + MEMORY_SEQ_WHILE_COND + MOV(32, R(RSCRATCH), Imm32(1)); + MOV(32, R(ABI_PARAM2), Imm32(1)); + + SetJumpTarget(finishIt1); + SetJumpTarget(finishIt2); + + POP(ABI_PARAM4); + POP(ABI_PARAM3); + + CMP(32, R(ABI_PARAM3), Imm8(1)); + FixupBranch skipSequential = J_CC(CC_E); + SUB(32, R(ABI_PARAM3), Imm8(1)); + IMUL(32, R(ABI_PARAM3)); + ADD(32, R(ABI_PARAM2), R(RSCRATCH)); + SetJumpTarget(skipSequential); + + CALC_CYCLES_9(ABI_PARAM4, ABI_PARAM2, RSCRATCH) + RET(); + + return res; +} + +void* Compiler::Gen_MemoryRoutineSeq7(bool store, bool preinc, bool codeMainRAM) +{ + const u8* zero = GetCodePtr(); + ADD(32, MDisp(RCPU, offsetof(ARM, Cycles)), R(ABI_PARAM4)); + RET(); + + void* res = (void*)GetWritableCodePtr(); + + TEST(32, R(ABI_PARAM3), R(ABI_PARAM3)); + J_CC(CC_Z, zero); + + PUSH(ABI_PARAM3); + PUSH(ABI_PARAM4); // we need you later + + const u8* repeat = GetCodePtr(); + + if (preinc) + ADD(32, R(ABI_PARAM1), Imm8(4)); + + OpArg currentElement = MComplex(ABI_PARAM2, ABI_PARAM3, SCALE_8, -8); + + ABI_PushRegistersAndAdjustStack({ABI_PARAM1, ABI_PARAM2, ABI_PARAM3}, 8); + AND(32, R(ABI_PARAM1), Imm8(~3)); + if (store) + { + MOV(32, R(ABI_PARAM2), currentElement); + CALL((void*)NDS::ARM7Write32); + } + else + CALL((void*)NDS::ARM7Read32); + ABI_PopRegistersAndAdjustStack({ABI_PARAM1, ABI_PARAM2, ABI_PARAM3}, 8); + + MEMORY_SEQ_WHILE_COND + MOV(32, R(RSCRATCH), R(ABI_PARAM1)); + SHR(32, R(RSCRATCH), Imm8(15)); + MOVZX(32, 8, ABI_PARAM2, MScaled(RSCRATCH, SCALE_4, 2 + squeezePointer(NDS::ARM7MemTimings))); + MOVZX(32, 8, RSCRATCH, MScaled(RSCRATCH, SCALE_4, 3 + squeezePointer(NDS::ARM7MemTimings))); + + POP(ABI_PARAM4); + POP(ABI_PARAM3); + + CMP(32, R(ABI_PARAM3), Imm8(1)); + FixupBranch skipSequential = J_CC(CC_E); + SUB(32, R(ABI_PARAM3), Imm8(1)); + IMUL(32, R(ABI_PARAM3)); + ADD(32, R(ABI_PARAM2), R(RSCRATCH)); + SetJumpTarget(skipSequential); + + MOV(32, R(RSCRATCH), R(ABI_PARAM1)); + AND(32, R(RSCRATCH), Imm32(0xFF000000)); + CMP(32, R(RSCRATCH), Imm32(0x02000000)); + FixupBranch outsideMainRAM = J_CC(CC_NE); + CALC_CYCLES_7_DATA_MAIN_RAM(ABI_PARAM4, ABI_PARAM2, RSCRATCH) + RET(); + + SetJumpTarget(outsideMainRAM); + CALC_CYCLES_7_DATA_NON_MAIN_RAM(ABI_PARAM4, ABI_PARAM2, RSCRATCH) + RET(); + + return res; +} + +#undef CALC_CYCLES_9 +#undef MEMORY_SEQ_WHILE_COND + +void Compiler::Comp_MemAccess(OpArg rd, bool signExtend, bool store, int 
size) { if (store) MOV(32, R(ABI_PARAM2), rd); @@ -278,6 +458,129 @@ void Compiler::Comp_MemAccess(Gen::OpArg rd, bool signExtend, bool store, int si } } +s32 Compiler::Comp_MemAccessBlock(OpArg rb, BitSet16 regs, bool store, bool preinc, bool decrement, bool usermode) +{ + int regsCount = regs.Count(); + + const u8 userModeOffsets[] = + { + offsetof(ARM, R[8]), offsetof(ARM, R[9]), offsetof(ARM, R[10]), offsetof(ARM, R[11]), + offsetof(ARM, R[12]), offsetof(ARM, R[13]), offsetof(ARM, R[14]), 0, + + offsetof(ARM, R_FIQ[0]), offsetof(ARM, R_FIQ[1]), offsetof(ARM, R_FIQ[2]), offsetof(ARM, R_FIQ[3]), + offsetof(ARM, R_FIQ[4]), offsetof(ARM, R_FIQ[5]), offsetof(ARM, R_FIQ[6]), 0, + + offsetof(ARM, R[8]), offsetof(ARM, R[9]), offsetof(ARM, R[10]), offsetof(ARM, R[11]), + offsetof(ARM, R[12]), offsetof(ARM, R_IRQ[13]), offsetof(ARM, R_IRQ[14]), 0, + + offsetof(ARM, R[8]), offsetof(ARM, R[9]), offsetof(ARM, R[10]), offsetof(ARM, R[11]), + offsetof(ARM, R[12]), offsetof(ARM, R_SVC[13]), offsetof(ARM, R_SVC[14]), 0, + + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + + offsetof(ARM, R[8]), offsetof(ARM, R[9]), offsetof(ARM, R[10]), offsetof(ARM, R[11]), + offsetof(ARM, R[12]), offsetof(ARM, R_ABT[13]), offsetof(ARM, R_ABT[14]), 0, + + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + + offsetof(ARM, R[8]), offsetof(ARM, R[9]), offsetof(ARM, R[10]), offsetof(ARM, R[11]), + offsetof(ARM, R[12]), offsetof(ARM, R_UND[13]), offsetof(ARM, R_UND[14]), 0, + }; + + if (decrement) + { + MOV_sum(32, ABI_PARAM1, rb, Imm32(-regsCount * 4)); + preinc = !preinc; + } + else + MOV(32, R(ABI_PARAM1), rb); + + MOV(32, R(ABI_PARAM3), Imm32(regsCount)); + u32 cycles = Num + ? NDS::ARM7MemTimings[CurInstr.CodeCycles][Thumb ? 0 : 2] + : (R15 & 0x2 ? 0 : CurInstr.CodeCycles); + MOV(32, R(ABI_PARAM4), Imm32(cycles)); + if (!store) + { + SUB(32, R(RSP), regsCount < 16 ? Imm8(regsCount * 8) : Imm32(regsCount * 8)); + MOV(64, R(ABI_PARAM2), R(RSP)); + + CALL(Num == 0 + ? MemoryFuncsSeq9[0][preinc] + : MemoryFuncsSeq7[0][preinc][CodeRegion == 0x02]); + + for (int reg = 15; reg >= 0; reg--) + { + if (regs[reg]) + { + if (usermode && reg >= 8 && reg < 15) + { + MOV(32, R(RSCRATCH2), R(RCPSR)); + AND(32, R(RSCRATCH2), Imm8(0x1F)); + // (RSCRATCH2 - 0x11) * 8 + squeezePointer(userModeOffsets) + (reg - 8), algebra is great! + MOVZX(32, 8, RSCRATCH2, MScaled(RSCRATCH2, SCALE_8, squeezePointer(userModeOffsets) - 0x11 * 8 + (reg - 8))); + POP(RSCRATCH); + MOV(32, MRegSum(RCPU, RSCRATCH2), R(RSCRATCH)); + } + else if (RegCache.Mapping[reg] == INVALID_REG) + { + assert(reg != 15); + + POP(RSCRATCH); + SaveReg(reg, RSCRATCH); + } + else + { + if (reg != 15) + RegCache.DirtyRegs |= (1 << reg); + POP(MapReg(reg).GetSimpleReg()); + } + } + } + + if (regs[15]) + { + if (Num == 1) + OR(32, MapReg(15), Imm8(1)); + Comp_JumpTo(MapReg(15).GetSimpleReg(), usermode); + } + } + else + { + for (int reg : regs) + { + if (usermode && reg >= 8 && reg < 15) + { + MOV(32, R(RSCRATCH), R(RCPSR)); + AND(32, R(RSCRATCH), Imm8(0x1F)); + // (RSCRATCH2 - 0x11) * 8 + squeezePointer(userModeOffsets) + (reg - 8), algebra is great! + MOVZX(32, 8, RSCRATCH, MScaled(RSCRATCH, SCALE_8, squeezePointer(userModeOffsets) - 0x11 * 8 + (reg - 8))); + MOV(32, R(RSCRATCH), MRegSum(RCPU, RSCRATCH)); + PUSH(RSCRATCH); + } + else if (RegCache.Mapping[reg] == INVALID_REG) + { + LoadReg(reg, RSCRATCH); + PUSH(RSCRATCH); + } + else + PUSH(MapReg(reg).GetSimpleReg()); + } + MOV(64, R(ABI_PARAM2), R(RSP)); + + CALL(Num == 0 + ? 
MemoryFuncsSeq9[1][preinc] + : MemoryFuncsSeq7[1][preinc][CodeRegion == 0x02]); + + ADD(32, R(RSP), regsCount < 16 ? Imm8(regsCount * 8) : Imm32(regsCount * 8)); + } + + return (regsCount * 4) * (decrement ? -1 : 1); +} + OpArg Compiler::A_Comp_GetMemWBOffset() { if (!(CurInstr.Instr & (1 << 25))) @@ -354,6 +657,25 @@ void Compiler::A_Comp_MemHalf() ? Imm32(CurInstr.Instr & 0xF | ((CurInstr.Instr >> 4) & 0xF0)) : MapReg(CurInstr.A_Reg(0)); + int op = (CurInstr.Instr >> 5) & 0x3; + bool load = CurInstr.Instr & (1 << 20); + + bool signExtend = false; + int size; + if (!load) + { + size = op == 1 ? 16 : 32; + load = op == 2; + } + else if (load) + { + size = op == 2 ? 8 : 16; + signExtend = op > 1; + } + + if (size == 32 && Num == 1) + return; // NOP + if (CurInstr.Instr & (1 << 24)) { if (CurInstr.Instr & (1 << 23)) @@ -370,19 +692,6 @@ void Compiler::A_Comp_MemHalf() else MOV(32, R(ABI_PARAM1), rn); - int op = (CurInstr.Instr >> 5) & 0x3; - bool load = CurInstr.Instr & (1 << 20); - - bool signExtend = false; - int size; - if (!load && op == 1) - size = 16; - else if (load) - { - size = op == 2 ? 8 : 16; - signExtend = op > 1; - } - if (!(CurInstr.Instr & (1 << 24))) { if (CurInstr.Instr & (1 << 23)) @@ -412,6 +721,24 @@ void Compiler::T_Comp_MemReg() Comp_MemAccess(rd, false, !load, byte ? 8 : 32); } +void Compiler::A_Comp_LDM_STM() +{ + BitSet16 regs(CurInstr.Instr & 0xFFFF); + + bool load = (CurInstr.Instr >> 20) & 1; + bool pre = (CurInstr.Instr >> 24) & 1; + bool add = (CurInstr.Instr >> 23) & 1; + bool writeback = (CurInstr.Instr >> 21) & 1; + bool usermode = (CurInstr.Instr >> 22) & 1; + + OpArg rn = MapReg(CurInstr.A_Reg(16)); + + s32 offset = Comp_MemAccessBlock(rn, regs, !load, pre, !add, false); + + if (writeback) + ADD(32, rn, offset >= INT8_MIN && offset < INT8_MAX ? 
Imm8(offset) : Imm32(offset)); +} + void Compiler::T_Comp_MemImm() { OpArg rd = MapReg(CurInstr.T_Reg(0)); @@ -456,4 +783,56 @@ void Compiler::T_Comp_MemImmHalf() Comp_MemAccess(rd, false, !load, 16); } +void Compiler::T_Comp_LoadPCRel() +{ + OpArg rd = MapReg(CurInstr.T_Reg(8)); + u32 addr = (R15 & ~0x2) + ((CurInstr.Instr & 0xFF) << 2); + + // hopefully this doesn't break + u32 val; CurCPU->DataRead32(addr, &val); + MOV(32, rd, Imm32(val)); +} + +void Compiler::T_Comp_MemSPRel() +{ + u32 offset = (CurInstr.Instr & 0xFF) * 4; + OpArg rd = MapReg(CurInstr.T_Reg(8)); + bool load = CurInstr.Instr & (1 << 11); + + LEA(32, ABI_PARAM1, MDisp(MapReg(13).GetSimpleReg(), offset)); + + Comp_MemAccess(rd, false, !load, 32); +} + +void Compiler::T_Comp_PUSH_POP() +{ + bool load = CurInstr.Instr & (1 << 11); + BitSet16 regs(CurInstr.Instr & 0xFF); + if (CurInstr.Instr & (1 << 8)) + { + if (load) + regs[15] = true; + else + regs[14] = true; + } + + OpArg sp = MapReg(13); + + s32 offset = Comp_MemAccessBlock(sp, regs, !load, !load, !load, false); + + ADD(32, sp, Imm8(offset)); // offset will be always be in range since PUSH accesses 9 regs max +} + +void Compiler::T_Comp_LDMIA_STMIA() +{ + BitSet16 regs(CurInstr.Instr & 0xFF); + OpArg rb = MapReg(CurInstr.T_Reg(8)); + bool load = CurInstr.Instr & (1 << 11); + + s32 offset = Comp_MemAccessBlock(rb, regs, !load, false, false, false); + + if (!load || !regs[CurInstr.T_Reg(8)]) + ADD(32, rb, Imm8(offset)); +} + } \ No newline at end of file diff --git a/src/ARM_InstrInfo.cpp b/src/ARM_InstrInfo.cpp index 32a9645..c519229 100644 --- a/src/ARM_InstrInfo.cpp +++ b/src/ARM_InstrInfo.cpp @@ -25,9 +25,7 @@ enum { A_Link = 1 << 10, - A_LDMSTM = 1 << 11, - - A_ARM9Only = 1 << 12, + A_UnkOnARM7 = 1 << 11, }; #define A_BIOP A_Read16 @@ -97,12 +95,12 @@ const u32 A_SMULWy = A_Write16 | A_Read0 | A_Read8 | ak(ak_SMULWy); const u32 A_SMLALxy = A_Write16 | A_Write12 | A_Read16 | A_Read12 | A_Read0 | A_Read8 | ak(ak_SMLALxy); const u32 A_SMULxy = A_Write16 | A_Read0 | A_Read8 | ak(ak_SMULxy); -const u32 A_CLZ = A_Write12 | A_Read0 | A_ARM9Only | ak(ak_CLZ); +const u32 A_CLZ = A_Write12 | A_Read0 | A_UnkOnARM7 | ak(ak_CLZ); -const u32 A_QADD = A_Write12 | A_Read0 | A_Read16 | A_ARM9Only | ak(ak_QADD); -const u32 A_QSUB = A_Write12 | A_Read0 | A_Read16 | A_ARM9Only | ak(ak_QSUB); -const u32 A_QDADD = A_Write12 | A_Read0 | A_Read16 | A_ARM9Only | ak(ak_QDADD); -const u32 A_QDSUB = A_Write12 | A_Read0 | A_Read16 | A_ARM9Only | ak(ak_QDSUB); +const u32 A_QADD = A_Write12 | A_Read0 | A_Read16 | A_UnkOnARM7 | ak(ak_QADD); +const u32 A_QSUB = A_Write12 | A_Read0 | A_Read16 | A_UnkOnARM7 | ak(ak_QSUB); +const u32 A_QDADD = A_Write12 | A_Read0 | A_Read16 | A_UnkOnARM7 | ak(ak_QDADD); +const u32 A_QDSUB = A_Write12 | A_Read0 | A_Read16 | A_UnkOnARM7 | ak(ak_QDSUB); #define A_LDR A_Write12 #define A_STR A_Read12 @@ -144,8 +142,8 @@ A_IMPLEMENT_HD_LDRSTR(LDRSH,LDR) const u32 A_SWP = A_Write12 | A_Read16 | A_Read0 | ak(ak_SWP); const u32 A_SWPB = A_Write12 | A_Read16 | A_Read0 | ak(ak_SWPB); -const u32 A_LDM = A_Read16 | A_LDMSTM | ak(ak_LDM); -const u32 A_STM = A_Read16 | A_LDMSTM | ak(ak_STM); +const u32 A_LDM = A_Read16 | A_MemWriteback | ak(ak_LDM); +const u32 A_STM = A_Read16 | A_MemWriteback | ak(ak_STM); const u32 A_B = A_BranchAlways | ak(ak_B); const u32 A_BL = A_BranchAlways | A_Link | ak(ak_BL); @@ -154,11 +152,11 @@ const u32 A_BX = A_BranchAlways | A_Read0 | ak(ak_BX); const u32 A_BLX_REG = A_BranchAlways | A_Link | A_Read0 | ak(ak_BLX_REG); const u32 A_UNK = 
A_BranchAlways | A_Link | ak(ak_UNK); -const u32 A_MSR_IMM = A_ARM9Only | ak(ak_MSR_IMM); -const u32 A_MSR_REG = A_Read0 | A_ARM9Only | ak(ak_MSR_REG); -const u32 A_MRS = A_Write12 | A_ARM9Only | ak(ak_MRS); -const u32 A_MCR = A_Read12 | A_ARM9Only | ak(ak_MCR); -const u32 A_MRC = A_Write12 | A_ARM9Only | ak(ak_MRC); +const u32 A_MSR_IMM = A_UnkOnARM7 | ak(ak_MSR_IMM); +const u32 A_MSR_REG = A_Read0 | A_UnkOnARM7 | ak(ak_MSR_REG); +const u32 A_MRS = A_Write12 | A_UnkOnARM7 | ak(ak_MRS); +const u32 A_MCR = A_Read12 | A_UnkOnARM7 | ak(ak_MCR); +const u32 A_MRC = A_Write12 | A_UnkOnARM7 | ak(ak_MRC); const u32 A_SVC = A_BranchAlways | A_Link | ak(ak_SVC); // THUMB @@ -249,7 +247,7 @@ const u32 T_LDRH_IMM = T_Write0 | T_Read3 | tk(tk_LDRH_IMM); const u32 T_STR_SPREL = T_Read8 | T_ReadR13 | tk(tk_STR_SPREL); const u32 T_LDR_SPREL = T_Write8 | T_ReadR13 | tk(tk_LDR_SPREL); -const u32 T_PUSH = T_ReadR15 | T_ReadR13 | T_WriteR13 | tk(tk_PUSH); +const u32 T_PUSH = T_ReadR13 | T_WriteR13 | tk(tk_PUSH); const u32 T_POP = T_PopPC | T_ReadR13 | T_WriteR13 | tk(tk_POP); const u32 T_LDMIA = T_Read8 | T_Write8 | tk(tk_LDMIA); @@ -320,8 +318,10 @@ Info Decode(bool thumb, u32 num, u32 instr) if (num == 0 && (instr & 0xFE000000) == 0xFA000000) data = A_BLX_IMM; - if (data & A_ARM9Only && num != 0) - data |= A_BranchAlways | A_Link; + if (data & A_UnkOnARM7 && num != 0) + data = A_UNK; + + res.Kind = (data >> 13) & 0x1FF; if (data & A_Read0) res.SrcRegs |= 1 << (instr & 0xF); @@ -360,14 +360,8 @@ Info Decode(bool thumb, u32 num, u32 instr) res.SrcRegs |= 1 << 15; } - if (data & A_LDMSTM) - { - res.DstRegs |= instr & (!!(instr & (1 << 20)) << 15); - if (instr & (1 << 21)) - res.DstRegs |= 1 << ((instr >> 16) & 0xF); - } - - res.Kind = (data >> 13) & 0x1FF; + if (res.Kind == ak_LDM) + res.DstRegs |= instr & (1 << 15); // this is right return res; } -- cgit v1.2.3 From c58fdbd66bab9f1b97e9522afa5436f212540b6d Mon Sep 17 00:00:00 2001 From: RSDuck Date: Thu, 11 Jul 2019 16:22:47 +0200 Subject: jit: branch instructions --- src/ARM.cpp | 12 +- src/ARMJIT.cpp | 4 +- src/ARMJIT.h | 2 +- src/ARMJIT_x64/ARMJIT_Branch.cpp | 267 ++++++++++++++++++++++++++++++++++++ src/ARMJIT_x64/ARMJIT_Compiler.cpp | 185 ++++++++----------------- src/ARMJIT_x64/ARMJIT_Compiler.h | 30 ++-- src/ARMJIT_x64/ARMJIT_LoadStore.cpp | 42 +----- src/ARM_InstrInfo.cpp | 6 +- src/ARM_InstrInfo.h | 1 + src/CMakeLists.txt | 1 + 10 files changed, 363 insertions(+), 187 deletions(-) create mode 100644 src/ARMJIT_x64/ARMJIT_Branch.cpp (limited to 'src/ARM_InstrInfo.cpp') diff --git a/src/ARM.cpp b/src/ARM.cpp index f7ca26d..aca876d 100644 --- a/src/ARM.cpp +++ b/src/ARM.cpp @@ -521,11 +521,8 @@ void ARMv5::Execute() printf("aaarg ungempappter raum %x\n", R[15]);*/ ARMJIT::CompiledBlock block = ARMJIT::LookUpBlock(0, R[15] - ((CPSR&0x20)?2:4)); - if (block == NULL) - ARMJIT::CompileBlock(this); - else - Cycles += block(); - + Cycles += (block ? block : ARMJIT::CompileBlock(this))(); + // TODO optimize this shit!!! if (Halted) { @@ -607,10 +604,7 @@ void ARMv4::Execute() printf("aaarg ungempappter raum %x\n", R[15]);*/ ARMJIT::CompiledBlock block = ARMJIT::LookUpBlock(1, R[15] - ((CPSR&0x20)?2:4)); - if (block == NULL) - ARMJIT::CompileBlock(this); - else - Cycles += block(); + Cycles += (block ? block : ARMJIT::CompileBlock(this))(); // TODO optimize this shit!!! 
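Both Execute loops now funnel through the same lookup-or-compile dispatch: on a cache miss the block is compiled and, because CompileBlock now returns the freshly generated block, it runs immediately instead of waiting for the next pass through the loop. A minimal sketch of that dispatch, assuming CompiledBlock is a function pointer type whose return value is the number of cycles the block consumed (which is what Cycles += block() in the diff suggests):

    // same shape as the one-liner above, unrolled for readability
    ARMJIT::CompiledBlock block = ARMJIT::LookUpBlock(Num, R[15] - ((CPSR & 0x20) ? 2 : 4));
    if (!block)
        block = ARMJIT::CompileBlock(this); // compile on miss, then fall through
    Cycles += block();                      // every path executes the block right away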
if (Halted) diff --git a/src/ARMJIT.cpp b/src/ARMJIT.cpp index 6afa967..47b425f 100644 --- a/src/ARMJIT.cpp +++ b/src/ARMJIT.cpp @@ -121,7 +121,7 @@ void DeInit() delete compiler; } -void CompileBlock(ARM* cpu) +CompiledBlock CompileBlock(ARM* cpu) { bool thumb = cpu->CPSR & 0x20; @@ -171,6 +171,8 @@ void CompileBlock(ARM* cpu) CompiledBlock block = compiler->CompileBlock(cpu, instrs, i); InsertBlock(cpu->Num, r15Initial - (thumb ? 2 : 4), block); + + return block; } void ResetBlocks() diff --git a/src/ARMJIT.h b/src/ARMJIT.h index 71188f9..45bb4ed 100644 --- a/src/ARMJIT.h +++ b/src/ARMJIT.h @@ -109,7 +109,7 @@ inline void InsertBlock(u32 num, u32 addr, CompiledBlock func) void Init(); void DeInit(); -void CompileBlock(ARM* cpu); +CompiledBlock CompileBlock(ARM* cpu); void ResetBlocks(); diff --git a/src/ARMJIT_x64/ARMJIT_Branch.cpp b/src/ARMJIT_x64/ARMJIT_Branch.cpp new file mode 100644 index 0000000..fb2acba --- /dev/null +++ b/src/ARMJIT_x64/ARMJIT_Branch.cpp @@ -0,0 +1,267 @@ +#include "ARMJIT_Compiler.h" + +using namespace Gen; + +namespace ARMJIT +{ + +void Compiler::Comp_JumpTo(u32 addr, bool forceNonConstantCycles) +{ + // we can simplify constant branches by a lot + // it's not completely safe to assume stuff like, which instructions to preload + // we'll see how it works out + + u32 newPC; + u32 nextInstr[2]; + u32 cycles = 0; + bool setupRegion = false; + + if (addr & 0x1 && !Thumb) + { + CPSRDirty = true; + OR(32, R(RCPSR), Imm8(0x20)); + } + else if (!(addr & 0x1) && Thumb) + { + CPSRDirty = true; + AND(32, R(RCPSR), Imm32(~0x20)); + } + + if (Num == 0) + { + ARMv5* cpu9 = (ARMv5*)CurCPU; + + u32 oldregion = R15 >> 24; + u32 newregion = addr >> 24; + + u32 regionCodeCycles = cpu9->MemTimings[addr >> 12][0]; + cpu9->RegionCodeCycles = regionCodeCycles; + + MOV(32, MDisp(RCPU, offsetof(ARMv5, RegionCodeCycles)), Imm32(regionCodeCycles)); + + setupRegion = newregion != oldregion; + if (setupRegion) + cpu9->SetupCodeMem(addr); + + if (addr & 0x1) + { + addr &= ~0x1; + newPC = addr+2; + + // two-opcodes-at-once fetch + // doesn't matter if we put garbage in the MSbs there + if (addr & 0x2) + { + nextInstr[0] = cpu9->CodeRead32(addr-2, true) >> 16; + cycles += CurCPU->CodeCycles; + nextInstr[1] = cpu9->CodeRead32(addr+2, false); + cycles += CurCPU->CodeCycles; + } + else + { + nextInstr[0] = cpu9->CodeRead32(addr, true); + nextInstr[1] = nextInstr[0] >> 16; + cycles += CurCPU->CodeCycles; + } + } + else + { + addr &= ~0x3; + newPC = addr+4; + + nextInstr[0] = cpu9->CodeRead32(addr, true); + cycles += cpu9->CodeCycles; + nextInstr[1] = cpu9->CodeRead32(addr+4, false); + cycles += cpu9->CodeCycles; + } + } + else + { + ARMv4* cpu7 = (ARMv4*)CurCPU; + + u32 codeRegion = addr >> 24; + u32 codeCycles = addr >> 15; // cheato + + cpu7->CodeRegion = codeRegion; + cpu7->CodeCycles = codeCycles; + + MOV(32, MDisp(RCPU, offsetof(ARM, CodeRegion)), Imm32(codeRegion)); + MOV(32, MDisp(RCPU, offsetof(ARM, CodeRegion)), Imm32(codeCycles)); + + if (addr & 0x1) + { + addr &= ~0x1; + newPC = addr+2; + + nextInstr[0] = ((ARMv4*)CurCPU)->CodeRead16(addr); + nextInstr[1] = ((ARMv4*)CurCPU)->CodeRead16(addr+2); + cycles += NDS::ARM7MemTimings[codeCycles][0] + NDS::ARM7MemTimings[codeCycles][1]; + } + else + { + addr &= ~0x3; + newPC = addr+4; + + nextInstr[0] = cpu7->CodeRead32(addr); + nextInstr[1] = cpu7->CodeRead32(addr+4); + cycles += NDS::ARM7MemTimings[codeCycles][2] + NDS::ARM7MemTimings[codeCycles][3]; + } + } + + MOV(32, MDisp(RCPU, offsetof(ARM, R[15])), Imm32(newPC)); + MOV(32, MDisp(RCPU, 
offsetof(ARM, NextInstr[0])), Imm32(nextInstr[0])); + MOV(32, MDisp(RCPU, offsetof(ARM, NextInstr[1])), Imm32(nextInstr[1])); + if ((Thumb || CurInstr.Cond() >= 0xE) && !forceNonConstantCycles) + ConstantCycles += cycles; + else + ADD(32, MDisp(RCPU, offsetof(ARM, Cycles)), Imm8(cycles)); + + if (setupRegion) + { + MOV(32, R(ABI_PARAM1), R(RCPU)); + MOV(32, R(ABI_PARAM2), Imm32(newPC)); + CALL((void*)&ARMv5::SetupCodeMem); + } +} + +void Compiler::Comp_JumpTo(Gen::X64Reg addr, bool restoreCPSR) +{ + BitSet16 hiRegsLoaded(RegCache.DirtyRegs & 0xFFFF0000); + bool previouslyDirty = CPSRDirty; + SaveCPSR(); + + if (restoreCPSR) + { + if (Thumb || CurInstr.Cond() >= 0xE) + { + for (int reg : hiRegsLoaded) + RegCache.UnloadRegister(reg); + } + else + { + // the ugly way... + // we only save them, to load and save them again + for (int reg : hiRegsLoaded) + SaveReg(reg, RegCache.Mapping[reg]); + } + } + + MOV(64, R(ABI_PARAM1), R(RCPU)); + MOV(32, R(ABI_PARAM2), R(addr)); + if (!restoreCPSR) + XOR(32, R(ABI_PARAM3), R(ABI_PARAM3)); + else + MOV(32, R(ABI_PARAM3), Imm32(restoreCPSR)); + if (Num == 0) + CALL((void*)&ARMv5::JumpTo); + else + CALL((void*)&ARMv4::JumpTo); + + if (!Thumb && restoreCPSR && CurInstr.Cond() < 0xE) + { + for (int reg : hiRegsLoaded) + LoadReg(reg, RegCache.Mapping[reg]); + } + + if (previouslyDirty) + LoadCPSR(); + CPSRDirty = previouslyDirty; +} + +void Compiler::A_Comp_BranchImm() +{ + int op = (CurInstr.Instr >> 24) & 1; + s32 offset = (s32)(CurInstr.Instr << 8) >> 6; + u32 target = R15 + offset; + bool link = op; + + if (CurInstr.Cond() == 0xF) // BLX_imm + { + target += (op << 1) + 1; + link = true; + } + + if (link) + MOV(32, MapReg(14), Imm32(R15 - 4)); + + Comp_JumpTo(target); +} + +void Compiler::A_Comp_BranchXchangeReg() +{ + OpArg rn = MapReg(CurInstr.A_Reg(0)); + if ((CurInstr.Instr & 0xF0) == 0x30) // BLX_reg + MOV(32, MapReg(14), Imm32(R15 - 4)); + Comp_JumpTo(rn.GetSimpleReg()); +} + +void Compiler::T_Comp_BCOND() +{ + u32 cond = (CurInstr.Instr >> 8) & 0xF; + FixupBranch skipExecute = CheckCondition(cond); + + s32 offset = (s32)(CurInstr.Instr << 24) >> 23; + Comp_JumpTo(R15 + offset + 1, true); + + FixupBranch skipFailed = J(); + SetJumpTarget(skipExecute); + Comp_AddCycles_C(true); + SetJumpTarget(skipFailed); +} + +void Compiler::T_Comp_B() +{ + s32 offset = (s32)((CurInstr.Instr & 0x7FF) << 21) >> 20; + Comp_JumpTo(R15 + offset + 1); +} + +void Compiler::T_Comp_BranchXchangeReg() +{ + bool link = CurInstr.Instr & (1 << 7); + if (link && Num == 1) + { + printf("BLX unsupported on ARM7!!!\n"); + return; + } + + OpArg rn = MapReg(CurInstr.A_Reg(3)); + if (link) + MOV(32, MapReg(14), Imm32(R15 - 1)); + Comp_JumpTo(rn.GetSimpleReg()); +} + +void Compiler::T_Comp_BL_LONG_1() +{ + s32 offset = (s32)((CurInstr.Instr & 0x7FF) << 21) >> 9; + MOV(32, MapReg(14), Imm32(R15 + offset)); + Comp_AddCycles_C(); +} + +void Compiler::T_Comp_BL_LONG_2() +{ + OpArg lr = MapReg(14); + s32 offset = (CurInstr.Instr & 0x7FF) << 1; + LEA(32, RSCRATCH, MDisp(lr.GetSimpleReg(), offset)); + MOV(32, lr, Imm32((R15 - 2) | 1)); + if (Num == 1 || CurInstr.Instr & (1 << 12)) + OR(32, R(RSCRATCH), Imm8(1)); + Comp_JumpTo(RSCRATCH); +} + +void Compiler::T_Comp_BL_Merged(FetchedInstr part1) +{ + assert(part1.Info.Kind == ARMInstrInfo::tk_BL_LONG_1); + Comp_AddCycles_C(); + + u32 target = (R15 - 2) + ((s32)((part1.Instr & 0x7FF) << 21) >> 9); + target += (CurInstr.Instr & 0x7FF) << 1; + + if (Num == 1 || CurInstr.Instr & (1 << 12)) + target |= 1; + + MOV(32, MapReg(14), Imm32((R15 - 2) | 
1)); + + Comp_JumpTo(target); +} + +} \ No newline at end of file diff --git a/src/ARMJIT_x64/ARMJIT_Compiler.cpp b/src/ARMJIT_x64/ARMJIT_Compiler.cpp index 4fe0c70..6799a90 100644 --- a/src/ARMJIT_x64/ARMJIT_Compiler.cpp +++ b/src/ARMJIT_x64/ARMJIT_Compiler.cpp @@ -50,50 +50,6 @@ Compiler::Compiler() ResetStart = GetWritableCodePtr(); } -void* Compiler::Gen_ChangeCPSRRoutine() -{ - void* res = (void*)GetWritableCodePtr(); - - MOV(32, R(RSCRATCH), R(RCPSR)); - AND(32, R(RSCRATCH), Imm8(0x1F)); - CMP(32, R(RSCRATCH), Imm8(0x11)); - FixupBranch fiq = J_CC(CC_E); - CMP(32, R(RSCRATCH), Imm8(0x12)); - FixupBranch irq = J_CC(CC_E); - CMP(32, R(RSCRATCH), Imm8(0x13)); - FixupBranch svc = J_CC(CC_E); - CMP(32, R(RSCRATCH), Imm8(0x17)); - FixupBranch abt = J_CC(CC_E); - CMP(32, R(RSCRATCH), Imm8(0x1B)); - FixupBranch und = J_CC(CC_E); - - SetJumpTarget(fiq); - - SetJumpTarget(irq); - - SetJumpTarget(svc); - - SetJumpTarget(abt); - - SetJumpTarget(und); - - return res; -} - -DataRegion Compiler::ClassifyAddress(u32 addr) -{ - if (Num == 0 && addr >= ((ARMv5*)CurCPU)->DTCMBase && addr < ((ARMv5*)CurCPU)->DTCMBase) - return dataRegionDTCM; - switch (addr & 0xFF000000) - { - case 0x02000000: return dataRegionMainRAM; - case 0x03000000: return Num == 1 && (addr & 0xF00000) == 0x800000 ? dataRegionWRAM7 : dataRegionSWRAM; - case 0x04000000: return dataRegionIO; - case 0x06000000: return dataRegionVRAM; - } - return dataRegionGeneric; -} - void Compiler::LoadCPSR() { assert(!CPSRDirty); @@ -123,6 +79,29 @@ void Compiler::SaveReg(int reg, X64Reg nativeReg) MOV(32, MDisp(RCPU, offsetof(ARM, R[reg])), R(nativeReg)); } +// invalidates RSCRATCH and RSCRATCH3 +Gen::FixupBranch Compiler::CheckCondition(u32 cond) +{ + if (cond >= 0x8) + { + static_assert(RSCRATCH3 == ECX); + MOV(32, R(RSCRATCH3), R(RCPSR)); + SHR(32, R(RSCRATCH3), Imm8(28)); + MOV(32, R(RSCRATCH), Imm32(1)); + SHL(32, R(RSCRATCH), R(RSCRATCH3)); + TEST(32, R(RSCRATCH), Imm32(ARM::ConditionTable[cond])); + + return J_CC(CC_Z); + } + else + { + // could have used a LUT, but then where would be the fun? + TEST(32, R(RCPSR), Imm32(1 << (28 + ((~(cond >> 1) & 1) << 1 | (cond >> 2 & 1) ^ (cond >> 1 & 1))))); + + return J_CC(cond & 1 ? 
CC_NZ : CC_Z); + } +} + CompiledBlock Compiler::CompileBlock(ARM* cpu, FetchedInstr instrs[], int instrsCount) { if (IsAlmostFull()) @@ -140,6 +119,8 @@ CompiledBlock Compiler::CompileBlock(ARM* cpu, FetchedInstr instrs[], int instrs CodeRegion = cpu->CodeRegion; CurCPU = cpu; + bool mergedThumbBL = false; + ABI_PushRegistersAndAdjustStack({ABI_ALL_CALLEE_SAVED & ABI_ALL_GPRS}, 8, 16); MOV(64, R(RCPU), ImmPtr(cpu)); @@ -167,17 +148,10 @@ CompiledBlock Compiler::CompileBlock(ARM* cpu, FetchedInstr instrs[], int instrs MOV(32, MDisp(RCPU, offsetof(ARM, NextInstr[1])), Imm32(CurInstr.NextInstr[1])); } - if (comp == NULL || CurInstr.Info.Branches()) + if (comp == NULL) SaveCPSR(); } - - // run interpreter - cpu->CodeCycles = CurInstr.CodeCycles; - cpu->R[15] = R15; - cpu->CurInstr = CurInstr.Instr; - cpu->NextInstr[0] = CurInstr.NextInstr[0]; - cpu->NextInstr[1] = CurInstr.NextInstr[1]; - + if (comp != NULL) RegCache.Prepare(i); else @@ -185,58 +159,44 @@ CompiledBlock Compiler::CompileBlock(ARM* cpu, FetchedInstr instrs[], int instrs if (Thumb) { - u32 icode = (CurInstr.Instr >> 6) & 0x3FF; - if (comp == NULL) + if (i < instrsCount - 1 && CurInstr.Info.Kind == ARMInstrInfo::tk_BL_LONG_1 + && instrs[i + 1].Info.Kind == ARMInstrInfo::tk_BL_LONG_2) + mergedThumbBL = true; + else { - MOV(64, R(ABI_PARAM1), R(RCPU)); + u32 icode = (CurInstr.Instr >> 6) & 0x3FF; + if (comp == NULL) + { + MOV(64, R(ABI_PARAM1), R(RCPU)); - ABI_CallFunction(ARMInterpreter::THUMBInstrTable[icode]); + ABI_CallFunction(ARMInterpreter::THUMBInstrTable[icode]); + } + else if (mergedThumbBL) + T_Comp_BL_Merged(instrs[i - 1]); + else + (this->*comp)(); } - else - (this->*comp)(); - - ARMInterpreter::THUMBInstrTable[icode](cpu); } else { u32 cond = CurInstr.Cond(); if (CurInstr.Info.Kind == ARMInstrInfo::ak_BLX_IMM) { - MOV(64, R(ABI_PARAM1), R(RCPU)); - ABI_CallFunction(ARMInterpreter::A_BLX_IMM); - - ARMInterpreter::A_BLX_IMM(cpu); + if (comp) + (this->*comp)(); + else + { + MOV(64, R(ABI_PARAM1), R(RCPU)); + ABI_CallFunction(ARMInterpreter::A_BLX_IMM); + } } else if (cond == 0xF) - { Comp_AddCycles_C(); - cpu->AddCycles_C(); - } else { FixupBranch skipExecute; if (cond < 0xE) - { - if (cond >= 0x8) - { - static_assert(RSCRATCH3 == ECX); - MOV(32, R(RSCRATCH3), R(RCPSR)); - SHR(32, R(RSCRATCH3), Imm8(28)); - MOV(32, R(RSCRATCH), Imm32(1)); - SHL(32, R(RSCRATCH), R(RSCRATCH3)); - TEST(32, R(RSCRATCH), Imm32(ARM::ConditionTable[cond])); - - skipExecute = J_CC(CC_Z); - } - else - { - // could have used a LUT, but then where would be the fun? - TEST(32, R(RCPSR), Imm32(1 << (28 + ((~(cond >> 1) & 1) << 1 | (cond >> 2 & 1) ^ (cond >> 1 & 1))))); - - skipExecute = J_CC(cond & 1 ? CC_NZ : CC_Z); - } - - } + skipExecute = CheckCondition(cond); u32 icode = ((CurInstr.Instr >> 4) & 0xF) | ((CurInstr.Instr >> 16) & 0xFF0); if (comp == NULL) @@ -258,19 +218,9 @@ CompiledBlock Compiler::CompileBlock(ARM* cpu, FetchedInstr instrs[], int instrs SetJumpTarget(skipFailed); } - - if (cpu->CheckCondition(cond)) - ARMInterpreter::ARMInstrTable[icode](cpu); - else - cpu->AddCycles_C(); } } - /* - we don't need to collect the interpreted cycles, - since cpu->Cycles is taken into account by the dispatcher. 
- */ - - if (comp == NULL && i != instrsCount - 1) LoadCPSR(); } @@ -367,7 +317,7 @@ CompileFunc Compiler::GetCompFunc(int kind) // LDM/STM NULL, NULL, // Branch - NULL, NULL, NULL, NULL, NULL, + A_Comp_BranchImm, A_Comp_BranchImm, A_Comp_BranchImm, A_Comp_BranchXchangeReg, A_Comp_BranchXchangeReg, // system stuff NULL, NULL, NULL, NULL, NULL, NULL, NULL, }; @@ -389,7 +339,7 @@ CompileFunc Compiler::GetCompFunc(int kind) // pc/sp relative T_Comp_RelAddr, T_Comp_RelAddr, T_Comp_AddSP, // LDR pcrel - NULL, + T_Comp_LoadPCRel, // LDR/STR reg offset T_Comp_MemReg, T_Comp_MemReg, T_Comp_MemReg, T_Comp_MemReg, // LDR/STR sign extended, half @@ -399,25 +349,27 @@ CompileFunc Compiler::GetCompFunc(int kind) // LDR/STR half imm offset T_Comp_MemImmHalf, T_Comp_MemImmHalf, // LDR/STR sp rel - NULL, NULL, + T_Comp_MemSPRel, T_Comp_MemSPRel, // PUSH/POP - NULL, NULL, + T_Comp_PUSH_POP, T_Comp_PUSH_POP, // LDMIA, STMIA - NULL, NULL, - NULL, NULL, - NULL, NULL, NULL, NULL, NULL, NULL + T_Comp_LDMIA_STMIA, T_Comp_LDMIA_STMIA, + // Branch + T_Comp_BCOND, T_Comp_BranchXchangeReg, T_Comp_BranchXchangeReg, T_Comp_B, T_Comp_BL_LONG_1, T_Comp_BL_LONG_2, + // Unk, SVC + NULL, NULL }; return Thumb ? T_Comp[kind] : A_Comp[kind]; } -void Compiler::Comp_AddCycles_C() +void Compiler::Comp_AddCycles_C(bool forceNonConstant) { s32 cycles = Num ? NDS::ARM7MemTimings[CurInstr.CodeCycles][Thumb ? 1 : 3] : ((R15 & 0x2) ? 0 : CurInstr.CodeCycles); - if (CurInstr.Cond() < 0xE) + if ((!Thumb && CurInstr.Cond() < 0xE) || forceNonConstant) ADD(32, MDisp(RCPU, offsetof(ARM, Cycles)), Imm8(cycles)); else ConstantCycles += cycles; @@ -429,25 +381,10 @@ void Compiler::Comp_AddCycles_CI(u32 i) NDS::ARM7MemTimings[CurInstr.CodeCycles][Thumb ? 0 : 2] : ((R15 & 0x2) ? 0 : CurInstr.CodeCycles)) + i; - if (CurInstr.Cond() < 0xE) + if (!Thumb && CurInstr.Cond() < 0xE) ADD(32, MDisp(RCPU, offsetof(ARM, Cycles)), Imm8(cycles)); else ConstantCycles += cycles; } -void Compiler::Comp_JumpTo(Gen::X64Reg addr, bool restoreCPSR) -{ - // potential bug: if a register that is still cached gets written back on a mode switch, - // the old value is stored - SaveCPSR(); - - MOV(64, R(ABI_PARAM1), R(RCPU)); - MOV(32, R(ABI_PARAM2), R(addr)); - MOV(32, R(ABI_PARAM3), Imm32(restoreCPSR)); - if (Num == 0) - CALL((void*)&ARMv5::JumpTo); - else - CALL((void*)&ARMv4::JumpTo); -} - } \ No newline at end of file diff --git a/src/ARMJIT_x64/ARMJIT_Compiler.h b/src/ARMJIT_x64/ARMJIT_Compiler.h index a751737..45b488a 100644 --- a/src/ARMJIT_x64/ARMJIT_Compiler.h +++ b/src/ARMJIT_x64/ARMJIT_Compiler.h @@ -22,19 +22,6 @@ class Compiler; typedef void (Compiler::*CompileFunc)(); -enum DataRegion -{ - dataRegionGeneric, // hey, that's me!
- dataRegionMainRAM, - dataRegionSWRAM, - dataRegionVRAM, - dataRegionIO, - dataRegionExclusive, - dataRegionsCount, - dataRegionDTCM = dataRegionExclusive, - dataRegionWRAM7 = dataRegionExclusive, -}; - class Compiler : public Gen::X64CodeBlock { public: @@ -49,8 +36,9 @@ private: CompileFunc GetCompFunc(int kind); void Comp_JumpTo(Gen::X64Reg addr, bool restoreCPSR = false); + void Comp_JumpTo(u32 addr, bool forceNonConstantCycles = false); - void Comp_AddCycles_C(); + void Comp_AddCycles_C(bool forceNonConstant = false); void Comp_AddCycles_CI(u32 i); enum @@ -63,8 +51,6 @@ private: opInvertOp2 = 1 << 5, }; - DataRegion ClassifyAddress(u32 addr); - void A_Comp_Arith(); void A_Comp_MovOp(); void A_Comp_CmpOp(); @@ -73,6 +59,9 @@ private: void A_Comp_MemHalf(); void A_Comp_LDM_STM(); + void A_Comp_BranchImm(); + void A_Comp_BranchXchangeReg(); + void T_Comp_ShiftImm(); void T_Comp_AddSub_(); void T_Comp_ALU_Imm8(); @@ -91,6 +80,13 @@ private: void T_Comp_PUSH_POP(); void T_Comp_LDMIA_STMIA(); + void T_Comp_BCOND(); + void T_Comp_B(); + void T_Comp_BranchXchangeReg(); + void T_Comp_BL_LONG_1(); + void T_Comp_BL_LONG_2(); + void T_Comp_BL_Merged(FetchedInstr prefix); + void Comp_MemAccess(Gen::OpArg rd, bool signExtend, bool store, int size); s32 Comp_MemAccessBlock(Gen::OpArg rb, BitSet16 regs, bool store, bool preinc, bool decrement, bool usermode); @@ -119,6 +115,8 @@ private: void LoadCPSR(); void SaveCPSR(); + Gen::FixupBranch CheckCondition(u32 cond); + Gen::OpArg MapReg(int reg) { if (reg == 15 && RegCache.Mapping[reg] == Gen::INVALID_REG) diff --git a/src/ARMJIT_x64/ARMJIT_LoadStore.cpp b/src/ARMJIT_x64/ARMJIT_LoadStore.cpp index 20e1893..69b324c 100644 --- a/src/ARMJIT_x64/ARMJIT_LoadStore.cpp +++ b/src/ARMJIT_x64/ARMJIT_LoadStore.cpp @@ -462,38 +462,10 @@ s32 Compiler::Comp_MemAccessBlock(OpArg rb, BitSet16 regs, bool store, bool prei { int regsCount = regs.Count(); - const u8 userModeOffsets[] = - { - offsetof(ARM, R[8]), offsetof(ARM, R[9]), offsetof(ARM, R[10]), offsetof(ARM, R[11]), - offsetof(ARM, R[12]), offsetof(ARM, R[13]), offsetof(ARM, R[14]), 0, - - offsetof(ARM, R_FIQ[0]), offsetof(ARM, R_FIQ[1]), offsetof(ARM, R_FIQ[2]), offsetof(ARM, R_FIQ[3]), - offsetof(ARM, R_FIQ[4]), offsetof(ARM, R_FIQ[5]), offsetof(ARM, R_FIQ[6]), 0, - - offsetof(ARM, R[8]), offsetof(ARM, R[9]), offsetof(ARM, R[10]), offsetof(ARM, R[11]), - offsetof(ARM, R[12]), offsetof(ARM, R_IRQ[13]), offsetof(ARM, R_IRQ[14]), 0, - - offsetof(ARM, R[8]), offsetof(ARM, R[9]), offsetof(ARM, R[10]), offsetof(ARM, R[11]), - offsetof(ARM, R[12]), offsetof(ARM, R_SVC[13]), offsetof(ARM, R_SVC[14]), 0, - - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - - offsetof(ARM, R[8]), offsetof(ARM, R[9]), offsetof(ARM, R[10]), offsetof(ARM, R[11]), - offsetof(ARM, R[12]), offsetof(ARM, R_ABT[13]), offsetof(ARM, R_ABT[14]), 0, - - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - - offsetof(ARM, R[8]), offsetof(ARM, R[9]), offsetof(ARM, R[10]), offsetof(ARM, R[11]), - offsetof(ARM, R[12]), offsetof(ARM, R_UND[13]), offsetof(ARM, R_UND[14]), 0, - }; - if (decrement) { MOV_sum(32, ABI_PARAM1, rb, Imm32(-regsCount * 4)); - preinc = !preinc; + preinc ^= true; } else MOV(32, R(ABI_PARAM1), rb); @@ -516,16 +488,16 @@ s32 Compiler::Comp_MemAccessBlock(OpArg rb, BitSet16 regs, bool store, bool prei { if (regs[reg]) { - if (usermode && reg >= 8 && reg < 15) + /*if (usermode && reg >= 8 && reg < 15) { MOV(32, R(RSCRATCH2), R(RCPSR)); AND(32, R(RSCRATCH2), Imm8(0x1F)); // (RSCRATCH2 - 0x11) * 8 + 
squeezePointer(userModeOffsets) + (reg - 8), algebra is great! - MOVZX(32, 8, RSCRATCH2, MScaled(RSCRATCH2, SCALE_8, squeezePointer(userModeOffsets) - 0x11 * 8 + (reg - 8))); + MOVZX(32, 8, RSCRATCH2, MScaled(RSCRATCH2, SCALE_8, squeezePointer(userModeOffsets) - 0x10 * 8 + (reg - 8))); POP(RSCRATCH); MOV(32, MRegSum(RCPU, RSCRATCH2), R(RSCRATCH)); } - else if (RegCache.Mapping[reg] == INVALID_REG) + else */if (RegCache.Mapping[reg] == INVALID_REG) { assert(reg != 15); @@ -552,16 +524,16 @@ s32 Compiler::Comp_MemAccessBlock(OpArg rb, BitSet16 regs, bool store, bool prei { for (int reg : regs) { - if (usermode && reg >= 8 && reg < 15) + /*if (usermode && reg >= 8 && reg < 15) { MOV(32, R(RSCRATCH), R(RCPSR)); AND(32, R(RSCRATCH), Imm8(0x1F)); // (RSCRATCH2 - 0x11) * 8 + squeezePointer(userModeOffsets) + (reg - 8), algebra is great! - MOVZX(32, 8, RSCRATCH, MScaled(RSCRATCH, SCALE_8, squeezePointer(userModeOffsets) - 0x11 * 8 + (reg - 8))); + MOVZX(32, 8, RSCRATCH, MScaled(RSCRATCH, SCALE_8, squeezePointer(userModeOffsets) - 0x10 * 8 + (reg - 8))); MOV(32, R(RSCRATCH), MRegSum(RCPU, RSCRATCH)); PUSH(RSCRATCH); } - else if (RegCache.Mapping[reg] == INVALID_REG) + else */if (RegCache.Mapping[reg] == INVALID_REG) { LoadReg(reg, RSCRATCH); PUSH(RSCRATCH); diff --git a/src/ARM_InstrInfo.cpp b/src/ARM_InstrInfo.cpp index c519229..b8dff00 100644 --- a/src/ARM_InstrInfo.cpp +++ b/src/ARM_InstrInfo.cpp @@ -255,7 +255,7 @@ const u32 T_STMIA = T_Read8 | T_Write8 | tk(tk_STMIA); const u32 T_BCOND = T_BranchAlways | tk(tk_BCOND); const u32 T_BX = T_BranchAlways | T_ReadHi3 | tk(tk_BX); -const u32 T_BLX_REG = T_BranchAlways | T_ReadR15 | T_WriteR14 | T_ReadHi3 | tk(tk_BLX_REG); +const u32 T_BLX_REG = T_BranchAlways | T_WriteR14 | T_ReadHi3 | tk(tk_BLX_REG); const u32 T_B = T_BranchAlways | tk(tk_B); const u32 T_BL_LONG_1 = T_WriteR14 | T_ReadR15 | tk(tk_BL_LONG_1); const u32 T_BL_LONG_2 = T_BranchAlways | T_ReadR14 | T_WriteR14 | T_ReadR15 | tk(tk_BL_LONG_2); @@ -301,6 +301,10 @@ Info Decode(bool thumb, u32 num, u32 instr) res.DstRegs |= (1 << 13); if (data & T_ReadR15) res.SrcRegs |= (1 << 15); + if (data & T_WriteR14) + res.DstRegs |= (1 << 14); + if (data & T_ReadR14) + res.SrcRegs |= (1 << 14); if (data & T_BranchAlways) res.DstRegs |= (1 << 15); diff --git a/src/ARM_InstrInfo.h b/src/ARM_InstrInfo.h index dcd938b..51dcfa2 100644 --- a/src/ARM_InstrInfo.h +++ b/src/ARM_InstrInfo.h @@ -202,6 +202,7 @@ enum tk_POP, tk_LDMIA, tk_STMIA, + tk_BCOND, tk_BX, tk_BLX_REG, diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 662ed5c..9401220 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -35,6 +35,7 @@ add_library(core STATIC ARMJIT_x64/ARMJIT_Compiler.cpp ARMJIT_x64/ARMJIT_ALU.cpp ARMJIT_x64/ARMJIT_LoadStore.cpp + ARMJIT_x64/ARMJIT_Branch.cpp dolphin/CommonFuncs.cpp dolphin/x64ABI.cpp -- cgit v1.2.3 From 8ddc4d5904bafa72a6822bb2f487c9d7f100eb16 Mon Sep 17 00:00:00 2001 From: RSDuck Date: Mon, 15 Jul 2019 19:17:10 +0200 Subject: jit: fix BLX_reg with rn=lr --- src/ARMJIT_x64/ARMJIT_Branch.cpp | 3 ++- src/ARM_InstrInfo.cpp | 3 --- 2 files changed, 2 insertions(+), 4 deletions(-) (limited to 'src/ARM_InstrInfo.cpp') diff --git a/src/ARMJIT_x64/ARMJIT_Branch.cpp b/src/ARMJIT_x64/ARMJIT_Branch.cpp index 05c8ec6..1f95a90 100644 --- a/src/ARMJIT_x64/ARMJIT_Branch.cpp +++ b/src/ARMJIT_x64/ARMJIT_Branch.cpp @@ -187,9 +187,10 @@ void Compiler::A_Comp_BranchImm() void Compiler::A_Comp_BranchXchangeReg() { OpArg rn = MapReg(CurInstr.A_Reg(0)); + MOV(32, R(RSCRATCH), rn); if ((CurInstr.Instr & 0xF0) == 
0x30) // BLX_reg MOV(32, MapReg(14), Imm32(R15 - 4)); - Comp_JumpTo(rn.GetSimpleReg()); + Comp_JumpTo(RSCRATCH); } void Compiler::T_Comp_BCOND() diff --git a/src/ARM_InstrInfo.cpp b/src/ARM_InstrInfo.cpp index b8dff00..c36d6c1 100644 --- a/src/ARM_InstrInfo.cpp +++ b/src/ARM_InstrInfo.cpp @@ -359,10 +359,7 @@ Info Decode(bool thumb, u32 num, u32 instr) } if (data & A_Link) - { res.DstRegs |= 1 << 14; - res.SrcRegs |= 1 << 15; - } if (res.Kind == ak_LDM) res.DstRegs |= instr & (1 << 15); // this is right -- cgit v1.2.3 From be8846e31a80bef098cfa03cef5748d3d8011715 Mon Sep 17 00:00:00 2001 From: RSDuck Date: Wed, 17 Jul 2019 03:18:37 +0200 Subject: jit: fix misc static branch things --- src/ARMJIT_x64/ARMJIT_Branch.cpp | 27 +++++++++++++++++++++++---- src/ARMJIT_x64/ARMJIT_Compiler.cpp | 15 ++++++++++----- src/ARM_InstrInfo.cpp | 11 ++++------- 3 files changed, 37 insertions(+), 16 deletions(-) (limited to 'src/ARM_InstrInfo.cpp') diff --git a/src/ARMJIT_x64/ARMJIT_Branch.cpp b/src/ARMJIT_x64/ARMJIT_Branch.cpp index 1f95a90..6ae4aad 100644 --- a/src/ARMJIT_x64/ARMJIT_Branch.cpp +++ b/src/ARMJIT_x64/ARMJIT_Branch.cpp @@ -35,6 +35,7 @@ void Compiler::Comp_JumpTo(u32 addr, bool forceNonConstantCycles) u32 newregion = addr >> 24; u32 regionCodeCycles = cpu9->MemTimings[addr >> 12][0]; + u32 compileTimeCodeCycles = cpu9->RegionCodeCycles; cpu9->RegionCodeCycles = regionCodeCycles; MOV(32, MDisp(RCPU, offsetof(ARMv5, RegionCodeCycles)), Imm32(regionCodeCycles)); @@ -53,7 +54,7 @@ void Compiler::Comp_JumpTo(u32 addr, bool forceNonConstantCycles) if (addr & 0x2) { nextInstr[0] = cpu9->CodeRead32(addr-2, true) >> 16; - cycles += CurCPU->CodeCycles; + cycles += cpu9->CodeCycles; nextInstr[1] = cpu9->CodeRead32(addr+2, false); cycles += CurCPU->CodeCycles; } @@ -61,7 +62,7 @@ { nextInstr[0] = cpu9->CodeRead32(addr, true); nextInstr[1] = nextInstr[0] >> 16; - cycles += CurCPU->CodeCycles; + cycles += cpu9->CodeCycles; } } else @@ -74,6 +75,10 @@ nextInstr[1] = cpu9->CodeRead32(addr+4, false); cycles += cpu9->CodeCycles; } + + cpu9->RegionCodeCycles = compileTimeCodeCycles; + if (setupRegion) + cpu9->SetupCodeMem(R15); } else { @@ -86,26 +91,40 @@ cpu7->CodeCycles = codeCycles; MOV(32, MDisp(RCPU, offsetof(ARM, CodeRegion)), Imm32(codeRegion)); - MOV(32, MDisp(RCPU, offsetof(ARM, CodeRegion)), Imm32(codeCycles)); + MOV(32, MDisp(RCPU, offsetof(ARM, CodeCycles)), Imm32(codeCycles)); if (addr & 0x1) { addr &= ~0x1; newPC = addr+2; + // this is necessary because of the ARM7 BIOS protection + u32 compileTimePC = CurCPU->R[15]; + CurCPU->R[15] = newPC; + nextInstr[0] = ((ARMv4*)CurCPU)->CodeRead16(addr); nextInstr[1] = ((ARMv4*)CurCPU)->CodeRead16(addr+2); cycles += NDS::ARM7MemTimings[codeCycles][0] + NDS::ARM7MemTimings[codeCycles][1]; + + CurCPU->R[15] = compileTimePC; } else { addr &= ~0x3; newPC = addr+4; + u32 compileTimePC = CurCPU->R[15]; + CurCPU->R[15] = newPC; + nextInstr[0] = cpu7->CodeRead32(addr); nextInstr[1] = cpu7->CodeRead32(addr+4); cycles += NDS::ARM7MemTimings[codeCycles][2] + NDS::ARM7MemTimings[codeCycles][3]; + + CurCPU->R[15] = compileTimePC; } + + cpu7->CodeRegion = R15 >> 24; + cpu7->CodeCycles = addr >> 15; } MOV(32, MDisp(RCPU, offsetof(ARM, R[15])), Imm32(newPC)); @@ -204,7 +223,7 @@ void Compiler::T_Comp_BCOND() FixupBranch skipFailed = J(); SetJumpTarget(skipExecute);
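// the jump above is taken when the condition fails; the code fetch cycles still
// have to be paid on that path, and passing true (forceNonConstant) makes
// Comp_AddCycles_C emit them as a runtime ADD instead of folding them into
// ConstantCycles, since this path only runs when the condition check fails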
Comp_AddCycles_C(true); - SetJumpTarget(skipFailed); + SetJumpTarget(skipFailed); } void Compiler::T_Comp_B() diff --git a/src/ARMJIT_x64/ARMJIT_Compiler.cpp b/src/ARMJIT_x64/ARMJIT_Compiler.cpp index 18cb27e..1e871fd 100644 --- a/src/ARMJIT_x64/ARMJIT_Compiler.cpp +++ b/src/ARMJIT_x64/ARMJIT_Compiler.cpp @@ -354,8 +354,6 @@ CompiledBlock Compiler::CompileBlock(ARM* cpu, FetchedInstr instrs[], int instrs if (IsAlmostFull()) InvalidateBlockCache(); - CompiledBlock res = (CompiledBlock)GetWritableCodePtr(); - ConstantCycles = 0; Thumb = cpu->CPSR & 0x20; Num = cpu->Num; @@ -363,6 +361,13 @@ CompiledBlock Compiler::CompileBlock(ARM* cpu, FetchedInstr instrs[], int instrs CodeRegion = cpu->CodeRegion; CurCPU = cpu; + CompiledBlock res = (CompiledBlock)GetWritableCodePtr(); + + if (!IsMapped(Num, R15 - Thumb ? 2 : 4)) + { + printf("Trying to compile a block in unmapped memory\n"); + } + bool mergedThumbBL = false; ABI_PushRegistersAndAdjustStack(BitSet32(ABI_ALL_CALLEE_SAVED & ABI_ALL_GPRS & ~BitSet32({RSP})), 8); @@ -383,7 +388,8 @@ CompiledBlock Compiler::CompileBlock(ARM* cpu, FetchedInstr instrs[], int instrs ? T_Comp[CurInstr.Info.Kind] : A_Comp[CurInstr.Info.Kind]; - if (comp == NULL || i == instrsCount - 1) + bool isConditional = Thumb ? CurInstr.Info.Kind == ARMInstrInfo::tk_BCOND : CurInstr.Cond() < 0xE; + if (comp == NULL || (i == instrsCount - 1 && (!CurInstr.Info.Branches() || isConditional))) { MOV(32, MDisp(RCPU, offsetof(ARM, R[15])), Imm32(R15)); MOV(32, MDisp(RCPU, offsetof(ARM, CodeCycles)), Imm32(CurInstr.CodeCycles)); @@ -454,10 +460,9 @@ CompiledBlock Compiler::CompileBlock(ARM* cpu, FetchedInstr instrs[], int instrs else (this->*comp)(); - FixupBranch skipFailed; if (CurInstr.Cond() < 0xE) { - skipFailed = J(); + FixupBranch skipFailed = J(); SetJumpTarget(skipExecute); Comp_AddCycles_C(); diff --git a/src/ARM_InstrInfo.cpp b/src/ARM_InstrInfo.cpp index c36d6c1..5db2471 100644 --- a/src/ARM_InstrInfo.cpp +++ b/src/ARM_InstrInfo.cpp @@ -178,7 +178,6 @@ enum { T_ReadR13 = 1 << 9, T_WriteR13 = 1 << 10, - T_ReadR15 = 1 << 11, T_BranchAlways = 1 << 12, T_ReadR14 = 1 << 13, @@ -222,7 +221,7 @@ const u32 T_ADD_HIREG = T_WriteHi0 | T_ReadHi0 | T_ReadHi3 | tk(tk_ADD_HIREG); const u32 T_CMP_HIREG = T_ReadHi0 | T_ReadHi3 | tk(tk_CMP_HIREG); const u32 T_MOV_HIREG = T_WriteHi0 | T_ReadHi3 | tk(tk_MOV_HIREG); -const u32 T_ADD_PCREL = T_Write8 | T_ReadR15 | tk(tk_ADD_PCREL); +const u32 T_ADD_PCREL = T_Write8 | tk(tk_ADD_PCREL); const u32 T_ADD_SPREL = T_Write8 | T_ReadR13 | tk(tk_ADD_SPREL); const u32 T_ADD_SP = T_WriteR13 | tk(tk_ADD_SP); @@ -257,11 +256,11 @@ const u32 T_BCOND = T_BranchAlways | tk(tk_BCOND); const u32 T_BX = T_BranchAlways | T_ReadHi3 | tk(tk_BX); const u32 T_BLX_REG = T_BranchAlways | T_WriteR14 | T_ReadHi3 | tk(tk_BLX_REG); const u32 T_B = T_BranchAlways | tk(tk_B); -const u32 T_BL_LONG_1 = T_WriteR14 | T_ReadR15 | tk(tk_BL_LONG_1); -const u32 T_BL_LONG_2 = T_BranchAlways | T_ReadR14 | T_WriteR14 | T_ReadR15 | tk(tk_BL_LONG_2); +const u32 T_BL_LONG_1 = T_WriteR14 | tk(tk_BL_LONG_1); +const u32 T_BL_LONG_2 = T_BranchAlways | T_ReadR14 | T_WriteR14 | tk(tk_BL_LONG_2); const u32 T_UNK = T_BranchAlways | T_WriteR14 | tk(tk_UNK); -const u32 T_SVC = T_BranchAlways | T_WriteR14 | T_ReadR15 | tk(tk_SVC); +const u32 T_SVC = T_BranchAlways | T_WriteR14 | tk(tk_SVC); #define INSTRFUNC_PROTO(x) u32 x #include "ARM_InstrTable.h" @@ -299,8 +298,6 @@ Info Decode(bool thumb, u32 num, u32 instr) res.SrcRegs |= (1 << 13); if (data & T_WriteR13) res.DstRegs |= (1 << 13); - if (data & 
T_ReadR15) - res.SrcRegs |= (1 << 15); if (data & T_WriteR14) res.DstRegs |= (1 << 14); if (data & T_ReadR14) -- cgit v1.2.3 From 4a0f6b3b4bd60815d0c8259e4ec2a944bfb716be Mon Sep 17 00:00:00 2001 From: RSDuck Date: Sun, 21 Jul 2019 17:28:16 +0200 Subject: jit: fix thumb hi reg alu and mcr halt + mcr/mrc aren't always unk, msr_imm is never unk on ARM7 --- src/ARMJIT.cpp | 2 +- src/ARMJIT_x64/ARMJIT_ALU.cpp | 4 +--- src/ARMJIT_x64/ARMJIT_Branch.cpp | 21 ++++++++++++++------- src/ARM_InstrInfo.cpp | 33 ++++++++++++++++++++++++++++----- src/ARM_InstrInfo.h | 1 + 5 files changed, 45 insertions(+), 16 deletions(-) (limited to 'src/ARM_InstrInfo.cpp') diff --git a/src/ARMJIT.cpp b/src/ARMJIT.cpp index aad14c0..6948eee 100644 --- a/src/ARMJIT.cpp +++ b/src/ARMJIT.cpp @@ -174,7 +174,7 @@ CompiledBlock CompileBlock(ARM* cpu) instrs[i].Info = ARMInstrInfo::Decode(thumb, cpu->Num, instrs[i].Instr); i++; - } while(!instrs[i - 1].Info.Branches() && i < Config::JIT_MaxBlockSize); + } while(!instrs[i - 1].Info.EndBlock && i < Config::JIT_MaxBlockSize); CompiledBlock block = compiler->CompileBlock(cpu, instrs, i); diff --git a/src/ARMJIT_x64/ARMJIT_ALU.cpp b/src/ARMJIT_x64/ARMJIT_ALU.cpp index 013f54c..bdf06f7 100644 --- a/src/ARMJIT_x64/ARMJIT_ALU.cpp +++ b/src/ARMJIT_x64/ARMJIT_ALU.cpp @@ -663,7 +663,7 @@ void Compiler::T_Comp_ALU_HiReg() switch (op) { case 0x0: // ADD - Comp_ArithTriOp(&Compiler::ADD, rdMapped, rdMapped, rs, false, opSymmetric|opRetriveCV); + Comp_ArithTriOp(&Compiler::ADD, rdMapped, rdMapped, rs, false, opSymmetric); break; case 0x1: // CMP Comp_CmpOp(2, rdMapped, rs, false); @@ -671,8 +671,6 @@ void Compiler::T_Comp_ALU_HiReg() case 0x2: // MOV if (rdMapped != rs) MOV(32, rdMapped, rs); - TEST(32, rdMapped, rdMapped); - Comp_RetriveFlags(false, false, false); break; } diff --git a/src/ARMJIT_x64/ARMJIT_Branch.cpp b/src/ARMJIT_x64/ARMJIT_Branch.cpp index 6ae4aad..9d4c1e2 100644 --- a/src/ARMJIT_x64/ARMJIT_Branch.cpp +++ b/src/ARMJIT_x64/ARMJIT_Branch.cpp @@ -235,16 +235,23 @@ void Compiler::T_Comp_B() void Compiler::T_Comp_BranchXchangeReg() { bool link = CurInstr.Instr & (1 << 7); - if (link && Num == 1) - { - printf("BLX unsupported on ARM7!!!\n"); - return; - } - OpArg rn = MapReg(CurInstr.A_Reg(3)); if (link) + { + if (Num == 1) + { + printf("BLX unsupported on ARM7!!!\n"); + return; + } + MOV(32, R(RSCRATCH), MapReg(CurInstr.A_Reg(3))); MOV(32, MapReg(14), Imm32(R15 - 1)); - Comp_JumpTo(rn.GetSimpleReg()); + Comp_JumpTo(RSCRATCH); + } + else + { + OpArg rn = MapReg(CurInstr.A_Reg(3)); + Comp_JumpTo(rn.GetSimpleReg()); + } } void Compiler::T_Comp_BL_LONG_1() diff --git a/src/ARM_InstrInfo.cpp b/src/ARM_InstrInfo.cpp index 5db2471..b70c8dc 100644 --- a/src/ARM_InstrInfo.cpp +++ b/src/ARM_InstrInfo.cpp @@ -152,11 +152,11 @@ const u32 A_BX = A_BranchAlways | A_Read0 | ak(ak_BX); const u32 A_BLX_REG = A_BranchAlways | A_Link | A_Read0 | ak(ak_BLX_REG); const u32 A_UNK = A_BranchAlways | A_Link | ak(ak_UNK); -const u32 A_MSR_IMM = A_UnkOnARM7 | ak(ak_MSR_IMM); -const u32 A_MSR_REG = A_Read0 | A_UnkOnARM7 | ak(ak_MSR_REG); -const u32 A_MRS = A_Write12 | A_UnkOnARM7 | ak(ak_MRS); -const u32 A_MCR = A_Read12 | A_UnkOnARM7 | ak(ak_MCR); -const u32 A_MRC = A_Write12 | A_UnkOnARM7 | ak(ak_MRC); +const u32 A_MSR_IMM = ak(ak_MSR_IMM); +const u32 A_MSR_REG = A_Read0 | ak(ak_MSR_REG); +const u32 A_MRS = A_Write12 | ak(ak_MRS); +const u32 A_MCR = A_Read12 | ak(ak_MCR); +const u32 A_MRC = A_Write12 | ak(ak_MRC); const u32 A_SVC = A_BranchAlways | A_Link | ak(ak_SVC); // THUMB @@ -310,6 +310,7 @@ Info
Decode(bool thumb, u32 num, u32 instr) res.DstRegs |= 1 << 15; res.Kind = (data >> 16) & 0x3F; + res.EndBlock = res.Branches(); return res; } @@ -324,6 +325,26 @@ Info Decode(bool thumb, u32 num, u32 instr) res.Kind = (data >> 13) & 0x1FF; + if (res.Kind == ak_MCR) + { + u32 cn = (instr >> 16) & 0xF; + u32 cm = instr & 0xF; + u32 cpinfo = (instr >> 5) & 0x7; + u32 id = (cn<<8)|(cm<<4)|cpinfo; + if (id == 0x704 || id == 0x782) + res.EndBlock |= true; + } + if (res.Kind == ak_MCR || res.Kind == ak_MRC) + { + u32 cp = ((instr >> 8) & 0xF); + if ((num == 0 && cp != 15) || (num == 1 && cp != 14)) + { + printf("happens\n"); + data = A_UNK; + res.Kind = ak_UNK; + } + } + if (data & A_Read0) res.SrcRegs |= 1 << (instr & 0xF); if (data & A_Read16) @@ -361,6 +382,8 @@ Info Decode(bool thumb, u32 num, u32 instr) if (res.Kind == ak_LDM) res.DstRegs |= instr & (1 << 15); // this is right + res.EndBlock |= res.Branches(); + return res; } } diff --git a/src/ARM_InstrInfo.h b/src/ARM_InstrInfo.h index 51dcfa2..4fe9b10 100644 --- a/src/ARM_InstrInfo.h +++ b/src/ARM_InstrInfo.h @@ -220,6 +220,7 @@ struct Info u16 DstRegs, SrcRegs; u16 Kind; + bool EndBlock; bool Branches() { return DstRegs & (1 << 15); -- cgit v1.2.3 From ec21172cd9932805f02d84f41599c7a23e3b23f5 Mon Sep 17 00:00:00 2001 From: RSDuck Date: Sat, 17 Aug 2019 14:58:37 +0200 Subject: fix register alloc for half word loads. Fixes Mega Man Star Force 2 with cheat applied; it probably used a pc relative load which was interpreted as a branch. --- src/ARM_InstrInfo.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'src/ARM_InstrInfo.cpp') diff --git a/src/ARM_InstrInfo.cpp b/src/ARM_InstrInfo.cpp index b70c8dc..4813799 100644 --- a/src/ARM_InstrInfo.cpp +++ b/src/ARM_InstrInfo.cpp @@ -127,8 +127,8 @@ A_IMPLEMENT_WB_LDRSTR(LDRB,LDR) #define A_STRD A_Read12Double #define A_IMPLEMENT_HD_LDRSTR(x,k) \ - const u32 A_##x##_IMM = A_##k | A_Read16 | A_Write16 | ak(ak_##x##_IMM); \ - const u32 A_##x##_REG = A_##k | A_Read16 | A_Write16 | A_Read0 | ak(ak_##x##_REG); \ + const u32 A_##x##_IMM = A_##k | A_Read16 | A_MemWriteback | ak(ak_##x##_IMM); \ + const u32 A_##x##_REG = A_##k | A_Read16 | A_MemWriteback | A_Read0 | ak(ak_##x##_REG); \ const u32 A_##x##_POST_IMM = A_##k | A_Read16 | A_Write16 | ak(ak_##x##_POST_IMM); \ const u32 A_##x##_POST_REG = A_##k | A_Read16 | A_Write16 | A_Read0 | ak(ak_##x##_POST_REG); -- cgit v1.2.3 From 5ea91b8a039e0735ac5cb102e2375c26c4f7a150 Mon Sep 17 00:00:00 2001 From: RSDuck Date: Sun, 25 Aug 2019 12:28:48 +0200 Subject: optimise away unneeded flag sets - especially useful for thumb code and larger max block sizes - can still be improved upon --- src/ARMJIT.cpp | 24 ++++ src/ARMJIT.h | 1 + src/ARMJIT_x64/ARMJIT_ALU.cpp | 64 +++++++--- src/ARMJIT_x64/ARMJIT_Compiler.cpp | 9 ++ src/ARMJIT_x64/ARMJIT_Compiler.h | 6 +- src/ARM_InstrInfo.cpp | 238 +++++++++++++++++++++++-------------- src/ARM_InstrInfo.h | 13 ++ src/libui_sdl/main.cpp | 2 + 8 files changed, 248 insertions(+), 109 deletions(-) (limited to 'src/ARM_InstrInfo.cpp') diff --git a/src/ARMJIT.cpp b/src/ARMJIT.cpp index 949bc1c..3b6bc2e 100644 --- a/src/ARMJIT.cpp +++ b/src/ARMJIT.cpp @@ -126,6 +126,24 @@ void DeInit() delete compiler; } +void floodFillSetFlags(FetchedInstr instrs[], int start, u8 flags) +{ + for (int j = start; j >= 0; j--) + { + u8 match = instrs[j].Info.WriteFlags & flags; + u8 matchMaybe = (instrs[j].Info.WriteFlags >> 4) & flags; + if (matchMaybe) // writes flags maybe + instrs[j].SetFlags |= matchMaybe; + if (match) + {
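// these flags are definitely overwritten by instruction j, so the demand from
// below is satisfied here: record them in SetFlags and stop propagating them
// to older instructions, whose writes of the same flags are dead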
instrs[j].SetFlags |= match; + flags &= ~match; + if (!flags) + return; + } + } +} + CompiledBlock CompileBlock(ARM* cpu) { bool thumb = cpu->CPSR & 0x20; @@ -175,8 +193,14 @@ CompiledBlock CompileBlock(ARM* cpu) instrs[i].Info = ARMInstrInfo::Decode(thumb, cpu->Num, instrs[i].Instr); i++; + + bool canCompile = compiler->CanCompile(thumb, instrs[i - 1].Info.Kind); + if (instrs[i - 1].Info.ReadFlags != 0 || !canCompile) + floodFillSetFlags(instrs, i - 2, canCompile ? instrs[i - 1].Info.ReadFlags : 0xF); } while(!instrs[i - 1].Info.EndBlock && i < Config::JIT_MaxBlockSize); + floodFillSetFlags(instrs, i - 1, 0xF); + CompiledBlock block = compiler->CompileBlock(cpu, instrs, i); if (cpu->Num == 0) diff --git a/src/ARMJIT.h b/src/ARMJIT.h index 0fc1c38..6197695 100644 --- a/src/ARMJIT.h +++ b/src/ARMJIT.h @@ -28,6 +28,7 @@ struct FetchedInstr return Instr >> 28; } + u8 SetFlags; u32 Instr; u32 NextInstr[2]; diff --git a/src/ARMJIT_x64/ARMJIT_ALU.cpp b/src/ARMJIT_x64/ARMJIT_ALU.cpp index f0bcf8e..6a7d711 100644 --- a/src/ARMJIT_x64/ARMJIT_ALU.cpp +++ b/src/ARMJIT_x64/ARMJIT_ALU.cpp @@ -111,6 +111,8 @@ OpArg Compiler::A_Comp_GetALUOp2(bool S, bool& carryUsed) } else { + S = S && (CurInstr.SetFlags & 0x2); + int op = (CurInstr.Instr >> 5) & 0x3; if (CurInstr.Instr & (1 << 4)) { @@ -215,7 +217,8 @@ void Compiler::A_Comp_MovOp() if (S) { - TEST(32, rd, rd); + if (FlagsNZRequired()) + TEST(32, rd, rd); Comp_RetriveFlags(false, false, carryUsed); } @@ -263,12 +266,14 @@ void Compiler::Comp_MulOp(bool S, bool add, Gen::OpArg rd, Gen::OpArg rm, Gen::O { IMUL(32, RSCRATCH, rs); LEA(32, rd.GetSimpleReg(), MRegSum(RSCRATCH, rn.GetSimpleReg())); - TEST(32, rd, rd); + if (S && FlagsNZRequired()) + TEST(32, rd, rd); } else { IMUL(32, RSCRATCH, rs); MOV(32, rd, R(RSCRATCH)); + if (S && FlagsNZRequired()) TEST(32, R(RSCRATCH), R(RSCRATCH)); } @@ -331,7 +336,7 @@ void Compiler::A_Comp_SMULL_SMLAL() else { IMUL(64, RSCRATCH2, R(RSCRATCH3)); - if (S) + if (S && FlagsNZRequired()) TEST(64, R(RSCRATCH2), R(RSCRATCH2)); } @@ -345,9 +350,20 @@ void Compiler::A_Comp_SMULL_SMLAL() void Compiler::Comp_RetriveFlags(bool sign, bool retriveCV, bool carryUsed) { - CPSRDirty = true; + if (CurInstr.SetFlags == 0) + return; + if (retriveCV && !(CurInstr.SetFlags & 0x3)) + retriveCV = false; bool carryOnly = !retriveCV && carryUsed; + if (carryOnly && !(CurInstr.SetFlags & 0x2)) + { + carryUsed = false; + carryOnly = false; + } + + CPSRDirty = true; + if (retriveCV) { SETcc(CC_O, R(RSCRATCH)); @@ -355,19 +371,28 @@ void Compiler::Comp_RetriveFlags(bool sign, bool retriveCV, bool carryUsed) LEA(32, RSCRATCH2, MComplex(RSCRATCH, RSCRATCH3, SCALE_2, 0)); } - SETcc(CC_S, R(RSCRATCH)); - SETcc(CC_Z, R(RSCRATCH3)); - LEA(32, RSCRATCH, MComplex(RSCRATCH3, RSCRATCH, SCALE_2, 0)); - int shiftAmount = 30; - if (retriveCV || carryUsed) + if (FlagsNZRequired()) { - LEA(32, RSCRATCH, MComplex(RSCRATCH2, RSCRATCH, carryOnly ? SCALE_2 : SCALE_4, 0)); - shiftAmount = carryOnly ? 29 : 28; - } - SHL(32, R(RSCRATCH), Imm8(shiftAmount)); + SETcc(CC_S, R(RSCRATCH)); + SETcc(CC_Z, R(RSCRATCH3)); + LEA(32, RSCRATCH, MComplex(RSCRATCH3, RSCRATCH, SCALE_2, 0)); + int shiftAmount = 30; + if (retriveCV || carryUsed) + { + LEA(32, RSCRATCH, MComplex(RSCRATCH2, RSCRATCH, carryOnly ? SCALE_2 : SCALE_4, 0)); + shiftAmount = carryOnly ? 29 : 28; + } + SHL(32, R(RSCRATCH), Imm8(shiftAmount)); - AND(32, R(RCPSR), Imm32(0x3FFFFFFF & ~(carryUsed << 29) & ~((retriveCV ? 
3 : 0) << 28))); - OR(32, R(RCPSR), R(RSCRATCH)); + AND(32, R(RCPSR), Imm32(0x3FFFFFFF & ~(carryUsed << 29) & ~((retriveCV ? 3 : 0) << 28))); + OR(32, R(RCPSR), R(RSCRATCH)); + } + else + { + SHL(32, R(RSCRATCH2), Imm8(carryOnly ? 29 : 28)); + AND(32, R(RCPSR), Imm32(0xFFFFFFFF & ~(carryUsed << 29) & ~((retriveCV ? 3 : 0) << 28))); + OR(32, R(RCPSR), R(RSCRATCH2)); + } } // always uses RSCRATCH, RSCRATCH2 only if S == true @@ -523,7 +548,8 @@ void Compiler::T_Comp_ShiftImm() if (shifted != rd) MOV(32, rd, shifted); - TEST(32, rd, rd); + if (FlagsNZRequired()) + TEST(32, rd, rd); Comp_RetriveFlags(false, false, carryUsed); } @@ -557,7 +583,8 @@ void Compiler::T_Comp_ALU_Imm8() { case 0x0: MOV(32, rd, imm); - TEST(32, rd, rd); + if (FlagsNZRequired()) + TEST(32, rd, rd); Comp_RetriveFlags(false, false, false); return; case 0x1: @@ -607,7 +634,8 @@ void Compiler::T_Comp_ALU() int shiftOp = op == 0x7 ? 3 : op - 0x2; bool carryUsed; OpArg shifted = Comp_RegShiftReg(shiftOp, rs, rd, true, carryUsed); - TEST(32, shifted, shifted); + if (FlagsNZRequired()) + TEST(32, shifted, shifted); MOV(32, rd, shifted); Comp_RetriveFlags(false, false, true); } diff --git a/src/ARMJIT_x64/ARMJIT_Compiler.cpp b/src/ARMJIT_x64/ARMJIT_Compiler.cpp index ab13cb6..6abb2bb 100644 --- a/src/ARMJIT_x64/ARMJIT_Compiler.cpp +++ b/src/ARMJIT_x64/ARMJIT_Compiler.cpp @@ -342,6 +342,11 @@ const Compiler::CompileFunc T_Comp[ARMInstrInfo::tk_Count] = { }; #undef F +bool Compiler::CanCompile(bool thumb, u16 kind) +{ + return (thumb ? T_Comp[kind] : A_Comp[kind]) != NULL; +} + void Compiler::Reset() { memset(ResetStart, 0xcc, CodeMemSize); @@ -380,11 +385,15 @@ CompiledBlock Compiler::CompileBlock(ARM* cpu, FetchedInstr instrs[], int instrs // TODO: this is ugly as a whole, do better RegCache = RegisterCache(this, instrs, instrsCount); + printf("block start %d\n", Thumb); + for (int i = 0; i < instrsCount; i++) { R15 += Thumb ? 2 : 4; CurInstr = instrs[i]; + printf("%x %d %d %d\n", CurInstr.Instr, CurInstr.SetFlags, CurInstr.Info.WriteFlags, CurInstr.Info.ReadFlags); + CompileFunc comp = Thumb ? 
T_Comp[CurInstr.Info.Kind] : A_Comp[CurInstr.Info.Kind]; diff --git a/src/ARMJIT_x64/ARMJIT_Compiler.h b/src/ARMJIT_x64/ARMJIT_Compiler.h index 3151cbc..8861884 100644 --- a/src/ARMJIT_x64/ARMJIT_Compiler.h +++ b/src/ARMJIT_x64/ARMJIT_Compiler.h @@ -29,6 +29,8 @@ public: void LoadReg(int reg, Gen::X64Reg nativeReg); void SaveReg(int reg, Gen::X64Reg nativeReg); + bool CanCompile(bool thumb, u16 kind); + typedef void (Compiler::*CompileFunc)(); void Comp_JumpTo(Gen::X64Reg addr, bool restoreCPSR = false); @@ -64,7 +66,6 @@ public: void A_Comp_BranchImm(); void A_Comp_BranchXchangeReg(); - void T_Comp_ShiftImm(); void T_Comp_AddSub_(); void T_Comp_ALU_Imm8(); @@ -121,6 +122,9 @@ public: void LoadCPSR(); void SaveCPSR(); + bool FlagsNZRequired() + { return CurInstr.SetFlags & 0xC; } + Gen::FixupBranch CheckCondition(u32 cond); Gen::OpArg MapReg(int reg) diff --git a/src/ARM_InstrInfo.cpp b/src/ARM_InstrInfo.cpp index 4813799..ea6d827 100644 --- a/src/ARM_InstrInfo.cpp +++ b/src/ARM_InstrInfo.cpp @@ -5,7 +5,7 @@ namespace ARMInstrInfo { -#define ak(x) ((x) << 13) +#define ak(x) ((x) << 18) enum { A_Read0 = 1 << 0, @@ -26,69 +26,81 @@ enum { A_Link = 1 << 10, A_UnkOnARM7 = 1 << 11, + + A_SetNZ = 1 << 12, + A_SetCV = 1 << 13, + A_SetMaybeC = 1 << 14, + A_MulFlags = 1 << 15, + A_ReadC = 1 << 16, + A_RRXReadC = 1 << 17, }; #define A_BIOP A_Read16 #define A_MONOOP 0 -#define A_IMPLEMENT_ALU_OP(x,k) \ - const u32 A_##x##_IMM = A_Write12 | A_##k | ak(ak_##x##_IMM); \ - const u32 A_##x##_REG_LSL_IMM = A_Write12 | A_##k | A_Read0 | ak(ak_##x##_REG_LSL_IMM); \ - const u32 A_##x##_REG_LSR_IMM = A_Write12 | A_##k | A_Read0 | ak(ak_##x##_REG_LSR_IMM); \ - const u32 A_##x##_REG_ASR_IMM = A_Write12 | A_##k | A_Read0 | ak(ak_##x##_REG_ASR_IMM); \ - const u32 A_##x##_REG_ROR_IMM = A_Write12 | A_##k | A_Read0 | ak(ak_##x##_REG_ROR_IMM); \ - const u32 A_##x##_REG_LSL_REG = A_Write12 | A_##k | A_Read0 | A_Read8 | ak(ak_##x##_REG_LSL_REG); \ - const u32 A_##x##_REG_LSR_REG = A_Write12 | A_##k | A_Read0 | A_Read8 | ak(ak_##x##_REG_LSR_REG); \ - const u32 A_##x##_REG_ASR_REG = A_Write12 | A_##k | A_Read0 | A_Read8 | ak(ak_##x##_REG_ASR_REG); \ - const u32 A_##x##_REG_ROR_REG = A_Write12 | A_##k | A_Read0 | A_Read8 | ak(ak_##x##_REG_ROR_REG); \ +#define A_ARITH A_SetCV +#define A_LOGIC A_SetMaybeC +#define A_ARITH_IMM A_SetCV +#define A_LOGIC_IMM 0 + +#define A_IMPLEMENT_ALU_OP(x,k,a,c) \ + const u32 A_##x##_IMM = A_Write12 | c | A_##k | ak(ak_##x##_IMM); \ + const u32 A_##x##_REG_LSL_IMM = A_Write12 | c | A_##k | A_Read0 | ak(ak_##x##_REG_LSL_IMM); \ + const u32 A_##x##_REG_LSR_IMM = A_Write12 | c | A_##k | A_Read0 | ak(ak_##x##_REG_LSR_IMM); \ + const u32 A_##x##_REG_ASR_IMM = A_Write12 | c | A_##k | A_Read0 | ak(ak_##x##_REG_ASR_IMM); \ + const u32 A_##x##_REG_ROR_IMM = A_RRXReadC | A_Write12 | c | A_##k | A_Read0 | ak(ak_##x##_REG_ROR_IMM); \ + const u32 A_##x##_REG_LSL_REG = A_Write12 | c | A_##k | A_Read0 | A_Read8 | ak(ak_##x##_REG_LSL_REG); \ + const u32 A_##x##_REG_LSR_REG = A_Write12 | c | A_##k | A_Read0 | A_Read8 | ak(ak_##x##_REG_LSR_REG); \ + const u32 A_##x##_REG_ASR_REG = A_Write12 | c | A_##k | A_Read0 | A_Read8 | ak(ak_##x##_REG_ASR_REG); \ + const u32 A_##x##_REG_ROR_REG = A_Write12 | c | A_##k | A_Read0 | A_Read8 | ak(ak_##x##_REG_ROR_REG); \ \ - const u32 A_##x##_IMM_S = A_Write12 | A_##k | ak(ak_##x##_IMM_S); \ - const u32 A_##x##_REG_LSL_IMM_S = A_Write12 | A_##k | A_Read0 | ak(ak_##x##_REG_LSL_IMM_S); \ - const u32 A_##x##_REG_LSR_IMM_S = A_Write12 | A_##k | A_Read0 | 
ak(ak_##x##_REG_LSR_IMM_S); \ - const u32 A_##x##_REG_ASR_IMM_S = A_Write12 | A_##k | A_Read0 | ak(ak_##x##_REG_ASR_IMM_S); \ - const u32 A_##x##_REG_ROR_IMM_S = A_Write12 | A_##k | A_Read0 | ak(ak_##x##_REG_ROR_IMM_S); \ - const u32 A_##x##_REG_LSL_REG_S = A_Write12 | A_##k | A_Read0 | A_Read8 | ak(ak_##x##_REG_LSL_REG_S); \ - const u32 A_##x##_REG_LSR_REG_S = A_Write12 | A_##k | A_Read0 | A_Read8 | ak(ak_##x##_REG_LSR_REG_S); \ - const u32 A_##x##_REG_ASR_REG_S = A_Write12 | A_##k | A_Read0 | A_Read8 | ak(ak_##x##_REG_ASR_REG_S); \ - const u32 A_##x##_REG_ROR_REG_S = A_Write12 | A_##k | A_Read0 | A_Read8 | ak(ak_##x##_REG_ROR_REG_S); - -A_IMPLEMENT_ALU_OP(AND,BIOP) -A_IMPLEMENT_ALU_OP(EOR,BIOP) -A_IMPLEMENT_ALU_OP(SUB,BIOP) -A_IMPLEMENT_ALU_OP(RSB,BIOP) -A_IMPLEMENT_ALU_OP(ADD,BIOP) -A_IMPLEMENT_ALU_OP(ADC,BIOP) -A_IMPLEMENT_ALU_OP(SBC,BIOP) -A_IMPLEMENT_ALU_OP(RSC,BIOP) -A_IMPLEMENT_ALU_OP(ORR,BIOP) -A_IMPLEMENT_ALU_OP(MOV,MONOOP) -A_IMPLEMENT_ALU_OP(BIC,BIOP) -A_IMPLEMENT_ALU_OP(MVN,MONOOP) + const u32 A_##x##_IMM_S = A_SetNZ | c | A_##a##_IMM | A_Write12 | A_##k | ak(ak_##x##_IMM_S); \ + const u32 A_##x##_REG_LSL_IMM_S = A_SetNZ | c | A_##a | A_Write12 | A_##k | A_Read0 | ak(ak_##x##_REG_LSL_IMM_S); \ + const u32 A_##x##_REG_LSR_IMM_S = A_SetNZ | c | A_##a | A_Write12 | A_##k | A_Read0 | ak(ak_##x##_REG_LSR_IMM_S); \ + const u32 A_##x##_REG_ASR_IMM_S = A_SetNZ | c | A_##a | A_Write12 | A_##k | A_Read0 | ak(ak_##x##_REG_ASR_IMM_S); \ + const u32 A_##x##_REG_ROR_IMM_S = A_RRXReadC | A_SetNZ | c | A_##a | A_Write12 | A_##k | A_Read0 | ak(ak_##x##_REG_ROR_IMM_S); \ + const u32 A_##x##_REG_LSL_REG_S = A_SetNZ | c | A_##a | A_Write12 | A_##k | A_Read0 | A_Read8 | ak(ak_##x##_REG_LSL_REG_S); \ + const u32 A_##x##_REG_LSR_REG_S = A_SetNZ | c | A_##a | A_Write12 | A_##k | A_Read0 | A_Read8 | ak(ak_##x##_REG_LSR_REG_S); \ + const u32 A_##x##_REG_ASR_REG_S = A_SetNZ | c | A_##a | A_Write12 | A_##k | A_Read0 | A_Read8 | ak(ak_##x##_REG_ASR_REG_S); \ + const u32 A_##x##_REG_ROR_REG_S = A_SetNZ | c | A_##a | A_Write12 | A_##k | A_Read0 | A_Read8 | ak(ak_##x##_REG_ROR_REG_S); + +A_IMPLEMENT_ALU_OP(AND,BIOP,LOGIC,0) +A_IMPLEMENT_ALU_OP(EOR,BIOP,LOGIC,0) +A_IMPLEMENT_ALU_OP(SUB,BIOP,ARITH,0) +A_IMPLEMENT_ALU_OP(RSB,BIOP,ARITH,0) +A_IMPLEMENT_ALU_OP(ADD,BIOP,ARITH,0) +A_IMPLEMENT_ALU_OP(ADC,BIOP,ARITH,A_ReadC) +A_IMPLEMENT_ALU_OP(SBC,BIOP,ARITH,A_ReadC) +A_IMPLEMENT_ALU_OP(RSC,BIOP,ARITH,A_ReadC) +A_IMPLEMENT_ALU_OP(ORR,BIOP,LOGIC,0) +A_IMPLEMENT_ALU_OP(MOV,MONOOP,LOGIC,0) +A_IMPLEMENT_ALU_OP(BIC,BIOP,LOGIC,0) +A_IMPLEMENT_ALU_OP(MVN,MONOOP,LOGIC,0) const u32 A_MOV_REG_LSL_IMM_DBG = A_MOV_REG_LSL_IMM; -#define A_IMPLEMENT_ALU_TEST(x) \ - const u32 A_##x##_IMM = A_Read16 | A_Read0 | ak(ak_##x##_IMM); \ - const u32 A_##x##_REG_LSL_IMM = A_Read16 | A_Read0 | ak(ak_##x##_REG_LSL_IMM); \ - const u32 A_##x##_REG_LSR_IMM = A_Read16 | A_Read0 | ak(ak_##x##_REG_LSR_IMM); \ - const u32 A_##x##_REG_ASR_IMM = A_Read16 | A_Read0 | ak(ak_##x##_REG_ASR_IMM); \ - const u32 A_##x##_REG_ROR_IMM = A_Read16 | A_Read0 | ak(ak_##x##_REG_ROR_IMM); \ - const u32 A_##x##_REG_LSL_REG = A_Read16 | A_Read0 | A_Read8 | ak(ak_##x##_REG_LSL_REG); \ - const u32 A_##x##_REG_LSR_REG = A_Read16 | A_Read0 | A_Read8 | ak(ak_##x##_REG_LSR_REG); \ - const u32 A_##x##_REG_ASR_REG = A_Read16 | A_Read0 | A_Read8 | ak(ak_##x##_REG_ASR_REG); \ - const u32 A_##x##_REG_ROR_REG = A_Read16 | A_Read0 | A_Read8 | ak(ak_##x##_REG_ROR_REG); - -A_IMPLEMENT_ALU_TEST(TST) -A_IMPLEMENT_ALU_TEST(TEQ) -A_IMPLEMENT_ALU_TEST(CMP) -A_IMPLEMENT_ALU_TEST(CMN) - 
-const u32 A_MUL = A_Write16 | A_Read0 | A_Read8 | ak(ak_MUL); -const u32 A_MLA = A_Write16 | A_Read0 | A_Read8 | A_Read12 | ak(ak_MLA); -const u32 A_UMULL = A_Write16 | A_Write12 | A_Read0 | A_Read8 | ak(ak_UMULL); -const u32 A_UMLAL = A_Write16 | A_Write12 | A_Read16 | A_Read12 | A_Read0 | A_Read8 | ak(ak_UMLAL); -const u32 A_SMULL = A_Write16 | A_Write12 | A_Read0 | A_Read8 | ak(ak_SMULL); -const u32 A_SMLAL = A_Write16 | A_Write12 | A_Read16 | A_Read12 | A_Read0 | A_Read8 | ak(ak_SMLAL); +#define A_IMPLEMENT_ALU_TEST(x,a) \ + const u32 A_##x##_IMM = A_SetNZ | A_Read16 | A_##a | A_Read0 | ak(ak_##x##_IMM); \ + const u32 A_##x##_REG_LSL_IMM = A_SetNZ | A_Read16 | A_##a | A_Read0 | ak(ak_##x##_REG_LSL_IMM); \ + const u32 A_##x##_REG_LSR_IMM = A_SetNZ | A_Read16 | A_##a | A_Read0 | ak(ak_##x##_REG_LSR_IMM); \ + const u32 A_##x##_REG_ASR_IMM = A_SetNZ | A_Read16 | A_##a | A_Read0 | ak(ak_##x##_REG_ASR_IMM); \ + const u32 A_##x##_REG_ROR_IMM = A_RRXReadC | A_SetNZ | A_Read16 | A_##a | A_Read0 | ak(ak_##x##_REG_ROR_IMM); \ + const u32 A_##x##_REG_LSL_REG = A_SetNZ | A_Read16 | A_##a | A_Read0 | A_Read8 | ak(ak_##x##_REG_LSL_REG); \ + const u32 A_##x##_REG_LSR_REG = A_SetNZ | A_Read16 | A_##a | A_Read0 | A_Read8 | ak(ak_##x##_REG_LSR_REG); \ + const u32 A_##x##_REG_ASR_REG = A_SetNZ | A_Read16 | A_##a | A_Read0 | A_Read8 | ak(ak_##x##_REG_ASR_REG); \ + const u32 A_##x##_REG_ROR_REG = A_SetNZ | A_Read16 | A_##a | A_Read0 | A_Read8 | ak(ak_##x##_REG_ROR_REG); + +A_IMPLEMENT_ALU_TEST(TST,LOGIC) +A_IMPLEMENT_ALU_TEST(TEQ,LOGIC) +A_IMPLEMENT_ALU_TEST(CMP,ARITH) +A_IMPLEMENT_ALU_TEST(CMN,ARITH) + +const u32 A_MUL = A_MulFlags | A_Write16 | A_Read0 | A_Read8 | ak(ak_MUL); +const u32 A_MLA = A_MulFlags | A_Write16 | A_Read0 | A_Read8 | A_Read12 | ak(ak_MLA); +const u32 A_UMULL = A_MulFlags | A_Write16 | A_Write12 | A_Read0 | A_Read8 | ak(ak_UMULL); +const u32 A_UMLAL = A_MulFlags | A_Write16 | A_Write12 | A_Read16 | A_Read12 | A_Read0 | A_Read8 | ak(ak_UMLAL); +const u32 A_SMULL = A_MulFlags | A_Write16 | A_Write12 | A_Read0 | A_Read8 | ak(ak_SMULL); +const u32 A_SMLAL = A_MulFlags | A_Write16 | A_Write12 | A_Read16 | A_Read12 | A_Read0 | A_Read8 | ak(ak_SMLAL); const u32 A_SMLAxy = A_Write16 | A_Read0 | A_Read8 | A_Read12 | ak(ak_SMLALxy); const u32 A_SMLAWy = A_Write16 | A_Read0 | A_Read8 | A_Read12 | ak(ak_SMLAWy); const u32 A_SMULWy = A_Write16 | A_Read0 | A_Read8 | ak(ak_SMULWy); @@ -161,7 +173,7 @@ const u32 A_SVC = A_BranchAlways | A_Link | ak(ak_SVC); // THUMB -#define tk(x) ((x) << 16) +#define tk(x) ((x) << 20) enum { T_Read0 = 1 << 0, @@ -183,42 +195,47 @@ enum { T_ReadR14 = 1 << 13, T_WriteR14 = 1 << 14, - T_PopPC = 1 << 15 + T_PopPC = 1 << 15, + + T_SetNZ = 1 << 16, + T_SetCV = 1 << 17, + T_SetMaybeC = 1 << 18, + T_ReadC = 1 << 19 }; -const u32 T_LSL_IMM = T_Write0 | T_Read3 | tk(tk_LSL_IMM); -const u32 T_LSR_IMM = T_Write0 | T_Read3 | tk(tk_LSR_IMM); -const u32 T_ASR_IMM = T_Write0 | T_Read3 | tk(tk_ASR_IMM); - -const u32 T_ADD_REG_ = T_Write0 | T_Read3 | T_Read6 | tk(tk_ADD_REG_); -const u32 T_SUB_REG_ = T_Write0 | T_Read3 | T_Read6 | tk(tk_SUB_REG_); -const u32 T_ADD_IMM_ = T_Write0 | T_Read3 | tk(tk_ADD_IMM_); -const u32 T_SUB_IMM_ = T_Write0 | T_Read3 | tk(tk_SUB_IMM_); - -const u32 T_MOV_IMM = T_Write8 | tk(tk_MOV_IMM); -const u32 T_CMP_IMM = T_Write8 | tk(tk_CMP_IMM); -const u32 T_ADD_IMM = T_Write8 | T_Read8 | tk(tk_ADD_IMM); -const u32 T_SUB_IMM = T_Write8 | T_Read8 | tk(tk_SUB_IMM); - -const u32 T_AND_REG = T_Write0 | T_Read0 | T_Read3 | tk(tk_AND_REG); -const u32 T_EOR_REG = 
T_Write0 | T_Read0 | T_Read3 | tk(tk_EOR_REG); -const u32 T_LSL_REG = T_Write0 | T_Read0 | T_Read3 | tk(tk_LSL_REG); -const u32 T_LSR_REG = T_Write0 | T_Read0 | T_Read3 | tk(tk_LSR_REG); -const u32 T_ASR_REG = T_Write0 | T_Read0 | T_Read3 | tk(tk_ASR_REG); -const u32 T_ADC_REG = T_Write0 | T_Read0 | T_Read3 | tk(tk_ADC_REG); -const u32 T_SBC_REG = T_Write0 | T_Read0 | T_Read3 | tk(tk_SBC_REG); -const u32 T_ROR_REG = T_Write0 | T_Read0 | T_Read3 | tk(tk_ROR_REG); -const u32 T_TST_REG = T_Read0 | T_Read3 | tk(tk_TST_REG); -const u32 T_NEG_REG = T_Write0 | T_Read3 | tk(tk_NEG_REG); -const u32 T_CMP_REG = T_Read0 | T_Read3 | tk(tk_CMP_REG); -const u32 T_CMN_REG = T_Read0 | T_Read3 | tk(tk_CMN_REG); -const u32 T_ORR_REG = T_Write0 | T_Read0 | T_Read3 | tk(tk_ORR_REG); -const u32 T_MUL_REG = T_Write0 | T_Read0 | T_Read3 | tk(tk_MUL_REG); -const u32 T_BIC_REG = T_Write0 | T_Read0 | T_Read3 | tk(tk_BIC_REG); -const u32 T_MVN_REG = T_Write0 | T_Read3 | tk(tk_MVN_REG); +const u32 T_LSL_IMM = T_SetNZ | T_SetMaybeC | T_Write0 | T_Read3 | tk(tk_LSL_IMM); +const u32 T_LSR_IMM = T_SetNZ | T_SetMaybeC | T_Write0 | T_Read3 | tk(tk_LSR_IMM); +const u32 T_ASR_IMM = T_SetNZ | T_SetMaybeC | T_Write0 | T_Read3 | tk(tk_ASR_IMM); + +const u32 T_ADD_REG_ = T_SetNZ | T_SetCV | T_Write0 | T_Read3 | T_Read6 | tk(tk_ADD_REG_); +const u32 T_SUB_REG_ = T_SetNZ | T_SetCV | T_Write0 | T_Read3 | T_Read6 | tk(tk_SUB_REG_); +const u32 T_ADD_IMM_ = T_SetNZ | T_SetCV | T_Write0 | T_Read3 | tk(tk_ADD_IMM_); +const u32 T_SUB_IMM_ = T_SetNZ | T_SetCV | T_Write0 | T_Read3 | tk(tk_SUB_IMM_); + +const u32 T_MOV_IMM = T_SetNZ | T_Write8 | tk(tk_MOV_IMM); +const u32 T_CMP_IMM = T_SetNZ | T_SetCV | T_Write8 | tk(tk_CMP_IMM); +const u32 T_ADD_IMM = T_SetNZ | T_SetCV | T_Write8 | T_Read8 | tk(tk_ADD_IMM); +const u32 T_SUB_IMM = T_SetNZ | T_SetCV | T_Write8 | T_Read8 | tk(tk_SUB_IMM); + +const u32 T_AND_REG = T_SetNZ | T_Write0 | T_Read0 | T_Read3 | tk(tk_AND_REG); +const u32 T_EOR_REG = T_SetNZ | T_Write0 | T_Read0 | T_Read3 | tk(tk_EOR_REG); +const u32 T_LSL_REG = T_SetNZ | T_SetMaybeC | T_Write0 | T_Read0 | T_Read3 | tk(tk_LSL_REG); +const u32 T_LSR_REG = T_SetNZ | T_SetMaybeC | T_Write0 | T_Read0 | T_Read3 | tk(tk_LSR_REG); +const u32 T_ASR_REG = T_SetNZ | T_SetMaybeC | T_Write0 | T_Read0 | T_Read3 | tk(tk_ASR_REG); +const u32 T_ADC_REG = T_ReadC | T_SetNZ | T_SetCV | T_Write0 | T_Read0 | T_Read3 | tk(tk_ADC_REG); +const u32 T_SBC_REG = T_ReadC | T_SetNZ | T_SetCV | T_Write0 | T_Read0 | T_Read3 | tk(tk_SBC_REG); +const u32 T_ROR_REG = T_SetNZ | T_SetMaybeC | T_Write0 | T_Read0 | T_Read3 | tk(tk_ROR_REG); +const u32 T_TST_REG = T_SetNZ | T_Read0 | T_Read3 | tk(tk_TST_REG); +const u32 T_NEG_REG = T_SetNZ | T_SetCV | T_Write0 | T_Read3 | tk(tk_NEG_REG); +const u32 T_CMP_REG = T_SetNZ | T_SetCV | T_Read0 | T_Read3 | tk(tk_CMP_REG); +const u32 T_CMN_REG = T_SetNZ | T_SetCV | T_Read0 | T_Read3 | tk(tk_CMN_REG); +const u32 T_ORR_REG = T_SetNZ | T_Write0 | T_Read0 | T_Read3 | tk(tk_ORR_REG); +const u32 T_MUL_REG = T_SetNZ | T_Write0 | T_Read0 | T_Read3 | tk(tk_MUL_REG); +const u32 T_BIC_REG = T_SetNZ | T_Write0 | T_Read0 | T_Read3 | tk(tk_BIC_REG); +const u32 T_MVN_REG = T_SetNZ | T_Write0 | T_Read3 | tk(tk_MVN_REG); const u32 T_ADD_HIREG = T_WriteHi0 | T_ReadHi0 | T_ReadHi3 | tk(tk_ADD_HIREG); -const u32 T_CMP_HIREG = T_ReadHi0 | T_ReadHi3 | tk(tk_CMP_HIREG); +const u32 T_CMP_HIREG = T_SetNZ | T_SetCV | T_ReadHi0 | T_ReadHi3 | tk(tk_CMP_HIREG); const u32 T_MOV_HIREG = T_WriteHi0 | T_ReadHi3 | tk(tk_MOV_HIREG); const u32 T_ADD_PCREL = 
T_Write8 | tk(tk_ADD_PCREL); @@ -268,10 +285,20 @@ const u32 T_SVC = T_BranchAlways | T_WriteR14 | tk(tk_SVC); Info Decode(bool thumb, u32 num, u32 instr) { + const u8 FlagsReadPerCond[7] = { + flag_Z, + flag_C, + flag_N, + flag_V, + flag_C | flag_Z, + flag_N | flag_V, + flag_Z | flag_N | flag_V}; + Info res = {0}; if (thumb) { u32 data = THUMBInstrTable[(instr >> 6) & 0x3FF]; + res.Kind = (data >> 20) & 0x3F; if (data & T_Read0) res.SrcRegs |= 1 << (instr & 0x7); @@ -309,7 +336,18 @@ Info Decode(bool thumb, u32 num, u32 instr) if (data & T_PopPC && instr & (1 << 8)) res.DstRegs |= 1 << 15; - res.Kind = (data >> 16) & 0x3F; + if (data & T_SetNZ) + res.WriteFlags |= flag_N | flag_Z; + if (data & T_SetCV) + res.WriteFlags |= flag_C | flag_V; + if (data & T_SetMaybeC) + res.WriteFlags |= flag_C << 4; + if (data & T_ReadC) + res.ReadFlags |= flag_C; + + if (res.Kind == tk_BCOND) + res.ReadFlags |= FlagsReadPerCond[(instr >> 9) & 0x7]; + res.EndBlock = res.Branches(); return res; @@ -323,7 +361,7 @@ Info Decode(bool thumb, u32 num, u32 instr) if (data & A_UnkOnARM7 && num != 0) data = A_UNK; - res.Kind = (data >> 13) & 0x1FF; + res.Kind = (data >> 18) & 0x1FF; if (res.Kind == ak_MCR) { @@ -382,6 +420,26 @@ Info Decode(bool thumb, u32 num, u32 instr) if (res.Kind == ak_LDM) res.DstRegs |= instr & (1 << 15); // this is right + if (data & A_SetNZ) + res.WriteFlags |= flag_N | flag_Z; + if (data & A_SetCV) + res.WriteFlags |= flag_C | flag_V; + if (data & A_SetMaybeC) + res.WriteFlags |= flag_C << 4; + if ((data & A_MulFlags) && (instr & (1 << 20))) + res.WriteFlags |= flag_N | flag_Z; + if (data & A_ReadC) + res.ReadFlags |= flag_C; + if ((data & A_RRXReadC) && !((instr >> 7) & 0x1F)) + res.ReadFlags |= flag_C; + + if ((instr >> 28) < 0xE) + { + // make non conditional flag sets conditional + res.WriteFlags = res.WriteFlags | (res.WriteFlags << 4); + res.ReadFlags |= FlagsReadPerCond[instr >> 29]; + } + res.EndBlock |= res.Branches(); return res; diff --git a/src/ARM_InstrInfo.h b/src/ARM_InstrInfo.h index 4fe9b10..5336837 100644 --- a/src/ARM_InstrInfo.h +++ b/src/ARM_InstrInfo.h @@ -215,11 +215,24 @@ enum tk_Count }; +enum +{ + flag_N = 1 << 3, + flag_Z = 1 << 2, + flag_C = 1 << 1, + flag_V = 1 << 0, +}; + struct Info { u16 DstRegs, SrcRegs; u16 Kind; + u8 ReadFlags; + // lower 4 bits - set always + // upper 4 bits - might set flag + u8 WriteFlags; + bool EndBlock; bool Branches() { diff --git a/src/libui_sdl/main.cpp b/src/libui_sdl/main.cpp index 0066668..c3db88d 100644 --- a/src/libui_sdl/main.cpp +++ b/src/libui_sdl/main.cpp @@ -2675,6 +2675,8 @@ void RecreateMainWindow(bool opengl) int main(int argc, char** argv) { + freopen("miauz.txt", "w", stdout); + srand(time(NULL)); printf("melonDS " MELONDS_VERSION "\n"); -- cgit v1.2.3 From ea562d2fec9f4ab73e9ff3f519ff5ecb65736cd7 Mon Sep 17 00:00:00 2001 From: RSDuck Date: Sun, 25 Aug 2019 13:06:27 +0200 Subject: fixes for flag optimisation --- src/ARMJIT.cpp | 1 + src/ARMJIT_x64/ARMJIT_ALU.cpp | 2 +- src/ARM_InstrInfo.cpp | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) (limited to 'src/ARM_InstrInfo.cpp') diff --git a/src/ARMJIT.cpp b/src/ARMJIT.cpp index 3b6bc2e..5d92e47 100644 --- a/src/ARMJIT.cpp +++ b/src/ARMJIT.cpp @@ -163,6 +163,7 @@ CompiledBlock CompileBlock(ARM* cpu) { r15 += thumb ? 
2 : 4; + instrs[i].SetFlags = 0; instrs[i].Instr = nextInstr[0]; instrs[i].NextInstr[0] = nextInstr[0] = nextInstr[1]; diff --git a/src/ARMJIT_x64/ARMJIT_ALU.cpp b/src/ARMJIT_x64/ARMJIT_ALU.cpp index 6a7d711..f868ddf 100644 --- a/src/ARMJIT_x64/ARMJIT_ALU.cpp +++ b/src/ARMJIT_x64/ARMJIT_ALU.cpp @@ -387,7 +387,7 @@ void Compiler::Comp_RetriveFlags(bool sign, bool retriveCV, bool carryUsed) AND(32, R(RCPSR), Imm32(0x3FFFFFFF & ~(carryUsed << 29) & ~((retriveCV ? 3 : 0) << 28))); OR(32, R(RCPSR), R(RSCRATCH)); } - else + else if (carryUsed || retriveCV) { SHL(32, R(RSCRATCH2), Imm8(carryOnly ? 29 : 28)); AND(32, R(RCPSR), Imm32(0xFFFFFFFF & ~(carryUsed << 29) & ~((retriveCV ? 3 : 0) << 28))); diff --git a/src/ARM_InstrInfo.cpp b/src/ARM_InstrInfo.cpp index ea6d827..3634c35 100644 --- a/src/ARM_InstrInfo.cpp +++ b/src/ARM_InstrInfo.cpp @@ -436,7 +436,7 @@ Info Decode(bool thumb, u32 num, u32 instr) if ((instr >> 28) < 0xE) { // make non conditional flag sets conditional - res.WriteFlags = res.WriteFlags | (res.WriteFlags << 4); + res.WriteFlags = (res.WriteFlags | (res.WriteFlags << 4)) & 0xF0; res.ReadFlags |= FlagsReadPerCond[instr >> 29]; } -- cgit v1.2.3 From 2ef776883f286f938fe03700780544c56867e467 Mon Sep 17 00:00:00 2001 From: RSDuck Date: Sun, 8 Sep 2019 14:09:00 +0200 Subject: more fixes for flag optimisation + small cycle counting optimisation --- src/ARMJIT_x64/ARMJIT_Branch.cpp | 4 ++ src/ARMJIT_x64/ARMJIT_Compiler.cpp | 28 ++++++++--- src/ARMJIT_x64/ARMJIT_Compiler.h | 2 + src/ARMJIT_x64/ARMJIT_LoadStore.cpp | 4 ++ src/ARM_InstrInfo.cpp | 92 ++++++++++++++++++++++--------------- 5 files changed, 86 insertions(+), 44 deletions(-) (limited to 'src/ARM_InstrInfo.cpp') diff --git a/src/ARMJIT_x64/ARMJIT_Branch.cpp b/src/ARMJIT_x64/ARMJIT_Branch.cpp index 30b18d7..c0a8f1f 100644 --- a/src/ARMJIT_x64/ARMJIT_Branch.cpp +++ b/src/ARMJIT_x64/ARMJIT_Branch.cpp @@ -19,6 +19,8 @@ void Compiler::Comp_JumpTo(u32 addr, bool forceNonConstantCycles) // it's not completely safe to assume stuff like, which instructions to preload // we'll see how it works out + IrregularCycles = true; + u32 newPC; u32 cycles = 0; @@ -140,6 +142,8 @@ void Compiler::Comp_JumpTo(u32 addr, bool forceNonConstantCycles) void Compiler::Comp_JumpTo(Gen::X64Reg addr, bool restoreCPSR) { + IrregularCycles = true; + BitSet16 hiRegsLoaded(RegCache.DirtyRegs & 0xFF00); bool previouslyDirty = CPSRDirty; SaveCPSR(); diff --git a/src/ARMJIT_x64/ARMJIT_Compiler.cpp b/src/ARMJIT_x64/ARMJIT_Compiler.cpp index 5e05446..d585f39 100644 --- a/src/ARMJIT_x64/ARMJIT_Compiler.cpp +++ b/src/ARMJIT_x64/ARMJIT_Compiler.cpp @@ -447,6 +447,8 @@ CompiledBlock Compiler::CompileBlock(ARM* cpu, FetchedInstr instrs[], int instrs Comp_AddCycles_C(); else { + IrregularCycles = false; + FixupBranch skipExecute; if (cond < 0xE) skipExecute = CheckCondition(cond); @@ -463,13 +465,19 @@ CompiledBlock Compiler::CompileBlock(ARM* cpu, FetchedInstr instrs[], int instrs if (CurInstr.Cond() < 0xE) { - FixupBranch skipFailed = J(); - SetJumpTarget(skipExecute); + if (IrregularCycles) + { + FixupBranch skipFailed = J(); + SetJumpTarget(skipExecute); - Comp_AddCycles_C(); + Comp_AddCycles_C(true); - SetJumpTarget(skipFailed); + SetJumpTarget(skipFailed); + } + else + SetJumpTarget(skipExecute); } + } } @@ -518,8 +526,16 @@ void Compiler::Comp_AddCycles_CI(Gen::X64Reg i, int add) NDS::ARM7MemTimings[CurInstr.CodeCycles][Thumb ? 0 : 2] : ((R15 & 0x2) ? 
0 : CurInstr.CodeCycles); - LEA(32, RSCRATCH, MDisp(i, add + cycles)); - ADD(32, MDisp(RCPU, offsetof(ARM, Cycles)), R(RSCRATCH)); + if (!Thumb && CurInstr.Cond() < 0xE) + { + LEA(32, RSCRATCH, MDisp(i, add + cycles)); + ADD(32, MDisp(RCPU, offsetof(ARM, Cycles)), R(RSCRATCH)); + } + else + { + ConstantCycles += i + cycles; + ADD(32, MDisp(RCPU, offsetof(ARM, Cycles)), R(i)); + } } } \ No newline at end of file diff --git a/src/ARMJIT_x64/ARMJIT_Compiler.h b/src/ARMJIT_x64/ARMJIT_Compiler.h index 8861884..a62f043 100644 --- a/src/ARMJIT_x64/ARMJIT_Compiler.h +++ b/src/ARMJIT_x64/ARMJIT_Compiler.h @@ -139,6 +139,8 @@ public: u8* ResetStart; u32 CodeMemSize; + bool IrregularCycles; + void* MemoryFuncs9[3][2]; void* MemoryFuncs7[3][2][2]; diff --git a/src/ARMJIT_x64/ARMJIT_LoadStore.cpp b/src/ARMJIT_x64/ARMJIT_LoadStore.cpp index 3b4cb7d..bf8280d 100644 --- a/src/ARMJIT_x64/ARMJIT_LoadStore.cpp +++ b/src/ARMJIT_x64/ARMJIT_LoadStore.cpp @@ -438,6 +438,8 @@ void* Compiler::Gen_MemoryRoutineSeq7(bool store, bool preinc, bool codeMainRAM) void Compiler::Comp_MemAccess(OpArg rd, bool signExtend, bool store, int size) { + IrregularCycles = true; + if (store) MOV(32, R(ABI_PARAM2), rd); u32 cycles = Num @@ -459,6 +461,8 @@ void Compiler::Comp_MemAccess(OpArg rd, bool signExtend, bool store, int size) s32 Compiler::Comp_MemAccessBlock(int rn, BitSet16 regs, bool store, bool preinc, bool decrement, bool usermode) { + IrregularCycles = true; + int regsCount = regs.Count(); if (decrement) diff --git a/src/ARM_InstrInfo.cpp b/src/ARM_InstrInfo.cpp index 3634c35..9239e29 100644 --- a/src/ARM_InstrInfo.cpp +++ b/src/ARM_InstrInfo.cpp @@ -5,7 +5,7 @@ namespace ARMInstrInfo { -#define ak(x) ((x) << 18) +#define ak(x) ((x) << 21) enum { A_Read0 = 1 << 0, @@ -33,13 +33,21 @@ enum { A_MulFlags = 1 << 15, A_ReadC = 1 << 16, A_RRXReadC = 1 << 17, + A_StaticShiftSetC = 1 << 18, + A_SetC = 1 << 19, + + A_WriteMemory = 1 << 20, }; #define A_BIOP A_Read16 #define A_MONOOP 0 -#define A_ARITH A_SetCV -#define A_LOGIC A_SetMaybeC +#define A_ARITH_LSL_IMM A_SetCV +#define A_LOGIC_LSL_IMM A_StaticShiftSetC +#define A_ARITH_SHIFT_IMM A_SetCV +#define A_LOGIC_SHIFT_IMM A_SetC +#define A_ARITH_SHIFT_REG A_SetCV +#define A_LOGIC_SHIFT_REG A_SetMaybeC #define A_ARITH_IMM A_SetCV #define A_LOGIC_IMM 0 @@ -55,14 +63,14 @@ enum { const u32 A_##x##_REG_ROR_REG = A_Write12 | c | A_##k | A_Read0 | A_Read8 | ak(ak_##x##_REG_ROR_REG); \ \ const u32 A_##x##_IMM_S = A_SetNZ | c | A_##a##_IMM | A_Write12 | A_##k | ak(ak_##x##_IMM_S); \ - const u32 A_##x##_REG_LSL_IMM_S = A_SetNZ | c | A_##a | A_Write12 | A_##k | A_Read0 | ak(ak_##x##_REG_LSL_IMM_S); \ - const u32 A_##x##_REG_LSR_IMM_S = A_SetNZ | c | A_##a | A_Write12 | A_##k | A_Read0 | ak(ak_##x##_REG_LSR_IMM_S); \ - const u32 A_##x##_REG_ASR_IMM_S = A_SetNZ | c | A_##a | A_Write12 | A_##k | A_Read0 | ak(ak_##x##_REG_ASR_IMM_S); \ - const u32 A_##x##_REG_ROR_IMM_S = A_RRXReadC | A_SetNZ | c | A_##a | A_Write12 | A_##k | A_Read0 | ak(ak_##x##_REG_ROR_IMM_S); \ - const u32 A_##x##_REG_LSL_REG_S = A_SetNZ | c | A_##a | A_Write12 | A_##k | A_Read0 | A_Read8 | ak(ak_##x##_REG_LSL_REG_S); \ - const u32 A_##x##_REG_LSR_REG_S = A_SetNZ | c | A_##a | A_Write12 | A_##k | A_Read0 | A_Read8 | ak(ak_##x##_REG_LSR_REG_S); \ - const u32 A_##x##_REG_ASR_REG_S = A_SetNZ | c | A_##a | A_Write12 | A_##k | A_Read0 | A_Read8 | ak(ak_##x##_REG_ASR_REG_S); \ - const u32 A_##x##_REG_ROR_REG_S = A_SetNZ | c | A_##a | A_Write12 | A_##k | A_Read0 | A_Read8 | ak(ak_##x##_REG_ROR_REG_S); + const u32 
A_##x##_REG_LSL_IMM_S = A_SetNZ | c | A_##a##_LSL_IMM | A_Write12 | A_##k | A_Read0 | ak(ak_##x##_REG_LSL_IMM_S); \ + const u32 A_##x##_REG_LSR_IMM_S = A_SetNZ | c | A_##a##_SHIFT_IMM | A_Write12 | A_##k | A_Read0 | ak(ak_##x##_REG_LSR_IMM_S); \ + const u32 A_##x##_REG_ASR_IMM_S = A_SetNZ | c | A_##a##_SHIFT_IMM | A_Write12 | A_##k | A_Read0 | ak(ak_##x##_REG_ASR_IMM_S); \ + const u32 A_##x##_REG_ROR_IMM_S = A_RRXReadC | A_SetNZ | c | A_##a##_SHIFT_IMM | A_Write12 | A_##k | A_Read0 | ak(ak_##x##_REG_ROR_IMM_S); \ + const u32 A_##x##_REG_LSL_REG_S = A_SetNZ | c | A_##a##_SHIFT_REG | A_Write12 | A_##k | A_Read0 | A_Read8 | ak(ak_##x##_REG_LSL_REG_S); \ + const u32 A_##x##_REG_LSR_REG_S = A_SetNZ | c | A_##a##_SHIFT_REG | A_Write12 | A_##k | A_Read0 | A_Read8 | ak(ak_##x##_REG_LSR_REG_S); \ + const u32 A_##x##_REG_ASR_REG_S = A_SetNZ | c | A_##a##_SHIFT_REG | A_Write12 | A_##k | A_Read0 | A_Read8 | ak(ak_##x##_REG_ASR_REG_S); \ + const u32 A_##x##_REG_ROR_REG_S = A_SetNZ | c | A_##a##_SHIFT_REG | A_Write12 | A_##k | A_Read0 | A_Read8 | ak(ak_##x##_REG_ROR_REG_S); A_IMPLEMENT_ALU_OP(AND,BIOP,LOGIC,0) A_IMPLEMENT_ALU_OP(EOR,BIOP,LOGIC,0) @@ -80,15 +88,15 @@ A_IMPLEMENT_ALU_OP(MVN,MONOOP,LOGIC,0) const u32 A_MOV_REG_LSL_IMM_DBG = A_MOV_REG_LSL_IMM; #define A_IMPLEMENT_ALU_TEST(x,a) \ - const u32 A_##x##_IMM = A_SetNZ | A_Read16 | A_##a | A_Read0 | ak(ak_##x##_IMM); \ - const u32 A_##x##_REG_LSL_IMM = A_SetNZ | A_Read16 | A_##a | A_Read0 | ak(ak_##x##_REG_LSL_IMM); \ - const u32 A_##x##_REG_LSR_IMM = A_SetNZ | A_Read16 | A_##a | A_Read0 | ak(ak_##x##_REG_LSR_IMM); \ - const u32 A_##x##_REG_ASR_IMM = A_SetNZ | A_Read16 | A_##a | A_Read0 | ak(ak_##x##_REG_ASR_IMM); \ - const u32 A_##x##_REG_ROR_IMM = A_RRXReadC | A_SetNZ | A_Read16 | A_##a | A_Read0 | ak(ak_##x##_REG_ROR_IMM); \ - const u32 A_##x##_REG_LSL_REG = A_SetNZ | A_Read16 | A_##a | A_Read0 | A_Read8 | ak(ak_##x##_REG_LSL_REG); \ - const u32 A_##x##_REG_LSR_REG = A_SetNZ | A_Read16 | A_##a | A_Read0 | A_Read8 | ak(ak_##x##_REG_LSR_REG); \ - const u32 A_##x##_REG_ASR_REG = A_SetNZ | A_Read16 | A_##a | A_Read0 | A_Read8 | ak(ak_##x##_REG_ASR_REG); \ - const u32 A_##x##_REG_ROR_REG = A_SetNZ | A_Read16 | A_##a | A_Read0 | A_Read8 | ak(ak_##x##_REG_ROR_REG); + const u32 A_##x##_IMM = A_SetNZ | A_Read16 | A_##a##_IMM | ak(ak_##x##_IMM); \ + const u32 A_##x##_REG_LSL_IMM = A_SetNZ | A_Read16 | A_##a##_LSL_IMM | A_Read0 | ak(ak_##x##_REG_LSL_IMM); \ + const u32 A_##x##_REG_LSR_IMM = A_SetNZ | A_Read16 | A_##a##_SHIFT_IMM | A_Read0 | ak(ak_##x##_REG_LSR_IMM); \ + const u32 A_##x##_REG_ASR_IMM = A_SetNZ | A_Read16 | A_##a##_SHIFT_IMM | A_Read0 | ak(ak_##x##_REG_ASR_IMM); \ + const u32 A_##x##_REG_ROR_IMM = A_RRXReadC | A_SetNZ | A_Read16 | A_##a##_SHIFT_IMM | A_Read0 | ak(ak_##x##_REG_ROR_IMM); \ + const u32 A_##x##_REG_LSL_REG = A_SetNZ | A_Read16 | A_##a##_SHIFT_REG | A_Read0 | A_Read8 | ak(ak_##x##_REG_LSL_REG); \ + const u32 A_##x##_REG_LSR_REG = A_SetNZ | A_Read16 | A_##a##_SHIFT_REG | A_Read0 | A_Read8 | ak(ak_##x##_REG_LSR_REG); \ + const u32 A_##x##_REG_ASR_REG = A_SetNZ | A_Read16 | A_##a##_SHIFT_REG | A_Read0 | A_Read8 | ak(ak_##x##_REG_ASR_REG); \ + const u32 A_##x##_REG_ROR_REG = A_SetNZ | A_Read16 | A_##a##_SHIFT_REG | A_Read0 | A_Read8 | ak(ak_##x##_REG_ROR_REG); A_IMPLEMENT_ALU_TEST(TST,LOGIC) A_IMPLEMENT_ALU_TEST(TEQ,LOGIC) @@ -115,20 +123,20 @@ const u32 A_QDADD = A_Write12 | A_Read0 | A_Read16 | A_UnkOnARM7 | ak(ak_QDADD); const u32 A_QDSUB = A_Write12 | A_Read0 | A_Read16 | A_UnkOnARM7 | ak(ak_QDSUB); #define A_LDR A_Write12 
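// A_WriteMemory (introduced just below) presumably tags instructions that store
// to memory; its consumer isn't part of this excerpt, but it likely lets later
// passes tell which blocks can write to (possibly compiled) memory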
-#define A_STR A_Read12 +#define A_STR A_Read12 | A_WriteMemory #define A_IMPLEMENT_WB_LDRSTR(x,k) \ const u32 A_##x##_IMM = A_##k | A_Read16 | A_MemWriteback | ak(ak_##x##_IMM); \ const u32 A_##x##_REG_LSL = A_##k | A_Read16 | A_MemWriteback | A_Read0 | ak(ak_##x##_REG_LSL); \ const u32 A_##x##_REG_LSR = A_##k | A_Read16 | A_MemWriteback | A_Read0 | ak(ak_##x##_REG_LSR); \ const u32 A_##x##_REG_ASR = A_##k | A_Read16 | A_MemWriteback | A_Read0 | ak(ak_##x##_REG_ASR); \ - const u32 A_##x##_REG_ROR = A_##k | A_Read16 | A_MemWriteback | A_Read0 | ak(ak_##x##_REG_ROR); \ + const u32 A_##x##_REG_ROR = A_##k | A_RRXReadC | A_Read16 | A_MemWriteback | A_Read0 | ak(ak_##x##_REG_ROR); \ \ const u32 A_##x##_POST_IMM = A_##k | A_Read16 | A_Write16 | ak(ak_##x##_POST_IMM); \ const u32 A_##x##_POST_REG_LSL = A_##k | A_Read16 | A_Write16 | A_Read0 | ak(ak_##x##_POST_REG_LSL); \ const u32 A_##x##_POST_REG_LSR = A_##k | A_Read16 | A_Write16 | A_Read0 | ak(ak_##x##_POST_REG_LSR); \ const u32 A_##x##_POST_REG_ASR = A_##k | A_Read16 | A_Write16 | A_Read0 | ak(ak_##x##_POST_REG_ASR); \ - const u32 A_##x##_POST_REG_ROR = A_##k | A_Read16 | A_Write16 | A_Read0 | ak(ak_##x##_POST_REG_ROR); + const u32 A_##x##_POST_REG_ROR = A_##k | A_RRXReadC | A_Read16 | A_Write16 | A_Read0 | ak(ak_##x##_POST_REG_ROR); A_IMPLEMENT_WB_LDRSTR(STR,STR) A_IMPLEMENT_WB_LDRSTR(STRB,STR) @@ -136,7 +144,7 @@ A_IMPLEMENT_WB_LDRSTR(LDR,LDR) A_IMPLEMENT_WB_LDRSTR(LDRB,LDR) #define A_LDRD A_Write12Double -#define A_STRD A_Read12Double +#define A_STRD A_Read12Double | A_WriteMemory #define A_IMPLEMENT_HD_LDRSTR(x,k) \ const u32 A_##x##_IMM = A_##k | A_Read16 | A_MemWriteback | ak(ak_##x##_IMM); \ @@ -151,11 +159,11 @@ A_IMPLEMENT_HD_LDRSTR(LDRH,LDR) A_IMPLEMENT_HD_LDRSTR(LDRSB,LDR) A_IMPLEMENT_HD_LDRSTR(LDRSH,LDR) -const u32 A_SWP = A_Write12 | A_Read16 | A_Read0 | ak(ak_SWP); -const u32 A_SWPB = A_Write12 | A_Read16 | A_Read0 | ak(ak_SWPB); +const u32 A_SWP = A_Write12 | A_Read16 | A_Read0 | A_WriteMemory | ak(ak_SWP); +const u32 A_SWPB = A_Write12 | A_Read16 | A_Read0 | A_WriteMemory | ak(ak_SWPB); const u32 A_LDM = A_Read16 | A_MemWriteback | ak(ak_LDM); -const u32 A_STM = A_Read16 | A_MemWriteback | ak(ak_STM); +const u32 A_STM = A_Read16 | A_MemWriteback | A_WriteMemory | ak(ak_STM); const u32 A_B = A_BranchAlways | ak(ak_B); const u32 A_BL = A_BranchAlways | A_Link | ak(ak_BL); @@ -173,7 +181,7 @@ const u32 A_SVC = A_BranchAlways | A_Link | ak(ak_SVC); // THUMB -#define tk(x) ((x) << 20) +#define tk(x) ((x) << 21) enum { T_Read0 = 1 << 0, @@ -200,12 +208,13 @@ enum { T_SetNZ = 1 << 16, T_SetCV = 1 << 17, T_SetMaybeC = 1 << 18, - T_ReadC = 1 << 19 + T_ReadC = 1 << 19, + T_SetC = 1 << 20, }; const u32 T_LSL_IMM = T_SetNZ | T_SetMaybeC | T_Write0 | T_Read3 | tk(tk_LSL_IMM); -const u32 T_LSR_IMM = T_SetNZ | T_SetMaybeC | T_Write0 | T_Read3 | tk(tk_LSR_IMM); -const u32 T_ASR_IMM = T_SetNZ | T_SetMaybeC | T_Write0 | T_Read3 | tk(tk_ASR_IMM); +const u32 T_LSR_IMM = T_SetNZ | T_SetC | T_Write0 | T_Read3 | tk(tk_LSR_IMM); +const u32 T_ASR_IMM = T_SetNZ | T_SetC | T_Write0 | T_Read3 | tk(tk_ASR_IMM); const u32 T_ADD_REG_ = T_SetNZ | T_SetCV | T_Write0 | T_Read3 | T_Read6 | tk(tk_ADD_REG_); const u32 T_SUB_REG_ = T_SetNZ | T_SetCV | T_Write0 | T_Read3 | T_Read6 | tk(tk_SUB_REG_); @@ -213,7 +222,7 @@ const u32 T_ADD_IMM_ = T_SetNZ | T_SetCV | T_Write0 | T_Read3 | tk(tk_ADD_IMM_); const u32 T_SUB_IMM_ = T_SetNZ | T_SetCV | T_Write0 | T_Read3 | tk(tk_SUB_IMM_); const u32 T_MOV_IMM = T_SetNZ | T_Write8 | tk(tk_MOV_IMM); -const u32 T_CMP_IMM = 
T_SetNZ | T_SetCV | T_Write8 | tk(tk_CMP_IMM); +const u32 T_CMP_IMM = T_SetNZ | T_SetCV | T_Read8 | tk(tk_CMP_IMM); const u32 T_ADD_IMM = T_SetNZ | T_SetCV | T_Write8 | T_Read8 | tk(tk_ADD_IMM); const u32 T_SUB_IMM = T_SetNZ | T_SetCV | T_Write8 | T_Read8 | tk(tk_SUB_IMM); @@ -240,7 +249,7 @@ const u32 T_MOV_HIREG = T_WriteHi0 | T_ReadHi3 | tk(tk_MOV_HIREG); const u32 T_ADD_PCREL = T_Write8 | tk(tk_ADD_PCREL); const u32 T_ADD_SPREL = T_Write8 | T_ReadR13 | tk(tk_ADD_SPREL); -const u32 T_ADD_SP = T_WriteR13 | tk(tk_ADD_SP); +const u32 T_ADD_SP = T_WriteR13 | T_ReadR13 | tk(tk_ADD_SP); const u32 T_LDR_PCREL = T_Write8 | tk(tk_LDR_PCREL); @@ -298,7 +307,7 @@ Info Decode(bool thumb, u32 num, u32 instr) if (thumb) { u32 data = THUMBInstrTable[(instr >> 6) & 0x3FF]; - res.Kind = (data >> 20) & 0x3F; + res.Kind = (data >> 21) & 0x3F; if (data & T_Read0) res.SrcRegs |= 1 << (instr & 0x7); @@ -344,12 +353,14 @@ Info Decode(bool thumb, u32 num, u32 instr) res.WriteFlags |= flag_C << 4; if (data & T_ReadC) res.ReadFlags |= flag_C; + if (data & T_SetC) + res.WriteFlags |= flag_C; + + res.EndBlock |= res.Branches(); if (res.Kind == tk_BCOND) res.ReadFlags |= FlagsReadPerCond[(instr >> 9) & 0x7]; - res.EndBlock = res.Branches(); - return res; } else @@ -361,7 +372,7 @@ Info Decode(bool thumb, u32 num, u32 instr) if (data & A_UnkOnARM7 && num != 0) data = A_UNK; - res.Kind = (data >> 18) & 0x1FF; + res.Kind = (data >> 21) & 0x1FF; if (res.Kind == ak_MCR) { @@ -369,7 +380,7 @@ Info Decode(bool thumb, u32 num, u32 instr) u32 cm = instr & 0xF; u32 cpinfo = (instr >> 5) & 0x7; u32 id = (cn<<8)|(cm<<4)|cpinfo; - if (id == 0x704 || id == 0x782) + if (id == 0x704 || id == 0x782 || id == 0x750 || id == 0x751 || id == 0x752) res.EndBlock |= true; } if (res.Kind == ak_MCR || res.Kind == ak_MRC) @@ -420,6 +431,9 @@ Info Decode(bool thumb, u32 num, u32 instr) if (res.Kind == ak_LDM) res.DstRegs |= instr & (1 << 15); // this is right + if (res.Kind == ak_STM) + res.SrcRegs |= instr & (1 << 15); + if (data & A_SetNZ) res.WriteFlags |= flag_N | flag_Z; if (data & A_SetCV) @@ -432,6 +446,8 @@ Info Decode(bool thumb, u32 num, u32 instr) res.ReadFlags |= flag_C; if ((data & A_RRXReadC) && !((instr >> 7) & 0x1F)) res.ReadFlags |= flag_C; + if ((data & A_SetC) || (data & A_StaticShiftSetC) && ((instr >> 7) & 0x1F)) + res.WriteFlags |= flag_C; if ((instr >> 28) < 0xE) { -- cgit v1.2.3 From a687be9879e5cab4ea5d8646c8cf47c214b18856 Mon Sep 17 00:00:00 2001 From: RSDuck Date: Thu, 3 Oct 2019 01:10:59 +0200 Subject: new block cache and much more... 
- more reliable code invalidation detection - blocks aren't stopped at any branch, but are being followed if possible to get larger blocks - idle loop recognition - optimised literal loads, load/store cycle counting and loads/stores from constant addresses --- src/ARM.cpp | 44 ++- src/ARM.h | 16 +- src/ARMInterpreter.h | 9 + src/ARMJIT.cpp | 755 ++++++++++++++++++++++++++++++------ src/ARMJIT.h | 141 ++----- src/ARMJIT_Internal.h | 198 ++++++++++ src/ARMJIT_RegisterCache.h | 36 +- src/ARMJIT_x64/ARMJIT_ALU.cpp | 16 +- src/ARMJIT_x64/ARMJIT_Branch.cpp | 43 +- src/ARMJIT_x64/ARMJIT_Compiler.cpp | 184 +++++++-- src/ARMJIT_x64/ARMJIT_Compiler.h | 51 ++- src/ARMJIT_x64/ARMJIT_LoadStore.cpp | 629 ++++++++++++++---------------- src/ARM_InstrInfo.cpp | 47 ++- src/ARM_InstrInfo.h | 11 +- src/CP15.cpp | 12 +- src/Config.cpp | 2 + src/Config.h | 1 + src/NDS.cpp | 22 +- src/libui_sdl/DlgEmuSettings.cpp | 22 +- 19 files changed, 1550 insertions(+), 689 deletions(-) create mode 100644 src/ARMJIT_Internal.h (limited to 'src/ARM_InstrInfo.cpp') diff --git a/src/ARM.cpp b/src/ARM.cpp index e404943..423c940 100644 --- a/src/ARM.cpp +++ b/src/ARM.cpp @@ -580,21 +580,26 @@ void ARMv5::ExecuteJIT() return; } - ARMJIT::CompiledBlock block = ARMJIT::LookUpBlock<0>(instrAddr); - Cycles += (block ? block : ARMJIT::CompileBlock(this))(); + ARMJIT::JitBlockEntry block = ARMJIT::LookUpBlock<0>(instrAddr); + if (block) + Cycles += block(); + else + ARMJIT::CompileBlock(this); + + NDS::ARM9Timestamp += Cycles; + Cycles = 0; + if (IRQ) TriggerIRQ(); if (Halted) { - if (Halted == 1 && NDS::ARM9Timestamp < NDS::ARM9Target) + bool idleLoop = Halted & 0x20; + Halted &= ~0x20; + if ((Halted == 1 || idleLoop) && NDS::ARM9Timestamp < NDS::ARM9Target) { NDS::ARM9Timestamp = NDS::ARM9Target; } break; } - if (IRQ) TriggerIRQ(); - - NDS::ARM9Timestamp += Cycles; - Cycles = 0; } if (Halted == 2) @@ -710,23 +715,28 @@ void ARMv4::ExecuteJIT() printf("ARMv4 PC in non executable region %08X\n", R[15]); return; } - ARMJIT::CompiledBlock block = ARMJIT::LookUpBlock<1>(instrAddr); - Cycles += (block ? block : ARMJIT::CompileBlock(this))(); + + ARMJIT::JitBlockEntry block = ARMJIT::LookUpBlock<1>(instrAddr); + if (block) + Cycles += block(); + else + ARMJIT::CompileBlock(this); + + NDS::ARM7Timestamp += Cycles; + Cycles = 0; // TODO optimize this shit!!! 
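The reordering in both ExecuteJIT loops goes hand in hand with the new compile flow: CompileBlock() no longer returns an entry point to run immediately, because it now interprets every instruction while scanning it (so real cycle counts and branch targets are observed during compilation), and the freshly compiled block is simply picked up by the lookup on the next iteration. Condensed, the ARM7 side now works like this sketch, with names as in the patch; bit 0x20 of Halted is the new one-shot idle-loop signal set from compiled code:

    ARMJIT::JitBlockEntry block = ARMJIT::LookUpBlock<1>(instrAddr);
    if (block)
        Cycles += block();          // run previously compiled code
    else
        ARMJIT::CompileBlock(this); // compiles and interprets this pass

    NDS::ARM7Timestamp += Cycles;   // flush cycles before the halt check
    Cycles = 0;

    if (IRQ) TriggerIRQ();
    if (Halted)
    {
        bool idleLoop = Halted & 0x20;
        Halted &= ~0x20;
        if ((Halted == 1 || idleLoop) && NDS::ARM7Timestamp < NDS::ARM7Target)
            NDS::ARM7Timestamp = NDS::ARM7Target; // fast-forward to the next event
    }

Flushing the timestamp before the halt check is what makes the idle-loop fast-forward land on the correct target time.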
+ if (IRQ) TriggerIRQ(); if (Halted) { - if (Halted == 1 && NDS::ARM7Timestamp < NDS::ARM7Target) + bool idleLoop = Halted & 0x20; + Halted &= ~0x20; + if ((Halted == 1 || idleLoop) && NDS::ARM7Timestamp < NDS::ARM7Target) { NDS::ARM7Timestamp = NDS::ARM7Target; } break; } - - if (IRQ) TriggerIRQ(); - - NDS::ARM7Timestamp += Cycles; - Cycles = 0; } if (Halted == 2) @@ -736,6 +746,8 @@ void ARMv4::ExecuteJIT() void ARMv5::FillPipeline() { + SetupCodeMem(R[15]); + if (CPSR & 0x20) { if ((R[15] - 2) & 0x2) @@ -758,6 +770,8 @@ void ARMv5::FillPipeline() void ARMv4::FillPipeline() { + SetupCodeMem(R[15]); + if (CPSR & 0x20) { NextInstr[0] = CodeRead16(R[15] - 2); diff --git a/src/ARM.h b/src/ARM.h index 4d387bc..8a01068 100644 --- a/src/ARM.h +++ b/src/ARM.h @@ -299,7 +299,7 @@ public: { *val = NDS::ARM7Read8(addr); DataRegion = addr >> 24; - DataCycles = NDS::ARM7MemTimings[DataRegion][0]; + DataCycles = NDS::ARM7MemTimings[addr >> 15][0]; } void DataRead16(u32 addr, u32* val) @@ -308,7 +308,7 @@ public: *val = NDS::ARM7Read16(addr); DataRegion = addr >> 24; - DataCycles = NDS::ARM7MemTimings[DataRegion][0]; + DataCycles = NDS::ARM7MemTimings[addr >> 15][0]; } void DataRead32(u32 addr, u32* val) @@ -317,7 +317,7 @@ public: *val = NDS::ARM7Read32(addr); DataRegion = addr >> 24; - DataCycles = NDS::ARM7MemTimings[DataRegion][2]; + DataCycles = NDS::ARM7MemTimings[addr >> 15][2]; } void DataRead32S(u32 addr, u32* val) @@ -325,14 +325,14 @@ public: addr &= ~3; *val = NDS::ARM7Read32(addr); - DataCycles += NDS::ARM7MemTimings[DataRegion][3]; + DataCycles += NDS::ARM7MemTimings[addr >> 15][3]; } void DataWrite8(u32 addr, u8 val) { NDS::ARM7Write8(addr, val); DataRegion = addr >> 24; - DataCycles = NDS::ARM7MemTimings[DataRegion][0]; + DataCycles = NDS::ARM7MemTimings[addr >> 15][0]; } void DataWrite16(u32 addr, u16 val) @@ -341,7 +341,7 @@ public: NDS::ARM7Write16(addr, val); DataRegion = addr >> 24; - DataCycles = NDS::ARM7MemTimings[DataRegion][0]; + DataCycles = NDS::ARM7MemTimings[addr >> 15][0]; } void DataWrite32(u32 addr, u32 val) @@ -350,7 +350,7 @@ public: NDS::ARM7Write32(addr, val); DataRegion = addr >> 24; - DataCycles = NDS::ARM7MemTimings[DataRegion][2]; + DataCycles = NDS::ARM7MemTimings[addr >> 15][2]; } void DataWrite32S(u32 addr, u32 val) @@ -358,7 +358,7 @@ public: addr &= ~3; NDS::ARM7Write32(addr, val); - DataCycles += NDS::ARM7MemTimings[DataRegion][3]; + DataCycles += NDS::ARM7MemTimings[addr >> 15][3]; } diff --git a/src/ARMInterpreter.h b/src/ARMInterpreter.h index 7244238..2bf8167 100644 --- a/src/ARMInterpreter.h +++ b/src/ARMInterpreter.h @@ -28,6 +28,15 @@ namespace ARMInterpreter extern void (*ARMInstrTable[4096])(ARM* cpu); extern void (*THUMBInstrTable[1024])(ARM* cpu); +void A_MSR_IMM(ARM* cpu); +void A_MSR_REG(ARM* cpu); +void A_MRS(ARM* cpu); +void A_MCR(ARM* cpu); +void A_MRC(ARM* cpu); +void A_SVC(ARM* cpu); + +void T_SVC(ARM* cpu); + void A_BLX_IMM(ARM* cpu); // I'm a special one look at me } diff --git a/src/ARMJIT.cpp b/src/ARMJIT.cpp index 85cadf3..686bdd6 100644 --- a/src/ARMJIT.cpp +++ b/src/ARMJIT.cpp @@ -1,122 +1,137 @@ #include "ARMJIT.h" #include +#include #include "Config.h" +#include "ARMJIT_Internal.h" #include "ARMJIT_x64/ARMJIT_Compiler.h" +#include "ARMInterpreter_ALU.h" +#include "ARMInterpreter_LoadStore.h" +#include "ARMInterpreter_Branch.h" +#include "ARMInterpreter.h" + +#include "GPU3D.h" +#include "SPU.h" +#include "Wifi.h" + namespace ARMJIT { +#define JIT_DEBUGPRINT(msg, ...) 
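The tables that follow pack every executable region back to back into a single pseudo-physical space (0x518000 bytes in total, matching ExeMemSpaceSize in ARMJIT.h), so mirroring is folded away and two flat arrays suffice: FastBlockAccess for block entry points and CodeRanges for per-256-byte invalidation bookkeeping. AddrTranslate9 and AddrTranslate7 map each 32 KB (ARM9) or 16 KB (ARM7) page of the 28-bit address space to its offset in the packed space. A sketch of the resulting lookup path, mirroring the TranslateAddr/LookUpBlock templates this patch adds to ARMJIT.h:

    u32 TranslateAddr9(u32 addr)
    {
        // page table indexed with 32 KB granularity over the low 28 bits
        return AddrTranslate9[(addr & 0xFFFFFFF) >> 15] + (addr & 0x7FFF);
    }

    JitBlockEntry LookUpBlock9(u32 addr)
    {
        // instructions are at least 2-byte aligned, so halve the index
        return FastBlockAccess[TranslateAddr9(addr) / 2];
    }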
+ Compiler* compiler; -BlockCache cache; -#define DUP2(x) x, x +const u32 ExeMemRegionSizes[] = { + 0x8000, // Unmapped Region (dummy) + 0x8000, // ITCM + 4*1024*1024, // Main RAM + 0x8000, // SWRAM + 0xA4000, // LCDC + 0x8000, // ARM9 BIOS + 0x4000, // ARM7 BIOS + 0x10000, // ARM7 WRAM + 0x40000 // ARM7 WVRAM +}; -static ptrdiff_t JIT_MEM[2][32] = { - //arm9 - { - /* 0X*/ DUP2(offsetof(BlockCache, ARM9_ITCM)), - /* 1X*/ DUP2(offsetof(BlockCache, ARM9_ITCM)), // mirror - /* 2X*/ DUP2(offsetof(BlockCache, MainRAM)), - /* 3X*/ DUP2(offsetof(BlockCache, SWRAM)), - /* 4X*/ DUP2(-1), - /* 5X*/ DUP2(-1), - /* 6X*/ -1, - offsetof(BlockCache, ARM9_LCDC), // Plain ARM9-CPU Access (LCDC mode) (max 656KB) - /* 7X*/ DUP2(-1), - /* 8X*/ DUP2(-1), - /* 9X*/ DUP2(-1), - /* AX*/ DUP2(-1), - /* BX*/ DUP2(-1), - /* CX*/ DUP2(-1), - /* DX*/ DUP2(-1), - /* EX*/ DUP2(-1), - /* FX*/ DUP2(offsetof(BlockCache, ARM9_BIOS)) - }, - //arm7 - { - /* 0X*/ DUP2(offsetof(BlockCache, ARM7_BIOS)), - /* 1X*/ DUP2(-1), - /* 2X*/ DUP2(offsetof(BlockCache, MainRAM)), - /* 3X*/ offsetof(BlockCache, SWRAM), - offsetof(BlockCache, ARM7_WRAM), - /* 4X*/ DUP2(-1), - /* 5X*/ DUP2(-1), - /* 6X*/ DUP2(offsetof(BlockCache, ARM7_WVRAM)), /* contrary to Gbatek, melonDS and itself, - DeSmuME doesn't mirror the 64 MB region at 0x6800000 */ - /* 7X*/ DUP2(-1), - /* 8X*/ DUP2(-1), - /* 9X*/ DUP2(-1), - /* AX*/ DUP2(-1), - /* BX*/ DUP2(-1), - /* CX*/ DUP2(-1), - /* DX*/ DUP2(-1), - /* EX*/ DUP2(-1), - /* FX*/ DUP2(-1) - } +const u32 ExeMemRegionOffsets[] = { + 0, + 0x8000, + 0x10000, + 0x410000, + 0x418000, + 0x4BC000, + 0x4C4000, + 0x4C8000, + 0x4D8000, + 0x518000, }; -static u32 JIT_MASK[2][32] = { +#define DUP2(x) x, x + +const static ExeMemKind JIT_MEM[2][32] = { //arm9 { - /* 0X*/ DUP2(0x00007FFF), - /* 1X*/ DUP2(0x00007FFF), - /* 2X*/ DUP2(0x003FFFFF), - /* 3X*/ DUP2(0x00007FFF), - /* 4X*/ DUP2(0x00000000), - /* 5X*/ DUP2(0x00000000), - /* 6X*/ 0x00000000, - 0x000FFFFF, - /* 7X*/ DUP2(0x00000000), - /* 8X*/ DUP2(0x00000000), - /* 9X*/ DUP2(0x00000000), - /* AX*/ DUP2(0x00000000), - /* BX*/ DUP2(0x00000000), - /* CX*/ DUP2(0x00000000), - /* DX*/ DUP2(0x00000000), - /* EX*/ DUP2(0x00000000), - /* FX*/ DUP2(0x00007FFF) + /* 0X*/ DUP2(exeMem_ITCM), + /* 1X*/ DUP2(exeMem_ITCM), // mirror + /* 2X*/ DUP2(exeMem_MainRAM), + /* 3X*/ DUP2(exeMem_SWRAM), + /* 4X*/ DUP2(exeMem_Unmapped), + /* 5X*/ DUP2(exeMem_Unmapped), + /* 6X*/ exeMem_Unmapped, + exeMem_LCDC, // Plain ARM9-CPU Access (LCDC mode) (max 656KB) + /* 7X*/ DUP2(exeMem_Unmapped), + /* 8X*/ DUP2(exeMem_Unmapped), + /* 9X*/ DUP2(exeMem_Unmapped), + /* AX*/ DUP2(exeMem_Unmapped), + /* BX*/ DUP2(exeMem_Unmapped), + /* CX*/ DUP2(exeMem_Unmapped), + /* DX*/ DUP2(exeMem_Unmapped), + /* EX*/ DUP2(exeMem_Unmapped), + /* FX*/ DUP2(exeMem_ARM9_BIOS) }, //arm7 { - /* 0X*/ DUP2(0x00003FFF), - /* 1X*/ DUP2(0x00000000), - /* 2X*/ DUP2(0x003FFFFF), - /* 3X*/ 0x00007FFF, - 0x0000FFFF, - /* 4X*/ 0x00000000, - 0x0000FFFF, - /* 5X*/ DUP2(0x00000000), - /* 6X*/ DUP2(0x0003FFFF), - /* 7X*/ DUP2(0x00000000), - /* 8X*/ DUP2(0x00000000), - /* 9X*/ DUP2(0x00000000), - /* AX*/ DUP2(0x00000000), - /* BX*/ DUP2(0x00000000), - /* CX*/ DUP2(0x00000000), - /* DX*/ DUP2(0x00000000), - /* EX*/ DUP2(0x00000000), - /* FX*/ DUP2(0x00000000) + /* 0X*/ DUP2(exeMem_ARM7_BIOS), + /* 1X*/ DUP2(exeMem_Unmapped), + /* 2X*/ DUP2(exeMem_MainRAM), + /* 3X*/ exeMem_SWRAM, + exeMem_ARM7_WRAM, + /* 4X*/ DUP2(exeMem_Unmapped), + /* 5X*/ DUP2(exeMem_Unmapped), + /* 6X*/ DUP2(exeMem_ARM7_WVRAM), /* contrary to Gbatek, melonDS and itself, 
+ DeSmuME doesn't mirror the 64 MB region at 0x6800000 */ + /* 7X*/ DUP2(exeMem_Unmapped), + /* 8X*/ DUP2(exeMem_Unmapped), + /* 9X*/ DUP2(exeMem_Unmapped), + /* AX*/ DUP2(exeMem_Unmapped), + /* BX*/ DUP2(exeMem_Unmapped), + /* CX*/ DUP2(exeMem_Unmapped), + /* DX*/ DUP2(exeMem_Unmapped), + /* EX*/ DUP2(exeMem_Unmapped), + /* FX*/ DUP2(exeMem_Unmapped) } }; #undef DUP2 +/* + translates address to pseudo physical address + - more compact, eliminates mirroring, everything comes in a row + - we only need one translation table +*/ +u32 AddrTranslate9[0x2000]; +u32 AddrTranslate7[0x4000]; -void Init() +JitBlockEntry FastBlockAccess[ExeMemSpaceSize / 2]; +AddressRange CodeRanges[ExeMemSpaceSize / 256]; + +TinyVector JitBlocks; +JitBlock* RestoreCandidates[0x1000] = {NULL}; + +u32 HashRestoreCandidate(u32 pseudoPhysicalAddr) { - memset(&cache, 0, sizeof(BlockCache)); + return (u32)(((u64)pseudoPhysicalAddr * 11400714819323198485llu) >> 53); +} +void Init() +{ for (int i = 0; i < 0x2000; i++) - cache.AddrMapping9[i] = JIT_MEM[0][i >> 8] == -1 ? NULL : - (CompiledBlock*)((u8*)&cache + JIT_MEM[0][i >> 8]) - + (((i << 15) & JIT_MASK[0][i >> 8]) >> 1); + { + ExeMemKind kind = JIT_MEM[0][i >> 8]; + u32 size = ExeMemRegionSizes[kind]; + + AddrTranslate9[i] = ExeMemRegionOffsets[kind] + ((i << 15) & (size - 1)); + } for (int i = 0; i < 0x4000; i++) - cache.AddrMapping7[i] = JIT_MEM[1][i >> 9] == -1 ? NULL : - (CompiledBlock*)((u8*)&cache + JIT_MEM[1][i >> 9]) - + (((i << 14) & JIT_MASK[1][i >> 9]) >> 1); + { + ExeMemKind kind = JIT_MEM[1][i >> 9]; + u32 size = ExeMemRegionSizes[kind]; + + AddrTranslate7[i] = ExeMemRegionOffsets[kind] + ((i << 14) & (size - 1)); + } compiler = new Compiler(); } @@ -126,7 +141,7 @@ void DeInit() delete compiler; } -void floodFillSetFlags(FetchedInstr instrs[], int start, u8 flags) +void FloodFillSetFlags(FetchedInstr instrs[], int start, u8 flags) { for (int j = start; j >= 0; j--) { @@ -144,7 +159,154 @@ void floodFillSetFlags(FetchedInstr instrs[], int start, u8 flags) } } -CompiledBlock CompileBlock(ARM* cpu) +bool DecodeBranch(bool thumb, const FetchedInstr& instr, u32& cond, u32& targetAddr) +{ + if (thumb) + { + u32 r15 = instr.Addr + 4; + cond = 0xE; + + if (instr.Info.Kind == ARMInstrInfo::tk_BL_LONG && !(instr.Instr & (1 << 12))) + { + targetAddr = r15 + ((s32)((instr.Instr & 0x7FF) << 21) >> 9); + targetAddr += ((instr.Instr >> 16) & 0x7FF) << 1; + return true; + } + else if (instr.Info.Kind == ARMInstrInfo::tk_B) + { + s32 offset = (s32)((instr.Instr & 0x7FF) << 21) >> 20; + targetAddr = r15 + offset; + return true; + } + else if (instr.Info.Kind == ARMInstrInfo::tk_BCOND) + { + cond = (instr.Instr >> 8) & 0xF; + s32 offset = (s32)(instr.Instr << 24) >> 23; + targetAddr = r15 + offset; + return true; + } + } + else + { + cond = instr.Cond(); + if (instr.Info.Kind == ARMInstrInfo::ak_BL + || instr.Info.Kind == ARMInstrInfo::ak_B) + { + s32 offset = (s32)(instr.Instr << 8) >> 6; + u32 r15 = instr.Addr + 8; + targetAddr = r15 + offset; + return true; + } + } + return false; +} + +bool IsIdleLoop(FetchedInstr* instrs, int instrsCount) +{ + // see https://github.com/dolphin-emu/dolphin/blob/master/Source/Core/Core/PowerPC/PPCAnalyst.cpp#L678 + // it basically checks if one iteration of a loop depends on another + // the rules are quite simple + + u16 regsWrittenTo = 0; + u16 regsDisallowedToWrite = 0; + for (int i = 0; i < instrsCount; i++) + { + //printf("instr %d %x regs(%x %x) %x %x\n", i, instrs[i].Instr, instrs[i].Info.DstRegs, instrs[i].Info.SrcRegs, 
regsWrittenTo, regsDisallowedToWrite); + if (instrs[i].Info.SpecialKind == ARMInstrInfo::special_WriteMem) + return false; + if (i < instrsCount - 1 && instrs[i].Info.Branches()) + return false; + + u16 srcRegs = instrs[i].Info.SrcRegs & ~(1 << 15); + u16 dstRegs = instrs[i].Info.DstRegs & ~(1 << 15); + + regsDisallowedToWrite |= srcRegs & ~regsWrittenTo; + + if (dstRegs & regsDisallowedToWrite) + return false; + regsWrittenTo |= dstRegs; + } + return true; +} + +typedef void (*InterpreterFunc)(ARM* cpu); + +#define F(x) &ARMInterpreter::A_##x +#define F_ALU(name, s) \ + F(name##_REG_LSL_IMM##s), F(name##_REG_LSR_IMM##s), F(name##_REG_ASR_IMM##s), F(name##_REG_ROR_IMM##s), \ + F(name##_REG_LSL_REG##s), F(name##_REG_LSR_REG##s), F(name##_REG_ASR_REG##s), F(name##_REG_ROR_REG##s), F(name##_IMM##s) +#define F_MEM_WB(name) \ + F(name##_REG_LSL), F(name##_REG_LSR), F(name##_REG_ASR), F(name##_REG_ROR), F(name##_IMM), \ + F(name##_POST_REG_LSL), F(name##_POST_REG_LSR), F(name##_POST_REG_ASR), F(name##_POST_REG_ROR), F(name##_POST_IMM) +#define F_MEM_HD(name) \ + F(name##_REG), F(name##_IMM), F(name##_POST_REG), F(name##_POST_IMM) +InterpreterFunc InterpretARM[ARMInstrInfo::ak_Count] = +{ + F_ALU(AND,), F_ALU(AND,_S), + F_ALU(EOR,), F_ALU(EOR,_S), + F_ALU(SUB,), F_ALU(SUB,_S), + F_ALU(RSB,), F_ALU(RSB,_S), + F_ALU(ADD,), F_ALU(ADD,_S), + F_ALU(ADC,), F_ALU(ADC,_S), + F_ALU(SBC,), F_ALU(SBC,_S), + F_ALU(RSC,), F_ALU(RSC,_S), + F_ALU(ORR,), F_ALU(ORR,_S), + F_ALU(MOV,), F_ALU(MOV,_S), + F_ALU(BIC,), F_ALU(BIC,_S), + F_ALU(MVN,), F_ALU(MVN,_S), + F_ALU(TST,), + F_ALU(TEQ,), + F_ALU(CMP,), + F_ALU(CMN,), + + F(MUL), F(MLA), F(UMULL), F(UMLAL), F(SMULL), F(SMLAL), F(SMLAxy), F(SMLAWy), F(SMULWy), F(SMLALxy), F(SMULxy), + F(CLZ), F(QADD), F(QDADD), F(QSUB), F(QDSUB), + + F_MEM_WB(STR), + F_MEM_WB(STRB), + F_MEM_WB(LDR), + F_MEM_WB(LDRB), + + F_MEM_HD(STRH), + F_MEM_HD(LDRD), + F_MEM_HD(STRD), + F_MEM_HD(LDRH), + F_MEM_HD(LDRSB), + F_MEM_HD(LDRSH), + + F(SWP), F(SWPB), + F(LDM), F(STM), + + F(B), F(BL), F(BLX_IMM), F(BX), F(BLX_REG), + F(UNK), F(MSR_IMM), F(MSR_REG), F(MRS), F(MCR), F(MRC), F(SVC) +}; +#undef F_ALU +#undef F_MEM_WB +#undef F_MEM_HD +#undef F + +#define F(x) ARMInterpreter::T_##x +InterpreterFunc InterpretTHUMB[ARMInstrInfo::tk_Count] = +{ + F(LSL_IMM), F(LSR_IMM), F(ASR_IMM), + F(ADD_REG_), F(SUB_REG_), F(ADD_IMM_), F(SUB_IMM_), + F(MOV_IMM), F(CMP_IMM), F(ADD_IMM), F(SUB_IMM), + F(AND_REG), F(EOR_REG), F(LSL_REG), F(LSR_REG), F(ASR_REG), + F(ADC_REG), F(SBC_REG), F(ROR_REG), F(TST_REG), F(NEG_REG), + F(CMP_REG), F(CMN_REG), F(ORR_REG), F(MUL_REG), F(BIC_REG), F(MVN_REG), + F(ADD_HIREG), F(CMP_HIREG), F(MOV_HIREG), + F(ADD_PCREL), F(ADD_SPREL), F(ADD_SP), + F(LDR_PCREL), F(STR_REG), F(STRB_REG), F(LDR_REG), F(LDRB_REG), F(STRH_REG), + F(LDRSB_REG), F(LDRH_REG), F(LDRSH_REG), F(STR_IMM), F(LDR_IMM), F(STRB_IMM), + F(LDRB_IMM), F(STRH_IMM), F(LDRH_IMM), F(STR_SPREL), F(LDR_SPREL), + F(PUSH), F(POP), F(LDMIA), F(STMIA), + F(BCOND), F(BX), F(BLX_REG), F(B), F(BL_LONG_1), F(BL_LONG_2), + F(UNK), F(SVC), + NULL // BL_LONG psudo opcode +}; +#undef F + +void CompileBlock(ARM* cpu) { bool thumb = cpu->CPSR & 0x20; @@ -153,17 +315,41 @@ CompiledBlock CompileBlock(ARM* cpu) if (Config::JIT_MaxBlockSize > 32) Config::JIT_MaxBlockSize = 32; + u32 blockAddr = cpu->R[15] - (thumb ? 2 : 4); + if (!(cpu->Num == 0 + ? IsMapped<0>(blockAddr) + : IsMapped<1>(blockAddr))) + { + printf("Trying to compile a block in unmapped memory: %x\n", blockAddr); + } + + u32 pseudoPhysicalAddr = cpu->Num == 0 + ? 
TranslateAddr<0>(blockAddr) + : TranslateAddr<1>(blockAddr); + FetchedInstr instrs[Config::JIT_MaxBlockSize]; int i = 0; - u32 blockAddr = cpu->R[15] - (thumb ? 2 : 4); u32 r15 = cpu->R[15]; + + u32 addresseRanges[32] = {}; + u32 numAddressRanges = 0; + cpu->FillPipeline(); u32 nextInstr[2] = {cpu->NextInstr[0], cpu->NextInstr[1]}; u32 nextInstrAddr[2] = {blockAddr, r15}; + + JIT_DEBUGPRINT("start block %x (%x) %p %p (region invalidates %dx)\n", + blockAddr, pseudoPhysicalAddr, FastBlockAccess[pseudoPhysicalAddr / 2], + cpu->Num == 0 ? LookUpBlock<0>(blockAddr) : LookUpBlock<1>(blockAddr), + CodeRanges[pseudoPhysicalAddr / 256].TimesInvalidated); + + u32 lastSegmentStart = blockAddr; + do { r15 += thumb ? 2 : 4; + instrs[i].BranchFlags = 0; instrs[i].SetFlags = 0; instrs[i].Instr = nextInstr[0]; instrs[i].NextInstr[0] = nextInstr[0] = nextInstr[1]; @@ -171,6 +357,25 @@ CompiledBlock CompileBlock(ARM* cpu) instrs[i].Addr = nextInstrAddr[0]; nextInstrAddr[0] = nextInstrAddr[1]; nextInstrAddr[1] = r15; + JIT_DEBUGPRINT("instr %08x %x\n", instrs[i].Instr & (thumb ? 0xFFFF : ~0), instrs[i].Addr); + + u32 translatedAddr = (cpu->Num == 0 + ? TranslateAddr<0>(instrs[i].Addr) + : TranslateAddr<1>(instrs[i].Addr)) & ~0xFF; + if (i == 0 || translatedAddr != addresseRanges[numAddressRanges - 1]) + { + bool returning = false; + for (int j = 0; j < numAddressRanges; j++) + { + if (addresseRanges[j] == translatedAddr) + { + returning = true; + break; + } + } + if (!returning) + addresseRanges[numAddressRanges++] = translatedAddr; + } if (cpu->Num == 0) { @@ -198,6 +403,34 @@ CompiledBlock CompileBlock(ARM* cpu) instrs[i].NextInstr[1] = nextInstr[1]; instrs[i].Info = ARMInstrInfo::Decode(thumb, cpu->Num, instrs[i].Instr); + cpu->R[15] = r15; + cpu->CurInstr = instrs[i].Instr; + cpu->CodeCycles = instrs[i].CodeCycles; + + if (thumb) + { + InterpretTHUMB[instrs[i].Info.Kind](cpu); + } + else + { + if (cpu->Num == 0 && instrs[i].Info.Kind == ARMInstrInfo::ak_BLX_IMM) + { + ARMInterpreter::A_BLX_IMM(cpu); + } + else + { + u32 icode = ((instrs[i].Instr >> 4) & 0xF) | ((instrs[i].Instr >> 16) & 0xFF0); + assert(InterpretARM[instrs[i].Info.Kind] == ARMInterpreter::ARMInstrTable[icode] || instrs[i].Info.Kind == ARMInstrInfo::ak_MOV_REG_LSL_IMM); + if (cpu->CheckCondition(instrs[i].Cond())) + InterpretARM[instrs[i].Info.Kind](cpu); + else + cpu->AddCycles_C(); + } + } + + instrs[i].DataCycles = cpu->DataCycles; + instrs[i].DataRegion = cpu->DataRegion; + if (thumb && instrs[i].Info.Kind == ARMInstrInfo::tk_BL_LONG_2 && i > 0 && instrs[i - 1].Info.Kind == ARMInstrInfo::tk_BL_LONG_1) { @@ -208,40 +441,340 @@ CompiledBlock CompileBlock(ARM* cpu) instrs[i - 1].Info.EndBlock = true; i--; } - i++; + if (instrs[i].Info.Branches() && Config::JIT_BrancheOptimisations) + { + bool hasBranched = cpu->R[15] != r15; + + u32 cond, target; + bool staticBranch = DecodeBranch(thumb, instrs[i], cond, target); + JIT_DEBUGPRINT("branch cond %x target %x (%d)\n", cond, target, hasBranched); + + if (staticBranch) + { + bool isBackJump = false; + if (hasBranched) + { + for (int j = 0; j < i; j++) + { + if (instrs[i].Addr == target) + { + isBackJump = true; + break; + } + } + } + + if (cond < 0xE && target < instrs[i].Addr && target >= lastSegmentStart) + { + // we might have an idle loop + u32 offset = (target - blockAddr) / (thumb ? 2 : 4); + if (IsIdleLoop(instrs + offset, i - offset + 1)) + { + instrs[i].BranchFlags |= branch_IdleBranch; + JIT_DEBUGPRINT("found %s idle loop %d in block %x\n", thumb ? 
"thumb" : "arm", cpu->Num, blockAddr); + } + } + else if (hasBranched && (!thumb || cond == 0xE) && !isBackJump && i + 1 < Config::JIT_MaxBlockSize) + { + u32 targetPseudoPhysical = cpu->Num == 0 + ? TranslateAddr<0>(target) + : TranslateAddr<1>(target); + + r15 = target + (thumb ? 2 : 4); + assert(r15 == cpu->R[15]); + + JIT_DEBUGPRINT("block lengthened by static branch (target %x)\n", target); + + nextInstr[0] = cpu->NextInstr[0]; + nextInstr[1] = cpu->NextInstr[1]; + + nextInstrAddr[0] = target; + nextInstrAddr[1] = r15; + + lastSegmentStart = target; + + instrs[i].Info.EndBlock = false; + + if (cond < 0xE) + instrs[i].BranchFlags |= branch_FollowCondTaken; + } + } + + if (!hasBranched && cond < 0xE && i + 1 < Config::JIT_MaxBlockSize) + { + instrs[i].Info.EndBlock = false; + instrs[i].BranchFlags |= branch_FollowCondNotTaken; + } + } + + i++; bool canCompile = compiler->CanCompile(thumb, instrs[i - 1].Info.Kind); - if (instrs[i - 1].Info.ReadFlags != 0 || !canCompile) - floodFillSetFlags(instrs, i - 2, canCompile ? instrs[i - 1].Info.ReadFlags : 0xF); - } while(!instrs[i - 1].Info.EndBlock && i < Config::JIT_MaxBlockSize); + bool secondaryFlagReadCond = !canCompile || (instrs[i - 1].BranchFlags & (branch_FollowCondTaken | branch_FollowCondNotTaken)); + if (instrs[i - 1].Info.ReadFlags != 0 || secondaryFlagReadCond) + FloodFillSetFlags(instrs, i - 2, !secondaryFlagReadCond ? instrs[i - 1].Info.ReadFlags : 0xF); + } while(!instrs[i - 1].Info.EndBlock && i < Config::JIT_MaxBlockSize && !cpu->Halted); - floodFillSetFlags(instrs, i - 1, 0xF); + u32 restoreSlot = HashRestoreCandidate(pseudoPhysicalAddr); + JitBlock* prevBlock = RestoreCandidates[restoreSlot]; + bool mayRestore = true; + if (prevBlock && prevBlock->PseudoPhysicalAddr == pseudoPhysicalAddr) + { + RestoreCandidates[restoreSlot] = NULL; + if (prevBlock->NumInstrs == i) + { + for (int j = 0; j < i; j++) + { + if (prevBlock->Instrs()[j] != instrs[j].Instr) + { + mayRestore = false; + break; + } + } + } + else + mayRestore = false; - CompiledBlock block = compiler->CompileBlock(cpu, instrs, i); + if (prevBlock->NumAddresses == numAddressRanges) + { + for (int j = 0; j < numAddressRanges; j++) + { + if (prevBlock->AddressRanges()[j] != addresseRanges[j]) + { + mayRestore = false; + break; + } + } + } + else + mayRestore = false; + } + else + { + mayRestore = false; + prevBlock = NULL; + } - if (cpu->Num == 0) - InsertBlock<0>(blockAddr, block); + JitBlock* block; + if (!mayRestore) + { + if (prevBlock) + delete prevBlock; + + block = new JitBlock(i, numAddressRanges); + for (int j = 0; j < i; j++) + block->Instrs()[j] = instrs[j].Instr; + for (int j = 0; j < numAddressRanges; j++) + block->AddressRanges()[j] = addresseRanges[j]; + + block->StartAddr = blockAddr; + block->PseudoPhysicalAddr = pseudoPhysicalAddr; + + FloodFillSetFlags(instrs, i - 1, 0xF); + + block->EntryPoint = compiler->CompileBlock(cpu, thumb, instrs, i); + } else - InsertBlock<1>(blockAddr, block); + { + JIT_DEBUGPRINT("restored! 
%p\n", prevBlock); + block = prevBlock; + } + + for (int j = 0; j < numAddressRanges; j++) + { + assert(addresseRanges[j] == block->AddressRanges()[j]); + CodeRanges[addresseRanges[j] / 256].Blocks.Add(block); + } + + FastBlockAccess[block->PseudoPhysicalAddr / 2] = block->EntryPoint; - return block; + JitBlocks.Add(block); } -void InvalidateBlockCache() +void InvalidateByAddr(u32 pseudoPhysical) { - printf("Resetting JIT block cache...\n"); + JIT_DEBUGPRINT("invalidating by addr %x\n", pseudoPhysical); + AddressRange* range = &CodeRanges[pseudoPhysical / 256]; + int startLength = range->Blocks.Length; + for (int i = 0; i < range->Blocks.Length; i++) + { + assert(range->Blocks.Length == startLength); + JitBlock* block = range->Blocks[i]; + for (int j = 0; j < block->NumAddresses; j++) + { + u32 addr = block->AddressRanges()[j]; + if ((addr / 256) != (pseudoPhysical / 256)) + { + AddressRange* otherRange = &CodeRanges[addr / 256]; + assert(otherRange != range); + assert(otherRange->Blocks.RemoveByValue(block)); + } + } + + assert(JitBlocks.RemoveByValue(block)); + + FastBlockAccess[block->PseudoPhysicalAddr / 2] = NULL; - memset(cache.MainRAM, 0, sizeof(cache.MainRAM)); - memset(cache.SWRAM, 0, sizeof(cache.SWRAM)); - memset(cache.ARM9_BIOS, 0, sizeof(cache.ARM9_BIOS)); - memset(cache.ARM9_ITCM, 0, sizeof(cache.ARM9_ITCM)); - memset(cache.ARM9_LCDC, 0, sizeof(cache.ARM9_LCDC)); - memset(cache.ARM7_BIOS, 0, sizeof(cache.ARM7_BIOS)); - memset(cache.ARM7_WRAM, 0, sizeof(cache.ARM7_WRAM)); - memset(cache.ARM7_WVRAM, 0, sizeof(cache.ARM7_WVRAM)); + u32 slot = HashRestoreCandidate(block->PseudoPhysicalAddr); + if (RestoreCandidates[slot] && RestoreCandidates[slot] != block) + delete RestoreCandidates[slot]; + + RestoreCandidates[slot] = block; + } + if ((range->TimesInvalidated + 1) > range->TimesInvalidated) + range->TimesInvalidated++; + + range->Blocks.Clear(); +} + +void InvalidateByAddr7(u32 addr) +{ + u32 pseudoPhysical = TranslateAddr<1>(addr); + if (__builtin_expect(CodeRanges[pseudoPhysical / 256].Blocks.Length > 0, false)) + InvalidateByAddr(pseudoPhysical); +} + +void InvalidateITCM(u32 addr) +{ + u32 pseudoPhysical = addr + ExeMemRegionOffsets[exeMem_ITCM]; + if (CodeRanges[pseudoPhysical / 256].Blocks.Length > 0) + InvalidateByAddr(pseudoPhysical); +} + +void InvalidateAll() +{ + JIT_DEBUGPRINT("invalidating all %x\n", JitBlocks.Length); + for (int i = 0; i < JitBlocks.Length; i++) + { + JitBlock* block = JitBlocks[i]; + + FastBlockAccess[block->PseudoPhysicalAddr / 2] = NULL; + + for (int j = 0; j < block->NumAddresses; j++) + { + u32 addr = block->AddressRanges()[j]; + AddressRange* range = &CodeRanges[addr / 256]; + range->Blocks.Clear(); + if (range->TimesInvalidated + 1 > range->TimesInvalidated) + range->TimesInvalidated++; + } + + u32 slot = HashRestoreCandidate(block->PseudoPhysicalAddr); + if (RestoreCandidates[slot] && RestoreCandidates[slot] != block) + delete RestoreCandidates[slot]; + + RestoreCandidates[slot] = block; + } + + JitBlocks.Clear(); +} + +void ResetBlockCache() +{ + printf("Resetting JIT block cache...\n"); + + memset(FastBlockAccess, 0, sizeof(FastBlockAccess)); + for (int i = 0; i < sizeof(RestoreCandidates)/sizeof(RestoreCandidates[0]); i++) + { + if (RestoreCandidates[i]) + { + delete RestoreCandidates[i]; + RestoreCandidates[i] = NULL; + } + } + for (int i = 0; i < JitBlocks.Length; i++) + { + JitBlock* block = JitBlocks[i]; + for (int j = 0; j < block->NumAddresses; j++) + { + u32 addr = block->AddressRanges()[j]; + CodeRanges[addr / 
256].Blocks.Clear(); + CodeRanges[addr / 256].TimesInvalidated = 0; + } + delete block; + } + JitBlocks.Clear(); compiler->Reset(); } +void* GetFuncForAddr(ARM* cpu, u32 addr, bool store, int size) +{ + if (cpu->Num == 0) + { + if ((addr & 0xFF000000) == 0x04000000) + { + /* + unfortunately we can't map GPU2D this way + since it's hidden inside an object + + though GPU3D registers are accessed much more intensive + */ + if (addr >= 0x04000320 && addr < 0x040006A4) + { + switch (size | store) + { + case 8: return (void*)GPU3D::Read8; + case 9: return (void*)GPU3D::Write8; + case 16: return (void*)GPU3D::Read16; + case 17: return (void*)GPU3D::Write16; + case 32: return (void*)GPU3D::Read32; + case 33: return (void*)GPU3D::Write32; + } + } + + switch (size | store) + { + case 8: return (void*)NDS::ARM9IORead8; + case 9: return (void*)NDS::ARM9IOWrite8; + case 16: return (void*)NDS::ARM9IORead16; + case 17: return (void*)NDS::ARM9IOWrite16; + case 32: return (void*)NDS::ARM9IORead32; + case 33: return (void*)NDS::ARM9IOWrite32; + } + } + } + else + { + switch (addr & 0xFF800000) + { + case 0x04000000: + if (addr >= 0x04000400 && addr < 0x04000520) + { + switch (size | store) + { + case 8: return (void*)SPU::Read8; + case 9: return (void*)SPU::Write8; + case 16: return (void*)SPU::Read16; + case 17: return (void*)SPU::Write16; + case 32: return (void*)SPU::Read32; + case 33: return (void*)SPU::Write32; + } + } + + switch (size | store) + { + case 8: return (void*)NDS::ARM7IORead8; + case 9: return (void*)NDS::ARM7IOWrite8; + case 16: return (void*)NDS::ARM7IORead16; + case 17: return (void*)NDS::ARM7IOWrite16; + case 32: return (void*)NDS::ARM7IORead32; + case 33: return (void*)NDS::ARM7IOWrite32; + } + break; + case 0x04800000: + if (addr < 0x04810000 && size == 16) + { + if (store) + return (void*)Wifi::Write; + else + return (void*)Wifi::Read; + } + break; + } + } + return NULL; +} + } \ No newline at end of file diff --git a/src/ARMJIT.h b/src/ARMJIT.h index 7e448ef..1db4d66 100644 --- a/src/ARMJIT.h +++ b/src/ARMJIT.h @@ -9,142 +9,67 @@ namespace ARMJIT { -typedef u32 (*CompiledBlock)(); - -struct FetchedInstr +enum ExeMemKind { - u32 A_Reg(int pos) const - { - return (Instr >> pos) & 0xF; - } - - u32 T_Reg(int pos) const - { - return (Instr >> pos) & 0x7; - } - - u32 Cond() const - { - return Instr >> 28; - } - - u8 SetFlags; - u32 Instr; - u32 NextInstr[2]; - u32 Addr; - - u8 CodeCycles; - - ARMInstrInfo::Info Info; + exeMem_Unmapped = 0, + exeMem_ITCM, + exeMem_MainRAM, + exeMem_SWRAM, + exeMem_LCDC, + exeMem_ARM9_BIOS, + exeMem_ARM7_BIOS, + exeMem_ARM7_WRAM, + exeMem_ARM7_WVRAM, + exeMem_Count }; -/* - Copied from DeSmuME - Some names where changed to match the nomenclature of melonDS +extern const u32 ExeMemRegionOffsets[]; +extern const u32 ExeMemRegionSizes[]; - Since it's nowhere explained and atleast I needed some time to get behind it, - here's a summary on how it works: - more or less all memory locations from which code can be executed are - represented by an array of function pointers, which point to null or - a function which executes a block instructions starting from there. +typedef u32 (*JitBlockEntry)(); - The most significant 4 bits of each address is ignored. This 28 bit space is - divided into 0x2000 32 KB for ARM9 and 0x4000 16 KB for ARM7, each of which - a pointer to the relevant place inside the afore mentioned arrays. 32 and 16 KB - are the sizes of the smallest contigous memory region mapped to the respective CPU. 
- Because ARM addresses are always aligned to 4 bytes and Thumb to a 2 byte boundary, - we only need every second half word to be adressable. +extern u32 AddrTranslate9[0x2000]; +extern u32 AddrTranslate7[0x4000]; - In case a memory write hits mapped memory, the function block at this - address is set to null, so it's recompiled the next time it's executed. - - This method has disadvantages, namely that only writing to the - first instruction of a block marks it as invalid and that memory remapping - (SWRAM and VRAM) isn't taken into account. -*/ - -struct BlockCache -{ - CompiledBlock* AddrMapping9[0x2000] = {0}; - CompiledBlock* AddrMapping7[0x4000] = {0}; - - CompiledBlock MainRAM[4*1024*1024/2]; - CompiledBlock SWRAM[0x8000/2]; // Shared working RAM - CompiledBlock ARM9_ITCM[0x8000/2]; - CompiledBlock ARM9_LCDC[0xA4000/2]; - CompiledBlock ARM9_BIOS[0x8000/2]; - CompiledBlock ARM7_BIOS[0x4000/2]; - CompiledBlock ARM7_WRAM[0x10000/2]; // dedicated ARM7 WRAM - CompiledBlock ARM7_WVRAM[0x40000/2]; // VRAM allocated as Working RAM -}; - -extern BlockCache cache; +const u32 ExeMemSpaceSize = 0x518000; // I hate you C++, sometimes I really hate you... +extern JitBlockEntry FastBlockAccess[ExeMemSpaceSize / 2]; template inline bool IsMapped(u32 addr) { if (num == 0) - return cache.AddrMapping9[(addr & 0xFFFFFFF) >> 15]; + return AddrTranslate9[(addr & 0xFFFFFFF) >> 15] >= ExeMemRegionSizes[exeMem_Unmapped]; else - return cache.AddrMapping7[(addr & 0xFFFFFFF) >> 14]; + return AddrTranslate7[(addr & 0xFFFFFFF) >> 14] >= ExeMemRegionSizes[exeMem_Unmapped]; } template -inline CompiledBlock LookUpBlock(u32 addr) +inline u32 TranslateAddr(u32 addr) { if (num == 0) - return cache.AddrMapping9[(addr & 0xFFFFFFF) >> 15][(addr & 0x7FFF) >> 1]; + return AddrTranslate9[(addr & 0xFFFFFFF) >> 15] + (addr & 0x7FFF); else - return cache.AddrMapping7[(addr & 0xFFFFFFF) >> 14][(addr & 0x3FFF) >> 1]; + return AddrTranslate7[(addr & 0xFFFFFFF) >> 14] + (addr & 0x3FFF); } template -inline void Invalidate16(u32 addr) +inline JitBlockEntry LookUpBlock(u32 addr) { - if (IsMapped(addr)) - { - if (num == 0) - cache.AddrMapping9[(addr & 0xFFFFFFF) >> 15][(addr & 0x7FFF) >> 1] = NULL; - else - cache.AddrMapping7[(addr & 0xFFFFFFF) >> 14][(addr & 0x3FFF) >> 1] = NULL; - } -} - -template -inline void Invalidate32(u32 addr) -{ - if (IsMapped(addr)) - { - if (num == 0) - { - CompiledBlock* page = cache.AddrMapping9[(addr & 0xFFFFFFF) >> 15]; - page[(addr & 0x7FFF) >> 1] = NULL; - page[((addr + 2) & 0x7FFF) >> 1] = NULL; - } - else - { - CompiledBlock* page = cache.AddrMapping7[(addr & 0xFFFFFFF) >> 14]; - page[(addr & 0x3FFF) >> 1] = NULL; - page[((addr + 2) & 0x3FFF) >> 1] = NULL; - } - } -} - -template -inline void InsertBlock(u32 addr, CompiledBlock func) -{ - if (num == 0) - cache.AddrMapping9[(addr & 0xFFFFFFF) >> 15][(addr & 0x7FFF) >> 1] = func; - else - cache.AddrMapping7[(addr & 0xFFFFFFF) >> 14][(addr & 0x3FFF) >> 1] = func; + return FastBlockAccess[TranslateAddr(addr) / 2]; } void Init(); void DeInit(); -CompiledBlock CompileBlock(ARM* cpu); +void InvalidateByAddr(u32 pseudoPhysical); +void InvalidateAll(); + +void InvalidateITCM(u32 addr); +void InvalidateByAddr7(u32 addr); + +void CompileBlock(ARM* cpu); -void InvalidateBlockCache(); +void ResetBlockCache(); } diff --git a/src/ARMJIT_Internal.h b/src/ARMJIT_Internal.h new file mode 100644 index 0000000..4acb488 --- /dev/null +++ b/src/ARMJIT_Internal.h @@ -0,0 +1,198 @@ +#ifndef ARMJIT_INTERNAL_H +#define ARMJIT_INTERNAL_H + +#include "types.h" +#include + 
+#include "ARMJIT.h" + +// here lands everything which doesn't fit into ARMJIT.h +// where it would be included by pretty much everything +namespace ARMJIT +{ + +enum +{ + branch_IdleBranch = 1 << 0, + branch_FollowCondTaken = 1 << 1, + branch_FollowCondNotTaken = 1 << 2 +}; + +struct FetchedInstr +{ + u32 A_Reg(int pos) const + { + return (Instr >> pos) & 0xF; + } + + u32 T_Reg(int pos) const + { + return (Instr >> pos) & 0x7; + } + + u32 Cond() const + { + return Instr >> 28; + } + + u8 BranchFlags; + u8 SetFlags; + u32 Instr; + u32 NextInstr[2]; + u32 Addr; + + u8 CodeCycles; + u8 DataCycles; + u8 DataRegion; + + ARMInstrInfo::Info Info; +}; + +/* + TinyVector + - because reinventing the wheel is the best! + + - meant to be used very often, with not so many elements + max 1 << 16 elements + - doesn't allocate while no elements are inserted + - not stl confirmant of course + - probably only works with POD types + - remove operations don't preserve order, but O(1)! +*/ +template +struct __attribute__((packed)) TinyVector +{ + T* Data = NULL; + u16 Capacity = 0; + u32 Length = 0; // make it 32 bit so we don't need movzx + + ~TinyVector() + { + delete[] Data; + } + + void MakeCapacity(u32 capacity) + { + assert(capacity <= UINT16_MAX); + assert(capacity > Capacity); + T* newMem = new T[capacity]; + if (Data != NULL) + memcpy(newMem, Data, sizeof(Data) * Length); + + T* oldData = Data; + Data = newMem; + if (oldData != NULL) + delete[] oldData; + + Capacity = capacity; + } + + void Clear() + { + Length = 0; + } + + void Add(T element) + { + assert(Length + 1 <= UINT16_MAX); + if (Length + 1 > Capacity) + MakeCapacity(((Capacity + 4) * 3) / 2); + + Data[Length++] = element; + } + + void Remove(int index) + { + assert(index >= 0 && index < Length); + + Length--; + Data[index] = Data[Length]; + /*for (int i = index; i < Length; i++) + Data[i] = Data[i + 1];*/ + } + + int Find(T needle) + { + for (int i = 0; i < Length; i++) + { + if (Data[i] == needle) + return i; + } + return -1; + } + + bool RemoveByValue(T needle) + { + for (int i = 0; i < Length; i++) + { + if (Data[i] == needle) + { + Remove(i); + return true; + } + } + return false; + } + + T& operator[](int index) + { + assert(index >= 0 && index < Length); + return Data[index]; + } +}; + +class JitBlock +{ +public: + JitBlock(u32 numInstrs, u32 numAddresses) + { + NumInstrs = numInstrs; + NumAddresses = numAddresses; + Data = new u32[numInstrs + numAddresses]; + } + + ~JitBlock() + { + delete[] Data; + } + + u32 StartAddr; + u32 PseudoPhysicalAddr; + + u32 NumInstrs; + u32 NumAddresses; + + JitBlockEntry EntryPoint; + + u32* Instrs() + { return Data; } + u32* AddressRanges() + { return Data + NumInstrs; } + +private: + /* + 0.. 
Blocks; + u16 TimesInvalidated; +}; + +extern AddressRange CodeRanges[ExeMemSpaceSize / 256]; + +typedef void (*InterpreterFunc)(ARM* cpu); +extern InterpreterFunc InterpretARM[]; +extern InterpreterFunc InterpretTHUMB[]; + +void* GetFuncForAddr(ARM* cpu, u32 addr, bool store, int size); + +} + +#endif \ No newline at end of file diff --git a/src/ARMJIT_RegisterCache.h b/src/ARMJIT_RegisterCache.h index fe2f203..ed6a2b7 100644 --- a/src/ARMJIT_RegisterCache.h +++ b/src/ARMJIT_RegisterCache.h @@ -60,15 +60,46 @@ public: assert("Welp!"); } + void PutLiteral(int reg, u32 val) + { + LiteralsLoaded |= (1 << reg); + LiteralValues[reg] = val; + } + + void UnloadLiteral(int reg) + { + LiteralsLoaded &= ~(1 << reg); + } + + bool IsLiteral(int reg) + { + return LiteralsLoaded & (1 << reg); + } + + void PrepareExit() + { + BitSet16 dirtyRegs(DirtyRegs); + for (int reg : dirtyRegs) + Compiler->SaveReg(reg, Mapping[reg]); + } + void Flush() { BitSet16 loadedSet(LoadedRegs); for (int reg : loadedSet) UnloadRegister(reg); + LiteralsLoaded = 0; } void Prepare(bool thumb, int i) { + if (LoadedRegs & (1 << 15)) + UnloadRegister(15); + + BitSet16 invalidedLiterals(LiteralsLoaded & Instrs[i].Info.DstRegs); + for (int reg : invalidedLiterals) + UnloadLiteral(reg); + u16 futureNeeded = 0; int ranking[16]; for (int j = 0; j < 16; j++) @@ -86,7 +117,7 @@ public: for (int reg : neverNeededAgain) UnloadRegister(reg); - FetchedInstr Instr = Instrs[i]; + FetchedInstr Instr = Instrs[i]; u16 necessaryRegs = (Instr.Info.SrcRegs & ~(1 << 15)) | Instr.Info.DstRegs; BitSet16 needToBeLoaded(necessaryRegs & ~LoadedRegs); if (needToBeLoaded != BitSet16(0)) @@ -125,6 +156,9 @@ public: static const int NativeRegsAvailable; Reg Mapping[16]; + u32 LiteralValues[16]; + + u16 LiteralsLoaded = 0; u32 NativeRegsUsed = 0; u16 LoadedRegs = 0; u16 DirtyRegs = 0; diff --git a/src/ARMJIT_x64/ARMJIT_ALU.cpp b/src/ARMJIT_x64/ARMJIT_ALU.cpp index f868ddf..14c223b 100644 --- a/src/ARMJIT_x64/ARMJIT_ALU.cpp +++ b/src/ARMJIT_x64/ARMJIT_ALU.cpp @@ -213,7 +213,13 @@ void Compiler::A_Comp_MovOp() MOV(32, rd, op2); if (((CurInstr.Instr >> 21) & 0xF) == 0xF) + { NOT(32, rd); + if (op2.IsImm() && CurInstr.Cond() == 0xE) + RegCache.PutLiteral(CurInstr.A_Reg(12), ~op2.Imm32()); + } + else if (op2.IsImm() && CurInstr.Cond() == 0xE) + RegCache.PutLiteral(CurInstr.A_Reg(12), op2.Imm32()); if (S) { @@ -564,7 +570,13 @@ void Compiler::T_Comp_AddSub_() Comp_AddCycles_C(); - if (op & 1) + // special case for thumb mov being alias to add rd, rn, #0 + if (CurInstr.SetFlags == 0 && rn.IsImm() && rn.Imm32() == 0) + { + if (rd != rs) + MOV(32, rd, rs); + } + else if (op & 1) Comp_ArithTriOp(&Compiler::SUB, rd, rs, rn, false, opSetsFlags|opInvertCarry|opRetriveCV); else Comp_ArithTriOp(&Compiler::ADD, rd, rs, rn, false, opSetsFlags|opSymmetric|opRetriveCV); @@ -614,7 +626,7 @@ void Compiler::T_Comp_ALU() u32 op = (CurInstr.Instr >> 6) & 0xF; if ((op >= 0x2 && op < 0x4) || op == 0x7) - Comp_AddCycles_CI(1); + Comp_AddCycles_CI(1); // shift by reg else Comp_AddCycles_C(); diff --git a/src/ARMJIT_x64/ARMJIT_Branch.cpp b/src/ARMJIT_x64/ARMJIT_Branch.cpp index cc7a3c4..0dedb3f 100644 --- a/src/ARMJIT_x64/ARMJIT_Branch.cpp +++ b/src/ARMJIT_x64/ARMJIT_Branch.cpp @@ -16,9 +16,6 @@ int squeezePointer(T* ptr) void Compiler::Comp_JumpTo(u32 addr, bool forceNonConstantCycles) { // we can simplify constant branches by a lot - // it's not completely safe to assume stuff like, which instructions to preload - // we'll see how it works out - IrregularCycles = true; u32 newPC; @@ 
-39,18 +36,12 @@ void Compiler::Comp_JumpTo(u32 addr, bool forceNonConstantCycles) { ARMv5* cpu9 = (ARMv5*)CurCPU; - u32 oldregion = R15 >> 24; - u32 newregion = addr >> 24; - u32 regionCodeCycles = cpu9->MemTimings[addr >> 12][0]; u32 compileTimeCodeCycles = cpu9->RegionCodeCycles; cpu9->RegionCodeCycles = regionCodeCycles; - MOV(32, MDisp(RCPU, offsetof(ARMv5, RegionCodeCycles)), Imm32(regionCodeCycles)); - - bool setupRegion = newregion != oldregion; - if (setupRegion) - cpu9->SetupCodeMem(addr); + if (Exit) + MOV(32, MDisp(RCPU, offsetof(ARMv5, RegionCodeCycles)), Imm32(regionCodeCycles)); if (addr & 0x1) { @@ -83,12 +74,7 @@ void Compiler::Comp_JumpTo(u32 addr, bool forceNonConstantCycles) cycles += cpu9->CodeCycles; } - MOV(64, MDisp(RCPU, offsetof(ARM, CodeMem.Mem)), Imm32(squeezePointer(cpu9->CodeMem.Mem))); - MOV(32, MDisp(RCPU, offsetof(ARM, CodeMem.Mask)), Imm32(cpu9->CodeMem.Mask)); - cpu9->RegionCodeCycles = compileTimeCodeCycles; - if (setupRegion) - cpu9->SetupCodeMem(R15); } else { @@ -100,8 +86,11 @@ void Compiler::Comp_JumpTo(u32 addr, bool forceNonConstantCycles) cpu7->CodeRegion = codeRegion; cpu7->CodeCycles = codeCycles; - MOV(32, MDisp(RCPU, offsetof(ARM, CodeRegion)), Imm32(codeRegion)); - MOV(32, MDisp(RCPU, offsetof(ARM, CodeCycles)), Imm32(codeCycles)); + if (Exit) + { + MOV(32, MDisp(RCPU, offsetof(ARM, CodeRegion)), Imm32(codeRegion)); + MOV(32, MDisp(RCPU, offsetof(ARM, CodeCycles)), Imm32(codeCycles)); + } if (addr & 0x1) { @@ -133,7 +122,8 @@ void Compiler::Comp_JumpTo(u32 addr, bool forceNonConstantCycles) cpu7->CodeCycles = addr >> 15; } - MOV(32, MDisp(RCPU, offsetof(ARM, R[15])), Imm32(newPC)); + if (Exit) + MOV(32, MDisp(RCPU, offsetof(ARM, R[15])), Imm32(newPC)); if ((Thumb || CurInstr.Cond() >= 0xE) && !forceNonConstantCycles) ConstantCycles += cycles; else @@ -219,10 +209,23 @@ void Compiler::T_Comp_BCOND() s32 offset = (s32)(CurInstr.Instr << 24) >> 23; Comp_JumpTo(R15 + offset + 1, true); + Comp_SpecialBranchBehaviour(); + FixupBranch skipFailed = J(); SetJumpTarget(skipExecute); + + if (CurInstr.BranchFlags & branch_FollowCondTaken) + { + RegCache.PrepareExit(); + SaveCPSR(false); + + MOV(32, R(RAX), Imm32(ConstantCycles)); + ABI_PopRegistersAndAdjustStack(BitSet32(ABI_ALL_CALLEE_SAVED & ABI_ALL_GPRS & ~BitSet32({RSP})), 8); + RET(); + } + Comp_AddCycles_C(true); - SetJumpTarget(skipFailed); + SetJumpTarget(skipFailed); } void Compiler::T_Comp_B() diff --git a/src/ARMJIT_x64/ARMJIT_Compiler.cpp b/src/ARMJIT_x64/ARMJIT_Compiler.cpp index d8ce1aa..25c55a3 100644 --- a/src/ARMJIT_x64/ARMJIT_Compiler.cpp +++ b/src/ARMJIT_x64/ARMJIT_Compiler.cpp @@ -72,12 +72,15 @@ Compiler::Compiler() for (int i = 0; i < 3; i++) { for (int j = 0; j < 2; j++) - { MemoryFuncs9[i][j] = Gen_MemoryRoutine9(j, 8 << i); - MemoryFuncs7[i][j][0] = Gen_MemoryRoutine7(j, false, 8 << i); - MemoryFuncs7[i][j][1] = Gen_MemoryRoutine7(j, true, 8 << i); - } } + MemoryFuncs7[0][0] = (void*)NDS::ARM7Read8; + MemoryFuncs7[0][1] = (void*)NDS::ARM7Write8; + MemoryFuncs7[1][0] = (void*)NDS::ARM7Read16; + MemoryFuncs7[1][1] = (void*)NDS::ARM7Write16; + MemoryFuncs7[2][0] = (void*)NDS::ARM7Read32; + MemoryFuncs7[2][1] = (void*)NDS::ARM7Write32; + for (int i = 0; i < 2; i++) for (int j = 0; j < 2; j++) { @@ -179,12 +182,13 @@ void Compiler::LoadCPSR() MOV(32, R(RCPSR), MDisp(RCPU, offsetof(ARM, CPSR))); } -void Compiler::SaveCPSR() +void Compiler::SaveCPSR(bool flagClean) { if (CPSRDirty) { MOV(32, MDisp(RCPU, offsetof(ARM, CPSR)), R(RCPSR)); - CPSRDirty = false; + if (flagClean) + CPSRDirty 
= false; } } @@ -204,6 +208,9 @@ void Compiler::SaveReg(int reg, X64Reg nativeReg) // invalidates RSCRATCH and RSCRATCH3 Gen::FixupBranch Compiler::CheckCondition(u32 cond) { + // hack, ldm/stm can get really big TODO: make this better + bool ldmStm = !Thumb && + (CurInstr.Info.Kind == ARMInstrInfo::ak_LDM || CurInstr.Info.Kind == ARMInstrInfo::ak_STM); if (cond >= 0x8) { static_assert(RSCRATCH3 == ECX, "RSCRATCH has to be equal to ECX!"); @@ -213,14 +220,14 @@ Gen::FixupBranch Compiler::CheckCondition(u32 cond) SHL(32, R(RSCRATCH), R(RSCRATCH3)); TEST(32, R(RSCRATCH), Imm32(ARM::ConditionTable[cond])); - return J_CC(CC_Z); + return J_CC(CC_Z, ldmStm); } else { // could have used a LUT, but then where would be the fun? TEST(32, R(RCPSR), Imm32(1 << (28 + ((~(cond >> 1) & 1) << 1 | (cond >> 2 & 1) ^ (cond >> 1 & 1))))); - return J_CC(cond & 1 ? CC_NZ : CC_Z); + return J_CC(cond & 1 ? CC_NZ : CC_Z, ldmStm); } } @@ -354,25 +361,34 @@ void Compiler::Reset() SetCodePtr(ResetStart); } -CompiledBlock Compiler::CompileBlock(ARM* cpu, FetchedInstr instrs[], int instrsCount) +void Compiler::Comp_SpecialBranchBehaviour() +{ + if (CurInstr.BranchFlags & branch_IdleBranch) + OR(32, MDisp(RCPU, offsetof(ARM, Halted)), Imm8(0x20)); + + if (CurInstr.BranchFlags & branch_FollowCondNotTaken) + { + RegCache.PrepareExit(); + SaveCPSR(false); + + MOV(32, R(RAX), Imm32(ConstantCycles)); + ABI_PopRegistersAndAdjustStack(BitSet32(ABI_ALL_CALLEE_SAVED & ABI_ALL_GPRS & ~BitSet32({RSP})), 8); + RET(); + } +} + +JitBlockEntry Compiler::CompileBlock(ARM* cpu, bool thumb, FetchedInstr instrs[], int instrsCount) { if (CodeMemSize - (GetWritableCodePtr() - ResetStart) < 1024 * 32) // guess... - InvalidateBlockCache(); + ResetBlockCache(); ConstantCycles = 0; - Thumb = cpu->CPSR & 0x20; + Thumb = thumb; Num = cpu->Num; - CodeRegion = cpu->CodeRegion; + CodeRegion = instrs[0].Addr >> 24; CurCPU = cpu; - CompiledBlock res = (CompiledBlock)GetWritableCodePtr(); - - if (!(Num == 0 - ? IsMapped<0>(instrs[0].Addr - (Thumb ? 2 : 4)) - : IsMapped<1>(instrs[0].Addr - (Thumb ? 2 : 4)))) - { - printf("Trying to compile a block in unmapped memory\n"); - } + JitBlockEntry res = (JitBlockEntry)GetWritableCodePtr(); ABI_PushRegistersAndAdjustStack(BitSet32(ABI_ALL_CALLEE_SAVED & ABI_ALL_GPRS & ~BitSet32({RSP})), 8); @@ -380,7 +396,6 @@ CompiledBlock Compiler::CompileBlock(ARM* cpu, FetchedInstr instrs[], int instrs LoadCPSR(); - // TODO: this is ugly as a whole, do better RegCache = RegisterCache(this, instrs, instrsCount); for (int i = 0; i < instrsCount; i++) @@ -388,21 +403,25 @@ CompiledBlock Compiler::CompileBlock(ARM* cpu, FetchedInstr instrs[], int instrs CurInstr = instrs[i]; R15 = CurInstr.Addr + (Thumb ? 4 : 8); + Exit = i == instrsCount - 1 || (CurInstr.BranchFlags & branch_FollowCondNotTaken); + CompileFunc comp = Thumb ? T_Comp[CurInstr.Info.Kind] : A_Comp[CurInstr.Info.Kind]; bool isConditional = Thumb ? 
CurInstr.Info.Kind == ARMInstrInfo::tk_BCOND : CurInstr.Cond() < 0xE; - if (comp == NULL || (i == instrsCount - 1 && (!CurInstr.Info.Branches() || isConditional))) + if (comp == NULL || (CurInstr.BranchFlags & branch_FollowCondTaken) || (i == instrsCount - 1 && (!CurInstr.Info.Branches() || isConditional))) { MOV(32, MDisp(RCPU, offsetof(ARM, R[15])), Imm32(R15)); - MOV(32, MDisp(RCPU, offsetof(ARM, CodeCycles)), Imm32(CurInstr.CodeCycles)); - MOV(32, MDisp(RCPU, offsetof(ARM, CurInstr)), Imm32(CurInstr.Instr)); - if (comp == NULL) + { + MOV(32, MDisp(RCPU, offsetof(ARM, CodeCycles)), Imm32(CurInstr.CodeCycles)); + MOV(32, MDisp(RCPU, offsetof(ARM, CurInstr)), Imm32(CurInstr.Instr)); + SaveCPSR(); + } } - + if (comp != NULL) RegCache.Prepare(Thumb, i); else @@ -410,12 +429,11 @@ CompiledBlock Compiler::CompileBlock(ARM* cpu, FetchedInstr instrs[], int instrs if (Thumb) { - u32 icode = (CurInstr.Instr >> 6) & 0x3FF; if (comp == NULL) { MOV(64, R(ABI_PARAM1), R(RCPU)); - ABI_CallFunction(ARMInterpreter::THUMBInstrTable[icode]); + ABI_CallFunction(InterpretTHUMB[CurInstr.Info.Kind]); } else (this->*comp)(); @@ -434,7 +452,9 @@ CompiledBlock Compiler::CompileBlock(ARM* cpu, FetchedInstr instrs[], int instrs } } else if (cond == 0xF) + { Comp_AddCycles_C(); + } else { IrregularCycles = false; @@ -443,25 +463,36 @@ CompiledBlock Compiler::CompileBlock(ARM* cpu, FetchedInstr instrs[], int instrs if (cond < 0xE) skipExecute = CheckCondition(cond); - u32 icode = ((CurInstr.Instr >> 4) & 0xF) | ((CurInstr.Instr >> 16) & 0xFF0); if (comp == NULL) { MOV(64, R(ABI_PARAM1), R(RCPU)); - ABI_CallFunction(ARMInterpreter::ARMInstrTable[icode]); + ABI_CallFunction(InterpretARM[CurInstr.Info.Kind]); } else (this->*comp)(); + Comp_SpecialBranchBehaviour(); + if (CurInstr.Cond() < 0xE) { - if (IrregularCycles) + if (IrregularCycles || (CurInstr.BranchFlags & branch_FollowCondTaken)) { FixupBranch skipFailed = J(); SetJumpTarget(skipExecute); Comp_AddCycles_C(true); + if (CurInstr.BranchFlags & branch_FollowCondTaken) + { + RegCache.PrepareExit(); + SaveCPSR(false); + + MOV(32, R(RAX), Imm32(ConstantCycles)); + ABI_PopRegistersAndAdjustStack(BitSet32(ABI_ALL_CALLEE_SAVED & ABI_ALL_GPRS & ~BitSet32({RSP})), 8); + RET(); + } + SetJumpTarget(skipFailed); } else @@ -483,6 +514,12 @@ CompiledBlock Compiler::CompileBlock(ARM* cpu, FetchedInstr instrs[], int instrs ABI_PopRegistersAndAdjustStack(BitSet32(ABI_ALL_CALLEE_SAVED & ABI_ALL_GPRS & ~BitSet32({RSP})), 8); RET(); + /*FILE* codeout = fopen("codeout", "a"); + fprintf(codeout, "beginning block argargarg__ %x!!!", instrs[0].Addr); + fwrite((u8*)res, GetWritableCodePtr() - (u8*)res, 1, codeout); + + fclose(codeout);*/ + return res; } @@ -528,4 +565,89 @@ void Compiler::Comp_AddCycles_CI(Gen::X64Reg i, int add) } } +void Compiler::Comp_AddCycles_CDI() +{ + if (Num == 0) + Comp_AddCycles_CD(); + else + { + IrregularCycles = true; + + s32 cycles; + + s32 numC = NDS::ARM7MemTimings[CurInstr.CodeCycles][Thumb ? 
0 : 2]; + s32 numD = CurInstr.DataCycles; + + if (CurInstr.DataRegion == 0x02) // mainRAM + { + if (CodeRegion == 0x02) + cycles = numC + numD; + else + { + numC++; + cycles = std::max(numC + numD - 3, std::max(numC, numD)); + } + } + else if (CodeRegion == 0x02) + { + numD++; + cycles = std::max(numC + numD - 3, std::max(numC, numD)); + } + else + { + cycles = numC + numD + 1; + } + + printf("%x: %d %d cycles cdi (%d)\n", CurInstr.Instr, Num, CurInstr.DataCycles, cycles); + + if (!Thumb && CurInstr.Cond() < 0xE) + ADD(32, MDisp(RCPU, offsetof(ARM, Cycles)), Imm8(cycles)); + else + ConstantCycles += cycles; + } +} + +void Compiler::Comp_AddCycles_CD() +{ + u32 cycles = 0; + if (Num == 0) + { + s32 numC = (R15 & 0x2) ? 0 : CurInstr.CodeCycles; + s32 numD = CurInstr.DataCycles; + + //if (DataRegion != CodeRegion) + cycles = std::max(numC + numD - 6, std::max(numC, numD)); + + IrregularCycles = cycles != numC; + } + else + { + s32 numC = NDS::ARM7MemTimings[CurInstr.CodeCycles][Thumb ? 0 : 2]; + s32 numD = CurInstr.DataCycles; + + if (CurInstr.DataRegion == 0x02) + { + if (CodeRegion == 0x02) + cycles += numC + numD; + else + cycles += std::max(numC + numD - 3, std::max(numC, numD)); + } + else if (CodeRegion == 0x02) + { + cycles += std::max(numC + numD - 3, std::max(numC, numD)); + } + else + { + cycles += numC + numD; + } + + IrregularCycles = true; + } + + if (!Thumb && CurInstr.Cond() < 0xE) + ADD(32, MDisp(RCPU, offsetof(ARM, Cycles)), Imm8(cycles)); + else + ConstantCycles += cycles; +} + } \ No newline at end of file diff --git a/src/ARMJIT_x64/ARMJIT_Compiler.h b/src/ARMJIT_x64/ARMJIT_Compiler.h index fcb2380..792ff66 100644 --- a/src/ARMJIT_x64/ARMJIT_Compiler.h +++ b/src/ARMJIT_x64/ARMJIT_Compiler.h @@ -4,6 +4,7 @@ #include "../dolphin/x64Emitter.h" #include "../ARMJIT.h" +#include "../ARMJIT_Internal.h" #include "../ARMJIT_RegisterCache.h" namespace ARMJIT @@ -16,6 +17,32 @@ const Gen::X64Reg RSCRATCH = Gen::EAX; const Gen::X64Reg RSCRATCH2 = Gen::EDX; const Gen::X64Reg RSCRATCH3 = Gen::ECX; +struct ComplexOperand +{ + ComplexOperand() + {} + + ComplexOperand(u32 imm) + : IsImm(true), Imm(imm) + {} + ComplexOperand(int reg, int op, int amount) + : IsImm(false) + { + Reg.Reg = reg; + Reg.Op = op; + Reg.Amount = amount; + } + + bool IsImm; + union + { + struct + { + int Reg, Op, Amount; + } Reg; + u32 Imm; + }; +}; class Compiler : public Gen::XEmitter { @@ -24,7 +51,7 @@ public: void Reset(); - CompiledBlock CompileBlock(ARM* cpu, FetchedInstr instrs[], int instrsCount); + JitBlockEntry CompileBlock(ARM* cpu, bool thumb, FetchedInstr instrs[], int instrsCount); void LoadReg(int reg, Gen::X64Reg nativeReg); void SaveReg(int reg, Gen::X64Reg nativeReg); @@ -39,6 +66,8 @@ public: void Comp_AddCycles_C(bool forceNonConstant = false); void Comp_AddCycles_CI(u32 i); void Comp_AddCycles_CI(Gen::X64Reg i, int add); + void Comp_AddCycles_CDI(); + void Comp_AddCycles_CD(); enum { @@ -92,8 +121,17 @@ public: void T_Comp_BL_LONG_2(); void T_Comp_BL_Merged(); - void Comp_MemAccess(Gen::OpArg rd, bool signExtend, bool store, int size); + enum + { + memop_Writeback = 1 << 0, + memop_Post = 1 << 1, + memop_SignExtend = 1 << 2, + memop_Store = 1 << 3, + memop_SubtractOffset = 1 << 4 + }; + void Comp_MemAccess(int rd, int rn, const ComplexOperand& op2, int size, int flags); s32 Comp_MemAccessBlock(int rn, BitSet16 regs, bool store, bool preinc, bool decrement, bool usermode); + void Comp_MemLoadLiteral(int size, int rd, u32 addr); void Comp_ArithTriOp(void (Compiler::*op)(int, const Gen::OpArg&, 
const Gen::OpArg&), Gen::OpArg rd, Gen::OpArg rn, Gen::OpArg op2, bool carryUsed, int opFlags); @@ -105,8 +143,9 @@ public: void Comp_RetriveFlags(bool sign, bool retriveCV, bool carryUsed); + void Comp_SpecialBranchBehaviour(); + void* Gen_MemoryRoutine9(bool store, int size); - void* Gen_MemoryRoutine7(bool store, bool codeMainRAM, int size); void* Gen_MemoryRoutineSeq9(bool store, bool preinc); void* Gen_MemoryRoutineSeq7(bool store, bool preinc, bool codeMainRAM); @@ -117,10 +156,9 @@ public: Gen::OpArg Comp_RegShiftReg(int op, Gen::OpArg rs, Gen::OpArg rm, bool S, bool& carryUsed); Gen::OpArg A_Comp_GetALUOp2(bool S, bool& carryUsed); - Gen::OpArg A_Comp_GetMemWBOffset(); void LoadCPSR(); - void SaveCPSR(); + void SaveCPSR(bool flagClean = true); bool FlagsNZRequired() { return CurInstr.SetFlags & 0xC; } @@ -139,10 +177,11 @@ public: u8* ResetStart; u32 CodeMemSize; + bool Exit; bool IrregularCycles; void* MemoryFuncs9[3][2]; - void* MemoryFuncs7[3][2][2]; + void* MemoryFuncs7[3][2]; void* MemoryFuncsSeq9[2][2]; void* MemoryFuncsSeq7[2][2][2]; diff --git a/src/ARMJIT_x64/ARMJIT_LoadStore.cpp b/src/ARMJIT_x64/ARMJIT_LoadStore.cpp index bf8280d..13ca415 100644 --- a/src/ARMJIT_x64/ARMJIT_LoadStore.cpp +++ b/src/ARMJIT_x64/ARMJIT_LoadStore.cpp @@ -27,51 +27,7 @@ int squeezePointer(T* ptr) /* address - ABI_PARAM1 (a.k.a. ECX = RSCRATCH3 on Windows) store value - ABI_PARAM2 (a.k.a. RDX = RSCRATCH2 on Windows) - code cycles - ABI_PARAM3 */ - -#define CALC_CYCLES_9(numC, numD, scratch) \ - LEA(32, scratch, MComplex(numD, numC, SCALE_1, -6)); \ - CMP(32, R(numC), R(numD)); \ - CMOVcc(32, numD, R(numC), CC_G); \ - CMP(32, R(numD), R(scratch)); \ - CMOVcc(32, scratch, R(numD), CC_G); \ - ADD(32, MDisp(RCPU, offsetof(ARM, Cycles)), R(scratch)); -#define CALC_CYCLES_7_DATA_MAIN_RAM(numC, numD, scratch) \ - if (codeMainRAM) \ - { \ - LEA(32, scratch, MRegSum(numD, numC)); \ - ADD(32, MDisp(RCPU, offsetof(ARM, Cycles)), R(scratch)); \ - } \ - else \ - { \ - if (!store) \ - ADD(32, R(numC), Imm8(1)); \ - LEA(32, scratch, MComplex(numD, numC, SCALE_1, -3)); \ - CMP(32, R(numD), R(numC)); \ - CMOVcc(32, numC, R(numD), CC_G); \ - CMP(32, R(numC), R(scratch)); \ - CMOVcc(32, scratch, R(numC), CC_G); \ - ADD(32, MDisp(RCPU, offsetof(ARM, Cycles)), R(scratch)); \ - } -#define CALC_CYCLES_7_DATA_NON_MAIN_RAM(numC, numD, scratch) \ - if (codeMainRAM) \ - { \ - if (!store) \ - ADD(32, R(numD), Imm8(1)); \ - LEA(32, scratch, MComplex(numD, numC, SCALE_1, -3)); \ - CMP(32, R(numD), R(numC)); \ - CMOVcc(32, numC, R(numD), CC_G); \ - CMP(32, R(numC), R(scratch)); \ - CMOVcc(32, scratch, R(numC), CC_G); \ - ADD(32, MDisp(RCPU, offsetof(ARM, Cycles)), R(scratch)); \ - } \ - else \ - { \ - LEA(32, scratch, MComplex(numD, numC, SCALE_1, store ? 0 : 1)); \ - ADD(32, MDisp(RCPU, offsetof(ARM, Cycles)), R(scratch)); \ - } - void* Compiler::Gen_MemoryRoutine9(bool store, int size) { u32 addressMask = ~(size == 32 ? 3 : (size == 16 ? 1 : 0)); @@ -86,12 +42,6 @@ void* Compiler::Gen_MemoryRoutine9(bool store, int size) CMP(32, R(ABI_PARAM1), MDisp(RCPU, offsetof(ARMv5, ITCMSize))); FixupBranch insideITCM = J_CC(CC_B); - // cycle counting! - MOV(32, R(ABI_PARAM4), R(ABI_PARAM1)); - SHR(32, R(ABI_PARAM4), Imm8(12)); - MOVZX(32, 8, ABI_PARAM4, MComplex(RCPU, ABI_PARAM4, SCALE_4, offsetof(ARMv5, MemTimings) + (size == 32 ? 
2 : 1))); - CALC_CYCLES_9(ABI_PARAM3, ABI_PARAM4, RSCRATCH) - if (store) { if (size > 8) @@ -127,7 +77,6 @@ void* Compiler::Gen_MemoryRoutine9(bool store, int size) } SetJumpTarget(insideDTCM); - ADD(32, MDisp(RCPU, offsetof(ARM, Cycles)), R(ABI_PARAM3)); AND(32, R(RSCRATCH), Imm32(0x3FFF & addressMask)); if (store) MOV(size, MComplex(RCPU, RSCRATCH, SCALE_1, offsetof(ARMv5, DTCM)), R(ABI_PARAM2)); @@ -146,16 +95,22 @@ void* Compiler::Gen_MemoryRoutine9(bool store, int size) RET(); SetJumpTarget(insideITCM); - ADD(32, MDisp(RCPU, offsetof(ARM, Cycles)), R(ABI_PARAM3)); MOV(32, R(ABI_PARAM3), R(ABI_PARAM1)); // free up ECX AND(32, R(ABI_PARAM3), Imm32(0x7FFF & addressMask)); if (store) { MOV(size, MComplex(RCPU, ABI_PARAM3, SCALE_1, offsetof(ARMv5, ITCM)), R(ABI_PARAM2)); - XOR(32, R(RSCRATCH), R(RSCRATCH)); - MOV(64, MScaled(ABI_PARAM3, SCALE_4, squeezePointer(cache.ARM9_ITCM)), R(RSCRATCH)); - if (size == 32) - MOV(64, MScaled(ABI_PARAM3, SCALE_4, squeezePointer(cache.ARM9_ITCM) + 8), R(RSCRATCH)); + + // if CodeRanges[pseudoPhysical/256].Blocks.Length > 0 we're writing into code! + static_assert(sizeof(AddressRange) == 16); + LEA(32, ABI_PARAM1, MDisp(ABI_PARAM3, ExeMemRegionOffsets[exeMem_ITCM])); + MOV(32, R(RSCRATCH), R(ABI_PARAM1)); + SHR(32, R(RSCRATCH), Imm8(8)); + SHL(32, R(RSCRATCH), Imm8(4)); + CMP(32, MDisp(RSCRATCH, squeezePointer(CodeRanges) + offsetof(AddressRange, Blocks.Length)), Imm8(0)); + FixupBranch noCode = J_CC(CC_Z); + JMP((u8*)InvalidateByAddr, true); + SetJumpTarget(noCode); } else { @@ -176,83 +131,6 @@ void* Compiler::Gen_MemoryRoutine9(bool store, int size) return res; } -void* Compiler::Gen_MemoryRoutine7(bool store, bool codeMainRAM, int size) -{ - u32 addressMask = ~(size == 32 ? 3 : (size == 16 ? 1 : 0)); - AlignCode4(); - void* res = GetWritableCodePtr(); - - MOV(32, R(RSCRATCH), R(ABI_PARAM1)); - SHR(32, R(RSCRATCH), Imm8(15)); - MOVZX(32, 8, ABI_PARAM4, MScaled(RSCRATCH, SCALE_4, (size == 32 ? 
2 : 0) + squeezePointer(NDS::ARM7MemTimings))); - - MOV(32, R(RSCRATCH), R(ABI_PARAM1)); - AND(32, R(RSCRATCH), Imm32(0xFF000000)); - CMP(32, R(RSCRATCH), Imm32(0x02000000)); - FixupBranch outsideMainRAM = J_CC(CC_NE); - CALC_CYCLES_7_DATA_MAIN_RAM(ABI_PARAM3, ABI_PARAM4, RSCRATCH) - MOV(32, R(ABI_PARAM3), R(ABI_PARAM1)); - AND(32, R(ABI_PARAM3), Imm32((MAIN_RAM_SIZE - 1) & addressMask)); - if (store) - { - MOV(size, MDisp(ABI_PARAM3, squeezePointer(NDS::MainRAM)), R(ABI_PARAM2)); - XOR(32, R(RSCRATCH), R(RSCRATCH)); - MOV(64, MScaled(ABI_PARAM3, SCALE_4, squeezePointer(cache.MainRAM)), R(RSCRATCH)); - if (size == 32) - MOV(64, MScaled(ABI_PARAM3, SCALE_4, squeezePointer(cache.MainRAM) + 8), R(RSCRATCH)); - } - else - { - MOVZX(32, size, RSCRATCH, MDisp(ABI_PARAM3, squeezePointer(NDS::MainRAM))); - if (size == 32) - { - if (ABI_PARAM1 != ECX) - MOV(32, R(ECX), R(ABI_PARAM1)); - AND(32, R(ECX), Imm8(3)); - SHL(32, R(ECX), Imm8(3)); - ROR_(32, R(RSCRATCH), R(ECX)); - } - } - RET(); - - SetJumpTarget(outsideMainRAM); - CALC_CYCLES_7_DATA_NON_MAIN_RAM(ABI_PARAM3, ABI_PARAM4, RSCRATCH) - if (store) - { - if (size > 8) - AND(32, R(ABI_PARAM1), Imm32(addressMask)); - switch (size) - { - case 32: JMP((u8*)NDS::ARM7Write32, true); break; - case 16: JMP((u8*)NDS::ARM7Write16, true); break; - case 8: JMP((u8*)NDS::ARM7Write8, true); break; - } - } - else - { - if (size == 32) - { - ABI_PushRegistersAndAdjustStack({ABI_PARAM1}, 8); - AND(32, R(ABI_PARAM1), Imm32(addressMask)); - ABI_CallFunction(NDS::ARM7Read32); - ABI_PopRegistersAndAdjustStack({ECX}, 8); - AND(32, R(ECX), Imm8(3)); - SHL(32, R(ECX), Imm8(3)); - ROR_(32, R(RSCRATCH), R(ECX)); - RET(); - } - else if (size == 16) - { - AND(32, R(ABI_PARAM1), Imm32(addressMask)); - JMP((u8*)NDS::ARM7Read16, true); - } - else - JMP((u8*)NDS::ARM7Read8, true); - } - - return res; -} - #define MEMORY_SEQ_WHILE_COND \ if (!store) \ MOV(32, currentElement, R(EAX));\ @@ -266,24 +144,13 @@ void* Compiler::Gen_MemoryRoutine7(bool store, bool codeMainRAM, int size) ABI_PARAM1 address ABI_PARAM2 address where registers are stored ABI_PARAM3 how many values to read/write - ABI_PARAM4 code cycles Dolphin x64CodeEmitter is my favourite assembler */ void* Compiler::Gen_MemoryRoutineSeq9(bool store, bool preinc) { - const u8* zero = GetCodePtr(); - ADD(32, MDisp(RCPU, offsetof(ARM, Cycles)), R(ABI_PARAM4)); - RET(); - void* res = (void*)GetWritableCodePtr(); - TEST(32, R(ABI_PARAM3), R(ABI_PARAM3)); - J_CC(CC_Z, zero); - - PUSH(ABI_PARAM3); - PUSH(ABI_PARAM4); // we need you later - const u8* repeat = GetCodePtr(); if (preinc) @@ -311,12 +178,7 @@ void* Compiler::Gen_MemoryRoutineSeq9(bool store, bool preinc) ABI_PopRegistersAndAdjustStack({ABI_PARAM1, ABI_PARAM2, ABI_PARAM3}, 8); MEMORY_SEQ_WHILE_COND - MOV(32, R(RSCRATCH), R(ABI_PARAM1)); - SHR(32, R(RSCRATCH), Imm8(12)); - MOVZX(32, 8, ABI_PARAM2, MComplex(RCPU, RSCRATCH, SCALE_4, 2 + offsetof(ARMv5, MemTimings))); - MOVZX(32, 8, RSCRATCH, MComplex(RCPU, RSCRATCH, SCALE_4, 3 + offsetof(ARMv5, MemTimings))); - - FixupBranch finishIt1 = J(); + RET(); SetJumpTarget(insideDTCM); AND(32, R(RSCRATCH), Imm32(0x3FFF & ~3)); @@ -329,9 +191,7 @@ void* Compiler::Gen_MemoryRoutineSeq9(bool store, bool preinc) MOV(32, R(RSCRATCH), MComplex(RCPU, RSCRATCH, SCALE_1, offsetof(ARMv5, DTCM))); MEMORY_SEQ_WHILE_COND - MOV(32, R(RSCRATCH), Imm32(1)); // sequential access time - MOV(32, R(ABI_PARAM2), Imm32(1)); // non sequential - FixupBranch finishIt2 = J(); + RET(); SetJumpTarget(insideITCM); MOV(32, R(RSCRATCH), R(ABI_PARAM1)); @@ 
-340,31 +200,23 @@ void* Compiler::Gen_MemoryRoutineSeq9(bool store, bool preinc) { MOV(32, R(ABI_PARAM4), currentElement); MOV(32, MComplex(RCPU, RSCRATCH, SCALE_1, offsetof(ARMv5, ITCM)), R(ABI_PARAM4)); - XOR(32, R(ABI_PARAM4), R(ABI_PARAM4)); - MOV(64, MScaled(RSCRATCH, SCALE_4, squeezePointer(cache.ARM9_ITCM)), R(ABI_PARAM4)); - MOV(64, MScaled(RSCRATCH, SCALE_4, squeezePointer(cache.ARM9_ITCM) + 8), R(ABI_PARAM4)); + + ADD(32, R(RSCRATCH), Imm32(ExeMemRegionOffsets[exeMem_ITCM])); + MOV(32, R(ABI_PARAM4), R(RSCRATCH)); + SHR(32, R(RSCRATCH), Imm8(8)); + SHL(32, R(RSCRATCH), Imm8(4)); + CMP(32, MDisp(RSCRATCH, squeezePointer(CodeRanges) + offsetof(AddressRange, Blocks.Length)), Imm8(0)); + FixupBranch noCode = J_CC(CC_Z); + ABI_PushRegistersAndAdjustStack({ABI_PARAM1, ABI_PARAM2, ABI_PARAM3}, 8); + MOV(32, R(ABI_PARAM1), R(ABI_PARAM4)); + CALL((u8*)InvalidateByAddr); + ABI_PopRegistersAndAdjustStack({ABI_PARAM1, ABI_PARAM2, ABI_PARAM3}, 8); + SetJumpTarget(noCode); } else MOV(32, R(RSCRATCH), MComplex(RCPU, RSCRATCH, SCALE_1, offsetof(ARMv5, ITCM))); MEMORY_SEQ_WHILE_COND - MOV(32, R(RSCRATCH), Imm32(1)); - MOV(32, R(ABI_PARAM2), Imm32(1)); - - SetJumpTarget(finishIt1); - SetJumpTarget(finishIt2); - - POP(ABI_PARAM4); - POP(ABI_PARAM3); - - CMP(32, R(ABI_PARAM3), Imm8(1)); - FixupBranch skipSequential = J_CC(CC_E); - SUB(32, R(ABI_PARAM3), Imm8(1)); - IMUL(32, RSCRATCH, R(ABI_PARAM3)); - ADD(32, R(ABI_PARAM2), R(RSCRATCH)); - SetJumpTarget(skipSequential); - - CALC_CYCLES_9(ABI_PARAM4, ABI_PARAM2, RSCRATCH) RET(); return res; @@ -372,18 +224,8 @@ void* Compiler::Gen_MemoryRoutineSeq9(bool store, bool preinc) void* Compiler::Gen_MemoryRoutineSeq7(bool store, bool preinc, bool codeMainRAM) { - const u8* zero = GetCodePtr(); - ADD(32, MDisp(RCPU, offsetof(ARM, Cycles)), R(ABI_PARAM4)); - RET(); - void* res = (void*)GetWritableCodePtr(); - TEST(32, R(ABI_PARAM3), R(ABI_PARAM3)); - J_CC(CC_Z, zero); - - PUSH(ABI_PARAM3); - PUSH(ABI_PARAM4); // we need you later - const u8* repeat = GetCodePtr(); if (preinc) @@ -403,59 +245,227 @@ void* Compiler::Gen_MemoryRoutineSeq7(bool store, bool preinc, bool codeMainRAM) ABI_PopRegistersAndAdjustStack({ABI_PARAM1, ABI_PARAM2, ABI_PARAM3}, 8); MEMORY_SEQ_WHILE_COND - MOV(32, R(RSCRATCH), R(ABI_PARAM1)); - SHR(32, R(RSCRATCH), Imm8(15)); - MOVZX(32, 8, ABI_PARAM2, MScaled(RSCRATCH, SCALE_4, 2 + squeezePointer(NDS::ARM7MemTimings))); - MOVZX(32, 8, RSCRATCH, MScaled(RSCRATCH, SCALE_4, 3 + squeezePointer(NDS::ARM7MemTimings))); + RET(); - POP(ABI_PARAM4); - POP(ABI_PARAM3); + return res; +} - // TODO: optimise this - CMP(32, R(ABI_PARAM3), Imm8(1)); - FixupBranch skipSequential = J_CC(CC_E); - SUB(32, R(ABI_PARAM3), Imm8(1)); - IMUL(32, RSCRATCH, R(ABI_PARAM3)); - ADD(32, R(ABI_PARAM2), R(RSCRATCH)); - SetJumpTarget(skipSequential); +#undef MEMORY_SEQ_WHILE_COND - MOV(32, R(RSCRATCH), R(ABI_PARAM1)); - AND(32, R(RSCRATCH), Imm32(0xFF000000)); - CMP(32, R(RSCRATCH), Imm32(0x02000000)); - FixupBranch outsideMainRAM = J_CC(CC_NE); - CALC_CYCLES_7_DATA_MAIN_RAM(ABI_PARAM4, ABI_PARAM2, RSCRATCH) - RET(); +void Compiler::Comp_MemLoadLiteral(int size, int rd, u32 addr) +{ + u32 val; + // make sure arm7 bios is accessible + u32 tmpR15 = CurCPU->R[15]; + CurCPU->R[15] = R15; + if (size == 32) + { + CurCPU->DataRead32(addr & ~0x3, &val); + val = ROR(val, (addr & 0x3) << 3); + } + else if (size == 16) + CurCPU->DataRead16(addr & ~0x1, &val); + else + CurCPU->DataRead8(addr, &val); + CurCPU->R[15] = tmpR15; - SetJumpTarget(outsideMainRAM); - 
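The SHR/SHL pair in the ITCM store paths above is a scaled table lookup written out by hand. In plain C++ it computes the byte offset of the AddressRange entry covering a 256-byte window of the pseudo-physical space; a minimal sketch (the helper name is hypothetical, the entry size matches the static_assert(sizeof(AddressRange) == 16) above):

    u32 CodeRangeByteOffset(u32 pseudoPhysical)
    {
        // (addr >> 8) << 4  ==  (addr / 256) * sizeof(AddressRange),
        // i.e. the displacement added to squeezePointer(CodeRanges)
        return (pseudoPhysical >> 8) << 4;
    }

A non-zero Blocks.Length at that offset means the write hit translated code, so the routine jumps to (or calls) InvalidateByAddr.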
CALC_CYCLES_7_DATA_NON_MAIN_RAM(ABI_PARAM4, ABI_PARAM2, RSCRATCH) - RET(); + MOV(32, MapReg(rd), Imm32(val)); - return res; + if (Thumb || CurInstr.Cond() == 0xE) + RegCache.PutLiteral(rd, val); + + Comp_AddCycles_CDI(); } -#undef CALC_CYCLES_9 -#undef MEMORY_SEQ_WHILE_COND +void fault(u32 a, u32 b) +{ + printf("actually not static! %x %x\n", a, b); +} -void Compiler::Comp_MemAccess(OpArg rd, bool signExtend, bool store, int size) +void Compiler::Comp_MemAccess(int rd, int rn, const ComplexOperand& op2, int size, int flags) { - IrregularCycles = true; + if (flags & memop_Store) + { + Comp_AddCycles_CD(); + } + else + { + Comp_AddCycles_CDI(); + } - if (store) - MOV(32, R(ABI_PARAM2), rd); - u32 cycles = Num - ? NDS::ARM7MemTimings[CurInstr.CodeCycles][Thumb ? 0 : 2] - : (R15 & 0x2 ? 0 : CurInstr.CodeCycles); - MOV(32, R(ABI_PARAM3), Imm32(cycles)); - CALL(Num == 0 - ? MemoryFuncs9[size >> 4][store] - : MemoryFuncs7[size >> 4][store][CodeRegion == 0x02]); + u32 addressMask = ~0; + if (size == 32) + addressMask = ~3; + if (size == 16) + addressMask = ~1; - if (!store) + if (rn == 15 && rd != 15 && op2.IsImm && !(flags & (memop_Post|memop_Store|memop_Writeback))) { - if (signExtend) - MOVSX(32, size, rd.GetSimpleReg(), R(RSCRATCH)); + Comp_MemLoadLiteral(size, rd, + R15 + op2.Imm * ((flags & memop_SubtractOffset) ? -1 : 1)); + } + else + { + OpArg rdMapped = MapReg(rd); + OpArg rnMapped = MapReg(rn); + + bool inlinePreparation = Num == 1; + u32 constLocalROR32 = 4; + + void* memoryFunc = Num == 0 + ? MemoryFuncs9[size >> 4][!!(flags & memop_Store)] + : MemoryFuncs7[size >> 4][!!((flags & memop_Store))]; + + if ((rd != 15 || (flags & memop_Store)) && op2.IsImm && RegCache.IsLiteral(rn)) + { + u32 addr = RegCache.LiteralValues[rn] + op2.Imm * ((flags & memop_SubtractOffset) ? -1 : 1); + + /*MOV(32, R(ABI_PARAM1), Imm32(CurInstr.Instr)); + MOV(32, R(ABI_PARAM1), Imm32(R15)); + MOV_sum(32, RSCRATCH, rnMapped, Imm32(op2.Imm * ((flags & memop_SubtractOffset) ? -1 : 1))); + CMP(32, R(RSCRATCH), Imm32(addr)); + FixupBranch eq = J_CC(CC_E); + CALL((void*)fault); + SetJumpTarget(eq);*/ + + NDS::MemRegion region; + region.Mem = NULL; + if (Num == 0) + { + ARMv5* cpu5 = (ARMv5*)CurCPU; + + // stupid dtcm... + if (addr >= cpu5->DTCMBase && addr < (cpu5->DTCMBase + cpu5->DTCMSize)) + { + region.Mem = cpu5->DTCM; + region.Mask = 0x3FFF; + } + else + { + NDS::ARM9GetMemRegion(addr, flags & memop_Store, ®ion); + } + } + else + NDS::ARM7GetMemRegion(addr, flags & memop_Store, ®ion); + + if (region.Mem != NULL) + { + void* ptr = ®ion.Mem[addr & addressMask & region.Mask]; + + if (flags & memop_Store) + { + MOV(size, M(ptr), MapReg(rd)); + } + else + { + if (flags & memop_SignExtend) + MOVSX(32, size, rdMapped.GetSimpleReg(), M(ptr)); + else + MOVZX(32, size, rdMapped.GetSimpleReg(), M(ptr)); + + if (size == 32 && addr & ~0x3) + { + ROR_(32, rdMapped, Imm8((addr & 0x3) << 3)); + } + } + + return; + } + + void* specialFunc = GetFuncForAddr(CurCPU, addr, flags & memop_Store, size); + if (specialFunc) + { + memoryFunc = specialFunc; + inlinePreparation = true; + constLocalROR32 = addr & 0x3; + } + } + + X64Reg finalAddr = ABI_PARAM1; + if (flags & memop_Post) + { + MOV(32, R(ABI_PARAM1), rnMapped); + + finalAddr = rnMapped.GetSimpleReg(); + } + + if (op2.IsImm) + { + MOV_sum(32, finalAddr, rnMapped, Imm32(op2.Imm * ((flags & memop_SubtractOffset) ? 
-1 : 1))); + } else - MOVZX(32, size, rd.GetSimpleReg(), R(RSCRATCH)); + { + OpArg rm = MapReg(op2.Reg.Reg); + + if (!(flags & memop_SubtractOffset) && rm.IsSimpleReg() && rnMapped.IsSimpleReg() + && op2.Reg.Op == 0 && op2.Reg.Amount > 0 && op2.Reg.Amount <= 3) + { + LEA(32, finalAddr, + MComplex(rnMapped.GetSimpleReg(), rm.GetSimpleReg(), 1 << op2.Reg.Amount, 0)); + } + else + { + bool throwAway; + OpArg offset = + Comp_RegShiftImm(op2.Reg.Op, op2.Reg.Amount, rm, false, throwAway); + + if (flags & memop_SubtractOffset) + { + MOV(32, R(finalAddr), rnMapped); + if (!offset.IsZero()) + SUB(32, R(finalAddr), offset); + } + else + MOV_sum(32, finalAddr, rnMapped, offset); + } + } + + if ((flags & memop_Writeback) && !(flags & memop_Post)) + MOV(32, rnMapped, R(finalAddr)); + + if (flags & memop_Store) + MOV(32, R(ABI_PARAM2), rdMapped); + + if (!(flags & memop_Store) && inlinePreparation && constLocalROR32 == 4 && size == 32) + MOV(32, rdMapped, R(ABI_PARAM1)); + + if (inlinePreparation && size > 8) + AND(32, R(ABI_PARAM1), Imm8(addressMask)); + + CALL(memoryFunc); + + if (!(flags & memop_Store)) + { + if (inlinePreparation && size == 32) + { + if (constLocalROR32 == 4) + { + static_assert(RSCRATCH3 == ECX); + MOV(32, R(ECX), rdMapped); + AND(32, R(ECX), Imm8(3)); + SHL(32, R(ECX), Imm8(3)); + ROR_(32, R(RSCRATCH), R(ECX)); + } + else if (constLocalROR32 != 0) + ROR_(32, R(RSCRATCH), Imm8(constLocalROR32 << 3)); + } + + if (flags & memop_SignExtend) + MOVSX(32, size, rdMapped.GetSimpleReg(), R(RSCRATCH)); + else + MOVZX(32, size, rdMapped.GetSimpleReg(), R(RSCRATCH)); + } + + if (!(flags & memop_Store) && rd == 15) + { + if (size < 32) + printf("!!! LDR <32 bit PC %08X %x\n", R15, CurInstr.Instr); + { + if (Num == 1) + AND(32, rdMapped, Imm8(0xFE)); // immediate is sign extended + Comp_JumpTo(rdMapped.GetSimpleReg()); + } + } } } @@ -475,16 +485,13 @@ s32 Compiler::Comp_MemAccessBlock(int rn, BitSet16 regs, bool store, bool preinc s32 offset = (regsCount * 4) * (decrement ? -1 : 1); - u32 cycles = Num - ? NDS::ARM7MemTimings[CurInstr.CodeCycles][Thumb ? 0 : 2] - : (R15 & 0x2 ? 0 : CurInstr.CodeCycles); - // we need to make sure that the stack stays aligned to 16 bytes u32 stackAlloc = ((regsCount + 1) & ~1) * 8; - MOV(32, R(ABI_PARAM4), Imm32(cycles)); if (!store) { + Comp_AddCycles_CDI(); + MOV(32, R(ABI_PARAM3), Imm32(regsCount)); SUB(64, R(RSP), stackAlloc <= INT8_MAX ? Imm8(stackAlloc) : Imm32(stackAlloc)); MOV(64, R(ABI_PARAM2), R(RSP)); @@ -548,6 +555,8 @@ s32 Compiler::Comp_MemAccessBlock(int rn, BitSet16 regs, bool store, bool preinc } else { + Comp_AddCycles_CD(); + if (regsCount & 1) PUSH(RSCRATCH); @@ -594,81 +603,45 @@ s32 Compiler::Comp_MemAccessBlock(int rn, BitSet16 regs, bool store, bool preinc return offset; } -OpArg Compiler::A_Comp_GetMemWBOffset() -{ - if (!(CurInstr.Instr & (1 << 25))) - { - u32 imm = CurInstr.Instr & 0xFFF; - return Imm32(imm); - } - else - { - int op = (CurInstr.Instr >> 5) & 0x3; - int amount = (CurInstr.Instr >> 7) & 0x1F; - OpArg rm = MapReg(CurInstr.A_Reg(0)); - bool carryUsed; - - return Comp_RegShiftImm(op, amount, rm, false, carryUsed); - } -} void Compiler::A_Comp_MemWB() { - OpArg rn = MapReg(CurInstr.A_Reg(16)); - OpArg rd = MapReg(CurInstr.A_Reg(12)); bool load = CurInstr.Instr & (1 << 20); bool byte = CurInstr.Instr & (1 << 22); int size = byte ? 
8 : 32; + + int flags = 0; + if (!load) + flags |= memop_Store; + if (!(CurInstr.Instr & (1 << 24))) + flags |= memop_Post; + if (CurInstr.Instr & (1 << 21)) + flags |= memop_Writeback; + if (!(CurInstr.Instr & (1 << 23))) + flags |= memop_SubtractOffset; - if (CurInstr.Instr & (1 << 24)) + ComplexOperand offset; + if (!(CurInstr.Instr & (1 << 25))) { - OpArg offset = A_Comp_GetMemWBOffset(); - if (CurInstr.Instr & (1 << 23)) - MOV_sum(32, ABI_PARAM1, rn, offset); - else - { - MOV(32, R(ABI_PARAM1), rn); - SUB(32, R(ABI_PARAM1), offset); - } - - if (CurInstr.Instr & (1 << 21)) - MOV(32, rn, R(ABI_PARAM1)); + offset = ComplexOperand(CurInstr.Instr & 0xFFF); } else - MOV(32, R(ABI_PARAM1), rn); - - if (!(CurInstr.Instr & (1 << 24))) { - OpArg offset = A_Comp_GetMemWBOffset(); + int op = (CurInstr.Instr >> 5) & 0x3; + int amount = (CurInstr.Instr >> 7) & 0x1F; + int rm = CurInstr.A_Reg(0); - if (CurInstr.Instr & (1 << 23)) - ADD(32, rn, offset); - else - SUB(32, rn, offset); + offset = ComplexOperand(rm, op, amount); } - Comp_MemAccess(rd, false, !load, byte ? 8 : 32); - if (load && CurInstr.A_Reg(12) == 15) - { - if (byte) - printf("!!! LDRB PC %08X\n", R15); - else - { - if (Num == 1) - AND(32, rd, Imm8(0xFE)); // immediate is sign extended - Comp_JumpTo(rd.GetSimpleReg()); - } - } + Comp_MemAccess(CurInstr.A_Reg(12), CurInstr.A_Reg(16), offset, size, flags); } void Compiler::A_Comp_MemHalf() { - OpArg rn = MapReg(CurInstr.A_Reg(16)); - OpArg rd = MapReg(CurInstr.A_Reg(12)); - - OpArg offset = CurInstr.Instr & (1 << 22) - ? Imm32(CurInstr.Instr & 0xF | ((CurInstr.Instr >> 4) & 0xF0)) - : MapReg(CurInstr.A_Reg(0)); + ComplexOperand offset = CurInstr.Instr & (1 << 22) + ? ComplexOperand(CurInstr.Instr & 0xF | ((CurInstr.Instr >> 4) & 0xF0)) + : ComplexOperand(CurInstr.A_Reg(0), 0, 0); int op = (CurInstr.Instr >> 5) & 0x3; bool load = CurInstr.Instr & (1 << 20); @@ -689,49 +662,29 @@ void Compiler::A_Comp_MemHalf() if (size == 32 && Num == 1) return; // NOP - if (CurInstr.Instr & (1 << 24)) - { - if (CurInstr.Instr & (1 << 23)) - MOV_sum(32, ABI_PARAM1, rn, offset); - else - { - MOV(32, R(ABI_PARAM1), rn); - SUB(32, R(ABI_PARAM1), offset); - } - - if (CurInstr.Instr & (1 << 21)) - MOV(32, rn, R(ABI_PARAM1)); - } - else - MOV(32, R(ABI_PARAM1), rn); - + int flags = 0; + if (signExtend) + flags |= memop_SignExtend; + if (!load) + flags |= memop_Store; if (!(CurInstr.Instr & (1 << 24))) - { - if (CurInstr.Instr & (1 << 23)) - ADD(32, rn, offset); - else - SUB(32, rn, offset); - } + flags |= memop_Post; + if (!(CurInstr.Instr & (1 << 23))) + flags |= memop_SubtractOffset; + if (CurInstr.Instr & (1 << 21)) + flags |= memop_Writeback; - Comp_MemAccess(rd, signExtend, !load, size); - - if (load && CurInstr.A_Reg(12) == 15) - printf("!!! MemHalf op PC %08X\n", R15);; + Comp_MemAccess(CurInstr.A_Reg(12), CurInstr.A_Reg(16), offset, size, flags); } void Compiler::T_Comp_MemReg() { - OpArg rd = MapReg(CurInstr.T_Reg(0)); - OpArg rb = MapReg(CurInstr.T_Reg(3)); - OpArg ro = MapReg(CurInstr.T_Reg(6)); - int op = (CurInstr.Instr >> 10) & 0x3; bool load = op & 0x2; bool byte = op & 0x1; - MOV_sum(32, ABI_PARAM1, rb, ro); - - Comp_MemAccess(rd, false, !load, byte ? 8 : 32); + Comp_MemAccess(CurInstr.T_Reg(0), CurInstr.T_Reg(3), ComplexOperand(CurInstr.T_Reg(6), 0, 0), + byte ? 8 : 32, load ? 
0 : memop_Store); } void Compiler::A_Comp_LDM_STM() @@ -758,67 +711,55 @@ void Compiler::A_Comp_LDM_STM() void Compiler::T_Comp_MemImm() { - OpArg rd = MapReg(CurInstr.T_Reg(0)); - OpArg rb = MapReg(CurInstr.T_Reg(3)); - int op = (CurInstr.Instr >> 11) & 0x3; bool load = op & 0x1; bool byte = op & 0x2; u32 offset = ((CurInstr.Instr >> 6) & 0x1F) * (byte ? 1 : 4); - LEA(32, ABI_PARAM1, MDisp(rb.GetSimpleReg(), offset)); - - Comp_MemAccess(rd, false, !load, byte ? 8 : 32); + Comp_MemAccess(CurInstr.T_Reg(0), CurInstr.T_Reg(3), ComplexOperand(offset), + byte ? 8 : 32, load ? 0 : memop_Store); } void Compiler::T_Comp_MemRegHalf() { - OpArg rd = MapReg(CurInstr.T_Reg(0)); - OpArg rb = MapReg(CurInstr.T_Reg(3)); - OpArg ro = MapReg(CurInstr.T_Reg(6)); - int op = (CurInstr.Instr >> 10) & 0x3; bool load = op != 0; int size = op != 1 ? 16 : 8; bool signExtend = op & 1; - MOV_sum(32, ABI_PARAM1, rb, ro); + int flags = 0; + if (signExtend) + flags |= memop_SignExtend; + if (!load) + flags |= memop_Store; - Comp_MemAccess(rd, signExtend, !load, size); + Comp_MemAccess(CurInstr.T_Reg(0), CurInstr.T_Reg(3), ComplexOperand(CurInstr.T_Reg(6), 0, 0), + size, flags); } void Compiler::T_Comp_MemImmHalf() { - OpArg rd = MapReg(CurInstr.T_Reg(0)); - OpArg rb = MapReg(CurInstr.T_Reg(3)); - u32 offset = (CurInstr.Instr >> 5) & 0x3E; bool load = CurInstr.Instr & (1 << 11); - LEA(32, ABI_PARAM1, MDisp(rb.GetSimpleReg(), offset)); - - Comp_MemAccess(rd, false, !load, 16); + Comp_MemAccess(CurInstr.T_Reg(0), CurInstr.T_Reg(3), ComplexOperand(offset), 16, + load ? 0 : memop_Store); } void Compiler::T_Comp_LoadPCRel() { - OpArg rd = MapReg(CurInstr.T_Reg(8)); u32 addr = (R15 & ~0x2) + ((CurInstr.Instr & 0xFF) << 2); - // hopefully this doesn't break - u32 val; CurCPU->DataRead32(addr, &val); - MOV(32, rd, Imm32(val)); + Comp_MemLoadLiteral(32, CurInstr.T_Reg(8), addr); } void Compiler::T_Comp_MemSPRel() { u32 offset = (CurInstr.Instr & 0xFF) * 4; - OpArg rd = MapReg(CurInstr.T_Reg(8)); bool load = CurInstr.Instr & (1 << 11); - LEA(32, ABI_PARAM1, MDisp(MapReg(13).GetSimpleReg(), offset)); - - Comp_MemAccess(rd, false, !load, 32); + Comp_MemAccess(CurInstr.T_Reg(8), 13, ComplexOperand(offset), 32, + load ? 
0 : memop_Store); } void Compiler::T_Comp_PUSH_POP() diff --git a/src/ARM_InstrInfo.cpp b/src/ARM_InstrInfo.cpp index 9239e29..0fbde26 100644 --- a/src/ARM_InstrInfo.cpp +++ b/src/ARM_InstrInfo.cpp @@ -36,7 +36,7 @@ enum { A_StaticShiftSetC = 1 << 18, A_SetC = 1 << 19, - A_WriteMemory = 1 << 20, + A_WriteMem = 1 << 20 }; #define A_BIOP A_Read16 @@ -109,7 +109,7 @@ const u32 A_UMULL = A_MulFlags | A_Write16 | A_Write12 | A_Read0 | A_Read8 | ak( const u32 A_UMLAL = A_MulFlags | A_Write16 | A_Write12 | A_Read16 | A_Read12 | A_Read0 | A_Read8 | ak(ak_UMLAL); const u32 A_SMULL = A_MulFlags | A_Write16 | A_Write12 | A_Read0 | A_Read8 | ak(ak_SMULL); const u32 A_SMLAL = A_MulFlags | A_Write16 | A_Write12 | A_Read16 | A_Read12 | A_Read0 | A_Read8 | ak(ak_SMLAL); -const u32 A_SMLAxy = A_Write16 | A_Read0 | A_Read8 | A_Read12 | ak(ak_SMLALxy); +const u32 A_SMLAxy = A_Write16 | A_Read0 | A_Read8 | A_Read12 | ak(ak_SMLAxy); const u32 A_SMLAWy = A_Write16 | A_Read0 | A_Read8 | A_Read12 | ak(ak_SMLAWy); const u32 A_SMULWy = A_Write16 | A_Read0 | A_Read8 | ak(ak_SMULWy); const u32 A_SMLALxy = A_Write16 | A_Write12 | A_Read16 | A_Read12 | A_Read0 | A_Read8 | ak(ak_SMLALxy); @@ -123,7 +123,7 @@ const u32 A_QDADD = A_Write12 | A_Read0 | A_Read16 | A_UnkOnARM7 | ak(ak_QDADD); const u32 A_QDSUB = A_Write12 | A_Read0 | A_Read16 | A_UnkOnARM7 | ak(ak_QDSUB); #define A_LDR A_Write12 -#define A_STR A_Read12 | A_WriteMemory +#define A_STR A_Read12 | A_WriteMem #define A_IMPLEMENT_WB_LDRSTR(x,k) \ const u32 A_##x##_IMM = A_##k | A_Read16 | A_MemWriteback | ak(ak_##x##_IMM); \ @@ -144,7 +144,7 @@ A_IMPLEMENT_WB_LDRSTR(LDR,LDR) A_IMPLEMENT_WB_LDRSTR(LDRB,LDR) #define A_LDRD A_Write12Double -#define A_STRD A_Read12Double | A_WriteMemory +#define A_STRD A_Read12Double | A_WriteMem #define A_IMPLEMENT_HD_LDRSTR(x,k) \ const u32 A_##x##_IMM = A_##k | A_Read16 | A_MemWriteback | ak(ak_##x##_IMM); \ @@ -159,11 +159,11 @@ A_IMPLEMENT_HD_LDRSTR(LDRH,LDR) A_IMPLEMENT_HD_LDRSTR(LDRSB,LDR) A_IMPLEMENT_HD_LDRSTR(LDRSH,LDR) -const u32 A_SWP = A_Write12 | A_Read16 | A_Read0 | A_WriteMemory | ak(ak_SWP); -const u32 A_SWPB = A_Write12 | A_Read16 | A_Read0 | A_WriteMemory | ak(ak_SWPB); +const u32 A_SWP = A_Write12 | A_Read16 | A_Read0 | A_WriteMem | ak(ak_SWP); +const u32 A_SWPB = A_Write12 | A_Read16 | A_Read0 | A_WriteMem | ak(ak_SWPB); const u32 A_LDM = A_Read16 | A_MemWriteback | ak(ak_LDM); -const u32 A_STM = A_Read16 | A_MemWriteback | A_WriteMemory | ak(ak_STM); +const u32 A_STM = A_Read16 | A_MemWriteback | A_WriteMem | ak(ak_STM); const u32 A_B = A_BranchAlways | ak(ak_B); const u32 A_BL = A_BranchAlways | A_Link | ak(ak_BL); @@ -181,7 +181,7 @@ const u32 A_SVC = A_BranchAlways | A_Link | ak(ak_SVC); // THUMB -#define tk(x) ((x) << 21) +#define tk(x) ((x) << 22) enum { T_Read0 = 1 << 0, @@ -210,6 +210,8 @@ enum { T_SetMaybeC = 1 << 18, T_ReadC = 1 << 19, T_SetC = 1 << 20, + + T_WriteMem = 1 << 21, }; const u32 T_LSL_IMM = T_SetNZ | T_SetMaybeC | T_Write0 | T_Read3 | tk(tk_LSL_IMM); @@ -253,30 +255,30 @@ const u32 T_ADD_SP = T_WriteR13 | T_ReadR13 | tk(tk_ADD_SP); const u32 T_LDR_PCREL = T_Write8 | tk(tk_LDR_PCREL); -const u32 T_STR_REG = T_Read0 | T_Read3 | T_Read6 | tk(tk_STR_REG); -const u32 T_STRB_REG = T_Read0 | T_Read3 | T_Read6 | tk(tk_STRB_REG); +const u32 T_STR_REG = T_Read0 | T_Read3 | T_Read6 | T_WriteMem | tk(tk_STR_REG); +const u32 T_STRB_REG = T_Read0 | T_Read3 | T_Read6 | T_WriteMem | tk(tk_STRB_REG); const u32 T_LDR_REG = T_Write0 | T_Read3 | T_Read6 | tk(tk_LDR_REG); const u32 T_LDRB_REG = T_Write0 | T_Read3 
| T_Read6 | tk(tk_LDRB_REG); -const u32 T_STRH_REG = T_Read0 | T_Read3 | T_Read6 | tk(tk_STRH_REG); +const u32 T_STRH_REG = T_Read0 | T_Read3 | T_Read6 | T_WriteMem | tk(tk_STRH_REG); const u32 T_LDRSB_REG = T_Write0 | T_Read3 | T_Read6 | tk(tk_LDRSB_REG); const u32 T_LDRH_REG = T_Write0 | T_Read3 | T_Read6 | tk(tk_LDRH_REG); const u32 T_LDRSH_REG = T_Write0 | T_Read3 | T_Read6 | tk(tk_LDRSH_REG); -const u32 T_STR_IMM = T_Read0 | T_Read3 | tk(tk_STR_IMM); +const u32 T_STR_IMM = T_Read0 | T_Read3 | T_WriteMem | tk(tk_STR_IMM); const u32 T_LDR_IMM = T_Write0 | T_Read3 | tk(tk_LDR_IMM); -const u32 T_STRB_IMM = T_Read0 | T_Read3 | tk(tk_STRB_IMM); +const u32 T_STRB_IMM = T_Read0 | T_Read3 | T_WriteMem | tk(tk_STRB_IMM); const u32 T_LDRB_IMM = T_Write0 | T_Read3 | tk(tk_LDRB_IMM); -const u32 T_STRH_IMM = T_Read0 | T_Read3 | tk(tk_STRH_IMM); +const u32 T_STRH_IMM = T_Read0 | T_Read3 | T_WriteMem | tk(tk_STRH_IMM); const u32 T_LDRH_IMM = T_Write0 | T_Read3 | tk(tk_LDRH_IMM); -const u32 T_STR_SPREL = T_Read8 | T_ReadR13 | tk(tk_STR_SPREL); +const u32 T_STR_SPREL = T_Read8 | T_ReadR13 | T_WriteMem | tk(tk_STR_SPREL); const u32 T_LDR_SPREL = T_Write8 | T_ReadR13 | tk(tk_LDR_SPREL); -const u32 T_PUSH = T_ReadR13 | T_WriteR13 | tk(tk_PUSH); +const u32 T_PUSH = T_ReadR13 | T_WriteR13 | T_WriteMem | tk(tk_PUSH); const u32 T_POP = T_PopPC | T_ReadR13 | T_WriteR13 | tk(tk_POP); const u32 T_LDMIA = T_Read8 | T_Write8 | tk(tk_LDMIA); -const u32 T_STMIA = T_Read8 | T_Write8 | tk(tk_STMIA); +const u32 T_STMIA = T_Read8 | T_Write8 | T_WriteMem | tk(tk_STMIA); const u32 T_BCOND = T_BranchAlways | tk(tk_BCOND); const u32 T_BX = T_BranchAlways | T_ReadHi3 | tk(tk_BX); @@ -307,7 +309,7 @@ Info Decode(bool thumb, u32 num, u32 instr) if (thumb) { u32 data = THUMBInstrTable[(instr >> 6) & 0x3FF]; - res.Kind = (data >> 21) & 0x3F; + res.Kind = (data >> 22) & 0x3F; if (data & T_Read0) res.SrcRegs |= 1 << (instr & 0x7); @@ -356,6 +358,9 @@ Info Decode(bool thumb, u32 num, u32 instr) if (data & T_SetC) res.WriteFlags |= flag_C; + if (data & T_WriteMem) + res.SpecialKind = special_WriteMem; + res.EndBlock |= res.Branches(); if (res.Kind == tk_BCOND) @@ -382,6 +387,9 @@ Info Decode(bool thumb, u32 num, u32 instr) u32 id = (cn<<8)|(cm<<4)|cpinfo; if (id == 0x704 || id == 0x782 || id == 0x750 || id == 0x751 || id == 0x752) res.EndBlock |= true; + + if (id == 0x704 || id == 0x782) + res.SpecialKind = special_WaitForInterrupt; } if (res.Kind == ak_MCR || res.Kind == ak_MRC) { @@ -449,6 +457,9 @@ Info Decode(bool thumb, u32 num, u32 instr) if ((data & A_SetC) || (data & A_StaticShiftSetC) && ((instr >> 7) & 0x1F)) res.WriteFlags |= flag_C; + if (data & A_WriteMem) + res.SpecialKind = special_WriteMem; + if ((instr >> 28) < 0xE) { // make non conditional flag sets conditional diff --git a/src/ARM_InstrInfo.h b/src/ARM_InstrInfo.h index d01c600..d02f168 100644 --- a/src/ARM_InstrInfo.h +++ b/src/ARM_InstrInfo.h @@ -226,18 +226,27 @@ enum flag_V = 1 << 0, }; +enum +{ + special_NotSpecialAtAll = 0, + special_WriteMem, + special_WaitForInterrupt +}; + struct Info { u16 DstRegs, SrcRegs; u16 Kind; + u8 SpecialKind; + u8 ReadFlags; // lower 4 bits - set always // upper 4 bits - might set flag u8 WriteFlags; bool EndBlock; - bool Branches() + bool Branches() const { return DstRegs & (1 << 15); } diff --git a/src/CP15.cpp b/src/CP15.cpp index e6e91c3..10c3b1b 100644 --- a/src/CP15.cpp +++ b/src/CP15.cpp @@ -561,9 +561,11 @@ void ARMv5::CP15Write(u32 id, u32 val) case 0x750: + ARMJIT::InvalidateAll(); ICacheInvalidateAll(); return; case 
0x751: + ARMJIT::InvalidateByAddr(ARMJIT::TranslateAddr<0>(val)); ICacheInvalidateByAddr(val); return; case 0x752: @@ -813,7 +815,7 @@ void ARMv5::DataWrite8(u32 addr, u8 val) DataCycles = 1; *(u8*)&ITCM[addr & 0x7FFF] = val; #ifdef JIT_ENABLED - ARMJIT::cache.ARM9_ITCM[(addr & 0x7FFF) >> 1] = NULL; + ARMJIT::InvalidateITCM(addr & 0x7FFF); #endif return; } @@ -837,7 +839,7 @@ void ARMv5::DataWrite16(u32 addr, u16 val) DataCycles = 1; *(u16*)&ITCM[addr & 0x7FFF] = val; #ifdef JIT_ENABLED - ARMJIT::cache.ARM9_ITCM[(addr & 0x7FFF) >> 1] = NULL; + ARMJIT::InvalidateITCM(addr & 0x7FFF); #endif return; } @@ -861,8 +863,7 @@ void ARMv5::DataWrite32(u32 addr, u32 val) DataCycles = 1; *(u32*)&ITCM[addr & 0x7FFF] = val; #ifdef JIT_ENABLED - ARMJIT::cache.ARM9_ITCM[(addr & 0x7FFF) >> 1] = NULL; - ARMJIT::cache.ARM9_ITCM[((addr + 2) & 0x7FFF) >> 1] = NULL; + ARMJIT::InvalidateITCM(addr & 0x7FFF); #endif return; } @@ -886,8 +887,7 @@ void ARMv5::DataWrite32S(u32 addr, u32 val) DataCycles += 1; *(u32*)&ITCM[addr & 0x7FFF] = val; #ifdef JIT_ENABLED - ARMJIT::cache.ARM9_ITCM[(addr & 0x7FFF) >> 1] = NULL; - ARMJIT::cache.ARM9_ITCM[((addr & 0x7FFF) >> 1) + 1] = NULL; + ARMJIT::InvalidateITCM(addr & 0x7FFF); #endif return; } diff --git a/src/Config.cpp b/src/Config.cpp index 3cff0ed..63d61a3 100644 --- a/src/Config.cpp +++ b/src/Config.cpp @@ -37,6 +37,7 @@ int GL_Antialias; #ifdef JIT_ENABLED bool JIT_Enable = false; int JIT_MaxBlockSize = 12; +bool JIT_BrancheOptimisations = true; #endif ConfigEntry ConfigFile[] = @@ -50,6 +51,7 @@ ConfigEntry ConfigFile[] = #ifdef JIT_ENABLED {"JIT_Enable", 0, &JIT_Enable, 0, NULL, 0}, {"JIT_MaxBlockSize", 0, &JIT_MaxBlockSize, 10, NULL, 0}, + {"JIT_BrancheOptimisations", 0, &JIT_BrancheOptimisations, 1, NULL, 0}, #endif {"", -1, NULL, 0, NULL, 0} diff --git a/src/Config.h b/src/Config.h index c13eae3..0fcefc3 100644 --- a/src/Config.h +++ b/src/Config.h @@ -49,6 +49,7 @@ extern int GL_Antialias; #ifdef JIT_ENABLED extern bool JIT_Enable; extern int JIT_MaxBlockSize; +extern bool JIT_BrancheOptimisations; #endif } diff --git a/src/NDS.cpp b/src/NDS.cpp index 1baa308..e9e6795 100644 --- a/src/NDS.cpp +++ b/src/NDS.cpp @@ -536,7 +536,7 @@ void Reset() RCnt = 0; #ifdef JIT_ENABLED - ARMJIT::InvalidateBlockCache(); + ARMJIT::ResetBlockCache(); #endif NDSCart::Reset(); @@ -757,7 +757,7 @@ bool DoSavestate(Savestate* file) #ifdef JIT_ENABLED if (!file->Saving) { - ARMJIT::InvalidateBlockCache(); + ARMJIT::ResetBlockCache(); } #endif @@ -1870,10 +1870,6 @@ u32 ARM9Read32(u32 addr) void ARM9Write8(u32 addr, u8 val) { -#ifdef JIT_ENABLED - ARMJIT::Invalidate16<0>(addr); -#endif - switch (addr & 0xFF000000) { case 0x02000000: @@ -1924,10 +1920,6 @@ void ARM9Write8(u32 addr, u8 val) void ARM9Write16(u32 addr, u16 val) { -#ifdef JIT_ENABLED - ARMJIT::Invalidate16<0>(addr); -#endif - switch (addr & 0xFF000000) { case 0x02000000: @@ -1994,10 +1986,6 @@ void ARM9Write16(u32 addr, u16 val) void ARM9Write32(u32 addr, u32 val) { -#ifdef JIT_ENABLED - ARMJIT::Invalidate32<0>(addr); -#endif - switch (addr & 0xFF000000) { case 0x02000000: @@ -2292,7 +2280,7 @@ u32 ARM7Read32(u32 addr) void ARM7Write8(u32 addr, u8 val) { #ifdef JIT_ENABLED - ARMJIT::Invalidate16<1>(addr); + ARMJIT::InvalidateByAddr7(addr); #endif switch (addr & 0xFF800000) @@ -2355,7 +2343,7 @@ void ARM7Write8(u32 addr, u8 val) void ARM7Write16(u32 addr, u16 val) { #ifdef JIT_ENABLED - ARMJIT::Invalidate16<1>(addr); + ARMJIT::InvalidateByAddr7(addr); #endif switch (addr & 0xFF800000) @@ -2428,7 +2416,7 @@ void ARM7Write16(u32 
addr, u16 val) void ARM7Write32(u32 addr, u32 val) { #ifdef JIT_ENABLED - ARMJIT::Invalidate32<1>(addr); + ARMJIT::InvalidateByAddr7(addr); #endif switch (addr & 0xFF800000) diff --git a/src/libui_sdl/DlgEmuSettings.cpp b/src/libui_sdl/DlgEmuSettings.cpp index 09ea8eb..45e8e0c 100644 --- a/src/libui_sdl/DlgEmuSettings.cpp +++ b/src/libui_sdl/DlgEmuSettings.cpp @@ -42,6 +42,7 @@ uiCheckbox* cbDirectBoot; #ifdef JIT_ENABLED uiCheckbox* cbJITEnabled; uiEntry* enJITMaxBlockSize; +uiCheckbox* cbJITBranchOptimisations; #endif int OnCloseWindow(uiWindow* window, void* blarg) @@ -64,13 +65,15 @@ void OnOk(uiButton* btn, void* blarg) bool enableJit = uiCheckboxChecked(cbJITEnabled); char* maxBlockSizeStr = uiEntryText(enJITMaxBlockSize); long blockSize = strtol(maxBlockSizeStr, NULL, 10); + bool branchOptimisations = uiCheckboxChecked(cbJITBranchOptimisations); uiFreeText(maxBlockSizeStr); if (blockSize < 1) blockSize = 1; if (blockSize > 32) blockSize = 32; - if (enableJit != Config::JIT_Enable || blockSize != Config::JIT_MaxBlockSize) + if (enableJit != Config::JIT_Enable || blockSize != Config::JIT_MaxBlockSize || + branchOptimisations != Config::JIT_BrancheOptimisations) { if (RunningSomething && !uiMsgBoxConfirm(win, "Reset emulator", @@ -79,6 +82,7 @@ void OnOk(uiButton* btn, void* blarg) Config::JIT_Enable = enableJit; Config::JIT_MaxBlockSize = blockSize; + Config::JIT_BrancheOptimisations = uiCheckboxChecked(cbJITBranchOptimisations); restart = true; } @@ -101,9 +105,15 @@ void OnOk(uiButton* btn, void* blarg) void OnJITStateChanged(uiCheckbox* cb, void* blarg) { if (uiCheckboxChecked(cb)) + { uiControlEnable(uiControl(enJITMaxBlockSize)); + uiControlEnable(uiControl(cbJITBranchOptimisations)); + } else + { uiControlDisable(uiControl(enJITMaxBlockSize)); + uiControlDisable(uiControl(cbJITBranchOptimisations)); + } } #endif @@ -159,6 +169,14 @@ void Open() enJITMaxBlockSize = uiNewEntry(); uiBoxAppend(row, uiControl(enJITMaxBlockSize), 0); } + + { + uiBox* row = uiNewHorizontalBox(); + uiBoxAppend(in_ctrl, uiControl(row), 0); + + cbJITBranchOptimisations = uiNewCheckbox("Branch optimisations (breaks games in rare cases!)"); + uiBoxAppend(row, uiControl(cbJITBranchOptimisations), 0); + } } #endif @@ -194,6 +212,8 @@ void Open() uiEntrySetText(enJITMaxBlockSize, maxBlockSizeStr); } OnJITStateChanged(cbJITEnabled, NULL); + + uiCheckboxSetChecked(cbJITBranchOptimisations, Config::JIT_BrancheOptimisations); #endif uiControlShow(uiControl(win)); -- cgit v1.2.3 From aa23f21b8df9780578adf6e6ea6bcfba3fee83bb Mon Sep 17 00:00:00 2001 From: RSDuck Date: Wed, 16 Oct 2019 23:39:12 +0200 Subject: decrease jit block cache address granularity; fixes Dragon Quest IX. Move code with side effects out of assert; fixes the release build (thanks to m4wx for this one). Also remove some leftovers of jit pipelining. --- src/ARMJIT.cpp | 42 ++++++++++++++++++++++--------------- src/ARMJIT_Internal.h | 3 +-- src/ARMJIT_x64/ARMJIT_LoadStore.cpp | 31 ++++++++++++++------------- src/ARM_InstrInfo.cpp | 25 ++++++++++++++-------- src/ARM_InstrInfo.h | 3 ++- src/libui_sdl/main.cpp | 2 ++ 6 files changed, 62 insertions(+), 44 deletions(-) (limited to 'src/ARM_InstrInfo.cpp') diff --git a/src/ARMJIT.cpp b/src/ARMJIT.cpp index 686bdd6..19a5e70 100644 --- a/src/ARMJIT.cpp +++ b/src/ARMJIT.cpp @@ -106,7 +106,7 @@ u32 AddrTranslate9[0x2000]; u32 AddrTranslate7[0x4000]; JitBlockEntry FastBlockAccess[ExeMemSpaceSize / 2]; -AddressRange CodeRanges[ExeMemSpaceSize / 256]; +AddressRange CodeRanges[ExeMemSpaceSize / 512]; TinyVector<JitBlock*> JitBlocks;
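The granularity change above halves the number of CodeRanges buckets: one AddressRange now guards each 512-byte window of the pseudo-physical space instead of each 256-byte one. A minimal sketch of the membership test this table answers (hypothetical helper; declarations as in ARMJIT_Internal.h):

    bool RegionMightHoldCode(u32 pseudoPhysical)
    {
        // a non-empty block list means some JIT block overlaps this
        // 512-byte window, so a write here must trigger invalidation
        return CodeRanges[pseudoPhysical / 512].Blocks.Length > 0;
    }

Coarser buckets cost a few more false positives per write but shrink the table and the per-block bookkeeping; per the commit message this trade-off is what fixes Dragon Quest IX.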
JitBlock* RestoreCandidates[0x1000] = {NULL}; @@ -285,6 +285,13 @@ InterpreterFunc InterpretARM[ARMInstrInfo::ak_Count] = #undef F_MEM_HD #undef F +void T_BL_LONG(ARM* cpu) +{ + ARMInterpreter::T_BL_LONG_1(cpu); + cpu->R[15] += 2; + ARMInterpreter::T_BL_LONG_2(cpu); +} + #define F(x) ARMInterpreter::T_##x InterpreterFunc InterpretTHUMB[ARMInstrInfo::tk_Count] = { @@ -302,7 +309,7 @@ InterpreterFunc InterpretTHUMB[ARMInstrInfo::tk_Count] = F(PUSH), F(POP), F(LDMIA), F(STMIA), F(BCOND), F(BX), F(BLX_REG), F(B), F(BL_LONG_1), F(BL_LONG_2), F(UNK), F(SVC), - NULL // BL_LONG psudo opcode + T_BL_LONG // BL_LONG pseudo opcode }; #undef F @@ -341,7 +348,7 @@ void CompileBlock(ARM* cpu) JIT_DEBUGPRINT("start block %x (%x) %p %p (region invalidates %dx)\n", blockAddr, pseudoPhysicalAddr, FastBlockAccess[pseudoPhysicalAddr / 2], cpu->Num == 0 ? LookUpBlock<0>(blockAddr) : LookUpBlock<1>(blockAddr), - CodeRanges[pseudoPhysicalAddr / 256].TimesInvalidated); + CodeRanges[pseudoPhysicalAddr / 512].TimesInvalidated); u32 lastSegmentStart = blockAddr; @@ -352,7 +359,7 @@ void CompileBlock(ARM* cpu) instrs[i].BranchFlags = 0; instrs[i].SetFlags = 0; instrs[i].Instr = nextInstr[0]; - instrs[i].NextInstr[0] = nextInstr[0] = nextInstr[1]; + nextInstr[0] = nextInstr[1]; instrs[i].Addr = nextInstrAddr[0]; nextInstrAddr[0] = nextInstrAddr[1]; @@ -361,7 +368,7 @@ void CompileBlock(ARM* cpu) u32 translatedAddr = (cpu->Num == 0 ? TranslateAddr<0>(instrs[i].Addr) - : TranslateAddr<1>(instrs[i].Addr)) & ~0xFF; + : TranslateAddr<1>(instrs[i].Addr)) & ~0x1FF; if (i == 0 || translatedAddr != addresseRanges[numAddressRanges - 1]) { bool returning = false; @@ -400,7 +407,6 @@ void CompileBlock(ARM* cpu) nextInstr[1] = cpuv4->CodeRead32(r15); instrs[i].CodeCycles = cpu->CodeCycles; } - instrs[i].NextInstr[1] = nextInstr[1]; instrs[i].Info = ARMInstrInfo::Decode(thumb, cpu->Num, instrs[i].Instr); cpu->R[15] = r15; @@ -584,7 +590,7 @@ void CompileBlock(ARM* cpu) for (int j = 0; j < numAddressRanges; j++) { assert(addresseRanges[j] == block->AddressRanges()[j]); - CodeRanges[addresseRanges[j] / 256].Blocks.Add(block); + CodeRanges[addresseRanges[j] / 512].Blocks.Add(block); } FastBlockAccess[block->PseudoPhysicalAddr / 2] = block->EntryPoint; @@ -595,7 +601,7 @@ void CompileBlock(ARM* cpu) void InvalidateByAddr(u32 pseudoPhysical) { JIT_DEBUGPRINT("invalidating by addr %x\n", pseudoPhysical); - AddressRange* range = &CodeRanges[pseudoPhysical / 256]; + AddressRange* range = &CodeRanges[pseudoPhysical / 512]; int startLength = range->Blocks.Length; for (int i = 0; i < range->Blocks.Length; i++) { @@ -604,15 +610,17 @@ void InvalidateByAddr(u32 pseudoPhysical) for (int j = 0; j < block->NumAddresses; j++) { u32 addr = block->AddressRanges()[j]; - if ((addr / 256) != (pseudoPhysical / 256)) + if ((addr / 512) != (pseudoPhysical / 512)) { - AddressRange* otherRange = &CodeRanges[addr / 256]; + AddressRange* otherRange = &CodeRanges[addr / 512]; assert(otherRange != range); - assert(otherRange->Blocks.RemoveByValue(block)); + bool removed = otherRange->Blocks.RemoveByValue(block); + assert(removed); } } - assert(JitBlocks.RemoveByValue(block)); + bool removed = JitBlocks.RemoveByValue(block); + assert(removed); FastBlockAccess[block->PseudoPhysicalAddr / 2] = NULL; @@ -631,14 +639,14 @@ void InvalidateByAddr(u32 pseudoPhysical) void InvalidateByAddr7(u32 addr) { u32 pseudoPhysical = TranslateAddr<1>(addr); - if (__builtin_expect(CodeRanges[pseudoPhysical / 256].Blocks.Length > 0, false)) + if
(__builtin_expect(CodeRanges[pseudoPhysical / 512].Blocks.Length > 0, false)) InvalidateByAddr(pseudoPhysical); } void InvalidateITCM(u32 addr) { u32 pseudoPhysical = addr + ExeMemRegionOffsets[exeMem_ITCM]; - if (CodeRanges[pseudoPhysical / 256].Blocks.Length > 0) + if (CodeRanges[pseudoPhysical / 512].Blocks.Length > 0) InvalidateByAddr(pseudoPhysical); } @@ -654,7 +662,7 @@ void InvalidateAll() for (int j = 0; j < block->NumAddresses; j++) { u32 addr = block->AddressRanges()[j]; - AddressRange* range = &CodeRanges[addr / 256]; + AddressRange* range = &CodeRanges[addr / 512]; range->Blocks.Clear(); if (range->TimesInvalidated + 1 > range->TimesInvalidated) range->TimesInvalidated++; @@ -689,8 +697,8 @@ void ResetBlockCache() for (int j = 0; j < block->NumAddresses; j++) { u32 addr = block->AddressRanges()[j]; - CodeRanges[addr / 256].Blocks.Clear(); - CodeRanges[addr / 256].TimesInvalidated = 0; + CodeRanges[addr / 512].Blocks.Clear(); + CodeRanges[addr / 512].TimesInvalidated = 0; } delete block; } diff --git a/src/ARMJIT_Internal.h b/src/ARMJIT_Internal.h index 4acb488..9e6713d 100644 --- a/src/ARMJIT_Internal.h +++ b/src/ARMJIT_Internal.h @@ -38,7 +38,6 @@ struct FetchedInstr u8 BranchFlags; u8 SetFlags; u32 Instr; - u32 NextInstr[2]; u32 Addr; u8 CodeCycles; @@ -185,7 +184,7 @@ struct __attribute__((packed)) AddressRange u16 TimesInvalidated; }; -extern AddressRange CodeRanges[ExeMemSpaceSize / 256]; +extern AddressRange CodeRanges[ExeMemSpaceSize / 512]; typedef void (*InterpreterFunc)(ARM* cpu); extern InterpreterFunc InterpretARM[]; diff --git a/src/ARMJIT_x64/ARMJIT_LoadStore.cpp b/src/ARMJIT_x64/ARMJIT_LoadStore.cpp index 13ca415..eb01c87 100644 --- a/src/ARMJIT_x64/ARMJIT_LoadStore.cpp +++ b/src/ARMJIT_x64/ARMJIT_LoadStore.cpp @@ -105,7 +105,7 @@ void* Compiler::Gen_MemoryRoutine9(bool store, int size) static_assert(sizeof(AddressRange) == 16); LEA(32, ABI_PARAM1, MDisp(ABI_PARAM3, ExeMemRegionOffsets[exeMem_ITCM])); MOV(32, R(RSCRATCH), R(ABI_PARAM1)); - SHR(32, R(RSCRATCH), Imm8(8)); + SHR(32, R(RSCRATCH), Imm8(9)); SHL(32, R(RSCRATCH), Imm8(4)); CMP(32, MDisp(RSCRATCH, squeezePointer(CodeRanges) + offsetof(AddressRange, Blocks.Length)), Imm8(0)); FixupBranch noCode = J_CC(CC_Z); @@ -203,7 +203,7 @@ void* Compiler::Gen_MemoryRoutineSeq9(bool store, bool preinc) ADD(32, R(RSCRATCH), Imm32(ExeMemRegionOffsets[exeMem_ITCM])); MOV(32, R(ABI_PARAM4), R(RSCRATCH)); - SHR(32, R(RSCRATCH), Imm8(8)); + SHR(32, R(RSCRATCH), Imm8(9)); SHL(32, R(RSCRATCH), Imm8(4)); CMP(32, MDisp(RSCRATCH, squeezePointer(CodeRanges) + offsetof(AddressRange, Blocks.Length)), Imm8(0)); FixupBranch noCode = J_CC(CC_Z); @@ -284,28 +284,29 @@ void fault(u32 a, u32 b) void Compiler::Comp_MemAccess(int rd, int rn, const ComplexOperand& op2, int size, int flags) { - if (flags & memop_Store) - { - Comp_AddCycles_CD(); - } - else - { - Comp_AddCycles_CDI(); - } - u32 addressMask = ~0; if (size == 32) addressMask = ~3; if (size == 16) addressMask = ~1; - if (rn == 15 && rd != 15 && op2.IsImm && !(flags & (memop_Post|memop_Store|memop_Writeback))) + if (rn == 15 && rd != 15 && op2.IsImm && !(flags & (memop_SignExtend|memop_Post|memop_Store|memop_Writeback))) { - Comp_MemLoadLiteral(size, rd, - R15 + op2.Imm * ((flags & memop_SubtractOffset) ? -1 : 1)); + u32 addr = R15 + op2.Imm * ((flags & memop_SubtractOffset) ? 
-1 : 1); + Comp_MemLoadLiteral(size, rd, addr); + return; } - else + { + if (flags & memop_Store) + { + Comp_AddCycles_CD(); + } + else + { + Comp_AddCycles_CDI(); + } + OpArg rdMapped = MapReg(rd); OpArg rnMapped = MapReg(rn); diff --git a/src/ARM_InstrInfo.cpp b/src/ARM_InstrInfo.cpp index 0fbde26..1261bbe 100644 --- a/src/ARM_InstrInfo.cpp +++ b/src/ARM_InstrInfo.cpp @@ -5,7 +5,7 @@ namespace ARMInstrInfo { -#define ak(x) ((x) << 21) +#define ak(x) ((x) << 22) enum { A_Read0 = 1 << 0, @@ -36,7 +36,8 @@ enum { A_StaticShiftSetC = 1 << 18, A_SetC = 1 << 19, - A_WriteMem = 1 << 20 + A_WriteMem = 1 << 20, + A_LoadMem = 1 << 21 }; #define A_BIOP A_Read16 @@ -122,7 +123,7 @@ const u32 A_QSUB = A_Write12 | A_Read0 | A_Read16 | A_UnkOnARM7 | ak(ak_QSUB); const u32 A_QDADD = A_Write12 | A_Read0 | A_Read16 | A_UnkOnARM7 | ak(ak_QDADD); const u32 A_QDSUB = A_Write12 | A_Read0 | A_Read16 | A_UnkOnARM7 | ak(ak_QDSUB); -#define A_LDR A_Write12 +#define A_LDR A_Write12 | A_LoadMem #define A_STR A_Read12 | A_WriteMem #define A_IMPLEMENT_WB_LDRSTR(x,k) \ @@ -143,7 +144,7 @@ A_IMPLEMENT_WB_LDRSTR(STRB,STR) A_IMPLEMENT_WB_LDRSTR(LDR,LDR) A_IMPLEMENT_WB_LDRSTR(LDRB,LDR) -#define A_LDRD A_Write12Double +#define A_LDRD A_Write12Double | A_LoadMem #define A_STRD A_Read12Double | A_WriteMem #define A_IMPLEMENT_HD_LDRSTR(x,k) \ @@ -159,10 +160,10 @@ A_IMPLEMENT_HD_LDRSTR(LDRH,LDR) A_IMPLEMENT_HD_LDRSTR(LDRSB,LDR) A_IMPLEMENT_HD_LDRSTR(LDRSH,LDR) -const u32 A_SWP = A_Write12 | A_Read16 | A_Read0 | A_WriteMem | ak(ak_SWP); -const u32 A_SWPB = A_Write12 | A_Read16 | A_Read0 | A_WriteMem | ak(ak_SWPB); +const u32 A_SWP = A_Write12 | A_Read16 | A_Read0 | A_LoadMem | A_WriteMem | ak(ak_SWP); +const u32 A_SWPB = A_Write12 | A_Read16 | A_Read0 | A_LoadMem | A_WriteMem | ak(ak_SWPB); -const u32 A_LDM = A_Read16 | A_MemWriteback | ak(ak_LDM); +const u32 A_LDM = A_Read16 | A_MemWriteback | A_LoadMem | ak(ak_LDM); const u32 A_STM = A_Read16 | A_MemWriteback | A_WriteMem | ak(ak_STM); const u32 A_B = A_BranchAlways | ak(ak_B); @@ -360,6 +361,9 @@ Info Decode(bool thumb, u32 num, u32 instr) if (data & T_WriteMem) res.SpecialKind = special_WriteMem; + + if (res.Kind == ARMInstrInfo::tk_LDR_PCREL) + res.SpecialKind = special_LoadLiteral; res.EndBlock |= res.Branches(); @@ -377,7 +381,7 @@ Info Decode(bool thumb, u32 num, u32 instr) if (data & A_UnkOnARM7 && num != 0) data = A_UNK; - res.Kind = (data >> 21) & 0x1FF; + res.Kind = (data >> 22) & 0x1FF; if (res.Kind == ak_MCR) { @@ -454,12 +458,15 @@ Info Decode(bool thumb, u32 num, u32 instr) res.ReadFlags |= flag_C; if ((data & A_RRXReadC) && !((instr >> 7) & 0x1F)) res.ReadFlags |= flag_C; - if ((data & A_SetC) || (data & A_StaticShiftSetC) && ((instr >> 7) & 0x1F)) + if ((data & A_SetC) || ((data & A_StaticShiftSetC) && ((instr >> 7) & 0x1F))) res.WriteFlags |= flag_C; if (data & A_WriteMem) res.SpecialKind = special_WriteMem; + if ((data & A_LoadMem) && res.SrcRegs == (1 << 15)) + res.SpecialKind = special_LoadLiteral; + if ((instr >> 28) < 0xE) { // make non conditional flag sets conditional diff --git a/src/ARM_InstrInfo.h b/src/ARM_InstrInfo.h index d02f168..c032a4f 100644 --- a/src/ARM_InstrInfo.h +++ b/src/ARM_InstrInfo.h @@ -230,7 +230,8 @@ enum { special_NotSpecialAtAll = 0, special_WriteMem, - special_WaitForInterrupt + special_WaitForInterrupt, + special_LoadLiteral }; struct Info diff --git a/src/libui_sdl/main.cpp b/src/libui_sdl/main.cpp index 0066668..c3db88d 100644 --- a/src/libui_sdl/main.cpp +++ b/src/libui_sdl/main.cpp @@ -2675,6 +2675,8 @@ void 
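Both the special_LoadLiteral classification and the Comp_MemAccess early-out above lean on the same observation: a PC-relative load with an immediate offset and no writeback reads a compile-time constant. A standalone sketch of the guard (hypothetical helper mirroring the condition above):

    bool IsLiteralLoad(int rn, int rd, bool op2IsImm, int flags)
    {
        return rn == 15 && rd != 15 && op2IsImm
            && !(flags & (memop_SignExtend | memop_Post | memop_Store | memop_Writeback));
    }

When it holds, Comp_MemLoadLiteral performs the read once at compile time and emits a plain MOV of the value, which the register cache can then track as a literal.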
RecreateMainWindow(bool opengl) int main(int argc, char** argv) { + freopen("miauz.txt", "w", stdout); + srand(time(NULL)); printf("melonDS " MELONDS_VERSION "\n"); -- cgit v1.2.3 From 81f38c14be0d9ba5a3da8f67d9719ed2c47279c5 Mon Sep 17 00:00:00 2001 From: RSDuck Date: Fri, 18 Oct 2019 13:29:17 +0200 Subject: integrate changes from ARM64 backend and more - better handle LDM/STM in reg alloc - unify Halted and IRQ in anticipation for branch inlining - literal optimisations can be disabled in gui - jit blocks follow simple returns - fix idle loop detection - break jit blocks on IRQ (fixes saving in Pokemon White) --- src/ARM.cpp | 40 ++++++++++++++++++----------- src/ARM.h | 13 +++++++--- src/ARMJIT.cpp | 50 +++++++++++++++++++++++++++++++------ src/ARMJIT_RegisterCache.h | 33 +++++++++++++++++++----- src/ARMJIT_x64/ARMJIT_Compiler.cpp | 7 +++--- src/ARMJIT_x64/ARMJIT_LoadStore.cpp | 16 ++++++++---- src/ARM_InstrInfo.cpp | 28 +++++++++++++++++++++ src/ARM_InstrInfo.h | 2 +- src/Config.cpp | 2 ++ src/Config.h | 1 + src/NDS.cpp | 2 +- src/libui_sdl/DlgEmuSettings.cpp | 31 ++++++++++++++++++++--- src/libui_sdl/main.cpp | 2 -- 13 files changed, 179 insertions(+), 48 deletions(-) (limited to 'src/ARM_InstrInfo.cpp') diff --git a/src/ARM.cpp b/src/ARM.cpp index 423c940..4fab60e 100644 --- a/src/ARM.cpp +++ b/src/ARM.cpp @@ -113,7 +113,7 @@ void ARM::DoSavestate(Savestate* file) file->Var32((u32*)&Cycles); //file->Var32((u32*)&CyclesToRun); - file->Var32(&Halted); + file->Var32(&StopExecution); file->VarArray(R, 16*sizeof(u32)); file->Var32(&CPSR); @@ -589,16 +589,21 @@ void ARMv5::ExecuteJIT() NDS::ARM9Timestamp += Cycles; Cycles = 0; - if (IRQ) TriggerIRQ(); - if (Halted) + if (StopExecution) { - bool idleLoop = Halted & 0x20; - Halted &= ~0x20; - if ((Halted == 1 || idleLoop) && NDS::ARM9Timestamp < NDS::ARM9Target) + if (IRQ) + TriggerIRQ(); + + if (Halted || IdleLoop) { - NDS::ARM9Timestamp = NDS::ARM9Target; + bool idleLoop = IdleLoop; + IdleLoop = 0; + if ((Halted == 1 || idleLoop) && NDS::ARM9Timestamp < NDS::ARM9Target) + { + NDS::ARM9Timestamp = NDS::ARM9Target; + } + break; } - break; } } @@ -726,16 +731,21 @@ void ARMv4::ExecuteJIT() Cycles = 0; // TODO optimize this shit!!! - if (IRQ) TriggerIRQ(); - if (Halted) + if (StopExecution) { - bool idleLoop = Halted & 0x20; - Halted &= ~0x20; - if ((Halted == 1 || idleLoop) && NDS::ARM7Timestamp < NDS::ARM7Target) + if (IRQ) + TriggerIRQ(); + + if (Halted || IdleLoop) { - NDS::ARM7Timestamp = NDS::ARM7Target; + bool idleLoop = IdleLoop; + IdleLoop = 0; + if ((Halted == 1 || idleLoop) && NDS::ARM7Timestamp < NDS::ARM7Target) + { + NDS::ARM7Timestamp = NDS::ARM7Target; + } + break; } - break; } } diff --git a/src/ARM.h b/src/ARM.h index 8a01068..e252d23 100644 --- a/src/ARM.h +++ b/src/ARM.h @@ -112,9 +112,16 @@ public: u32 Num; s32 Cycles; - u32 Halted; - - u32 IRQ; // nonzero to trigger IRQ + union + { + struct + { + u8 Halted; + u8 IRQ; // nonzero to trigger IRQ + u8 IdleLoop; + }; + u32 StopExecution; + }; u32 CodeRegion; s32 CodeCycles; diff --git a/src/ARMJIT.cpp b/src/ARMJIT.cpp index 19a5e70..0695b85 100644 --- a/src/ARMJIT.cpp +++ b/src/ARMJIT.cpp @@ -16,11 +16,13 @@ #include "GPU3D.h" #include "SPU.h" #include "Wifi.h" +#include "NDSCart.h" namespace ARMJIT { #define JIT_DEBUGPRINT(msg, ...) +//#define JIT_DEBUGPRINT(msg, ...) 
printf(msg, ## __VA_ARGS__) Compiler* compiler; @@ -159,13 +161,17 @@ void FloodFillSetFlags(FetchedInstr instrs[], int start, u8 flags) } } -bool DecodeBranch(bool thumb, const FetchedInstr& instr, u32& cond, u32& targetAddr) +bool DecodeBranch(bool thumb, const FetchedInstr& instr, u32& cond, bool hasLink, u32 lr, bool& link, + u32& linkAddr, u32& targetAddr) { if (thumb) { u32 r15 = instr.Addr + 4; cond = 0xE; + link = instr.Info.Kind == ARMInstrInfo::tk_BL_LONG; + linkAddr = instr.Addr + 4; + if (instr.Info.Kind == ARMInstrInfo::tk_BL_LONG && !(instr.Instr & (1 << 12))) { targetAddr = r15 + ((s32)((instr.Instr & 0x7FF) << 21) >> 9); @@ -185,9 +191,18 @@ bool DecodeBranch(bool thumb, const FetchedInstr& instr, u32& cond, u32& targetA targetAddr = r15 + offset; return true; } + else if (hasLink && instr.Info.Kind == ARMInstrInfo::tk_BX && instr.A_Reg(3) == 14) + { + JIT_DEBUGPRINT("returning!\n"); + targetAddr = lr; + return true; + } } else { + link = instr.Info.Kind == ARMInstrInfo::ak_BL; + linkAddr = instr.Addr + 4; + cond = instr.Cond(); if (instr.Info.Kind == ARMInstrInfo::ak_BL || instr.Info.Kind == ARMInstrInfo::ak_B) @@ -197,6 +212,12 @@ bool DecodeBranch(bool thumb, const FetchedInstr& instr, u32& cond, u32& targetA targetAddr = r15 + offset; return true; } + else if (hasLink && instr.Info.Kind == ARMInstrInfo::ak_BX && instr.A_Reg(0) == 14) + { + JIT_DEBUGPRINT("returning!\n"); + targetAddr = lr; + return true; + } } return false; } @@ -351,6 +372,8 @@ void CompileBlock(ARM* cpu) CodeRanges[pseudoPhysicalAddr / 512].TimesInvalidated); u32 lastSegmentStart = blockAddr; + u32 lr; + bool hasLink = false; do { @@ -413,6 +436,9 @@ void CompileBlock(ARM* cpu) cpu->CurInstr = instrs[i].Instr; cpu->CodeCycles = instrs[i].CodeCycles; + if (instrs[i].Info.DstRegs & (1 << 14)) + hasLink = false; + if (thumb) { InterpretTHUMB[instrs[i].Info.Kind](cpu); @@ -452,8 +478,9 @@ void CompileBlock(ARM* cpu) { bool hasBranched = cpu->R[15] != r15; - u32 cond, target; - bool staticBranch = DecodeBranch(thumb, instrs[i], cond, target); + bool link; + u32 cond, target, linkAddr; + bool staticBranch = DecodeBranch(thumb, instrs[i], cond, hasLink, lr, link, linkAddr, target); JIT_DEBUGPRINT("branch cond %x target %x (%d)\n", cond, target, hasBranched); if (staticBranch) @@ -474,18 +501,24 @@ void CompileBlock(ARM* cpu) if (cond < 0xE && target < instrs[i].Addr && target >= lastSegmentStart) { // we might have an idle loop - u32 offset = (target - blockAddr) / (thumb ? 2 : 4); - if (IsIdleLoop(instrs + offset, i - offset + 1)) + u32 backwardsOffset = (instrs[i].Addr - target) / (thumb ? 2 : 4); + if (IsIdleLoop(&instrs[i - backwardsOffset], backwardsOffset + 1)) { instrs[i].BranchFlags |= branch_IdleBranch; JIT_DEBUGPRINT("found %s idle loop %d in block %x\n", thumb ? "thumb" : "arm", cpu->Num, blockAddr); } } - else if (hasBranched && (!thumb || cond == 0xE) && !isBackJump && i + 1 < Config::JIT_MaxBlockSize) + else if (hasBranched && !isBackJump && i + 1 < Config::JIT_MaxBlockSize) { u32 targetPseudoPhysical = cpu->Num == 0 ? TranslateAddr<0>(target) : TranslateAddr<1>(target); + + if (link) + { + lr = linkAddr; + hasLink = true; + } r15 = target + (thumb ? 2 : 4); assert(r15 == cpu->R[15]); @@ -520,7 +553,7 @@ void CompileBlock(ARM* cpu) bool secondaryFlagReadCond = !canCompile || (instrs[i - 1].BranchFlags & (branch_FollowCondTaken | branch_FollowCondNotTaken)); if (instrs[i - 1].Info.ReadFlags != 0 || secondaryFlagReadCond) FloodFillSetFlags(instrs, i - 2, !secondaryFlagReadCond ? 
instrs[i - 1].Info.ReadFlags : 0xF); - } while(!instrs[i - 1].Info.EndBlock && i < Config::JIT_MaxBlockSize && !cpu->Halted); + } while(!instrs[i - 1].Info.EndBlock && i < Config::JIT_MaxBlockSize && !cpu->Halted && (!cpu->IRQ || (cpu->CPSR & 0x80))); u32 restoreSlot = HashRestoreCandidate(pseudoPhysicalAddr); JitBlock* prevBlock = RestoreCandidates[restoreSlot]; @@ -713,6 +746,9 @@ void* GetFuncForAddr(ARM* cpu, u32 addr, bool store, int size) { if ((addr & 0xFF000000) == 0x04000000) { + if (!store && size == 32 && addr == 0x04100010 && NDS::ExMemCnt[0] & (1<<11)) + return (void*)NDSCart::ReadROMData; + /* unfortunately we can't map GPU2D this way since it's hidden inside an object diff --git a/src/ARMJIT_RegisterCache.h b/src/ARMJIT_RegisterCache.h index ed6a2b7..2222bc2 100644 --- a/src/ARMJIT_RegisterCache.h +++ b/src/ARMJIT_RegisterCache.h @@ -93,10 +93,12 @@ public: void Prepare(bool thumb, int i) { + FetchedInstr instr = Instrs[i]; + if (LoadedRegs & (1 << 15)) UnloadRegister(15); - BitSet16 invalidedLiterals(LiteralsLoaded & Instrs[i].Info.DstRegs); + BitSet16 invalidedLiterals(LiteralsLoaded & instr.Info.DstRegs); for (int reg : invalidedLiterals) UnloadLiteral(reg); @@ -108,6 +110,7 @@ public: { BitSet16 regsNeeded((Instrs[j].Info.SrcRegs & ~(1 << 15)) | Instrs[j].Info.DstRegs); futureNeeded |= regsNeeded.m_val; + regsNeeded &= BitSet16(~Instrs[j].Info.NotStrictlyNeeded); for (int reg : regsNeeded) ranking[reg]++; } @@ -117,8 +120,8 @@ public: for (int reg : neverNeededAgain) UnloadRegister(reg); - FetchedInstr Instr = Instrs[i]; - u16 necessaryRegs = (Instr.Info.SrcRegs & ~(1 << 15)) | Instr.Info.DstRegs; + u16 necessaryRegs = ((instr.Info.SrcRegs & ~(1 << 15)) | instr.Info.DstRegs) & ~instr.Info.NotStrictlyNeeded; + u16 writeRegs = instr.Info.DstRegs & ~instr.Info.NotStrictlyNeeded; BitSet16 needToBeLoaded(necessaryRegs & ~LoadedRegs); if (needToBeLoaded != BitSet16(0)) { @@ -143,13 +146,31 @@ public: loadedSet.m_val = LoadedRegs; } + // we don't need to load a value which is always going to be overwritten BitSet16 needValueLoaded(needToBeLoaded); - if (thumb || Instr.Cond() >= 0xE) - needValueLoaded = BitSet16(Instr.Info.SrcRegs); + if (thumb || instr.Cond() >= 0xE) + needValueLoaded = BitSet16(instr.Info.SrcRegs); for (int reg : needToBeLoaded) LoadRegister(reg, needValueLoaded[reg]); + } + { + BitSet16 loadedSet(LoadedRegs); + BitSet16 loadRegs(instr.Info.NotStrictlyNeeded & futureNeeded & ~LoadedRegs); + if (loadRegs && loadedSet.Count() < NativeRegsAvailable) + { + int left = NativeRegsAvailable - loadedSet.Count(); + for (int reg : loadRegs) + { + if (left-- == 0) + break; + + writeRegs |= (1 << reg) & instr.Info.DstRegs; + LoadRegister(reg, !(thumb || instr.Cond() >= 0xE) || (1 << reg) & instr.Info.SrcRegs); + } + } } - DirtyRegs |= Instr.Info.DstRegs & ~(1 << 15); + + DirtyRegs |= writeRegs & ~(1 << 15); } static const Reg NativeRegAllocOrder[]; diff --git a/src/ARMJIT_x64/ARMJIT_Compiler.cpp b/src/ARMJIT_x64/ARMJIT_Compiler.cpp index a994d34..fd38724 100644 --- a/src/ARMJIT_x64/ARMJIT_Compiler.cpp +++ b/src/ARMJIT_x64/ARMJIT_Compiler.cpp @@ -364,7 +364,7 @@ void Compiler::Reset() void Compiler::Comp_SpecialBranchBehaviour() { if (CurInstr.BranchFlags & branch_IdleBranch) - OR(32, MDisp(RCPU, offsetof(ARM, Halted)), Imm8(0x20)); + OR(32, MDisp(RCPU, offsetof(ARM, IdleLoop)), Imm8(0x1)); if (CurInstr.BranchFlags & branch_FollowCondNotTaken) { @@ -402,6 +402,7 @@ JitBlockEntry Compiler::CompileBlock(ARM* cpu, bool thumb, FetchedInstr instrs[] { CurInstr = instrs[i]; R15 = 
CurInstr.Addr + (Thumb ? 4 : 8); + CodeRegion = R15 >> 24; Exit = i == instrsCount - 1 || (CurInstr.BranchFlags & branch_FollowCondNotTaken); @@ -571,8 +572,6 @@ void Compiler::Comp_AddCycles_CDI() Comp_AddCycles_CD(); else { - IrregularCycles = true; - s32 cycles; s32 numC = NDS::ARM7MemTimings[CurInstr.CodeCycles][Thumb ? 0 : 2]; @@ -642,7 +641,7 @@ void Compiler::Comp_AddCycles_CD() IrregularCycles = true; } - if (!Thumb && CurInstr.Cond() < 0xE) + if (IrregularCycles && !Thumb && CurInstr.Cond() < 0xE) ADD(32, MDisp(RCPU, offsetof(ARM, Cycles)), Imm8(cycles)); else ConstantCycles += cycles; diff --git a/src/ARMJIT_x64/ARMJIT_LoadStore.cpp b/src/ARMJIT_x64/ARMJIT_LoadStore.cpp index eb01c87..3799774 100644 --- a/src/ARMJIT_x64/ARMJIT_LoadStore.cpp +++ b/src/ARMJIT_x64/ARMJIT_LoadStore.cpp @@ -1,5 +1,6 @@ #include "ARMJIT_Compiler.h" +#include "../Config.h" using namespace Gen; @@ -290,7 +291,7 @@ void Compiler::Comp_MemAccess(int rd, int rn, const ComplexOperand& op2, int siz if (size == 16) addressMask = ~1; - if (rn == 15 && rd != 15 && op2.IsImm && !(flags & (memop_SignExtend|memop_Post|memop_Store|memop_Writeback))) + if (Config::JIT_LiteralOptimisations && rn == 15 && rd != 15 && op2.IsImm && !(flags & (memop_SignExtend|memop_Post|memop_Store|memop_Writeback))) { u32 addr = R15 + op2.Imm * ((flags & memop_SubtractOffset) ? -1 : 1); Comp_MemLoadLiteral(size, rd, addr); @@ -309,6 +310,8 @@ void Compiler::Comp_MemAccess(int rd, int rn, const ComplexOperand& op2, int siz OpArg rdMapped = MapReg(rd); OpArg rnMapped = MapReg(rn); + if (Thumb && rn == 15) + rnMapped = Imm32(R15 & ~0x2); bool inlinePreparation = Num == 1; u32 constLocalROR32 = 4; @@ -317,7 +320,7 @@ void Compiler::Comp_MemAccess(int rd, int rn, const ComplexOperand& op2, int siz ? MemoryFuncs9[size >> 4][!!(flags & memop_Store)] : MemoryFuncs7[size >> 4][!!((flags & memop_Store))]; - if ((rd != 15 || (flags & memop_Store)) && op2.IsImm && RegCache.IsLiteral(rn)) + if (Config::JIT_LiteralOptimisations && (rd != 15 || (flags & memop_Store)) && op2.IsImm && RegCache.IsLiteral(rn)) { u32 addr = RegCache.LiteralValues[rn] + op2.Imm * ((flags & memop_SubtractOffset) ? 
-1 : 1); @@ -749,9 +752,12 @@ void Compiler::T_Comp_MemImmHalf() void Compiler::T_Comp_LoadPCRel() { - u32 addr = (R15 & ~0x2) + ((CurInstr.Instr & 0xFF) << 2); - - Comp_MemLoadLiteral(32, CurInstr.T_Reg(8), addr); + u32 offset = (CurInstr.Instr & 0xFF) << 2; + u32 addr = (R15 & ~0x2) + offset; + if (Config::JIT_LiteralOptimisations) + Comp_MemLoadLiteral(32, CurInstr.T_Reg(8), addr); + else + Comp_MemAccess(CurInstr.T_Reg(8), 15, ComplexOperand(offset), 32, 0); } void Compiler::T_Comp_MemSPRel() diff --git a/src/ARM_InstrInfo.cpp b/src/ARM_InstrInfo.cpp index 1261bbe..8f8bd35 100644 --- a/src/ARM_InstrInfo.cpp +++ b/src/ARM_InstrInfo.cpp @@ -365,6 +365,21 @@ Info Decode(bool thumb, u32 num, u32 instr) if (res.Kind == ARMInstrInfo::tk_LDR_PCREL) res.SpecialKind = special_LoadLiteral; + if (res.Kind == tk_LDMIA || res.Kind == tk_POP) + { + u32 set = (instr & 0xFF) & ~(res.DstRegs|res.SrcRegs); + res.NotStrictlyNeeded |= set; + res.DstRegs |= set; + } + if (res.Kind == tk_STMIA || res.Kind == tk_PUSH) + { + u32 set = (instr & 0xFF) & ~(res.DstRegs|res.SrcRegs); + if (res.Kind == tk_PUSH && instr & (1 << 8)) + set |= (1 << 14); + res.NotStrictlyNeeded |= set; + res.SrcRegs |= set; + } + res.EndBlock |= res.Branches(); if (res.Kind == tk_BCOND) @@ -466,6 +481,19 @@ Info Decode(bool thumb, u32 num, u32 instr) if ((data & A_LoadMem) && res.SrcRegs == (1 << 15)) res.SpecialKind = special_LoadLiteral; + + if (res.Kind == ak_LDM) + { + u16 set = (instr & 0xFFFF) & ~(res.SrcRegs|res.DstRegs|(1<<15)); + res.DstRegs |= set; + res.NotStrictlyNeeded |= set; + } + if (res.Kind == ak_STM) + { + u16 set = (instr & 0xFFFF) & ~(res.SrcRegs|res.DstRegs|(1<<15)); + res.SrcRegs |= set; + res.NotStrictlyNeeded |= set; + } if ((instr >> 28) < 0xE) { diff --git a/src/ARM_InstrInfo.h b/src/ARM_InstrInfo.h index c032a4f..2732181 100644 --- a/src/ARM_InstrInfo.h +++ b/src/ARM_InstrInfo.h @@ -236,7 +236,7 @@ enum struct Info { - u16 DstRegs, SrcRegs; + u16 DstRegs, SrcRegs, NotStrictlyNeeded; u16 Kind; u8 SpecialKind; diff --git a/src/Config.cpp b/src/Config.cpp index 63d61a3..eb5bfcc 100644 --- a/src/Config.cpp +++ b/src/Config.cpp @@ -38,6 +38,7 @@ int GL_Antialias; bool JIT_Enable = false; int JIT_MaxBlockSize = 12; bool JIT_BrancheOptimisations = true; +bool JIT_LiteralOptimisations = true; #endif ConfigEntry ConfigFile[] = @@ -52,6 +53,7 @@ ConfigEntry ConfigFile[] = {"JIT_Enable", 0, &JIT_Enable, 0, NULL, 0}, {"JIT_MaxBlockSize", 0, &JIT_MaxBlockSize, 10, NULL, 0}, {"JIT_BrancheOptimisations", 0, &JIT_BrancheOptimisations, 1, NULL, 0}, + {"JIT_LiteralOptimisations", 0, &JIT_LiteralOptimisations, 1, NULL, 0}, #endif {"", -1, NULL, 0, NULL, 0} diff --git a/src/Config.h b/src/Config.h index 0fcefc3..723ab13 100644 --- a/src/Config.h +++ b/src/Config.h @@ -50,6 +50,7 @@ extern int GL_Antialias; extern bool JIT_Enable; extern int JIT_MaxBlockSize; extern bool JIT_BrancheOptimisations; +extern bool JIT_LiteralOptimisations; #endif } diff --git a/src/NDS.cpp b/src/NDS.cpp index e9e6795..141c565 100644 --- a/src/NDS.cpp +++ b/src/NDS.cpp @@ -1142,7 +1142,7 @@ void UpdateIRQ(u32 cpu) if (IME[cpu] & 0x1) { - arm->IRQ = IE[cpu] & IF[cpu]; + arm->IRQ = !!(IE[cpu] & IF[cpu]); } else { diff --git a/src/libui_sdl/DlgEmuSettings.cpp b/src/libui_sdl/DlgEmuSettings.cpp index 45e8e0c..0df9c6c 100644 --- a/src/libui_sdl/DlgEmuSettings.cpp +++ b/src/libui_sdl/DlgEmuSettings.cpp @@ -43,6 +43,7 @@ uiCheckbox* cbDirectBoot; uiCheckbox* cbJITEnabled; uiEntry* enJITMaxBlockSize; uiCheckbox* cbJITBranchOptimisations; +uiCheckbox*
cbJITLiteralOptimisations; #endif int OnCloseWindow(uiWindow* window, void* blarg) @@ -66,14 +67,16 @@ void OnOk(uiButton* btn, void* blarg) char* maxBlockSizeStr = uiEntryText(enJITMaxBlockSize); long blockSize = strtol(maxBlockSizeStr, NULL, 10); bool branchOptimisations = uiCheckboxChecked(cbJITBranchOptimisations); + bool literalOptimisations = uiCheckboxChecked(cbJITLiteralOptimisations); uiFreeText(maxBlockSizeStr); if (blockSize < 1) blockSize = 1; if (blockSize > 32) blockSize = 32; - if (enableJit != Config::JIT_Enable || blockSize != Config::JIT_MaxBlockSize || - branchOptimisations != Config::JIT_BrancheOptimisations) + if (enableJit != Config::JIT_Enable || blockSize != Config::JIT_MaxBlockSize + || branchOptimisations != Config::JIT_BrancheOptimisations + || literalOptimisations != Config::JIT_LiteralOptimisations) { if (RunningSomething && !uiMsgBoxConfirm(win, "Reset emulator", @@ -82,7 +85,8 @@ void OnOk(uiButton* btn, void* blarg) Config::JIT_Enable = enableJit; Config::JIT_MaxBlockSize = blockSize; - Config::JIT_BrancheOptimisations = uiCheckboxChecked(cbJITBranchOptimisations); + Config::JIT_BrancheOptimisations = branchOptimisations; + Config::JIT_LiteralOptimisations = literalOptimisations; restart = true; } @@ -108,11 +112,13 @@ void OnJITStateChanged(uiCheckbox* cb, void* blarg) { uiControlEnable(uiControl(enJITMaxBlockSize)); uiControlEnable(uiControl(cbJITBranchOptimisations)); + uiControlEnable(uiControl(cbJITLiteralOptimisations)); } else { uiControlDisable(uiControl(enJITMaxBlockSize)); uiControlDisable(uiControl(cbJITBranchOptimisations)); + uiControlDisable(uiControl(cbJITLiteralOptimisations)); } } #endif @@ -174,9 +180,25 @@ void Open() uiBox* row = uiNewHorizontalBox(); uiBoxAppend(in_ctrl, uiControl(row), 0); - cbJITBranchOptimisations = uiNewCheckbox("Branch optimisations (breaks in rare cases games!)"); + uiLabel* lbl = uiNewLabel("If you experience problems with a certain game, you can try disabling these options:"); + uiBoxAppend(row, uiControl(lbl), 0); + } + + { + uiBox* row = uiNewHorizontalBox(); + uiBoxAppend(in_ctrl, uiControl(row), 0); + + cbJITBranchOptimisations = uiNewCheckbox("Branch optimisations"); uiBoxAppend(row, uiControl(cbJITBranchOptimisations), 0); } + + { + uiBox* row = uiNewHorizontalBox(); + uiBoxAppend(in_ctrl, uiControl(row), 0); + + cbJITLiteralOptimisations = uiNewCheckbox("Literal optimisations"); + uiBoxAppend(row, uiControl(cbJITLiteralOptimisations), 0); + } } #endif @@ -214,6 +236,7 @@ void Open() OnJITStateChanged(cbJITEnabled, NULL); uiCheckboxSetChecked(cbJITBranchOptimisations, Config::JIT_BrancheOptimisations); + uiCheckboxSetChecked(cbJITLiteralOptimisations, Config::JIT_LiteralOptimisations); #endif uiControlShow(uiControl(win)); diff --git a/src/libui_sdl/main.cpp b/src/libui_sdl/main.cpp index c3db88d..0066668 100644 --- a/src/libui_sdl/main.cpp +++ b/src/libui_sdl/main.cpp @@ -2675,8 +2675,6 @@ void RecreateMainWindow(bool opengl) int main(int argc, char** argv) { - freopen("miauz.txt", "w", stdout); - srand(time(NULL)); printf("melonDS " MELONDS_VERSION "\n"); -- cgit v1.2.3 From 9b98b8816a1dc1373ce9a57aef845263456702c3 Mon Sep 17 00:00:00 2001 From: RSDuck Date: Tue, 4 Feb 2020 17:28:51 +0100 Subject: improve nop handling and proper behaviour for LDM^ fixes dslinux --- src/ARM.cpp | 2 ++ src/ARMJIT.cpp | 13 +++++++++---- src/ARMJIT_RegisterCache.h | 2 +- src/ARMJIT_x64/ARMJIT_Branch.cpp | 6 +++--- src/ARMJIT_x64/ARMJIT_Compiler.cpp | 1 + src/ARMJIT_x64/ARMJIT_Compiler.h | 2 ++ 
src/ARMJIT_x64/ARMJIT_LoadStore.cpp | 5 +++-- src/ARM_InstrInfo.cpp | 2 ++ src/ARM_InstrInfo.h | 2 ++ 9 files changed, 25 insertions(+), 10 deletions(-) (limited to 'src/ARM_InstrInfo.cpp') diff --git a/src/ARM.cpp b/src/ARM.cpp index 9ab9546..07cc472 100644 --- a/src/ARM.cpp +++ b/src/ARM.cpp @@ -725,6 +725,8 @@ void ARMv4::ExecuteJIT() return; } + //printf("executing armv4 at %08x\n", instrAddr); + ARMJIT::JitBlockEntry block = ARMJIT::LookUpBlock<1>(instrAddr); if (block) Cycles += block(); diff --git a/src/ARMJIT.cpp b/src/ARMJIT.cpp index c7387c9..8fd7708 100644 --- a/src/ARMJIT.cpp +++ b/src/ARMJIT.cpp @@ -273,6 +273,8 @@ bool IsIdleLoop(FetchedInstr* instrs, int instrsCount) typedef void (*InterpreterFunc)(ARM* cpu); +void NOP(ARM* cpu) {} + #define F(x) &ARMInterpreter::A_##x #define F_ALU(name, s) \ F(name##_REG_LSL_IMM##s), F(name##_REG_LSR_IMM##s), F(name##_REG_ASR_IMM##s), F(name##_REG_ROR_IMM##s), \ @@ -320,7 +322,8 @@ InterpreterFunc InterpretARM[ARMInstrInfo::ak_Count] = F(LDM), F(STM), F(B), F(BL), F(BLX_IMM), F(BX), F(BLX_REG), - F(UNK), F(MSR_IMM), F(MSR_REG), F(MRS), F(MCR), F(MRC), F(SVC) + F(UNK), F(MSR_IMM), F(MSR_REG), F(MRS), F(MCR), F(MRC), F(SVC), + NOP }; #undef F_ALU #undef F_MEM_WB @@ -387,8 +390,8 @@ void CompileBlock(ARM* cpu) u32 nextInstr[2] = {cpu->NextInstr[0], cpu->NextInstr[1]}; u32 nextInstrAddr[2] = {blockAddr, r15}; - JIT_DEBUGPRINT("start block %x (%x) %p %p (region invalidates %dx)\n", - blockAddr, pseudoPhysicalAddr, FastBlockAccess[pseudoPhysicalAddr / 2], + JIT_DEBUGPRINT("start block %x %08x (%x) %p %p (region invalidates %dx)\n", + blockAddr, cpu->CPSR, pseudoPhysicalAddr, FastBlockAccess[pseudoPhysicalAddr / 2], cpu->Num == 0 ? LookUpBlock<0>(blockAddr) : LookUpBlock<1>(blockAddr), CodeRanges[pseudoPhysicalAddr / 512].TimesInvalidated); @@ -473,7 +476,9 @@ void CompileBlock(ARM* cpu) else { u32 icode = ((instrs[i].Instr >> 4) & 0xF) | ((instrs[i].Instr >> 16) & 0xFF0); - assert(InterpretARM[instrs[i].Info.Kind] == ARMInterpreter::ARMInstrTable[icode] || instrs[i].Info.Kind == ARMInstrInfo::ak_MOV_REG_LSL_IMM); + assert(InterpretARM[instrs[i].Info.Kind] == ARMInterpreter::ARMInstrTable[icode] + || instrs[i].Info.Kind == ARMInstrInfo::ak_MOV_REG_LSL_IMM + || instrs[i].Info.Kind == ARMInstrInfo::ak_Nop); if (cpu->CheckCondition(instrs[i].Cond())) InterpretARM[instrs[i].Info.Kind](cpu); else diff --git a/src/ARMJIT_RegisterCache.h b/src/ARMJIT_RegisterCache.h index 2222bc2..b894657 100644 --- a/src/ARMJIT_RegisterCache.h +++ b/src/ARMJIT_RegisterCache.h @@ -152,7 +152,7 @@ public: needValueLoaded = BitSet16(instr.Info.SrcRegs); for (int reg : needToBeLoaded) LoadRegister(reg, needValueLoaded[reg]); - } + } { BitSet16 loadedSet(LoadedRegs); BitSet16 loadRegs(instr.Info.NotStrictlyNeeded & futureNeeded & ~LoadedRegs); diff --git a/src/ARMJIT_x64/ARMJIT_Branch.cpp b/src/ARMJIT_x64/ARMJIT_Branch.cpp index 0dedb3f..e02865d 100644 --- a/src/ARMJIT_x64/ARMJIT_Branch.cpp +++ b/src/ARMJIT_x64/ARMJIT_Branch.cpp @@ -134,7 +134,7 @@ void Compiler::Comp_JumpTo(Gen::X64Reg addr, bool restoreCPSR) { IrregularCycles = true; - BitSet16 hiRegsLoaded(RegCache.DirtyRegs & 0xFF00); + BitSet16 hiRegsLoaded(RegCache.LoadedRegs & 0x7F00); bool previouslyDirty = CPSRDirty; SaveCPSR(); @@ -156,12 +156,12 @@ void Compiler::Comp_JumpTo(Gen::X64Reg addr, bool restoreCPSR) if (!restoreCPSR) XOR(32, R(ABI_PARAM3), R(ABI_PARAM3)); else - MOV(32, R(ABI_PARAM3), Imm32(restoreCPSR)); + MOV(32, R(ABI_PARAM3), Imm32(true)); // what a waste if (Num == 0) CALL((void*)&ARMv5::JumpTo); 
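     // Probable rationale for the hiRegsLoaded change above: JumpTo with
     // restoreCPSR can switch the CPU mode and rebank R8-R14, so every high
     // register currently in the cache (LoadedRegs, not just DirtyRegs) must be
     // written back before the call and reloaded after it; the 0x7F00 mask
     // covers R8-R14 and leaves out R15, which the cache handles separately.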
else CALL((void*)&ARMv4::JumpTo); - + if (!Thumb && restoreCPSR && CurInstr.Cond() < 0xE) { for (int reg : hiRegsLoaded) diff --git a/src/ARMJIT_x64/ARMJIT_Compiler.cpp b/src/ARMJIT_x64/ARMJIT_Compiler.cpp index fd38724..5afe842 100644 --- a/src/ARMJIT_x64/ARMJIT_Compiler.cpp +++ b/src/ARMJIT_x64/ARMJIT_Compiler.cpp @@ -308,6 +308,7 @@ const Compiler::CompileFunc A_Comp[ARMInstrInfo::ak_Count] = F(A_Comp_BranchImm), F(A_Comp_BranchImm), F(A_Comp_BranchImm), F(A_Comp_BranchXchangeReg), F(A_Comp_BranchXchangeReg), // system stuff NULL, NULL, NULL, NULL, NULL, NULL, NULL, + F(Nop) }; const Compiler::CompileFunc T_Comp[ARMInstrInfo::tk_Count] = { diff --git a/src/ARMJIT_x64/ARMJIT_Compiler.h b/src/ARMJIT_x64/ARMJIT_Compiler.h index 792ff66..2cb57dc 100644 --- a/src/ARMJIT_x64/ARMJIT_Compiler.h +++ b/src/ARMJIT_x64/ARMJIT_Compiler.h @@ -79,6 +79,8 @@ public: opInvertOp2 = 1 << 5, }; + void Nop() {} + void A_Comp_Arith(); void A_Comp_MovOp(); void A_Comp_CmpOp(); diff --git a/src/ARMJIT_x64/ARMJIT_LoadStore.cpp b/src/ARMJIT_x64/ARMJIT_LoadStore.cpp index b66f304..4cafc1c 100644 --- a/src/ARMJIT_x64/ARMJIT_LoadStore.cpp +++ b/src/ARMJIT_x64/ARMJIT_LoadStore.cpp @@ -531,7 +531,7 @@ s32 Compiler::Comp_MemAccessBlock(int rn, BitSet16 regs, bool store, bool preinc { if (regs[reg]) { - if (usermode && reg >= 8 && reg < 15) + if (usermode && !regs[15] && reg >= 8 && reg < 15) { if (firstUserMode) { @@ -545,7 +545,8 @@ s32 Compiler::Comp_MemAccessBlock(int rn, BitSet16 regs, bool store, bool preinc FixupBranch sucessfulWritten = J_CC(CC_NC); if (RegCache.Mapping[reg] != INVALID_REG) MOV(32, R(RegCache.Mapping[reg]), R(ABI_PARAM3)); - SaveReg(reg, ABI_PARAM3); + else + SaveReg(reg, ABI_PARAM3); SetJumpTarget(sucessfulWritten); } else if (RegCache.Mapping[reg] == INVALID_REG) diff --git a/src/ARM_InstrInfo.cpp b/src/ARM_InstrInfo.cpp index 8f8bd35..08e2f0a 100644 --- a/src/ARM_InstrInfo.cpp +++ b/src/ARM_InstrInfo.cpp @@ -392,6 +392,8 @@ Info Decode(bool thumb, u32 num, u32 instr) u32 data = ARMInstrTable[((instr >> 4) & 0xF) | ((instr >> 16) & 0xFF0)]; if (num == 0 && (instr & 0xFE000000) == 0xFA000000) data = A_BLX_IMM; + else if ((instr >> 28) == 0xF) + data = ak(ak_Nop); if (data & A_UnkOnARM7 && num != 0) data = A_UNK; diff --git a/src/ARM_InstrInfo.h b/src/ARM_InstrInfo.h index 2732181..6ab4929 100644 --- a/src/ARM_InstrInfo.h +++ b/src/ARM_InstrInfo.h @@ -139,6 +139,8 @@ enum ak_MRC, ak_SVC, + ak_Nop, + ak_Count, tk_LSL_IMM = 0, -- cgit v1.2.3 From d6cc7de6c4b571b24809a0d9665ec6160fe5ff6d Mon Sep 17 00:00:00 2001 From: RSDuck Date: Tue, 4 Feb 2020 18:29:52 +0100 Subject: move ARM64 JIT backend here --- CMakeLists.txt | 2 +- src/ARM.h | 9 +- src/ARMJIT.cpp | 4 + src/ARMJIT_A64/ARMJIT_ALU.cpp | 837 +++++++ src/ARMJIT_A64/ARMJIT_Branch.cpp | 452 ++++ src/ARMJIT_A64/ARMJIT_Compiler.cpp | 707 ++++++ src/ARMJIT_A64/ARMJIT_Compiler.h | 234 ++ src/ARMJIT_A64/ARMJIT_LoadStore.cpp | 848 +++++++ src/ARM_InstrInfo.cpp | 7 +- src/CMakeLists.txt | 27 +- src/dolphin/Align.h | 24 + src/dolphin/Arm64Emitter.cpp | 4466 +++++++++++++++++++++++++++++++++++ src/dolphin/Arm64Emitter.h | 1152 +++++++++ src/dolphin/ArmCommon.h | 27 + src/dolphin/BitUtils.h | 254 ++ src/dolphin/Compat.h | 12 + src/dolphin/MathUtil.cpp | 13 + src/dolphin/MathUtil.h | 121 + 18 files changed, 9188 insertions(+), 8 deletions(-) create mode 100644 src/ARMJIT_A64/ARMJIT_ALU.cpp create mode 100644 src/ARMJIT_A64/ARMJIT_Branch.cpp create mode 100644 src/ARMJIT_A64/ARMJIT_Compiler.cpp create mode 100644 src/ARMJIT_A64/ARMJIT_Compiler.h create mode 
100644 src/ARMJIT_A64/ARMJIT_LoadStore.cpp create mode 100644 src/dolphin/Align.h create mode 100644 src/dolphin/Arm64Emitter.cpp create mode 100644 src/dolphin/Arm64Emitter.h create mode 100644 src/dolphin/ArmCommon.h create mode 100644 src/dolphin/BitUtils.h create mode 100644 src/dolphin/MathUtil.cpp create mode 100644 src/dolphin/MathUtil.h (limited to 'src/ARM_InstrInfo.cpp') diff --git a/CMakeLists.txt b/CMakeLists.txt index d59e19c..9a0388d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -36,7 +36,7 @@ detect_architecture("__i386__" x86) detect_architecture("__arm__" ARM) detect_architecture("__aarch64__" ARM64) -if (ARCHITECTURE STREQUAL x86_64) +if (ARCHITECTURE STREQUAL x86_64 OR ARCHITECTURE STREQUAL ARM64) option(ENABLE_JIT "Enable x64 JIT recompiler" ON) endif() diff --git a/src/ARM.h b/src/ARM.h index e252d23..8282c01 100644 --- a/src/ARM.h +++ b/src/ARM.h @@ -246,10 +246,14 @@ public: u32 DTCMSetting, ITCMSetting; - u8 ITCM[0x8000]; + // for aarch64 JIT they need to go up here + // to be addressable by a 12-bit immediate u32 ITCMSize; - u8 DTCM[0x4000]; u32 DTCMBase, DTCMSize; + s32 RegionCodeCycles; + + u8 ITCM[0x8000]; + u8 DTCM[0x4000]; u8 ICache[0x2000]; u32 ICacheTags[64*4]; @@ -274,7 +278,6 @@ public: // code/16N/32N/32S u8 MemTimings[0x100000][4]; - s32 RegionCodeCycles; u8* CurICacheLine; }; diff --git a/src/ARMJIT.cpp b/src/ARMJIT.cpp index 8fd7708..561fabb 100644 --- a/src/ARMJIT.cpp +++ b/src/ARMJIT.cpp @@ -6,7 +6,11 @@ #include "Config.h" #include "ARMJIT_Internal.h" +#if defined(__x86_64__) #include "ARMJIT_x64/ARMJIT_Compiler.h" +#else +#include "ARMJIT_A64/ARMJIT_Compiler.h" +#endif #include "ARMInterpreter_ALU.h" #include "ARMInterpreter_LoadStore.h" diff --git a/src/ARMJIT_A64/ARMJIT_ALU.cpp b/src/ARMJIT_A64/ARMJIT_ALU.cpp new file mode 100644 index 0000000..0fe6a97 --- /dev/null +++ b/src/ARMJIT_A64/ARMJIT_ALU.cpp @@ -0,0 +1,837 @@ +#include "ARMJIT_Compiler.h" + +using namespace Arm64Gen; + +namespace ARMJIT +{ + +void Compiler::Comp_RegShiftReg(int op, bool S, Op2& op2, ARM64Reg rs) +{ + if (!(CurInstr.SetFlags & 0x2)) + S = false; + + CPSRDirty |= S; + + UBFX(W1, rs, 0, 8); + + if (!S) + { + if (op == 3) + RORV(W0, op2.Reg.Rm, W1); + else + { + CMP(W1, 32); + if (op == 2) + { + MOVI2R(W2, 31); + CSEL(W1, W2, W1, CC_GE); + ASRV(W0, op2.Reg.Rm, W1); + } + else + { + if (op == 0) + LSLV(W0, op2.Reg.Rm, W1); + else if (op == 1) + LSRV(W0, op2.Reg.Rm, W1); + CSEL(W0, WZR, W0, CC_GE); + } + } + } + else + { + MOV(W0, op2.Reg.Rm); + FixupBranch zero = CBZ(W1); + + SUB(W1, W1, 1); + if (op == 3) + { + RORV(W0, op2.Reg.Rm, W1); + BFI(RCPSR, W0, 29, 1); + } + else + { + CMP(W1, 31); + if (op == 2) + { + MOVI2R(W2, 31); + CSEL(W1, W2, W1, CC_GT); + ASRV(W0, op2.Reg.Rm, W1); + BFI(RCPSR, W0, 29, 1); + } + else + { + if (op == 0) + { + LSLV(W0, op2.Reg.Rm, W1); + UBFX(W1, W0, 31, 1); + } + else if (op == 1) + LSRV(W0, op2.Reg.Rm, W1); + CSEL(W1, WZR, op ? W0 : W1, CC_GT); + BFI(RCPSR, W1, 29, 1); + CSEL(W0, WZR, W0, CC_GE); + } + } + + MOV(W0, W0, ArithOption(W0, (ShiftType)op, 1)); + SetJumpTarget(zero); + } + op2 = Op2(W0, ST_LSL, 0); +} + +void Compiler::Comp_RegShiftImm(int op, int amount, bool S, Op2& op2, ARM64Reg tmp) +{ + if (!(CurInstr.SetFlags & 0x2)) + S = false; + + CPSRDirty |= S; + + switch (op) + { + case 0: // LSL + if (S && amount) + { + UBFX(tmp, op2.Reg.Rm, 32 - amount, 1); + BFI(RCPSR, tmp, 29, 1); + } + op2 = Op2(op2.Reg.Rm, ST_LSL, amount); + return; + case 1: // LSR + if (S) + { + UBFX(tmp, op2.Reg.Rm, (amount ? 
amount : 32) - 1, 1); + BFI(RCPSR, tmp, 29, 1); + } + if (amount == 0) + { + op2 = Op2(0); + return; + } + op2 = Op2(op2.Reg.Rm, ST_LSR, amount); + return; + case 2: // ASR + if (S) + { + UBFX(tmp, op2.Reg.Rm, (amount ? amount : 32) - 1, 1); + BFI(RCPSR, tmp, 29, 1); + } + op2 = Op2(op2.Reg.Rm, ST_ASR, amount ? amount : 31); + return; + case 3: // ROR + if (amount == 0) + { + UBFX(tmp, RCPSR, 29, 1); + LSL(tmp, tmp, 31); + if (S) + BFI(RCPSR, op2.Reg.Rm, 29, 1); + ORR(tmp, tmp, op2.Reg.Rm, ArithOption(tmp, ST_LSR, 1)); + + op2 = Op2(tmp, ST_LSL, 0); + } + else + { + if (S) + { + UBFX(tmp, op2.Reg.Rm, amount - 1, 1); + BFI(RCPSR, tmp, 29, 1); + } + op2 = Op2(op2.Reg.Rm, ST_ROR, amount); + } + return; + } +} + +void Compiler::Comp_RetriveFlags(bool retriveCV) +{ + if (CurInstr.SetFlags) + CPSRDirty = true; + + if (CurInstr.SetFlags & 0x4) + { + CSET(W0, CC_EQ); + BFI(RCPSR, W0, 30, 1); + } + if (CurInstr.SetFlags & 0x8) + { + CSET(W0, CC_MI); + BFI(RCPSR, W0, 31, 1); + } + if (retriveCV) + { + if (CurInstr.SetFlags & 0x2) + { + CSET(W0, CC_CS); + BFI(RCPSR, W0, 29, 1); + } + if (CurInstr.SetFlags & 0x1) + { + CSET(W0, CC_VS); + BFI(RCPSR, W0, 28, 1); + } + } +} + +void Compiler::Comp_Logical(int op, bool S, ARM64Reg rd, ARM64Reg rn, Op2 op2) +{ + if (S && !CurInstr.SetFlags) + S = false; + + switch (op) + { + case 0x0: // AND + if (S) + { + if (op2.IsImm) + ANDSI2R(rd, rn, op2.Imm, W0); + else + ANDS(rd, rn, op2.Reg.Rm, op2.ToArithOption()); + } + else + { + if (op2.IsImm) + ANDI2R(rd, rn, op2.Imm, W0); + else + AND(rd, rn, op2.Reg.Rm, op2.ToArithOption()); + } + break; + case 0x1: // EOR + if (op2.IsImm) + EORI2R(rd, rn, op2.Imm, W0); + else + EOR(rd, rn, op2.Reg.Rm, op2.ToArithOption()); + if (S && FlagsNZNeeded()) + TST(rd, rd); + break; + case 0xC: // ORR + if (op2.IsImm) + ORRI2R(rd, rn, op2.Imm, W0); + else + ORR(rd, rn, op2.Reg.Rm, op2.ToArithOption()); + if (S && FlagsNZNeeded()) + TST(rd, rd); + break; + case 0xE: // BIC + if (S) + { + if (op2.IsImm) + ANDSI2R(rd, rn, ~op2.Imm, W0); + else + BICS(rd, rn, op2.Reg.Rm, op2.ToArithOption()); + } + else + { + if (op2.IsImm) + ANDI2R(rd, rn, ~op2.Imm, W0); + else + BIC(rd, rn, op2.Reg.Rm, op2.ToArithOption()); + } + break; + } + + if (S) + Comp_RetriveFlags(false); +} + +void Compiler::Comp_Arithmetic(int op, bool S, ARM64Reg rd, ARM64Reg rn, Op2 op2) +{ + if (!op2.IsImm && op2.Reg.ShiftType == ST_ROR) + { + MOV(W0, op2.Reg.Rm, op2.ToArithOption()); + op2 = Op2(W0, ST_LSL, 0); + } + + if (S && !CurInstr.SetFlags) + S = false; + + bool CVInGP = false; + switch (op) + { + case 0x2: // SUB + if (S) + { + if (op2.IsImm) + SUBSI2R(rd, rn, op2.Imm, W0); + else + SUBS(rd, rn, op2.Reg.Rm, op2.ToArithOption()); + } + else + { + if (op2.IsImm) + { + MOVI2R(W2, op2.Imm); + SUBI2R(rd, rn, op2.Imm, W0); + } + else + SUB(rd, rn, op2.Reg.Rm, op2.ToArithOption()); + } + break; + case 0x3: // RSB + if (op2.IsZero()) + { + op2 = Op2(WZR); + } + else if (op2.IsImm) + { + MOVI2R(W1, op2.Imm); + op2 = Op2(W1); + } + else if (op2.Reg.ShiftAmount != 0) + { + MOV(W1, op2.Reg.Rm, op2.ToArithOption()); + op2 = Op2(W1); + } + + if (S) + SUBS(rd, op2.Reg.Rm, rn); + else + SUB(rd, op2.Reg.Rm, rn); + break; + case 0x4: // ADD + if (S) + { + if (op2.IsImm) + ADDSI2R(rd, rn, op2.Imm, W0); + else + ADDS(rd, rn, op2.Reg.Rm, op2.ToArithOption()); + } + else + { + if (op2.IsImm) + ADDI2R(rd, rn, op2.Imm, W0); + else + ADD(rd, rn, op2.Reg.Rm, op2.ToArithOption()); + } + break; + case 0x5: // ADC + UBFX(W2, RCPSR, 29, 1); + if (S) + { + CVInGP = true; + ADDS(W1, rn, W2); + 
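+            // ARM's ADC carry can come out of *either* addition (rn + C can
+            // itself carry, e.g. rn = 0xFFFFFFFF with carry-in 1), but ARMv8
+            // NZCV only reflects the last ADDS; so C and V of the first ADDS
+            // are latched into W2/W3 here, and the CSINCs below OR in the
+            // flags of the second ADDS before both bits are BFI'd into RCPSR.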
CSET(W2, CC_CS); + CSET(W3, CC_VS); + if (op2.IsImm) + ADDSI2R(rd, W1, op2.Imm, W0); + else + ADDS(rd, W1, op2.Reg.Rm, op2.ToArithOption()); + CSINC(W2, W2, WZR, CC_CC); + CSINC(W3, W3, WZR, CC_VC); + } + else + { + ADD(W1, rn, W2); + if (op2.IsImm) + ADDI2R(rd, W1, op2.Imm, W0); + else + ADD(rd, W1, op2.Reg.Rm, op2.ToArithOption()); + } + break; + case 0x6: // SBC + UBFX(W2, RCPSR, 29, 1); + // W1 = -op2 - 1 + if (op2.IsImm) + MOVI2R(W1, ~op2.Imm); + else + ORN(W1, WZR, op2.Reg.Rm, op2.ToArithOption()); + if (S) + { + CVInGP = true; + ADDS(W1, W2, W1); + CSET(W2, CC_CS); + CSET(W3, CC_VS); + ADDS(rd, rn, W1); + CSINC(W2, W2, WZR, CC_CC); + CSINC(W3, W3, WZR, CC_VC); + } + else + { + ADD(W1, W2, W1); + ADD(rd, rn, W1); + } + break; + case 0x7: // RSC + UBFX(W2, RCPSR, 29, 1); + // W1 = -rn - 1 + MVN(W1, rn); + if (S) + { + CVInGP = true; + ADDS(W1, W2, W1); + CSET(W2, CC_CS); + CSET(W3, CC_VS); + if (op2.IsImm) + ADDSI2R(rd, W1, op2.Imm); + else + ADDS(rd, W1, op2.Reg.Rm, op2.ToArithOption()); + CSINC(W2, W2, WZR, CC_CC); + CSINC(W3, W3, WZR, CC_VC); + } + else + { + ADD(W1, W2, W1); + if (op2.IsImm) + ADDI2R(rd, W1, op2.Imm); + else + ADD(rd, W1, op2.Reg.Rm, op2.ToArithOption()); + } + break; + } + + if (S) + { + if (CVInGP) + { + BFI(RCPSR, W2, 29, 1); + BFI(RCPSR, W3, 28, 1); + } + Comp_RetriveFlags(!CVInGP); + } +} + +void Compiler::Comp_Compare(int op, ARM64Reg rn, Op2 op2) +{ + if (!op2.IsImm && op2.Reg.ShiftType == ST_ROR) + { + MOV(W0, op2.Reg.Rm, op2.ToArithOption()); + op2 = Op2(W0, ST_LSL, 0); + } + + switch (op) + { + case 0x8: // TST + if (op2.IsImm) + TSTI2R(rn, op2.Imm, W0); + else + ANDS(WZR, rn, op2.Reg.Rm, op2.ToArithOption()); + break; + case 0x9: // TEQ + if (op2.IsImm) + EORI2R(W0, rn, op2.Imm, W0); + else + EOR(W0, rn, op2.Reg.Rm, op2.ToArithOption()); + TST(W0, W0); + break; + case 0xA: // CMP + if (op2.IsImm) + CMPI2R(rn, op2.Imm, W0); + else + CMP(rn, op2.Reg.Rm, op2.ToArithOption()); + break; + case 0xB: // CMN + if (op2.IsImm) + ADDSI2R(WZR, rn, op2.Imm, W0); + else + CMN(rn, op2.Reg.Rm, op2.ToArithOption()); + break; + } + + Comp_RetriveFlags(op >= 0xA); +} + +// also counts cycles! 
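+// A worked example of the decoding below, assuming the standard ARM data
+// processing encoding: an immediate operand is imm8 rotated right by twice the
+// rotate field, ROR(instr & 0xFF, ((instr >> 8) & 0xF) * 2); for imm8 = 0xFF
+// and rotate = 1 that yields ROR(0xFF, 2) = 0xC000003F. Register operands are
+// shifted either by a 5-bit immediate or by the bottom byte of Rs.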
+void Compiler::A_Comp_GetOp2(bool S, Op2& op2) +{ + if (CurInstr.Instr & (1 << 25)) + { + Comp_AddCycles_C(); + op2 = Op2(ROR(CurInstr.Instr & 0xFF, (CurInstr.Instr >> 7) & 0x1E)); + } + else + { + int op = (CurInstr.Instr >> 5) & 0x3; + op2.Reg.Rm = MapReg(CurInstr.A_Reg(0)); + if (CurInstr.Instr & (1 << 4)) + { + Comp_AddCycles_CI(1); + + ARM64Reg rs = MapReg(CurInstr.A_Reg(8)); + if (CurInstr.A_Reg(0) == 15) + { + ADD(W0, op2.Reg.Rm, 4); + op2.Reg.Rm = W0; + } + Comp_RegShiftReg(op, S, op2, rs); + } + else + { + Comp_AddCycles_C(); + + int amount = (CurInstr.Instr >> 7) & 0x1F; + Comp_RegShiftImm(op, amount, S, op2); + } + } +} + +void Compiler::A_Comp_ALUCmpOp() +{ + u32 op = (CurInstr.Instr >> 21) & 0xF; + ARM64Reg rn = MapReg(CurInstr.A_Reg(16)); + Op2 op2; + A_Comp_GetOp2(op <= 0x9, op2); + + Comp_Compare(op, rn, op2); +} + +void Compiler::A_Comp_ALUMovOp() +{ + bool S = CurInstr.Instr & (1 << 20); + u32 op = (CurInstr.Instr >> 21) & 0xF; + + ARM64Reg rd = MapReg(CurInstr.A_Reg(12)); + Op2 op2; + A_Comp_GetOp2(S, op2); + + if (op == 0xF) // MVN + { + if (op2.IsImm) + { + if (CurInstr.Cond() == 0xE) + RegCache.PutLiteral(CurInstr.A_Reg(12), ~op2.Imm); + MOVI2R(rd, ~op2.Imm); + } + else + ORN(rd, WZR, op2.Reg.Rm, op2.ToArithOption()); + } + else // MOV + { + if (op2.IsImm) + { + if (CurInstr.Cond() == 0xE) + RegCache.PutLiteral(CurInstr.A_Reg(12), op2.Imm); + MOVI2R(rd, op2.Imm); + } + else + MOV(rd, op2.Reg.Rm, op2.ToArithOption()); + } + + if (S) + { + if (FlagsNZNeeded()) + TST(rd, rd); + Comp_RetriveFlags(false); + } + + if (CurInstr.Info.Branches()) + Comp_JumpTo(rd, true, S); +} + +void Compiler::A_Comp_ALUTriOp() +{ + bool S = CurInstr.Instr & (1 << 20); + u32 op = (CurInstr.Instr >> 21) & 0xF; + bool logical = (1 << op) & 0xF303; + + ARM64Reg rd = MapReg(CurInstr.A_Reg(12)); + ARM64Reg rn = MapReg(CurInstr.A_Reg(16)); + Op2 op2; + A_Comp_GetOp2(S && logical, op2); + + if (op2.IsImm && op2.Imm == 0) + op2 = Op2(WZR, ST_LSL, 0); + + if (logical) + Comp_Logical(op, S, rd, rn, op2); + else + Comp_Arithmetic(op, S, rd, rn, op2); + + if (CurInstr.Info.Branches()) + Comp_JumpTo(rd, true, S); +} + +void Compiler::A_Comp_Clz() +{ + Comp_AddCycles_C(); + + ARM64Reg rd = MapReg(CurInstr.A_Reg(12)); + ARM64Reg rm = MapReg(CurInstr.A_Reg(0)); + + CLZ(rd, rm); + + assert(Num == 0); +} + +void Compiler::Comp_Mul_Mla(bool S, bool mla, ARM64Reg rd, ARM64Reg rm, ARM64Reg rs, ARM64Reg rn) +{ + if (Num == 0) + { + Comp_AddCycles_CI(S ? 3 : 1); + } + else + { + CLZ(W0, rs); + CLS(W1, rs); + CMP(W0, W1); + CSEL(W0, W0, W1, CC_GT); + Comp_AddCycles_CI(mla ? 1 : 0, W0, ArithOption(W0, ST_LSR, 3)); + } + + if (mla) + MADD(rd, rm, rs, rn); + else + MUL(rd, rm, rs); + + if (S && FlagsNZNeeded()) + { + TST(rd, rd); + Comp_RetriveFlags(false); + } +} + +void Compiler::A_Comp_Mul_Long() +{ + ARM64Reg rd = MapReg(CurInstr.A_Reg(16)); + ARM64Reg rm = MapReg(CurInstr.A_Reg(0)); + ARM64Reg rs = MapReg(CurInstr.A_Reg(8)); + ARM64Reg rn = MapReg(CurInstr.A_Reg(12)); + + bool S = CurInstr.Instr & (1 << 20); + bool add = CurInstr.Instr & (1 << 21); + bool sign = CurInstr.Instr & (1 << 22); + + if (Num == 0) + { + Comp_AddCycles_CI(S ? 
3 : 1); + } + else + { + CLZ(W0, rs); + CLS(W1, rs); + CMP(W0, W1); + CSEL(W0, W0, W1, CC_GT); + Comp_AddCycles_CI(0, W0, ArithOption(W0, ST_LSR, 3)); + } + + if (add) + { + MOV(W0, rn); + BFI(X0, EncodeRegTo64(rd), 32, 32); + if (sign) + SMADDL(EncodeRegTo64(rn), rm, rs, X0); + else + UMADDL(EncodeRegTo64(rn), rm, rs, X0); + if (S && FlagsNZNeeded()) + TST(EncodeRegTo64(rn), EncodeRegTo64(rn)); + UBFX(EncodeRegTo64(rd), EncodeRegTo64(rn), 32, 32); + } + else + { + if (sign) + SMULL(EncodeRegTo64(rn), rm, rs); + else + UMULL(EncodeRegTo64(rn), rm, rs); + if (S && FlagsNZNeeded()) + TST(EncodeRegTo64(rn), EncodeRegTo64(rn)); + UBFX(EncodeRegTo64(rd), EncodeRegTo64(rn), 32, 32); + } + + if (S) + Comp_RetriveFlags(false); +} + +void Compiler::A_Comp_Mul() +{ + ARM64Reg rd = MapReg(CurInstr.A_Reg(16)); + ARM64Reg rm = MapReg(CurInstr.A_Reg(0)); + ARM64Reg rs = MapReg(CurInstr.A_Reg(8)); + + bool S = CurInstr.Instr & (1 << 20); + bool mla = CurInstr.Instr & (1 << 21); + ARM64Reg rn = INVALID_REG; + if (mla) + rn = MapReg(CurInstr.A_Reg(12)); + + Comp_Mul_Mla(S, mla, rd, rm, rs, rn); +} + +void Compiler::T_Comp_ShiftImm() +{ + Comp_AddCycles_C(); + + u32 op = (CurInstr.Instr >> 11) & 0x3; + int amount = (CurInstr.Instr >> 6) & 0x1F; + + ARM64Reg rd = MapReg(CurInstr.T_Reg(0)); + Op2 op2; + op2.Reg.Rm = MapReg(CurInstr.T_Reg(3)); + Comp_RegShiftImm(op, amount, true, op2); + if (op2.IsImm) + MOVI2R(rd, op2.Imm); + else + MOV(rd, op2.Reg.Rm, op2.ToArithOption()); + if (FlagsNZNeeded()) + TST(rd, rd); + + Comp_RetriveFlags(false); +} + +void Compiler::T_Comp_AddSub_() +{ + Comp_AddCycles_C(); + + Op2 op2; + if (CurInstr.Instr & (1 << 10)) + op2 = Op2((CurInstr.Instr >> 6) & 0x7); + else + op2 = Op2(MapReg(CurInstr.T_Reg(6))); + + Comp_Arithmetic( + CurInstr.Instr & (1 << 9) ? 0x2 : 0x4, + true, + MapReg(CurInstr.T_Reg(0)), + MapReg(CurInstr.T_Reg(3)), + op2); +} + +void Compiler::T_Comp_ALUImm8() +{ + Comp_AddCycles_C(); + + u32 imm = CurInstr.Instr & 0xFF; + int op = (CurInstr.Instr >> 11) & 0x3; + + ARM64Reg rd = MapReg(CurInstr.T_Reg(8)); + + switch (op) + { + case 0: + MOVI2R(rd, imm); + if (FlagsNZNeeded()) + TST(rd, rd); + Comp_RetriveFlags(false); + break; + case 1: + Comp_Compare(0xA, rd, Op2(imm)); + break; + case 2: + case 3: + Comp_Arithmetic(op == 2 ? 0x4 : 0x2, true, rd, rd, Op2(imm)); + break; + } +} + +void Compiler::T_Comp_ALU() +{ + int op = (CurInstr.Instr >> 6) & 0xF; + ARM64Reg rd = MapReg(CurInstr.T_Reg(0)); + ARM64Reg rs = MapReg(CurInstr.T_Reg(3)); + + if ((op >= 0x2 && op <= 0x4) || op == 0x7) + Comp_AddCycles_CI(1); + else + Comp_AddCycles_C(); + + switch (op) + { + case 0x0: + Comp_Logical(0x0, true, rd, rd, Op2(rs)); + break; + case 0x1: + Comp_Logical(0x1, true, rd, rd, Op2(rs)); + break; + case 0x2: + case 0x3: + case 0x4: + case 0x7: + { + Op2 op2; + op2.Reg.Rm = rd; + Comp_RegShiftReg(op == 0x7 ? 
3 : (op - 0x2), true, op2, rs); + MOV(rd, op2.Reg.Rm, op2.ToArithOption()); + if (FlagsNZNeeded()) + TST(rd, rd); + Comp_RetriveFlags(false); + } + break; + case 0x5: + Comp_Arithmetic(0x5, true, rd, rd, Op2(rs)); + break; + case 0x6: + Comp_Arithmetic(0x6, true, rd, rd, Op2(rs)); + break; + case 0x8: + Comp_Compare(0x8, rd, Op2(rs)); + break; + case 0x9: + Comp_Arithmetic(0x3, true, rd, rs, Op2(0)); + break; + case 0xA: + Comp_Compare(0xA, rd, Op2(rs)); + break; + case 0xB: + Comp_Compare(0xB, rd, Op2(rs)); + break; + case 0xC: + Comp_Logical(0xC, true, rd, rd, Op2(rs)); + break; + case 0xD: + Comp_Mul_Mla(true, false, rd, rd, rs, INVALID_REG); + break; + case 0xE: + Comp_Logical(0xE, true, rd, rd, Op2(rs)); + break; + case 0xF: + MVN(rd, rs); + if (FlagsNZNeeded()) + TST(rd, rd); + Comp_RetriveFlags(false); + break; + } +} + +void Compiler::T_Comp_ALU_HiReg() +{ + u32 rd = ((CurInstr.Instr & 0x7) | ((CurInstr.Instr >> 4) & 0x8)); + ARM64Reg rdMapped = MapReg(rd); + ARM64Reg rs = MapReg((CurInstr.Instr >> 3) & 0xF); + + u32 op = (CurInstr.Instr >> 8) & 0x3; + + Comp_AddCycles_C(); + + switch (op) + { + case 0: + Comp_Arithmetic(0x4, false, rdMapped, rdMapped, Op2(rs)); + break; + case 1: + Comp_Compare(0xA, rdMapped, rs); + return; + case 2: + MOV(rdMapped, rs); + break; + } + + if (rd == 15) + { + Comp_JumpTo(rdMapped, false, false); + } +} + +void Compiler::T_Comp_AddSP() +{ + Comp_AddCycles_C(); + + ARM64Reg sp = MapReg(13); + u32 offset = (CurInstr.Instr & 0x7F) << 2; + if (CurInstr.Instr & (1 << 7)) + SUB(sp, sp, offset); + else + ADD(sp, sp, offset); +} + +void Compiler::T_Comp_RelAddr() +{ + Comp_AddCycles_C(); + + ARM64Reg rd = MapReg(CurInstr.T_Reg(8)); + u32 offset = (CurInstr.Instr & 0xFF) << 2; + if (CurInstr.Instr & (1 << 11)) + { + ARM64Reg sp = MapReg(13); + ADD(rd, sp, offset); + } + else + MOVI2R(rd, (R15 & ~2) + offset); +} + +} \ No newline at end of file diff --git a/src/ARMJIT_A64/ARMJIT_Branch.cpp b/src/ARMJIT_A64/ARMJIT_Branch.cpp new file mode 100644 index 0000000..542f0b7 --- /dev/null +++ b/src/ARMJIT_A64/ARMJIT_Branch.cpp @@ -0,0 +1,452 @@ +#include "ARMJIT_Compiler.h" + +using namespace Arm64Gen; + +// hack +const int kCodeCacheTiming = 3; + +namespace ARMJIT +{ + +template +void jumpToTrampoline(T* cpu, u32 addr, bool changeCPSR) +{ + cpu->JumpTo(addr, changeCPSR); +} + +void Compiler::Comp_JumpTo(u32 addr, bool forceNonConstantCycles) +{ + // we can simplify constant branches by a lot + // it's not completely safe to assume stuff like, which instructions to preload + // we'll see how it works out + + IrregularCycles = true; + + u32 newPC; + u32 cycles = 0; + bool setupRegion = false; + + if (addr & 0x1 && !Thumb) + { + CPSRDirty = true; + ORRI2R(RCPSR, RCPSR, 0x20); + } + else if (!(addr & 0x1) && Thumb) + { + CPSRDirty = true; + ANDI2R(RCPSR, RCPSR, ~0x20); + } + + if (Num == 0) + { + ARMv5* cpu9 = (ARMv5*)CurCPU; + + u32 oldregion = R15 >> 24; + u32 newregion = addr >> 24; + + u32 regionCodeCycles = cpu9->MemTimings[addr >> 12][0]; + u32 compileTimeCodeCycles = cpu9->RegionCodeCycles; + cpu9->RegionCodeCycles = regionCodeCycles; + + MOVI2R(W0, regionCodeCycles); + STR(INDEX_UNSIGNED, W0, RCPU, offsetof(ARMv5, RegionCodeCycles)); + + setupRegion = newregion != oldregion; + if (setupRegion) + cpu9->SetupCodeMem(addr); + + if (addr & 0x1) + { + addr &= ~0x1; + newPC = addr+2; + + // two-opcodes-at-once fetch + // doesn't matter if we put garbage in the MSbs there + if (addr & 0x2) + { + cpu9->CodeRead32(addr-2, true) >> 16; + cycles += cpu9->CodeCycles; + 
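+                // These fetches run at compile time purely for their side
+                // effects: they reproduce the pipeline refill ARMv5::JumpTo
+                // would do at runtime (re-reading the word containing the
+                // target at addr - 2, then the next word) so the accumulated
+                // CodeCycles can be baked into the block as a constant.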
cpu9->CodeRead32(addr+2, false); + cycles += CurCPU->CodeCycles; + } + else + { + cpu9->CodeRead32(addr, true); + cycles += cpu9->CodeCycles; + } + } + else + { + addr &= ~0x3; + newPC = addr+4; + + cpu9->CodeRead32(addr, true); + cycles += cpu9->CodeCycles; + cpu9->CodeRead32(addr+4, false); + cycles += cpu9->CodeCycles; + } + + cpu9->RegionCodeCycles = compileTimeCodeCycles; + if (setupRegion) + cpu9->SetupCodeMem(R15); + } + else + { + ARMv4* cpu7 = (ARMv4*)CurCPU; + + u32 codeRegion = addr >> 24; + u32 codeCycles = addr >> 15; // cheato + + cpu7->CodeRegion = codeRegion; + cpu7->CodeCycles = codeCycles; + + MOVI2R(W0, codeRegion); + STR(INDEX_UNSIGNED, W0, RCPU, offsetof(ARM, CodeRegion)); + MOVI2R(W0, codeCycles); + STR(INDEX_UNSIGNED, W0, RCPU, offsetof(ARM, CodeCycles)); + + if (addr & 0x1) + { + addr &= ~0x1; + newPC = addr+2; + + // this is necessary because ARM7 bios protection + u32 compileTimePC = CurCPU->R[15]; + CurCPU->R[15] = newPC; + + cycles += NDS::ARM7MemTimings[codeCycles][0] + NDS::ARM7MemTimings[codeCycles][1]; + + CurCPU->R[15] = compileTimePC; + } + else + { + addr &= ~0x3; + newPC = addr+4; + + u32 compileTimePC = CurCPU->R[15]; + CurCPU->R[15] = newPC; + + cycles += NDS::ARM7MemTimings[codeCycles][2] + NDS::ARM7MemTimings[codeCycles][3]; + + CurCPU->R[15] = compileTimePC; + } + + cpu7->CodeRegion = R15 >> 24; + cpu7->CodeCycles = addr >> 15; + } + + if (Exit) + { + MOVI2R(W0, newPC); + STR(INDEX_UNSIGNED, W0, RCPU, offsetof(ARM, R[15])); + } + if ((Thumb || CurInstr.Cond() >= 0xE) && !forceNonConstantCycles) + ConstantCycles += cycles; + else + ADD(RCycles, RCycles, cycles); +} + + +void* Compiler::Gen_JumpTo9(int kind) +{ + AlignCode16(); + void* res = GetRXPtr(); + + MOVI2R(W2, kCodeCacheTiming); + // W1 - code cycles non branch + // W2 - branch code cycles + LSR(W1, W0, 12); + LSL(W1, W1, 2); + ADDI2R(W1, W1, offsetof(ARMv5, MemTimings), W2); + LDRB(W1, RCPU, W1); + + LDR(INDEX_UNSIGNED, W3, RCPU, offsetof(ARMv5, ITCMSize)); + + STR(INDEX_UNSIGNED, W1, RCPU, offsetof(ARMv5, RegionCodeCycles)); + + CMP(W0, W3); + FixupBranch outsideITCM = B(CC_LO); + MOVI2R(W1, 1); + MOVI2R(W2, 1); + SetJumpTarget(outsideITCM); + + FixupBranch switchToThumb; + if (kind == 0) + switchToThumb = TBNZ(W0, 0); + + if (kind == 0 || kind == 1) + { + ANDI2R(W0, W0, ~3); + + if (kind == 0) + ANDI2R(RCPSR, RCPSR, ~0x20); + + ADD(W3, W0, 4); + STR(INDEX_UNSIGNED, W3, RCPU, offsetof(ARM, R[15])); + + ADD(W1, W1, W2); + ADD(RCycles, RCycles, W1); + + RET(); + } + if (kind == 0 || kind == 2) + { + if (kind == 0) + { + SetJumpTarget(switchToThumb); + + ORRI2R(RCPSR, RCPSR, 0x20); + } + + ANDI2R(W0, W0, ~1); + + ADD(W3, W0, 2); + STR(INDEX_UNSIGNED, W3, RCPU, offsetof(ARM, R[15])); + + FixupBranch halfwordLoc = TBZ(W0, 1); + ADD(W1, W1, W2); + ADD(RCycles, RCycles, W1); + RET(); + + SetJumpTarget(halfwordLoc); + ADD(RCycles, RCycles, W2); + RET(); + } + + return res; +} + +void* Compiler::Gen_JumpTo7(int kind) +{ + void* res = GetRXPtr(); + + LSR(W1, W0, 24); + STR(INDEX_UNSIGNED, W1, RCPU, offsetof(ARM, CodeRegion)); + LSR(W1, W0, 15); + STR(INDEX_UNSIGNED, W1, RCPU, offsetof(ARM, CodeCycles)); + + MOVP2R(X2, NDS::ARM7MemTimings); + LDR(W3, X2, ArithOption(W1, true)); + + FixupBranch switchToThumb; + if (kind == 0) + switchToThumb = TBNZ(W0, 0); + + if (kind == 0 || kind == 1) + { + UBFX(W2, W3, 0, 8); + UBFX(W3, W3, 8, 8); + ADD(W2, W3, W2); + ADD(RCycles, RCycles, W2); + + ANDI2R(W0, W0, ~3); + + if (kind == 0) + ANDI2R(RCPSR, RCPSR, ~0x20); + + ADD(W3, W0, 4); + STR(INDEX_UNSIGNED, W3, 
RCPU, offsetof(ARM, R[15])); + + RET(); + } + if (kind == 0 || kind == 2) + { + if (kind == 0) + { + SetJumpTarget(switchToThumb); + + ORRI2R(RCPSR, RCPSR, 0x20); + } + + UBFX(W2, W3, 16, 8); + UBFX(W3, W3, 24, 8); + ADD(W2, W3, W2); + ADD(RCycles, RCycles, W2); + + ANDI2R(W0, W0, ~1); + + ADD(W3, W0, 2); + STR(INDEX_UNSIGNED, W3, RCPU, offsetof(ARM, R[15])); + + RET(); + } + + return res; +} + +void Compiler::Comp_JumpTo(Arm64Gen::ARM64Reg addr, bool switchThumb, bool restoreCPSR) +{ + IrregularCycles = true; + + if (!restoreCPSR) + { + if (switchThumb) + CPSRDirty = true; + MOV(W0, addr); + BL((Num ? JumpToFuncs7 : JumpToFuncs9)[switchThumb ? 0 : (Thumb + 1)]); + } + else + { + BitSet16 hiRegsLoaded(RegCache.DirtyRegs & 0xFF00); + bool previouslyDirty = CPSRDirty; + SaveCPSR(); + + if (restoreCPSR) + { + if (Thumb || CurInstr.Cond() >= 0xE) + RegCache.Flush(); + else + { + // the ugly way... + // we only save them, to load and save them again + for (int reg : hiRegsLoaded) + SaveReg(reg, RegCache.Mapping[reg]); + } + } + + if (switchThumb) + MOV(W1, addr); + else + { + if (Thumb) + ORRI2R(W1, addr, 1); + else + ANDI2R(W1, addr, ~1); + } + MOV(X0, RCPU); + MOVI2R(W2, restoreCPSR); + if (Num == 0) + QuickCallFunction(X3, jumpToTrampoline); + else + QuickCallFunction(X3, jumpToTrampoline); + + if (!Thumb && restoreCPSR && CurInstr.Cond() < 0xE) + { + for (int reg : hiRegsLoaded) + LoadReg(reg, RegCache.Mapping[reg]); + } + + if (previouslyDirty) + LoadCPSR(); + CPSRDirty = previouslyDirty; + } +} + +void Compiler::A_Comp_BranchImm() +{ + int op = (CurInstr.Instr >> 24) & 1; + s32 offset = (s32)(CurInstr.Instr << 8) >> 6; + u32 target = R15 + offset; + bool link = op; + + if (CurInstr.Cond() == 0xF) // BLX_imm + { + target += (op << 1) + 1; + link = true; + } + + if (link) + MOVI2R(MapReg(14), R15 - 4); + + Comp_JumpTo(target); +} + +void Compiler::A_Comp_BranchXchangeReg() +{ + ARM64Reg rn = MapReg(CurInstr.A_Reg(0)); + MOV(W0, rn); + if ((CurInstr.Instr & 0xF0) == 0x30) // BLX_reg + MOVI2R(MapReg(14), R15 - 4); + Comp_JumpTo(W0, true); +} + +void Compiler::T_Comp_BCOND() +{ + u32 cond = (CurInstr.Instr >> 8) & 0xF; + FixupBranch skipExecute = CheckCondition(cond); + + s32 offset = (s32)(CurInstr.Instr << 24) >> 23; + Comp_JumpTo(R15 + offset + 1, true); + + Comp_BranchSpecialBehaviour(); + + FixupBranch skipFailed = B(); + SetJumpTarget(skipExecute); + Comp_AddCycles_C(true); + + if (CurInstr.BranchFlags & branch_FollowCondTaken) + { + SaveCPSR(false); + RegCache.PrepareExit(); + + ADD(W0, RCycles, ConstantCycles); + ABI_PopRegisters(SavedRegs); + RET(); + } + + SetJumpTarget(skipFailed); +} + +void Compiler::T_Comp_B() +{ + s32 offset = (s32)((CurInstr.Instr & 0x7FF) << 21) >> 20; + Comp_JumpTo(R15 + offset + 1); +} + +void Compiler::T_Comp_BranchXchangeReg() +{ + bool link = CurInstr.Instr & (1 << 7); + + if (link) + { + if (Num == 1) + { + printf("BLX unsupported on ARM7!!!\n"); + return; + } + MOV(W0, MapReg(CurInstr.A_Reg(3))); + MOVI2R(MapReg(14), R15 - 1); + Comp_JumpTo(W0, true); + } + else + { + ARM64Reg rn = MapReg(CurInstr.A_Reg(3)); + Comp_JumpTo(rn, true); + } +} + +void Compiler::T_Comp_BL_LONG_1() +{ + s32 offset = (s32)((CurInstr.Instr & 0x7FF) << 21) >> 9; + MOVI2R(MapReg(14), R15 + offset); + Comp_AddCycles_C(); +} + +void Compiler::T_Comp_BL_LONG_2() +{ + ARM64Reg lr = MapReg(14); + s32 offset = (CurInstr.Instr & 0x7FF) << 1; + ADD(W0, lr, offset); + MOVI2R(lr, (R15 - 2) | 1); + Comp_JumpTo(W0, Num == 0 && !(CurInstr.Instr & (1 << 12))); +} + +void 
Compiler::T_Comp_BL_Merged() +{ + Comp_AddCycles_C(); + + R15 += 2; + + u32 upperPart = CurInstr.Instr >> 16; + u32 target = (R15 - 2) + ((s32)((CurInstr.Instr & 0x7FF) << 21) >> 9); + target += (upperPart & 0x7FF) << 1; + + if (Num == 1 || upperPart & (1 << 12)) + target |= 1; + + MOVI2R(MapReg(14), (R15 - 2) | 1); + + Comp_JumpTo(target); +} + +} \ No newline at end of file diff --git a/src/ARMJIT_A64/ARMJIT_Compiler.cpp b/src/ARMJIT_A64/ARMJIT_Compiler.cpp new file mode 100644 index 0000000..89d0029 --- /dev/null +++ b/src/ARMJIT_A64/ARMJIT_Compiler.cpp @@ -0,0 +1,707 @@ +#include "ARMJIT_Compiler.h" + +#include "../ARMInterpreter.h" + +#include "../ARMJIT_Internal.h" + +#ifdef __SWITCH__ +#include "../switch/compat_switch.h" + +extern char __start__; +#endif + +#include + +using namespace Arm64Gen; + + +namespace ARMJIT +{ + +/* + + Recompiling classic ARM to ARMv8 code is at the same time + easier and trickier than compiling to a less related architecture + like x64. At one hand you can translate a lot of instructions directly. + But at the same time, there are a ton of exceptions, like for + example ADD and SUB can't have a RORed second operand on ARMv8. + */ + +template <> +const ARM64Reg RegisterCache::NativeRegAllocOrder[] = + {W19, W20, W21, W22, W23, W24, W25, W26}; +template <> +const int RegisterCache::NativeRegsAvailable = 8; + +const int JitMemSize = 16 * 1024 * 1024; + +void Compiler::MovePC() +{ + ADD(MapReg(15), MapReg(15), Thumb ? 2 : 4); +} + +Compiler::Compiler() +{ +#ifdef __SWITCH__ + JitRWBase = memalign(0x1000, JitMemSize); + + JitRXStart = (u8*)&__start__ - JitMemSize - 0x1000; + JitRWStart = virtmemReserve(JitMemSize); + MemoryInfo info = {0}; + u32 pageInfo = {0}; + int i = 0; + while (JitRXStart != NULL) + { + svcQueryMemory(&info, &pageInfo, (u64)JitRXStart); + if (info.type != MemType_Unmapped) + JitRXStart = (void*)((u8*)info.addr - JitMemSize - 0x1000); + else + break; + if (i++ > 8) + { + printf("couldn't find unmapped place for jit memory\n"); + JitRXStart = NULL; + } + } + + assert(JitRXStart != NULL); + + bool succeded = R_SUCCEEDED(svcMapProcessCodeMemory(envGetOwnProcessHandle(), (u64)JitRXStart, (u64)JitRWBase, JitMemSize)); + assert(succeded); + succeded = R_SUCCEEDED(svcSetProcessMemoryPermission(envGetOwnProcessHandle(), (u64)JitRXStart, JitMemSize, Perm_Rx)); + assert(succeded); + succeded = R_SUCCEEDED(svcMapProcessMemory(JitRWStart, envGetOwnProcessHandle(), (u64)JitRXStart, JitMemSize)); + assert(succeded); + + SetCodeBase((u8*)JitRWStart, (u8*)JitRXStart); + JitMemUseableSize = JitMemSize; + Reset(); +#endif + + for (int i = 0; i < 3; i++) + { + for (int j = 0; j < 2; j++) + { + MemFunc9[i][j] = Gen_MemoryRoutine9(8 << i, j); + } + } + MemFunc7[0][0] = (void*)NDS::ARM7Read8; + MemFunc7[1][0] = (void*)NDS::ARM7Read16; + MemFunc7[2][0] = (void*)NDS::ARM7Read32; + MemFunc7[0][1] = (void*)NDS::ARM7Write8; + MemFunc7[1][1] = (void*)NDS::ARM7Write16; + MemFunc7[2][1] = (void*)NDS::ARM7Write32; + + for (int i = 0; i < 2; i++) + { + for (int j = 0; j < 2; j++) + { + MemFuncsSeq9[i][j] = Gen_MemoryRoutine9Seq(i, j); + MemFuncsSeq7[i][j] = Gen_MemoryRoutine7Seq(i, j); + } + } + + for (int i = 0; i < 3; i++) + { + JumpToFuncs9[i] = Gen_JumpTo9(i); + JumpToFuncs7[i] = Gen_JumpTo7(i); + } + + /* + W0 - mode + W1 - reg num + W3 - in/out value of reg + */ + { + ReadBanked = GetRXPtr(); + + ADD(X2, RCPU, X1, ArithOption(X1, ST_LSL, 2)); + CMP(W0, 0x11); + FixupBranch fiq = B(CC_EQ); + SUBS(W1, W1, 13 - 8); + ADD(X2, RCPU, X1, ArithOption(X1, ST_LSL, 2)); + 
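+        // Inferred calling convention for this trampoline: W1 seems to hold
+        // reg - 8 on entry, so the first ADD indexes R_FIQ (FIQ banks R8-R14),
+        // while the SUBS rebases it to reg - 13 for the other modes, which
+        // only bank R13/R14; a negative result branches to notEverything and
+        // RETs with W3 untouched, since those registers aren't banked.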
FixupBranch notEverything = B(CC_LT); + CMP(W0, 0x12); + FixupBranch irq = B(CC_EQ); + CMP(W0, 0x13); + FixupBranch svc = B(CC_EQ); + CMP(W0, 0x17); + FixupBranch abt = B(CC_EQ); + CMP(W0, 0x1B); + FixupBranch und = B(CC_EQ); + SetJumpTarget(notEverything); + RET(); + + SetJumpTarget(fiq); + LDR(INDEX_UNSIGNED, W3, X2, offsetof(ARM, R_FIQ)); + RET(); + SetJumpTarget(irq); + LDR(INDEX_UNSIGNED, W3, X2, offsetof(ARM, R_IRQ)); + RET(); + SetJumpTarget(svc); + LDR(INDEX_UNSIGNED, W3, X2, offsetof(ARM, R_SVC)); + RET(); + SetJumpTarget(abt); + LDR(INDEX_UNSIGNED, W3, X2, offsetof(ARM, R_ABT)); + RET(); + SetJumpTarget(und); + LDR(INDEX_UNSIGNED, W3, X2, offsetof(ARM, R_UND)); + RET(); + } + { + WriteBanked = GetRXPtr(); + + ADD(X2, RCPU, X1, ArithOption(X1, ST_LSL, 2)); + CMP(W0, 0x11); + FixupBranch fiq = B(CC_EQ); + SUBS(W1, W1, 13 - 8); + ADD(X2, RCPU, X1, ArithOption(X1, ST_LSL, 2)); + FixupBranch notEverything = B(CC_LT); + CMP(W0, 0x12); + FixupBranch irq = B(CC_EQ); + CMP(W0, 0x13); + FixupBranch svc = B(CC_EQ); + CMP(W0, 0x17); + FixupBranch abt = B(CC_EQ); + CMP(W0, 0x1B); + FixupBranch und = B(CC_EQ); + SetJumpTarget(notEverything); + MOVI2R(W4, 0); + RET(); + + SetJumpTarget(fiq); + STR(INDEX_UNSIGNED, W3, X2, offsetof(ARM, R_FIQ)); + MOVI2R(W4, 1); + RET(); + SetJumpTarget(irq); + STR(INDEX_UNSIGNED, W3, X2, offsetof(ARM, R_IRQ)); + MOVI2R(W4, 1); + RET(); + SetJumpTarget(svc); + STR(INDEX_UNSIGNED, W3, X2, offsetof(ARM, R_SVC)); + MOVI2R(W4, 1); + RET(); + SetJumpTarget(abt); + STR(INDEX_UNSIGNED, W3, X2, offsetof(ARM, R_ABT)); + MOVI2R(W4, 1); + RET(); + SetJumpTarget(und); + STR(INDEX_UNSIGNED, W3, X2, offsetof(ARM, R_UND)); + MOVI2R(W4, 1); + RET(); + } + + //FlushIcache(); + + JitMemUseableSize -= GetCodeOffset(); + SetCodeBase((u8*)GetRWPtr(), (u8*)GetRXPtr()); +} + +Compiler::~Compiler() +{ +#ifdef __SWITCH__ + if (JitRWStart != NULL) + { + bool succeded = R_SUCCEEDED(svcUnmapProcessMemory(JitRWStart, envGetOwnProcessHandle(), (u64)JitRXStart, JitMemSize)); + assert(succeded); + virtmemFree(JitRWStart, JitMemSize); + succeded = R_SUCCEEDED(svcUnmapProcessCodeMemory(envGetOwnProcessHandle(), (u64)JitRXStart, (u64)JitRWBase, JitMemSize)); + assert(succeded); + free(JitRWBase); + } +#endif +} + +void Compiler::LoadReg(int reg, ARM64Reg nativeReg) +{ + if (reg == 15) + MOVI2R(nativeReg, R15); + else + LDR(INDEX_UNSIGNED, nativeReg, RCPU, offsetof(ARM, R[reg])); +} + +void Compiler::SaveReg(int reg, ARM64Reg nativeReg) +{ + STR(INDEX_UNSIGNED, nativeReg, RCPU, offsetof(ARM, R[reg])); +} + +void Compiler::LoadCPSR() +{ + assert(!CPSRDirty); + LDR(INDEX_UNSIGNED, RCPSR, RCPU, offsetof(ARM, CPSR)); +} + +void Compiler::SaveCPSR(bool markClean) +{ + if (CPSRDirty) + { + STR(INDEX_UNSIGNED, RCPSR, RCPU, offsetof(ARM, CPSR)); + CPSRDirty = CPSRDirty && !markClean; + } +} + +FixupBranch Compiler::CheckCondition(u32 cond) +{ + if (cond >= 0x8) + { + LSR(W1, RCPSR, 28); + MOVI2R(W2, 1); + LSLV(W2, W2, W1); + ANDI2R(W2, W2, ARM::ConditionTable[cond], W3); + + return CBZ(W2); + } + else + { + u8 bit = (28 + ((~(cond >> 1) & 1) << 1 | (cond >> 2 & 1) ^ (cond >> 1 & 1))); + + if (cond & 1) + return TBNZ(RCPSR, bit); + else + return TBZ(RCPSR, bit); + } +} + +#define F(x) &Compiler::A_Comp_##x +const Compiler::CompileFunc A_Comp[ARMInstrInfo::ak_Count] = +{ + // AND + F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), + F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), 
F(ALUTriOp), + // EOR + F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), + F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), + // SUB + F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), + F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), + // RSB + F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), + F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), + // ADD + F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), + F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), + // ADC + F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), + F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), + // SBC + F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), + F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), + // RSC + F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), + F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), + // ORR + F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), + F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), + // MOV + F(ALUMovOp), F(ALUMovOp), F(ALUMovOp), F(ALUMovOp), F(ALUMovOp), F(ALUMovOp), F(ALUMovOp), F(ALUMovOp), F(ALUMovOp), + F(ALUMovOp), F(ALUMovOp), F(ALUMovOp), F(ALUMovOp), F(ALUMovOp), F(ALUMovOp), F(ALUMovOp), F(ALUMovOp), F(ALUMovOp), + // BIC + F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), + F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), F(ALUTriOp), + // MVN + F(ALUMovOp), F(ALUMovOp), F(ALUMovOp), F(ALUMovOp), F(ALUMovOp), F(ALUMovOp), F(ALUMovOp), F(ALUMovOp), F(ALUMovOp), + F(ALUMovOp), F(ALUMovOp), F(ALUMovOp), F(ALUMovOp), F(ALUMovOp), F(ALUMovOp), F(ALUMovOp), F(ALUMovOp), F(ALUMovOp), + // TST + F(ALUCmpOp), F(ALUCmpOp), F(ALUCmpOp), F(ALUCmpOp), F(ALUCmpOp), F(ALUCmpOp), F(ALUCmpOp), F(ALUCmpOp), F(ALUCmpOp), + // TEQ + F(ALUCmpOp), F(ALUCmpOp), F(ALUCmpOp), F(ALUCmpOp), F(ALUCmpOp), F(ALUCmpOp), F(ALUCmpOp), F(ALUCmpOp), F(ALUCmpOp), + // CMP + F(ALUCmpOp), F(ALUCmpOp), F(ALUCmpOp), F(ALUCmpOp), F(ALUCmpOp), F(ALUCmpOp), F(ALUCmpOp), F(ALUCmpOp), F(ALUCmpOp), + // CMN + F(ALUCmpOp), F(ALUCmpOp), F(ALUCmpOp), F(ALUCmpOp), F(ALUCmpOp), F(ALUCmpOp), F(ALUCmpOp), F(ALUCmpOp), F(ALUCmpOp), + // Mul + F(Mul), F(Mul), F(Mul_Long), F(Mul_Long), F(Mul_Long), F(Mul_Long), NULL, NULL, NULL, NULL, NULL, + // ARMv5 exclusives + F(Clz), NULL, NULL, NULL, NULL, + + // STR + F(MemWB), F(MemWB), F(MemWB), F(MemWB), F(MemWB), F(MemWB), F(MemWB), F(MemWB), F(MemWB), F(MemWB), + // STRB + F(MemWB), F(MemWB), 
F(MemWB), F(MemWB), F(MemWB), F(MemWB), F(MemWB), F(MemWB), F(MemWB), F(MemWB), + // LDR + F(MemWB), F(MemWB), F(MemWB), F(MemWB), F(MemWB), F(MemWB), F(MemWB), F(MemWB), F(MemWB), F(MemWB), + // LDRB + F(MemWB), F(MemWB), F(MemWB), F(MemWB), F(MemWB), F(MemWB), F(MemWB), F(MemWB), F(MemWB), F(MemWB), + // STRH + F(MemHD), F(MemHD), F(MemHD), F(MemHD), + // LDRD + NULL, NULL, NULL, NULL, + // STRD + NULL, NULL, NULL, NULL, + // LDRH + F(MemHD), F(MemHD), F(MemHD), F(MemHD), + // LDRSB + F(MemHD), F(MemHD), F(MemHD), F(MemHD), + // LDRSH + F(MemHD), F(MemHD), F(MemHD), F(MemHD), + // Swap + NULL, NULL, + // LDM, STM + F(LDM_STM), F(LDM_STM), + // Branch + F(BranchImm), F(BranchImm), F(BranchImm), F(BranchXchangeReg), F(BranchXchangeReg), + // Special + NULL, NULL, NULL, NULL, NULL, NULL, NULL +}; +#undef F +#define F(x) &Compiler::T_Comp_##x +const Compiler::CompileFunc T_Comp[ARMInstrInfo::tk_Count] = +{ + // Shift imm + F(ShiftImm), F(ShiftImm), F(ShiftImm), + // Add/sub tri operand + F(AddSub_), F(AddSub_), F(AddSub_), F(AddSub_), + // 8 bit imm + F(ALUImm8), F(ALUImm8), F(ALUImm8), F(ALUImm8), + // ALU + F(ALU), F(ALU), F(ALU), F(ALU), F(ALU), F(ALU), F(ALU), F(ALU), + F(ALU), F(ALU), F(ALU), F(ALU), F(ALU), F(ALU), F(ALU), F(ALU), + // ALU hi reg + F(ALU_HiReg), F(ALU_HiReg), F(ALU_HiReg), + // PC/SP relative ops + F(RelAddr), F(RelAddr), F(AddSP), + // LDR PC rel + F(LoadPCRel), + // LDR/STR reg offset + F(MemReg), F(MemReg), F(MemReg), F(MemReg), + // LDR/STR sign extended, half + F(MemRegHalf), F(MemRegHalf), F(MemRegHalf), F(MemRegHalf), + // LDR/STR imm offset + F(MemImm), F(MemImm), F(MemImm), F(MemImm), + // LDR/STR half imm offset + F(MemImmHalf), F(MemImmHalf), + // LDR/STR sp rel + F(MemSPRel), F(MemSPRel), + // PUSH/POP + F(PUSH_POP), F(PUSH_POP), + // LDMIA, STMIA + F(LDMIA_STMIA), F(LDMIA_STMIA), + // Branch + F(BCOND), F(BranchXchangeReg), F(BranchXchangeReg), F(B), F(BL_LONG_1), F(BL_LONG_2), + // Unk, SVC + NULL, NULL, + F(BL_Merged) +}; + +bool Compiler::CanCompile(bool thumb, u16 kind) +{ + return (thumb ? T_Comp[kind] : A_Comp[kind]) != NULL; +} + +void Compiler::Comp_BranchSpecialBehaviour() +{ + if (CurInstr.BranchFlags & branch_IdleBranch) + { + MOVI2R(W0, 1); + STRB(INDEX_UNSIGNED, W0, RCPU, offsetof(ARM, IdleLoop)); + } + + if (CurInstr.BranchFlags & branch_FollowCondNotTaken) + { + SaveCPSR(false); + RegCache.PrepareExit(); + ADD(W0, RCycles, ConstantCycles); + ABI_PopRegisters(SavedRegs); + RET(); + } +} + +JitBlockEntry Compiler::CompileBlock(ARM* cpu, bool thumb, FetchedInstr instrs[], int instrsCount) +{ + if (JitMemUseableSize - GetCodeOffset() < 1024 * 16) + { + printf("JIT memory full, resetting...\n"); + ResetBlockCache(); + } + + JitBlockEntry res = (JitBlockEntry)GetRXPtr(); + + Thumb = thumb; + Num = cpu->Num; + CurCPU = cpu; + ConstantCycles = 0; + RegCache = RegisterCache(this, instrs, instrsCount, true); + + //printf("compiling block at %x\n", R15 - (Thumb ? 2 : 4)); + const u32 ALL_CALLEE_SAVED = 0x7FF80000; + + SavedRegs = BitSet32((RegCache.GetPushRegs() | BitSet32(0x78000000)) & BitSet32(ALL_CALLEE_SAVED)); + + //if (Num == 1) + { + ABI_PushRegisters(SavedRegs); + + MOVP2R(RCPU, CurCPU); + MOVI2R(RCycles, 0); + + LoadCPSR(); + } + + for (int i = 0; i < instrsCount; i++) + { + CurInstr = instrs[i]; + R15 = CurInstr.Addr + (Thumb ? 4 : 8); + CodeRegion = R15 >> 24; + + CompileFunc comp = Thumb + ? 
T_Comp[CurInstr.Info.Kind] + : A_Comp[CurInstr.Info.Kind]; + + Exit = i == (instrsCount - 1) || (CurInstr.BranchFlags & branch_FollowCondNotTaken); + + //printf("%x instr %x regs: r%x w%x n%x flags: %x %x %x\n", R15, CurInstr.Instr, CurInstr.Info.SrcRegs, CurInstr.Info.DstRegs, CurInstr.Info.ReadFlags, CurInstr.Info.NotStrictlyNeeded, CurInstr.Info.WriteFlags, CurInstr.SetFlags); + + bool isConditional = Thumb ? CurInstr.Info.Kind == ARMInstrInfo::tk_BCOND : CurInstr.Cond() < 0xE; + if (comp == NULL || (CurInstr.BranchFlags & branch_FollowCondTaken) || (i == instrsCount - 1 && (!CurInstr.Info.Branches() || isConditional))) + { + MOVI2R(W0, R15); + STR(INDEX_UNSIGNED, W0, RCPU, offsetof(ARM, R[15])); + if (comp == NULL) + { + MOVI2R(W0, CurInstr.Instr); + STR(INDEX_UNSIGNED, W0, RCPU, offsetof(ARM, CurInstr)); + } + if (Num == 0) + { + MOVI2R(W0, (s32)CurInstr.CodeCycles); + STR(INDEX_UNSIGNED, W0, RCPU, offsetof(ARM, CodeCycles)); + } + } + + if (comp == NULL) + { + SaveCPSR(); + RegCache.Flush(); + } + else + RegCache.Prepare(Thumb, i); + + if (Thumb) + { + if (comp == NULL) + { + MOV(X0, RCPU); + QuickCallFunction(X1, InterpretTHUMB[CurInstr.Info.Kind]); + } + else + (this->*comp)(); + } + else + { + u32 cond = CurInstr.Cond(); + if (CurInstr.Info.Kind == ARMInstrInfo::ak_BLX_IMM) + { + if (comp) + (this->*comp)(); + else + { + MOV(X0, RCPU); + QuickCallFunction(X1, ARMInterpreter::A_BLX_IMM); + } + } + else if (cond == 0xF) + Comp_AddCycles_C(); + else + { + IrregularCycles = false; + + FixupBranch skipExecute; + if (cond < 0xE) + skipExecute = CheckCondition(cond); + + if (comp == NULL) + { + MOV(X0, RCPU); + QuickCallFunction(X1, InterpretARM[CurInstr.Info.Kind]); + } + else + { + (this->*comp)(); + } + + Comp_BranchSpecialBehaviour(); + + if (cond < 0xE) + { + if (IrregularCycles) + { + FixupBranch skipNop = B(); + SetJumpTarget(skipExecute); + + Comp_AddCycles_C(); + + if (CurInstr.BranchFlags & branch_FollowCondTaken) + { + SaveCPSR(false); + RegCache.PrepareExit(); + ADD(W0, RCycles, ConstantCycles); + ABI_PopRegisters(SavedRegs); + RET(); + } + + SetJumpTarget(skipNop); + } + else + SetJumpTarget(skipExecute); + } + + } + } + + if (comp == NULL) + LoadCPSR(); + } + + RegCache.Flush(); + + //if (Num == 1) + { + SaveCPSR(); + + ADD(W0, RCycles, ConstantCycles); + + ABI_PopRegisters(SavedRegs); + } + //else + // ADD(RCycles, RCycles, ConstantCycles); + + RET(); + + FlushIcache(); + + //printf("finished\n"); + + return res; +} + +void Compiler::Reset() +{ + SetCodePtr(0); + + const u32 brk_0 = 0xD4200000; + + for (int i = 0; i < JitMemUseableSize / 4; i++) + *(((u32*)GetRWPtr()) + i) = brk_0; +} + +void Compiler::Comp_AddCycles_C(bool nonConst) +{ + s32 cycles = Num ? + NDS::ARM7MemTimings[CurInstr.CodeCycles][Thumb ? 1 : 3] + : ((R15 & 0x2) ? 0 : CurInstr.CodeCycles); + + if (!nonConst && !CurInstr.Info.Branches()) + ConstantCycles += cycles; + else + ADD(RCycles, RCycles, cycles); +} + +void Compiler::Comp_AddCycles_CI(u32 numI) +{ + s32 cycles = (Num ? + NDS::ARM7MemTimings[CurInstr.CodeCycles][Thumb ? 0 : 2] + : ((R15 & 0x2) ? 0 : CurInstr.CodeCycles)) + numI; + + if (Thumb || CurInstr.Cond() >= 0xE) + ConstantCycles += cycles; + else + ADD(RCycles, RCycles, cycles); +} + +void Compiler::Comp_AddCycles_CI(u32 c, ARM64Reg numI, ArithOption shift) +{ + s32 cycles = (Num ? + NDS::ARM7MemTimings[CurInstr.CodeCycles][Thumb ? 0 : 2] + : ((R15 & 0x2) ? 
0 : CurInstr.CodeCycles)) + c; + + ADD(RCycles, RCycles, numI, shift); + if (Thumb || CurInstr.Cond() >= 0xE) + ConstantCycles += cycles; + else + ADD(RCycles, RCycles, cycles); +} + +void Compiler::Comp_AddCycles_CDI() +{ + if (Num == 0) + Comp_AddCycles_CD(); + else + { + IrregularCycles = true; + + s32 cycles; + + s32 numC = NDS::ARM7MemTimings[CurInstr.CodeCycles][Thumb ? 0 : 2]; + s32 numD = CurInstr.DataCycles; + + if (CurInstr.DataRegion == 0x02) // mainRAM + { + if (CodeRegion == 0x02) + cycles = numC + numD; + else + { + numC++; + cycles = std::max(numC + numD - 3, std::max(numC, numD)); + } + } + else if (CodeRegion == 0x02) + { + numD++; + cycles = std::max(numC + numD - 3, std::max(numC, numD)); + } + else + { + cycles = numC + numD + 1; + } + + if (!Thumb && CurInstr.Cond() < 0xE) + ADD(RCycles, RCycles, cycles); + else + ConstantCycles += cycles; + } +} + +void Compiler::Comp_AddCycles_CD() +{ + u32 cycles = 0; + if (Num == 0) + { + s32 numC = (R15 & 0x2) ? 0 : CurInstr.CodeCycles; + s32 numD = CurInstr.DataCycles; + + //if (DataRegion != CodeRegion) + cycles = std::max(numC + numD - 6, std::max(numC, numD)); + + IrregularCycles = cycles != numC; + } + else + { + s32 numC = NDS::ARM7MemTimings[CurInstr.CodeCycles][Thumb ? 0 : 2]; + s32 numD = CurInstr.DataCycles; + + if (CurInstr.DataRegion == 0x02) + { + if (CodeRegion == 0x02) + cycles += numC + numD; + else + cycles += std::max(numC + numD - 3, std::max(numC, numD)); + } + else if (CodeRegion == 0x02) + { + cycles += std::max(numC + numD - 3, std::max(numC, numD)); + } + else + { + cycles += numC + numD; + } + + IrregularCycles = true; + } + + if ((!Thumb && CurInstr.Cond() < 0xE) && IrregularCycles) + ADD(RCycles, RCycles, cycles); + else + ConstantCycles += cycles; +} + +} \ No newline at end of file diff --git a/src/ARMJIT_A64/ARMJIT_Compiler.h b/src/ARMJIT_A64/ARMJIT_Compiler.h new file mode 100644 index 0000000..7e13507 --- /dev/null +++ b/src/ARMJIT_A64/ARMJIT_Compiler.h @@ -0,0 +1,234 @@ +#ifndef ARMJIT_COMPILER_H +#define ARMJIT_COMPILER_H + +#include "../ARM.h" +#include "../ARMJIT.h" + +#include "../dolphin/Arm64Emitter.h" + +#include "../ARMJIT_Internal.h" +#include "../ARMJIT_RegisterCache.h" + +namespace ARMJIT +{ + +const Arm64Gen::ARM64Reg RCPSR = Arm64Gen::W27; +const Arm64Gen::ARM64Reg RCycles = Arm64Gen::W28; +const Arm64Gen::ARM64Reg RCPU = Arm64Gen::X29; + +struct Op2 +{ + Op2() + {} + + Op2(Arm64Gen::ARM64Reg rm) : IsImm(false) + { + Reg.Rm = rm; + Reg.ShiftType = Arm64Gen::ST_LSL; + Reg.ShiftAmount = 0; + } + + Op2(u32 imm) : IsImm(true), Imm(imm) + {} + + Op2(Arm64Gen::ARM64Reg rm, Arm64Gen::ShiftType st, int amount) : IsImm(false) + { + Reg.Rm = rm; + Reg.ShiftType = st; + Reg.ShiftAmount = amount; + } + + Arm64Gen::ArithOption ToArithOption() + { + assert(!IsImm); + return Arm64Gen::ArithOption(Reg.Rm, Reg.ShiftType, Reg.ShiftAmount); + } + + bool IsSimpleReg() + { return !IsImm && !Reg.ShiftAmount && Reg.ShiftType == Arm64Gen::ST_LSL; } + bool ImmFits12Bit() + { return IsImm && ((Imm & 0xFFF) == Imm); } + bool IsZero() + { return IsImm && !Imm; } + + bool IsImm; + union + { + struct + { + Arm64Gen::ARM64Reg Rm; + Arm64Gen::ShiftType ShiftType; + int ShiftAmount; + } Reg; + u32 Imm; + }; +}; + +class Compiler : Arm64Gen::ARM64XEmitter +{ +public: + typedef void (Compiler::*CompileFunc)(); + + Compiler(); + ~Compiler(); + + Arm64Gen::ARM64Reg MapReg(int reg) + { + assert(RegCache.Mapping[reg] != Arm64Gen::INVALID_REG); + return RegCache.Mapping[reg]; + } + + JitBlockEntry CompileBlock(ARM* cpu, bool thumb,
FetchedInstr instrs[], int instrsCount); + + bool CanCompile(bool thumb, u16 kind); + + bool FlagsNZNeeded() + { + return CurInstr.SetFlags & 0xC; + } + + void Reset(); + + void Comp_AddCycles_C(bool forceNonConst = false); + void Comp_AddCycles_CI(u32 numI); + void Comp_AddCycles_CI(u32 c, Arm64Gen::ARM64Reg numI, Arm64Gen::ArithOption shift); + void Comp_AddCycles_CD(); + void Comp_AddCycles_CDI(); + + void MovePC(); + + void LoadReg(int reg, Arm64Gen::ARM64Reg nativeReg); + void SaveReg(int reg, Arm64Gen::ARM64Reg nativeReg); + + void LoadCPSR(); + void SaveCPSR(bool markClean = true); + + void A_Comp_ALUTriOp(); + void A_Comp_ALUMovOp(); + void A_Comp_ALUCmpOp(); + + void A_Comp_Mul(); + void A_Comp_Mul_Long(); + + void A_Comp_Clz(); + + void A_Comp_MemWB(); + void A_Comp_MemHD(); + + void A_Comp_LDM_STM(); + + void A_Comp_BranchImm(); + void A_Comp_BranchXchangeReg(); + + + void T_Comp_ShiftImm(); + void T_Comp_AddSub_(); + void T_Comp_ALUImm8(); + void T_Comp_ALU(); + void T_Comp_ALU_HiReg(); + void T_Comp_AddSP(); + void T_Comp_RelAddr(); + + void T_Comp_MemReg(); + void T_Comp_MemImm(); + void T_Comp_MemRegHalf(); + void T_Comp_MemImmHalf(); + void T_Comp_LoadPCRel(); + void T_Comp_MemSPRel(); + + void T_Comp_LDMIA_STMIA(); + void T_Comp_PUSH_POP(); + + void T_Comp_BCOND(); + void T_Comp_B(); + void T_Comp_BranchXchangeReg(); + void T_Comp_BL_LONG_1(); + void T_Comp_BL_LONG_2(); + void T_Comp_BL_Merged(); + + s32 Comp_MemAccessBlock(int rn, BitSet16 regs, bool store, bool preinc, bool decrement, bool usermode); + + void Comp_Mul_Mla(bool S, bool mla, Arm64Gen::ARM64Reg rd, Arm64Gen::ARM64Reg rm, Arm64Gen::ARM64Reg rs, Arm64Gen::ARM64Reg rn); + + void Comp_Compare(int op, Arm64Gen::ARM64Reg rn, Op2 op2); + void Comp_Logical(int op, bool S, Arm64Gen::ARM64Reg rd, Arm64Gen::ARM64Reg rn, Op2 op2); + void Comp_Arithmetic(int op, bool S, Arm64Gen::ARM64Reg rd, Arm64Gen::ARM64Reg rn, Op2 op2); + + void Comp_RetriveFlags(bool retriveCV); + + Arm64Gen::FixupBranch CheckCondition(u32 cond); + + void Comp_JumpTo(Arm64Gen::ARM64Reg addr, bool switchThumb, bool restoreCPSR = false); + void Comp_JumpTo(u32 addr, bool forceNonConstantCycles = false); + + void A_Comp_GetOp2(bool S, Op2& op2); + + void Comp_RegShiftImm(int op, int amount, bool S, Op2& op2, Arm64Gen::ARM64Reg tmp = Arm64Gen::W0); + void Comp_RegShiftReg(int op, bool S, Op2& op2, Arm64Gen::ARM64Reg rs); + + void Comp_MemLoadLiteral(int size, bool signExtend, int rd, u32 addr); + enum + { + memop_Writeback = 1 << 0, + memop_Post = 1 << 1, + memop_SignExtend = 1 << 2, + memop_Store = 1 << 3, + memop_SubtractOffset = 1 << 4 + }; + void Comp_MemAccess(int rd, int rn, Op2 offset, int size, int flags); + + void* Gen_MemoryRoutine9(int size, bool store); + + void* Gen_MemoryRoutine9Seq(bool store, bool preinc); + void* Gen_MemoryRoutine7Seq(bool store, bool preinc); + + // 0 = switch mode, 1 = stay arm, 2 = stay thumb + void* Gen_JumpTo9(int kind); + void* Gen_JumpTo7(int kind); + + void Comp_BranchSpecialBehaviour(); + + bool Exit; + + FetchedInstr CurInstr; + bool Thumb; + u32 R15; + u32 Num; + ARM* CurCPU; + u32 ConstantCycles; + u32 CodeRegion; + + BitSet32 SavedRegs; + + u32 JitMemUseableSize; + + void* ReadBanked, *WriteBanked; + + // [size][store] + void* MemFunc9[3][2]; + void* MemFunc7[3][2]; + + // [store][pre increment] + void* MemFuncsSeq9[2][2]; + // "[code in main ram] + void* MemFuncsSeq7[2][2]; + + void* JumpToFuncs9[3]; + void* JumpToFuncs7[3]; + + RegisterCache RegCache; + + bool CPSRDirty = false; + + bool IrregularCycles 
= false; + +#ifdef __SWITCH__ + void* JitRWBase; + void* JitRWStart; + void* JitRXStart; +#endif +}; + +} + +#endif \ No newline at end of file diff --git a/src/ARMJIT_A64/ARMJIT_LoadStore.cpp b/src/ARMJIT_A64/ARMJIT_LoadStore.cpp new file mode 100644 index 0000000..a5d0e3f --- /dev/null +++ b/src/ARMJIT_A64/ARMJIT_LoadStore.cpp @@ -0,0 +1,848 @@ +#include "ARMJIT_Compiler.h" + +#include "../Config.h" + +using namespace Arm64Gen; + +namespace ARMJIT +{ + +// W0 - address +// (if store) W1 - value to store +// W2 - code cycles +void* Compiler::Gen_MemoryRoutine9(int size, bool store) +{ + AlignCode16(); + void* res = GetRXPtr(); + + u32 addressMask; + switch (size) + { + case 32: addressMask = ~3; break; + case 16: addressMask = ~1; break; + case 8: addressMask = ~0; break; + } + + LDR(INDEX_UNSIGNED, W3, RCPU, offsetof(ARMv5, DTCMBase)); + LDR(INDEX_UNSIGNED, W4, RCPU, offsetof(ARMv5, DTCMSize)); + SUB(W3, W0, W3); + CMP(W3, W4); + FixupBranch insideDTCM = B(CC_LO); + + UBFX(W4, W0, 24, 8); + CMP(W4, 0x02); + FixupBranch outsideMainRAM = B(CC_NEQ); + ANDI2R(W3, W0, addressMask & (MAIN_RAM_SIZE - 1)); + MOVP2R(X4, NDS::MainRAM); + if (!store && size == 32) + { + LDR(W3, X3, X4); + ANDI2R(W0, W0, 3); + LSL(W0, W0, 3); + RORV(W0, W3, W0); + } + else if (store) + STRGeneric(size, W1, X3, X4); + else + LDRGeneric(size, false, W0, X3, X4); + RET(); + + SetJumpTarget(outsideMainRAM); + + LDR(INDEX_UNSIGNED, W3, RCPU, offsetof(ARMv5, ITCMSize)); + CMP(W0, W3); + FixupBranch insideITCM = B(CC_LO); + + if (store) + { + if (size > 8) + ANDI2R(W0, W0, addressMask); + + switch (size) + { + case 32: QuickTailCall(X4, NDS::ARM9Write32); break; + case 16: QuickTailCall(X4, NDS::ARM9Write16); break; + case 8: QuickTailCall(X4, NDS::ARM9Write8); break; + } + } + else + { + if (size == 32) + ABI_PushRegisters({0, 30}); + if (size > 8) + ANDI2R(W0, W0, addressMask); + + switch (size) + { + case 32: QuickCallFunction(X4, NDS::ARM9Read32); break; + case 16: QuickTailCall (X4, NDS::ARM9Read16); break; + case 8: QuickTailCall (X4, NDS::ARM9Read8 ); break; + } + if (size == 32) + { + ABI_PopRegisters({1, 30}); + ANDI2R(W1, W1, 3); + LSL(W1, W1, 3); + RORV(W0, W0, W1); + RET(); + } + } + + SetJumpTarget(insideDTCM); + ANDI2R(W3, W3, 0x3FFF & addressMask); + ADDI2R(W3, W3, offsetof(ARMv5, DTCM), W4); + if (!store && size == 32) + { + ANDI2R(W4, W0, 3); + LDR(W0, RCPU, W3); + LSL(W4, W4, 3); + RORV(W0, W0, W4); + } + else if (store) + STRGeneric(size, W1, RCPU, W3); + else + LDRGeneric(size, false, W0, RCPU, W3); + + RET(); + + SetJumpTarget(insideITCM); + ANDI2R(W3, W0, 0x7FFF & addressMask); + if (store) + { + LSR(W0, W3, 8); + ADDI2R(W0, W0, ExeMemRegionOffsets[exeMem_ITCM], W4); + MOVP2R(X4, CodeRanges); + ADD(X4, X4, X0, ArithOption(X0, ST_LSL, 4)); + static_assert(sizeof(AddressRange) == 16); + LDR(INDEX_UNSIGNED, W4, X4, offsetof(AddressRange, Blocks.Length)); + FixupBranch null = CBZ(W4); + ABI_PushRegisters({1, 3, 30}); + QuickCallFunction(X4, InvalidateByAddr); + ABI_PopRegisters({1, 3, 30}); + SetJumpTarget(null); + } + ADDI2R(W3, W3, offsetof(ARMv5, ITCM), W4); + if (!store && size == 32) + { + ANDI2R(W4, W0, 3); + LDR(W0, RCPU, W3); + LSL(W4, W4, 3); + RORV(W0, W0, W4); + } + else if (store) + STRGeneric(size, W1, RCPU, W3); + else + LDRGeneric(size, false, W0, RCPU, W3); + RET(); + + return res; +} + +/* + W0 - base address + X1 - stack space + W2 - values count +*/ +void* Compiler::Gen_MemoryRoutine9Seq(bool store, bool preinc) +{ + AlignCode16(); + void* res = GetRXPtr(); + + void* loopStart = 
GetRXPtr(); + SUB(W2, W2, 1); + + if (preinc) + ADD(W0, W0, 4); + + LDR(INDEX_UNSIGNED, W4, RCPU, offsetof(ARMv5, DTCMBase)); + LDR(INDEX_UNSIGNED, W5, RCPU, offsetof(ARMv5, DTCMSize)); + SUB(W4, W0, W4); + CMP(W4, W5); + FixupBranch insideDTCM = B(CC_LO); + + LDR(INDEX_UNSIGNED, W4, RCPU, offsetof(ARMv5, ITCMSize)); + CMP(W0, W4); + FixupBranch insideITCM = B(CC_LO); + + ABI_PushRegisters({0, 1, 2, 30}); // TODO: move SP only once + if (store) + { + LDR(X1, X1, ArithOption(X2, true)); + QuickCallFunction(X4, NDS::ARM9Write32); + + ABI_PopRegisters({0, 1, 2, 30}); + } + else + { + QuickCallFunction(X4, NDS::ARM9Read32); + MOV(W4, W0); + + ABI_PopRegisters({0, 1, 2, 30}); + + STR(X4, X1, ArithOption(X2, true)); + } + + if (!preinc) + ADD(W0, W0, 4); + CBNZ(W2, loopStart); + RET(); + + SetJumpTarget(insideDTCM); + + ANDI2R(W4, W4, ~3 & 0x3FFF); + ADDI2R(X4, X4, offsetof(ARMv5, DTCM)); + if (store) + { + LDR(X5, X1, ArithOption(X2, true)); + STR(W5, RCPU, X4); + } + else + { + LDR(W5, RCPU, X4); + STR(X5, X1, ArithOption(X2, true)); + } + + if (!preinc) + ADD(W0, W0, 4); + CBNZ(W2, loopStart); + RET(); + + SetJumpTarget(insideITCM); + + ANDI2R(W4, W0, ~3 & 0x7FFF); + + if (store) + { + LSR(W6, W4, 8); + ADDI2R(W6, W6, ExeMemRegionOffsets[exeMem_ITCM], W5); + MOVP2R(X5, CodeRanges); + ADD(X5, X5, X6, ArithOption(X6, ST_LSL, 4)); + static_assert(sizeof(AddressRange) == 16); + LDR(INDEX_UNSIGNED, W5, X5, offsetof(AddressRange, Blocks.Length)); + FixupBranch null = CBZ(W5); + ABI_PushRegisters({0, 1, 2, 4, 30}); + MOV(W0, W6); + QuickCallFunction(X5, InvalidateByAddr); + ABI_PopRegisters({0, 1, 2, 4, 30}); + SetJumpTarget(null); + } + + ADDI2R(W4, W4, offsetof(ARMv5, ITCM), W5); + if (store) + { + LDR(X5, X1, ArithOption(X2, true)); + STR(W5, RCPU, X4); + } + else + { + LDR(W5, RCPU, X4); + STR(X5, X1, ArithOption(X2, true)); + } + + if (!preinc) + ADD(W0, W0, 4); + CBNZ(W2, loopStart); + RET(); + return res; +} + +void* Compiler::Gen_MemoryRoutine7Seq(bool store, bool preinc) +{ + AlignCode16(); + void* res = GetRXPtr(); + + void* loopStart = GetRXPtr(); + SUB(W2, W2, 1); + + if (preinc) + ADD(W0, W0, 4); + + ABI_PushRegisters({0, 1, 2, 30}); + if (store) + { + LDR(X1, X1, ArithOption(X2, true)); + QuickCallFunction(X4, NDS::ARM7Write32); + ABI_PopRegisters({0, 1, 2, 30}); + } + else + { + QuickCallFunction(X4, NDS::ARM7Read32); + MOV(W4, W0); + ABI_PopRegisters({0, 1, 2, 30}); + STR(X4, X1, ArithOption(X2, true)); + } + + if (!preinc) + ADD(W0, W0, 4); + CBNZ(W2, loopStart); + RET(); + + return res; +} + +void Compiler::Comp_MemLoadLiteral(int size, bool signExtend, int rd, u32 addr) +{ + u32 val; + // make sure arm7 bios is accessible + u32 tmpR15 = CurCPU->R[15]; + CurCPU->R[15] = R15; + if (size == 32) + { + CurCPU->DataRead32(addr & ~0x3, &val); + val = ROR(val, (addr & 0x3) << 3); + } + else if (size == 16) + { + CurCPU->DataRead16(addr & ~0x1, &val); + if (signExtend) + val = ((s32)val << 16) >> 16; + } + else + { + CurCPU->DataRead8(addr, &val); + if (signExtend) + val = ((s32)val << 24) >> 24; + } + CurCPU->R[15] = tmpR15; + + MOVI2R(MapReg(rd), val); + + if (Thumb || CurInstr.Cond() == 0xE) + RegCache.PutLiteral(rd, val); +} + +void Compiler::Comp_MemAccess(int rd, int rn, Op2 offset, int size, int flags) +{ + u32 addressMask = ~0; + if (size == 32) + addressMask = ~3; + if (size == 16) + addressMask = ~1; + + if (flags & memop_Store) + Comp_AddCycles_CD(); + else + Comp_AddCycles_CDI(); + + if (Config::JIT_LiteralOptimisations && rn == 15 && rd != 15 && offset.IsImm && !(flags & 
(memop_Post|memop_Store|memop_Writeback))) + { + u32 addr = R15 + offset.Imm * ((flags & memop_SubtractOffset) ? -1 : 1); + u32 translatedAddr = Num == 0 ? TranslateAddr<0>(addr) : TranslateAddr<1>(addr); + + if (!(CodeRanges[translatedAddr / 512].InvalidLiterals & (1 << ((translatedAddr & 0x1FF) / 16)))) + { + Comp_MemLoadLiteral(size, flags & memop_SignExtend, rd, addr); + return; + } + } + + { + ARM64Reg rdMapped = MapReg(rd); + ARM64Reg rnMapped = MapReg(rn); + + bool inlinePreparation = Num == 1; + u32 constLocalROR32 = 4; + + void* memFunc = Num == 0 + ? MemFunc9[size >> 4][!!(flags & memop_Store)] + : MemFunc7[size >> 4][!!((flags & memop_Store))]; + + if (Config::JIT_LiteralOptimisations && (rd != 15 || (flags & memop_Store)) && offset.IsImm && RegCache.IsLiteral(rn)) + { + u32 addr = RegCache.LiteralValues[rn] + offset.Imm * ((flags & memop_SubtractOffset) ? -1 : 1); + + NDS::MemRegion region; + region.Mem = NULL; + if (Num == 0) + { + ARMv5* cpu5 = (ARMv5*)CurCPU; + + // stupid dtcm... + if (addr >= cpu5->DTCMBase && addr < (cpu5->DTCMBase + cpu5->DTCMSize)) + { + region.Mem = cpu5->DTCM; + region.Mask = 0x3FFF; + } + else + { + NDS::ARM9GetMemRegion(addr, flags & memop_Store, &region); + } + } + else + NDS::ARM7GetMemRegion(addr, flags & memop_Store, &region); + + if (region.Mem != NULL) + { + void* ptr = &region.Mem[addr & addressMask & region.Mask]; + + MOVP2R(X0, ptr); + if (flags & memop_Store) + STRGeneric(size, INDEX_UNSIGNED, rdMapped, X0, 0); + else + { + LDRGeneric(size, flags & memop_SignExtend, INDEX_UNSIGNED, rdMapped, X0, 0); + if (size == 32 && addr & ~0x3) + ROR_(rdMapped, rdMapped, (addr & 0x3) << 3); + } + return; + } + + void* specialFunc = GetFuncForAddr(CurCPU, addr, flags & memop_Store, size); + if (specialFunc) + { + memFunc = specialFunc; + inlinePreparation = true; + constLocalROR32 = addr & 0x3; + } + } + + ARM64Reg finalAddr = W0; + if (flags & memop_Post) + { + finalAddr = rnMapped; + MOV(W0, rnMapped); + } + + if (flags & memop_Store) + MOV(W1, rdMapped); + + if (!offset.IsImm) + Comp_RegShiftImm(offset.Reg.ShiftType, offset.Reg.ShiftAmount, false, offset, W2); + // offset might become an immediate + if (offset.IsImm) + { + if (flags & memop_SubtractOffset) + SUB(finalAddr, rnMapped, offset.Imm); + else + ADD(finalAddr, rnMapped, offset.Imm); + } + else + { + if (offset.Reg.ShiftType == ST_ROR) + { + ROR_(W0, offset.Reg.Rm, offset.Reg.ShiftAmount); + offset = Op2(W0); + } + + if (flags & memop_SubtractOffset) + SUB(finalAddr, rnMapped, offset.Reg.Rm, offset.ToArithOption()); + else + ADD(finalAddr, rnMapped, offset.Reg.Rm, offset.ToArithOption()); + } + + if (!(flags & memop_Post) && (flags & memop_Writeback)) + MOV(rnMapped, W0); + + if (inlinePreparation) + { + if (size == 32 && !(flags & memop_Store) && constLocalROR32 == 4) + ANDI2R(rdMapped, W0, 3); + if (size > 8) + ANDI2R(W0, W0, addressMask); + } + QuickCallFunction(X2, memFunc); + if (!(flags & memop_Store)) + { + if (inlinePreparation && !(flags & memop_Store) && size == 32) + { + if (constLocalROR32 == 4) + { + LSL(rdMapped, rdMapped, 3); + RORV(rdMapped, W0, rdMapped); + } + else if (constLocalROR32 > 0) + ROR_(rdMapped, W0, constLocalROR32 << 3); + else + MOV(rdMapped, W0); + } + else if (flags & memop_SignExtend) + { + if (size == 16) + SXTH(rdMapped, W0); + else if (size == 8) + SXTB(rdMapped, W0); + else + assert(false && "What's wrong with you?"); // unreachable: only 8/16-bit loads sign extend + } + else + MOV(rdMapped, W0); + + if (CurInstr.Info.Branches()) + { + if (size < 32) + printf("LDR size < 32 branching?\n"); + Comp_JumpTo(rdMapped, Num ==
0, false); + } + } + } +} + +void Compiler::A_Comp_MemWB() +{ + Op2 offset; + if (CurInstr.Instr & (1 << 25)) + offset = Op2(MapReg(CurInstr.A_Reg(0)), (ShiftType)((CurInstr.Instr >> 5) & 0x3), (CurInstr.Instr >> 7) & 0x1F); + else + offset = Op2(CurInstr.Instr & 0xFFF); + + bool load = CurInstr.Instr & (1 << 20); + bool byte = CurInstr.Instr & (1 << 22); + + int flags = 0; + if (!load) + flags |= memop_Store; + if (!(CurInstr.Instr & (1 << 24))) + flags |= memop_Post; + if (CurInstr.Instr & (1 << 21)) + flags |= memop_Writeback; + if (!(CurInstr.Instr & (1 << 23))) + flags |= memop_SubtractOffset; + + Comp_MemAccess(CurInstr.A_Reg(12), CurInstr.A_Reg(16), offset, byte ? 8 : 32, flags); +} + +void Compiler::A_Comp_MemHD() +{ + bool load = CurInstr.Instr & (1 << 20); + bool signExtend; + int op = (CurInstr.Instr >> 5) & 0x3; + int size; + + if (load) + { + signExtend = op >= 2; + size = op == 2 ? 8 : 16; + } + else + { + size = 16; + signExtend = false; + } + + Op2 offset; + if (CurInstr.Instr & (1 << 22)) + offset = Op2((CurInstr.Instr & 0xF) | ((CurInstr.Instr >> 4) & 0xF0)); + else + offset = Op2(MapReg(CurInstr.A_Reg(0))); + + int flags = 0; + if (signExtend) + flags |= memop_SignExtend; + if (!load) + flags |= memop_Store; + if (!(CurInstr.Instr & (1 << 24))) + flags |= memop_Post; + if (!(CurInstr.Instr & (1 << 23))) + flags |= memop_SubtractOffset; + if (CurInstr.Instr & (1 << 21)) + flags |= memop_Writeback; + + Comp_MemAccess(CurInstr.A_Reg(12), CurInstr.A_Reg(16), offset, size, flags); +} + +void Compiler::T_Comp_MemReg() +{ + int op = (CurInstr.Instr >> 10) & 0x3; + bool load = op & 0x2; + bool byte = op & 0x1; + + Comp_MemAccess(CurInstr.T_Reg(0), CurInstr.T_Reg(3), + Op2(MapReg(CurInstr.T_Reg(6))), byte ? 8 : 32, load ? 0 : memop_Store); +} + +void Compiler::T_Comp_MemImm() +{ + int op = (CurInstr.Instr >> 11) & 0x3; + bool load = op & 0x1; + bool byte = op & 0x2; + u32 offset = ((CurInstr.Instr >> 6) & 0x1F) * (byte ? 1 : 4); + + Comp_MemAccess(CurInstr.T_Reg(0), CurInstr.T_Reg(3), Op2(offset), + byte ? 8 : 32, load ? 0 : memop_Store); +} + +void Compiler::T_Comp_MemRegHalf() +{ + int op = (CurInstr.Instr >> 10) & 0x3; + bool load = op != 0; + int size = op != 1 ? 16 : 8; + bool signExtend = op & 1; + + int flags = 0; + if (signExtend) + flags |= memop_SignExtend; + if (!load) + flags |= memop_Store; + + Comp_MemAccess(CurInstr.T_Reg(0), CurInstr.T_Reg(3), Op2(MapReg(CurInstr.T_Reg(6))), + size, flags); +} + +void Compiler::T_Comp_MemImmHalf() +{ + u32 offset = (CurInstr.Instr >> 5) & 0x3E; + bool load = CurInstr.Instr & (1 << 11); + + Comp_MemAccess(CurInstr.T_Reg(0), CurInstr.T_Reg(3), Op2(offset), 16, + load ? 0 : memop_Store); +} + +void Compiler::T_Comp_LoadPCRel() +{ + u32 addr = (R15 & ~0x2) + ((CurInstr.Instr & 0xFF) << 2); + + if (Config::JIT_LiteralOptimisations) + { + Comp_MemLoadLiteral(32, false, CurInstr.T_Reg(8), addr); + Comp_AddCycles_CDI(); + } + else + { + bool negative = addr < R15; + u32 abs = negative ? R15 - addr : addr - R15; + Comp_MemAccess(CurInstr.T_Reg(8), 15, Op2(abs), 32, negative ? memop_SubtractOffset : 0); + } +} + +void Compiler::T_Comp_MemSPRel() +{ + u32 offset = (CurInstr.Instr & 0xFF) * 4; + bool load = CurInstr.Instr & (1 << 11); + + Comp_MemAccess(CurInstr.T_Reg(8), 13, Op2(offset), 32, load ? 
0 : memop_Store); +} + +s32 Compiler::Comp_MemAccessBlock(int rn, BitSet16 regs, bool store, bool preinc, bool decrement, bool usermode) +{ + IrregularCycles = true; + + int regsCount = regs.Count(); + + if (regsCount == 0) + return 0; // actually not the right behaviour TODO: fix me + + SUB(SP, SP, ((regsCount + 1) & ~1) * 8); + if (store) + { + Comp_AddCycles_CD(); + + if (usermode && (regs & BitSet16(0x7f00))) + UBFX(W0, RCPSR, 0, 5); + + int i = regsCount - 1; + + BitSet16::Iterator it = regs.begin(); + while (it != regs.end()) + { + BitSet16::Iterator nextReg = it; + nextReg++; + + int reg = *it; + + if (usermode && reg >= 8 && reg < 15) + { + if (RegCache.Mapping[reg] != INVALID_REG) + MOV(W3, MapReg(reg)); + else + LoadReg(reg, W3); + MOVI2R(W1, reg - 8); + BL(ReadBanked); + STR(INDEX_UNSIGNED, W3, SP, i * 8); + } + else if (!usermode && nextReg != regs.end()) + { + ARM64Reg first = W3; + ARM64Reg second = W4; + + if (RegCache.Mapping[reg] != INVALID_REG) + first = MapReg(reg); + else + LoadReg(reg, W3); + + if (RegCache.Mapping[*nextReg] != INVALID_REG) + second = MapReg(*nextReg); + else + LoadReg(*nextReg, W4); + + STP(INDEX_SIGNED, EncodeRegTo64(second), EncodeRegTo64(first), SP, i * 8 - 8); + + i--; + it++; + } + else if (RegCache.Mapping[reg] != INVALID_REG) + STR(INDEX_UNSIGNED, MapReg(reg), SP, i * 8); + else + { + LoadReg(reg, W3); + STR(INDEX_UNSIGNED, W3, SP, i * 8); + } + i--; + it++; + } + } + if (decrement) + { + SUB(W0, MapReg(rn), regsCount * 4); + preinc ^= true; + } + else + MOV(W0, MapReg(rn)); + ADD(X1, SP, 0); + MOVI2R(W2, regsCount); + + BL(Num ? MemFuncsSeq7[store][preinc] : MemFuncsSeq9[store][preinc]); + + if (!store) + { + Comp_AddCycles_CDI(); + + if (usermode && (regs & BitSet16(0x7f00))) + UBFX(W0, RCPSR, 0, 5); + + int i = regsCount - 1; + BitSet16::Iterator it = regs.begin(); + while (it != regs.end()) + { + BitSet16::Iterator nextReg = it; + nextReg++; + + int reg = *it; + + if (usermode && reg >= 8 && reg < 15) + { + LDR(INDEX_UNSIGNED, W3, SP, i * 8); + MOVI2R(W1, reg - 8); + BL(WriteBanked); + FixupBranch alreadyWritten = CBNZ(W4); + if (RegCache.Mapping[reg] != INVALID_REG) + { + MOV(MapReg(reg), W3); + RegCache.DirtyRegs |= 1 << reg; + } + else + SaveReg(reg, W3); + SetJumpTarget(alreadyWritten); + } + else if (!usermode && nextReg != regs.end()) + { + ARM64Reg first = W3, second = W4; + + if (RegCache.Mapping[reg] != INVALID_REG) + { + first = MapReg(reg); + if (reg != 15) + RegCache.DirtyRegs |= 1 << reg; + } + if (RegCache.Mapping[*nextReg] != INVALID_REG) + { + second = MapReg(*nextReg); + if (*nextReg != 15) + RegCache.DirtyRegs |= 1 << *nextReg; + } + + LDP(INDEX_SIGNED, EncodeRegTo64(second), EncodeRegTo64(first), SP, i * 8 - 8); + + if (first == W3) + SaveReg(reg, W3); + if (second == W4) + SaveReg(*nextReg, W4); + + it++; + i--; + } + else if (RegCache.Mapping[reg] != INVALID_REG) + { + ARM64Reg mapped = MapReg(reg); + LDR(INDEX_UNSIGNED, mapped, SP, i * 8); + + if (reg != 15) + RegCache.DirtyRegs |= 1 << reg; + } + else + { + LDR(INDEX_UNSIGNED, W3, SP, i * 8); + SaveReg(reg, W3); + } + + it++; + i--; + } + } + ADD(SP, SP, ((regsCount + 1) & ~1) * 8); + + if (!store && regs[15]) + { + ARM64Reg mapped = MapReg(15); + Comp_JumpTo(mapped, Num == 0, usermode); + } + + return regsCount * 4 * (decrement ? 
-1 : 1); +} + +void Compiler::A_Comp_LDM_STM() +{ + BitSet16 regs(CurInstr.Instr & 0xFFFF); + + bool load = CurInstr.Instr & (1 << 20); + bool pre = CurInstr.Instr & (1 << 24); + bool add = CurInstr.Instr & (1 << 23); + bool writeback = CurInstr.Instr & (1 << 21); + bool usermode = CurInstr.Instr & (1 << 22); + + ARM64Reg rn = MapReg(CurInstr.A_Reg(16)); + + s32 offset = Comp_MemAccessBlock(CurInstr.A_Reg(16), regs, !load, pre, !add, usermode); + + if (load && writeback && regs[CurInstr.A_Reg(16)]) + writeback = Num == 0 + ? (!(regs & ~BitSet16(1 << CurInstr.A_Reg(16)))) || (regs & ~BitSet16((2 << CurInstr.A_Reg(16)) - 1)) + : false; + if (writeback) + { + if (offset > 0) + ADD(rn, rn, offset); + else + SUB(rn, rn, -offset); + } +} + +void Compiler::T_Comp_PUSH_POP() +{ + bool load = CurInstr.Instr & (1 << 11); + BitSet16 regs(CurInstr.Instr & 0xFF); + if (CurInstr.Instr & (1 << 8)) + { + if (load) + regs[15] = true; + else + regs[14] = true; + } + + ARM64Reg sp = MapReg(13); + s32 offset = Comp_MemAccessBlock(13, regs, !load, !load, !load, false); + + if (offset > 0) + ADD(sp, sp, offset); + else + SUB(sp, sp, -offset); +} + +void Compiler::T_Comp_LDMIA_STMIA() +{ + BitSet16 regs(CurInstr.Instr & 0xFF); + ARM64Reg rb = MapReg(CurInstr.T_Reg(8)); + bool load = CurInstr.Instr & (1 << 11); + u32 regsCount = regs.Count(); + + s32 offset = Comp_MemAccessBlock(CurInstr.T_Reg(8), regs, !load, false, false, false); + + if (!load || !regs[CurInstr.T_Reg(8)]) + { + if (offset > 0) + ADD(rb, rb, offset); + else + SUB(rb, rb, -offset); + } +} + +} \ No newline at end of file diff --git a/src/ARM_InstrInfo.cpp b/src/ARM_InstrInfo.cpp index 08e2f0a..b884773 100644 --- a/src/ARM_InstrInfo.cpp +++ b/src/ARM_InstrInfo.cpp @@ -2,6 +2,8 @@ #include <stdio.h> +#include "Config.h" + namespace ARMInstrInfo { @@ -363,7 +365,11 @@ Info Decode(bool thumb, u32 num, u32 instr) res.SpecialKind = special_WriteMem; if (res.Kind == ARMInstrInfo::tk_LDR_PCREL) + { + if (!Config::JIT_LiteralOptimisations) + res.SrcRegs |= 1 << 15; res.SpecialKind = special_LoadLiteral; + } if (res.Kind == tk_LDMIA || res.Kind == tk_POP) { @@ -417,7 +423,6 @@ Info Decode(bool thumb, u32 num, u32 instr) u32 cp = ((instr >> 8) & 0xF); if ((num == 0 && cp != 15) || (num == 1 && cp != 14)) { - printf("happens\n"); data = A_UNK; res.Kind = ak_UNK; } diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 10428aa..8b81ce3 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -41,10 +41,31 @@ if (ENABLE_JIT) ARMJIT_x64/ARMJIT_Branch.cpp dolphin/CommonFuncs.cpp - dolphin/x64ABI.cpp - dolphin/x64CPUDetect.cpp - dolphin/x64Emitter.cpp ) + + if (ARCHITECTURE STREQUAL x86_64) + target_sources(core PRIVATE + dolphin/x64ABI.cpp + dolphin/x64CPUDetect.cpp + dolphin/x64Emitter.cpp + + ARMJIT_x64/ARMJIT_Compiler.cpp + ARMJIT_x64/ARMJIT_ALU.cpp + ARMJIT_x64/ARMJIT_LoadStore.cpp + ARMJIT_x64/ARMJIT_Branch.cpp + ) + endif() + if (ARCHITECTURE STREQUAL ARM64) + target_sources(core PRIVATE + dolphin/Arm64Emitter.cpp + dolphin/MathUtil.cpp + + ARMJIT_A64/ARMJIT_Compiler.cpp + ARMJIT_A64/ARMJIT_ALU.cpp + ARMJIT_A64/ARMJIT_LoadStore.cpp + ARMJIT_A64/ARMJIT_Branch.cpp + ) + endif() endif() if (WIN32) diff --git a/src/dolphin/Align.h b/src/dolphin/Align.h new file mode 100644 index 0000000..40c4576 --- /dev/null +++ b/src/dolphin/Align.h @@ -0,0 +1,24 @@ +// This file is under the public domain.
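+// Example: AlignUp(13, 16) == 16, since (16 - 13 % 16) % 16 == 3 and 13 + 3 == 16; already-aligned +// values pass through unchanged because the outer "% size" maps a would-be adjustment of 'size' +// back to 0. AlignDown(13, 16) == 0.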
+ +#pragma once + +#include <cstddef> +#include <type_traits> + +namespace Common +{ +template <typename T> +constexpr T AlignUp(T value, size_t size) +{ + static_assert(std::is_unsigned<T>(), "T must be an unsigned value."); + return static_cast<T>(value + (size - value % size) % size); +} + +template <typename T> +constexpr T AlignDown(T value, size_t size) +{ + static_assert(std::is_unsigned<T>(), "T must be an unsigned value."); + return static_cast<T>(value - value % size); +} + +} // namespace Common diff --git a/src/dolphin/Arm64Emitter.cpp b/src/dolphin/Arm64Emitter.cpp new file mode 100644 index 0000000..dbcf425 --- /dev/null +++ b/src/dolphin/Arm64Emitter.cpp @@ -0,0 +1,4466 @@ +// Copyright 2015 Dolphin Emulator Project +// Licensed under GPLv2+ +// Refer to the license.txt file included. + +#include <algorithm> +#include <array> +#include <cinttypes> +#include <cstring> +#include <vector> + +#include "Align.h" +#include "Arm64Emitter.h" +#include "Assert.h" +#include "BitUtils.h" +#include "../types.h" +#include "MathUtil.h" + +namespace Arm64Gen +{ +namespace +{ +const int kWRegSizeInBits = 32; +const int kXRegSizeInBits = 64; + +// The below few functions are taken from V8. +int CountLeadingZeros(uint64_t value, int width) +{ + // TODO(jbramley): Optimize this for ARM64 hosts. + int count = 0; + uint64_t bit_test = 1ULL << (width - 1); + while ((count < width) && ((bit_test & value) == 0)) + { + count++; + bit_test >>= 1; + } + return count; +} + +uint64_t LargestPowerOf2Divisor(uint64_t value) +{ + return value & -(int64_t)value; +} + +// For ADD/SUB +bool IsImmArithmetic(uint64_t input, u32* val, bool* shift) +{ + if (input < 4096) + { + *val = input; + *shift = false; + return true; + } + else if ((input & 0xFFF000) == input) + { + *val = input >> 12; + *shift = true; + return true; + } + return false; +} + +// For AND/TST/ORR/EOR etc +bool IsImmLogical(uint64_t value, unsigned int width, unsigned int* n, unsigned int* imm_s, + unsigned int* imm_r) +{ + // DCHECK((n != NULL) && (imm_s != NULL) && (imm_r != NULL)); + // DCHECK((width == kWRegSizeInBits) || (width == kXRegSizeInBits)); + + bool negate = false; + + // Logical immediates are encoded using parameters n, imm_s and imm_r using + // the following table: + // + // N imms immr size S R + // 1 ssssss rrrrrr 64 UInt(ssssss) UInt(rrrrrr) + // 0 0sssss xrrrrr 32 UInt(sssss) UInt(rrrrr) + // 0 10ssss xxrrrr 16 UInt(ssss) UInt(rrrr) + // 0 110sss xxxrrr 8 UInt(sss) UInt(rrr) + // 0 1110ss xxxxrr 4 UInt(ss) UInt(rr) + // 0 11110s xxxxxr 2 UInt(s) UInt(r) + // (s bits must not be all set) + // + // A pattern is constructed of size bits, where the least significant S+1 bits + // are set. The pattern is rotated right by R, and repeated across a 32 or + // 64-bit value, depending on destination register width. + // + // Put another way: the basic format of a logical immediate is a single + // contiguous stretch of 1 bits, repeated across the whole word at intervals + // given by a power of 2. To identify them quickly, we first locate the + // lowest stretch of 1 bits, then the next 1 bit above that; that combination + // is different for every logical immediate, so it gives us all the + // information we need to identify the only logical immediate that our input + // could be, and then we simply check if that's the value we actually have. + // + // (The rotation parameter does give the possibility of the stretch of 1 bits + // going 'round the end' of the word. To deal with that, we observe that in + // any situation where that happens the bitwise NOT of the value is also a + // valid logical immediate.
So we simply invert the input whenever its low bit + // is set, and then we know that the rotated case can't arise.) + + if (value & 1) + { + // If the low bit is 1, negate the value, and set a flag to remember that we + // did (so that we can adjust the return values appropriately). + negate = true; + value = ~value; + } + + if (width == kWRegSizeInBits) + { + // To handle 32-bit logical immediates, the very easiest thing is to repeat + // the input value twice to make a 64-bit word. The correct encoding of that + // as a logical immediate will also be the correct encoding of the 32-bit + // value. + + // The most-significant 32 bits may not be zero (ie. negate is true) so + // shift the value left before duplicating it. + value <<= kWRegSizeInBits; + value |= value >> kWRegSizeInBits; + } + + // The basic analysis idea: imagine our input word looks like this. + // + // 0011111000111110001111100011111000111110001111100011111000111110 + // c b a + // |<--d-->| + // + // We find the lowest set bit (as an actual power-of-2 value, not its index) + // and call it a. Then we add a to our original number, which wipes out the + // bottommost stretch of set bits and replaces it with a 1 carried into the + // next zero bit. Then we look for the new lowest set bit, which is in + // position b, and subtract it, so now our number is just like the original + // but with the lowest stretch of set bits completely gone. Now we find the + // lowest set bit again, which is position c in the diagram above. Then we'll + // measure the distance d between bit positions a and c (using CLZ), and that + // tells us that the only valid logical immediate that could possibly be equal + // to this number is the one in which a stretch of bits running from a to just + // below b is replicated every d bits. + uint64_t a = LargestPowerOf2Divisor(value); + uint64_t value_plus_a = value + a; + uint64_t b = LargestPowerOf2Divisor(value_plus_a); + uint64_t value_plus_a_minus_b = value_plus_a - b; + uint64_t c = LargestPowerOf2Divisor(value_plus_a_minus_b); + + int d, clz_a, out_n; + uint64_t mask; + + if (c != 0) + { + // The general case, in which there is more than one stretch of set bits. + // Compute the repeat distance d, and set up a bitmask covering the basic + // unit of repetition (i.e. a word with the bottom d bits set). Also, in all + // of these cases the N bit of the output will be zero. + clz_a = CountLeadingZeros(a, kXRegSizeInBits); + int clz_c = CountLeadingZeros(c, kXRegSizeInBits); + d = clz_a - clz_c; + mask = ((UINT64_C(1) << d) - 1); + out_n = 0; + } + else + { + // Handle degenerate cases. + // + // If any of those 'find lowest set bit' operations didn't find a set bit at + // all, then the word will have been zero thereafter, so in particular the + // last lowest_set_bit operation will have returned zero. So we can test for + // all the special case conditions in one go by seeing if c is zero. + if (a == 0) + { + // The input was zero (or all 1 bits, which will come to here too after we + // inverted it at the start of the function), for which we just return + // false. + return false; + } + else + { + // Otherwise, if c was zero but a was not, then there's just one stretch + // of set bits in our word, meaning that we have the trivial case of + // d == 64 and only one 'repetition'. Set up all the same variables as in + // the general case above, and set the N bit in the output. 
+ clz_a = CountLeadingZeros(a, kXRegSizeInBits); + d = 64; + mask = ~UINT64_C(0); + out_n = 1; + } + } + + // If the repeat period d is not a power of two, it can't be encoded. + if (!MathUtil::IsPow2(d)) + return false; + + // If the bit stretch (b - a) does not fit within the mask derived from the + // repeat period, then fail. + if (((b - a) & ~mask) != 0) + return false; + + // The only possible option is b - a repeated every d bits. Now we're going to + // actually construct the valid logical immediate derived from that + // specification, and see if it equals our original input. + // + // To repeat a value every d bits, we multiply it by a number of the form + // (1 + 2^d + 2^(2d) + ...), i.e. 0x0001000100010001 or similar. These can + // be derived using a table lookup on CLZ(d). + static const std::array<uint64_t, 6> multipliers = {{ + 0x0000000000000001UL, + 0x0000000100000001UL, + 0x0001000100010001UL, + 0x0101010101010101UL, + 0x1111111111111111UL, + 0x5555555555555555UL, + }}; + + int multiplier_idx = CountLeadingZeros(d, kXRegSizeInBits) - 57; + + // Ensure that the index to the multipliers array is within bounds. + DEBUG_ASSERT((multiplier_idx >= 0) && (static_cast<size_t>(multiplier_idx) < multipliers.size())); + + uint64_t multiplier = multipliers[multiplier_idx]; + uint64_t candidate = (b - a) * multiplier; + + // The candidate pattern doesn't match our input value, so fail. + if (value != candidate) + return false; + + // We have a match! This is a valid logical immediate, so now we have to + // construct the bits and pieces of the instruction encoding that generates + // it. + + // Count the set bits in our basic stretch. The special case of clz(0) == -1 + // makes the answer come out right for stretches that reach the very top of + // the word (e.g. numbers like 0xffffc00000000000). + int clz_b = (b == 0) ? -1 : CountLeadingZeros(b, kXRegSizeInBits); + int s = clz_a - clz_b; + + // Decide how many bits to rotate right by, to put the low bit of that basic + // stretch in position a. + int r; + if (negate) + { + // If we inverted the input right at the start of this function, here's + // where we compensate: the number of set bits becomes the number of clear + // bits, and the rotation count is based on position b rather than position + // a (since b is the location of the 'lowest' 1 bit after inversion). + s = d - s; + r = (clz_b + 1) & (d - 1); + } + else + { + r = (clz_a + 1) & (d - 1); + } + + // Now we're done, except for having to encode the S output in such a way that + // it gives both the number of set bits and the length of the repeated + // segment. The s field is encoded like this: + // + // imms size S + // ssssss 64 UInt(ssssss) + // 0sssss 32 UInt(sssss) + // 10ssss 16 UInt(ssss) + // 110sss 8 UInt(sss) + // 1110ss 4 UInt(ss) + // 11110s 2 UInt(s) + // + // So we 'or' (-d << 1) with our computed s to form imms.
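+ // Worked example: value == 0x0F0F0F0F0F0F0F0F. The low bit is set, so the value + // is inverted (negate == true) to 0xF0F0F0F0F0F0F0F0; then a == 0x10, b == 0x100 + // and c == 0x1000, giving d == 8: a run of four 1 bits repeated every 8 bits. + // With negate, s = d - s == 4 and r = (clz_b + 1) & (d - 1) == 0, so + // imms == ((-8 << 1) | 3) & 0x3F == 0x33, immr == 0 and n == 0.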
+ *n = out_n; + *imm_s = ((-d << 1) | (s - 1)) & 0x3f; + *imm_r = r; + + return true; +} + +float FPImm8ToFloat(u8 bits) +{ + const u32 sign = bits >> 7; + const u32 bit6 = (bits >> 6) & 1; + const u32 exp = ((!bit6) << 7) | (0x7C * bit6) | ((bits >> 4) & 3); + const u32 mantissa = (bits & 0xF) << 19; + const u32 f = (sign << 31) | (exp << 23) | mantissa; + + return Common::BitCast<float>(f); +} + +bool FPImm8FromFloat(float value, u8* imm_out) +{ + const u32 f = Common::BitCast<u32>(value); + const u32 mantissa4 = (f & 0x7FFFFF) >> 19; + const u32 exponent = (f >> 23) & 0xFF; + const u32 sign = f >> 31; + + if ((exponent >> 7) == ((exponent >> 6) & 1)) + return false; + + const u8 imm8 = (sign << 7) | ((!(exponent >> 7)) << 6) | ((exponent & 3) << 4) | mantissa4; + const float new_float = FPImm8ToFloat(imm8); + if (new_float == value) + *imm_out = imm8; + else + return false; + + return true; +} +} // Anonymous namespace + +void ARM64XEmitter::SetCodePtrUnsafe(ptrdiff_t ptr) +{ + m_code = ptr; +} + +void ARM64XEmitter::SetCodePtr(ptrdiff_t ptr) +{ + SetCodePtrUnsafe(ptr); + m_lastCacheFlushEnd = ptr; +} + +void ARM64XEmitter::SetCodeBase(u8* rwbase, u8* rxbase) +{ + m_code = 0; + m_lastCacheFlushEnd = 0; + m_rwbase = rwbase; + m_rxbase = rxbase; +} + +ptrdiff_t ARM64XEmitter::GetCodeOffset() +{ + return m_code; +} + +const u8* ARM64XEmitter::GetRWPtr() +{ + return m_rwbase + m_code; +} + +u8* ARM64XEmitter::GetWriteableRWPtr() +{ + return m_rwbase + m_code; +} + +void* ARM64XEmitter::GetRXPtr() +{ + return m_rxbase + m_code; +} + +void ARM64XEmitter::ReserveCodeSpace(u32 bytes) +{ + for (u32 i = 0; i < bytes / 4; i++) + BRK(0); +} + +ptrdiff_t ARM64XEmitter::AlignCode16() +{ + int c = int((u64)m_code & 15); + if (c) + ReserveCodeSpace(16 - c); + return m_code; +} + +ptrdiff_t ARM64XEmitter::AlignCodePage() +{ + int c = int((u64)m_code & 4095); + if (c) + ReserveCodeSpace(4096 - c); + return m_code; +} + +void ARM64XEmitter::Write32(u32 value) +{ + std::memcpy(m_rwbase + m_code, &value, sizeof(u32)); + m_code += sizeof(u32); +} + +void ARM64XEmitter::FlushIcache() +{ + FlushIcacheSection(m_rxbase + m_lastCacheFlushEnd, m_rxbase + m_code); + m_lastCacheFlushEnd = m_code; +} + +void ARM64XEmitter::FlushIcacheSection(u8* start, u8* end) +{ + if (start == end) + return; + +#if defined(IOS) + // Header file says this is equivalent to: sys_icache_invalidate(start, end - start); + sys_cache_control(kCacheFunctionPrepareForExecution, start, end - start); +#else + // Don't rely on GCC's __clear_cache implementation, as it caches + // icache/dcache cache line sizes, that can vary between cores on + // big.LITTLE architectures. + u64 addr, ctr_el0; + static size_t icache_line_size = 0xffff, dcache_line_size = 0xffff; + size_t isize, dsize; + + __asm__ volatile("mrs %0, ctr_el0" : "=r"(ctr_el0)); + isize = 4 << ((ctr_el0 >> 0) & 0xf); + dsize = 4 << ((ctr_el0 >> 16) & 0xf); + + // use the global minimum cache line size + icache_line_size = isize = icache_line_size < isize ? icache_line_size : isize; + dcache_line_size = dsize = dcache_line_size < dsize ? dcache_line_size : dsize; + + addr = (u64)start & ~(u64)(dsize - 1); + for (; addr < (u64)end; addr += dsize) + // use "civac" instead of "cvau", as this is the suggested workaround for + // Cortex-A53 errata 819472, 826319, 827319 and 824069.
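+ // ("dc civac" cleans and invalidates each D-cache line to the point of coherency; + // the "dsb ish" below makes those writes visible before the "ic ivau" loop + // invalidates the matching I-cache lines, and the final "isb" refetches the + // instruction stream so the core cannot keep executing stale opcodes.)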
+ __asm__ volatile("dc civac, %0" : : "r"(addr) : "memory"); + __asm__ volatile("dsb ish" : : : "memory"); + + addr = (u64)start & ~(u64)(isize - 1); + for (; addr < (u64)end; addr += isize) + __asm__ volatile("ic ivau, %0" : : "r"(addr) : "memory"); + + __asm__ volatile("dsb ish" : : : "memory"); + __asm__ volatile("isb" : : : "memory"); +#endif +} + +// Exception generation +static const u32 ExcEnc[][3] = { + {0, 0, 1}, // SVC + {0, 0, 2}, // HVC + {0, 0, 3}, // SMC + {1, 0, 0}, // BRK + {2, 0, 0}, // HLT + {5, 0, 1}, // DCPS1 + {5, 0, 2}, // DCPS2 + {5, 0, 3}, // DCPS3 +}; + +// Arithmetic generation +static const u32 ArithEnc[] = { + 0x058, // ADD + 0x258, // SUB +}; + +// Conditional Select +static const u32 CondSelectEnc[][2] = { + {0, 0}, // CSEL + {0, 1}, // CSINC + {1, 0}, // CSINV + {1, 1}, // CSNEG +}; + +// Data-Processing (1 source) +static const u32 Data1SrcEnc[][2] = { + {0, 0}, // RBIT + {0, 1}, // REV16 + {0, 2}, // REV32 + {0, 3}, // REV64 + {0, 4}, // CLZ + {0, 5}, // CLS +}; + +// Data-Processing (2 source) +static const u32 Data2SrcEnc[] = { + 0x02, // UDIV + 0x03, // SDIV + 0x08, // LSLV + 0x09, // LSRV + 0x0A, // ASRV + 0x0B, // RORV + 0x10, // CRC32B + 0x11, // CRC32H + 0x12, // CRC32W + 0x14, // CRC32CB + 0x15, // CRC32CH + 0x16, // CRC32CW + 0x13, // CRC32X (64bit Only) + 0x17, // CRC32CX (64bit Only) +}; + +// Data-Processing (3 source) +static const u32 Data3SrcEnc[][2] = { + {0, 0}, // MADD + {0, 1}, // MSUB + {1, 0}, // SMADDL (64Bit Only) + {1, 1}, // SMSUBL (64Bit Only) + {2, 0}, // SMULH (64Bit Only) + {5, 0}, // UMADDL (64Bit Only) + {5, 1}, // UMSUBL (64Bit Only) + {6, 0}, // UMULH (64Bit Only) +}; + +// Logical (shifted register) +static const u32 LogicalEnc[][2] = { + {0, 0}, // AND + {0, 1}, // BIC + {1, 0}, // ORR + {1, 1}, // ORN + {2, 0}, // EOR + {2, 1}, // EON + {3, 0}, // ANDS + {3, 1}, // BICS +}; + +// Load/Store Exclusive +static const u32 LoadStoreExcEnc[][5] = { + {0, 0, 0, 0, 0}, // STXRB + {0, 0, 0, 0, 1}, // STLXRB + {0, 0, 1, 0, 0}, // LDXRB + {0, 0, 1, 0, 1}, // LDAXRB + {0, 1, 0, 0, 1}, // STLRB + {0, 1, 1, 0, 1}, // LDARB + {1, 0, 0, 0, 0}, // STXRH + {1, 0, 0, 0, 1}, // STLXRH + {1, 0, 1, 0, 0}, // LDXRH + {1, 0, 1, 0, 1}, // LDAXRH + {1, 1, 0, 0, 1}, // STLRH + {1, 1, 1, 0, 1}, // LDARH + {2, 0, 0, 0, 0}, // STXR + {3, 0, 0, 0, 0}, // (64bit) STXR + {2, 0, 0, 0, 1}, // STLXR + {3, 0, 0, 0, 1}, // (64bit) STLXR + {2, 0, 0, 1, 0}, // STXP + {3, 0, 0, 1, 0}, // (64bit) STXP + {2, 0, 0, 1, 1}, // STLXP + {3, 0, 0, 1, 1}, // (64bit) STLXP + {2, 0, 1, 0, 0}, // LDXR + {3, 0, 1, 0, 0}, // (64bit) LDXR + {2, 0, 1, 0, 1}, // LDAXR + {3, 0, 1, 0, 1}, // (64bit) LDAXR + {2, 0, 1, 1, 0}, // LDXP + {3, 0, 1, 1, 0}, // (64bit) LDXP + {2, 0, 1, 1, 1}, // LDAXP + {3, 0, 1, 1, 1}, // (64bit) LDAXP + {2, 1, 0, 0, 1}, // STLR + {3, 1, 0, 0, 1}, // (64bit) STLR + {2, 1, 1, 0, 1}, // LDAR + {3, 1, 1, 0, 1}, // (64bit) LDAR +}; + +void ARM64XEmitter::EncodeCompareBranchInst(u32 op, ARM64Reg Rt, const void* ptr) +{ + bool b64Bit = Is64Bit(Rt); + s64 distance = (s64)ptr - (s64)(m_rxbase + m_code); + + ASSERT_MSG(DYNA_REC, !(distance & 0x3), "%s: distance must be a multiple of 4: %" PRIx64, + __func__, distance); + + distance >>= 2; + + ASSERT_MSG(DYNA_REC, distance >= -0x40000 && distance <= 0x3FFFF, + "%s: Received too large distance: %" PRIx64, __func__, distance); + + Rt = DecodeReg(Rt); + Write32((b64Bit << 31) | (0x34 << 24) | (op << 24) | (((u32)distance << 5) & 0xFFFFE0) | Rt); +} + +void ARM64XEmitter::EncodeTestBranchInst(u32 op, ARM64Reg Rt, u8
bits, const void* ptr) +{ + bool b64Bit = Is64Bit(Rt); + s64 distance = (s64)ptr - (s64)(m_rxbase + m_code); + + ASSERT_MSG(DYNA_REC, !(distance & 0x3), "%s: distance must be a multiple of 4: %" PRIx64, + __func__, distance); + + distance >>= 2; + + ASSERT_MSG(DYNA_REC, distance >= -0x3FFF && distance < 0x3FFF, + "%s: Received too large distance: %" PRIx64, __func__, distance); + + Rt = DecodeReg(Rt); + Write32((b64Bit << 31) | (0x36 << 24) | (op << 24) | (bits << 19) | + (((u32)distance << 5) & 0x7FFE0) | Rt); +} + +void ARM64XEmitter::EncodeUnconditionalBranchInst(u32 op, const void* ptr) +{ + s64 distance = (s64)ptr - s64(m_rxbase + m_code); + + ASSERT_MSG(DYNA_REC, !(distance & 0x3), "%s: distance must be a multiple of 4: %" PRIx64, + __func__, distance); + + distance >>= 2; + + ASSERT_MSG(DYNA_REC, distance >= -0x2000000LL && distance <= 0x1FFFFFFLL, + "%s: Received too large distance: %" PRIx64, __func__, distance); + + Write32((op << 31) | (0x5 << 26) | (distance & 0x3FFFFFF)); +} + +void ARM64XEmitter::EncodeUnconditionalBranchInst(u32 opc, u32 op2, u32 op3, u32 op4, ARM64Reg Rn) +{ + Rn = DecodeReg(Rn); + Write32((0x6B << 25) | (opc << 21) | (op2 << 16) | (op3 << 10) | (Rn << 5) | op4); +} + +void ARM64XEmitter::EncodeExceptionInst(u32 instenc, u32 imm) +{ + ASSERT_MSG(DYNA_REC, !(imm & ~0xFFFF), "%s: Exception instruction too large immediate: %d", + __func__, imm); + + Write32((0xD4 << 24) | (ExcEnc[instenc][0] << 21) | (imm << 5) | (ExcEnc[instenc][1] << 2) | + ExcEnc[instenc][2]); +} + +void ARM64XEmitter::EncodeSystemInst(u32 op0, u32 op1, u32 CRn, u32 CRm, u32 op2, ARM64Reg Rt) +{ + Write32((0x354 << 22) | (op0 << 19) | (op1 << 16) | (CRn << 12) | (CRm << 8) | (op2 << 5) | Rt); +} + +void ARM64XEmitter::EncodeArithmeticInst(u32 instenc, bool flags, ARM64Reg Rd, ARM64Reg Rn, + ARM64Reg Rm, ArithOption Option) +{ + bool b64Bit = Is64Bit(Rd); + + Rd = DecodeReg(Rd); + Rn = DecodeReg(Rn); + Rm = DecodeReg(Rm); + Write32((b64Bit << 31) | (flags << 29) | (ArithEnc[instenc] << 21) | + (Option.GetType() == ArithOption::TYPE_EXTENDEDREG ? 
(1 << 21) : 0) | (Rm << 16) | + Option.GetData() | (Rn << 5) | Rd); +} + +void ARM64XEmitter::EncodeArithmeticCarryInst(u32 op, bool flags, ARM64Reg Rd, ARM64Reg Rn, + ARM64Reg Rm) +{ + bool b64Bit = Is64Bit(Rd); + + Rd = DecodeReg(Rd); + Rm = DecodeReg(Rm); + Rn = DecodeReg(Rn); + Write32((b64Bit << 31) | (op << 30) | (flags << 29) | (0xD0 << 21) | (Rm << 16) | (Rn << 5) | Rd); +} + +void ARM64XEmitter::EncodeCondCompareImmInst(u32 op, ARM64Reg Rn, u32 imm, u32 nzcv, CCFlags cond) +{ + bool b64Bit = Is64Bit(Rn); + + ASSERT_MSG(DYNA_REC, !(imm & ~0x1F), "%s: too large immediate: %d", __func__, imm); + ASSERT_MSG(DYNA_REC, !(nzcv & ~0xF), "%s: Flags out of range: %d", __func__, nzcv); + + Rn = DecodeReg(Rn); + Write32((b64Bit << 31) | (op << 30) | (1 << 29) | (0xD2 << 21) | (imm << 16) | (cond << 12) | + (1 << 11) | (Rn << 5) | nzcv); +} + +void ARM64XEmitter::EncodeCondCompareRegInst(u32 op, ARM64Reg Rn, ARM64Reg Rm, u32 nzcv, + CCFlags cond) +{ + bool b64Bit = Is64Bit(Rm); + + ASSERT_MSG(DYNA_REC, !(nzcv & ~0xF), "%s: Flags out of range: %d", __func__, nzcv); + + Rm = DecodeReg(Rm); + Rn = DecodeReg(Rn); + Write32((b64Bit << 31) | (op << 30) | (1 << 29) | (0xD2 << 21) | (Rm << 16) | (cond << 12) | + (Rn << 5) | nzcv); +} + +void ARM64XEmitter::EncodeCondSelectInst(u32 instenc, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, + CCFlags cond) +{ + bool b64Bit = Is64Bit(Rd); + + Rd = DecodeReg(Rd); + Rm = DecodeReg(Rm); + Rn = DecodeReg(Rn); + Write32((b64Bit << 31) | (CondSelectEnc[instenc][0] << 30) | (0xD4 << 21) | (Rm << 16) | + (cond << 12) | (CondSelectEnc[instenc][1] << 10) | (Rn << 5) | Rd); +} + +void ARM64XEmitter::EncodeData1SrcInst(u32 instenc, ARM64Reg Rd, ARM64Reg Rn) +{ + bool b64Bit = Is64Bit(Rd); + + Rd = DecodeReg(Rd); + Rn = DecodeReg(Rn); + Write32((b64Bit << 31) | (0x2D6 << 21) | (Data1SrcEnc[instenc][0] << 16) | + (Data1SrcEnc[instenc][1] << 10) | (Rn << 5) | Rd); +} + +void ARM64XEmitter::EncodeData2SrcInst(u32 instenc, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) +{ + bool b64Bit = Is64Bit(Rd); + + Rd = DecodeReg(Rd); + Rm = DecodeReg(Rm); + Rn = DecodeReg(Rn); + Write32((b64Bit << 31) | (0x0D6 << 21) | (Rm << 16) | (Data2SrcEnc[instenc] << 10) | (Rn << 5) | + Rd); +} + +void ARM64XEmitter::EncodeData3SrcInst(u32 instenc, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, + ARM64Reg Ra) +{ + bool b64Bit = Is64Bit(Rd); + + Rd = DecodeReg(Rd); + Rm = DecodeReg(Rm); + Rn = DecodeReg(Rn); + Ra = DecodeReg(Ra); + Write32((b64Bit << 31) | (0xD8 << 21) | (Data3SrcEnc[instenc][0] << 21) | (Rm << 16) | + (Data3SrcEnc[instenc][1] << 15) | (Ra << 10) | (Rn << 5) | Rd); +} + +void ARM64XEmitter::EncodeLogicalInst(u32 instenc, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, + ArithOption Shift) +{ + bool b64Bit = Is64Bit(Rd); + + Rd = DecodeReg(Rd); + Rm = DecodeReg(Rm); + Rn = DecodeReg(Rn); + Write32((b64Bit << 31) | (LogicalEnc[instenc][0] << 29) | (0x5 << 25) | + (LogicalEnc[instenc][1] << 21) | Shift.GetData() | (Rm << 16) | (Rn << 5) | Rd); +} + +void ARM64XEmitter::EncodeLoadRegisterInst(u32 bitop, ARM64Reg Rt, u32 imm) +{ + bool b64Bit = Is64Bit(Rt); + bool bVec = IsVector(Rt); + + ASSERT_MSG(DYNA_REC, !(imm & 0xFFFFF), "%s: offset too large %d", __func__, imm); + + Rt = DecodeReg(Rt); + if (b64Bit && bitop != 0x2) // LDRSW(0x2) uses 64bit reg, doesn't have 64bit bit set + bitop |= 0x1; + Write32((bitop << 30) | (bVec << 26) | (0x18 << 24) | (imm << 5) | Rt); +} + +void ARM64XEmitter::EncodeLoadStoreExcInst(u32 instenc, ARM64Reg Rs, ARM64Reg Rt2, ARM64Reg Rn, + ARM64Reg Rt) +{ + Rs = DecodeReg(Rs); + Rt2 = 
DecodeReg(Rt2); + Rn = DecodeReg(Rn); + Rt = DecodeReg(Rt); + Write32((LoadStoreExcEnc[instenc][0] << 30) | (0x8 << 24) | (LoadStoreExcEnc[instenc][1] << 23) | + (LoadStoreExcEnc[instenc][2] << 22) | (LoadStoreExcEnc[instenc][3] << 21) | (Rs << 16) | + (LoadStoreExcEnc[instenc][4] << 15) | (Rt2 << 10) | (Rn << 5) | Rt); +} + +void ARM64XEmitter::EncodeLoadStorePairedInst(u32 op, ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn, + u32 imm) +{ + bool b64Bit = Is64Bit(Rt); + bool b128Bit = IsQuad(Rt); + bool bVec = IsVector(Rt); + + if (b128Bit) + imm >>= 4; + else if (b64Bit) + imm >>= 3; + else + imm >>= 2; + + ASSERT_MSG(DYNA_REC, !(imm & ~0xF), "%s: offset too large %d", __func__, imm); + + u32 opc = 0; + if (b128Bit) + opc = 2; + else if (b64Bit && bVec) + opc = 1; + else if (b64Bit && !bVec) + opc = 2; + + Rt = DecodeReg(Rt); + Rt2 = DecodeReg(Rt2); + Rn = DecodeReg(Rn); + Write32((opc << 30) | (bVec << 26) | (op << 22) | (imm << 15) | (Rt2 << 10) | (Rn << 5) | Rt); +} + +void ARM64XEmitter::EncodeLoadStoreIndexedInst(u32 op, u32 op2, ARM64Reg Rt, ARM64Reg Rn, s32 imm) +{ + bool b64Bit = Is64Bit(Rt); + bool bVec = IsVector(Rt); + + u32 offset = imm & 0x1FF; + + ASSERT_MSG(DYNA_REC, !(imm < -256 || imm > 255), "%s: offset too large %d", __func__, imm); + + Rt = DecodeReg(Rt); + Rn = DecodeReg(Rn); + Write32((b64Bit << 30) | (op << 22) | (bVec << 26) | (offset << 12) | (op2 << 10) | (Rn << 5) | + Rt); +} + +void ARM64XEmitter::EncodeLoadStoreIndexedInst(u32 op, ARM64Reg Rt, ARM64Reg Rn, s32 imm, u8 size) +{ + bool b64Bit = Is64Bit(Rt); + bool bVec = IsVector(Rt); + + if (size == 64) + imm >>= 3; + else if (size == 32) + imm >>= 2; + else if (size == 16) + imm >>= 1; + + ASSERT_MSG(DYNA_REC, imm >= 0, "%s(INDEX_UNSIGNED): offset must be positive %d", __func__, imm); + ASSERT_MSG(DYNA_REC, !(imm & ~0xFFF), "%s(INDEX_UNSIGNED): offset too large %d", __func__, imm); + + Rt = DecodeReg(Rt); + Rn = DecodeReg(Rn); + Write32((b64Bit << 30) | (op << 22) | (bVec << 26) | (imm << 10) | (Rn << 5) | Rt); +} + +void ARM64XEmitter::EncodeMOVWideInst(u32 op, ARM64Reg Rd, u32 imm, ShiftAmount pos) +{ + bool b64Bit = Is64Bit(Rd); + + ASSERT_MSG(DYNA_REC, !(imm & ~0xFFFF), "%s: immediate out of range: %d", __func__, imm); + + Rd = DecodeReg(Rd); + Write32((b64Bit << 31) | (op << 29) | (0x25 << 23) | (pos << 21) | (imm << 5) | Rd); +} + +void ARM64XEmitter::EncodeBitfieldMOVInst(u32 op, ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms) +{ + bool b64Bit = Is64Bit(Rd); + + Rd = DecodeReg(Rd); + Rn = DecodeReg(Rn); + Write32((b64Bit << 31) | (op << 29) | (0x26 << 23) | (b64Bit << 22) | (immr << 16) | + (imms << 10) | (Rn << 5) | Rd); +} + +void ARM64XEmitter::EncodeLoadStoreRegisterOffset(u32 size, u32 opc, ARM64Reg Rt, ARM64Reg Rn, + ArithOption Rm) +{ + Rt = DecodeReg(Rt); + Rn = DecodeReg(Rn); + ARM64Reg decoded_Rm = DecodeReg(Rm.GetReg()); + + Write32((size << 30) | (opc << 22) | (0x1C1 << 21) | (decoded_Rm << 16) | Rm.GetData() | + (1 << 11) | (Rn << 5) | Rt); +} + +void ARM64XEmitter::EncodeAddSubImmInst(u32 op, bool flags, u32 shift, u32 imm, ARM64Reg Rn, + ARM64Reg Rd) +{ + bool b64Bit = Is64Bit(Rd); + + ASSERT_MSG(DYNA_REC, !(imm & ~0xFFF), "%s: immediate too large: %x", __func__, imm); + + Rd = DecodeReg(Rd); + Rn = DecodeReg(Rn); + Write32((b64Bit << 31) | (op << 30) | (flags << 29) | (0x11 << 24) | (shift << 22) | (imm << 10) | + (Rn << 5) | Rd); +} + +void ARM64XEmitter::EncodeLogicalImmInst(u32 op, ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms, + int n) +{ + // Sometimes Rd is fixed to SP, but can still be 32bit 
or 64bit. + // Use Rn to determine bitness here. + bool b64Bit = Is64Bit(Rn); + + Rd = DecodeReg(Rd); + Rn = DecodeReg(Rn); + + Write32((b64Bit << 31) | (op << 29) | (0x24 << 23) | (n << 22) | (immr << 16) | (imms << 10) | + (Rn << 5) | Rd); +} + +void ARM64XEmitter::EncodeLoadStorePair(u32 op, u32 load, IndexType type, ARM64Reg Rt, ARM64Reg Rt2, + ARM64Reg Rn, s32 imm) +{ + bool b64Bit = Is64Bit(Rt); + u32 type_encode = 0; + + switch (type) + { + case INDEX_SIGNED: + type_encode = 0b010; + break; + case INDEX_POST: + type_encode = 0b001; + break; + case INDEX_PRE: + type_encode = 0b011; + break; + case INDEX_UNSIGNED: + ASSERT_MSG(DYNA_REC, false, "%s doesn't support INDEX_UNSIGNED!", __func__); + break; + } + + if (b64Bit) + { + op |= 0b10; + imm >>= 3; + } + else + { + imm >>= 2; + } + + Rt = DecodeReg(Rt); + Rt2 = DecodeReg(Rt2); + Rn = DecodeReg(Rn); + + Write32((op << 30) | (0b101 << 27) | (type_encode << 23) | (load << 22) | ((imm & 0x7F) << 15) | + (Rt2 << 10) | (Rn << 5) | Rt); +} +void ARM64XEmitter::EncodeAddressInst(u32 op, ARM64Reg Rd, s32 imm) +{ + Rd = DecodeReg(Rd); + + Write32((op << 31) | ((imm & 0x3) << 29) | (0x10 << 24) | ((imm & 0x1FFFFC) << 3) | Rd); +} + +void ARM64XEmitter::EncodeLoadStoreUnscaled(u32 size, u32 op, ARM64Reg Rt, ARM64Reg Rn, s32 imm) +{ + ASSERT_MSG(DYNA_REC, !(imm < -256 || imm > 255), "%s received too large offset: %d", __func__, + imm); + Rt = DecodeReg(Rt); + Rn = DecodeReg(Rn); + + Write32((size << 30) | (0b111 << 27) | (op << 22) | ((imm & 0x1FF) << 12) | (Rn << 5) | Rt); +} + +static constexpr bool IsInRangeImm19(s64 distance) +{ + return (distance >= -0x40000 && distance <= 0x3FFFF); +} + +static constexpr bool IsInRangeImm14(s64 distance) +{ + return (distance >= -0x2000 && distance <= 0x1FFF); +} + +static constexpr bool IsInRangeImm26(s64 distance) +{ + return (distance >= -0x2000000 && distance <= 0x1FFFFFF); +} + +static constexpr u32 MaskImm19(s64 distance) +{ + return distance & 0x7FFFF; +} + +static constexpr u32 MaskImm14(s64 distance) +{ + return distance & 0x3FFF; +} + +static constexpr u32 MaskImm26(s64 distance) +{ + return distance & 0x3FFFFFF; +} + +// FixupBranch branching +void ARM64XEmitter::SetJumpTarget(FixupBranch const& branch) +{ + bool Not = false; + u32 inst = 0; + s64 distance = (s64)(m_code - branch.ptr); + distance >>= 2; + + switch (branch.type) + { + case 1: // CBNZ + Not = true; + case 0: // CBZ + { + ASSERT_MSG(DYNA_REC, IsInRangeImm19(distance), "%s(%d): Received too large distance: %" PRIx64, + __func__, branch.type, distance); + bool b64Bit = Is64Bit(branch.reg); + ARM64Reg reg = DecodeReg(branch.reg); + inst = (b64Bit << 31) | (0x1A << 25) | (Not << 24) | (MaskImm19(distance) << 5) | reg; + } + break; + case 2: // B (conditional) + ASSERT_MSG(DYNA_REC, IsInRangeImm19(distance), "%s(%d): Received too large distance: %" PRIx64, + __func__, branch.type, distance); + inst = (0x2A << 25) | (MaskImm19(distance) << 5) | branch.cond; + break; + case 4: // TBNZ + Not = true; + case 3: // TBZ + { + ASSERT_MSG(DYNA_REC, IsInRangeImm14(distance), "%s(%d): Received too large distance: %" PRIx64, + __func__, branch.type, distance); + ARM64Reg reg = DecodeReg(branch.reg); + inst = ((branch.bit & 0x20) << 26) | (0x1B << 25) | (Not << 24) | ((branch.bit & 0x1F) << 19) | + (MaskImm14(distance) << 5) | reg; + } + break; + case 5: // B (unconditional) + ASSERT_MSG(DYNA_REC, IsInRangeImm26(distance), "%s(%d): Received too large distance: %" PRIx64, + __func__, branch.type, distance); + inst = (0x5 << 26) | MaskImm26(distance);
+ break; + case 6: // BL (unconditional) + ASSERT_MSG(DYNA_REC, IsInRangeImm26(distance), "%s(%d): Received too large distance: %" PRIx64, + __func__, branch.type, distance); + inst = (0x25 << 26) | MaskImm26(distance); + break; + } + + std::memcpy(m_rwbase + branch.ptr, &inst, sizeof(inst)); +} + +FixupBranch ARM64XEmitter::CBZ(ARM64Reg Rt) +{ + FixupBranch branch; + branch.ptr = m_code; + branch.type = 0; + branch.reg = Rt; + HINT(HINT_NOP); + return branch; +} +FixupBranch ARM64XEmitter::CBNZ(ARM64Reg Rt) +{ + FixupBranch branch; + branch.ptr = m_code; + branch.type = 1; + branch.reg = Rt; + HINT(HINT_NOP); + return branch; +} +FixupBranch ARM64XEmitter::B(CCFlags cond) +{ + FixupBranch branch; + branch.ptr = m_code; + branch.type = 2; + branch.cond = cond; + HINT(HINT_NOP); + return branch; +} +FixupBranch ARM64XEmitter::TBZ(ARM64Reg Rt, u8 bit) +{ + FixupBranch branch; + branch.ptr = m_code; + branch.type = 3; + branch.reg = Rt; + branch.bit = bit; + HINT(HINT_NOP); + return branch; +} +FixupBranch ARM64XEmitter::TBNZ(ARM64Reg Rt, u8 bit) +{ + FixupBranch branch; + branch.ptr = m_code; + branch.type = 4; + branch.reg = Rt; + branch.bit = bit; + HINT(HINT_NOP); + return branch; +} +FixupBranch ARM64XEmitter::B() +{ + FixupBranch branch; + branch.ptr = m_code; + branch.type = 5; + HINT(HINT_NOP); + return branch; +} +FixupBranch ARM64XEmitter::BL() +{ + FixupBranch branch; + branch.ptr = m_code; + branch.type = 6; + HINT(HINT_NOP); + return branch; +} + +// Compare and Branch +void ARM64XEmitter::CBZ(ARM64Reg Rt, const void* ptr) +{ + EncodeCompareBranchInst(0, Rt, ptr); +} +void ARM64XEmitter::CBNZ(ARM64Reg Rt, const void* ptr) +{ + EncodeCompareBranchInst(1, Rt, ptr); +} + +// Conditional Branch +void ARM64XEmitter::B(CCFlags cond, const void* ptr) +{ + s64 distance = (s64)ptr - (s64)(m_rxbase + m_code); + + distance >>= 2; + + ASSERT_MSG(DYNA_REC, IsInRangeImm19(distance), + "%s: Received too large distance: %p->%p %" PRIi64 " %" PRIx64, __func__, m_execcode, ptr, + distance, distance); + Write32((0x54 << 24) | (MaskImm19(distance) << 5) | cond); +} + +// Test and Branch +void ARM64XEmitter::TBZ(ARM64Reg Rt, u8 bits, const void* ptr) +{ + EncodeTestBranchInst(0, Rt, bits, ptr); +} +void ARM64XEmitter::TBNZ(ARM64Reg Rt, u8 bits, const void* ptr) +{ + EncodeTestBranchInst(1, Rt, bits, ptr); +} + +// Unconditional Branch +void ARM64XEmitter::B(const void* ptr) +{ + EncodeUnconditionalBranchInst(0, ptr); +} +void ARM64XEmitter::BL(const void* ptr) +{ + EncodeUnconditionalBranchInst(1, ptr); +} + +void ARM64XEmitter::QuickCallFunction(ARM64Reg scratchreg, const void* func) +{ + s64 distance = (s64)func - (s64)(m_rxbase + m_code); + distance >>= 2; // Can only branch to opcode-aligned (4) addresses + if (!IsInRangeImm26(distance)) + { + // WARN_LOG(DYNA_REC, "Distance too far in function call (%p to %p)! Using scratch.", m_code, + // func); + MOVI2R(scratchreg, (uintptr_t)func); + BLR(scratchreg); + } + else + { + BL(func); + } +} + +void ARM64XEmitter::QuickTailCall(ARM64Reg scratchreg, const void* func) +{ + s64 distance = (s64)func - (s64)(m_rxbase + m_code); + distance >>= 2; // Can only branch to opcode-aligned (4) addresses + if (!IsInRangeImm26(distance)) + { + // WARN_LOG(DYNA_REC, "Distance too far in function call (%p to %p)! 
Using scratch.", m_code,
+    // func);
+    MOVI2R(scratchreg, (uintptr_t)func);
+    BR(scratchreg);
+  }
+  else
+  {
+    B(func);
+  }
+}
+
+// Unconditional Branch (register)
+void ARM64XEmitter::BR(ARM64Reg Rn)
+{
+  EncodeUnconditionalBranchInst(0, 0x1F, 0, 0, Rn);
+}
+void ARM64XEmitter::BLR(ARM64Reg Rn)
+{
+  EncodeUnconditionalBranchInst(1, 0x1F, 0, 0, Rn);
+}
+void ARM64XEmitter::RET(ARM64Reg Rn)
+{
+  EncodeUnconditionalBranchInst(2, 0x1F, 0, 0, Rn);
+}
+void ARM64XEmitter::ERET()
+{
+  EncodeUnconditionalBranchInst(4, 0x1F, 0, 0, SP);
+}
+void ARM64XEmitter::DRPS()
+{
+  EncodeUnconditionalBranchInst(5, 0x1F, 0, 0, SP);
+}
+
+// Exception generation
+void ARM64XEmitter::SVC(u32 imm)
+{
+  EncodeExceptionInst(0, imm);
+}
+
+void ARM64XEmitter::HVC(u32 imm)
+{
+  EncodeExceptionInst(1, imm);
+}
+
+void ARM64XEmitter::SMC(u32 imm)
+{
+  EncodeExceptionInst(2, imm);
+}
+
+void ARM64XEmitter::BRK(u32 imm)
+{
+  EncodeExceptionInst(3, imm);
+}
+
+void ARM64XEmitter::HLT(u32 imm)
+{
+  EncodeExceptionInst(4, imm);
+}
+
+void ARM64XEmitter::DCPS1(u32 imm)
+{
+  EncodeExceptionInst(5, imm);
+}
+
+void ARM64XEmitter::DCPS2(u32 imm)
+{
+  EncodeExceptionInst(6, imm);
+}
+
+void ARM64XEmitter::DCPS3(u32 imm)
+{
+  EncodeExceptionInst(7, imm);
+}
+
+// System
+void ARM64XEmitter::_MSR(PStateField field, u8 imm)
+{
+  u32 op1 = 0, op2 = 0;
+  switch (field)
+  {
+  case FIELD_SPSel:
+    op1 = 0;
+    op2 = 5;
+    break;
+  case FIELD_DAIFSet:
+    op1 = 3;
+    op2 = 6;
+    break;
+  case FIELD_DAIFClr:
+    op1 = 3;
+    op2 = 7;
+    break;
+  default:
+    ASSERT_MSG(DYNA_REC, false, "Invalid PStateField to do an imm move to");
+    break;
+  }
+  EncodeSystemInst(0, op1, 4, imm, op2, WSP);
+}
+
+static void GetSystemReg(PStateField field, int& o0, int& op1, int& CRn, int& CRm, int& op2)
+{
+  switch (field)
+  {
+  case FIELD_NZCV:
+    o0 = 3;
+    op1 = 3;
+    CRn = 4;
+    CRm = 2;
+    op2 = 0;
+    break;
+  case FIELD_FPCR:
+    o0 = 3;
+    op1 = 3;
+    CRn = 4;
+    CRm = 4;
+    op2 = 0;
+    break;
+  case FIELD_FPSR:
+    o0 = 3;
+    op1 = 3;
+    CRn = 4;
+    CRm = 4;
+    op2 = 1;
+    break;
+  case FIELD_PMCR_EL0:
+    o0 = 3;
+    op1 = 3;
+    CRn = 9;
+    CRm = 6;
+    op2 = 0;
+    break;
+  case FIELD_PMCCNTR_EL0:
+    o0 = 3;
+    op1 = 3;
+    CRn = 9;
+    CRm = 7;
+    op2 = 0;
+    break;
+  default:
+    ASSERT_MSG(DYNA_REC, false, "Invalid PStateField to do a register move from/to");
+    break;
+  }
+}
+
+void ARM64XEmitter::_MSR(PStateField field, ARM64Reg Rt)
+{
+  int o0 = 0, op1 = 0, CRn = 0, CRm = 0, op2 = 0;
+  ASSERT_MSG(DYNA_REC, Is64Bit(Rt), "MSR: Rt must be 64-bit");
+  GetSystemReg(field, o0, op1, CRn, CRm, op2);
+  EncodeSystemInst(o0, op1, CRn, CRm, op2, DecodeReg(Rt));
+}
+
+void ARM64XEmitter::MRS(ARM64Reg Rt, PStateField field)
+{
+  int o0 = 0, op1 = 0, CRn = 0, CRm = 0, op2 = 0;
+  ASSERT_MSG(DYNA_REC, Is64Bit(Rt), "MRS: Rt must be 64-bit");
+  GetSystemReg(field, o0, op1, CRn, CRm, op2);
+  EncodeSystemInst(o0 | 4, op1, CRn, CRm, op2, DecodeReg(Rt));
+}
+
+void ARM64XEmitter::CNTVCT(Arm64Gen::ARM64Reg Rt)
+{
+  ASSERT_MSG(DYNA_REC, Is64Bit(Rt), "CNTVCT: Rt must be 64-bit");
+
+  // MRS <Xt>, CNTVCT_EL0 ; Read CNTVCT_EL0 into Xt
+  EncodeSystemInst(3 | 4, 3, 0xe, 0, 2, DecodeReg(Rt));
+}
+
+void ARM64XEmitter::HINT(SystemHint op)
+{
+  EncodeSystemInst(0, 3, 2, 0, op, WSP);
+}
+void ARM64XEmitter::CLREX()
+{
+  EncodeSystemInst(0, 3, 3, 0, 2, WSP);
+}
+void ARM64XEmitter::DSB(BarrierType type)
+{
+  EncodeSystemInst(0, 3, 3, type, 4, WSP);
+}
+void ARM64XEmitter::DMB(BarrierType type)
+{
+  EncodeSystemInst(0, 3, 3, type, 5, WSP);
+}
+void ARM64XEmitter::ISB(BarrierType type)
+{
+  EncodeSystemInst(0, 3, 3, type, 6, WSP);
+}
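+
+// Usage sketch (illustrative only): all the system-level helpers above funnel
+// through EncodeSystemInst(o0, op1, CRn, CRm, op2, Rt), the MSR/MRS/SYS
+// encoding group. A hypothetical caller timing a block might emit, assuming
+// BarrierType has a plain SY enumerator:
+//
+//   CNTVCT(X8);  // MRS X8, CNTVCT_EL0
+//   ISB(SY);     // keep the counter read from being reordered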
+
+// Add/Subtract (extended register)
+void ARM64XEmitter::ADD(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
+{
+  ADD(Rd, Rn, Rm, ArithOption(Rd, ST_LSL, 0));
+}
+
+void ARM64XEmitter::ADD(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ArithOption Option)
+{
+  EncodeArithmeticInst(0, false, Rd, Rn, Rm, Option);
+}
+
+void ARM64XEmitter::ADDS(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
+{
+  EncodeArithmeticInst(0, true, Rd, Rn, Rm, ArithOption(Rd, ST_LSL, 0));
+}
+
+void ARM64XEmitter::ADDS(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ArithOption Option)
+{
+  EncodeArithmeticInst(0, true, Rd, Rn, Rm, Option);
+}
+
+void ARM64XEmitter::SUB(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
+{
+  SUB(Rd, Rn, Rm, ArithOption(Rd, ST_LSL, 0));
+}
+
+void ARM64XEmitter::SUB(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ArithOption Option)
+{
+  EncodeArithmeticInst(1, false, Rd, Rn, Rm, Option);
+}
+
+void ARM64XEmitter::SUBS(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
+{
+  EncodeArithmeticInst(1, true, Rd, Rn, Rm, ArithOption(Rd, ST_LSL, 0));
+}
+
+void ARM64XEmitter::SUBS(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ArithOption Option)
+{
+  EncodeArithmeticInst(1, true, Rd, Rn, Rm, Option);
+}
+
+void ARM64XEmitter::CMN(ARM64Reg Rn, ARM64Reg Rm)
+{
+  CMN(Rn, Rm, ArithOption(Rn, ST_LSL, 0));
+}
+
+void ARM64XEmitter::CMN(ARM64Reg Rn, ARM64Reg Rm, ArithOption Option)
+{
+  EncodeArithmeticInst(0, true, Is64Bit(Rn) ? ZR : WZR, Rn, Rm, Option);
+}
+
+void ARM64XEmitter::CMP(ARM64Reg Rn, ARM64Reg Rm)
+{
+  CMP(Rn, Rm, ArithOption(Rn, ST_LSL, 0));
+}
+
+void ARM64XEmitter::CMP(ARM64Reg Rn, ARM64Reg Rm, ArithOption Option)
+{
+  EncodeArithmeticInst(1, true, Is64Bit(Rn) ? ZR : WZR, Rn, Rm, Option);
+}
+
+// Add/Subtract (with carry)
+void ARM64XEmitter::ADC(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
+{
+  EncodeArithmeticCarryInst(0, false, Rd, Rn, Rm);
+}
+void ARM64XEmitter::ADCS(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
+{
+  EncodeArithmeticCarryInst(0, true, Rd, Rn, Rm);
+}
+void ARM64XEmitter::SBC(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
+{
+  EncodeArithmeticCarryInst(1, false, Rd, Rn, Rm);
+}
+void ARM64XEmitter::SBCS(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
+{
+  EncodeArithmeticCarryInst(1, true, Rd, Rn, Rm);
+}
+
+// Conditional Compare (immediate)
+void ARM64XEmitter::CCMN(ARM64Reg Rn, u32 imm, u32 nzcv, CCFlags cond)
+{
+  EncodeCondCompareImmInst(0, Rn, imm, nzcv, cond);
+}
+void ARM64XEmitter::CCMP(ARM64Reg Rn, u32 imm, u32 nzcv, CCFlags cond)
+{
+  EncodeCondCompareImmInst(1, Rn, imm, nzcv, cond);
+}
+
+// Conditional Compare (register)
+void ARM64XEmitter::CCMN(ARM64Reg Rn, ARM64Reg Rm, u32 nzcv, CCFlags cond)
+{
+  EncodeCondCompareRegInst(0, Rn, Rm, nzcv, cond);
+}
+void ARM64XEmitter::CCMP(ARM64Reg Rn, ARM64Reg Rm, u32 nzcv, CCFlags cond)
+{
+  EncodeCondCompareRegInst(1, Rn, Rm, nzcv, cond);
+}
+
+// Conditional Select
+void ARM64XEmitter::CSEL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, CCFlags cond)
+{
+  EncodeCondSelectInst(0, Rd, Rn, Rm, cond);
+}
+void ARM64XEmitter::CSINC(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, CCFlags cond)
+{
+  EncodeCondSelectInst(1, Rd, Rn, Rm, cond);
+}
+void ARM64XEmitter::CSINV(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, CCFlags cond)
+{
+  EncodeCondSelectInst(2, Rd, Rn, Rm, cond);
+}
+void ARM64XEmitter::CSNEG(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, CCFlags cond)
+{
+  EncodeCondSelectInst(3, Rd, Rn, Rm, cond);
+}
+
+// Data-Processing 1 source
+void ARM64XEmitter::RBIT(ARM64Reg Rd, ARM64Reg Rn)
+{
+  EncodeData1SrcInst(0, Rd, Rn);
+}
+void ARM64XEmitter::REV16(ARM64Reg Rd, ARM64Reg Rn)
+{
+  EncodeData1SrcInst(1, Rd, Rn);
+}
+void
ARM64XEmitter::REV32(ARM64Reg Rd, ARM64Reg Rn) +{ + EncodeData1SrcInst(2, Rd, Rn); +} +void ARM64XEmitter::REV64(ARM64Reg Rd, ARM64Reg Rn) +{ + EncodeData1SrcInst(3, Rd, Rn); +} +void ARM64XEmitter::CLZ(ARM64Reg Rd, ARM64Reg Rn) +{ + EncodeData1SrcInst(4, Rd, Rn); +} +void ARM64XEmitter::CLS(ARM64Reg Rd, ARM64Reg Rn) +{ + EncodeData1SrcInst(5, Rd, Rn); +} + +// Data-Processing 2 source +void ARM64XEmitter::UDIV(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) +{ + EncodeData2SrcInst(0, Rd, Rn, Rm); +} +void ARM64XEmitter::SDIV(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) +{ + EncodeData2SrcInst(1, Rd, Rn, Rm); +} +void ARM64XEmitter::LSLV(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) +{ + EncodeData2SrcInst(2, Rd, Rn, Rm); +} +void ARM64XEmitter::LSRV(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) +{ + EncodeData2SrcInst(3, Rd, Rn, Rm); +} +void ARM64XEmitter::ASRV(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) +{ + EncodeData2SrcInst(4, Rd, Rn, Rm); +} +void ARM64XEmitter::RORV(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) +{ + EncodeData2SrcInst(5, Rd, Rn, Rm); +} +void ARM64XEmitter::CRC32B(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) +{ + EncodeData2SrcInst(6, Rd, Rn, Rm); +} +void ARM64XEmitter::CRC32H(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) +{ + EncodeData2SrcInst(7, Rd, Rn, Rm); +} +void ARM64XEmitter::CRC32W(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) +{ + EncodeData2SrcInst(8, Rd, Rn, Rm); +} +void ARM64XEmitter::CRC32CB(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) +{ + EncodeData2SrcInst(9, Rd, Rn, Rm); +} +void ARM64XEmitter::CRC32CH(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) +{ + EncodeData2SrcInst(10, Rd, Rn, Rm); +} +void ARM64XEmitter::CRC32CW(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) +{ + EncodeData2SrcInst(11, Rd, Rn, Rm); +} +void ARM64XEmitter::CRC32X(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) +{ + EncodeData2SrcInst(12, Rd, Rn, Rm); +} +void ARM64XEmitter::CRC32CX(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) +{ + EncodeData2SrcInst(13, Rd, Rn, Rm); +} + +// Data-Processing 3 source +void ARM64XEmitter::MADD(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ARM64Reg Ra) +{ + EncodeData3SrcInst(0, Rd, Rn, Rm, Ra); +} +void ARM64XEmitter::MSUB(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ARM64Reg Ra) +{ + EncodeData3SrcInst(1, Rd, Rn, Rm, Ra); +} +void ARM64XEmitter::SMADDL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ARM64Reg Ra) +{ + EncodeData3SrcInst(2, Rd, Rn, Rm, Ra); +} +void ARM64XEmitter::SMULL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) +{ + SMADDL(Rd, Rn, Rm, SP); +} +void ARM64XEmitter::SMSUBL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ARM64Reg Ra) +{ + EncodeData3SrcInst(3, Rd, Rn, Rm, Ra); +} +void ARM64XEmitter::SMULH(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) +{ + EncodeData3SrcInst(4, Rd, Rn, Rm, SP); +} +void ARM64XEmitter::UMADDL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ARM64Reg Ra) +{ + EncodeData3SrcInst(5, Rd, Rn, Rm, Ra); +} +void ARM64XEmitter::UMULL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) +{ + UMADDL(Rd, Rn, Rm, SP); +} +void ARM64XEmitter::UMSUBL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ARM64Reg Ra) +{ + EncodeData3SrcInst(6, Rd, Rn, Rm, Ra); +} +void ARM64XEmitter::UMULH(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) +{ + EncodeData3SrcInst(7, Rd, Rn, Rm, SP); +} +void ARM64XEmitter::MUL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) +{ + EncodeData3SrcInst(0, Rd, Rn, Rm, SP); +} +void ARM64XEmitter::MNEG(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) +{ + EncodeData3SrcInst(1, Rd, Rn, Rm, SP); +} + +// Logical (shifted register) +void ARM64XEmitter::AND(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ArithOption Shift) +{ + EncodeLogicalInst(0, Rd, Rn, Rm, Shift); +} +void 
ARM64XEmitter::BIC(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ArithOption Shift) +{ + EncodeLogicalInst(1, Rd, Rn, Rm, Shift); +} +void ARM64XEmitter::ORR(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ArithOption Shift) +{ + EncodeLogicalInst(2, Rd, Rn, Rm, Shift); +} +void ARM64XEmitter::ORN(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ArithOption Shift) +{ + EncodeLogicalInst(3, Rd, Rn, Rm, Shift); +} +void ARM64XEmitter::EOR(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ArithOption Shift) +{ + EncodeLogicalInst(4, Rd, Rn, Rm, Shift); +} +void ARM64XEmitter::EON(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ArithOption Shift) +{ + EncodeLogicalInst(5, Rd, Rn, Rm, Shift); +} +void ARM64XEmitter::ANDS(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ArithOption Shift) +{ + EncodeLogicalInst(6, Rd, Rn, Rm, Shift); +} +void ARM64XEmitter::BICS(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ArithOption Shift) +{ + EncodeLogicalInst(7, Rd, Rn, Rm, Shift); +} + +void ARM64XEmitter::MOV(ARM64Reg Rd, ARM64Reg Rm, ArithOption Shift) +{ + ORR(Rd, Is64Bit(Rd) ? ZR : WZR, Rm, Shift); +} + +void ARM64XEmitter::MOV(ARM64Reg Rd, ARM64Reg Rm) +{ + if (IsGPR(Rd) && IsGPR(Rm)) + ORR(Rd, Is64Bit(Rd) ? ZR : WZR, Rm, ArithOption(Rm, ST_LSL, 0)); + else + ASSERT_MSG(DYNA_REC, false, "Non-GPRs not supported in MOV"); +} +void ARM64XEmitter::MVN(ARM64Reg Rd, ARM64Reg Rm) +{ + ORN(Rd, Is64Bit(Rd) ? ZR : WZR, Rm, ArithOption(Rm, ST_LSL, 0)); +} +void ARM64XEmitter::LSL(ARM64Reg Rd, ARM64Reg Rm, int shift) +{ + int bits = Is64Bit(Rd) ? 64 : 32; + UBFM(Rd, Rm, (bits - shift) & (bits - 1), bits - shift - 1); +} +void ARM64XEmitter::LSR(ARM64Reg Rd, ARM64Reg Rm, int shift) +{ + int bits = Is64Bit(Rd) ? 64 : 32; + UBFM(Rd, Rm, shift, bits - 1); +} +void ARM64XEmitter::ASR(ARM64Reg Rd, ARM64Reg Rm, int shift) +{ + int bits = Is64Bit(Rd) ? 64 : 32; + SBFM(Rd, Rm, shift, bits - 1); +} +void ARM64XEmitter::ROR_(ARM64Reg Rd, ARM64Reg Rm, int shift) +{ + EXTR(Rd, Rm, Rm, shift); +} + +// Logical (immediate) +void ARM64XEmitter::AND(ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms, bool invert) +{ + EncodeLogicalImmInst(0, Rd, Rn, immr, imms, invert); +} +void ARM64XEmitter::ANDS(ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms, bool invert) +{ + EncodeLogicalImmInst(3, Rd, Rn, immr, imms, invert); +} +void ARM64XEmitter::EOR(ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms, bool invert) +{ + EncodeLogicalImmInst(2, Rd, Rn, immr, imms, invert); +} +void ARM64XEmitter::ORR(ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms, bool invert) +{ + EncodeLogicalImmInst(1, Rd, Rn, immr, imms, invert); +} +void ARM64XEmitter::TST(ARM64Reg Rn, u32 immr, u32 imms, bool invert) +{ + EncodeLogicalImmInst(3, Is64Bit(Rn) ? ZR : WZR, Rn, immr, imms, invert); +} + +// Add/subtract (immediate) +void ARM64XEmitter::ADD(ARM64Reg Rd, ARM64Reg Rn, u32 imm, bool shift) +{ + EncodeAddSubImmInst(0, false, shift, imm, Rn, Rd); +} +void ARM64XEmitter::ADDS(ARM64Reg Rd, ARM64Reg Rn, u32 imm, bool shift) +{ + EncodeAddSubImmInst(0, true, shift, imm, Rn, Rd); +} +void ARM64XEmitter::SUB(ARM64Reg Rd, ARM64Reg Rn, u32 imm, bool shift) +{ + EncodeAddSubImmInst(1, false, shift, imm, Rn, Rd); +} +void ARM64XEmitter::SUBS(ARM64Reg Rd, ARM64Reg Rn, u32 imm, bool shift) +{ + EncodeAddSubImmInst(1, true, shift, imm, Rn, Rd); +} +void ARM64XEmitter::CMP(ARM64Reg Rn, u32 imm, bool shift) +{ + EncodeAddSubImmInst(1, true, shift, imm, Rn, Is64Bit(Rn) ? 
SP : WSP);
+}
+
+// Data Processing (Immediate)
+void ARM64XEmitter::MOVZ(ARM64Reg Rd, u32 imm, ShiftAmount pos)
+{
+  EncodeMOVWideInst(2, Rd, imm, pos);
+}
+void ARM64XEmitter::MOVN(ARM64Reg Rd, u32 imm, ShiftAmount pos)
+{
+  EncodeMOVWideInst(0, Rd, imm, pos);
+}
+void ARM64XEmitter::MOVK(ARM64Reg Rd, u32 imm, ShiftAmount pos)
+{
+  EncodeMOVWideInst(3, Rd, imm, pos);
+}
+
+// Bitfield move
+void ARM64XEmitter::BFM(ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms)
+{
+  EncodeBitfieldMOVInst(1, Rd, Rn, immr, imms);
+}
+void ARM64XEmitter::SBFM(ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms)
+{
+  EncodeBitfieldMOVInst(0, Rd, Rn, immr, imms);
+}
+void ARM64XEmitter::UBFM(ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms)
+{
+  EncodeBitfieldMOVInst(2, Rd, Rn, immr, imms);
+}
+
+void ARM64XEmitter::BFI(ARM64Reg Rd, ARM64Reg Rn, u32 lsb, u32 width)
+{
+  u32 size = Is64Bit(Rn) ? 64 : 32;
+  ASSERT_MSG(DYNA_REC, (lsb + width) <= size,
+             "%s passed lsb %d and width %d which is greater than the register size!", __func__,
+             lsb, width);
+  EncodeBitfieldMOVInst(1, Rd, Rn, (size - lsb) % size, width - 1);
+}
+void ARM64XEmitter::UBFIZ(ARM64Reg Rd, ARM64Reg Rn, u32 lsb, u32 width)
+{
+  u32 size = Is64Bit(Rn) ? 64 : 32;
+  ASSERT_MSG(DYNA_REC, (lsb + width) <= size,
+             "%s passed lsb %d and width %d which is greater than the register size!", __func__,
+             lsb, width);
+  EncodeBitfieldMOVInst(2, Rd, Rn, (size - lsb) % size, width - 1);
+}
+void ARM64XEmitter::EXTR(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, u32 shift)
+{
+  bool sf = Is64Bit(Rd);
+  bool N = sf;
+  Rd = DecodeReg(Rd);
+  Rn = DecodeReg(Rn);
+  Rm = DecodeReg(Rm);
+  Write32((sf << 31) | (0x27 << 23) | (N << 22) | (Rm << 16) | (shift << 10) | (Rn << 5) | Rd);
+}
+void ARM64XEmitter::SXTB(ARM64Reg Rd, ARM64Reg Rn)
+{
+  SBFM(Rd, Rn, 0, 7);
+}
+void ARM64XEmitter::SXTH(ARM64Reg Rd, ARM64Reg Rn)
+{
+  SBFM(Rd, Rn, 0, 15);
+}
+void ARM64XEmitter::SXTW(ARM64Reg Rd, ARM64Reg Rn)
+{
+  ASSERT_MSG(DYNA_REC, Is64Bit(Rd), "%s requires 64bit register as destination", __func__);
+  SBFM(Rd, Rn, 0, 31);
+}
+void ARM64XEmitter::UXTB(ARM64Reg Rd, ARM64Reg Rn)
+{
+  UBFM(Rd, Rn, 0, 7);
+}
+void ARM64XEmitter::UXTH(ARM64Reg Rd, ARM64Reg Rn)
+{
+  UBFM(Rd, Rn, 0, 15);
+}
+
+// Load Register (Literal)
+void ARM64XEmitter::LDR(ARM64Reg Rt, u32 imm)
+{
+  EncodeLoadRegisterInst(0, Rt, imm);
+}
+void ARM64XEmitter::LDRSW(ARM64Reg Rt, u32 imm)
+{
+  EncodeLoadRegisterInst(2, Rt, imm);
+}
+void ARM64XEmitter::PRFM(ARM64Reg Rt, u32 imm)
+{
+  EncodeLoadRegisterInst(3, Rt, imm);
+}
+
+// Load/Store pair
+void ARM64XEmitter::LDP(IndexType type, ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn, s32 imm)
+{
+  EncodeLoadStorePair(0, 1, type, Rt, Rt2, Rn, imm);
+}
+void ARM64XEmitter::LDPSW(IndexType type, ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn, s32 imm)
+{
+  EncodeLoadStorePair(1, 1, type, Rt, Rt2, Rn, imm);
+}
+void ARM64XEmitter::STP(IndexType type, ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn, s32 imm)
+{
+  EncodeLoadStorePair(0, 0, type, Rt, Rt2, Rn, imm);
+}
+
+// Load/Store Exclusive
+void ARM64XEmitter::STXRB(ARM64Reg Rs, ARM64Reg Rt, ARM64Reg Rn)
+{
+  EncodeLoadStoreExcInst(0, Rs, SP, Rt, Rn);
+}
+void ARM64XEmitter::STLXRB(ARM64Reg Rs, ARM64Reg Rt, ARM64Reg Rn)
+{
+  EncodeLoadStoreExcInst(1, Rs, SP, Rt, Rn);
+}
+void ARM64XEmitter::LDXRB(ARM64Reg Rt, ARM64Reg Rn)
+{
+  EncodeLoadStoreExcInst(2, SP, SP, Rt, Rn);
+}
+void ARM64XEmitter::LDAXRB(ARM64Reg Rt, ARM64Reg Rn)
+{
+  EncodeLoadStoreExcInst(3, SP, SP, Rt, Rn);
+}
+void ARM64XEmitter::STLRB(ARM64Reg Rt, ARM64Reg Rn)
+{
+  EncodeLoadStoreExcInst(4, SP, SP, Rt,
Rn); +} +void ARM64XEmitter::LDARB(ARM64Reg Rt, ARM64Reg Rn) +{ + EncodeLoadStoreExcInst(5, SP, SP, Rt, Rn); +} +void ARM64XEmitter::STXRH(ARM64Reg Rs, ARM64Reg Rt, ARM64Reg Rn) +{ + EncodeLoadStoreExcInst(6, Rs, SP, Rt, Rn); +} +void ARM64XEmitter::STLXRH(ARM64Reg Rs, ARM64Reg Rt, ARM64Reg Rn) +{ + EncodeLoadStoreExcInst(7, Rs, SP, Rt, Rn); +} +void ARM64XEmitter::LDXRH(ARM64Reg Rt, ARM64Reg Rn) +{ + EncodeLoadStoreExcInst(8, SP, SP, Rt, Rn); +} +void ARM64XEmitter::LDAXRH(ARM64Reg Rt, ARM64Reg Rn) +{ + EncodeLoadStoreExcInst(9, SP, SP, Rt, Rn); +} +void ARM64XEmitter::STLRH(ARM64Reg Rt, ARM64Reg Rn) +{ + EncodeLoadStoreExcInst(10, SP, SP, Rt, Rn); +} +void ARM64XEmitter::LDARH(ARM64Reg Rt, ARM64Reg Rn) +{ + EncodeLoadStoreExcInst(11, SP, SP, Rt, Rn); +} +void ARM64XEmitter::STXR(ARM64Reg Rs, ARM64Reg Rt, ARM64Reg Rn) +{ + EncodeLoadStoreExcInst(12 + Is64Bit(Rt), Rs, SP, Rt, Rn); +} +void ARM64XEmitter::STLXR(ARM64Reg Rs, ARM64Reg Rt, ARM64Reg Rn) +{ + EncodeLoadStoreExcInst(14 + Is64Bit(Rt), Rs, SP, Rt, Rn); +} +void ARM64XEmitter::STXP(ARM64Reg Rs, ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn) +{ + EncodeLoadStoreExcInst(16 + Is64Bit(Rt), Rs, Rt2, Rt, Rn); +} +void ARM64XEmitter::STLXP(ARM64Reg Rs, ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn) +{ + EncodeLoadStoreExcInst(18 + Is64Bit(Rt), Rs, Rt2, Rt, Rn); +} +void ARM64XEmitter::LDXR(ARM64Reg Rt, ARM64Reg Rn) +{ + EncodeLoadStoreExcInst(20 + Is64Bit(Rt), SP, SP, Rt, Rn); +} +void ARM64XEmitter::LDAXR(ARM64Reg Rt, ARM64Reg Rn) +{ + EncodeLoadStoreExcInst(22 + Is64Bit(Rt), SP, SP, Rt, Rn); +} +void ARM64XEmitter::LDXP(ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn) +{ + EncodeLoadStoreExcInst(24 + Is64Bit(Rt), SP, Rt2, Rt, Rn); +} +void ARM64XEmitter::LDAXP(ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn) +{ + EncodeLoadStoreExcInst(26 + Is64Bit(Rt), SP, Rt2, Rt, Rn); +} +void ARM64XEmitter::STLR(ARM64Reg Rt, ARM64Reg Rn) +{ + EncodeLoadStoreExcInst(28 + Is64Bit(Rt), SP, SP, Rt, Rn); +} +void ARM64XEmitter::LDAR(ARM64Reg Rt, ARM64Reg Rn) +{ + EncodeLoadStoreExcInst(30 + Is64Bit(Rt), SP, SP, Rt, Rn); +} + +// Load/Store no-allocate pair (offset) +void ARM64XEmitter::STNP(ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn, u32 imm) +{ + EncodeLoadStorePairedInst(0xA0, Rt, Rt2, Rn, imm); +} +void ARM64XEmitter::LDNP(ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn, u32 imm) +{ + EncodeLoadStorePairedInst(0xA1, Rt, Rt2, Rn, imm); +} + +// Load/Store register (immediate post-indexed) +// XXX: Most of these support vectors +void ARM64XEmitter::STRB(IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm) +{ + if (type == INDEX_UNSIGNED) + EncodeLoadStoreIndexedInst(0x0E4, Rt, Rn, imm, 8); + else + EncodeLoadStoreIndexedInst(0x0E0, type == INDEX_POST ? 1 : 3, Rt, Rn, imm); +} +void ARM64XEmitter::LDRB(IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm) +{ + if (type == INDEX_UNSIGNED) + EncodeLoadStoreIndexedInst(0x0E5, Rt, Rn, imm, 8); + else + EncodeLoadStoreIndexedInst(0x0E1, type == INDEX_POST ? 1 : 3, Rt, Rn, imm); +} +void ARM64XEmitter::LDRSB(IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm) +{ + if (type == INDEX_UNSIGNED) + EncodeLoadStoreIndexedInst(Is64Bit(Rt) ? 0x0E6 : 0x0E7, Rt, Rn, imm, 8); + else + EncodeLoadStoreIndexedInst(Is64Bit(Rt) ? 0x0E2 : 0x0E3, type == INDEX_POST ? 1 : 3, Rt, Rn, + imm); +} +void ARM64XEmitter::STRH(IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm) +{ + if (type == INDEX_UNSIGNED) + EncodeLoadStoreIndexedInst(0x1E4, Rt, Rn, imm, 16); + else + EncodeLoadStoreIndexedInst(0x1E0, type == INDEX_POST ? 
1 : 3, Rt, Rn, imm); +} +void ARM64XEmitter::LDRH(IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm) +{ + if (type == INDEX_UNSIGNED) + EncodeLoadStoreIndexedInst(0x1E5, Rt, Rn, imm, 16); + else + EncodeLoadStoreIndexedInst(0x1E1, type == INDEX_POST ? 1 : 3, Rt, Rn, imm); +} +void ARM64XEmitter::LDRSH(IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm) +{ + if (type == INDEX_UNSIGNED) + EncodeLoadStoreIndexedInst(Is64Bit(Rt) ? 0x1E6 : 0x1E7, Rt, Rn, imm, 16); + else + EncodeLoadStoreIndexedInst(Is64Bit(Rt) ? 0x1E2 : 0x1E3, type == INDEX_POST ? 1 : 3, Rt, Rn, + imm); +} +void ARM64XEmitter::STR(IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm) +{ + if (type == INDEX_UNSIGNED) + EncodeLoadStoreIndexedInst(Is64Bit(Rt) ? 0x3E4 : 0x2E4, Rt, Rn, imm, Is64Bit(Rt) ? 64 : 32); + else + EncodeLoadStoreIndexedInst(Is64Bit(Rt) ? 0x3E0 : 0x2E0, type == INDEX_POST ? 1 : 3, Rt, Rn, + imm); +} +void ARM64XEmitter::LDR(IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm) +{ + if (type == INDEX_UNSIGNED) + EncodeLoadStoreIndexedInst(Is64Bit(Rt) ? 0x3E5 : 0x2E5, Rt, Rn, imm, Is64Bit(Rt) ? 64 : 32); + else + EncodeLoadStoreIndexedInst(Is64Bit(Rt) ? 0x3E1 : 0x2E1, type == INDEX_POST ? 1 : 3, Rt, Rn, + imm); +} +void ARM64XEmitter::LDRSW(IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm) +{ + if (type == INDEX_UNSIGNED) + EncodeLoadStoreIndexedInst(0x2E6, Rt, Rn, imm, 32); + else + EncodeLoadStoreIndexedInst(0x2E2, type == INDEX_POST ? 1 : 3, Rt, Rn, imm); +} + +// Load/Store register (register offset) +void ARM64XEmitter::STRB(ARM64Reg Rt, ARM64Reg Rn, ArithOption Rm) +{ + EncodeLoadStoreRegisterOffset(0, 0, Rt, Rn, Rm); +} +void ARM64XEmitter::LDRB(ARM64Reg Rt, ARM64Reg Rn, ArithOption Rm) +{ + EncodeLoadStoreRegisterOffset(0, 1, Rt, Rn, Rm); +} +void ARM64XEmitter::LDRSB(ARM64Reg Rt, ARM64Reg Rn, ArithOption Rm) +{ + bool b64Bit = Is64Bit(Rt); + EncodeLoadStoreRegisterOffset(0, 3 - b64Bit, Rt, Rn, Rm); +} +void ARM64XEmitter::STRH(ARM64Reg Rt, ARM64Reg Rn, ArithOption Rm) +{ + EncodeLoadStoreRegisterOffset(1, 0, Rt, Rn, Rm); +} +void ARM64XEmitter::LDRH(ARM64Reg Rt, ARM64Reg Rn, ArithOption Rm) +{ + EncodeLoadStoreRegisterOffset(1, 1, Rt, Rn, Rm); +} +void ARM64XEmitter::LDRSH(ARM64Reg Rt, ARM64Reg Rn, ArithOption Rm) +{ + bool b64Bit = Is64Bit(Rt); + EncodeLoadStoreRegisterOffset(1, 3 - b64Bit, Rt, Rn, Rm); +} +void ARM64XEmitter::STR(ARM64Reg Rt, ARM64Reg Rn, ArithOption Rm) +{ + bool b64Bit = Is64Bit(Rt); + EncodeLoadStoreRegisterOffset(2 + b64Bit, 0, Rt, Rn, Rm); +} +void ARM64XEmitter::LDR(ARM64Reg Rt, ARM64Reg Rn, ArithOption Rm) +{ + bool b64Bit = Is64Bit(Rt); + EncodeLoadStoreRegisterOffset(2 + b64Bit, 1, Rt, Rn, Rm); +} +void ARM64XEmitter::LDRSW(ARM64Reg Rt, ARM64Reg Rn, ArithOption Rm) +{ + EncodeLoadStoreRegisterOffset(2, 2, Rt, Rn, Rm); +} +void ARM64XEmitter::PRFM(ARM64Reg Rt, ARM64Reg Rn, ArithOption Rm) +{ + EncodeLoadStoreRegisterOffset(3, 2, Rt, Rn, Rm); +} + +// Load/Store register (unscaled offset) +void ARM64XEmitter::STURB(ARM64Reg Rt, ARM64Reg Rn, s32 imm) +{ + EncodeLoadStoreUnscaled(0, 0, Rt, Rn, imm); +} +void ARM64XEmitter::LDURB(ARM64Reg Rt, ARM64Reg Rn, s32 imm) +{ + EncodeLoadStoreUnscaled(0, 1, Rt, Rn, imm); +} +void ARM64XEmitter::LDURSB(ARM64Reg Rt, ARM64Reg Rn, s32 imm) +{ + EncodeLoadStoreUnscaled(0, Is64Bit(Rt) ? 
2 : 3, Rt, Rn, imm);
+}
+void ARM64XEmitter::STURH(ARM64Reg Rt, ARM64Reg Rn, s32 imm)
+{
+  EncodeLoadStoreUnscaled(1, 0, Rt, Rn, imm);
+}
+void ARM64XEmitter::LDURH(ARM64Reg Rt, ARM64Reg Rn, s32 imm)
+{
+  EncodeLoadStoreUnscaled(1, 1, Rt, Rn, imm);
+}
+void ARM64XEmitter::LDURSH(ARM64Reg Rt, ARM64Reg Rn, s32 imm)
+{
+  EncodeLoadStoreUnscaled(1, Is64Bit(Rt) ? 2 : 3, Rt, Rn, imm);
+}
+void ARM64XEmitter::STUR(ARM64Reg Rt, ARM64Reg Rn, s32 imm)
+{
+  EncodeLoadStoreUnscaled(Is64Bit(Rt) ? 3 : 2, 0, Rt, Rn, imm);
+}
+void ARM64XEmitter::LDUR(ARM64Reg Rt, ARM64Reg Rn, s32 imm)
+{
+  EncodeLoadStoreUnscaled(Is64Bit(Rt) ? 3 : 2, 1, Rt, Rn, imm);
+}
+void ARM64XEmitter::LDURSW(ARM64Reg Rt, ARM64Reg Rn, s32 imm)
+{
+  ASSERT_MSG(DYNA_REC, !Is64Bit(Rt), "%s must have a 32bit destination register!", __func__);
+  EncodeLoadStoreUnscaled(2, 2, Rt, Rn, imm);
+}
+
+void ARM64XEmitter::LDRGeneric(int size, bool signExtend, ARM64Reg Rt, ARM64Reg Rn, ArithOption Rm)
+{
+  switch (size | signExtend)
+  {
+  case 32: LDR  (Rt, Rn, Rm); break;
+  case 33: LDRSW(Rt, Rn, Rm); break;
+  case 16: LDRH (Rt, Rn, Rm); break;
+  case 17: LDRSH(Rt, Rn, Rm); break;
+  case 8:  LDRB (Rt, Rn, Rm); break;
+  case 9:  LDRSB(Rt, Rn, Rm); break;
+  default: PanicAlert("LDRGeneric(reg): invalid size %d", size); break;
+  }
+}
+void ARM64XEmitter::STRGeneric(int size, ARM64Reg Rt, ARM64Reg Rn, ArithOption Rm)
+{
+  switch (size)
+  {
+  case 32: STR  (Rt, Rn, Rm); break;
+  case 16: STRH (Rt, Rn, Rm); break;
+  case 8:  STRB (Rt, Rn, Rm); break;
+  default: PanicAlert("STRGeneric(reg): invalid size %d", size); break;
+  }
+}
+
+void ARM64XEmitter::LDRGeneric(int size, bool signExtend, IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm)
+{
+  switch (size | signExtend)
+  {
+  case 32: LDR  (type, Rt, Rn, imm); break;
+  case 33: LDRSW(type, Rt, Rn, imm); break;
+  case 16: LDRH (type, Rt, Rn, imm); break;
+  case 17: LDRSH(type, Rt, Rn, imm); break;
+  case 8:  LDRB (type, Rt, Rn, imm); break;
+  case 9:  LDRSB(type, Rt, Rn, imm); break;
+  default: PanicAlert("LDRGeneric(imm): invalid size %d", size); break;
+  }
+}
+void ARM64XEmitter::STRGeneric(int size, IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm)
+{
+  switch (size)
+  {
+  case 32: STR  (type, Rt, Rn, imm); break;
+  case 16: STRH (type, Rt, Rn, imm); break;
+  case 8:  STRB (type, Rt, Rn, imm); break;
+  default: PanicAlert("STRGeneric(imm): invalid size %d", size); break;
+  }
+}
+
+// Address of label/page PC-relative
+void ARM64XEmitter::ADR(ARM64Reg Rd, s32 imm)
+{
+  EncodeAddressInst(0, Rd, imm);
+}
+void ARM64XEmitter::ADRP(ARM64Reg Rd, s32 imm)
+{
+  EncodeAddressInst(1, Rd, imm >> 12);
+}
+
+// Wrapper around MOVZ+MOVK (and later MOVN)
+void ARM64XEmitter::MOVI2R(ARM64Reg Rd, u64 imm, bool optimize)
+{
+  unsigned int parts = Is64Bit(Rd) ? 4 : 2;
+  BitSet32 upload_part(0);
+
+  // Always start with a movz! Kills the dependency on the register.
+  bool use_movz = true;
+
+  if (!imm)
+  {
+    // Zero immediate, just clear the register. EOR is pointless when we have MOVZ, which looks
+    // clearer in disasm too.
+    MOVZ(Rd, 0, SHIFT_0);
+    return;
+  }
+
+  if ((Is64Bit(Rd) && imm == std::numeric_limits<u64>::max()) ||
+      (!Is64Bit(Rd) && imm == std::numeric_limits<u32>::max()))
+  {
+    // Max unsigned value (or if signed, -1)
+    // Set to ~ZR
+    ARM64Reg ZR = Is64Bit(Rd) ? SP : WSP;
+    ORN(Rd, ZR, ZR, ArithOption(ZR, ST_LSL, 0));
+    return;
+  }
+
+  // TODO: Make some more systemic use of MOVN, but this will take care of most cases.
+  // Small negative integer. Use MOVN
+  if (!Is64Bit(Rd) && (imm | 0xFFFF0000) == imm)
+  {
+    MOVN(Rd, ~imm, SHIFT_0);
+    return;
+  }
+
+  // XXX: Use MOVN when possible.
+  // XXX: Optimize more
+  // XXX: Support rotating immediates to save instructions
+  if (optimize)
+  {
+    for (unsigned int i = 0; i < parts; ++i)
+    {
+      if ((imm >> (i * 16)) & 0xFFFF)
+        upload_part[i] = 1;
+    }
+  }
+
+  u64 aligned_pc = (u64)(m_rxbase + m_code) & ~0xFFF;
+  s64 aligned_offset = (s64)imm - (s64)aligned_pc;
+  // The offset for ADR/ADRP is an s32, so make sure it can be represented in that
+  if (upload_part.Count() > 1 && std::abs(aligned_offset) < 0x7FFFFFFFLL)
+  {
+    // Immediate we are loading is within 4GB of our aligned range
+    // Most likely an address that we can load in one or two instructions
+    if (!(std::abs(aligned_offset) & 0xFFF))
+    {
+      // Aligned ADRP
+      ADRP(Rd, (s32)aligned_offset);
+      return;
+    }
+    else
+    {
+      // If the address is within 1MB of PC we can load it in a single instruction still
+      s64 offset = (s64)imm - (s64)(m_rxbase + m_code);
+      if (offset >= -0xFFFFF && offset <= 0xFFFFF)
+      {
+        ADR(Rd, (s32)offset);
+        return;
+      }
+      else
+      {
+        ADRP(Rd, (s32)(aligned_offset & ~0xFFF));
+        ADD(Rd, Rd, imm & 0xFFF);
+        return;
+      }
+    }
+  }
+
+  for (unsigned i = 0; i < parts; ++i)
+  {
+    if (use_movz && upload_part[i])
+    {
+      MOVZ(Rd, (imm >> (i * 16)) & 0xFFFF, (ShiftAmount)i);
+      use_movz = false;
+    }
+    else
+    {
+      if (upload_part[i] || !optimize)
+        MOVK(Rd, (imm >> (i * 16)) & 0xFFFF, (ShiftAmount)i);
+    }
+  }
+}
+
+bool ARM64XEmitter::MOVI2R2(ARM64Reg Rd, u64 imm1, u64 imm2)
+{
+  // TODO: Also optimize for performance, not just for code size.
+  ptrdiff_t start_offset = GetCodeOffset();
+
+  MOVI2R(Rd, imm1);
+  int size1 = GetCodeOffset() - start_offset;
+
+  SetCodePtrUnsafe(start_offset);
+
+  MOVI2R(Rd, imm2);
+  int size2 = GetCodeOffset() - start_offset;
+
+  SetCodePtrUnsafe(start_offset);
+
+  bool element = size1 > size2;
+
+  MOVI2R(Rd, element ? imm2 : imm1);
+
+  return element;
+}
+
+void ARM64XEmitter::ABI_PushRegisters(BitSet32 registers)
+{
+  int num_regs = registers.Count();
+  int stack_size = (num_regs + (num_regs & 1)) * 8;
+  auto it = registers.begin();
+
+  if (!num_regs)
+    return;
+
+  // 8 bytes per register, but 16-byte alignment, so we may have to pad one register.
+  // Only update the SP on the last write to avoid the dependency between those stores.
+
+  // The first push must adjust the SP, else a context switch may invalidate everything below SP.
+  if (num_regs & 1)
+  {
+    STR(INDEX_PRE, (ARM64Reg)(X0 + *it++), SP, -stack_size);
+  }
+  else
+  {
+    ARM64Reg first_reg = (ARM64Reg)(X0 + *it++);
+    ARM64Reg second_reg = (ARM64Reg)(X0 + *it++);
+    STP(INDEX_PRE, first_reg, second_reg, SP, -stack_size);
+  }
+
+  // Fast store for all other registers, this is always an even number.
+  for (int i = 0; i < (num_regs - 1) / 2; i++)
+  {
+    ARM64Reg odd_reg = (ARM64Reg)(X0 + *it++);
+    ARM64Reg even_reg = (ARM64Reg)(X0 + *it++);
+    STP(INDEX_SIGNED, odd_reg, even_reg, SP, 16 * (i + 1));
+  }
+
+  ASSERT_MSG(DYNA_REC, it == registers.end(), "%s registers don't match.", __func__);
+}
+
+void ARM64XEmitter::ABI_PopRegisters(BitSet32 registers, BitSet32 ignore_mask)
+{
+  int num_regs = registers.Count();
+  int stack_size = (num_regs + (num_regs & 1)) * 8;
+  auto it = registers.begin();
+
+  if (!num_regs)
+    return;
+
+  // We must adjust the SP in the end, so load the first (two) registers at least.
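+  // Worked example (illustrative): popping {X0, X1, X2} uses a 32-byte frame;
+  // the loop below emits LDP X1, X2, [SP, #16], then the post-indexed load at
+  // the end restores X0 and releases the frame in one go: LDR X0, [SP], #32.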
+
+  ARM64Reg first = (ARM64Reg)(X0 + *it++);
+  ARM64Reg second;
+  if (!(num_regs & 1))
+    second = (ARM64Reg)(X0 + *it++);
+
+  // 8 bytes per register, but 16-byte alignment, so we may have to pad one register.
+  // Only update the SP on the last load to avoid the dependency between those loads.
+
+  // Fast load for all but the first (two) registers, this is always an even number.
+  for (int i = 0; i < (num_regs - 1) / 2; i++)
+  {
+    ARM64Reg odd_reg = (ARM64Reg)(X0 + *it++);
+    ARM64Reg even_reg = (ARM64Reg)(X0 + *it++);
+    LDP(INDEX_SIGNED, odd_reg, even_reg, SP, 16 * (i + 1));
+  }
+
+  // Post loading the first (two) registers.
+  if (num_regs & 1)
+    LDR(INDEX_POST, first, SP, stack_size);
+  else
+    LDP(INDEX_POST, first, second, SP, stack_size);
+
+  ASSERT_MSG(DYNA_REC, it == registers.end(), "%s registers don't match.", __func__);
+}
+
+// Float Emitter
+void ARM64FloatEmitter::EmitLoadStoreImmediate(u8 size, u32 opc, IndexType type, ARM64Reg Rt,
+                                               ARM64Reg Rn, s32 imm)
+{
+  Rt = DecodeReg(Rt);
+  Rn = DecodeReg(Rn);
+  u32 encoded_size = 0;
+  u32 encoded_imm = 0;
+
+  if (size == 8)
+    encoded_size = 0;
+  else if (size == 16)
+    encoded_size = 1;
+  else if (size == 32)
+    encoded_size = 2;
+  else if (size == 64)
+    encoded_size = 3;
+  else if (size == 128)
+    encoded_size = 0;
+
+  if (type == INDEX_UNSIGNED)
+  {
+    ASSERT_MSG(DYNA_REC, !(imm & ((size - 1) >> 3)),
+               "%s(INDEX_UNSIGNED) immediate offset must be aligned to size! (%d) (%p)", __func__,
+               imm, m_emit->GetCodePtr());
+    ASSERT_MSG(DYNA_REC, imm >= 0, "%s(INDEX_UNSIGNED) immediate offset must be positive!",
+               __func__);
+    if (size == 16)
+      imm >>= 1;
+    else if (size == 32)
+      imm >>= 2;
+    else if (size == 64)
+      imm >>= 3;
+    else if (size == 128)
+      imm >>= 4;
+    encoded_imm = (imm & 0xFFF);
+  }
+  else
+  {
+    ASSERT_MSG(DYNA_REC, !(imm < -256 || imm > 255),
+               "%s immediate offset must be within range of -256 to 255!", __func__);
+    encoded_imm = (imm & 0x1FF) << 2;
+    if (type == INDEX_POST)
+      encoded_imm |= 1;
+    else
+      encoded_imm |= 3;
+  }
+
+  Write32((encoded_size << 30) | (0xF << 26) | (type == INDEX_UNSIGNED ? (1 << 24) : 0) |
+          (size == 128 ?
(1 << 23) : 0) | (opc << 22) | (encoded_imm << 10) | (Rn << 5) | Rt); +} + +void ARM64FloatEmitter::EmitScalar2Source(bool M, bool S, u32 type, u32 opcode, ARM64Reg Rd, + ARM64Reg Rn, ARM64Reg Rm) +{ + ASSERT_MSG(DYNA_REC, !IsQuad(Rd), "%s only supports double and single registers!", __func__); + Rd = DecodeReg(Rd); + Rn = DecodeReg(Rn); + Rm = DecodeReg(Rm); + + Write32((M << 31) | (S << 29) | (0b11110001 << 21) | (type << 22) | (Rm << 16) | (opcode << 12) | + (1 << 11) | (Rn << 5) | Rd); +} + +void ARM64FloatEmitter::EmitThreeSame(bool U, u32 size, u32 opcode, ARM64Reg Rd, ARM64Reg Rn, + ARM64Reg Rm) +{ + ASSERT_MSG(DYNA_REC, !IsSingle(Rd), "%s doesn't support singles!", __func__); + bool quad = IsQuad(Rd); + Rd = DecodeReg(Rd); + Rn = DecodeReg(Rn); + Rm = DecodeReg(Rm); + + Write32((quad << 30) | (U << 29) | (0b1110001 << 21) | (size << 22) | (Rm << 16) | + (opcode << 11) | (1 << 10) | (Rn << 5) | Rd); +} + +void ARM64FloatEmitter::EmitCopy(bool Q, u32 op, u32 imm5, u32 imm4, ARM64Reg Rd, ARM64Reg Rn) +{ + Rd = DecodeReg(Rd); + Rn = DecodeReg(Rn); + + Write32((Q << 30) | (op << 29) | (0b111 << 25) | (imm5 << 16) | (imm4 << 11) | (1 << 10) | + (Rn << 5) | Rd); +} + +void ARM64FloatEmitter::Emit2RegMisc(bool Q, bool U, u32 size, u32 opcode, ARM64Reg Rd, ARM64Reg Rn) +{ + ASSERT_MSG(DYNA_REC, !IsSingle(Rd), "%s doesn't support singles!", __func__); + Rd = DecodeReg(Rd); + Rn = DecodeReg(Rn); + + Write32((Q << 30) | (U << 29) | (0b1110001 << 21) | (size << 22) | (opcode << 12) | (1 << 11) | + (Rn << 5) | Rd); +} + +void ARM64FloatEmitter::EmitLoadStoreSingleStructure(bool L, bool R, u32 opcode, bool S, u32 size, + ARM64Reg Rt, ARM64Reg Rn) +{ + ASSERT_MSG(DYNA_REC, !IsSingle(Rt), "%s doesn't support singles!", __func__); + bool quad = IsQuad(Rt); + Rt = DecodeReg(Rt); + Rn = DecodeReg(Rn); + + Write32((quad << 30) | (0b1101 << 24) | (L << 22) | (R << 21) | (opcode << 13) | (S << 12) | + (size << 10) | (Rn << 5) | Rt); +} + +void ARM64FloatEmitter::EmitLoadStoreSingleStructure(bool L, bool R, u32 opcode, bool S, u32 size, + ARM64Reg Rt, ARM64Reg Rn, ARM64Reg Rm) +{ + ASSERT_MSG(DYNA_REC, !IsSingle(Rt), "%s doesn't support singles!", __func__); + bool quad = IsQuad(Rt); + Rt = DecodeReg(Rt); + Rn = DecodeReg(Rn); + Rm = DecodeReg(Rm); + + Write32((quad << 30) | (0x1B << 23) | (L << 22) | (R << 21) | (Rm << 16) | (opcode << 13) | + (S << 12) | (size << 10) | (Rn << 5) | Rt); +} + +void ARM64FloatEmitter::Emit1Source(bool M, bool S, u32 type, u32 opcode, ARM64Reg Rd, ARM64Reg Rn) +{ + ASSERT_MSG(DYNA_REC, !IsQuad(Rd), "%s doesn't support vector!", __func__); + Rd = DecodeReg(Rd); + Rn = DecodeReg(Rn); + + Write32((M << 31) | (S << 29) | (0xF1 << 21) | (type << 22) | (opcode << 15) | (1 << 14) | + (Rn << 5) | Rd); +} + +void ARM64FloatEmitter::EmitConversion(bool sf, bool S, u32 type, u32 rmode, u32 opcode, + ARM64Reg Rd, ARM64Reg Rn) +{ + ASSERT_MSG(DYNA_REC, Rn <= SP, "%s only supports GPR as source!", __func__); + Rd = DecodeReg(Rd); + Rn = DecodeReg(Rn); + + Write32((sf << 31) | (S << 29) | (0xF1 << 21) | (type << 22) | (rmode << 19) | (opcode << 16) | + (Rn << 5) | Rd); +} + +void ARM64FloatEmitter::EmitConvertScalarToInt(ARM64Reg Rd, ARM64Reg Rn, RoundingMode round, + bool sign) +{ + DEBUG_ASSERT_MSG(DYNA_REC, IsScalar(Rn), "fcvts: Rn must be floating point"); + if (IsGPR(Rd)) + { + // Use the encoding that transfers the result to a GPR. + bool sf = Is64Bit(Rd); + int type = IsDouble(Rn) ? 1 : 0; + Rd = DecodeReg(Rd); + Rn = DecodeReg(Rn); + int opcode = (sign ? 
1 : 0); + int rmode = 0; + switch (round) + { + case ROUND_A: + rmode = 0; + opcode |= 4; + break; + case ROUND_P: + rmode = 1; + break; + case ROUND_M: + rmode = 2; + break; + case ROUND_Z: + rmode = 3; + break; + case ROUND_N: + rmode = 0; + break; + } + EmitConversion2(sf, 0, true, type, rmode, opcode, 0, Rd, Rn); + } + else + { + // Use the encoding (vector, single) that keeps the result in the fp register. + int sz = IsDouble(Rn); + Rd = DecodeReg(Rd); + Rn = DecodeReg(Rn); + int opcode = 0; + switch (round) + { + case ROUND_A: + opcode = 0x1C; + break; + case ROUND_N: + opcode = 0x1A; + break; + case ROUND_M: + opcode = 0x1B; + break; + case ROUND_P: + opcode = 0x1A; + sz |= 2; + break; + case ROUND_Z: + opcode = 0x1B; + sz |= 2; + break; + } + Write32((0x5E << 24) | (sign << 29) | (sz << 22) | (1 << 21) | (opcode << 12) | (2 << 10) | + (Rn << 5) | Rd); + } +} + +void ARM64FloatEmitter::FCVTS(ARM64Reg Rd, ARM64Reg Rn, RoundingMode round) +{ + EmitConvertScalarToInt(Rd, Rn, round, false); +} + +void ARM64FloatEmitter::FCVTU(ARM64Reg Rd, ARM64Reg Rn, RoundingMode round) +{ + EmitConvertScalarToInt(Rd, Rn, round, true); +} + +void ARM64FloatEmitter::EmitConversion2(bool sf, bool S, bool direction, u32 type, u32 rmode, + u32 opcode, int scale, ARM64Reg Rd, ARM64Reg Rn) +{ + Rd = DecodeReg(Rd); + Rn = DecodeReg(Rn); + + Write32((sf << 31) | (S << 29) | (0xF0 << 21) | (direction << 21) | (type << 22) | (rmode << 19) | + (opcode << 16) | (scale << 10) | (Rn << 5) | Rd); +} + +void ARM64FloatEmitter::EmitCompare(bool M, bool S, u32 op, u32 opcode2, ARM64Reg Rn, ARM64Reg Rm) +{ + ASSERT_MSG(DYNA_REC, !IsQuad(Rn), "%s doesn't support vector!", __func__); + bool is_double = IsDouble(Rn); + + Rn = DecodeReg(Rn); + Rm = DecodeReg(Rm); + + Write32((M << 31) | (S << 29) | (0xF1 << 21) | (is_double << 22) | (Rm << 16) | (op << 14) | + (1 << 13) | (Rn << 5) | opcode2); +} + +void ARM64FloatEmitter::EmitCondSelect(bool M, bool S, CCFlags cond, ARM64Reg Rd, ARM64Reg Rn, + ARM64Reg Rm) +{ + ASSERT_MSG(DYNA_REC, !IsQuad(Rd), "%s doesn't support vector!", __func__); + bool is_double = IsDouble(Rd); + + Rd = DecodeReg(Rd); + Rn = DecodeReg(Rn); + Rm = DecodeReg(Rm); + + Write32((M << 31) | (S << 29) | (0xF1 << 21) | (is_double << 22) | (Rm << 16) | (cond << 12) | + (3 << 10) | (Rn << 5) | Rd); +} + +void ARM64FloatEmitter::EmitPermute(u32 size, u32 op, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) +{ + ASSERT_MSG(DYNA_REC, !IsSingle(Rd), "%s doesn't support singles!", __func__); + + bool quad = IsQuad(Rd); + + u32 encoded_size = 0; + if (size == 16) + encoded_size = 1; + else if (size == 32) + encoded_size = 2; + else if (size == 64) + encoded_size = 3; + + Rd = DecodeReg(Rd); + Rn = DecodeReg(Rn); + Rm = DecodeReg(Rm); + + Write32((quad << 30) | (7 << 25) | (encoded_size << 22) | (Rm << 16) | (op << 12) | (1 << 11) | + (Rn << 5) | Rd); +} + +void ARM64FloatEmitter::EmitScalarImm(bool M, bool S, u32 type, u32 imm5, ARM64Reg Rd, u32 imm8) +{ + ASSERT_MSG(DYNA_REC, !IsQuad(Rd), "%s doesn't support vector!", __func__); + + bool is_double = !IsSingle(Rd); + + Rd = DecodeReg(Rd); + + Write32((M << 31) | (S << 29) | (0xF1 << 21) | (is_double << 22) | (type << 22) | (imm8 << 13) | + (1 << 12) | (imm5 << 5) | Rd); +} + +void ARM64FloatEmitter::EmitShiftImm(bool Q, bool U, u32 immh, u32 immb, u32 opcode, ARM64Reg Rd, + ARM64Reg Rn) +{ + ASSERT_MSG(DYNA_REC, immh, "%s bad encoding! 
Can't have zero immh", __func__); + + Rd = DecodeReg(Rd); + Rn = DecodeReg(Rn); + + Write32((Q << 30) | (U << 29) | (0xF << 24) | (immh << 19) | (immb << 16) | (opcode << 11) | + (1 << 10) | (Rn << 5) | Rd); +} + +void ARM64FloatEmitter::EmitScalarShiftImm(bool U, u32 immh, u32 immb, u32 opcode, ARM64Reg Rd, + ARM64Reg Rn) +{ + Rd = DecodeReg(Rd); + Rn = DecodeReg(Rn); + + Write32((2 << 30) | (U << 29) | (0x3E << 23) | (immh << 19) | (immb << 16) | (opcode << 11) | + (1 << 10) | (Rn << 5) | Rd); +} + +void ARM64FloatEmitter::EmitLoadStoreMultipleStructure(u32 size, bool L, u32 opcode, ARM64Reg Rt, + ARM64Reg Rn) +{ + bool quad = IsQuad(Rt); + u32 encoded_size = 0; + + if (size == 16) + encoded_size = 1; + else if (size == 32) + encoded_size = 2; + else if (size == 64) + encoded_size = 3; + + Rt = DecodeReg(Rt); + Rn = DecodeReg(Rn); + + Write32((quad << 30) | (3 << 26) | (L << 22) | (opcode << 12) | (encoded_size << 10) | (Rn << 5) | + Rt); +} + +void ARM64FloatEmitter::EmitLoadStoreMultipleStructurePost(u32 size, bool L, u32 opcode, + ARM64Reg Rt, ARM64Reg Rn, ARM64Reg Rm) +{ + bool quad = IsQuad(Rt); + u32 encoded_size = 0; + + if (size == 16) + encoded_size = 1; + else if (size == 32) + encoded_size = 2; + else if (size == 64) + encoded_size = 3; + + Rt = DecodeReg(Rt); + Rn = DecodeReg(Rn); + Rm = DecodeReg(Rm); + + Write32((quad << 30) | (0b11001 << 23) | (L << 22) | (Rm << 16) | (opcode << 12) | + (encoded_size << 10) | (Rn << 5) | Rt); +} + +void ARM64FloatEmitter::EmitScalar1Source(bool M, bool S, u32 type, u32 opcode, ARM64Reg Rd, + ARM64Reg Rn) +{ + ASSERT_MSG(DYNA_REC, !IsQuad(Rd), "%s doesn't support vector!", __func__); + + Rd = DecodeReg(Rd); + Rn = DecodeReg(Rn); + + Write32((M << 31) | (S << 29) | (0xF1 << 21) | (type << 22) | (opcode << 15) | (1 << 14) | + (Rn << 5) | Rd); +} + +void ARM64FloatEmitter::EmitVectorxElement(bool U, u32 size, bool L, u32 opcode, bool H, + ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) +{ + bool quad = IsQuad(Rd); + + Rd = DecodeReg(Rd); + Rn = DecodeReg(Rn); + Rm = DecodeReg(Rm); + + Write32((quad << 30) | (U << 29) | (0xF << 24) | (size << 22) | (L << 21) | (Rm << 16) | + (opcode << 12) | (H << 11) | (Rn << 5) | Rd); +} + +void ARM64FloatEmitter::EmitLoadStoreUnscaled(u32 size, u32 op, ARM64Reg Rt, ARM64Reg Rn, s32 imm) +{ + ASSERT_MSG(DYNA_REC, !(imm < -256 || imm > 255), "%s received too large offset: %d", __func__, + imm); + Rt = DecodeReg(Rt); + Rn = DecodeReg(Rn); + + Write32((size << 30) | (0xF << 26) | (op << 22) | ((imm & 0x1FF) << 12) | (Rn << 5) | Rt); +} + +void ARM64FloatEmitter::EncodeLoadStorePair(u32 size, bool load, IndexType type, ARM64Reg Rt, + ARM64Reg Rt2, ARM64Reg Rn, s32 imm) +{ + u32 type_encode = 0; + u32 opc = 0; + + switch (type) + { + case INDEX_SIGNED: + type_encode = 0b010; + break; + case INDEX_POST: + type_encode = 0b001; + break; + case INDEX_PRE: + type_encode = 0b011; + break; + case INDEX_UNSIGNED: + ASSERT_MSG(DYNA_REC, false, "%s doesn't support INDEX_UNSIGNED!", __func__); + break; + } + + if (size == 128) + { + ASSERT_MSG(DYNA_REC, !(imm & 0xF), "%s received invalid offset 0x%x!", __func__, imm); + opc = 2; + imm >>= 4; + } + else if (size == 64) + { + ASSERT_MSG(DYNA_REC, !(imm & 0x7), "%s received invalid offset 0x%x!", __func__, imm); + opc = 1; + imm >>= 3; + } + else if (size == 32) + { + ASSERT_MSG(DYNA_REC, !(imm & 0x3), "%s received invalid offset 0x%x!", __func__, imm); + opc = 0; + imm >>= 2; + } + + Rt = DecodeReg(Rt); + Rt2 = DecodeReg(Rt2); + Rn = DecodeReg(Rn); + + Write32((opc << 30) | (0b1011 << 
26) | (type_encode << 23) | (load << 22) | ((imm & 0x7F) << 15) | + (Rt2 << 10) | (Rn << 5) | Rt); +} + +void ARM64FloatEmitter::EncodeLoadStoreRegisterOffset(u32 size, bool load, ARM64Reg Rt, ARM64Reg Rn, + ArithOption Rm) +{ + ASSERT_MSG(DYNA_REC, Rm.GetType() == ArithOption::TYPE_EXTENDEDREG, + "%s must contain an extended reg as Rm!", __func__); + + u32 encoded_size = 0; + u32 encoded_op = 0; + + if (size == 8) + { + encoded_size = 0; + encoded_op = 0; + } + else if (size == 16) + { + encoded_size = 1; + encoded_op = 0; + } + else if (size == 32) + { + encoded_size = 2; + encoded_op = 0; + } + else if (size == 64) + { + encoded_size = 3; + encoded_op = 0; + } + else if (size == 128) + { + encoded_size = 0; + encoded_op = 2; + } + + if (load) + encoded_op |= 1; + + Rt = DecodeReg(Rt); + Rn = DecodeReg(Rn); + ARM64Reg decoded_Rm = DecodeReg(Rm.GetReg()); + + Write32((encoded_size << 30) | (encoded_op << 22) | (0b111100001 << 21) | (decoded_Rm << 16) | + Rm.GetData() | (1 << 11) | (Rn << 5) | Rt); +} + +void ARM64FloatEmitter::EncodeModImm(bool Q, u8 op, u8 cmode, u8 o2, ARM64Reg Rd, u8 abcdefgh) +{ + union + { + u8 hex; + struct + { + unsigned defgh : 5; + unsigned abc : 3; + }; + } v; + v.hex = abcdefgh; + Rd = DecodeReg(Rd); + Write32((Q << 30) | (op << 29) | (0xF << 24) | (v.abc << 16) | (cmode << 12) | (o2 << 11) | + (1 << 10) | (v.defgh << 5) | Rd); +} + +void ARM64FloatEmitter::LDR(u8 size, IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm) +{ + EmitLoadStoreImmediate(size, 1, type, Rt, Rn, imm); +} +void ARM64FloatEmitter::STR(u8 size, IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm) +{ + EmitLoadStoreImmediate(size, 0, type, Rt, Rn, imm); +} + +// Loadstore unscaled +void ARM64FloatEmitter::LDUR(u8 size, ARM64Reg Rt, ARM64Reg Rn, s32 imm) +{ + u32 encoded_size = 0; + u32 encoded_op = 0; + + if (size == 8) + { + encoded_size = 0; + encoded_op = 1; + } + else if (size == 16) + { + encoded_size = 1; + encoded_op = 1; + } + else if (size == 32) + { + encoded_size = 2; + encoded_op = 1; + } + else if (size == 64) + { + encoded_size = 3; + encoded_op = 1; + } + else if (size == 128) + { + encoded_size = 0; + encoded_op = 3; + } + + EmitLoadStoreUnscaled(encoded_size, encoded_op, Rt, Rn, imm); +} +void ARM64FloatEmitter::STUR(u8 size, ARM64Reg Rt, ARM64Reg Rn, s32 imm) +{ + u32 encoded_size = 0; + u32 encoded_op = 0; + + if (size == 8) + { + encoded_size = 0; + encoded_op = 0; + } + else if (size == 16) + { + encoded_size = 1; + encoded_op = 0; + } + else if (size == 32) + { + encoded_size = 2; + encoded_op = 0; + } + else if (size == 64) + { + encoded_size = 3; + encoded_op = 0; + } + else if (size == 128) + { + encoded_size = 0; + encoded_op = 2; + } + + EmitLoadStoreUnscaled(encoded_size, encoded_op, Rt, Rn, imm); +} + +// Loadstore single structure +void ARM64FloatEmitter::LD1(u8 size, ARM64Reg Rt, u8 index, ARM64Reg Rn) +{ + bool S = 0; + u32 opcode = 0; + u32 encoded_size = 0; + ARM64Reg encoded_reg = INVALID_REG; + + if (size == 8) + { + S = (index & 4) != 0; + opcode = 0; + encoded_size = index & 3; + if (index & 8) + encoded_reg = EncodeRegToQuad(Rt); + else + encoded_reg = EncodeRegToDouble(Rt); + } + else if (size == 16) + { + S = (index & 2) != 0; + opcode = 2; + encoded_size = (index & 1) << 1; + if (index & 4) + encoded_reg = EncodeRegToQuad(Rt); + else + encoded_reg = EncodeRegToDouble(Rt); + } + else if (size == 32) + { + S = (index & 1) != 0; + opcode = 4; + encoded_size = 0; + if (index & 2) + encoded_reg = EncodeRegToQuad(Rt); + else + encoded_reg = 
EncodeRegToDouble(Rt); + } + else if (size == 64) + { + S = 0; + opcode = 4; + encoded_size = 1; + if (index == 1) + encoded_reg = EncodeRegToQuad(Rt); + else + encoded_reg = EncodeRegToDouble(Rt); + } + + EmitLoadStoreSingleStructure(1, 0, opcode, S, encoded_size, encoded_reg, Rn); +} + +void ARM64FloatEmitter::LD1(u8 size, ARM64Reg Rt, u8 index, ARM64Reg Rn, ARM64Reg Rm) +{ + bool S = 0; + u32 opcode = 0; + u32 encoded_size = 0; + ARM64Reg encoded_reg = INVALID_REG; + + if (size == 8) + { + S = (index & 4) != 0; + opcode = 0; + encoded_size = index & 3; + if (index & 8) + encoded_reg = EncodeRegToQuad(Rt); + else + encoded_reg = EncodeRegToDouble(Rt); + } + else if (size == 16) + { + S = (index & 2) != 0; + opcode = 2; + encoded_size = (index & 1) << 1; + if (index & 4) + encoded_reg = EncodeRegToQuad(Rt); + else + encoded_reg = EncodeRegToDouble(Rt); + } + else if (size == 32) + { + S = (index & 1) != 0; + opcode = 4; + encoded_size = 0; + if (index & 2) + encoded_reg = EncodeRegToQuad(Rt); + else + encoded_reg = EncodeRegToDouble(Rt); + } + else if (size == 64) + { + S = 0; + opcode = 4; + encoded_size = 1; + if (index == 1) + encoded_reg = EncodeRegToQuad(Rt); + else + encoded_reg = EncodeRegToDouble(Rt); + } + + EmitLoadStoreSingleStructure(1, 0, opcode, S, encoded_size, encoded_reg, Rn, Rm); +} + +void ARM64FloatEmitter::LD1R(u8 size, ARM64Reg Rt, ARM64Reg Rn) +{ + EmitLoadStoreSingleStructure(1, 0, 6, 0, size >> 4, Rt, Rn); +} +void ARM64FloatEmitter::LD2R(u8 size, ARM64Reg Rt, ARM64Reg Rn) +{ + EmitLoadStoreSingleStructure(1, 1, 6, 0, size >> 4, Rt, Rn); +} +void ARM64FloatEmitter::LD1R(u8 size, ARM64Reg Rt, ARM64Reg Rn, ARM64Reg Rm) +{ + EmitLoadStoreSingleStructure(1, 0, 6, 0, size >> 4, Rt, Rn, Rm); +} +void ARM64FloatEmitter::LD2R(u8 size, ARM64Reg Rt, ARM64Reg Rn, ARM64Reg Rm) +{ + EmitLoadStoreSingleStructure(1, 1, 6, 0, size >> 4, Rt, Rn, Rm); +} + +void ARM64FloatEmitter::ST1(u8 size, ARM64Reg Rt, u8 index, ARM64Reg Rn) +{ + bool S = 0; + u32 opcode = 0; + u32 encoded_size = 0; + ARM64Reg encoded_reg = INVALID_REG; + + if (size == 8) + { + S = (index & 4) != 0; + opcode = 0; + encoded_size = index & 3; + if (index & 8) + encoded_reg = EncodeRegToQuad(Rt); + else + encoded_reg = EncodeRegToDouble(Rt); + } + else if (size == 16) + { + S = (index & 2) != 0; + opcode = 2; + encoded_size = (index & 1) << 1; + if (index & 4) + encoded_reg = EncodeRegToQuad(Rt); + else + encoded_reg = EncodeRegToDouble(Rt); + } + else if (size == 32) + { + S = (index & 1) != 0; + opcode = 4; + encoded_size = 0; + if (index & 2) + encoded_reg = EncodeRegToQuad(Rt); + else + encoded_reg = EncodeRegToDouble(Rt); + } + else if (size == 64) + { + S = 0; + opcode = 4; + encoded_size = 1; + if (index == 1) + encoded_reg = EncodeRegToQuad(Rt); + else + encoded_reg = EncodeRegToDouble(Rt); + } + + EmitLoadStoreSingleStructure(0, 0, opcode, S, encoded_size, encoded_reg, Rn); +} + +void ARM64FloatEmitter::ST1(u8 size, ARM64Reg Rt, u8 index, ARM64Reg Rn, ARM64Reg Rm) +{ + bool S = 0; + u32 opcode = 0; + u32 encoded_size = 0; + ARM64Reg encoded_reg = INVALID_REG; + + if (size == 8) + { + S = (index & 4) != 0; + opcode = 0; + encoded_size = index & 3; + if (index & 8) + encoded_reg = EncodeRegToQuad(Rt); + else + encoded_reg = EncodeRegToDouble(Rt); + } + else if (size == 16) + { + S = (index & 2) != 0; + opcode = 2; + encoded_size = (index & 1) << 1; + if (index & 4) + encoded_reg = EncodeRegToQuad(Rt); + else + encoded_reg = EncodeRegToDouble(Rt); + } + else if (size == 32) + { + S = (index & 1) != 0; + 
opcode = 4; + encoded_size = 0; + if (index & 2) + encoded_reg = EncodeRegToQuad(Rt); + else + encoded_reg = EncodeRegToDouble(Rt); + } + else if (size == 64) + { + S = 0; + opcode = 4; + encoded_size = 1; + if (index == 1) + encoded_reg = EncodeRegToQuad(Rt); + else + encoded_reg = EncodeRegToDouble(Rt); + } + + EmitLoadStoreSingleStructure(0, 0, opcode, S, encoded_size, encoded_reg, Rn, Rm); +} + +// Loadstore multiple structure +void ARM64FloatEmitter::LD1(u8 size, u8 count, ARM64Reg Rt, ARM64Reg Rn) +{ + ASSERT_MSG(DYNA_REC, !(count == 0 || count > 4), "%s must have a count of 1 to 4 registers!", + __func__); + u32 opcode = 0; + if (count == 1) + opcode = 0b111; + else if (count == 2) + opcode = 0b1010; + else if (count == 3) + opcode = 0b0110; + else if (count == 4) + opcode = 0b0010; + EmitLoadStoreMultipleStructure(size, 1, opcode, Rt, Rn); +} +void ARM64FloatEmitter::LD1(u8 size, u8 count, IndexType type, ARM64Reg Rt, ARM64Reg Rn, + ARM64Reg Rm) +{ + ASSERT_MSG(DYNA_REC, !(count == 0 || count > 4), "%s must have a count of 1 to 4 registers!", + __func__); + ASSERT_MSG(DYNA_REC, type == INDEX_POST, "%s only supports post indexing!", __func__); + + u32 opcode = 0; + if (count == 1) + opcode = 0b111; + else if (count == 2) + opcode = 0b1010; + else if (count == 3) + opcode = 0b0110; + else if (count == 4) + opcode = 0b0010; + EmitLoadStoreMultipleStructurePost(size, 1, opcode, Rt, Rn, Rm); +} +void ARM64FloatEmitter::ST1(u8 size, u8 count, ARM64Reg Rt, ARM64Reg Rn) +{ + ASSERT_MSG(DYNA_REC, !(count == 0 || count > 4), "%s must have a count of 1 to 4 registers!", + __func__); + u32 opcode = 0; + if (count == 1) + opcode = 0b111; + else if (count == 2) + opcode = 0b1010; + else if (count == 3) + opcode = 0b0110; + else if (count == 4) + opcode = 0b0010; + EmitLoadStoreMultipleStructure(size, 0, opcode, Rt, Rn); +} +void ARM64FloatEmitter::ST1(u8 size, u8 count, IndexType type, ARM64Reg Rt, ARM64Reg Rn, + ARM64Reg Rm) +{ + ASSERT_MSG(DYNA_REC, !(count == 0 || count > 4), "%s must have a count of 1 to 4 registers!", + __func__); + ASSERT_MSG(DYNA_REC, type == INDEX_POST, "%s only supports post indexing!", __func__); + + u32 opcode = 0; + if (count == 1) + opcode = 0b111; + else if (count == 2) + opcode = 0b1010; + else if (count == 3) + opcode = 0b0110; + else if (count == 4) + opcode = 0b0010; + EmitLoadStoreMultipleStructurePost(size, 0, opcode, Rt, Rn, Rm); +} + +// Scalar - 1 Source +void ARM64FloatEmitter::FMOV(ARM64Reg Rd, ARM64Reg Rn, bool top) +{ + if (IsScalar(Rd) && IsScalar(Rn)) + { + EmitScalar1Source(0, 0, IsDouble(Rd), 0, Rd, Rn); + } + else + { + ASSERT_MSG(DYNA_REC, !IsQuad(Rd) && !IsQuad(Rn), "FMOV can't move to/from quads"); + int rmode = 0; + int opcode = 6; + int sf = 0; + if (IsSingle(Rd) && !Is64Bit(Rn) && !top) + { + // GPR to scalar single + opcode |= 1; + } + else if (!Is64Bit(Rd) && IsSingle(Rn) && !top) + { + // Scalar single to GPR - defaults are correct + } + else + { + // TODO + ASSERT_MSG(DYNA_REC, 0, "FMOV: Unhandled case"); + } + Rd = DecodeReg(Rd); + Rn = DecodeReg(Rn); + Write32((sf << 31) | (0x1e2 << 20) | (rmode << 19) | (opcode << 16) | (Rn << 5) | Rd); + } +} + +// Loadstore paired +void ARM64FloatEmitter::LDP(u8 size, IndexType type, ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn, + s32 imm) +{ + EncodeLoadStorePair(size, true, type, Rt, Rt2, Rn, imm); +} +void ARM64FloatEmitter::STP(u8 size, IndexType type, ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn, + s32 imm) +{ + EncodeLoadStorePair(size, false, type, Rt, Rt2, Rn, imm); +} + +// Loadstore register offset 
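+// Here `size` is the access width in bits (8/16/32/64/128) and Rm must wrap an
+// extended register, e.g. (illustrative, assuming ArithOption's extended-register
+// constructor) LDR(32, S0, X0, ArithOption(X1, true)) for a scaled index load.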
+void ARM64FloatEmitter::STR(u8 size, ARM64Reg Rt, ARM64Reg Rn, ArithOption Rm) +{ + EncodeLoadStoreRegisterOffset(size, false, Rt, Rn, Rm); +} +void ARM64FloatEmitter::LDR(u8 size, ARM64Reg Rt, ARM64Reg Rn, ArithOption Rm) +{ + EncodeLoadStoreRegisterOffset(size, true, Rt, Rn, Rm); +} + +void ARM64FloatEmitter::FABS(ARM64Reg Rd, ARM64Reg Rn) +{ + EmitScalar1Source(0, 0, IsDouble(Rd), 1, Rd, Rn); +} +void ARM64FloatEmitter::FNEG(ARM64Reg Rd, ARM64Reg Rn) +{ + EmitScalar1Source(0, 0, IsDouble(Rd), 2, Rd, Rn); +} +void ARM64FloatEmitter::FSQRT(ARM64Reg Rd, ARM64Reg Rn) +{ + EmitScalar1Source(0, 0, IsDouble(Rd), 3, Rd, Rn); +} + +// Scalar - 2 Source +void ARM64FloatEmitter::FADD(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) +{ + EmitScalar2Source(0, 0, IsDouble(Rd), 2, Rd, Rn, Rm); +} +void ARM64FloatEmitter::FMUL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) +{ + EmitScalar2Source(0, 0, IsDouble(Rd), 0, Rd, Rn, Rm); +} +void ARM64FloatEmitter::FSUB(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) +{ + EmitScalar2Source(0, 0, IsDouble(Rd), 3, Rd, Rn, Rm); +} +void ARM64FloatEmitter::FDIV(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) +{ + EmitScalar2Source(0, 0, IsDouble(Rd), 1, Rd, Rn, Rm); +} +void ARM64FloatEmitter::FMAX(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) +{ + EmitScalar2Source(0, 0, IsDouble(Rd), 4, Rd, Rn, Rm); +} +void ARM64FloatEmitter::FMIN(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) +{ + EmitScalar2Source(0, 0, IsDouble(Rd), 5, Rd, Rn, Rm); +} +void ARM64FloatEmitter::FMAXNM(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) +{ + EmitScalar2Source(0, 0, IsDouble(Rd), 6, Rd, Rn, Rm); +} +void ARM64FloatEmitter::FMINNM(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) +{ + EmitScalar2Source(0, 0, IsDouble(Rd), 7, Rd, Rn, Rm); +} +void ARM64FloatEmitter::FNMUL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) +{ + EmitScalar2Source(0, 0, IsDouble(Rd), 8, Rd, Rn, Rm); +} + +void ARM64FloatEmitter::FMADD(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ARM64Reg Ra) +{ + EmitScalar3Source(IsDouble(Rd), Rd, Rn, Rm, Ra, 0); +} +void ARM64FloatEmitter::FMSUB(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ARM64Reg Ra) +{ + EmitScalar3Source(IsDouble(Rd), Rd, Rn, Rm, Ra, 1); +} +void ARM64FloatEmitter::FNMADD(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ARM64Reg Ra) +{ + EmitScalar3Source(IsDouble(Rd), Rd, Rn, Rm, Ra, 2); +} +void ARM64FloatEmitter::FNMSUB(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ARM64Reg Ra) +{ + EmitScalar3Source(IsDouble(Rd), Rd, Rn, Rm, Ra, 3); +} + +void ARM64FloatEmitter::EmitScalar3Source(bool isDouble, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, + ARM64Reg Ra, int opcode) +{ + int type = isDouble ? 
1 : 0; + Rd = DecodeReg(Rd); + Rn = DecodeReg(Rn); + Rm = DecodeReg(Rm); + Ra = DecodeReg(Ra); + int o1 = opcode >> 1; + int o0 = opcode & 1; + m_emit->Write32((0x1F << 24) | (type << 22) | (o1 << 21) | (Rm << 16) | (o0 << 15) | (Ra << 10) | + (Rn << 5) | Rd); +} + +// Scalar floating point immediate +void ARM64FloatEmitter::FMOV(ARM64Reg Rd, uint8_t imm8) +{ + EmitScalarImm(0, 0, 0, 0, Rd, imm8); +} + +// Vector +void ARM64FloatEmitter::AND(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) +{ + EmitThreeSame(0, 0, 3, Rd, Rn, Rm); +} +void ARM64FloatEmitter::BSL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) +{ + EmitThreeSame(1, 1, 3, Rd, Rn, Rm); +} +void ARM64FloatEmitter::DUP(u8 size, ARM64Reg Rd, ARM64Reg Rn, u8 index) +{ + u32 imm5 = 0; + + if (size == 8) + { + imm5 = 1; + imm5 |= index << 1; + } + else if (size == 16) + { + imm5 = 2; + imm5 |= index << 2; + } + else if (size == 32) + { + imm5 = 4; + imm5 |= index << 3; + } + else if (size == 64) + { + imm5 = 8; + imm5 |= index << 4; + } + + EmitCopy(IsQuad(Rd), 0, imm5, 0, Rd, Rn); +} +void ARM64FloatEmitter::FABS(u8 size, ARM64Reg Rd, ARM64Reg Rn) +{ + Emit2RegMisc(IsQuad(Rd), 0, 2 | (size >> 6), 0xF, Rd, Rn); +} +void ARM64FloatEmitter::FADD(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) +{ + EmitThreeSame(0, size >> 6, 0x1A, Rd, Rn, Rm); +} +void ARM64FloatEmitter::FMAX(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) +{ + EmitThreeSame(0, size >> 6, 0b11110, Rd, Rn, Rm); +} +void ARM64FloatEmitter::FMLA(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) +{ + EmitThreeSame(0, size >> 6, 0x19, Rd, Rn, Rm); +} +void ARM64FloatEmitter::FMIN(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) +{ + EmitThreeSame(0, 2 | size >> 6, 0b11110, Rd, Rn, Rm); +} +void ARM64FloatEmitter::FCVTL(u8 size, ARM64Reg Rd, ARM64Reg Rn) +{ + Emit2RegMisc(false, 0, size >> 6, 0x17, Rd, Rn); +} +void ARM64FloatEmitter::FCVTL2(u8 size, ARM64Reg Rd, ARM64Reg Rn) +{ + Emit2RegMisc(true, 0, size >> 6, 0x17, Rd, Rn); +} +void ARM64FloatEmitter::FCVTN(u8 dest_size, ARM64Reg Rd, ARM64Reg Rn) +{ + Emit2RegMisc(IsQuad(Rd), 0, dest_size >> 5, 0x16, Rd, Rn); +} +void ARM64FloatEmitter::FCVTZS(u8 size, ARM64Reg Rd, ARM64Reg Rn) +{ + Emit2RegMisc(IsQuad(Rd), 0, 2 | (size >> 6), 0x1B, Rd, Rn); +} +void ARM64FloatEmitter::FCVTZU(u8 size, ARM64Reg Rd, ARM64Reg Rn) +{ + Emit2RegMisc(IsQuad(Rd), 1, 2 | (size >> 6), 0x1B, Rd, Rn); +} +void ARM64FloatEmitter::FDIV(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) +{ + EmitThreeSame(1, size >> 6, 0x1F, Rd, Rn, Rm); +} +void ARM64FloatEmitter::FMUL(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) +{ + EmitThreeSame(1, size >> 6, 0x1B, Rd, Rn, Rm); +} +void ARM64FloatEmitter::FNEG(u8 size, ARM64Reg Rd, ARM64Reg Rn) +{ + Emit2RegMisc(IsQuad(Rd), 1, 2 | (size >> 6), 0xF, Rd, Rn); +} +void ARM64FloatEmitter::FRECPE(u8 size, ARM64Reg Rd, ARM64Reg Rn) +{ + Emit2RegMisc(IsQuad(Rd), 0, 2 | (size >> 6), 0x1D, Rd, Rn); +} +void ARM64FloatEmitter::FRSQRTE(u8 size, ARM64Reg Rd, ARM64Reg Rn) +{ + Emit2RegMisc(IsQuad(Rd), 1, 2 | (size >> 6), 0x1D, Rd, Rn); +} +void ARM64FloatEmitter::FSUB(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) +{ + EmitThreeSame(0, 2 | (size >> 6), 0x1A, Rd, Rn, Rm); +} +void ARM64FloatEmitter::FMLS(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) +{ + EmitThreeSame(0, 2 | (size >> 6), 0x19, Rd, Rn, Rm); +} +void ARM64FloatEmitter::NOT(ARM64Reg Rd, ARM64Reg Rn) +{ + Emit2RegMisc(IsQuad(Rd), 1, 0, 5, Rd, Rn); +} +void ARM64FloatEmitter::ORR(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) +{ + EmitThreeSame(0, 2, 3, Rd, Rn, Rm); +} +void 
ARM64FloatEmitter::REV16(u8 size, ARM64Reg Rd, ARM64Reg Rn) +{ + Emit2RegMisc(IsQuad(Rd), 0, size >> 4, 1, Rd, Rn); +} +void ARM64FloatEmitter::REV32(u8 size, ARM64Reg Rd, ARM64Reg Rn) +{ + Emit2RegMisc(IsQuad(Rd), 1, size >> 4, 0, Rd, Rn); +} +void ARM64FloatEmitter::REV64(u8 size, ARM64Reg Rd, ARM64Reg Rn) +{ + Emit2RegMisc(IsQuad(Rd), 0, size >> 4, 0, Rd, Rn); +} +void ARM64FloatEmitter::SCVTF(u8 size, ARM64Reg Rd, ARM64Reg Rn) +{ + Emit2RegMisc(IsQuad(Rd), 0, size >> 6, 0x1D, Rd, Rn); +} +void ARM64FloatEmitter::UCVTF(u8 size, ARM64Reg Rd, ARM64Reg Rn) +{ + Emit2RegMisc(IsQuad(Rd), 1, size >> 6, 0x1D, Rd, Rn); +} +void ARM64FloatEmitter::SCVTF(u8 size, ARM64Reg Rd, ARM64Reg Rn, int scale) +{ + int imm = size * 2 - scale; + EmitShiftImm(IsQuad(Rd), 0, imm >> 3, imm & 7, 0x1C, Rd, Rn); +} +void ARM64FloatEmitter::UCVTF(u8 size, ARM64Reg Rd, ARM64Reg Rn, int scale) +{ + int imm = size * 2 - scale; + EmitShiftImm(IsQuad(Rd), 1, imm >> 3, imm & 7, 0x1C, Rd, Rn); +} +void ARM64FloatEmitter::SQXTN(u8 dest_size, ARM64Reg Rd, ARM64Reg Rn) +{ + Emit2RegMisc(false, 0, dest_size >> 4, 0b10100, Rd, Rn); +} +void ARM64FloatEmitter::SQXTN2(u8 dest_size, ARM64Reg Rd, ARM64Reg Rn) +{ + Emit2RegMisc(true, 0, dest_size >> 4, 0b10100, Rd, Rn); +} +void ARM64FloatEmitter::UQXTN(u8 dest_size, ARM64Reg Rd, ARM64Reg Rn) +{ + Emit2RegMisc(false, 1, dest_size >> 4, 0b10100, Rd, Rn); +} +void ARM64FloatEmitter::UQXTN2(u8 dest_size, ARM64Reg Rd, ARM64Reg Rn) +{ + Emit2RegMisc(true, 1, dest_size >> 4, 0b10100, Rd, Rn); +} +void ARM64FloatEmitter::XTN(u8 dest_size, ARM64Reg Rd, ARM64Reg Rn) +{ + Emit2RegMisc(false, 0, dest_size >> 4, 0b10010, Rd, Rn); +} +void ARM64FloatEmitter::XTN2(u8 dest_size, ARM64Reg Rd, ARM64Reg Rn) +{ + Emit2RegMisc(true, 0, dest_size >> 4, 0b10010, Rd, Rn); +} + +// Move +void ARM64FloatEmitter::DUP(u8 size, ARM64Reg Rd, ARM64Reg Rn) +{ + u32 imm5 = 0; + + if (size == 8) + imm5 = 1; + else if (size == 16) + imm5 = 2; + else if (size == 32) + imm5 = 4; + else if (size == 64) + imm5 = 8; + + EmitCopy(IsQuad(Rd), 0, imm5, 1, Rd, Rn); +} +void ARM64FloatEmitter::INS(u8 size, ARM64Reg Rd, u8 index, ARM64Reg Rn) +{ + u32 imm5 = 0; + + if (size == 8) + { + imm5 = 1; + imm5 |= index << 1; + } + else if (size == 16) + { + imm5 = 2; + imm5 |= index << 2; + } + else if (size == 32) + { + imm5 = 4; + imm5 |= index << 3; + } + else if (size == 64) + { + imm5 = 8; + imm5 |= index << 4; + } + + EmitCopy(1, 0, imm5, 3, Rd, Rn); +} +void ARM64FloatEmitter::INS(u8 size, ARM64Reg Rd, u8 index1, ARM64Reg Rn, u8 index2) +{ + u32 imm5 = 0, imm4 = 0; + + if (size == 8) + { + imm5 = 1; + imm5 |= index1 << 1; + imm4 = index2; + } + else if (size == 16) + { + imm5 = 2; + imm5 |= index1 << 2; + imm4 = index2 << 1; + } + else if (size == 32) + { + imm5 = 4; + imm5 |= index1 << 3; + imm4 = index2 << 2; + } + else if (size == 64) + { + imm5 = 8; + imm5 |= index1 << 4; + imm4 = index2 << 3; + } + + EmitCopy(1, 1, imm5, imm4, Rd, Rn); +} + +void ARM64FloatEmitter::UMOV(u8 size, ARM64Reg Rd, ARM64Reg Rn, u8 index) +{ + bool b64Bit = Is64Bit(Rd); + ASSERT_MSG(DYNA_REC, Rd < SP, "%s destination must be a GPR!", __func__); + ASSERT_MSG(DYNA_REC, !(b64Bit && size != 64), + "%s must have a size of 64 when destination is 64bit!", __func__); + u32 imm5 = 0; + + if (size == 8) + { + imm5 = 1; + imm5 |= index << 1; + } + else if (size == 16) + { + imm5 = 2; + imm5 |= index << 2; + } + else if (size == 32) + { + imm5 = 4; + imm5 |= index << 3; + } + else if (size == 64) + { + imm5 = 8; + imm5 |= index << 4; + } + + EmitCopy(b64Bit, 
0, imm5, 7, Rd, Rn); +} +void ARM64FloatEmitter::SMOV(u8 size, ARM64Reg Rd, ARM64Reg Rn, u8 index) +{ + bool b64Bit = Is64Bit(Rd); + ASSERT_MSG(DYNA_REC, Rd < SP, "%s destination must be a GPR!", __func__); + ASSERT_MSG(DYNA_REC, size != 64, "%s doesn't support 64bit destination. Use UMOV!", __func__); + u32 imm5 = 0; + + if (size == 8) + { + imm5 = 1; + imm5 |= index << 1; + } + else if (size == 16) + { + imm5 = 2; + imm5 |= index << 2; + } + else if (size == 32) + { + imm5 = 4; + imm5 |= index << 3; + } + + EmitCopy(b64Bit, 0, imm5, 5, Rd, Rn); +} + +// One source +void ARM64FloatEmitter::FCVT(u8 size_to, u8 size_from, ARM64Reg Rd, ARM64Reg Rn) +{ + u32 dst_encoding = 0; + u32 src_encoding = 0; + + if (size_to == 16) + dst_encoding = 3; + else if (size_to == 32) + dst_encoding = 0; + else if (size_to == 64) + dst_encoding = 1; + + if (size_from == 16) + src_encoding = 3; + else if (size_from == 32) + src_encoding = 0; + else if (size_from == 64) + src_encoding = 1; + + Emit1Source(0, 0, src_encoding, 4 | dst_encoding, Rd, Rn); +} + +void ARM64FloatEmitter::SCVTF(ARM64Reg Rd, ARM64Reg Rn) +{ + if (IsScalar(Rn)) + { + // Source is in FP register (like destination!). We must use a vector encoding. + bool sign = false; + Rd = DecodeReg(Rd); + Rn = DecodeReg(Rn); + int sz = IsDouble(Rn); + Write32((0x5e << 24) | (sign << 29) | (sz << 22) | (0x876 << 10) | (Rn << 5) | Rd); + } + else + { + bool sf = Is64Bit(Rn); + u32 type = 0; + if (IsDouble(Rd)) + type = 1; + EmitConversion(sf, 0, type, 0, 2, Rd, Rn); + } +} + +void ARM64FloatEmitter::UCVTF(ARM64Reg Rd, ARM64Reg Rn) +{ + if (IsScalar(Rn)) + { + // Source is in FP register (like destination!). We must use a vector encoding. + bool sign = true; + Rd = DecodeReg(Rd); + Rn = DecodeReg(Rn); + int sz = IsDouble(Rn); + Write32((0x5e << 24) | (sign << 29) | (sz << 22) | (0x876 << 10) | (Rn << 5) | Rd); + } + else + { + bool sf = Is64Bit(Rn); + u32 type = 0; + if (IsDouble(Rd)) + type = 1; + + EmitConversion(sf, 0, type, 0, 3, Rd, Rn); + } +} + +void ARM64FloatEmitter::SCVTF(ARM64Reg Rd, ARM64Reg Rn, int scale) +{ + bool sf = Is64Bit(Rn); + u32 type = 0; + if (IsDouble(Rd)) + type = 1; + + EmitConversion2(sf, 0, false, type, 0, 2, 64 - scale, Rd, Rn); +} + +void ARM64FloatEmitter::UCVTF(ARM64Reg Rd, ARM64Reg Rn, int scale) +{ + bool sf = Is64Bit(Rn); + u32 type = 0; + if (IsDouble(Rd)) + type = 1; + + EmitConversion2(sf, 0, false, type, 0, 3, 64 - scale, Rd, Rn); +} + +void ARM64FloatEmitter::FCMP(ARM64Reg Rn, ARM64Reg Rm) +{ + EmitCompare(0, 0, 0, 0, Rn, Rm); +} +void ARM64FloatEmitter::FCMP(ARM64Reg Rn) +{ + EmitCompare(0, 0, 0, 8, Rn, (ARM64Reg)0); +} +void ARM64FloatEmitter::FCMPE(ARM64Reg Rn, ARM64Reg Rm) +{ + EmitCompare(0, 0, 0, 0x10, Rn, Rm); +} +void ARM64FloatEmitter::FCMPE(ARM64Reg Rn) +{ + EmitCompare(0, 0, 0, 0x18, Rn, (ARM64Reg)0); +} +void ARM64FloatEmitter::FCMEQ(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) +{ + EmitThreeSame(0, size >> 6, 0x1C, Rd, Rn, Rm); +} +void ARM64FloatEmitter::FCMEQ(u8 size, ARM64Reg Rd, ARM64Reg Rn) +{ + Emit2RegMisc(IsQuad(Rd), 0, 2 | (size >> 6), 0xD, Rd, Rn); +} +void ARM64FloatEmitter::FCMGE(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) +{ + EmitThreeSame(1, size >> 6, 0x1C, Rd, Rn, Rm); +} +void ARM64FloatEmitter::FCMGE(u8 size, ARM64Reg Rd, ARM64Reg Rn) +{ + Emit2RegMisc(IsQuad(Rd), 1, 2 | (size >> 6), 0x0C, Rd, Rn); +} +void ARM64FloatEmitter::FCMGT(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) +{ + EmitThreeSame(1, 2 | (size >> 6), 0x1C, Rd, Rn, Rm); +} +void ARM64FloatEmitter::FCMGT(u8 size, 
ARM64Reg Rd, ARM64Reg Rn)
+{
+  Emit2RegMisc(IsQuad(Rd), 0, 2 | (size >> 6), 0x0C, Rd, Rn);
+}
+void ARM64FloatEmitter::FCMLE(u8 size, ARM64Reg Rd, ARM64Reg Rn)
+{
+  Emit2RegMisc(IsQuad(Rd), 1, 2 | (size >> 6), 0xD, Rd, Rn);
+}
+void ARM64FloatEmitter::FCMLT(u8 size, ARM64Reg Rd, ARM64Reg Rn)
+{
+  Emit2RegMisc(IsQuad(Rd), 0, 2 | (size >> 6), 0xE, Rd, Rn);
+}
+
+void ARM64FloatEmitter::FCSEL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, CCFlags cond)
+{
+  EmitCondSelect(0, 0, cond, Rd, Rn, Rm);
+}
+
+// Permute
+void ARM64FloatEmitter::UZP1(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
+{
+  EmitPermute(size, 0b001, Rd, Rn, Rm);
+}
+void ARM64FloatEmitter::TRN1(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
+{
+  EmitPermute(size, 0b010, Rd, Rn, Rm);
+}
+void ARM64FloatEmitter::ZIP1(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
+{
+  EmitPermute(size, 0b011, Rd, Rn, Rm);
+}
+void ARM64FloatEmitter::UZP2(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
+{
+  EmitPermute(size, 0b101, Rd, Rn, Rm);
+}
+void ARM64FloatEmitter::TRN2(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
+{
+  EmitPermute(size, 0b110, Rd, Rn, Rm);
+}
+void ARM64FloatEmitter::ZIP2(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm)
+{
+  EmitPermute(size, 0b111, Rd, Rn, Rm);
+}
+
+// Shift by immediate
+void ARM64FloatEmitter::SSHLL(u8 src_size, ARM64Reg Rd, ARM64Reg Rn, u32 shift)
+{
+  SSHLL(src_size, Rd, Rn, shift, false);
+}
+void ARM64FloatEmitter::SSHLL2(u8 src_size, ARM64Reg Rd, ARM64Reg Rn, u32 shift)
+{
+  SSHLL(src_size, Rd, Rn, shift, true);
+}
+void ARM64FloatEmitter::SHRN(u8 dest_size, ARM64Reg Rd, ARM64Reg Rn, u32 shift)
+{
+  SHRN(dest_size, Rd, Rn, shift, false);
+}
+void ARM64FloatEmitter::SHRN2(u8 dest_size, ARM64Reg Rd, ARM64Reg Rn, u32 shift)
+{
+  SHRN(dest_size, Rd, Rn, shift, true);
+}
+void ARM64FloatEmitter::USHLL(u8 src_size, ARM64Reg Rd, ARM64Reg Rn, u32 shift)
+{
+  USHLL(src_size, Rd, Rn, shift, false);
+}
+void ARM64FloatEmitter::USHLL2(u8 src_size, ARM64Reg Rd, ARM64Reg Rn, u32 shift)
+{
+  USHLL(src_size, Rd, Rn, shift, true);
+}
+void ARM64FloatEmitter::SXTL(u8 src_size, ARM64Reg Rd, ARM64Reg Rn)
+{
+  SXTL(src_size, Rd, Rn, false);
+}
+void ARM64FloatEmitter::SXTL2(u8 src_size, ARM64Reg Rd, ARM64Reg Rn)
+{
+  SXTL(src_size, Rd, Rn, true);
+}
+void ARM64FloatEmitter::UXTL(u8 src_size, ARM64Reg Rd, ARM64Reg Rn)
+{
+  UXTL(src_size, Rd, Rn, false);
+}
+void ARM64FloatEmitter::UXTL2(u8 src_size, ARM64Reg Rd, ARM64Reg Rn)
+{
+  UXTL(src_size, Rd, Rn, true);
+}
+
+void ARM64FloatEmitter::SSHLL(u8 src_size, ARM64Reg Rd, ARM64Reg Rn, u32 shift, bool upper)
+{
+  ASSERT_MSG(DYNA_REC, shift < src_size, "%s shift amount must be less than the element size!",
+             __func__);
+  u32 immh = 0;
+  u32 immb = shift & 0xFFF;
+
+  if (src_size == 8)
+  {
+    immh = 1;
+  }
+  else if (src_size == 16)
+  {
+    immh = 2 | ((shift >> 3) & 1);
+  }
+  else if (src_size == 32)
+  {
+    immh = 4 | ((shift >> 3) & 3);
+  }
+  EmitShiftImm(upper, 0, immh, immb, 0b10100, Rd, Rn);
+}
+
+void ARM64FloatEmitter::USHLL(u8 src_size, ARM64Reg Rd, ARM64Reg Rn, u32 shift, bool upper)
+{
+  ASSERT_MSG(DYNA_REC, shift < src_size, "%s shift amount must be less than the element size!",
+             __func__);
+  u32 immh = 0;
+  u32 immb = shift & 0xFFF;
+
+  if (src_size == 8)
+  {
+    immh = 1;
+  }
+  else if (src_size == 16)
+  {
+    immh = 2 | ((shift >> 3) & 1);
+  }
+  else if (src_size == 32)
+  {
+    immh = 4 | ((shift >> 3) & 3);
+  }
+  EmitShiftImm(upper, 1, immh, immb, 0b10100, Rd, Rn);
+}
+
+void ARM64FloatEmitter::SHRN(u8 dest_size, ARM64Reg Rd, ARM64Reg Rn, u32 shift, bool upper)
+{
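+  // Width/shift packing sketch (same scheme as SSHLL/USHLL above): the
+  // leading one of immh marks the element width (0b0001 = 8-bit,
+  // 0b001x = 16-bit, 0b01xx = 32-bit) while the bits below it, together
+  // with immb, carry the shift amount.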
+  ASSERT_MSG(DYNA_REC, shift < dest_size, "%s shift amount must be less than the element size!",
+             __func__);
+  u32 immh = 0;
+  u32 immb = shift & 0xFFF;
+
+  if (dest_size == 8)
+  {
+    immh = 1;
+  }
+  else if (dest_size == 16)
+  {
+    immh = 2 | ((shift >> 3) & 1);
+  }
+  else if (dest_size == 32)
+  {
+    immh = 4 | ((shift >> 3) & 3);
+  }
+  EmitShiftImm(upper, 1, immh, immb, 0b10000, Rd, Rn);
+}
+
+void ARM64FloatEmitter::SXTL(u8 src_size, ARM64Reg Rd, ARM64Reg Rn, bool upper)
+{
+  SSHLL(src_size, Rd, Rn, 0, upper);
+}
+
+void ARM64FloatEmitter::UXTL(u8 src_size, ARM64Reg Rd, ARM64Reg Rn, bool upper)
+{
+  USHLL(src_size, Rd, Rn, 0, upper);
+}
+
+// vector x indexed element
+void ARM64FloatEmitter::FMUL(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, u8 index)
+{
+  ASSERT_MSG(DYNA_REC, size == 32 || size == 64, "%s only supports 32bit or 64bit size!", __func__);
+
+  bool L = false;
+  bool H = false;
+  if (size == 32)
+  {
+    L = index & 1;
+    H = (index >> 1) & 1;
+  }
+  else if (size == 64)
+  {
+    H = index == 1;
+  }
+
+  EmitVectorxElement(0, 2 | (size >> 6), L, 0x9, H, Rd, Rn, Rm);
+}
+
+void ARM64FloatEmitter::FMLA(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, u8 index)
+{
+  ASSERT_MSG(DYNA_REC, size == 32 || size == 64, "%s only supports 32bit or 64bit size!", __func__);
+
+  bool L = false;
+  bool H = false;
+  if (size == 32)
+  {
+    L = index & 1;
+    H = (index >> 1) & 1;
+  }
+  else if (size == 64)
+  {
+    H = index == 1;
+  }
+
+  EmitVectorxElement(0, 2 | (size >> 6), L, 1, H, Rd, Rn, Rm);
+}
+
+// Modified Immediate
+void ARM64FloatEmitter::MOVI(u8 size, ARM64Reg Rd, u64 imm, u8 shift)
+{
+  bool Q = IsQuad(Rd);
+  u8 cmode = 0;
+  u8 op = 0;
+  u8 abcdefgh = imm & 0xFF;
+  if (size == 8)
+  {
+    ASSERT_MSG(DYNA_REC, shift == 0, "%s(size8) doesn't support shift!", __func__);
+    ASSERT_MSG(DYNA_REC, !(imm & ~0xFFULL), "%s(size8) only supports 8bit values!", __func__);
+  }
+  else if (size == 16)
+  {
+    ASSERT_MSG(DYNA_REC, shift == 0 || shift == 8, "%s(size16) only supports shift of {0, 8}!",
+               __func__);
+    ASSERT_MSG(DYNA_REC, !(imm & ~0xFFULL), "%s(size16) only supports 8bit values!", __func__);
+
+    if (shift == 8)
+      cmode |= 2;
+  }
+  else if (size == 32)
+  {
+    ASSERT_MSG(DYNA_REC, shift == 0 || shift == 8 || shift == 16 || shift == 24,
+               "%s(size32) only supports shift of {0, 8, 16, 24}!", __func__);
+    // XXX: Implement support for MOVI - shifting ones
+    ASSERT_MSG(DYNA_REC, !(imm & ~0xFFULL), "%s(size32) only supports 8bit values!", __func__);
+    switch (shift)
+    {
+    case 8:
+      cmode |= 2;
+      break;
+    case 16:
+      cmode |= 4;
+      break;
+    case 24:
+      cmode |= 6;
+      break;
+    default:
+      break;
+    }
+  }
+  else  // 64
+  {
+    ASSERT_MSG(DYNA_REC, shift == 0, "%s(size64) doesn't support shift!", __func__);
+
+    op = 1;
+    cmode = 0xE;
+    abcdefgh = 0;
+    for (int i = 0; i < 8; ++i)
+    {
+      u8 tmp = (imm >> (i << 3)) & 0xFF;
+      ASSERT_MSG(DYNA_REC, tmp == 0xFF || tmp == 0, "%s(size64) Invalid immediate!", __func__);
+      if (tmp == 0xFF)
+        abcdefgh |= (1 << i);
+    }
+  }
+  EncodeModImm(Q, op, cmode, 0, Rd, abcdefgh);
+}
+
+void ARM64FloatEmitter::BIC(u8 size, ARM64Reg Rd, u8 imm, u8 shift)
+{
+  bool Q = IsQuad(Rd);
+  u8 cmode = 1;
+  u8 op = 1;
+  if (size == 16)
+  {
+    ASSERT_MSG(DYNA_REC, shift == 0 || shift == 8, "%s(size16) only supports shift of {0, 8}!",
+               __func__);
+
+    if (shift == 8)
+      cmode |= 2;
+  }
+  else if (size == 32)
+  {
+    ASSERT_MSG(DYNA_REC, shift == 0 || shift == 8 || shift == 16 || shift == 24,
+               "%s(size32) only supports shift of {0, 8, 16, 24}!", __func__);
+    // XXX: Implement support for MOVI - shifting ones variant
+    switch (shift)
+    {
+    case 8:
+      cmode |= 2;
+      break;
+    case 16:
+      cmode |= 4;
+      break;
+    case 24:
+      cmode |= 6;
+      break;
+    default:
+      break;
+    }
+  }
+  else
+  {
+    ASSERT_MSG(DYNA_REC, false, "%s only supports size of {16, 32}!", __func__);
+  }
+  EncodeModImm(Q, op, cmode, 0, Rd, imm);
+}
+
+void ARM64FloatEmitter::ABI_PushRegisters(BitSet32 registers, ARM64Reg tmp)
+{
+  bool bundled_loadstore = false;
+
+  for (int i = 0; i < 32; ++i)
+  {
+    if (!registers[i])
+      continue;
+
+    int count = 0;
+    while (++count < 4 && (i + count) < 32 && registers[i + count])
+    {
+    }
+    if (count > 1)
+    {
+      bundled_loadstore = true;
+      break;
+    }
+  }
+
+  if (bundled_loadstore && tmp != INVALID_REG)
+  {
+    int num_regs = registers.Count();
+    m_emit->SUB(SP, SP, num_regs * 16);
+    m_emit->ADD(tmp, SP, 0);
+    std::vector<ARM64Reg> island_regs;
+    for (int i = 0; i < 32; ++i)
+    {
+      if (!registers[i])
+        continue;
+
+      int count = 0;
+
+      // 0 = true
+      // 1 < 4 && registers[i + 1] true!
+      // 2 < 4 && registers[i + 2] true!
+      // 3 < 4 && registers[i + 3] true!
+      // 4 < 4 && registers[i + 4] false!
+      while (++count < 4 && (i + count) < 32 && registers[i + count])
+      {
+      }
+
+      if (count == 1)
+        island_regs.push_back((ARM64Reg)(Q0 + i));
+      else
+        ST1(64, count, INDEX_POST, (ARM64Reg)(Q0 + i), tmp);
+
+      i += count - 1;
+    }
+
+    // Handle island registers
+    std::vector<ARM64Reg> pair_regs;
+    for (auto& it : island_regs)
+    {
+      pair_regs.push_back(it);
+      if (pair_regs.size() == 2)
+      {
+        STP(128, INDEX_POST, pair_regs[0], pair_regs[1], tmp, 32);
+        pair_regs.clear();
+      }
+    }
+    if (pair_regs.size())
+      STR(128, INDEX_POST, pair_regs[0], tmp, 16);
+  }
+  else
+  {
+    std::vector<ARM64Reg> pair_regs;
+    for (auto it : registers)
+    {
+      pair_regs.push_back((ARM64Reg)(Q0 + it));
+      if (pair_regs.size() == 2)
+      {
+        STP(128, INDEX_PRE, pair_regs[0], pair_regs[1], SP, -32);
+        pair_regs.clear();
+      }
+    }
+    if (pair_regs.size())
+      STR(128, INDEX_PRE, pair_regs[0], SP, -16);
+  }
+}
+void ARM64FloatEmitter::ABI_PopRegisters(BitSet32 registers, ARM64Reg tmp)
+{
+  bool bundled_loadstore = false;
+  int num_regs = registers.Count();
+
+  for (int i = 0; i < 32; ++i)
+  {
+    if (!registers[i])
+      continue;
+
+    int count = 0;
+    while (++count < 4 && (i + count) < 32 && registers[i + count])
+    {
+    }
+    if (count > 1)
+    {
+      bundled_loadstore = true;
+      break;
+    }
+  }
+
+  if (bundled_loadstore && tmp != INVALID_REG)
+  {
+    // The temporary register is only used to indicate that we can use this code path
+    std::vector<ARM64Reg> island_regs;
+    for (int i = 0; i < 32; ++i)
+    {
+      if (!registers[i])
+        continue;
+
+      int count = 0;
+      while (++count < 4 && (i + count) < 32 && registers[i + count])
+      {
+      }
+
+      if (count == 1)
+        island_regs.push_back((ARM64Reg)(Q0 + i));
+      else
+        LD1(64, count, INDEX_POST, (ARM64Reg)(Q0 + i), SP);
+
+      i += count - 1;
+    }
+
+    // Handle island registers
+    std::vector<ARM64Reg> pair_regs;
+    for (auto& it : island_regs)
+    {
+      pair_regs.push_back(it);
+      if (pair_regs.size() == 2)
+      {
+        LDP(128, INDEX_POST, pair_regs[0], pair_regs[1], SP, 32);
+        pair_regs.clear();
+      }
+    }
+    if (pair_regs.size())
+      LDR(128, INDEX_POST, pair_regs[0], SP, 16);
+  }
+  else
+  {
+    bool odd = num_regs % 2;
+    std::vector<ARM64Reg> pair_regs;
+    for (int i = 31; i >= 0; --i)
+    {
+      if (!registers[i])
+        continue;
+
+      if (odd)
+      {
+        // First load must be a regular LDR if odd
+        odd = false;
+        LDR(128, INDEX_POST, (ARM64Reg)(Q0 + i), SP, 16);
+      }
+      else
+      {
+        pair_regs.push_back((ARM64Reg)(Q0 + i));
+        if (pair_regs.size() == 2)
+        {
+          LDP(128, INDEX_POST, pair_regs[1], pair_regs[0], SP, 32);
+          pair_regs.clear();
+        }
+      }
+    }
+  }
+}
+
+void
ARM64XEmitter::ANDI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch) +{ + unsigned int n, imm_s, imm_r; + if (!Is64Bit(Rn)) + imm &= 0xFFFFFFFF; + if (IsImmLogical(imm, Is64Bit(Rn) ? 64 : 32, &n, &imm_s, &imm_r)) + { + AND(Rd, Rn, imm_r, imm_s, n != 0); + } + else + { + ASSERT_MSG(DYNA_REC, scratch != INVALID_REG, + "ANDI2R - failed to construct logical immediate value from %08x, need scratch", + (u32)imm); + MOVI2R(scratch, imm); + AND(Rd, Rn, scratch); + } +} + +void ARM64XEmitter::ORRI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch) +{ + unsigned int n, imm_s, imm_r; + if (IsImmLogical(imm, Is64Bit(Rn) ? 64 : 32, &n, &imm_s, &imm_r)) + { + ORR(Rd, Rn, imm_r, imm_s, n != 0); + } + else + { + ASSERT_MSG(DYNA_REC, scratch != INVALID_REG, + "ORRI2R - failed to construct logical immediate value from %08x, need scratch", + (u32)imm); + MOVI2R(scratch, imm); + ORR(Rd, Rn, scratch); + } +} + +void ARM64XEmitter::EORI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch) +{ + unsigned int n, imm_s, imm_r; + if (IsImmLogical(imm, Is64Bit(Rn) ? 64 : 32, &n, &imm_s, &imm_r)) + { + EOR(Rd, Rn, imm_r, imm_s, n != 0); + } + else + { + ASSERT_MSG(DYNA_REC, scratch != INVALID_REG, + "EORI2R - failed to construct logical immediate value from %08x, need scratch", + (u32)imm); + MOVI2R(scratch, imm); + EOR(Rd, Rn, scratch); + } +} + +void ARM64XEmitter::ANDSI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch) +{ + unsigned int n, imm_s, imm_r; + if (IsImmLogical(imm, Is64Bit(Rn) ? 64 : 32, &n, &imm_s, &imm_r)) + { + ANDS(Rd, Rn, imm_r, imm_s, n != 0); + } + else + { + ASSERT_MSG(DYNA_REC, scratch != INVALID_REG, + "ANDSI2R - failed to construct logical immediate value from %08x, need scratch", + (u32)imm); + MOVI2R(scratch, imm); + ANDS(Rd, Rn, scratch); + } +} + +void ARM64XEmitter::AddImmediate(ARM64Reg Rd, ARM64Reg Rn, u64 imm, bool shift, bool negative, + bool flags) +{ + switch ((negative << 1) | flags) + { + case 0: + ADD(Rd, Rn, imm, shift); + break; + case 1: + ADDS(Rd, Rn, imm, shift); + break; + case 2: + SUB(Rd, Rn, imm, shift); + break; + case 3: + SUBS(Rd, Rn, imm, shift); + break; + } +} + +void ARM64XEmitter::ADDI2R_internal(ARM64Reg Rd, ARM64Reg Rn, u64 imm, bool negative, bool flags, + ARM64Reg scratch) +{ + bool has_scratch = scratch != INVALID_REG; + u64 imm_neg = Is64Bit(Rd) ? -imm : -imm & 0xFFFFFFFFuLL; + bool neg_neg = negative ? false : true; + + // Fast paths, aarch64 immediate instructions + // Try them all first + if (imm <= 0xFFF) + { + AddImmediate(Rd, Rn, imm, false, negative, flags); + return; + } + if (imm <= 0xFFFFFF && (imm & 0xFFF) == 0) + { + AddImmediate(Rd, Rn, imm >> 12, true, negative, flags); + return; + } + if (imm_neg <= 0xFFF) + { + AddImmediate(Rd, Rn, imm_neg, false, neg_neg, flags); + return; + } + if (imm_neg <= 0xFFFFFF && (imm_neg & 0xFFF) == 0) + { + AddImmediate(Rd, Rn, imm_neg >> 12, true, neg_neg, flags); + return; + } + + // ADD+ADD is slower than MOVK+ADD, but inplace. + // But it supports a few more bits, so use it to avoid MOVK+MOVK+ADD. + // As this splits the addition in two parts, this must not be done on setting flags. 
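+  // Worked example under these rules: imm = 0x123456 fits neither 12-bit
+  // form alone, so it becomes ADD Rd, Rn, #0x456 followed by
+  // ADD Rd, Rd, #0x123, LSL #12 - two instructions, no scratch register.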
+ if (!flags && (imm >= 0x10000u || !has_scratch) && imm < 0x1000000u) + { + AddImmediate(Rd, Rn, imm & 0xFFF, false, negative, false); + AddImmediate(Rd, Rd, imm >> 12, true, negative, false); + return; + } + if (!flags && (imm_neg >= 0x10000u || !has_scratch) && imm_neg < 0x1000000u) + { + AddImmediate(Rd, Rn, imm_neg & 0xFFF, false, neg_neg, false); + AddImmediate(Rd, Rd, imm_neg >> 12, true, neg_neg, false); + return; + } + + ASSERT_MSG(DYNA_REC, has_scratch, + "ADDI2R - failed to construct arithmetic immediate value from %08x, need scratch", + (u32)imm); + + negative ^= MOVI2R2(scratch, imm, imm_neg); + switch ((negative << 1) | flags) + { + case 0: + ADD(Rd, Rn, scratch); + break; + case 1: + ADDS(Rd, Rn, scratch); + break; + case 2: + SUB(Rd, Rn, scratch); + break; + case 3: + SUBS(Rd, Rn, scratch); + break; + } +} + +void ARM64XEmitter::ADDI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch) +{ + ADDI2R_internal(Rd, Rn, imm, false, false, scratch); +} + +void ARM64XEmitter::ADDSI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch) +{ + ADDI2R_internal(Rd, Rn, imm, false, true, scratch); +} + +void ARM64XEmitter::SUBI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch) +{ + ADDI2R_internal(Rd, Rn, imm, true, false, scratch); +} + +void ARM64XEmitter::SUBSI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch) +{ + ADDI2R_internal(Rd, Rn, imm, true, true, scratch); +} + +void ARM64XEmitter::CMPI2R(ARM64Reg Rn, u64 imm, ARM64Reg scratch) +{ + ADDI2R_internal(Is64Bit(Rn) ? ZR : WZR, Rn, imm, true, true, scratch); +} + +bool ARM64XEmitter::TryADDI2R(ARM64Reg Rd, ARM64Reg Rn, u32 imm) +{ + u32 val; + bool shift; + if (IsImmArithmetic(imm, &val, &shift)) + ADD(Rd, Rn, val, shift); + else + return false; + + return true; +} + +bool ARM64XEmitter::TrySUBI2R(ARM64Reg Rd, ARM64Reg Rn, u32 imm) +{ + u32 val; + bool shift; + if (IsImmArithmetic(imm, &val, &shift)) + SUB(Rd, Rn, val, shift); + else + return false; + + return true; +} + +bool ARM64XEmitter::TryCMPI2R(ARM64Reg Rn, u32 imm) +{ + u32 val; + bool shift; + if (IsImmArithmetic(imm, &val, &shift)) + CMP(Rn, val, shift); + else + return false; + + return true; +} + +bool ARM64XEmitter::TryANDI2R(ARM64Reg Rd, ARM64Reg Rn, u32 imm) +{ + u32 n, imm_r, imm_s; + if (IsImmLogical(imm, 32, &n, &imm_s, &imm_r)) + AND(Rd, Rn, imm_r, imm_s, n != 0); + else + return false; + + return true; +} +bool ARM64XEmitter::TryORRI2R(ARM64Reg Rd, ARM64Reg Rn, u32 imm) +{ + u32 n, imm_r, imm_s; + if (IsImmLogical(imm, 32, &n, &imm_s, &imm_r)) + ORR(Rd, Rn, imm_r, imm_s, n != 0); + else + return false; + + return true; +} +bool ARM64XEmitter::TryEORI2R(ARM64Reg Rd, ARM64Reg Rn, u32 imm) +{ + u32 n, imm_r, imm_s; + if (IsImmLogical(imm, 32, &n, &imm_s, &imm_r)) + EOR(Rd, Rn, imm_r, imm_s, n != 0); + else + return false; + + return true; +} + +void ARM64FloatEmitter::MOVI2F(ARM64Reg Rd, float value, ARM64Reg scratch, bool negate) +{ + ASSERT_MSG(DYNA_REC, !IsDouble(Rd), "MOVI2F does not yet support double precision"); + uint8_t imm8; + if (value == 0.0) + { + FMOV(Rd, IsDouble(Rd) ? ZR : WZR); + if (negate) + FNEG(Rd, Rd); + // TODO: There are some other values we could generate with the float-imm instruction, like + // 1.0... 
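+    // (For reference, the FMOV imm8 path below reaches exactly the values
+    // +/-(16..31)/16 * 2^e for e in [-3, 4], e.g. 1.0, 0.5 or 3.875.)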
+ } + else if (FPImm8FromFloat(value, &imm8)) + { + FMOV(Rd, imm8); + } + else + { + ASSERT_MSG(DYNA_REC, scratch != INVALID_REG, + "Failed to find a way to generate FP immediate %f without scratch", value); + if (negate) + value = -value; + + const u32 ival = Common::BitCast(value); + m_emit->MOVI2R(scratch, ival); + FMOV(Rd, scratch); + } +} + +// TODO: Quite a few values could be generated easily using the MOVI instruction and friends. +void ARM64FloatEmitter::MOVI2FDUP(ARM64Reg Rd, float value, ARM64Reg scratch) +{ + // TODO: Make it work with more element sizes + // TODO: Optimize - there are shorter solution for many values + ARM64Reg s = (ARM64Reg)(S0 + DecodeReg(Rd)); + MOVI2F(s, value, scratch); + DUP(32, Rd, Rd, 0); +} + +} // namespace Arm64Gen diff --git a/src/dolphin/Arm64Emitter.h b/src/dolphin/Arm64Emitter.h new file mode 100644 index 0000000..4cb9ff7 --- /dev/null +++ b/src/dolphin/Arm64Emitter.h @@ -0,0 +1,1152 @@ +// Copyright 2015 Dolphin Emulator Project +// Licensed under GPLv2+ +// Refer to the license.txt file included. + +#pragma once + +#include +#include + +#include "ArmCommon.h" +#include "Assert.h" +#include "BitSet.h" +#include "Compat.h" + +namespace Arm64Gen +{ +// X30 serves a dual purpose as a link register +// Encoded as +// Types: +// 000 - 32bit GPR +// 001 - 64bit GPR +// 010 - VFP single precision +// 100 - VFP double precision +// 110 - VFP quad precision +enum ARM64Reg +{ + // 32bit registers + W0 = 0, + W1, + W2, + W3, + W4, + W5, + W6, + W7, + W8, + W9, + W10, + W11, + W12, + W13, + W14, + W15, + W16, + W17, + W18, + W19, + W20, + W21, + W22, + W23, + W24, + W25, + W26, + W27, + W28, + W29, + W30, + + WSP, // 32bit stack pointer + + // 64bit registers + X0 = 0x20, + X1, + X2, + X3, + X4, + X5, + X6, + X7, + X8, + X9, + X10, + X11, + X12, + X13, + X14, + X15, + X16, + X17, + X18, + X19, + X20, + X21, + X22, + X23, + X24, + X25, + X26, + X27, + X28, + X29, + X30, + + SP, // 64bit stack pointer + + // VFP single precision registers + S0 = 0x40, + S1, + S2, + S3, + S4, + S5, + S6, + S7, + S8, + S9, + S10, + S11, + S12, + S13, + S14, + S15, + S16, + S17, + S18, + S19, + S20, + S21, + S22, + S23, + S24, + S25, + S26, + S27, + S28, + S29, + S30, + S31, + + // VFP Double Precision registers + D0 = 0x80, + D1, + D2, + D3, + D4, + D5, + D6, + D7, + D8, + D9, + D10, + D11, + D12, + D13, + D14, + D15, + D16, + D17, + D18, + D19, + D20, + D21, + D22, + D23, + D24, + D25, + D26, + D27, + D28, + D29, + D30, + D31, + + // ASIMD Quad-Word registers + Q0 = 0xC0, + Q1, + Q2, + Q3, + Q4, + Q5, + Q6, + Q7, + Q8, + Q9, + Q10, + Q11, + Q12, + Q13, + Q14, + Q15, + Q16, + Q17, + Q18, + Q19, + Q20, + Q21, + Q22, + Q23, + Q24, + Q25, + Q26, + Q27, + Q28, + Q29, + Q30, + Q31, + + // For PRFM(prefetch memory) encoding + // This is encoded in the Rt register + // Data preload + PLDL1KEEP = 0, + PLDL1STRM, + PLDL2KEEP, + PLDL2STRM, + PLDL3KEEP, + PLDL3STRM, + // Instruction preload + PLIL1KEEP = 8, + PLIL1STRM, + PLIL2KEEP, + PLIL2STRM, + PLIL3KEEP, + PLIL3STRM, + // Prepare for store + PLTL1KEEP = 16, + PLTL1STRM, + PLTL2KEEP, + PLTL2STRM, + PLTL3KEEP, + PLTL3STRM, + + WZR = WSP, + ZR = SP, + + INVALID_REG = 0xFFFFFFFF +}; + +constexpr bool Is64Bit(ARM64Reg reg) +{ + return (reg & 0x20) != 0; +} +constexpr bool IsSingle(ARM64Reg reg) +{ + return (reg & 0xC0) == 0x40; +} +constexpr bool IsDouble(ARM64Reg reg) +{ + return (reg & 0xC0) == 0x80; +} +constexpr bool IsScalar(ARM64Reg reg) +{ + return IsSingle(reg) || IsDouble(reg); +} +constexpr bool IsQuad(ARM64Reg reg) +{ + return 
(reg & 0xC0) == 0xC0;
+}
+constexpr bool IsVector(ARM64Reg reg)
+{
+  return (reg & 0xC0) != 0;
+}
+constexpr bool IsGPR(ARM64Reg reg)
+{
+  return static_cast<int>(reg) < 0x40;
+}
+
+constexpr ARM64Reg DecodeReg(ARM64Reg reg)
+{
+  return static_cast<ARM64Reg>(reg & 0x1F);
+}
+constexpr ARM64Reg EncodeRegTo64(ARM64Reg reg)
+{
+  return static_cast<ARM64Reg>(reg | 0x20);
+}
+constexpr ARM64Reg EncodeRegToSingle(ARM64Reg reg)
+{
+  return static_cast<ARM64Reg>(DecodeReg(reg) + S0);
+}
+constexpr ARM64Reg EncodeRegToDouble(ARM64Reg reg)
+{
+  return static_cast<ARM64Reg>((reg & ~0xC0) | 0x80);
+}
+constexpr ARM64Reg EncodeRegToQuad(ARM64Reg reg)
+{
+  return static_cast<ARM64Reg>(reg | 0xC0);
+}
+
+enum OpType
+{
+  TYPE_IMM = 0,
+  TYPE_REG,
+  TYPE_IMMSREG,
+  TYPE_RSR,
+  TYPE_MEM
+};
+
+enum ShiftType
+{
+  ST_LSL = 0,
+  ST_LSR = 1,
+  ST_ASR = 2,
+  ST_ROR = 3,
+};
+
+enum IndexType
+{
+  INDEX_UNSIGNED,
+  INDEX_POST,
+  INDEX_PRE,
+  INDEX_SIGNED, // used in LDP/STP
+};
+
+enum ShiftAmount
+{
+  SHIFT_0 = 0,
+  SHIFT_16 = 1,
+  SHIFT_32 = 2,
+  SHIFT_48 = 3,
+};
+
+enum RoundingMode
+{
+  ROUND_A, // round to nearest, ties to away
+  ROUND_M, // round towards -inf
+  ROUND_N, // round to nearest, ties to even
+  ROUND_P, // round towards +inf
+  ROUND_Z, // round towards zero
+};
+
+struct FixupBranch
+{
+  ptrdiff_t ptr;
+  // Type defines
+  // 0 = CBZ (32bit)
+  // 1 = CBNZ (32bit)
+  // 2 = B (conditional)
+  // 3 = TBZ
+  // 4 = TBNZ
+  // 5 = B (unconditional)
+  // 6 = BL (unconditional)
+  u32 type;
+
+  // Used with B.cond
+  CCFlags cond;
+
+  // Used with TBZ/TBNZ
+  u8 bit;
+
+  // Used with Test/Compare and Branch
+  ARM64Reg reg;
+};
+
+enum PStateField
+{
+  FIELD_SPSel = 0,
+  FIELD_DAIFSet,
+  FIELD_DAIFClr,
+  FIELD_NZCV, // The only system registers accessible from EL0 (user space)
+  FIELD_PMCR_EL0,
+  FIELD_PMCCNTR_EL0,
+  FIELD_FPCR = 0x340,
+  FIELD_FPSR = 0x341,
+};
+
+enum SystemHint
+{
+  HINT_NOP = 0,
+  HINT_YIELD,
+  HINT_WFE,
+  HINT_WFI,
+  HINT_SEV,
+  HINT_SEVL,
+};
+
+enum BarrierType
+{
+  OSHLD = 1,
+  OSHST = 2,
+  OSH = 3,
+  NSHLD = 5,
+  NSHST = 6,
+  NSH = 7,
+  ISHLD = 9,
+  ISHST = 10,
+  ISH = 11,
+  LD = 13,
+  ST = 14,
+  SY = 15,
+};
+
+class ArithOption
+{
+public:
+  enum WidthSpecifier
+  {
+    WIDTH_DEFAULT,
+    WIDTH_32BIT,
+    WIDTH_64BIT,
+  };
+
+  enum ExtendSpecifier
+  {
+    EXTEND_UXTB = 0x0,
+    EXTEND_UXTH = 0x1,
+    EXTEND_UXTW = 0x2, /* Also LSL on 32bit width */
+    EXTEND_UXTX = 0x3, /* Also LSL on 64bit width */
+    EXTEND_SXTB = 0x4,
+    EXTEND_SXTH = 0x5,
+    EXTEND_SXTW = 0x6,
+    EXTEND_SXTX = 0x7,
+  };
+
+  enum TypeSpecifier
+  {
+    TYPE_EXTENDEDREG,
+    TYPE_IMM,
+    TYPE_SHIFTEDREG,
+  };
+
+private:
+  ARM64Reg m_destReg;
+  WidthSpecifier m_width;
+  ExtendSpecifier m_extend;
+  TypeSpecifier m_type;
+  ShiftType m_shifttype;
+  u32 m_shift;
+
+public:
+  ArithOption(ARM64Reg Rd, bool index = false)
+  {
+    // On AArch64 load/store instructions that take a register offset, the
+    // offset register can act as an index. When indexing, the offset
+    // register is shifted left by the size of the access, so we index at
+    // intervals of the transfer size:
+    // 8-bit: index is used as-is
+    // 16-bit: index LSL 1
+    // 32-bit: index LSL 2
+    // 64-bit: index LSL 3
+    if (index)
+      m_shift = 4;
+    else
+      m_shift = 0;
+
+    m_destReg = Rd;
+    m_type = TYPE_EXTENDEDREG;
+    if (Is64Bit(Rd))
+    {
+      m_width = WIDTH_64BIT;
+      m_extend = EXTEND_UXTX;
+    }
+    else
+    {
+      m_width = WIDTH_32BIT;
+      m_extend = EXTEND_UXTW;
+    }
+    m_shifttype = ST_LSL;
+  }
+  ArithOption(ARM64Reg Rd, ShiftType shift_type, u32 shift)
+  {
+    m_destReg = Rd;
+    m_shift =
shift; + m_shifttype = shift_type; + m_type = TYPE_SHIFTEDREG; + if (Is64Bit(Rd)) + { + m_width = WIDTH_64BIT; + if (shift == 64) + m_shift = 0; + } + else + { + m_width = WIDTH_32BIT; + if (shift == 32) + m_shift = 0; + } + } + TypeSpecifier GetType() const { return m_type; } + ARM64Reg GetReg() const { return m_destReg; } + u32 GetData() const + { + switch (m_type) + { + case TYPE_EXTENDEDREG: + return (m_extend << 13) | (m_shift << 10); + break; + case TYPE_SHIFTEDREG: + return (m_shifttype << 22) | (m_shift << 10); + break; + default: + DEBUG_ASSERT_MSG(DYNA_REC, false, "Invalid type in GetData"); + break; + } + return 0; + } +}; + +class ARM64XEmitter +{ + friend class ARM64FloatEmitter; + +private: + ptrdiff_t m_code; + ptrdiff_t m_lastCacheFlushEnd; + u8* m_rwbase; + u8* m_rxbase; + + void AddImmediate(ARM64Reg Rd, ARM64Reg Rn, u64 imm, bool shift, bool negative, bool flags); + void EncodeCompareBranchInst(u32 op, ARM64Reg Rt, const void* ptr); + void EncodeTestBranchInst(u32 op, ARM64Reg Rt, u8 bits, const void* ptr); + void EncodeUnconditionalBranchInst(u32 op, const void* ptr); + void EncodeUnconditionalBranchInst(u32 opc, u32 op2, u32 op3, u32 op4, ARM64Reg Rn); + void EncodeExceptionInst(u32 instenc, u32 imm); + void EncodeSystemInst(u32 op0, u32 op1, u32 CRn, u32 CRm, u32 op2, ARM64Reg Rt); + void EncodeArithmeticInst(u32 instenc, bool flags, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, + ArithOption Option); + void EncodeArithmeticCarryInst(u32 op, bool flags, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void EncodeCondCompareImmInst(u32 op, ARM64Reg Rn, u32 imm, u32 nzcv, CCFlags cond); + void EncodeCondCompareRegInst(u32 op, ARM64Reg Rn, ARM64Reg Rm, u32 nzcv, CCFlags cond); + void EncodeCondSelectInst(u32 instenc, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, CCFlags cond); + void EncodeData1SrcInst(u32 instenc, ARM64Reg Rd, ARM64Reg Rn); + void EncodeData2SrcInst(u32 instenc, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void EncodeData3SrcInst(u32 instenc, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ARM64Reg Ra); + void EncodeLogicalInst(u32 instenc, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ArithOption Shift); + void EncodeLoadRegisterInst(u32 bitop, ARM64Reg Rt, u32 imm); + void EncodeLoadStoreExcInst(u32 instenc, ARM64Reg Rs, ARM64Reg Rt2, ARM64Reg Rn, ARM64Reg Rt); + void EncodeLoadStorePairedInst(u32 op, ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn, u32 imm); + void EncodeLoadStoreIndexedInst(u32 op, u32 op2, ARM64Reg Rt, ARM64Reg Rn, s32 imm); + void EncodeLoadStoreIndexedInst(u32 op, ARM64Reg Rt, ARM64Reg Rn, s32 imm, u8 size); + void EncodeMOVWideInst(u32 op, ARM64Reg Rd, u32 imm, ShiftAmount pos); + void EncodeBitfieldMOVInst(u32 op, ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms); + void EncodeLoadStoreRegisterOffset(u32 size, u32 opc, ARM64Reg Rt, ARM64Reg Rn, ArithOption Rm); + void EncodeAddSubImmInst(u32 op, bool flags, u32 shift, u32 imm, ARM64Reg Rn, ARM64Reg Rd); + void EncodeLogicalImmInst(u32 op, ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms, int n); + void EncodeLoadStorePair(u32 op, u32 load, IndexType type, ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn, + s32 imm); + void EncodeAddressInst(u32 op, ARM64Reg Rd, s32 imm); + void EncodeLoadStoreUnscaled(u32 size, u32 op, ARM64Reg Rt, ARM64Reg Rn, s32 imm); + +protected: + // TODO: make this less ugly + // used for Switch where memory is executable and writeable and different addresses + // we need to take this for relative addressing in account + + void Write32(u32 value); + +public: + ARM64XEmitter() : m_code(0), m_lastCacheFlushEnd(0), 
m_rwbase(nullptr), m_rxbase(nullptr) {} + ARM64XEmitter(u8* rwbase, u8* rxbase, ptrdiff_t offset) + { + m_rwbase = rwbase; + m_rxbase = rxbase; + m_code = offset; + m_lastCacheFlushEnd = offset; + } + + virtual ~ARM64XEmitter() {} + void SetCodePtr(ptrdiff_t ptr); + void SetCodePtrUnsafe(ptrdiff_t ptr); + void SetCodeBase(u8* rwbase, u8* rxbase); + void ReserveCodeSpace(u32 bytes); + ptrdiff_t AlignCode16(); + ptrdiff_t AlignCodePage(); + ptrdiff_t GetCodeOffset(); + const u8* GetRWPtr(); + u8* GetWriteableRWPtr(); + void* GetRXPtr(); + void FlushIcache(); + void FlushIcacheSection(u8* start, u8* end); + + // FixupBranch branching + void SetJumpTarget(FixupBranch const& branch); + FixupBranch CBZ(ARM64Reg Rt); + FixupBranch CBNZ(ARM64Reg Rt); + FixupBranch B(CCFlags cond); + FixupBranch TBZ(ARM64Reg Rt, u8 bit); + FixupBranch TBNZ(ARM64Reg Rt, u8 bit); + FixupBranch B(); + FixupBranch BL(); + + // Compare and Branch + void CBZ(ARM64Reg Rt, const void* ptr); + void CBNZ(ARM64Reg Rt, const void* ptr); + + // Conditional Branch + void B(CCFlags cond, const void* ptr); + + // Test and Branch + void TBZ(ARM64Reg Rt, u8 bits, const void* ptr); + void TBNZ(ARM64Reg Rt, u8 bits, const void* ptr); + + // Unconditional Branch + void B(const void* ptr); + void BL(const void* ptr); + + // Unconditional Branch (register) + void BR(ARM64Reg Rn); + void BLR(ARM64Reg Rn); + void RET(ARM64Reg Rn = X30); + void ERET(); + void DRPS(); + + // Exception generation + void SVC(u32 imm); + void HVC(u32 imm); + void SMC(u32 imm); + void BRK(u32 imm); + void HLT(u32 imm); + void DCPS1(u32 imm); + void DCPS2(u32 imm); + void DCPS3(u32 imm); + + // System + void _MSR(PStateField field, u8 imm); + void _MSR(PStateField field, ARM64Reg Rt); + void MRS(ARM64Reg Rt, PStateField field); + void CNTVCT(ARM64Reg Rt); + + void HINT(SystemHint op); + void CLREX(); + void DSB(BarrierType type); + void DMB(BarrierType type); + void ISB(BarrierType type); + + // Add/Subtract (Extended/Shifted register) + void ADD(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void ADD(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ArithOption Option); + void ADDS(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void ADDS(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ArithOption Option); + void SUB(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void SUB(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ArithOption Option); + void SUBS(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void SUBS(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ArithOption Option); + void CMN(ARM64Reg Rn, ARM64Reg Rm); + void CMN(ARM64Reg Rn, ARM64Reg Rm, ArithOption Option); + void CMP(ARM64Reg Rn, ARM64Reg Rm); + void CMP(ARM64Reg Rn, ARM64Reg Rm, ArithOption Option); + + // Add/Subtract (with carry) + void ADC(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void ADCS(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void SBC(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void SBCS(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + + // Conditional Compare (immediate) + void CCMN(ARM64Reg Rn, u32 imm, u32 nzcv, CCFlags cond); + void CCMP(ARM64Reg Rn, u32 imm, u32 nzcv, CCFlags cond); + + // Conditional Compare (register) + void CCMN(ARM64Reg Rn, ARM64Reg Rm, u32 nzcv, CCFlags cond); + void CCMP(ARM64Reg Rn, ARM64Reg Rm, u32 nzcv, CCFlags cond); + + // Conditional Select + void CSEL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, CCFlags cond); + void CSINC(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, CCFlags cond); + void CSINV(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, CCFlags cond); + void CSNEG(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, CCFlags cond); + + // Aliases + 
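+  // Expansion sketch: CSET Rd, cond emits CSINC Rd, ZR, ZR, !cond, so Rd
+  // becomes 1 when cond holds and 0 otherwise; CSETM uses CSINV the same
+  // way to yield all-ones instead.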
void CSET(ARM64Reg Rd, CCFlags cond) + { + ARM64Reg zr = Is64Bit(Rd) ? ZR : WZR; + CSINC(Rd, zr, zr, (CCFlags)((u32)cond ^ 1)); + } + void CSETM(ARM64Reg Rd, CCFlags cond) + { + ARM64Reg zr = Is64Bit(Rd) ? ZR : WZR; + CSINV(Rd, zr, zr, (CCFlags)((u32)cond ^ 1)); + } + void NEG(ARM64Reg Rd, ARM64Reg Rs) { SUB(Rd, Is64Bit(Rd) ? ZR : WZR, Rs); } + // Data-Processing 1 source + void RBIT(ARM64Reg Rd, ARM64Reg Rn); + void REV16(ARM64Reg Rd, ARM64Reg Rn); + void REV32(ARM64Reg Rd, ARM64Reg Rn); + void REV64(ARM64Reg Rd, ARM64Reg Rn); + void CLZ(ARM64Reg Rd, ARM64Reg Rn); + void CLS(ARM64Reg Rd, ARM64Reg Rn); + + // Data-Processing 2 source + void UDIV(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void SDIV(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void LSLV(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void LSRV(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void ASRV(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void RORV(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void CRC32B(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void CRC32H(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void CRC32W(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void CRC32CB(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void CRC32CH(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void CRC32CW(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void CRC32X(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void CRC32CX(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + + // Data-Processing 3 source + void MADD(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ARM64Reg Ra); + void MSUB(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ARM64Reg Ra); + void SMADDL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ARM64Reg Ra); + void SMULL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void SMSUBL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ARM64Reg Ra); + void SMULH(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void UMADDL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ARM64Reg Ra); + void UMULL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void UMSUBL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ARM64Reg Ra); + void UMULH(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void MUL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void MNEG(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + + // Logical (shifted register) + void AND(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ArithOption Shift); + void BIC(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ArithOption Shift); + void ORR(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ArithOption Shift); + void ORN(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ArithOption Shift); + void EOR(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ArithOption Shift); + void EON(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ArithOption Shift); + void ANDS(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ArithOption Shift); + void BICS(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ArithOption Shift); + + // Wrap the above for saner syntax + void AND(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) { AND(Rd, Rn, Rm, ArithOption(Rd, ST_LSL, 0)); } + void BIC(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) { BIC(Rd, Rn, Rm, ArithOption(Rd, ST_LSL, 0)); } + void ORR(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) { ORR(Rd, Rn, Rm, ArithOption(Rd, ST_LSL, 0)); } + void ORN(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) { ORN(Rd, Rn, Rm, ArithOption(Rd, ST_LSL, 0)); } + void EOR(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) { EOR(Rd, Rn, Rm, ArithOption(Rd, ST_LSL, 0)); } + void EON(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) { EON(Rd, Rn, Rm, ArithOption(Rd, ST_LSL, 0)); } + void ANDS(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) { ANDS(Rd, Rn, Rm, ArithOption(Rd, ST_LSL, 0)); } + void BICS(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) { BICS(Rd, Rn, Rm, ArithOption(Rd, 
ST_LSL, 0)); } + // Convenience wrappers around ORR. These match the official convenience syntax. + void MOV(ARM64Reg Rd, ARM64Reg Rm, ArithOption Shift); + void MOV(ARM64Reg Rd, ARM64Reg Rm); + void MVN(ARM64Reg Rd, ARM64Reg Rm); + + // Convenience wrappers around UBFM/EXTR. + void LSR(ARM64Reg Rd, ARM64Reg Rm, int shift); + void LSL(ARM64Reg Rd, ARM64Reg Rm, int shift); + void ASR(ARM64Reg Rd, ARM64Reg Rm, int shift); + void ROR_(ARM64Reg Rd, ARM64Reg Rm, int shift); + + // Logical (immediate) + void AND(ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms, bool invert = false); + void ANDS(ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms, bool invert = false); + void EOR(ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms, bool invert = false); + void ORR(ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms, bool invert = false); + void TST(ARM64Reg Rn, u32 immr, u32 imms, bool invert = false); + void TST(ARM64Reg Rn, ARM64Reg Rm) { ANDS(Is64Bit(Rn) ? ZR : WZR, Rn, Rm); } + // Add/subtract (immediate) + void ADD(ARM64Reg Rd, ARM64Reg Rn, u32 imm, bool shift = false); + void ADDS(ARM64Reg Rd, ARM64Reg Rn, u32 imm, bool shift = false); + void SUB(ARM64Reg Rd, ARM64Reg Rn, u32 imm, bool shift = false); + void SUBS(ARM64Reg Rd, ARM64Reg Rn, u32 imm, bool shift = false); + void CMP(ARM64Reg Rn, u32 imm, bool shift = false); + + // Data Processing (Immediate) + void MOVZ(ARM64Reg Rd, u32 imm, ShiftAmount pos = SHIFT_0); + void MOVN(ARM64Reg Rd, u32 imm, ShiftAmount pos = SHIFT_0); + void MOVK(ARM64Reg Rd, u32 imm, ShiftAmount pos = SHIFT_0); + + // Bitfield move + void BFM(ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms); + void SBFM(ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms); + void UBFM(ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms); + void BFI(ARM64Reg Rd, ARM64Reg Rn, u32 lsb, u32 width); + void UBFIZ(ARM64Reg Rd, ARM64Reg Rn, u32 lsb, u32 width); + + // Extract register (ROR with two inputs, if same then faster on A67) + void EXTR(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, u32 shift); + + // Aliases + void SXTB(ARM64Reg Rd, ARM64Reg Rn); + void SXTH(ARM64Reg Rd, ARM64Reg Rn); + void SXTW(ARM64Reg Rd, ARM64Reg Rn); + void UXTB(ARM64Reg Rd, ARM64Reg Rn); + void UXTH(ARM64Reg Rd, ARM64Reg Rn); + + void UBFX(ARM64Reg Rd, ARM64Reg Rn, int lsb, int width) { UBFM(Rd, Rn, lsb, lsb + width - 1); } + // Load Register (Literal) + void LDR(ARM64Reg Rt, u32 imm); + void LDRSW(ARM64Reg Rt, u32 imm); + void PRFM(ARM64Reg Rt, u32 imm); + + // Load/Store Exclusive + void STXRB(ARM64Reg Rs, ARM64Reg Rt, ARM64Reg Rn); + void STLXRB(ARM64Reg Rs, ARM64Reg Rt, ARM64Reg Rn); + void LDXRB(ARM64Reg Rt, ARM64Reg Rn); + void LDAXRB(ARM64Reg Rt, ARM64Reg Rn); + void STLRB(ARM64Reg Rt, ARM64Reg Rn); + void LDARB(ARM64Reg Rt, ARM64Reg Rn); + void STXRH(ARM64Reg Rs, ARM64Reg Rt, ARM64Reg Rn); + void STLXRH(ARM64Reg Rs, ARM64Reg Rt, ARM64Reg Rn); + void LDXRH(ARM64Reg Rt, ARM64Reg Rn); + void LDAXRH(ARM64Reg Rt, ARM64Reg Rn); + void STLRH(ARM64Reg Rt, ARM64Reg Rn); + void LDARH(ARM64Reg Rt, ARM64Reg Rn); + void STXR(ARM64Reg Rs, ARM64Reg Rt, ARM64Reg Rn); + void STLXR(ARM64Reg Rs, ARM64Reg Rt, ARM64Reg Rn); + void STXP(ARM64Reg Rs, ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn); + void STLXP(ARM64Reg Rs, ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn); + void LDXR(ARM64Reg Rt, ARM64Reg Rn); + void LDAXR(ARM64Reg Rt, ARM64Reg Rn); + void LDXP(ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn); + void LDAXP(ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn); + void STLR(ARM64Reg Rt, ARM64Reg Rn); + void LDAR(ARM64Reg Rt, ARM64Reg Rn); + + // Load/Store no-allocate pair (offset) + void 
STNP(ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn, u32 imm);
+  void LDNP(ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn, u32 imm);
+
+  // Load/Store register (immediate indexed)
+  void STRB(IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm);
+  void LDRB(IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm);
+  void LDRSB(IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm);
+  void STRH(IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm);
+  void LDRH(IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm);
+  void LDRSH(IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm);
+  void STR(IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm);
+  void LDR(IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm);
+  void LDRSW(IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm);
+
+  // Load/Store register (register offset)
+  void STRB(ARM64Reg Rt, ARM64Reg Rn, ArithOption Rm);
+  void LDRB(ARM64Reg Rt, ARM64Reg Rn, ArithOption Rm);
+  void LDRSB(ARM64Reg Rt, ARM64Reg Rn, ArithOption Rm);
+  void STRH(ARM64Reg Rt, ARM64Reg Rn, ArithOption Rm);
+  void LDRH(ARM64Reg Rt, ARM64Reg Rn, ArithOption Rm);
+  void LDRSH(ARM64Reg Rt, ARM64Reg Rn, ArithOption Rm);
+  void STR(ARM64Reg Rt, ARM64Reg Rn, ArithOption Rm);
+  void LDR(ARM64Reg Rt, ARM64Reg Rn, ArithOption Rm);
+  void LDRSW(ARM64Reg Rt, ARM64Reg Rn, ArithOption Rm);
+  void PRFM(ARM64Reg Rt, ARM64Reg Rn, ArithOption Rm);
+
+  // Load/Store register (unscaled offset)
+  void STURB(ARM64Reg Rt, ARM64Reg Rn, s32 imm);
+  void LDURB(ARM64Reg Rt, ARM64Reg Rn, s32 imm);
+  void LDURSB(ARM64Reg Rt, ARM64Reg Rn, s32 imm);
+  void STURH(ARM64Reg Rt, ARM64Reg Rn, s32 imm);
+  void LDURH(ARM64Reg Rt, ARM64Reg Rn, s32 imm);
+  void LDURSH(ARM64Reg Rt, ARM64Reg Rn, s32 imm);
+  void STUR(ARM64Reg Rt, ARM64Reg Rn, s32 imm);
+  void LDUR(ARM64Reg Rt, ARM64Reg Rn, s32 imm);
+  void LDURSW(ARM64Reg Rt, ARM64Reg Rn, s32 imm);
+
+  // Load/Store pair
+  void LDP(IndexType type, ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn, s32 imm);
+  void LDPSW(IndexType type, ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn, s32 imm);
+  void STP(IndexType type, ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn, s32 imm);
+
+  void LDRGeneric(int size, bool signExtend, ARM64Reg Rt, ARM64Reg Rn, ArithOption Rm);
+  void STRGeneric(int size, ARM64Reg Rt, ARM64Reg Rn, ArithOption Rm);
+
+  void LDRGeneric(int size, bool signExtend, IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm);
+  void STRGeneric(int size, IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm);
+
+  // Address of label/page PC-relative
+  void ADR(ARM64Reg Rd, s32 imm);
+  void ADRP(ARM64Reg Rd, s32 imm);
+
+  // Wrapper around MOVZ+MOVK
+  void MOVI2R(ARM64Reg Rd, u64 imm, bool optimize = true);
+  bool MOVI2R2(ARM64Reg Rd, u64 imm1, u64 imm2);
+  template <typename P>
+  void MOVP2R(ARM64Reg Rd, P* ptr)
+  {
+    ASSERT_MSG(DYNA_REC, Is64Bit(Rd), "Can't store pointers in 32-bit registers");
+    MOVI2R(Rd, (uintptr_t)ptr);
+  }
+
+  // Wrapper around AND x, y, imm etc. If you are sure the imm will work, no need to pass a scratch
+  // register.
+  void ANDI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch = INVALID_REG);
+  void ANDSI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch = INVALID_REG);
+  void TSTI2R(ARM64Reg Rn, u64 imm, ARM64Reg scratch = INVALID_REG)
+  {
+    ANDSI2R(Is64Bit(Rn) ?
ZR : WZR, Rn, imm, scratch);
+  }
+  void ORRI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch = INVALID_REG);
+  void EORI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch = INVALID_REG);
+  void CMPI2R(ARM64Reg Rn, u64 imm, ARM64Reg scratch = INVALID_REG);
+
+  void ADDI2R_internal(ARM64Reg Rd, ARM64Reg Rn, u64 imm, bool negative, bool flags,
+                       ARM64Reg scratch);
+  void ADDI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch = INVALID_REG);
+  void ADDSI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch = INVALID_REG);
+  void SUBI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch = INVALID_REG);
+  void SUBSI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch = INVALID_REG);
+
+  bool TryADDI2R(ARM64Reg Rd, ARM64Reg Rn, u32 imm);
+  bool TrySUBI2R(ARM64Reg Rd, ARM64Reg Rn, u32 imm);
+  bool TryCMPI2R(ARM64Reg Rn, u32 imm);
+
+  bool TryANDI2R(ARM64Reg Rd, ARM64Reg Rn, u32 imm);
+  bool TryORRI2R(ARM64Reg Rd, ARM64Reg Rn, u32 imm);
+  bool TryEORI2R(ARM64Reg Rd, ARM64Reg Rn, u32 imm);
+
+  // ABI related
+  void ABI_PushRegisters(BitSet32 registers);
+  void ABI_PopRegisters(BitSet32 registers, BitSet32 ignore_mask = BitSet32(0));
+
+  // Utility to generate a call to a std::function object.
+  //
+  // Unfortunately, calling operator() directly is undefined behavior in C++
+  // (this method might be a thunk in the case of multi-inheritance) so we
+  // have to go through a trampoline function.
+  template <typename T, typename... Args>
+  static T CallLambdaTrampoline(const std::function<T(Args...)>* f, Args... args)
+  {
+    return (*f)(args...);
+  }
+
+  // This function expects you to have set up the state.
+  // Overwrites X0 and X30
+  template <typename T, typename... Args>
+  ARM64Reg ABI_SetupLambda(const std::function<T(Args...)>* f)
+  {
+    auto trampoline = &ARM64XEmitter::CallLambdaTrampoline<T, Args...>;
+    MOVI2R(X30, (uintptr_t)trampoline);
+    MOVI2R(X0, (uintptr_t) const_cast<void*>((const void*)f));
+    return X30;
+  }
+
+  void QuickTailCall(ARM64Reg scratchreg, const void* func);
+  template <typename T>
+  void QuickTailCall(ARM64Reg scratchreg, T func)
+  {
+    QuickTailCall(scratchreg, (const void*)func);
+  }
+
+  // Plain function call
+  void QuickCallFunction(ARM64Reg scratchreg, const void* func);
+  template <typename T>
+  void QuickCallFunction(ARM64Reg scratchreg, T func)
+  {
+    QuickCallFunction(scratchreg, (const void*)func);
+  }
+};
+
+class ARM64FloatEmitter
+{
+public:
+  ARM64FloatEmitter(ARM64XEmitter* emit) : m_emit(emit) {}
+  void LDR(u8 size, IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm);
+  void STR(u8 size, IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm);
+
+  // Loadstore unscaled
+  void LDUR(u8 size, ARM64Reg Rt, ARM64Reg Rn, s32 imm);
+  void STUR(u8 size, ARM64Reg Rt, ARM64Reg Rn, s32 imm);
+
+  // Loadstore single structure
+  void LD1(u8 size, ARM64Reg Rt, u8 index, ARM64Reg Rn);
+  void LD1(u8 size, ARM64Reg Rt, u8 index, ARM64Reg Rn, ARM64Reg Rm);
+  void LD1R(u8 size, ARM64Reg Rt, ARM64Reg Rn);
+  void LD2R(u8 size, ARM64Reg Rt, ARM64Reg Rn);
+  void LD1R(u8 size, ARM64Reg Rt, ARM64Reg Rn, ARM64Reg Rm);
+  void LD2R(u8 size, ARM64Reg Rt, ARM64Reg Rn, ARM64Reg Rm);
+  void ST1(u8 size, ARM64Reg Rt, u8 index, ARM64Reg Rn);
+  void ST1(u8 size, ARM64Reg Rt, u8 index, ARM64Reg Rn, ARM64Reg Rm);
+
+  // Loadstore multiple structure
+  void LD1(u8 size, u8 count, ARM64Reg Rt, ARM64Reg Rn);
+  void LD1(u8 size, u8 count, IndexType type, ARM64Reg Rt, ARM64Reg Rn, ARM64Reg Rm = SP);
+  void ST1(u8 size, u8 count, ARM64Reg Rt, ARM64Reg Rn);
+  void ST1(u8 size, u8 count, IndexType type, ARM64Reg Rt, ARM64Reg Rn, ARM64Reg Rm = SP);
+
+  // Loadstore paired
+  void LDP(u8 size, IndexType type, ARM64Reg Rt,
ARM64Reg Rt2, ARM64Reg Rn, s32 imm); + void STP(u8 size, IndexType type, ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn, s32 imm); + + // Loadstore register offset + void STR(u8 size, ARM64Reg Rt, ARM64Reg Rn, ArithOption Rm); + void LDR(u8 size, ARM64Reg Rt, ARM64Reg Rn, ArithOption Rm); + + // Scalar - 1 Source + void FABS(ARM64Reg Rd, ARM64Reg Rn); + void FNEG(ARM64Reg Rd, ARM64Reg Rn); + void FSQRT(ARM64Reg Rd, ARM64Reg Rn); + void FMOV(ARM64Reg Rd, ARM64Reg Rn, bool top = false); // Also generalized move between GPR/FP + + // Scalar - 2 Source + void FADD(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void FMUL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void FSUB(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void FDIV(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void FMAX(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void FMIN(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void FMAXNM(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void FMINNM(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void FNMUL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + + // Scalar - 3 Source. Note - the accumulator is last on ARM! + void FMADD(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ARM64Reg Ra); + void FMSUB(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ARM64Reg Ra); + void FNMADD(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ARM64Reg Ra); + void FNMSUB(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ARM64Reg Ra); + + // Scalar floating point immediate + void FMOV(ARM64Reg Rd, uint8_t imm8); + + // Vector + void AND(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void BSL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void DUP(u8 size, ARM64Reg Rd, ARM64Reg Rn, u8 index); + void FABS(u8 size, ARM64Reg Rd, ARM64Reg Rn); + void FADD(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void FMAX(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void FMLA(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void FMLS(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void FMIN(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void FCVTL(u8 size, ARM64Reg Rd, ARM64Reg Rn); + void FCVTL2(u8 size, ARM64Reg Rd, ARM64Reg Rn); + void FCVTN(u8 dest_size, ARM64Reg Rd, ARM64Reg Rn); + void FCVTN2(u8 dest_size, ARM64Reg Rd, ARM64Reg Rn); + void FCVTZS(u8 size, ARM64Reg Rd, ARM64Reg Rn); + void FCVTZU(u8 size, ARM64Reg Rd, ARM64Reg Rn); + void FDIV(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void FMUL(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void FNEG(u8 size, ARM64Reg Rd, ARM64Reg Rn); + void FRECPE(u8 size, ARM64Reg Rd, ARM64Reg Rn); + void FRSQRTE(u8 size, ARM64Reg Rd, ARM64Reg Rn); + void FSUB(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void NOT(ARM64Reg Rd, ARM64Reg Rn); + void ORR(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void MOV(ARM64Reg Rd, ARM64Reg Rn) { ORR(Rd, Rn, Rn); } + void REV16(u8 size, ARM64Reg Rd, ARM64Reg Rn); + void REV32(u8 size, ARM64Reg Rd, ARM64Reg Rn); + void REV64(u8 size, ARM64Reg Rd, ARM64Reg Rn); + void SCVTF(u8 size, ARM64Reg Rd, ARM64Reg Rn); + void UCVTF(u8 size, ARM64Reg Rd, ARM64Reg Rn); + void SCVTF(u8 size, ARM64Reg Rd, ARM64Reg Rn, int scale); + void UCVTF(u8 size, ARM64Reg Rd, ARM64Reg Rn, int scale); + void SQXTN(u8 dest_size, ARM64Reg Rd, ARM64Reg Rn); + void SQXTN2(u8 dest_size, ARM64Reg Rd, ARM64Reg Rn); + void UQXTN(u8 dest_size, ARM64Reg Rd, ARM64Reg Rn); + void UQXTN2(u8 dest_size, ARM64Reg Rd, ARM64Reg Rn); + void XTN(u8 dest_size, ARM64Reg Rd, ARM64Reg Rn); + void XTN2(u8 dest_size, ARM64Reg Rd, ARM64Reg Rn); + + // Move + void DUP(u8 size, ARM64Reg Rd, ARM64Reg Rn); + void INS(u8 size, ARM64Reg Rd, u8 index, ARM64Reg Rn); + void 
INS(u8 size, ARM64Reg Rd, u8 index1, ARM64Reg Rn, u8 index2); + void UMOV(u8 size, ARM64Reg Rd, ARM64Reg Rn, u8 index); + void SMOV(u8 size, ARM64Reg Rd, ARM64Reg Rn, u8 index); + + // One source + void FCVT(u8 size_to, u8 size_from, ARM64Reg Rd, ARM64Reg Rn); + + // Scalar convert float to int, in a lot of variants. + // Note that the scalar version of this operation has two encodings, one that goes to an integer + // register + // and one that outputs to a scalar fp register. + void FCVTS(ARM64Reg Rd, ARM64Reg Rn, RoundingMode round); + void FCVTU(ARM64Reg Rd, ARM64Reg Rn, RoundingMode round); + + // Scalar convert int to float. No rounding mode specifier necessary. + void SCVTF(ARM64Reg Rd, ARM64Reg Rn); + void UCVTF(ARM64Reg Rd, ARM64Reg Rn); + + // Scalar fixed point to float. scale is the number of fractional bits. + void SCVTF(ARM64Reg Rd, ARM64Reg Rn, int scale); + void UCVTF(ARM64Reg Rd, ARM64Reg Rn, int scale); + + // Float comparison + void FCMP(ARM64Reg Rn, ARM64Reg Rm); + void FCMP(ARM64Reg Rn); + void FCMPE(ARM64Reg Rn, ARM64Reg Rm); + void FCMPE(ARM64Reg Rn); + void FCMEQ(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void FCMEQ(u8 size, ARM64Reg Rd, ARM64Reg Rn); + void FCMGE(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void FCMGE(u8 size, ARM64Reg Rd, ARM64Reg Rn); + void FCMGT(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void FCMGT(u8 size, ARM64Reg Rd, ARM64Reg Rn); + void FCMLE(u8 size, ARM64Reg Rd, ARM64Reg Rn); + void FCMLT(u8 size, ARM64Reg Rd, ARM64Reg Rn); + + // Conditional select + void FCSEL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, CCFlags cond); + + // Permute + void UZP1(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void TRN1(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void ZIP1(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void UZP2(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void TRN2(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void ZIP2(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + + // Shift by immediate + void SSHLL(u8 src_size, ARM64Reg Rd, ARM64Reg Rn, u32 shift); + void SSHLL2(u8 src_size, ARM64Reg Rd, ARM64Reg Rn, u32 shift); + void USHLL(u8 src_size, ARM64Reg Rd, ARM64Reg Rn, u32 shift); + void USHLL2(u8 src_size, ARM64Reg Rd, ARM64Reg Rn, u32 shift); + void SHRN(u8 dest_size, ARM64Reg Rd, ARM64Reg Rn, u32 shift); + void SHRN2(u8 dest_size, ARM64Reg Rd, ARM64Reg Rn, u32 shift); + void SXTL(u8 src_size, ARM64Reg Rd, ARM64Reg Rn); + void SXTL2(u8 src_size, ARM64Reg Rd, ARM64Reg Rn); + void UXTL(u8 src_size, ARM64Reg Rd, ARM64Reg Rn); + void UXTL2(u8 src_size, ARM64Reg Rd, ARM64Reg Rn); + + // vector x indexed element + void FMUL(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, u8 index); + void FMLA(u8 esize, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, u8 index); + + // Modified Immediate + void MOVI(u8 size, ARM64Reg Rd, u64 imm, u8 shift = 0); + void BIC(u8 size, ARM64Reg Rd, u8 imm, u8 shift = 0); + + void MOVI2F(ARM64Reg Rd, float value, ARM64Reg scratch = INVALID_REG, bool negate = false); + void MOVI2FDUP(ARM64Reg Rd, float value, ARM64Reg scratch = INVALID_REG); + + // ABI related + void ABI_PushRegisters(BitSet32 registers, ARM64Reg tmp = INVALID_REG); + void ABI_PopRegisters(BitSet32 registers, ARM64Reg tmp = INVALID_REG); + +private: + ARM64XEmitter* m_emit; + inline void Write32(u32 value) { m_emit->Write32(value); } + // Emitting functions + void EmitLoadStoreImmediate(u8 size, u32 opc, IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm); + void EmitScalar2Source(bool M, bool S, u32 type, u32 
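
The RoundingMode parameter on FCVTS/FCVTU selects between the AArch64 float-to-int conversion flavours. A host-side sketch of the four common choices, assuming the usual FCVTZS/FCVTNS/FCVTMS/FCVTPS semantics (melonDS's RoundingMode enumerator names are not shown in this diff):

#include <cmath>
#include <cstdio>

int main()
{
    double v = -2.5;
    printf("toward zero (FCVTZS-like):           %d\n", (int)std::trunc(v));     // -2
    printf("to nearest, ties even (FCVTNS-like): %d\n", (int)std::nearbyint(v)); // -2
    printf("toward -inf (FCVTMS-like):           %d\n", (int)std::floor(v));     // -3
    printf("toward +inf (FCVTPS-like):           %d\n", (int)std::ceil(v));      // -2
    return 0;
}
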
opcode, ARM64Reg Rd, ARM64Reg Rn, + ARM64Reg Rm); + void EmitThreeSame(bool U, u32 size, u32 opcode, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void EmitCopy(bool Q, u32 op, u32 imm5, u32 imm4, ARM64Reg Rd, ARM64Reg Rn); + void Emit2RegMisc(bool Q, bool U, u32 size, u32 opcode, ARM64Reg Rd, ARM64Reg Rn); + void EmitLoadStoreSingleStructure(bool L, bool R, u32 opcode, bool S, u32 size, ARM64Reg Rt, + ARM64Reg Rn); + void EmitLoadStoreSingleStructure(bool L, bool R, u32 opcode, bool S, u32 size, ARM64Reg Rt, + ARM64Reg Rn, ARM64Reg Rm); + void Emit1Source(bool M, bool S, u32 type, u32 opcode, ARM64Reg Rd, ARM64Reg Rn); + void EmitConversion(bool sf, bool S, u32 type, u32 rmode, u32 opcode, ARM64Reg Rd, ARM64Reg Rn); + void EmitConversion2(bool sf, bool S, bool direction, u32 type, u32 rmode, u32 opcode, int scale, + ARM64Reg Rd, ARM64Reg Rn); + void EmitCompare(bool M, bool S, u32 op, u32 opcode2, ARM64Reg Rn, ARM64Reg Rm); + void EmitCondSelect(bool M, bool S, CCFlags cond, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void EmitPermute(u32 size, u32 op, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm); + void EmitScalarImm(bool M, bool S, u32 type, u32 imm5, ARM64Reg Rd, u32 imm8); + void EmitShiftImm(bool Q, bool U, u32 immh, u32 immb, u32 opcode, ARM64Reg Rd, ARM64Reg Rn); + void EmitScalarShiftImm(bool U, u32 immh, u32 immb, u32 opcode, ARM64Reg Rd, ARM64Reg Rn); + void EmitLoadStoreMultipleStructure(u32 size, bool L, u32 opcode, ARM64Reg Rt, ARM64Reg Rn); + void EmitLoadStoreMultipleStructurePost(u32 size, bool L, u32 opcode, ARM64Reg Rt, ARM64Reg Rn, + ARM64Reg Rm); + void EmitScalar1Source(bool M, bool S, u32 type, u32 opcode, ARM64Reg Rd, ARM64Reg Rn); + void EmitVectorxElement(bool U, u32 size, bool L, u32 opcode, bool H, ARM64Reg Rd, ARM64Reg Rn, + ARM64Reg Rm); + void EmitLoadStoreUnscaled(u32 size, u32 op, ARM64Reg Rt, ARM64Reg Rn, s32 imm); + void EmitConvertScalarToInt(ARM64Reg Rd, ARM64Reg Rn, RoundingMode round, bool sign); + void EmitScalar3Source(bool isDouble, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ARM64Reg Ra, + int opcode); + void EncodeLoadStorePair(u32 size, bool load, IndexType type, ARM64Reg Rt, ARM64Reg Rt2, + ARM64Reg Rn, s32 imm); + void EncodeLoadStoreRegisterOffset(u32 size, bool load, ARM64Reg Rt, ARM64Reg Rn, ArithOption Rm); + void EncodeModImm(bool Q, u8 op, u8 cmode, u8 o2, ARM64Reg Rd, u8 abcdefgh); + + void SSHLL(u8 src_size, ARM64Reg Rd, ARM64Reg Rn, u32 shift, bool upper); + void USHLL(u8 src_size, ARM64Reg Rd, ARM64Reg Rn, u32 shift, bool upper); + void SHRN(u8 dest_size, ARM64Reg Rd, ARM64Reg Rn, u32 shift, bool upper); + void SXTL(u8 src_size, ARM64Reg Rd, ARM64Reg Rn, bool upper); + void UXTL(u8 src_size, ARM64Reg Rd, ARM64Reg Rn, bool upper); +}; + +} \ No newline at end of file diff --git a/src/dolphin/ArmCommon.h b/src/dolphin/ArmCommon.h new file mode 100644 index 0000000..6d82e9d --- /dev/null +++ b/src/dolphin/ArmCommon.h @@ -0,0 +1,27 @@ +// Copyright 2014 Dolphin Emulator Project +// Licensed under GPLv2+ +// Refer to the license.txt file included. 
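
All of the private Emit* helpers above follow one pattern: OR the fixed opcode bits of an encoding group together with the variable register/size fields and write out a single 32-bit word. An illustrative sketch using the real AArch64 "three same" vector layout; the Write32 plumbing and the emitter's register enums are omitted:

#include <cstdint>
#include <cstdio>

static uint32_t EncodeThreeSame(bool Q, bool U, uint32_t size, uint32_t opcode,
                                uint32_t Rd, uint32_t Rn, uint32_t Rm)
{
    // 0 | Q | U | 01110 | size | 1 | Rm | opcode | 1 | Rn | Rd
    return (Q << 30) | (U << 29) | (0xE << 24) | (size << 22) | (1 << 21) |
           (Rm << 16) | (opcode << 11) | (1 << 10) | (Rn << 5) | Rd;
}

int main()
{
    // ADD V0.4S, V1.4S, V2.4S: Q=1, U=0, size=10, opcode=10000 -> 4EA28420
    printf("%08X\n", EncodeThreeSame(1, 0, 2, 0b10000, 0, 1, 2));
    return 0;
}
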
+ +#include "../types.h" + +enum CCFlags +{ + CC_EQ = 0, // Equal + CC_NEQ, // Not equal + CC_CS, // Carry Set + CC_CC, // Carry Clear + CC_MI, // Minus (Negative) + CC_PL, // Plus + CC_VS, // Overflow + CC_VC, // No Overflow + CC_HI, // Unsigned higher + CC_LS, // Unsigned lower or same + CC_GE, // Signed greater than or equal + CC_LT, // Signed less than + CC_GT, // Signed greater than + CC_LE, // Signed less than or equal + CC_AL, // Always (unconditional) 14 + CC_HS = CC_CS, // Alias of CC_CS Unsigned higher or same + CC_LO = CC_CC, // Alias of CC_CC Unsigned lower +}; +const u32 NO_COND = 0xE0000000; diff --git a/src/dolphin/BitUtils.h b/src/dolphin/BitUtils.h new file mode 100644 index 0000000..8b64a92 --- /dev/null +++ b/src/dolphin/BitUtils.h @@ -0,0 +1,254 @@ +// Copyright 2017 Dolphin Emulator Project +// Licensed under GPLv2+ +// Refer to the license.txt file included. + +#pragma once + +#include +#include +#include +#include + +namespace Common +{ +/// +/// Retrieves the size of a type in bits. +/// +/// @tparam T Type to get the size of. +/// +/// @return the size of the type in bits. +/// +template +constexpr size_t BitSize() noexcept +{ + return sizeof(T) * CHAR_BIT; +} + +/// +/// Extracts a bit from a value. +/// +/// @param src The value to extract a bit from. +/// @param bit The bit to extract. +/// +/// @tparam T The type of the value. +/// +/// @return The extracted bit. +/// +template +constexpr T ExtractBit(const T src, const size_t bit) noexcept +{ + return (src >> bit) & static_cast(1); +} + +/// +/// Extracts a bit from a value. +/// +/// @param src The value to extract a bit from. +/// +/// @tparam bit The bit to extract. +/// @tparam T The type of the value. +/// +/// @return The extracted bit. +/// +template +constexpr T ExtractBit(const T src) noexcept +{ + static_assert(bit < BitSize(), "Specified bit must be within T's bit width."); + + return ExtractBit(src, bit); +} + +/// +/// Extracts a range of bits from a value. +/// +/// @param src The value to extract the bits from. +/// @param begin The beginning of the bit range. This is inclusive. +/// @param end The ending of the bit range. This is inclusive. +/// +/// @tparam T The type of the value. +/// @tparam Result The returned result type. This is the unsigned analog +/// of a signed type if a signed type is passed as T. +/// +/// @return The extracted bits. +/// +template > +constexpr Result ExtractBits(const T src, const size_t begin, const size_t end) noexcept +{ + return static_cast(((static_cast(src) << ((BitSize() - 1) - end)) >> + (BitSize() - end + begin - 1))); +} + +/// +/// Extracts a range of bits from a value. +/// +/// @param src The value to extract the bits from. +/// +/// @tparam begin The beginning of the bit range. This is inclusive. +/// @tparam end The ending of the bit range. This is inclusive. +/// @tparam T The type of the value. +/// @tparam Result The returned result type. This is the unsigned analog +/// of a signed type if a signed type is passed as T. +/// +/// @return The extracted bits. +/// +template > +constexpr Result ExtractBits(const T src) noexcept +{ + static_assert(begin < end, "Beginning bit must be less than the ending bit."); + static_assert(begin < BitSize(), "Beginning bit is larger than T's bit width."); + static_assert(end < BitSize(), "Ending bit is larger than T's bit width."); + + return ExtractBits(src, begin, end); +} + +/// +/// Rotates a value left (ROL). +/// +/// @param value The value to rotate. 
+/// @param amount The number of bits to rotate the value. +/// @tparam T An unsigned type. +/// +/// @return The rotated value. +/// +template +constexpr T RotateLeft(const T value, size_t amount) noexcept +{ + static_assert(std::is_unsigned(), "Can only rotate unsigned types left."); + + amount %= BitSize(); + + if (amount == 0) + return value; + + return static_cast((value << amount) | (value >> (BitSize() - amount))); +} + +/// +/// Rotates a value right (ROR). +/// +/// @param value The value to rotate. +/// @param amount The number of bits to rotate the value. +/// @tparam T An unsigned type. +/// +/// @return The rotated value. +/// +template +constexpr T RotateRight(const T value, size_t amount) noexcept +{ + static_assert(std::is_unsigned(), "Can only rotate unsigned types right."); + + amount %= BitSize(); + + if (amount == 0) + return value; + + return static_cast((value >> amount) | (value << (BitSize() - amount))); +} + +/// +/// Verifies whether the supplied value is a valid bit mask of the form 0b00...0011...11. +/// Both edge cases of all zeros and all ones are considered valid masks, too. +/// +/// @param mask The mask value to test for validity. +/// +/// @tparam T The type of the value. +/// +/// @return A bool indicating whether the mask is valid. +/// +template +constexpr bool IsValidLowMask(const T mask) noexcept +{ + static_assert(std::is_integral::value, "Mask must be an integral type."); + static_assert(std::is_unsigned::value, "Signed masks can introduce hard to find bugs."); + + // Can be efficiently determined without looping or bit counting. It's the counterpart + // to https://graphics.stanford.edu/~seander/bithacks.html#DetermineIfPowerOf2 + // and doesn't require special casing either edge case. + return (mask & (mask + 1)) == 0; +} + +/// +/// Reinterpret objects of one type as another by bit-casting between object representations. +/// +/// @remark This is the example implementation of std::bit_cast which is to be included +/// in C++2a. See http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2017/p0476r2.html +/// for more details. The only difference is this variant is not constexpr, +/// as the mechanism for bit_cast requires a compiler built-in to have that quality. +/// +/// @param source The source object to convert to another representation. +/// +/// @tparam To The type to reinterpret source as. +/// @tparam From The initial type representation of source. +/// +/// @return The representation of type From as type To. +/// +/// @pre Both To and From types must be the same size +/// @pre Both To and From types must satisfy the TriviallyCopyable concept. 
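
A self-contained check of the two bit tricks documented above: ExtractBits kills the high bits with a left shift and the low bits with a right shift, and IsValidLowMask relies on mask+1 carrying past a gap-free run of ones. These are minimal re-sketches for illustration, not the real Common:: header:

#include <climits>
#include <cstddef>
#include <type_traits>

template <typename T>
constexpr std::size_t BitSize() { return sizeof(T) * CHAR_BIT; }

template <typename T, typename Result = std::make_unsigned_t<T>>
constexpr Result ExtractBits(T src, std::size_t begin, std::size_t end)
{
    // Shift left so 'end' becomes the top bit (discarding higher bits), then
    // shift right so 'begin' becomes bit 0 (discarding lower bits).
    return static_cast<Result>((static_cast<Result>(src) << (BitSize<T>() - 1 - end)) >>
                               (BitSize<T>() - end + begin - 1));
}

template <typename T>
constexpr bool IsValidLowMask(T mask)
{
    // 0b000111 + 1 = 0b001000, which shares no bits with the mask;
    // any "hole" in the run of ones breaks this property.
    return (mask & (mask + 1)) == 0;
}

static_assert(ExtractBits(0xABCDu, 4, 11) == 0xBC, "middle byte of 0xABCD");
static_assert(IsValidLowMask(0x00FFu) && !IsValidLowMask(0x00FDu), "low masks");

int main() { return 0; }
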
+/// +template +inline To BitCast(const From& source) noexcept +{ + static_assert(sizeof(From) == sizeof(To), + "BitCast source and destination types must be equal in size."); + static_assert(std::is_trivially_copyable(), + "BitCast source type must be trivially copyable."); + static_assert(std::is_trivially_copyable(), + "BitCast destination type must be trivially copyable."); + + std::aligned_storage_t storage; + std::memcpy(&storage, &source, sizeof(storage)); + return reinterpret_cast(storage); +} + +template +class BitCastPtrType +{ +public: + static_assert(std::is_trivially_copyable(), + "BitCastPtr source type must be trivially copyable."); + static_assert(std::is_trivially_copyable(), + "BitCastPtr destination type must be trivially copyable."); + + explicit BitCastPtrType(PtrType* ptr) : m_ptr(ptr) {} + + // Enable operator= only for pointers to non-const data + template + inline typename std::enable_if() && !std::is_const()>::type + operator=(const S& source) + { + std::memcpy(m_ptr, &source, sizeof(source)); + } + + inline operator T() const + { + T result; + std::memcpy(&result, m_ptr, sizeof(result)); + return result; + } + +private: + PtrType* m_ptr; +}; + +// Provides an aliasing-safe alternative to reinterpret_cast'ing pointers to structs +// Conversion constructor and operator= provided for a convenient syntax. +// Usage: MyStruct s = BitCastPtr(some_ptr); +// BitCastPtr(some_ptr) = s; +template +inline auto BitCastPtr(PtrType* ptr) noexcept -> BitCastPtrType +{ + return BitCastPtrType{ptr}; +} + +template +void SetBit(T& value, size_t bit_number, bool bit_value) +{ + static_assert(std::is_unsigned(), "SetBit is only sane on unsigned types."); + + if (bit_value) + value |= (T{1} << bit_number); + else + value &= ~(T{1} << bit_number); +} + +} // namespace Common diff --git a/src/dolphin/Compat.h b/src/dolphin/Compat.h index f2f52a5..787d505 100644 --- a/src/dolphin/Compat.h +++ b/src/dolphin/Compat.h @@ -61,3 +61,15 @@ { \ printf(fmt "\n", ## __VA_ARGS__); \ } while (false) + +#if __cplusplus < 201703L +// cheat +namespace std +{ +template +T clamp(const T& v, const T& lo, const T& hi) +{ + return v < lo ? lo : (v > hi ? hi : v); +} +} +#endif \ No newline at end of file diff --git a/src/dolphin/MathUtil.cpp b/src/dolphin/MathUtil.cpp new file mode 100644 index 0000000..70f2ede --- /dev/null +++ b/src/dolphin/MathUtil.cpp @@ -0,0 +1,13 @@ +// Copyright 2008 Dolphin Emulator Project +// Licensed under GPLv2+ +// Refer to the license.txt file included. + +#include "MathUtil.h" + +#include + +// Calculate sum of a float list +float MathFloatVectorSum(const std::vector& Vec) +{ + return std::accumulate(Vec.begin(), Vec.end(), 0.0f); +} diff --git a/src/dolphin/MathUtil.h b/src/dolphin/MathUtil.h new file mode 100644 index 0000000..b1dbbae --- /dev/null +++ b/src/dolphin/MathUtil.h @@ -0,0 +1,121 @@ +// Copyright 2008 Dolphin Emulator Project +// Licensed under GPLv2+ +// Refer to the license.txt file included. 
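
The memcpy in BitCast is what makes it well-defined where a reinterpret_cast-based pun would not be. A trimmed, runnable copy for illustration (the real header additionally routes through aligned storage and checks the trivially-copyable traits of both sides):

#include <cstdint>
#include <cstdio>
#include <cstring>

template <typename To, typename From>
To BitCast(const From& source)
{
    static_assert(sizeof(To) == sizeof(From), "sizes must match");
    To result;
    std::memcpy(&result, &source, sizeof(result)); // well-defined, unlike casting pointers
    return result;
}

int main()
{
    float f = 1.0f;
    std::uint32_t bits = BitCast<std::uint32_t>(f);
    printf("%08X\n", bits); // 3F800000: sign 0, exponent 127, mantissa 0
    return 0;
}
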
+ +#pragma once + +#include +#include + +#include "Compat.h" + +#include "../types.h" + +#ifdef _MSC_VER +#include +#endif + +namespace MathUtil +{ +constexpr double TAU = 6.2831853071795865; +constexpr double PI = TAU / 2; + +template +constexpr auto Sign(const T& val) -> decltype((T{} < val) - (val < T{})) +{ + return (T{} < val) - (val < T{}); +} + +template +constexpr auto Lerp(const T& x, const T& y, const F& a) -> decltype(x + (y - x) * a) +{ + return x + (y - x) * a; +} + +template +constexpr bool IsPow2(T imm) +{ + return imm > 0 && (imm & (imm - 1)) == 0; +} + +constexpr u32 NextPowerOf2(u32 value) +{ + --value; + value |= value >> 1; + value |= value >> 2; + value |= value >> 4; + value |= value >> 8; + value |= value >> 16; + ++value; + + return value; +} + +template +struct Rectangle +{ + T left{}; + T top{}; + T right{}; + T bottom{}; + + constexpr Rectangle() = default; + + constexpr Rectangle(T theLeft, T theTop, T theRight, T theBottom) + : left(theLeft), top(theTop), right(theRight), bottom(theBottom) + { + } + + constexpr bool operator==(const Rectangle& r) const + { + return left == r.left && top == r.top && right == r.right && bottom == r.bottom; + } + + T GetWidth() const { return abs(right - left); } + T GetHeight() const { return abs(bottom - top); } + // If the rectangle is in a coordinate system with a lower-left origin, use + // this Clamp. + void ClampLL(T x1, T y1, T x2, T y2) + { + left = std::clamp(left, x1, x2); + right = std::clamp(right, x1, x2); + top = std::clamp(top, y2, y1); + bottom = std::clamp(bottom, y2, y1); + } + + // If the rectangle is in a coordinate system with an upper-left origin, + // use this Clamp. + void ClampUL(T x1, T y1, T x2, T y2) + { + left = std::clamp(left, x1, x2); + right = std::clamp(right, x1, x2); + top = std::clamp(top, y1, y2); + bottom = std::clamp(bottom, y1, y2); + } +}; + +} // namespace MathUtil + +float MathFloatVectorSum(const std::vector&); + +// Rounds down. 
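
NextPowerOf2 above works by smearing the top set bit into every lower position and then adding one; the initial decrement keeps exact powers of two fixed. A standalone copy for illustration (note that this version, like the quoted one, wraps to 0 for an input of 0):

#include <cstdint>
#include <cstdio>

static uint32_t NextPowerOf2(uint32_t value)
{
    --value;              // so exact powers of two map to themselves
    value |= value >> 1;  // after these ORs, every bit below the top set bit is 1
    value |= value >> 2;
    value |= value >> 4;
    value |= value >> 8;
    value |= value >> 16;
    return ++value;       // +1 carries into the next power of two
}

int main()
{
    printf("%u %u %u\n", NextPowerOf2(3), NextPowerOf2(4), NextPowerOf2(5)); // 4 4 8
    return 0;
}
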
0 -> undefined +inline int IntLog2(u64 val) +{ +#if defined(__GNUC__) + return 63 - __builtin_clzll(val); + +#elif defined(_MSC_VER) + unsigned long result = ULONG_MAX; + _BitScanReverse64(&result, val); + return result; + +#else + int result = -1; + while (val != 0) + { + val >>= 1; + ++result; + } + return result; +#endif +} -- cgit v1.2.3 From a9dd6e30adc590e11e3a076c1245f1b0b48f27f6 Mon Sep 17 00:00:00 2001 From: RSDuck Date: Sat, 25 Apr 2020 19:35:40 +0200 Subject: implement msr and mrs for the x64 JIT --- src/ARMJIT.cpp | 2 +- src/ARMJIT_x64/ARMJIT_Compiler.cpp | 127 ++++++++++++++++++++++++++++++++++++- src/ARMJIT_x64/ARMJIT_Compiler.h | 3 + src/ARM_InstrInfo.cpp | 4 ++ 4 files changed, 134 insertions(+), 2 deletions(-) (limited to 'src/ARM_InstrInfo.cpp') diff --git a/src/ARMJIT.cpp b/src/ARMJIT.cpp index cc8d4ce..46f71f1 100644 --- a/src/ARMJIT.cpp +++ b/src/ARMJIT.cpp @@ -824,7 +824,7 @@ void InvalidateITCM(u32 addr) void InvalidateAll() { - JIT_DEBUGPRINT("invalidating all %x\n", JitBlocks.Length); + JIT_DEBUGPRINT("invalidating all %x\n", JitBlocks.size()); for (auto it : JitBlocks) { JitBlock* block = it.second; diff --git a/src/ARMJIT_x64/ARMJIT_Compiler.cpp b/src/ARMJIT_x64/ARMJIT_Compiler.cpp index 1b2d312..52a16dc 100644 --- a/src/ARMJIT_x64/ARMJIT_Compiler.cpp +++ b/src/ARMJIT_x64/ARMJIT_Compiler.cpp @@ -38,6 +38,131 @@ const int RegisterCache::NativeRegsAvailable = #endif ; +void Compiler::A_Comp_MRS() +{ + Comp_AddCycles_C(); + + OpArg rd = MapReg(CurInstr.A_Reg(12)); + + if (CurInstr.Instr & (1 << 22)) + { + MOV(32, R(RSCRATCH), R(RCPSR)); + AND(32, R(RSCRATCH), Imm8(0x1F)); + XOR(32, R(ABI_PARAM3), R(ABI_PARAM3)); + MOV(32, R(ABI_PARAM2), Imm32(15 - 8)); + CALL(ReadBanked); + MOV(32, rd, R(ABI_PARAM3)); + } + else + MOV(32, rd, R(RCPSR)); +} + +void Compiler::A_Comp_MSR() +{ + Comp_AddCycles_C(); + + OpArg val = CurInstr.Instr & (1 << 25) + ? 
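
IntLog2 has three equivalent paths: a count-leading-zeros builtin on GCC/Clang, _BitScanReverse64 on MSVC, and a shift loop everywhere else. A quick cross-check of the builtin path against the loop fallback (assumes a GCC/Clang host for __builtin_clzll):

#include <cstdint>
#include <cstdio>

static int IntLog2Loop(uint64_t val) // the portable fallback from the header
{
    int result = -1;
    while (val != 0) { val >>= 1; ++result; }
    return result;
}

int main()
{
    for (uint64_t v : {1ull, 2ull, 3ull, 1024ull, (1ull << 63) | 1})
    {
        int a = 63 - __builtin_clzll(v); // GCC/Clang path
        int b = IntLog2Loop(v);
        printf("%llu -> %d %d\n", (unsigned long long)v, a, b); // both columns agree
    }
    return 0;
}
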
Imm32(ROR((CurInstr.Instr & 0xFF), ((CurInstr.Instr >> 7) & 0x1E))) + : MapReg(CurInstr.A_Reg(0)); + + u32 mask = 0; + if (CurInstr.Instr & (1<<16)) mask |= 0x000000FF; + if (CurInstr.Instr & (1<<17)) mask |= 0x0000FF00; + if (CurInstr.Instr & (1<<18)) mask |= 0x00FF0000; + if (CurInstr.Instr & (1<<19)) mask |= 0xFF000000; + + if (CurInstr.Instr & (1 << 22)) + { + MOV(32, R(RSCRATCH), R(RCPSR)); + AND(32, R(RSCRATCH), Imm8(0x1F)); + XOR(32, R(ABI_PARAM3), R(ABI_PARAM3)); + MOV(32, R(ABI_PARAM2), Imm32(15 - 8)); + CALL(ReadBanked); + + MOV(32, R(RSCRATCH2), Imm32(0xFFFFFF00)); + MOV(32, R(RSCRATCH3), Imm32(0xFFFFFFFF)); + MOV(32, R(RSCRATCH), R(RCPSR)); + AND(32, R(RSCRATCH), Imm8(0x1F)); + CMP(32, R(RSCRATCH), Imm8(0x10)); + CMOVcc(32, RSCRATCH2, R(RSCRATCH3), CC_NE); + AND(32, R(RSCRATCH2), Imm32(mask)); + + MOV(32, R(RSCRATCH), R(RSCRATCH2)); + NOT(32, R(RSCRATCH)); + AND(32, R(ABI_PARAM3), R(RSCRATCH)); + + AND(32, R(RSCRATCH2), val); + OR(32, R(ABI_PARAM3), R(RSCRATCH2)); + + MOV(32, R(RSCRATCH), R(RCPSR)); + AND(32, R(RSCRATCH), Imm8(0x1F)); + MOV(32, R(ABI_PARAM2), Imm32(15 - 8)); + CALL(WriteBanked); + } + else + { + mask &= 0xFFFFFFDF; + CPSRDirty = true; + + if ((mask & 0xFF) == 0) + { + AND(32, R(RCPSR), Imm32(~mask)); + if (val.IsImm()) + { + MOV(32, R(RSCRATCH), val); + AND(32, R(RSCRATCH), Imm32(mask)); + OR(32, R(RCPSR), R(RSCRATCH)); + } + else + { + OR(32, R(RCPSR), Imm32(val.Imm32() & mask)); + } + } + else + { + MOV(32, R(RSCRATCH2), Imm32(mask)); + MOV(32, R(RSCRATCH3), R(RSCRATCH2)); + AND(32, R(RSCRATCH3), Imm32(0xFFFFFF00)); + MOV(32, R(RSCRATCH), R(RCPSR)); + AND(32, R(RSCRATCH), Imm8(0x1F)); + CMP(32, R(RSCRATCH), Imm8(0x10)); + CMOVcc(32, RSCRATCH2, R(RSCRATCH3), CC_E); + + MOV(32, R(RSCRATCH3), R(RCPSR)); + + // I need you ANDN + MOV(32, R(RSCRATCH), R(RSCRATCH2)); + NOT(32, R(RSCRATCH)); + AND(32, R(RCPSR), R(RSCRATCH)); + + AND(32, R(RSCRATCH2), val); + OR(32, R(RCPSR), R(RSCRATCH2)); + + BitSet16 hiRegsLoaded(RegCache.LoadedRegs & 0x7F00); + if (Thumb || CurInstr.Cond() >= 0xE) + RegCache.Flush(); + else + { + // the ugly way... 
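
Bits 16-19 of an MSR encoding select which bytes of the PSR (the c/x/s/f fields) the instruction may touch; the mask built above is then narrowed further when the CPU is in user mode. The field decoding in isolation, fed raw bit patterns rather than a full instruction (standalone sketch):

#include <cstdint>
#include <cstdio>

static uint32_t MsrFieldMask(uint32_t instr)
{
    uint32_t mask = 0;
    if (instr & (1 << 16)) mask |= 0x000000FF; // c: control field (mode, I/F/T bits)
    if (instr & (1 << 17)) mask |= 0x0000FF00; // x: extension field
    if (instr & (1 << 18)) mask |= 0x00FF0000; // s: status field
    if (instr & (1 << 19)) mask |= 0xFF000000; // f: flags field (N/Z/C/V)
    return mask;
}

int main()
{
    printf("%08X\n", MsrFieldMask(1u << 19));              // FF000000: flags only
    printf("%08X\n", MsrFieldMask((1u << 16) | (1u << 19))); // FF0000FF: flags + control
    return 0;
}
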
+ // we only save them, to load and save them again + for (int reg : hiRegsLoaded) + SaveReg(reg, RegCache.Mapping[reg]); + } + + MOV(32, R(ABI_PARAM3), R(RCPSR)); + MOV(32, R(ABI_PARAM2), R(RSCRATCH3)); + MOV(64, R(ABI_PARAM1), R(RCPU)); + CALL((void*)&ARM::UpdateMode); + + if (!Thumb && CurInstr.Cond() < 0xE) + { + for (int reg : hiRegsLoaded) + LoadReg(reg, RegCache.Mapping[reg]); + } + } + } +} + /* We'll repurpose this .bss memory @@ -328,7 +453,7 @@ const Compiler::CompileFunc A_Comp[ARMInstrInfo::ak_Count] = // Branch F(A_Comp_BranchImm), F(A_Comp_BranchImm), F(A_Comp_BranchImm), F(A_Comp_BranchXchangeReg), F(A_Comp_BranchXchangeReg), // system stuff - NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, F(A_Comp_MSR), F(A_Comp_MSR), F(A_Comp_MRS), NULL, NULL, NULL, F(Nop) }; diff --git a/src/ARMJIT_x64/ARMJIT_Compiler.h b/src/ARMJIT_x64/ARMJIT_Compiler.h index a448b6d..2230eb8 100644 --- a/src/ARMJIT_x64/ARMJIT_Compiler.h +++ b/src/ARMJIT_x64/ARMJIT_Compiler.h @@ -100,6 +100,9 @@ public: void A_Comp_BranchImm(); void A_Comp_BranchXchangeReg(); + void A_Comp_MRS(); + void A_Comp_MSR(); + void T_Comp_ShiftImm(); void T_Comp_AddSub_(); void T_Comp_ALU_Imm8(); diff --git a/src/ARM_InstrInfo.cpp b/src/ARM_InstrInfo.cpp index b884773..28362d9 100644 --- a/src/ARM_InstrInfo.cpp +++ b/src/ARM_InstrInfo.cpp @@ -427,6 +427,10 @@ Info Decode(bool thumb, u32 num, u32 instr) res.Kind = ak_UNK; } } + if (res.Kind == ak_MRS && !(instr & (1 << 22))) + res.ReadFlags |= flag_N | flag_Z | flag_C | flag_V; + if ((res.Kind == ak_MSR_IMM || res.Kind == ak_MSR_REG) && instr & (1 << 19)) + res.WriteFlags |= flag_N | flag_Z | flag_C | flag_V; if (data & A_Read0) res.SrcRegs |= 1 << (instr & 0xF); -- cgit v1.2.3 From 0f53a34551d60964345debb1766f81ca4686eb17 Mon Sep 17 00:00:00 2001 From: RSDuck Date: Sat, 9 May 2020 00:45:05 +0200 Subject: rewrite JIT memory emulation --- src/ARM.cpp | 10 +- src/ARM.h | 24 +- src/ARMJIT.cpp | 905 +++++++++++++++++++++++++--------- src/ARMJIT.h | 65 ++- src/ARMJIT_A64/ARMJIT_Compiler.cpp | 4 +- src/ARMJIT_Internal.h | 68 ++- src/ARMJIT_RegisterCache.h | 18 +- src/ARMJIT_x64/ARMJIT_Compiler.cpp | 43 +- src/ARMJIT_x64/ARMJIT_Compiler.h | 34 +- src/ARMJIT_x64/ARMJIT_LoadStore.cpp | 935 +++++++++++++++++++----------------- src/ARM_InstrInfo.cpp | 16 +- src/CP15.cpp | 44 +- src/NDS.cpp | 105 +++- src/NDS.h | 8 + 14 files changed, 1465 insertions(+), 814 deletions(-) (limited to 'src/ARM_InstrInfo.cpp') diff --git a/src/ARM.cpp b/src/ARM.cpp index 95d2b8b..205332d 100644 --- a/src/ARM.cpp +++ b/src/ARM.cpp @@ -579,7 +579,8 @@ void ARMv5::ExecuteJIT() while (NDS::ARM9Timestamp < NDS::ARM9Target) { u32 instrAddr = R[15] - ((CPSR&0x20)?2:4); - if (!ARMJIT::IsMapped<0>(instrAddr)) + u32 translatedAddr = ARMJIT::TranslateAddr9(instrAddr); + if (!translatedAddr) { NDS::ARM9Timestamp = NDS::ARM9Target; printf("ARMv5 PC in non executable region %08X\n", R[15]); @@ -589,7 +590,7 @@ void ARMv5::ExecuteJIT() // hack so Cycles <= 0 becomes Cycles < 0 Cycles = NDS::ARM9Target - NDS::ARM9Timestamp - 1; - ARMJIT::JitBlockEntry block = ARMJIT::LookUpBlockEntry(ARMJIT::TranslateAddr<0>(instrAddr)); + ARMJIT::JitBlockEntry block = ARMJIT::LookUpBlockEntry<0>(translatedAddr); if (block) ARM_Dispatch(this, block); else @@ -722,7 +723,8 @@ void ARMv4::ExecuteJIT() while (NDS::ARM7Timestamp < NDS::ARM7Target) { u32 instrAddr = R[15] - ((CPSR&0x20)?2:4); - if (!ARMJIT::IsMapped<1>(instrAddr)) + u32 translatedAddr = ARMJIT::TranslateAddr7(instrAddr); + if (!translatedAddr) { NDS::ARM7Timestamp = 
NDS::ARM7Target; printf("ARMv4 PC in non executable region %08X\n", R[15]); @@ -731,7 +733,7 @@ void ARMv4::ExecuteJIT() Cycles = NDS::ARM7Target - NDS::ARM7Timestamp - 1; - ARMJIT::JitBlockEntry block = ARMJIT::LookUpBlockEntry(ARMJIT::TranslateAddr<1>(instrAddr)); + ARMJIT::JitBlockEntry block = ARMJIT::LookUpBlockEntry<1>(translatedAddr); if (block) ARM_Dispatch(this, block); else diff --git a/src/ARM.h b/src/ARM.h index 4877956..f64b7fe 100644 --- a/src/ARM.h +++ b/src/ARM.h @@ -308,7 +308,7 @@ public: void DataRead8(u32 addr, u32* val) { *val = NDS::ARM7Read8(addr); - DataRegion = addr >> 20; + DataRegion = addr; DataCycles = NDS::ARM7MemTimings[addr >> 15][0]; } @@ -317,7 +317,7 @@ public: addr &= ~1; *val = NDS::ARM7Read16(addr); - DataRegion = addr >> 20; + DataRegion = addr; DataCycles = NDS::ARM7MemTimings[addr >> 15][0]; } @@ -326,7 +326,7 @@ public: addr &= ~3; *val = NDS::ARM7Read32(addr); - DataRegion = addr >> 20; + DataRegion = addr; DataCycles = NDS::ARM7MemTimings[addr >> 15][2]; } @@ -341,7 +341,7 @@ public: void DataWrite8(u32 addr, u8 val) { NDS::ARM7Write8(addr, val); - DataRegion = addr >> 20; + DataRegion = addr; DataCycles = NDS::ARM7MemTimings[addr >> 15][0]; } @@ -350,7 +350,7 @@ public: addr &= ~1; NDS::ARM7Write16(addr, val); - DataRegion = addr >> 20; + DataRegion = addr; DataCycles = NDS::ARM7MemTimings[addr >> 15][0]; } @@ -359,7 +359,7 @@ public: addr &= ~3; NDS::ARM7Write32(addr, val); - DataRegion = addr >> 20; + DataRegion = addr; DataCycles = NDS::ARM7MemTimings[addr >> 15][2]; } @@ -390,7 +390,7 @@ public: s32 numC = NDS::ARM7MemTimings[CodeCycles][(CPSR&0x20)?0:2]; s32 numD = DataCycles; - if ((DataRegion >> 4) == 0x02) // mainRAM + if ((DataRegion >> 24) == 0x02) // mainRAM { if (CodeRegion == 0x02) Cycles -= numC + numD; @@ -417,7 +417,7 @@ public: s32 numC = NDS::ARM7MemTimings[CodeCycles][(CPSR&0x20)?0:2]; s32 numD = DataCycles; - if ((DataRegion >> 4) == 0x02) + if ((DataRegion >> 24) == 0x02) { if (CodeRegion == 0x02) Cycles -= numC + numD; @@ -443,4 +443,12 @@ void T_UNK(ARM* cpu); } +namespace NDS +{ + +extern ARMv5* ARM9; +extern ARMv4* ARM7; + +} + #endif // ARM_H diff --git a/src/ARMJIT.cpp b/src/ARMJIT.cpp index 46f71f1..9602aed 100644 --- a/src/ARMJIT.cpp +++ b/src/ARMJIT.cpp @@ -23,6 +23,7 @@ #include "ARMInterpreter_Branch.h" #include "ARMInterpreter.h" +#include "GPU.h" #include "GPU3D.h" #include "SPU.h" #include "Wifi.h" @@ -34,9 +35,10 @@ namespace ARMJIT #define JIT_DEBUGPRINT(msg, ...) //#define JIT_DEBUGPRINT(msg, ...) 
printf(msg, ## __VA_ARGS__) -Compiler* compiler; +Compiler* JITCompiler; -const u32 ExeMemRegionSizes[] = { +const u32 ExeMemRegionSizes[] = +{ 0x8000, // Unmapped Region (dummy) 0x8000, // ITCM 4*1024*1024, // Main RAM @@ -48,7 +50,8 @@ const u32 ExeMemRegionSizes[] = { 0x40000 // ARM7 WVRAM }; -const u32 ExeMemRegionOffsets[] = { +const u32 ExeMemRegionOffsets[] = +{ 0, 0x8000, 0x10000, @@ -61,65 +64,391 @@ const u32 ExeMemRegionOffsets[] = { 0x518000, }; -#define DUP2(x) x, x - -const static ExeMemKind JIT_MEM[2][32] = { - //arm9 - { - /* 0X*/ DUP2(exeMem_ITCM), - /* 1X*/ DUP2(exeMem_ITCM), // mirror - /* 2X*/ DUP2(exeMem_MainRAM), - /* 3X*/ DUP2(exeMem_SWRAM), - /* 4X*/ DUP2(exeMem_Unmapped), - /* 5X*/ DUP2(exeMem_Unmapped), - /* 6X*/ exeMem_Unmapped, - exeMem_LCDC, // Plain ARM9-CPU Access (LCDC mode) (max 656KB) - /* 7X*/ DUP2(exeMem_Unmapped), - /* 8X*/ DUP2(exeMem_Unmapped), - /* 9X*/ DUP2(exeMem_Unmapped), - /* AX*/ DUP2(exeMem_Unmapped), - /* BX*/ DUP2(exeMem_Unmapped), - /* CX*/ DUP2(exeMem_Unmapped), - /* DX*/ DUP2(exeMem_Unmapped), - /* EX*/ DUP2(exeMem_Unmapped), - /* FX*/ DUP2(exeMem_ARM9_BIOS) - }, - //arm7 - { - /* 0X*/ DUP2(exeMem_ARM7_BIOS), - /* 1X*/ DUP2(exeMem_Unmapped), - /* 2X*/ DUP2(exeMem_MainRAM), - /* 3X*/ exeMem_SWRAM, - exeMem_ARM7_WRAM, - /* 4X*/ DUP2(exeMem_Unmapped), - /* 5X*/ DUP2(exeMem_Unmapped), - /* 6X*/ DUP2(exeMem_ARM7_WVRAM), /* contrary to Gbatek, melonDS and itself, - DeSmuME doesn't mirror the 64 MB region at 0x6800000 */ - /* 7X*/ DUP2(exeMem_Unmapped), - /* 8X*/ DUP2(exeMem_Unmapped), - /* 9X*/ DUP2(exeMem_Unmapped), - /* AX*/ DUP2(exeMem_Unmapped), - /* BX*/ DUP2(exeMem_Unmapped), - /* CX*/ DUP2(exeMem_Unmapped), - /* DX*/ DUP2(exeMem_Unmapped), - /* EX*/ DUP2(exeMem_Unmapped), - /* FX*/ DUP2(exeMem_Unmapped) - } -}; - -#undef DUP2 - /* translates address to pseudo physical address - more compact, eliminates mirroring, everything comes in a row - we only need one translation table */ -u32 AddrTranslate9[0x2000]; -u32 AddrTranslate7[0x4000]; + +u32 TranslateAddr9(u32 addr) +{ + switch (ClassifyAddress9(addr)) + { + case memregion_MainRAM: return ExeMemRegionOffsets[exeMem_MainRAM] + (addr & (MAIN_RAM_SIZE - 1)); + case memregion_SWRAM9: + if (NDS::SWRAM_ARM9) + return ExeMemRegionOffsets[exeMem_SWRAM] + (NDS::SWRAM_ARM9 - NDS::SharedWRAM) + (addr & NDS::SWRAM_ARM9Mask); + else + return 0; + case memregion_ITCM: return ExeMemRegionOffsets[exeMem_ITCM] + (addr & 0x7FFF); + case memregion_VRAM: return (addr >= 0x6800000 && addr < 0x68A4000) ? 
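
The pseudo-physical space is simply every executable region laid end to end, so each entry of ExeMemRegionOffsets is the running sum of the sizes before it and the final sentinel equals ExeMemSpaceSize (0x518000). A sketch that reproduces the offsets; the dummy, ITCM, main-RAM, and WVRAM sizes plus the 0x518000 total appear in this diff, while the remaining sizes (SWRAM, LCDC, the two BIOSes, ARM7 WRAM) are assumptions chosen to make the sums come out right:

#include <cstdint>
#include <cstdio>

int main()
{
    // Unmapped, ITCM, MainRAM, SWRAM, LCDC, ARM9 BIOS, ARM7 BIOS, ARM7 WRAM, ARM7 WVRAM
    const uint32_t sizes[] = {0x8000, 0x8000, 0x400000, 0x8000, 0xA4000,
                              0x8000, 0x4000, 0x10000, 0x40000};
    uint32_t offset = 0;
    for (uint32_t s : sizes)
    {
        printf("%06X\n", offset); // 000000, 008000, 010000, ...
        offset += s;              // each offset is the prefix sum of the sizes
    }
    printf("total %06X\n", offset); // 518000 = ExeMemSpaceSize
    return 0;
}
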
ExeMemRegionOffsets[exeMem_LCDC] + (addr - 0x6800000) : 0; + case memregion_BIOS9: return ExeMemRegionOffsets[exeMem_ARM9_BIOS] + (addr & 0xFFF); + default: return 0; + } +} + +u32 TranslateAddr7(u32 addr) +{ + switch (ClassifyAddress7(addr)) + { + case memregion_MainRAM: return ExeMemRegionOffsets[exeMem_MainRAM] + (addr & (MAIN_RAM_SIZE - 1)); + case memregion_SWRAM7: + if (NDS::SWRAM_ARM7) + return ExeMemRegionOffsets[exeMem_SWRAM] + (NDS::SWRAM_ARM7 - NDS::SharedWRAM) + (addr & NDS::SWRAM_ARM7Mask); + else + return 0; + case memregion_BIOS7: return ExeMemRegionOffsets[exeMem_ARM7_BIOS] + addr; + case memregion_WRAM7: return ExeMemRegionOffsets[exeMem_ARM7_WRAM] + (addr & 0xFFFF); + case memregion_VWRAM: return ExeMemRegionOffsets[exeMem_ARM7_WVRAM] + (addr & 0x1FFFF); + default: return 0; + } +} AddressRange CodeRanges[ExeMemSpaceSize / 512]; -std::unordered_map JitBlocks; +TinyVector InvalidLiterals; + +std::unordered_map JitBlocks9; +std::unordered_map JitBlocks7; + +u8 MemoryStatus9[0x800000]; +u8 MemoryStatus7[0x800000]; + +int ClassifyAddress9(u32 addr) +{ + if (addr < NDS::ARM9->ITCMSize) + return memregion_ITCM; + else if (addr >= NDS::ARM9->DTCMBase && addr < (NDS::ARM9->DTCMBase + NDS::ARM9->DTCMSize)) + return memregion_DTCM; + else if ((addr & 0xFFFFF000) == 0xFFFF0000) + return memregion_BIOS9; + else + { + switch (addr & 0xFF000000) + { + case 0x02000000: + return memregion_MainRAM; + case 0x03000000: + return memregion_SWRAM9; + case 0x04000000: + return memregion_IO9; + case 0x06000000: + return memregion_VRAM; + } + } + return memregion_Other; +} + +int ClassifyAddress7(u32 addr) +{ + if (addr < 0x00004000) + return memregion_BIOS7; + else + { + switch (addr & 0xFF800000) + { + case 0x02000000: + case 0x02800000: + return memregion_MainRAM; + case 0x03000000: + if (NDS::SWRAM_ARM7) + return memregion_SWRAM7; + else + return memregion_WRAM7; + case 0x03800000: + return memregion_WRAM7; + case 0x04000000: + return memregion_IO7; + case 0x04800000: + return memregion_Wifi; + case 0x06000000: + case 0x06800000: + return memregion_VWRAM; + } + } + return memregion_Other; +} + +void UpdateMemoryStatus9(u32 start, u32 end) +{ + start >>= 12; + end >>= 12; + + if (end == 0xFFFFF) + end++; + + for (u32 i = start; i < end; i++) + { + u32 addr = i << 12; + + int region = ClassifyAddress9(addr); + u32 pseudoPhyisical = TranslateAddr9(addr); + + for (u32 j = 0; j < 8; j++) + { + u8 val = region; + if (CodeRanges[(pseudoPhyisical + (j << 12)) / 512].Blocks.Length) + val |= 0x80; + MemoryStatus9[i * 8 + j] = val; + } + } +} + +void UpdateMemoryStatus7(u32 start, u32 end) +{ + start >>= 12; + end >>= 12; + + if (end == 0xFFFFF) + end++; + + for (u32 i = start; i < end; i++) + { + u32 addr = i << 12; + + int region = ClassifyAddress7(addr); + u32 pseudoPhyisical = TranslateAddr7(addr); + + for (u32 j = 0; j < 8; j++) + { + u8 val = region; + if (CodeRanges[(pseudoPhyisical + (j << 12)) / 512].Blocks.Length) + val |= 0x80; + MemoryStatus7[i * 8 + j] = val; + } + } +} + +void UpdateRegionByPseudoPhyiscal(u32 addr, bool invalidate) +{ + for (u32 i = 1; i < exeMem_Count; i++) + { + if (addr >= ExeMemRegionOffsets[i] && addr < ExeMemRegionOffsets[i] + ExeMemRegionSizes[i]) + { + for (u32 num = 0; num < 2; num++) + { + u32 physSize = ExeMemRegionSizes[i]; + u32 mapSize = 0; + u32 mapStart = 0; + switch (i) + { + case exeMem_ITCM: + if (num == 0) + mapStart = 0; mapSize = NDS::ARM9->ITCMSize; + break; + case exeMem_MainRAM: mapStart = 0x2000000; mapSize = 0x1000000; break; + case exeMem_SWRAM: 
+ if (num == 0) + { + if (NDS::SWRAM_ARM9) + mapStart = 0x3000000, mapSize = 0x1000000; + else + mapStart = mapSize = 0; + } + else + { + if (NDS::SWRAM_ARM7) + mapStart = 0x3000000, mapSize = 0x800000; + else + mapStart = mapSize = 0; + } + break; + case exeMem_LCDC: + if (num == 0) + mapStart = 0x6800000, mapSize = 0xA4000; + break; + case exeMem_ARM9_BIOS: + if (num == 0) + mapStart = 0xFFFF0000, mapSize = 0x10000; + break; + case exeMem_ARM7_BIOS: + if (num == 1) + mapStart = 0, mapSize = 0x4000; + break; + case exeMem_ARM7_WRAM: + if (num == 1) + { + if (NDS::SWRAM_ARM7) + mapStart = 0x3800000, mapSize = 0x800000; + else + mapStart = 0x3000000, mapSize = 0x1000000; + } + break; + case exeMem_ARM7_WVRAM: + if (num == 1) + mapStart = 0x6000000, mapSize = 0x1000000; + break; + } + + for (u32 j = 0; j < mapSize / physSize; j++) + { + u32 virtAddr = mapStart + physSize * j + (addr - ExeMemRegionOffsets[i]); + if (num == 0 + && virtAddr >= NDS::ARM9->DTCMBase && virtAddr < (NDS::ARM9->DTCMBase + NDS::ARM9->DTCMSize)) + continue; + if (invalidate) + { + if (num == 0) + MemoryStatus9[virtAddr / 512] |= 0x80; + else + MemoryStatus7[virtAddr / 512] |= 0x80; + } + else + { + if (num == 0) + MemoryStatus9[virtAddr / 512] &= ~0x80; + else + MemoryStatus7[virtAddr / 512] &= ~0x80; + } + } + + } + return; + } + } + + assert(false); +} + +template <typename T> +T SlowRead9(ARMv5* cpu, u32 addr) +{ + u32 offset = addr & 0x3; + addr &= ~(sizeof(T) - 1); + + T val; + if (addr < cpu->ITCMSize) + val = *(T*)&cpu->ITCM[addr & 0x7FFF]; + else if (addr >= cpu->DTCMBase && addr < (cpu->DTCMBase + cpu->DTCMSize)) + val = *(T*)&cpu->DTCM[(addr - cpu->DTCMBase) & 0x3FFF]; + else if (std::is_same<T, u32>::value) + val = NDS::ARM9Read32(addr); + else if (std::is_same<T, u16>::value) + val = NDS::ARM9Read16(addr); + else + val = NDS::ARM9Read8(addr); + + if (std::is_same<T, u32>::value) + return ROR(val, offset << 3); + else + return val; +} + +template <typename T> +void SlowWrite9(ARMv5* cpu, u32 addr, T val) +{ + addr &= ~(sizeof(T) - 1); + + if (addr < cpu->ITCMSize) + { + InvalidateITCMIfNecessary(addr); + *(T*)&cpu->ITCM[addr & 0x7FFF] = val; + } + else if (addr >= cpu->DTCMBase && addr < (cpu->DTCMBase + cpu->DTCMSize)) + { + *(T*)&cpu->DTCM[(addr - cpu->DTCMBase) & 0x3FFF] = val; + } + else if (std::is_same<T, u32>::value) + { + NDS::ARM9Write32(addr, val); + } + else if (std::is_same<T, u16>::value) + { + NDS::ARM9Write16(addr, val); + } + else + { + NDS::ARM9Write8(addr, val); + } +} + +template void SlowWrite9<u32>(ARMv5*, u32, u32); +template void SlowWrite9<u16>(ARMv5*, u32, u16); +template void SlowWrite9<u8>(ARMv5*, u32, u8); + +template u32 SlowRead9<u32>(ARMv5*, u32); +template u16 SlowRead9<u16>(ARMv5*, u32); +template u8 SlowRead9<u8>(ARMv5*, u32); + +template <typename T> +T SlowRead7(u32 addr) +{ + u32 offset = addr & 0x3; + addr &= ~(sizeof(T) - 1); + + T val; + if (std::is_same<T, u32>::value) + val = NDS::ARM7Read32(addr); + else if (std::is_same<T, u16>::value) + val = NDS::ARM7Read16(addr); + else + val = NDS::ARM7Read8(addr); + + if (std::is_same<T, u32>::value) + return ROR(val, offset << 3); + else + return val; +} + +template <typename T> +void SlowWrite7(u32 addr, T val) +{ + addr &= ~(sizeof(T) - 1); + + if (std::is_same<T, u32>::value) + NDS::ARM7Write32(addr, val); + else if (std::is_same<T, u16>::value) + NDS::ARM7Write16(addr, val); + else + NDS::ARM7Write8(addr, val); +} + +template <bool PreInc, bool Write> +void SlowBlockTransfer9(u32 addr, u64* data, u32 num, ARMv5* cpu) +{ + addr &= ~0x3; + for (int i = 0; i < num; i++) + { + addr += PreInc * 4; + if (Write) + SlowWrite9<u32>(cpu, addr, data[i]); + else + data[i] = SlowRead9<u32>(cpu, addr); + addr += !PreInc * 4; +
} +} + +template +void SlowBlockTransfer7(u32 addr, u64* data, u32 num) +{ + addr &= ~0x3; + for (int i = 0; i < num; i++) + { + addr += PreInc * 4; + if (Write) + SlowWrite7(addr, data[i]); + else + data[i] = SlowRead7(addr); + addr += !PreInc * 4; + } +} + +template void SlowWrite7(u32, u32); +template void SlowWrite7(u32, u16); +template void SlowWrite7(u32, u8); + +template u32 SlowRead7(u32); +template u16 SlowRead7(u32); +template u8 SlowRead7(u32); + +template void SlowBlockTransfer9(u32, u64*, u32, ARMv5*); +template void SlowBlockTransfer9(u32, u64*, u32, ARMv5*); +template void SlowBlockTransfer9(u32, u64*, u32, ARMv5*); +template void SlowBlockTransfer9(u32, u64*, u32, ARMv5*); +template void SlowBlockTransfer7(u32 addr, u64* data, u32 num); +template void SlowBlockTransfer7(u32 addr, u64* data, u32 num); +template void SlowBlockTransfer7(u32 addr, u64* data, u32 num); +template void SlowBlockTransfer7(u32 addr, u64* data, u32 num); template struct UnreliableHashTable @@ -211,31 +540,25 @@ struct UnreliableHashTable }; UnreliableHashTable RestoreCandidates; -UnreliableHashTable FastBlockLookUp; +UnreliableHashTable FastBlockLookUp9; +UnreliableHashTable FastBlockLookUp7; void Init() { - for (int i = 0; i < 0x2000; i++) - { - ExeMemKind kind = JIT_MEM[0][i >> 8]; - u32 size = ExeMemRegionSizes[kind]; - - AddrTranslate9[i] = ExeMemRegionOffsets[kind] + ((i << 15) & (size - 1)); - } - for (int i = 0; i < 0x4000; i++) - { - ExeMemKind kind = JIT_MEM[1][i >> 9]; - u32 size = ExeMemRegionSizes[kind]; - - AddrTranslate7[i] = ExeMemRegionOffsets[kind] + ((i << 14) & (size - 1)); - } - - compiler = new Compiler(); + JITCompiler = new Compiler(); } void DeInit() { - delete compiler; + delete JITCompiler; +} + +void Reset() +{ + ResetBlockCache(); + + UpdateMemoryStatus9(0, 0xFFFFFFFF); + UpdateMemoryStatus7(0, 0xFFFFFFFF); } void FloodFillSetFlags(FetchedInstr instrs[], int start, u8 flags) @@ -256,25 +579,31 @@ void FloodFillSetFlags(FetchedInstr instrs[], int start, u8 flags) } } -bool DecodeLiteral(const FetchedInstr& instr, u32& addr) +bool DecodeLiteral(bool thumb, const FetchedInstr& instr, u32& addr) { - switch (instr.Info.Kind) + if (!thumb) { - case ARMInstrInfo::ak_STR_IMM: - case ARMInstrInfo::ak_STRB_IMM: - addr = (instr.Addr + 8) + ((instr.Instr & 0xFFF) * (instr.Instr & (1 << 23) ? 1 : -1)); - return true; - case ARMInstrInfo::ak_STRD_IMM: - case ARMInstrInfo::ak_STRH_IMM: - addr = (instr.Addr + 8) + (((instr.Instr & 0xF00) >> 4 | (instr.Instr & 0xF)) * (instr.Instr & (1 << 23) ? 1 : -1)); - return true; - case ARMInstrInfo::ak_STM: // I honestly hope noone was ever crazy enough to do stm pc, {whatever} - addr = instr.Addr + 8; + switch (instr.Info.Kind) + { + case ARMInstrInfo::ak_LDR_IMM: + case ARMInstrInfo::ak_LDRB_IMM: + addr = (instr.Addr + 8) + ((instr.Instr & 0xFFF) * (instr.Instr & (1 << 23) ? 1 : -1)); + return true; + case ARMInstrInfo::ak_LDRH_IMM: + addr = (instr.Addr + 8) + (((instr.Instr & 0xF00) >> 4 | (instr.Instr & 0xF)) * (instr.Instr & (1 << 23) ? 
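
SlowBlockTransfer9/7 above bake the four LDM/STM walk variants (increment before/after, load/store) into template specializations, so the per-word branches disappear at compile time. The same shape over a plain array instead of the DS bus (illustration only):

#include <cstdint>
#include <cstdio>

static uint32_t mem[8];

template <bool PreInc, bool Write>
void BlockTransfer(uint32_t addr, uint64_t* data, uint32_t num)
{
    addr &= ~0x3;
    for (uint32_t i = 0; i < num; i++)
    {
        addr += PreInc * 4;  // increment-before variant advances first
        if (Write)
            mem[addr / 4] = (uint32_t)data[i];
        else
            data[i] = mem[addr / 4];
        addr += !PreInc * 4; // increment-after variant advances here instead
    }
}

int main()
{
    uint64_t regs[] = {1, 2, 3};
    BlockTransfer<true, true>(0, regs, 3);   // pre-increment store: words 1,2,3
    BlockTransfer<false, true>(16, regs, 3); // post-increment store: words 4,5,6
    for (uint32_t w : mem) printf("%u ", w);
    printf("\n"); // 0 1 2 3 1 2 3 0
    return 0;
}
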
1 : -1)); + return true; + default: + break; + } + } + else if (instr.Info.Kind == ARMInstrInfo::tk_LDR_PCREL) + { + addr = ((instr.Addr + 4) & ~0x2) + ((instr.Instr & 0xFF) << 2); return true; - default: - JIT_DEBUGPRINT("Literal %08x %x not recognised\n", instr.Instr, instr.Addr); - return false; } + + JIT_DEBUGPRINT("Literal %08x %x not recognised %d\n", instr.Instr, instr.Addr, instr.Info.Kind); + return false; } bool DecodeBranch(bool thumb, const FetchedInstr& instr, u32& cond, bool hasLink, u32 lr, bool& link, @@ -453,6 +782,8 @@ InterpreterFunc InterpretTHUMB[ARMInstrInfo::tk_Count] = }; #undef F + +extern u32 literalsPerBlock; void CompileBlock(ARM* cpu) { bool thumb = cpu->CPSR & 0x20; @@ -463,31 +794,33 @@ void CompileBlock(ARM* cpu) Config::JIT_MaxBlockSize = 32; u32 blockAddr = cpu->R[15] - (thumb ? 2 : 4); - if (!(cpu->Num == 0 - ? IsMapped<0>(blockAddr) - : IsMapped<1>(blockAddr))) + u32 pseudoPhysicalAddr = cpu->Num == 0 + ? TranslateAddr9(blockAddr) + : TranslateAddr7(blockAddr); + if (pseudoPhysicalAddr < ExeMemRegionSizes[exeMem_Unmapped]) { printf("Trying to compile a block in unmapped memory: %x\n", blockAddr); } - u32 pseudoPhysicalAddr = cpu->Num == 0 - ? TranslateAddr<0>(blockAddr) - : TranslateAddr<1>(blockAddr); - FetchedInstr instrs[Config::JIT_MaxBlockSize]; int i = 0; u32 r15 = cpu->R[15]; - u32 addresseRanges[32] = {}; + u32 addressRanges[Config::JIT_MaxBlockSize]; + u32 addressMasks[Config::JIT_MaxBlockSize] = {0}; u32 numAddressRanges = 0; + u32 numLiterals = 0; + u32 literalLoadAddrs[Config::JIT_MaxBlockSize]; + // they are going to be hashed + u32 literalValues[Config::JIT_MaxBlockSize]; + u32 instrValues[Config::JIT_MaxBlockSize]; + cpu->FillPipeline(); u32 nextInstr[2] = {cpu->NextInstr[0], cpu->NextInstr[1]}; u32 nextInstrAddr[2] = {blockAddr, r15}; - JIT_DEBUGPRINT("start block %x %08x (%x) (region invalidates %dx)\n", - blockAddr, cpu->CPSR, pseudoPhysicalAddr, - CodeRanges[pseudoPhysicalAddr / 512].TimesInvalidated); + JIT_DEBUGPRINT("start block %x %08x (%x)\n", blockAddr, cpu->CPSR, pseudoPhysicalAddr); u32 lastSegmentStart = blockAddr; u32 lr; @@ -507,23 +840,29 @@ void CompileBlock(ARM* cpu) nextInstrAddr[1] = r15; JIT_DEBUGPRINT("instr %08x %x\n", instrs[i].Instr & (thumb ? 0xFFFF : ~0), instrs[i].Addr); - u32 translatedAddr = (cpu->Num == 0 - ? TranslateAddr<0>(instrs[i].Addr) - : TranslateAddr<1>(instrs[i].Addr)) & ~0x1FF; - if (i == 0 || translatedAddr != addresseRanges[numAddressRanges - 1]) + instrValues[i] = instrs[i].Instr; + + u32 translatedAddr = cpu->Num == 0 + ? 
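
The tk_LDR_PCREL case above implements the Thumb literal-pool addressing rule: the offset is imm8 words from the word-aligned pipeline PC, hence the "& ~0x2". Applied to a concrete halfword (standalone sketch):

#include <cstdint>
#include <cstdio>

static uint32_t ThumbLiteralAddr(uint32_t instrAddr, uint16_t instr)
{
    // (PC + 4) word-aligned, plus imm8 scaled to words
    return ((instrAddr + 4) & ~0x2u) + ((instr & 0xFF) << 2);
}

int main()
{
    // 0x4801 = LDR r0, [PC, #4]; at address 0x02000002 the pipeline PC is
    // 0x02000006, word-aligned to 0x02000004, so the literal sits at 0x02000008.
    printf("%08X\n", ThumbLiteralAddr(0x02000002, 0x4801));
    return 0;
}
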
TranslateAddr9(instrs[i].Addr) + : TranslateAddr7(instrs[i].Addr); + u32 translatedAddrRounded = translatedAddr & ~0x1FF; + if (i == 0 || translatedAddrRounded != addressRanges[numAddressRanges - 1]) { bool returning = false; for (int j = 0; j < numAddressRanges; j++) { - if (addresseRanges[j] == translatedAddr) + if (addressRanges[j] == translatedAddrRounded) { + std::swap(addressRanges[j], addressRanges[numAddressRanges - 1]); + std::swap(addressMasks[j], addressMasks[numAddressRanges - 1]); returning = true; break; } } if (!returning) - addresseRanges[numAddressRanges++] = translatedAddr; + addressRanges[numAddressRanges++] = translatedAddrRounded; } + addressMasks[numAddressRanges - 1] |= 1 << ((translatedAddr & 0x1FF) / 16); if (cpu->Num == 0) { @@ -572,7 +911,8 @@ void CompileBlock(ARM* cpu) u32 icode = ((instrs[i].Instr >> 4) & 0xF) | ((instrs[i].Instr >> 16) & 0xFF0); assert(InterpretARM[instrs[i].Info.Kind] == ARMInterpreter::ARMInstrTable[icode] || instrs[i].Info.Kind == ARMInstrInfo::ak_MOV_REG_LSL_IMM - || instrs[i].Info.Kind == ARMInstrInfo::ak_Nop); + || instrs[i].Info.Kind == ARMInstrInfo::ak_Nop + || instrs[i].Info.Kind == ARMInstrInfo::ak_UNK); if (cpu->CheckCondition(instrs[i].Cond())) InterpretARM[instrs[i].Info.Kind](cpu); else @@ -583,21 +923,26 @@ void CompileBlock(ARM* cpu) instrs[i].DataCycles = cpu->DataCycles; instrs[i].DataRegion = cpu->DataRegion; - if (instrs[i].Info.SpecialKind == ARMInstrInfo::special_WriteMem - && instrs[i].Info.SrcRegs == (1 << 15) - && instrs[i].Info.DstRegs == 0) + u32 literalAddr; + if (Config::JIT_LiteralOptimisations + && instrs[i].Info.SpecialKind == ARMInstrInfo::special_LoadLiteral + && DecodeLiteral(thumb, instrs[i], literalAddr)) { - assert (!thumb); - - u32 addr; - if (DecodeLiteral(instrs[i], addr)) - { - JIT_DEBUGPRINT("pc relative write detected\n"); - u32 translatedAddr = cpu->Num == 0 ? TranslateAddr<0>(addr) : TranslateAddr<1>(addr); - - ARMJIT::InvalidateByAddr(translatedAddr, false); - CodeRanges[translatedAddr / 512].InvalidLiterals |= (1 << ((translatedAddr & 0x1FF) / 16)); - } + u32 translatedAddr = cpu->Num == 0 + ? TranslateAddr9(literalAddr) + : TranslateAddr7(literalAddr); + u32 translatedAddrRounded = translatedAddr & ~0x1FF; + + u32 j = 0; + for (; j < numAddressRanges; j++) + if (addressRanges[j] == translatedAddrRounded) + break; + if (j == numAddressRanges) + addressRanges[numAddressRanges++] = translatedAddrRounded; + addressMasks[j] |= 1 << ((translatedAddr & 0x1FF) / 16); + JIT_DEBUGPRINT("literal loading %08x %08x %08x %08x\n", literalAddr, translatedAddr, addressMasks[j], addressRanges[j]); + cpu->DataRead32(literalAddr, &literalValues[numLiterals]); + literalLoadAddrs[numLiterals++] = translatedAddr; } if (thumb && instrs[i].Info.Kind == ARMInstrInfo::tk_BL_LONG_2 && i > 0 @@ -650,8 +995,8 @@ void CompileBlock(ARM* cpu) else if (hasBranched && !isBackJump && i + 1 < Config::JIT_MaxBlockSize) { u32 targetPseudoPhysical = cpu->Num == 0 - ? TranslateAddr<0>(target) - : TranslateAddr<1>(target); + ? 
TranslateAddr9(target) + : TranslateAddr7(target); if (link) { @@ -688,36 +1033,29 @@ void CompileBlock(ARM* cpu) i++; - bool canCompile = compiler->CanCompile(thumb, instrs[i - 1].Info.Kind); + bool canCompile = JITCompiler->CanCompile(thumb, instrs[i - 1].Info.Kind); bool secondaryFlagReadCond = !canCompile || (instrs[i - 1].BranchFlags & (branch_FollowCondTaken | branch_FollowCondNotTaken)); if (instrs[i - 1].Info.ReadFlags != 0 || secondaryFlagReadCond) FloodFillSetFlags(instrs, i - 2, !secondaryFlagReadCond ? instrs[i - 1].Info.ReadFlags : 0xF); } while(!instrs[i - 1].Info.EndBlock && i < Config::JIT_MaxBlockSize && !cpu->Halted && (!cpu->IRQ || (cpu->CPSR & 0x80))); + u32 literalHash = (u32)XXH3_64bits(literalValues, numLiterals * 4); + u32 instrHash = (u32)XXH3_64bits(instrValues, i * 4); + JitBlock* prevBlock = RestoreCandidates.LookUp(pseudoPhysicalAddr); bool mayRestore = true; if (prevBlock) { RestoreCandidates.Remove(pseudoPhysicalAddr); - if (prevBlock->NumInstrs == i) - { - for (int j = 0; j < i; j++) - { - if (prevBlock->Instrs()[j] != instrs[j].Instr) - { - mayRestore = false; - break; - } - } - } - else - mayRestore = false; - if (prevBlock->NumAddresses == numAddressRanges) + mayRestore = prevBlock->LiteralHash == literalHash && prevBlock->InstrHash == instrHash; + + if (mayRestore && prevBlock->NumAddresses == numAddressRanges) { for (int j = 0; j < numAddressRanges; j++) { - if (prevBlock->AddressRanges()[j] != addresseRanges[j]) + if (prevBlock->AddressRanges()[j] != addressRanges[j] + || prevBlock->AddressMasks()[j] != addressMasks[j]) { mayRestore = false; break; @@ -739,18 +1077,21 @@ void CompileBlock(ARM* cpu) if (prevBlock) delete prevBlock; - block = new JitBlock(i, numAddressRanges); - for (int j = 0; j < i; j++) - block->Instrs()[j] = instrs[j].Instr; + block = new JitBlock(cpu->Num, i, numAddressRanges, numLiterals); + block->LiteralHash = literalHash; + block->InstrHash = instrHash; + for (int j = 0; j < numAddressRanges; j++) + block->AddressRanges()[j] = addressRanges[j]; for (int j = 0; j < numAddressRanges; j++) - block->AddressRanges()[j] = addresseRanges[j]; + block->AddressMasks()[j] = addressMasks[j]; + for (int j = 0; j < numLiterals; j++) + block->Literals()[j] = literalLoadAddrs[j]; - block->StartAddr = blockAddr; block->PseudoPhysicalAddr = pseudoPhysicalAddr; FloodFillSetFlags(instrs, i - 1, 0xF); - block->EntryPoint = compiler->CompileBlock(pseudoPhysicalAddr, cpu, thumb, instrs, i); + block->EntryPoint = JITCompiler->CompileBlock(pseudoPhysicalAddr, cpu, thumb, instrs, i); } else { @@ -760,23 +1101,73 @@ void CompileBlock(ARM* cpu) for (int j = 0; j < numAddressRanges; j++) { - assert(addresseRanges[j] == block->AddressRanges()[j]); - CodeRanges[addresseRanges[j] / 512].Blocks.Add(block); + assert(addressRanges[j] == block->AddressRanges()[j]); + assert(addressMasks[j] == block->AddressMasks()[j]); + assert(addressMasks[j] != 0); + CodeRanges[addressRanges[j] / 512].Code |= addressMasks[j]; + CodeRanges[addressRanges[j] / 512].Blocks.Add(block); + + UpdateRegionByPseudoPhyiscal(addressRanges[j], true); } - JitBlocks[pseudoPhysicalAddr] = block; - FastBlockLookUp.Insert(pseudoPhysicalAddr, compiler->SubEntryOffset(block->EntryPoint)); + if (cpu->Num == 0) + { + JitBlocks9[pseudoPhysicalAddr] = block; + FastBlockLookUp9.Insert(pseudoPhysicalAddr, JITCompiler->SubEntryOffset(block->EntryPoint)); + } + else + { + JitBlocks7[pseudoPhysicalAddr] = block; + FastBlockLookUp7.Insert(pseudoPhysicalAddr, JITCompiler->SubEntryOffset(block->EntryPoint)); + 
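
A block may only be restored from RestoreCandidates if both hashes match: one over the instruction words and one over the literal words the block loaded, since a changed literal silently changes the compiled result. A sketch of that decision, assuming a host that links against xxHash, whose XXH3_64bits the code above calls:

#include <cstdint>
#include <cstdio>
#include "xxhash.h" // assumption: xxHash available, as in the code above

struct CachedBlock { uint32_t instrHash, literalHash; };

static bool MayRestore(const CachedBlock& b, const uint32_t* instrs, size_t n,
                       const uint32_t* lits, size_t m)
{
    return b.instrHash == (uint32_t)XXH3_64bits(instrs, n * 4) &&
           b.literalHash == (uint32_t)XXH3_64bits(lits, m * 4);
}

int main()
{
    uint32_t instrs[] = {0xE3A00001, 0xE12FFF1E}; // mov r0, #1; bx lr
    uint32_t lits[] = {0xDEADBEEF};               // value the block loaded
    CachedBlock b = {(uint32_t)XXH3_64bits(instrs, sizeof(instrs)),
                     (uint32_t)XXH3_64bits(lits, sizeof(lits))};
    printf("%d\n", MayRestore(b, instrs, 2, lits, 1)); // 1: nothing changed
    return 0;
}
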
} } -void InvalidateByAddr(u32 pseudoPhysical, bool mayRestore) +void InvalidateByAddr(u32 pseudoPhysical) { JIT_DEBUGPRINT("invalidating by addr %x\n", pseudoPhysical); AddressRange* range = &CodeRanges[pseudoPhysical / 512]; - int startLength = range->Blocks.Length; - for (int i = 0; i < range->Blocks.Length; i++) + u32 mask = 1 << ((pseudoPhysical & 0x1FF) / 16); + + range->Code = 0; + for (int i = 0; i < range->Blocks.Length;) { - assert(range->Blocks.Length == startLength); JitBlock* block = range->Blocks[i]; + + bool invalidated = false; + u32 mask = 0; + for (int j = 0; j < block->NumAddresses; j++) + { + if (block->AddressRanges()[j] == (pseudoPhysical & ~0x1FF)) + { + mask = block->AddressMasks()[j]; + invalidated = block->AddressMasks()[j] & mask; + break; + } + } + assert(mask); + if (!invalidated) + { + range->Code |= mask; + i++; + continue; + } + range->Blocks.Remove(i); + + bool literalInvalidation = false; + for (int j = 0; j < block->NumLiterals; j++) + { + u32 addr = block->Literals()[j]; + if (addr == pseudoPhysical) + { + if (InvalidLiterals.Find(pseudoPhysical) != -1) + { + InvalidLiterals.Add(pseudoPhysical); + JIT_DEBUGPRINT("found invalid literal %d\n", InvalidLiterals.Length); + } + literalInvalidation = true; + break; + } + } for (int j = 0; j < block->NumAddresses; j++) { u32 addr = block->AddressRanges()[j]; @@ -786,76 +1177,59 @@ void InvalidateByAddr(u32 pseudoPhysical, bool mayRestore) assert(otherRange != range); bool removed = otherRange->Blocks.RemoveByValue(block); assert(removed); + + if (otherRange->Blocks.Length == 0) + { + otherRange->Code = 0; + UpdateRegionByPseudoPhyiscal(addr, false); + } } } for (int j = 0; j < block->NumLinks(); j++) - compiler->UnlinkBlock(block->Links()[j]); + JITCompiler->UnlinkBlock(block->Links()[j]); + block->ResetLinks(); - JitBlocks.erase(block->PseudoPhysicalAddr); - FastBlockLookUp.Remove(block->PseudoPhysicalAddr); + if (block->Num == 0) + { + JitBlocks9.erase(block->PseudoPhysicalAddr); + FastBlockLookUp9.Remove(block->PseudoPhysicalAddr); + } + else + { + JitBlocks7.erase(block->PseudoPhysicalAddr); + FastBlockLookUp7.Remove(block->PseudoPhysicalAddr); + } - if (mayRestore) + if (!literalInvalidation) { JitBlock* prevBlock = RestoreCandidates.Insert(block->PseudoPhysicalAddr, block); if (prevBlock) delete prevBlock; } + else + { + delete block; + } } - if ((range->TimesInvalidated + 1) > range->TimesInvalidated) - range->TimesInvalidated++; - - range->Blocks.Clear(); -} -void InvalidateByAddr7(u32 addr) -{ - u32 pseudoPhysical = TranslateAddr<1>(addr); - if (__builtin_expect(CodeRanges[pseudoPhysical / 512].Blocks.Length > 0, false)) - InvalidateByAddr(pseudoPhysical); + if (range->Blocks.Length == 0) + UpdateRegionByPseudoPhyiscal(pseudoPhysical, false); } -void InvalidateITCM(u32 addr) +void InvalidateRegionIfNecessary(u32 pseudoPhyisical) { - u32 pseudoPhysical = addr + ExeMemRegionOffsets[exeMem_ITCM]; - if (CodeRanges[pseudoPhysical / 512].Blocks.Length > 0) - InvalidateByAddr(pseudoPhysical); -} - -void InvalidateAll() -{ - JIT_DEBUGPRINT("invalidating all %x\n", JitBlocks.size()); - for (auto it : JitBlocks) - { - JitBlock* block = it.second; - - FastBlockLookUp.Remove(block->PseudoPhysicalAddr); - - for (int i = 0; i < block->NumAddresses; i++) - { - u32 addr = block->AddressRanges()[i]; - AddressRange* range = &CodeRanges[addr / 512]; - range->Blocks.Clear(); - if (range->TimesInvalidated + 1 > range->TimesInvalidated) - range->TimesInvalidated++; - } - for (int i = 0; i < block->NumLinks(); i++) - 
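
The invalidation above is two-level: a block registers itself with each 512-byte AddressRange it spans plus a 32-bit mask holding one bit per 16-byte sub-block, so a write only invalidates blocks whose mask bit it actually hits. The mask arithmetic in isolation (standalone sketch):

#include <cstdint>
#include <cstdio>

static uint32_t SubBlockBit(uint32_t pseudoPhysical)
{
    // 512-byte range, 16-byte granularity -> 32 mask bits
    return 1u << ((pseudoPhysical & 0x1FF) / 16);
}

int main()
{
    uint32_t rangeCode = 0;
    rangeCode |= SubBlockBit(0x10000); // block covers 0x10000-0x1000F -> bit 0
    rangeCode |= SubBlockBit(0x10130); // and 0x10130-0x1013F -> bit 19

    // A write to 0x10020 lands in the same 512-byte range but in an untouched
    // sub-block, so no invalidation is needed:
    printf("%d\n", (rangeCode & SubBlockBit(0x10020)) != 0); // 0
    printf("%d\n", (rangeCode & SubBlockBit(0x10134)) != 0); // 1
    return 0;
}
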
compiler->UnlinkBlock(block->Links()[i]); - block->ResetLinks(); - - JitBlock* prevBlock = RestoreCandidates.Insert(block->PseudoPhysicalAddr, block); - if (prevBlock) - delete prevBlock; - } - - JitBlocks.clear(); + if (CodeRanges[pseudoPhyisical / 512].Code & (1 << ((pseudoPhyisical & 0x1FF) / 16))) + InvalidateByAddr(pseudoPhyisical); } void ResetBlockCache() { printf("Resetting JIT block cache...\n"); - FastBlockLookUp.Reset(); + InvalidLiterals.Clear(); + FastBlockLookUp9.Reset(); + FastBlockLookUp7.Reset(); RestoreCandidates.Reset(); for (int i = 0; i < sizeof(RestoreCandidates.Table)/sizeof(RestoreCandidates.Table[0]); i++) { @@ -870,61 +1244,119 @@ void ResetBlockCache() RestoreCandidates.Table[i].ValB = NULL; } } - for (auto it : JitBlocks) + for (auto it : JitBlocks9) { JitBlock* block = it.second; for (int j = 0; j < block->NumAddresses; j++) { u32 addr = block->AddressRanges()[j]; CodeRanges[addr / 512].Blocks.Clear(); - CodeRanges[addr / 512].TimesInvalidated = 0; - CodeRanges[addr / 512].InvalidLiterals = 0; + CodeRanges[addr / 512].Code = 0; } delete block; } - JitBlocks.clear(); + for (auto it : JitBlocks7) + { + JitBlock* block = it.second; + for (int j = 0; j < block->NumAddresses; j++) + { + u32 addr = block->AddressRanges()[j]; + CodeRanges[addr / 512].Blocks.Clear(); + CodeRanges[addr / 512].Code = 0; + } + } + JitBlocks9.clear(); + JitBlocks7.clear(); - compiler->Reset(); + JITCompiler->Reset(); } +template JitBlockEntry LookUpBlockEntry(u32 addr) { - u32 entryOffset = FastBlockLookUp.LookUp(addr); + auto& fastMap = Num == 0 ? FastBlockLookUp9 : FastBlockLookUp7; + u32 entryOffset = fastMap.LookUp(addr); if (entryOffset != UINT32_MAX) - return compiler->AddEntryOffset(entryOffset); + return JITCompiler->AddEntryOffset(entryOffset); - auto block = JitBlocks.find(addr); - if (block != JitBlocks.end()) + auto& slowMap = Num == 0 ? JitBlocks9 : JitBlocks7; + auto block = slowMap.find(addr); + if (block != slowMap.end()) { - FastBlockLookUp.Insert(addr, compiler->SubEntryOffset(block->second->EntryPoint)); + fastMap.Insert(addr, JITCompiler->SubEntryOffset(block->second->EntryPoint)); return block->second->EntryPoint; } return NULL; } +template JitBlockEntry LookUpBlockEntry<0>(u32); +template JitBlockEntry LookUpBlockEntry<1>(u32); + template void LinkBlock(ARM* cpu, u32 codeOffset) { - u32 targetPseudoPhys = TranslateAddr(cpu->R[15] - ((cpu->CPSR&0x20)?2:4)); - auto block = JitBlocks.find(targetPseudoPhys); - if (block == JitBlocks.end()) + auto& blockMap = Num == 0 ? JitBlocks9 : JitBlocks7; + u32 instrAddr = cpu->R[15] - ((cpu->CPSR&0x20)?2:4); + u32 targetPseudoPhys = Num == 0 ? 
TranslateAddr9(instrAddr) : TranslateAddr7(instrAddr); + auto block = blockMap.find(targetPseudoPhys); + if (block == blockMap.end()) { CompileBlock(cpu); - block = JitBlocks.find(targetPseudoPhys); + block = blockMap.find(targetPseudoPhys); } JIT_DEBUGPRINT("linking to block %08x\n", targetPseudoPhys); block->second->AddLink(codeOffset); - compiler->LinkBlock(codeOffset, block->second->EntryPoint); + JITCompiler->LinkBlock(codeOffset, block->second->EntryPoint); +} + +template void LinkBlock<0>(ARM*, u32); +template void LinkBlock<1>(ARM*, u32); + +void WifiWrite32(u32 addr, u32 val) +{ + Wifi::Write(addr, val & 0xFFFF); + Wifi::Write(addr + 2, val >> 16); +} + +u32 WifiRead32(u32 addr) +{ + return Wifi::Read(addr) | (Wifi::Read(addr + 2) << 16); +} + +template +void VRAMWrite(u32 addr, T val) +{ + switch (addr & 0x00E00000) + { + case 0x00000000: GPU::WriteVRAM_ABG(addr, val); return; + case 0x00200000: GPU::WriteVRAM_BBG(addr, val); return; + case 0x00400000: GPU::WriteVRAM_AOBJ(addr, val); return; + case 0x00600000: GPU::WriteVRAM_BOBJ(addr, val); return; + default: GPU::WriteVRAM_LCDC(addr, val); return; + } +} +template +T VRAMRead(u32 addr) +{ + switch (addr & 0x00E00000) + { + case 0x00000000: return GPU::ReadVRAM_ABG(addr); + case 0x00200000: return GPU::ReadVRAM_BBG(addr); + case 0x00400000: return GPU::ReadVRAM_AOBJ(addr); + case 0x00600000: return GPU::ReadVRAM_BOBJ(addr); + default: return GPU::ReadVRAM_LCDC(addr); + } } void* GetFuncForAddr(ARM* cpu, u32 addr, bool store, int size) { if (cpu->Num == 0) { - if ((addr & 0xFF000000) == 0x04000000) + switch (addr & 0xFF000000) { + case 0x04000000: if (!store && size == 32 && addr == 0x04100010 && NDS::ExMemCnt[0] & (1<<11)) return (void*)NDSCart::ReadROMData; @@ -949,13 +1381,25 @@ void* GetFuncForAddr(ARM* cpu, u32 addr, bool store, int size) switch (size | store) { - case 8: return (void*)NDS::ARM9IORead8; - case 9: return (void*)NDS::ARM9IOWrite8; + case 8: return (void*)NDS::ARM9IORead8; + case 9: return (void*)NDS::ARM9IOWrite8; case 16: return (void*)NDS::ARM9IORead16; case 17: return (void*)NDS::ARM9IOWrite16; case 32: return (void*)NDS::ARM9IORead32; case 33: return (void*)NDS::ARM9IOWrite32; } + break; + case 0x06000000: + switch (size | store) + { + case 8: return (void*)VRAMRead; + case 9: return NULL; + case 16: return (void*)VRAMRead; + case 17: return (void*)VRAMWrite; + case 32: return (void*)VRAMRead; + case 33: return (void*)VRAMWrite; + } + break; } } else @@ -987,20 +1431,31 @@ void* GetFuncForAddr(ARM* cpu, u32 addr, bool store, int size) } break; case 0x04800000: - if (addr < 0x04810000 && size == 16) + if (addr < 0x04810000 && size >= 16) { - if (store) - return (void*)Wifi::Write; - else - return (void*)Wifi::Read; + switch (size | store) + { + case 16: return (void*)Wifi::Read; + case 17: return (void*)Wifi::Write; + case 32: return (void*)WifiRead32; + case 33: return (void*)WifiWrite32; + } } break; + case 0x06000000: + case 0x06800000: + switch (size | store) + { + case 8: return (void*)GPU::ReadVRAM_ARM7; + case 9: return (void*)GPU::WriteVRAM_ARM7; + case 16: return (void*)GPU::ReadVRAM_ARM7; + case 17: return (void*)GPU::WriteVRAM_ARM7; + case 32: return (void*)GPU::ReadVRAM_ARM7; + case 33: return (void*)GPU::WriteVRAM_ARM7; + } } } return NULL; } } - -template void ARMJIT::LinkBlock<0>(ARM*, u32); -template void ARMJIT::LinkBlock<1>(ARM*, u32); diff --git a/src/ARMJIT.h b/src/ARMJIT.h index cab385f..44a6140 100644 --- a/src/ARMJIT.h +++ b/src/ARMJIT.h @@ -28,45 +28,60 @@ extern const u32 
ExeMemRegionSizes[]; typedef u32 (*JitBlockEntry)(); -extern u32 AddrTranslate9[0x2000]; -extern u32 AddrTranslate7[0x4000]; - const u32 ExeMemSpaceSize = 0x518000; // I hate you C++, sometimes I really hate you... -template -inline bool IsMapped(u32 addr) -{ - if (num == 0) - return AddrTranslate9[(addr & 0xFFFFFFF) >> 15] >= ExeMemRegionSizes[exeMem_Unmapped]; - else - return AddrTranslate7[(addr & 0xFFFFFFF) >> 14] >= ExeMemRegionSizes[exeMem_Unmapped]; -} - -template -inline u32 TranslateAddr(u32 addr) -{ - if (num == 0) - return AddrTranslate9[(addr & 0xFFFFFFF) >> 15] + (addr & 0x7FFF); - else - return AddrTranslate7[(addr & 0xFFFFFFF) >> 14] + (addr & 0x3FFF); -} +u32 TranslateAddr9(u32 addr); +u32 TranslateAddr7(u32 addr); +template JitBlockEntry LookUpBlockEntry(u32 addr); - void Init(); void DeInit(); -void InvalidateByAddr(u32 pseudoPhysical, bool mayRestore = true); -void InvalidateAll(); +void Reset(); + +void InvalidateByAddr(u32 pseudoPhysical); + +void InvalidateRegionIfNecessary(u32 addr); -void InvalidateITCM(u32 addr); -void InvalidateByAddr7(u32 addr); +inline void InvalidateMainRAMIfNecessary(u32 addr) +{ + InvalidateRegionIfNecessary(ExeMemRegionOffsets[exeMem_MainRAM] + (addr & (MAIN_RAM_SIZE - 1))); +} +inline void InvalidateITCMIfNecessary(u32 addr) +{ + InvalidateRegionIfNecessary(ExeMemRegionOffsets[exeMem_ITCM] + (addr & 0x7FFF)); +} +inline void InvalidateLCDCIfNecessary(u32 addr) +{ + if (addr < 0x68A3FFF) + InvalidateRegionIfNecessary(ExeMemRegionOffsets[exeMem_LCDC] + (addr - 0x6800000)); +} +inline void InvalidateSWRAM7IfNecessary(u32 addr) +{ + InvalidateRegionIfNecessary(ExeMemRegionOffsets[exeMem_SWRAM] + (NDS::SWRAM_ARM7 - NDS::SharedWRAM) + (addr & NDS::SWRAM_ARM7Mask)); +} +inline void InvalidateSWRAM9IfNecessary(u32 addr) +{ + InvalidateRegionIfNecessary(ExeMemRegionOffsets[exeMem_SWRAM] + (NDS::SWRAM_ARM9 - NDS::SharedWRAM) + (addr & NDS::SWRAM_ARM9Mask)); +} +inline void InvalidateARM7WRAMIfNecessary(u32 addr) +{ + InvalidateRegionIfNecessary(ExeMemRegionOffsets[exeMem_ARM7_WRAM] + (addr & 0xFFFF)); +} +inline void InvalidateARM7WVRAMIfNecessary(u32 addr) +{ + InvalidateRegionIfNecessary(ExeMemRegionOffsets[exeMem_ARM7_WVRAM] + (addr & 0x1FFFF)); +} void CompileBlock(ARM* cpu); void ResetBlockCache(); +void UpdateMemoryStatus9(u32 start, u32 end); +void UpdateMemoryStatus7(u32 start, u32 end); + } extern "C" void ARM_Dispatch(ARM* cpu, ARMJIT::JitBlockEntry entry); diff --git a/src/ARMJIT_A64/ARMJIT_Compiler.cpp b/src/ARMJIT_A64/ARMJIT_Compiler.cpp index 00fa436..a67f357 100644 --- a/src/ARMJIT_A64/ARMJIT_Compiler.cpp +++ b/src/ARMJIT_A64/ARMJIT_Compiler.cpp @@ -650,7 +650,7 @@ void Compiler::Comp_AddCycles_CDI() s32 numC = NDS::ARM7MemTimings[CurInstr.CodeCycles][Thumb ? 0 : 2]; s32 numD = CurInstr.DataCycles; - if ((CurInstr.DataRegion >> 4) == 0x02) // mainRAM + if ((CurInstr.DataRegion >> 24) == 0x02) // mainRAM { if (CodeRegion == 0x02) cycles = numC + numD; @@ -695,7 +695,7 @@ void Compiler::Comp_AddCycles_CD() s32 numC = NDS::ARM7MemTimings[CurInstr.CodeCycles][Thumb ? 
0 : 2]; s32 numD = CurInstr.DataCycles; - if ((CurInstr.DataRegion >> 4) == 0x02) + if ((CurInstr.DataRegion >> 24) == 0x02) { if (CodeRegion == 0x02) cycles += numC + numD; diff --git a/src/ARMJIT_Internal.h b/src/ARMJIT_Internal.h index 66d1808..4e45760 100644 --- a/src/ARMJIT_Internal.h +++ b/src/ARMJIT_Internal.h @@ -152,30 +152,34 @@ struct __attribute__((packed)) TinyVector class JitBlock { public: - JitBlock(u32 numInstrs, u32 numAddresses) + JitBlock(u32 num, u32 literalHash, u32 numAddresses, u32 numLiterals) { - NumInstrs = numInstrs; + Num = num; NumAddresses = numAddresses; - Data.SetLength(numInstrs + numAddresses); + NumLiterals = numLiterals; + Data.SetLength(numAddresses * 2 + numLiterals); } - u32 StartAddr; u32 PseudoPhysicalAddr; - - u32 NumInstrs; - u32 NumAddresses; + + u32 InstrHash, LiteralHash; + u8 Num; + u16 NumAddresses; + u16 NumLiterals; JitBlockEntry EntryPoint; - u32* Instrs() - { return &Data[0]; } u32* AddressRanges() - { return &Data[NumInstrs]; } + { return &Data[0]; } + u32* AddressMasks() + { return &Data[NumAddresses]; } + u32* Literals() + { return &Data[NumAddresses * 2]; } u32* Links() - { return &Data[NumInstrs + NumAddresses]; } + { return &Data[NumAddresses * 2 + NumLiterals]; } u32 NumLinks() - { return Data.Length - NumInstrs - NumAddresses; } + { return Data.Length - NumAddresses * 2 - NumLiterals; } void AddLink(u32 link) { @@ -184,7 +188,7 @@ public: void ResetLinks() { - Data.SetLength(NumInstrs + NumAddresses); + Data.SetLength(NumAddresses * 2 + NumLiterals); } private: @@ -200,8 +204,7 @@ private: struct __attribute__((packed)) AddressRange { TinyVector Blocks; - u16 InvalidLiterals; - u16 TimesInvalidated; + u32 Code; }; extern AddressRange CodeRanges[ExeMemSpaceSize / 512]; @@ -210,14 +213,45 @@ typedef void (*InterpreterFunc)(ARM* cpu); extern InterpreterFunc InterpretARM[]; extern InterpreterFunc InterpretTHUMB[]; -extern u8 MemRegion9[0x80000]; -extern u8 MemRegion7[0x80000]; +extern u8 MemoryStatus9[0x800000]; +extern u8 MemoryStatus7[0x800000]; + +extern TinyVector InvalidLiterals; void* GetFuncForAddr(ARM* cpu, u32 addr, bool store, int size); template void LinkBlock(ARM* cpu, u32 codeOffset); +enum +{ + memregion_Other = 0, + memregion_ITCM, + memregion_DTCM, + memregion_BIOS9, + memregion_MainRAM, + memregion_SWRAM9, + memregion_SWRAM7, + memregion_IO9, + memregion_VRAM, + memregion_BIOS7, + memregion_WRAM7, + memregion_IO7, + memregion_Wifi, + memregion_VWRAM, +}; + +int ClassifyAddress9(u32 addr); +int ClassifyAddress7(u32 addr); + +template T SlowRead9(ARMv5* cpu, u32 addr); +template void SlowWrite9(ARMv5* cpu, u32 addr, T val); +template T SlowRead7(u32 addr); +template void SlowWrite7(u32 addr, T val); + +template void SlowBlockTransfer9(u32 addr, u64* data, u32 num, ARMv5* cpu); +template void SlowBlockTransfer7(u32 addr, u64* data, u32 num); + } #endif \ No newline at end of file diff --git a/src/ARMJIT_RegisterCache.h b/src/ARMJIT_RegisterCache.h index 5e18e84..0547c84 100644 --- a/src/ARMJIT_RegisterCache.h +++ b/src/ARMJIT_RegisterCache.h @@ -95,20 +95,6 @@ public: LiteralsLoaded = 0; } - BitSet32 GetPushRegs() - { - BitSet16 used; - for (int i = 0; i < InstrsCount; i++) - used |= BitSet16(Instrs[i].Info.SrcRegs | Instrs[i].Info.DstRegs); - - BitSet32 res; - u32 registersMax = std::min((int)used.Count(), NativeRegsAvailable); - for (int i = 0; i < registersMax; i++) - res |= BitSet32(1 << (int)NativeRegAllocOrder[i]); - - return res; - } - void Prepare(bool thumb, int i) { FetchedInstr instr = Instrs[i]; @@ -139,7 
+125,6 @@ public: UnloadRegister(reg); u16 necessaryRegs = ((instr.Info.SrcRegs & PCAllocatableAsSrc) | instr.Info.DstRegs) & ~instr.Info.NotStrictlyNeeded; - u16 writeRegs = instr.Info.DstRegs & ~instr.Info.NotStrictlyNeeded; BitSet16 needToBeLoaded(necessaryRegs & ~LoadedRegs); if (needToBeLoaded != BitSet16(0)) { @@ -182,13 +167,12 @@ public: if (left-- == 0) break; - writeRegs |= (1 << reg) & instr.Info.DstRegs; LoadRegister(reg, !(thumb || instr.Cond() >= 0xE) || (1 << reg) & instr.Info.SrcRegs); } } } - DirtyRegs |= writeRegs & ~(1 << 15); + DirtyRegs |= (LoadedRegs & instr.Info.DstRegs) & ~(1 << 15); } static const Reg NativeRegAllocOrder[]; diff --git a/src/ARMJIT_x64/ARMJIT_Compiler.cpp b/src/ARMJIT_x64/ARMJIT_Compiler.cpp index dd20e3c..eee2e0f 100644 --- a/src/ARMJIT_x64/ARMJIT_Compiler.cpp +++ b/src/ARMJIT_x64/ARMJIT_Compiler.cpp @@ -195,26 +195,6 @@ Compiler::Compiler() Reset(); - for (int i = 0; i < 3; i++) - { - for (int j = 0; j < 2; j++) - MemoryFuncs9[i][j] = Gen_MemoryRoutine9(j, 8 << i); - } - MemoryFuncs7[0][0] = (void*)NDS::ARM7Read8; - MemoryFuncs7[0][1] = (void*)NDS::ARM7Write8; - MemoryFuncs7[1][0] = (void*)NDS::ARM7Read16; - MemoryFuncs7[1][1] = (void*)NDS::ARM7Write16; - MemoryFuncs7[2][0] = (void*)NDS::ARM7Read32; - MemoryFuncs7[2][1] = (void*)NDS::ARM7Write32; - - for (int i = 0; i < 2; i++) - for (int j = 0; j < 2; j++) - { - MemoryFuncsSeq9[i][j] = Gen_MemoryRoutineSeq9(i, j); - MemoryFuncsSeq7[i][j][0] = Gen_MemoryRoutineSeq7(i, j, false); - MemoryFuncsSeq7[i][j][1] = Gen_MemoryRoutineSeq7(i, j, true); - } - { // RSCRATCH mode // RSCRATCH2 reg number @@ -317,6 +297,12 @@ Compiler::Compiler() // move the region forward to prevent overwriting the generated functions CodeMemSize -= GetWritableCodePtr() - ResetStart; ResetStart = GetWritableCodePtr(); + + NearStart = ResetStart; + FarStart = ResetStart + 1024*1024*24; + + NearSize = FarStart - ResetStart; + FarSize = (ResetStart + CodeMemSize) - FarStart; } void Compiler::LoadCPSR() @@ -504,6 +490,9 @@ void Compiler::Reset() { memset(ResetStart, 0xcc, CodeMemSize); SetCodePtr(ResetStart); + + NearCode = NearStart; + FarCode = FarStart; } void Compiler::Comp_SpecialBranchBehaviour(bool taken) @@ -544,8 +533,16 @@ void Compiler::Comp_SpecialBranchBehaviour(bool taken) JitBlockEntry Compiler::CompileBlock(u32 translatedAddr, ARM* cpu, bool thumb, FetchedInstr instrs[], int instrsCount) { - if (CodeMemSize - (GetWritableCodePtr() - ResetStart) < 1024 * 32) // guess... + if (NearSize - (NearCode - NearStart) < 1024 * 32) // guess... + { + printf("near reset\n"); + ResetBlockCache(); + } + if (FarSize - (FarCode - FarStart) < 1024 * 32) // guess... + { + printf("far reset\n"); ResetBlockCache(); + } ConstantCycles = 0; Thumb = thumb; @@ -762,12 +759,14 @@ void Compiler::Comp_AddCycles_CDI() Comp_AddCycles_CD(); else { + IrregularCycles = true; + s32 cycles; s32 numC = NDS::ARM7MemTimings[CurInstr.CodeCycles][Thumb ? 
0 : 2]; s32 numD = CurInstr.DataCycles; - if ((CurInstr.DataRegion >> 4) == 0x02) // mainRAM + if ((CurInstr.DataRegion >> 24) == 0x02) // mainRAM { if (CodeRegion == 0x02) cycles = numC + numD; diff --git a/src/ARMJIT_x64/ARMJIT_Compiler.h b/src/ARMJIT_x64/ARMJIT_Compiler.h index e0a4978..9df218b 100644 --- a/src/ARMJIT_x64/ARMJIT_Compiler.h +++ b/src/ARMJIT_x64/ARMJIT_Compiler.h @@ -140,7 +140,7 @@ public: }; void Comp_MemAccess(int rd, int rn, const ComplexOperand& op2, int size, int flags); s32 Comp_MemAccessBlock(int rn, BitSet16 regs, bool store, bool preinc, bool decrement, bool usermode); - void Comp_MemLoadLiteral(int size, int rd, u32 addr); + bool Comp_MemLoadLiteral(int size, int rd, u32 addr); void Comp_ArithTriOp(void (Compiler::*op)(int, const Gen::OpArg&, const Gen::OpArg&), Gen::OpArg rd, Gen::OpArg rn, Gen::OpArg op2, bool carryUsed, int opFlags); @@ -154,12 +154,6 @@ public: void Comp_SpecialBranchBehaviour(bool taken); - void* Gen_MemoryRoutine9(bool store, int size); - - void* Gen_MemoryRoutineSeq9(bool store, bool preinc); - void* Gen_MemoryRoutineSeq7(bool store, bool preinc, bool codeMainRAM); - - void* Gen_ChangeCPSRRoutine(); Gen::OpArg Comp_RegShiftImm(int op, int amount, Gen::OpArg rm, bool S, bool& carryUsed); Gen::OpArg Comp_RegShiftReg(int op, Gen::OpArg rs, Gen::OpArg rm, bool S, bool& carryUsed); @@ -193,6 +187,26 @@ public: return (u8*)entry - ResetStart; } + void SwitchToNearCode() + { + FarCode = GetWritableCodePtr(); + SetCodePtr(NearCode); + } + + void SwitchToFarCode() + { + NearCode = GetWritableCodePtr(); + SetCodePtr(FarCode); + } + + u8* FarCode; + u8* NearCode; + u32 FarSize; + u32 NearSize; + + u8* NearStart; + u8* FarStart; + u8* ResetStart; u32 CodeMemSize; @@ -201,12 +215,6 @@ public: void* BranchStub[2]; - void* MemoryFuncs9[3][2]; - void* MemoryFuncs7[3][2]; - - void* MemoryFuncsSeq9[2][2]; - void* MemoryFuncsSeq7[2][2][2]; - void* ReadBanked; void* WriteBanked; diff --git a/src/ARMJIT_x64/ARMJIT_LoadStore.cpp b/src/ARMJIT_x64/ARMJIT_LoadStore.cpp index b595e32..c13b779 100644 --- a/src/ARMJIT_x64/ARMJIT_LoadStore.cpp +++ b/src/ARMJIT_x64/ARMJIT_LoadStore.cpp @@ -25,236 +25,17 @@ int squeezePointer(T* ptr) improvement. */ -/* - address - ABI_PARAM1 (a.k.a. ECX = RSCRATCH3 on Windows) - store value - ABI_PARAM2 (a.k.a. RDX = RSCRATCH2 on Windows) -*/ -void* Compiler::Gen_MemoryRoutine9(bool store, int size) +bool Compiler::Comp_MemLoadLiteral(int size, int rd, u32 addr) { - u32 addressMask = ~(size == 32 ? 3 : (size == 16 ? 
1 : 0)); - AlignCode4(); - void* res = GetWritableCodePtr(); - - MOV(32, R(RSCRATCH), R(ABI_PARAM1)); - SUB(32, R(RSCRATCH), MDisp(RCPU, offsetof(ARMv5, DTCMBase))); - CMP(32, R(RSCRATCH), MDisp(RCPU, offsetof(ARMv5, DTCMSize))); - FixupBranch insideDTCM = J_CC(CC_B); - - CMP(32, R(ABI_PARAM1), MDisp(RCPU, offsetof(ARMv5, ITCMSize))); - FixupBranch insideITCM = J_CC(CC_B); - - if (store) - { - if (size > 8) - AND(32, R(ABI_PARAM1), Imm32(addressMask)); - switch (size) - { - case 32: JMP((u8*)NDS::ARM9Write32, true); break; - case 16: JMP((u8*)NDS::ARM9Write16, true); break; - case 8: JMP((u8*)NDS::ARM9Write8, true); break; - } - } - else - { - if (size == 32) - { - ABI_PushRegistersAndAdjustStack({ABI_PARAM1}, 8); - AND(32, R(ABI_PARAM1), Imm32(addressMask)); - // everything's already in the appropriate register - ABI_CallFunction(NDS::ARM9Read32); - ABI_PopRegistersAndAdjustStack({ECX}, 8); - AND(32, R(ECX), Imm8(3)); - SHL(32, R(ECX), Imm8(3)); - ROR_(32, R(RSCRATCH), R(ECX)); - RET(); - } - else if (size == 16) - { - AND(32, R(ABI_PARAM1), Imm32(addressMask)); - JMP((u8*)NDS::ARM9Read16, true); - } - else - JMP((u8*)NDS::ARM9Read8, true); - } - - SetJumpTarget(insideDTCM); - AND(32, R(RSCRATCH), Imm32(0x3FFF & addressMask)); - if (store) - MOV(size, MComplex(RCPU, RSCRATCH, SCALE_1, offsetof(ARMv5, DTCM)), R(ABI_PARAM2)); - else - { - MOVZX(32, size, RSCRATCH, MComplex(RCPU, RSCRATCH, SCALE_1, offsetof(ARMv5, DTCM))); - if (size == 32) - { - if (ABI_PARAM1 != ECX) - MOV(32, R(ECX), R(ABI_PARAM1)); - AND(32, R(ECX), Imm8(3)); - SHL(32, R(ECX), Imm8(3)); - ROR_(32, R(RSCRATCH), R(ECX)); - } - } - RET(); + u32 translatedAddr = Num == 0 ? TranslateAddr9(addr) : TranslateAddr7(addr); - SetJumpTarget(insideITCM); - MOV(32, R(ABI_PARAM3), R(ABI_PARAM1)); // free up ECX - AND(32, R(ABI_PARAM3), Imm32(0x7FFF & addressMask)); - if (store) - { - MOV(size, MComplex(RCPU, ABI_PARAM3, SCALE_1, offsetof(ARMv5, ITCM)), R(ABI_PARAM2)); - - // if CodeRanges[pseudoPhysical/256].Blocks.Length > 0 we're writing into code! 
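// The routine being deleted here polled Blocks.Length on every ITCM store to
// detect writes into compiled code. Its replacement, InvalidateRegionIfNecessary
// (added further up in this patch), keeps a per-range occupancy bitmap so the
// common case costs a single test. A minimal sketch of that check, using the
// new AddressRange::Code field and its 16-byte granularity (the helper name
// is illustrative, not part of the patch):
//
// One AddressRange covers 512 bytes; Code holds one bit per 16-byte chunk
// (512 / 16 = 32 bits), set while some compiled block overlaps that chunk.
bool WriteTouchesCode(const AddressRange* ranges, u32 pseudoPhysical)
{
    const AddressRange& range = ranges[pseudoPhysical / 512];
    u32 chunkBit = 1u << ((pseudoPhysical & 0x1FF) / 16);
    return (range.Code & chunkBit) != 0; // only then is InvalidateByAddr needed
}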
- static_assert(sizeof(AddressRange) == 16); - LEA(32, ABI_PARAM1, MDisp(ABI_PARAM3, ExeMemRegionOffsets[exeMem_ITCM])); - MOV(32, R(RSCRATCH), R(ABI_PARAM1)); - SHR(32, R(RSCRATCH), Imm8(9)); - SHL(32, R(RSCRATCH), Imm8(4)); - CMP(16, MDisp(RSCRATCH, squeezePointer(CodeRanges) + offsetof(AddressRange, Blocks.Length)), Imm8(0)); - FixupBranch noCode = J_CC(CC_Z); - JMP((u8*)InvalidateByAddr, true); - SetJumpTarget(noCode); - } - else + int invalidLiteralIdx = InvalidLiterals.Find(translatedAddr); + if (invalidLiteralIdx != -1) { - MOVZX(32, size, RSCRATCH, MComplex(RCPU, ABI_PARAM3, SCALE_1, offsetof(ARMv5, ITCM))); - if (size == 32) - { - if (ABI_PARAM1 != ECX) - MOV(32, R(ECX), R(ABI_PARAM1)); - AND(32, R(ECX), Imm8(3)); - SHL(32, R(ECX), Imm8(3)); - ROR_(32, R(RSCRATCH), R(ECX)); - } + InvalidLiterals.Remove(invalidLiteralIdx); + return false; } - RET(); - - static_assert(RSCRATCH == EAX, "Someone changed RSCRATCH!"); - - return res; -} - -#define MEMORY_SEQ_WHILE_COND \ - if (!store) \ - MOV(32, currentElement, R(EAX));\ - if (!preinc) \ - ADD(32, R(ABI_PARAM1), Imm8(4)); \ - \ - SUB(32, R(ABI_PARAM3), Imm8(1)); \ - J_CC(CC_NZ, repeat); - -/* - ABI_PARAM1 address - ABI_PARAM2 address where registers are stored - ABI_PARAM3 how many values to read/write - - Dolphin x64CodeEmitter is my favourite assembler - */ -void* Compiler::Gen_MemoryRoutineSeq9(bool store, bool preinc) -{ - void* res = (void*)GetWritableCodePtr(); - - const u8* repeat = GetCodePtr(); - - if (preinc) - ADD(32, R(ABI_PARAM1), Imm8(4)); - MOV(32, R(RSCRATCH), R(ABI_PARAM1)); - SUB(32, R(RSCRATCH), MDisp(RCPU, offsetof(ARMv5, DTCMBase))); - CMP(32, R(RSCRATCH), MDisp(RCPU, offsetof(ARMv5, DTCMSize))); - FixupBranch insideDTCM = J_CC(CC_B); - - CMP(32, R(ABI_PARAM1), MDisp(RCPU, offsetof(ARMv5, ITCMSize))); - FixupBranch insideITCM = J_CC(CC_B); - - OpArg currentElement = MComplex(ABI_PARAM2, ABI_PARAM3, SCALE_8, -8); // wasting stack space like a gangster - - ABI_PushRegistersAndAdjustStack({ABI_PARAM1, ABI_PARAM2, ABI_PARAM3}, 8); - AND(32, R(ABI_PARAM1), Imm8(~3)); - if (store) - { - MOV(32, R(ABI_PARAM2), currentElement); - CALL((void*)NDS::ARM9Write32); - } - else - CALL((void*)NDS::ARM9Read32); - ABI_PopRegistersAndAdjustStack({ABI_PARAM1, ABI_PARAM2, ABI_PARAM3}, 8); - - MEMORY_SEQ_WHILE_COND - RET(); - - SetJumpTarget(insideDTCM); - AND(32, R(RSCRATCH), Imm32(0x3FFF & ~3)); - if (store) - { - MOV(32, R(ABI_PARAM4), currentElement); - MOV(32, MComplex(RCPU, RSCRATCH, SCALE_1, offsetof(ARMv5, DTCM)), R(ABI_PARAM4)); - } - else - MOV(32, R(RSCRATCH), MComplex(RCPU, RSCRATCH, SCALE_1, offsetof(ARMv5, DTCM))); - - MEMORY_SEQ_WHILE_COND - RET(); - - SetJumpTarget(insideITCM); - MOV(32, R(RSCRATCH), R(ABI_PARAM1)); - AND(32, R(RSCRATCH), Imm32(0x7FFF & ~3)); - if (store) - { - MOV(32, R(ABI_PARAM4), currentElement); - MOV(32, MComplex(RCPU, RSCRATCH, SCALE_1, offsetof(ARMv5, ITCM)), R(ABI_PARAM4)); - - ADD(32, R(RSCRATCH), Imm32(ExeMemRegionOffsets[exeMem_ITCM])); - MOV(32, R(ABI_PARAM4), R(RSCRATCH)); - SHR(32, R(RSCRATCH), Imm8(9)); - SHL(32, R(RSCRATCH), Imm8(4)); - CMP(16, MDisp(RSCRATCH, squeezePointer(CodeRanges) + offsetof(AddressRange, Blocks.Length)), Imm8(0)); - FixupBranch noCode = J_CC(CC_Z); - ABI_PushRegistersAndAdjustStack({ABI_PARAM1, ABI_PARAM2, ABI_PARAM3}, 8); - MOV(32, R(ABI_PARAM1), R(ABI_PARAM4)); - CALL((u8*)InvalidateByAddr); - ABI_PopRegistersAndAdjustStack({ABI_PARAM1, ABI_PARAM2, ABI_PARAM3}, 8); - SetJumpTarget(noCode); - } - else - MOV(32, R(RSCRATCH), MComplex(RCPU, RSCRATCH, SCALE_1, 
offsetof(ARMv5, ITCM))); - - MEMORY_SEQ_WHILE_COND - RET(); - - return res; -} - -void* Compiler::Gen_MemoryRoutineSeq7(bool store, bool preinc, bool codeMainRAM) -{ - void* res = (void*)GetWritableCodePtr(); - - const u8* repeat = GetCodePtr(); - - if (preinc) - ADD(32, R(ABI_PARAM1), Imm8(4)); - - OpArg currentElement = MComplex(ABI_PARAM2, ABI_PARAM3, SCALE_8, -8); - - ABI_PushRegistersAndAdjustStack({ABI_PARAM1, ABI_PARAM2, ABI_PARAM3}, 8); - AND(32, R(ABI_PARAM1), Imm8(~3)); - if (store) - { - MOV(32, R(ABI_PARAM2), currentElement); - CALL((void*)NDS::ARM7Write32); - } - else - CALL((void*)NDS::ARM7Read32); - ABI_PopRegistersAndAdjustStack({ABI_PARAM1, ABI_PARAM2, ABI_PARAM3}, 8); - - MEMORY_SEQ_WHILE_COND - RET(); - - return res; -} - -#undef MEMORY_SEQ_WHILE_COND - -void Compiler::Comp_MemLoadLiteral(int size, int rd, u32 addr) -{ u32 val; // make sure arm7 bios is accessible u32 tmpR15 = CurCPU->R[15]; @@ -276,12 +57,10 @@ void Compiler::Comp_MemLoadLiteral(int size, int rd, u32 addr) RegCache.PutLiteral(rd, val); Comp_AddCycles_CDI(); + + return true; } -/*void fault(u32 a, u32 b, u32 c, u32 d) -{ - printf("actually not static! %x %x %x %x\n", a, b, c, d); -}*/ void Compiler::Comp_MemAccess(int rd, int rn, const ComplexOperand& op2, int size, int flags) { @@ -291,17 +70,12 @@ void Compiler::Comp_MemAccess(int rd, int rn, const ComplexOperand& op2, int siz if (size == 16) addressMask = ~1; - //bool check = false; if (Config::JIT_LiteralOptimisations && rn == 15 && rd != 15 && op2.IsImm && !(flags & (memop_SignExtend|memop_Post|memop_Store|memop_Writeback))) { u32 addr = R15 + op2.Imm * ((flags & memop_SubtractOffset) ? -1 : 1); - u32 translatedAddr = Num == 0 ? TranslateAddr<0>(addr) : TranslateAddr<1>(addr); - - if (!(CodeRanges[translatedAddr / 512].InvalidLiterals & (1 << ((translatedAddr & 0x1FF) / 16)))) - { - Comp_MemLoadLiteral(size, rd, addr); + + if (Comp_MemLoadLiteral(size, rd, addr)) return; - } } { @@ -314,173 +88,334 @@ void Compiler::Comp_MemAccess(int rd, int rn, const ComplexOperand& op2, int siz Comp_AddCycles_CDI(); } + bool addrIsStatic = Config::JIT_LiteralOptimisations + && RegCache.IsLiteral(rn) && op2.IsImm && !(flags & (memop_Writeback|memop_Post)); + u32 staticAddress; + if (addrIsStatic) + staticAddress = RegCache.LiteralValues[rn] + op2.Imm * ((flags & memop_SubtractOffset) ? -1 : 1); OpArg rdMapped = MapReg(rd); - OpArg rnMapped = MapReg(rn); - if (Thumb && rn == 15) - rnMapped = Imm32(R15 & ~0x2); - - bool inlinePreparation = Num == 1; - u32 constLocalROR32 = 4; - - void* memoryFunc = Num == 0 - ? MemoryFuncs9[size >> 4][!!(flags & memop_Store)] - : MemoryFuncs7[size >> 4][!!((flags & memop_Store))]; - if (Config::JIT_LiteralOptimisations && (rd != 15 || (flags & memop_Store)) && op2.IsImm && RegCache.IsLiteral(rn)) + if (!addrIsStatic) { - u32 addr = RegCache.LiteralValues[rn] + op2.Imm * ((flags & memop_SubtractOffset) ? -1 : 1); - - /*MOV(32, R(ABI_PARAM1), Imm32(CurInstr.Instr)); - MOV(32, R(ABI_PARAM1), Imm32(R15)); - MOV_sum(32, RSCRATCH, rnMapped, Imm32(op2.Imm * ((flags & memop_SubtractOffset) ? -1 : 1))); - CMP(32, R(RSCRATCH), Imm32(addr)); - FixupBranch eq = J_CC(CC_E); - CALL((void*)fault); - SetJumpTarget(eq);*/ - - NDS::MemRegion region; - region.Mem = NULL; - if (Num == 0) + OpArg rnMapped = MapReg(rn); + if (Thumb && rn == 15) + rnMapped = Imm32(R15 & ~0x2); + + X64Reg finalAddr = RSCRATCH3; + if (flags & memop_Post) { - ARMv5* cpu5 = (ARMv5*)CurCPU; + MOV(32, R(RSCRATCH3), rnMapped); - // stupid dtcm... 
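// Everything removed around here resolved the target's memory region pointer
// at compile time and baked it into the emitted code. The replacement,
// visible in the hunks below, classifies the address (ClassifyAddress9/7) and
// guards the fast path with a byte table at run time. Judging from the
// emitted CMP/MOVZX sequences, MemoryStatus9/7 store one byte per 512-byte
// page: the low bits hold the memregion_* id, and bit 7 appears to flag pages
// containing compiled code, so stores into such pages always fall through to
// the slow path (which can invalidate). A sketch of the guard under that
// assumed encoding (helper name illustrative):
bool FastPathGuard(const u8* memoryStatus, u32 addr, u8 expectedTarget, bool store)
{
    u8 status = memoryStatus[addr >> 9];           // one status byte per 512 bytes
    if (store)
        return status == expectedTarget;           // code bit set => mismatch => slow path
    else
        return (status & ~0x80) == expectedTarget; // loads ignore the code bit
}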
- if (addr >= cpu5->DTCMBase && addr < (cpu5->DTCMBase + cpu5->DTCMSize)) - { - // disable this for now as DTCM is located in heap - // which might excced the RIP-addressable range - //region.Mem = cpu5->DTCM; - //region.Mask = 0x3FFF; - } - else - { - NDS::ARM9GetMemRegion(addr, flags & memop_Store, ®ion); - } + finalAddr = rnMapped.GetSimpleReg(); } - else - NDS::ARM7GetMemRegion(addr, flags & memop_Store, ®ion); - if (region.Mem != NULL) + if (op2.IsImm) + { + MOV_sum(32, finalAddr, rnMapped, Imm32(op2.Imm * ((flags & memop_SubtractOffset) ? -1 : 1))); + } + else { - void* ptr = ®ion.Mem[addr & addressMask & region.Mask]; + OpArg rm = MapReg(op2.Reg.Reg); - if (flags & memop_Store) + if (!(flags & memop_SubtractOffset) && rm.IsSimpleReg() && rnMapped.IsSimpleReg() + && op2.Reg.Op == 0 && op2.Reg.Amount > 0 && op2.Reg.Amount <= 3) { - MOV(size, M(ptr), MapReg(rd)); + LEA(32, finalAddr, + MComplex(rnMapped.GetSimpleReg(), rm.GetSimpleReg(), 1 << op2.Reg.Amount, 0)); } else { - if (flags & memop_SignExtend) - MOVSX(32, size, rdMapped.GetSimpleReg(), M(ptr)); - else - MOVZX(32, size, rdMapped.GetSimpleReg(), M(ptr)); + bool throwAway; + OpArg offset = + Comp_RegShiftImm(op2.Reg.Op, op2.Reg.Amount, rm, false, throwAway); - if (size == 32 && addr & ~0x3) + if (flags & memop_SubtractOffset) { - ROR_(32, rdMapped, Imm8((addr & 0x3) << 3)); + if (R(finalAddr) != rnMapped) + MOV(32, R(finalAddr), rnMapped); + if (!offset.IsZero()) + SUB(32, R(finalAddr), offset); } + else + MOV_sum(32, finalAddr, rnMapped, offset); } - - return; } - void* specialFunc = GetFuncForAddr(CurCPU, addr, flags & memop_Store, size); - if (specialFunc) - { - memoryFunc = specialFunc; - inlinePreparation = true; - constLocalROR32 = addr & 0x3; - } + if ((flags & memop_Writeback) && !(flags & memop_Post)) + MOV(32, rnMapped, R(finalAddr)); } - X64Reg finalAddr = ABI_PARAM1; - if (flags & memop_Post) - { - MOV(32, R(ABI_PARAM1), rnMapped); + int expectedTarget = Num == 0 + ? ClassifyAddress9(addrIsStatic ? staticAddress : CurInstr.DataRegion) + : ClassifyAddress7(addrIsStatic ? staticAddress : CurInstr.DataRegion); + if (CurInstr.Cond() < 0xE) + expectedTarget = memregion_Other; + + bool compileFastPath = false, compileSlowPath = !addrIsStatic || (flags & memop_Store); - finalAddr = rnMapped.GetSimpleReg(); + switch (expectedTarget) + { + case memregion_MainRAM: + case memregion_DTCM: + case memregion_WRAM7: + case memregion_SWRAM9: + case memregion_SWRAM7: + case memregion_IO9: + case memregion_IO7: + case memregion_VWRAM: + compileFastPath = true; + break; + case memregion_Wifi: + compileFastPath = size >= 16; + break; + case memregion_VRAM: + compileFastPath = !(flags & memop_Store) || size >= 16; + case memregion_BIOS9: + compileFastPath = !(flags & memop_Store); + break; + default: break; } - if (op2.IsImm) + if (addrIsStatic && !compileFastPath) { - MOV_sum(32, finalAddr, rnMapped, Imm32(op2.Imm * ((flags & memop_SubtractOffset) ? -1 : 1))); + compileFastPath = false; + compileSlowPath = true; } - else + + if (addrIsStatic && compileSlowPath) + MOV(32, R(RSCRATCH3), Imm32(staticAddress)); + + if (compileFastPath) { - OpArg rm = MapReg(op2.Reg.Reg); + FixupBranch slowPath; + if (compileSlowPath) + { + MOV(32, R(RSCRATCH), R(RSCRATCH3)); + SHR(32, R(RSCRATCH), Imm8(9)); + if (flags & memop_Store) + { + CMP(8, MDisp(RSCRATCH, squeezePointer(Num == 0 ? MemoryStatus9 : MemoryStatus7)), Imm8(expectedTarget)); + } + else + { + MOVZX(32, 8, RSCRATCH, MDisp(RSCRATCH, squeezePointer(Num == 0 ? 
MemoryStatus9 : MemoryStatus7))); + AND(32, R(RSCRATCH), Imm8(~0x80)); + CMP(32, R(RSCRATCH), Imm8(expectedTarget)); + } + + slowPath = J_CC(CC_NE, true); + } - if (!(flags & memop_SubtractOffset) && rm.IsSimpleReg() && rnMapped.IsSimpleReg() - && op2.Reg.Op == 0 && op2.Reg.Amount > 0 && op2.Reg.Amount <= 3) + if (expectedTarget == memregion_MainRAM || expectedTarget == memregion_WRAM7 + || expectedTarget == memregion_BIOS9) { - LEA(32, finalAddr, - MComplex(rnMapped.GetSimpleReg(), rm.GetSimpleReg(), 1 << op2.Reg.Amount, 0)); + u8* data; + u32 mask; + if (expectedTarget == memregion_MainRAM) + { + data = NDS::MainRAM; + mask = MAIN_RAM_SIZE - 1; + } + else if (expectedTarget == memregion_BIOS9) + { + data = NDS::ARM9BIOS; + mask = 0xFFF; + } + else + { + data = NDS::ARM7WRAM; + mask = 0xFFFF; + } + OpArg memLoc; + if (addrIsStatic) + { + memLoc = M(data + ((staticAddress & mask & addressMask))); + } + else + { + MOV(32, R(RSCRATCH), R(RSCRATCH3)); + AND(32, R(RSCRATCH), Imm32(mask & addressMask)); + memLoc = MDisp(RSCRATCH, squeezePointer(data)); + } + if (flags & memop_Store) + MOV(size, memLoc, rdMapped); + else if (flags & memop_SignExtend) + MOVSX(32, size, rdMapped.GetSimpleReg(), memLoc); + else + MOVZX(32, size, rdMapped.GetSimpleReg(), memLoc); } - else + else if (expectedTarget == memregion_DTCM) + { + if (addrIsStatic) + MOV(32, R(RSCRATCH), Imm32(staticAddress)); + else + MOV(32, R(RSCRATCH), R(RSCRATCH3)); + SUB(32, R(RSCRATCH), MDisp(RCPU, offsetof(ARMv5, DTCMBase))); + AND(32, R(RSCRATCH), Imm32(0x3FFF & addressMask)); + OpArg memLoc = MComplex(RCPU, RSCRATCH, SCALE_1, offsetof(ARMv5, DTCM)); + if (flags & memop_Store) + MOV(size, memLoc, rdMapped); + else if (flags & memop_SignExtend) + MOVSX(32, size, rdMapped.GetSimpleReg(), memLoc); + else + MOVZX(32, size, rdMapped.GetSimpleReg(), memLoc); + } + else if (expectedTarget == memregion_SWRAM9 || expectedTarget == memregion_SWRAM7) { - bool throwAway; - OpArg offset = - Comp_RegShiftImm(op2.Reg.Op, op2.Reg.Amount, rm, false, throwAway); - - if (flags & memop_SubtractOffset) + MOV(64, R(RSCRATCH2), M(expectedTarget == memregion_SWRAM9 ? &NDS::SWRAM_ARM9 : &NDS::SWRAM_ARM7)); + if (addrIsStatic) { - if (R(finalAddr) != rnMapped) - MOV(32, R(finalAddr), rnMapped); - if (!offset.IsZero()) - SUB(32, R(finalAddr), offset); + MOV(32, R(RSCRATCH), Imm32(staticAddress & addressMask)); } else - MOV_sum(32, finalAddr, rnMapped, offset); + { + MOV(32, R(RSCRATCH), R(RSCRATCH3)); + AND(32, R(RSCRATCH), Imm8(addressMask)); + } + AND(32, R(RSCRATCH), M(expectedTarget == memregion_SWRAM9 ? 
&NDS::SWRAM_ARM9Mask : &NDS::SWRAM_ARM7Mask)); + OpArg memLoc = MRegSum(RSCRATCH, RSCRATCH2); + if (flags & memop_Store) + MOV(size, memLoc, rdMapped); + else if (flags & memop_SignExtend) + MOVSX(32, size, rdMapped.GetSimpleReg(), memLoc); + else + MOVZX(32, size, rdMapped.GetSimpleReg(), memLoc); } - } + else + { + u32 maskedDataRegion; - if ((flags & memop_Writeback) && !(flags & memop_Post)) - MOV(32, rnMapped, R(finalAddr)); + if (addrIsStatic) + { + maskedDataRegion = staticAddress; + MOV(32, R(ABI_PARAM1), Imm32(staticAddress)); + } + else + { + if (ABI_PARAM1 != RSCRATCH3) + MOV(32, R(ABI_PARAM1), R(RSCRATCH3)); + AND(32, R(ABI_PARAM1), Imm8(addressMask)); - if (flags & memop_Store) - MOV(32, R(ABI_PARAM2), rdMapped); + maskedDataRegion = CurInstr.DataRegion; + if (Num == 0) + maskedDataRegion &= ~0xFFFFFF; + else + maskedDataRegion &= ~0x7FFFFF; + } - if (!(flags & memop_Store) && inlinePreparation && constLocalROR32 == 4 && size == 32) - MOV(32, rdMapped, R(ABI_PARAM1)); + void* func = GetFuncForAddr(CurCPU, maskedDataRegion, flags & memop_Store, size); - if (inlinePreparation && size > 8) - AND(32, R(ABI_PARAM1), Imm8(addressMask)); + if (flags & memop_Store) + { + MOV(32, R(ABI_PARAM2), rdMapped); - CALL(memoryFunc); + ABI_CallFunction((void(*)())func); + } + else + { + if (!addrIsStatic) + MOV(32, rdMapped, R(RSCRATCH3)); - /*if (Num == 0 && check) - { - CMP(32, R(EAX), rdMapped); - FixupBranch notEqual = J_CC(CC_E); - ABI_PushRegistersAndAdjustStack({RSCRATCH}, 0); - MOV(32, R(ABI_PARAM1), Imm32(R15 - (Thumb ? 4 : 8))); - MOV(32, R(ABI_PARAM2), R(EAX)); - MOV(32, R(ABI_PARAM3), rdMapped); - MOV(32, R(ABI_PARAM4), Imm32(CurInstr.Instr)); - CALL((u8*)fault); - ABI_PopRegistersAndAdjustStack({RSCRATCH}, 0); - SetJumpTarget(notEqual); - }*/ - - if (!(flags & memop_Store)) - { - if (inlinePreparation && size == 32) + ABI_CallFunction((void(*)())func); + + if (!addrIsStatic) + MOV(32, R(RSCRATCH3), rdMapped); + + if (flags & memop_SignExtend) + MOVSX(32, size, rdMapped.GetSimpleReg(), R(RSCRATCH)); + else + MOVZX(32, size, rdMapped.GetSimpleReg(), R(RSCRATCH)); + } + } + + if ((size == 32 && !(flags & memop_Store))) { - if (constLocalROR32 == 4) + if (addrIsStatic) + { + if (staticAddress & 0x3) + ROR_(32, rdMapped, Imm8((staticAddress & 0x3) * 8)); + } + else { - static_assert(RSCRATCH3 == ECX); - MOV(32, R(ECX), rdMapped); - AND(32, R(ECX), Imm8(3)); - SHL(32, R(ECX), Imm8(3)); - ROR_(32, R(RSCRATCH), R(ECX)); + AND(32, R(RSCRATCH3), Imm8(0x3)); + SHL(32, R(RSCRATCH3), Imm8(3)); + ROR_(32, rdMapped, R(RSCRATCH3)); } - else if (constLocalROR32 != 0) - ROR_(32, R(RSCRATCH), Imm8(constLocalROR32 << 3)); } - if (flags & memop_SignExtend) - MOVSX(32, size, rdMapped.GetSimpleReg(), R(RSCRATCH)); + if (compileSlowPath) + { + SwitchToFarCode(); + SetJumpTarget(slowPath); + } + } + + if (compileSlowPath) + { + if (Num == 0) + { + MOV(32, R(ABI_PARAM2), R(RSCRATCH3)); + MOV(64, R(ABI_PARAM1), R(RCPU)); + if (flags & memop_Store) + { + MOV(32, R(ABI_PARAM3), rdMapped); + + switch (size) + { + case 32: CALL((void*)&SlowWrite9); break; + case 16: CALL((void*)&SlowWrite9); break; + case 8: CALL((void*)&SlowWrite9); break; + } + } + else + { + switch (size) + { + case 32: CALL((void*)&SlowRead9); break; + case 16: CALL((void*)&SlowRead9); break; + case 8: CALL((void*)&SlowRead9); break; + } + } + } else - MOVZX(32, size, rdMapped.GetSimpleReg(), R(RSCRATCH)); + { + if (ABI_PARAM1 != RSCRATCH3) + MOV(32, R(ABI_PARAM1), R(RSCRATCH3)); + if (flags & memop_Store) + { + MOV(32, R(ABI_PARAM2), 
rdMapped); + + switch (size) + { + case 32: CALL((void*)&SlowWrite7); break; + case 16: CALL((void*)&SlowWrite7); break; + case 8: CALL((void*)&SlowWrite7); break; + } + } + else + { + switch (size) + { + case 32: CALL((void*)&SlowRead7); break; + case 16: CALL((void*)&SlowRead7); break; + case 8: CALL((void*)&SlowRead7); break; + } + } + } + if (!(flags & memop_Store)) + { + if (flags & memop_SignExtend) + MOVSX(32, size, rdMapped.GetSimpleReg(), R(RSCRATCH)); + else + MOVZX(32, size, rdMapped.GetSimpleReg(), R(RSCRATCH)); + } + } + + if (compileFastPath && compileSlowPath) + { + FixupBranch ret = J(true); + SwitchToNearCode(); + SetJumpTarget(ret); } if (!(flags & memop_Store) && rd == 15) @@ -498,100 +433,160 @@ void Compiler::Comp_MemAccess(int rd, int rn, const ComplexOperand& op2, int siz s32 Compiler::Comp_MemAccessBlock(int rn, BitSet16 regs, bool store, bool preinc, bool decrement, bool usermode) { - IrregularCycles = true; - int regsCount = regs.Count(); s32 offset = (regsCount * 4) * (decrement ? -1 : 1); // we need to make sure that the stack stays aligned to 16 bytes +#ifdef _WIN32 + // include shadow + u32 stackAlloc = ((regsCount + 4 + 1) & ~1) * 8; +#else u32 stackAlloc = ((regsCount + 1) & ~1) * 8; +#endif + u32 allocOffset = stackAlloc - regsCount * 8; - if (!store) + int expectedTarget = Num == 0 + ? ClassifyAddress9(CurInstr.DataRegion) + : ClassifyAddress7(CurInstr.DataRegion); + if (usermode || CurInstr.Cond() < 0xE) + expectedTarget = memregion_Other; + + bool compileFastPath = false; + + switch (expectedTarget) { + case memregion_DTCM: + case memregion_MainRAM: + case memregion_SWRAM9: + case memregion_SWRAM7: + case memregion_WRAM7: + compileFastPath = true; + break; + default: + break; + } + + if (!store) Comp_AddCycles_CDI(); + else + Comp_AddCycles_CD(); - if (decrement) + if (decrement) + { + MOV_sum(32, RSCRATCH4, MapReg(rn), Imm32(-regsCount * 4)); + preinc ^= true; + } + else + MOV(32, R(RSCRATCH4), MapReg(rn)); + + if (compileFastPath) + { + assert(!usermode); + + MOV(32, R(RSCRATCH), R(RSCRATCH4)); + SHR(32, R(RSCRATCH), Imm8(9)); + + if (store) { - MOV_sum(32, ABI_PARAM1, MapReg(rn), Imm32(-regsCount * 4)); - preinc ^= true; + CMP(8, MDisp(RSCRATCH, squeezePointer(Num == 0 ? MemoryStatus9 : MemoryStatus7)), Imm8(expectedTarget)); } else - MOV(32, R(ABI_PARAM1), MapReg(rn)); - - MOV(32, R(ABI_PARAM3), Imm32(regsCount)); - SUB(64, R(RSP), stackAlloc <= INT8_MAX ? Imm8(stackAlloc) : Imm32(stackAlloc)); - MOV(64, R(ABI_PARAM2), R(RSP)); - - CALL(Num == 0 - ? MemoryFuncsSeq9[0][preinc] - : MemoryFuncsSeq7[0][preinc][CodeRegion == 0x02]); + { + MOVZX(32, 8, RSCRATCH, MDisp(RSCRATCH, squeezePointer(Num == 0 ? 
MemoryStatus9 : MemoryStatus7))); + AND(32, R(RSCRATCH), Imm8(~0x80)); + CMP(32, R(RSCRATCH), Imm8(expectedTarget)); + } + FixupBranch slowPath = J_CC(CC_NE, true); - bool firstUserMode = true; - for (int reg = 15; reg >= 0; reg--) + if (expectedTarget == memregion_DTCM) { - if (regs[reg]) + SUB(32, R(RSCRATCH4), MDisp(RCPU, offsetof(ARMv5, DTCMBase))); + AND(32, R(RSCRATCH4), Imm32(0x3FFF & ~3)); + LEA(64, RSCRATCH4, MComplex(RCPU, RSCRATCH4, 1, offsetof(ARMv5, DTCM))); + } + else if (expectedTarget == memregion_MainRAM) + { + AND(32, R(RSCRATCH4), Imm32((MAIN_RAM_SIZE - 1) & ~3)); + ADD(64, R(RSCRATCH4), Imm32(squeezePointer(NDS::MainRAM))); + } + else if (expectedTarget == memregion_WRAM7) + { + AND(32, R(RSCRATCH4), Imm32(0xFFFF & ~3)); + ADD(64, R(RSCRATCH4), Imm32(squeezePointer(NDS::ARM7WRAM))); + } + else // SWRAM + { + AND(32, R(RSCRATCH4), Imm8(~3)); + AND(32, R(RSCRATCH4), M(expectedTarget == memregion_SWRAM9 ? &NDS::SWRAM_ARM9Mask : &NDS::SWRAM_ARM7Mask)); + ADD(64, R(RSCRATCH4), M(expectedTarget == memregion_SWRAM9 ? &NDS::SWRAM_ARM9 : &NDS::SWRAM_ARM7)); + } + u32 offset = 0; + for (int reg : regs) + { + if (preinc) + offset += 4; + OpArg mem = MDisp(RSCRATCH4, offset); + if (store) { - if (usermode && !regs[15] && reg >= 8 && reg < 15) + if (RegCache.LoadedRegs & (1 << reg)) { - if (firstUserMode) - { - MOV(32, R(RSCRATCH), R(RCPSR)); - AND(32, R(RSCRATCH), Imm8(0x1F)); - firstUserMode = false; - } - MOV(32, R(RSCRATCH2), Imm32(reg - 8)); - POP(RSCRATCH3); - CALL(WriteBanked); - FixupBranch sucessfulWritten = J_CC(CC_NC); - if (RegCache.Mapping[reg] != INVALID_REG) - MOV(32, R(RegCache.Mapping[reg]), R(RSCRATCH3)); - else - SaveReg(reg, RSCRATCH3); - SetJumpTarget(sucessfulWritten); + MOV(32, mem, MapReg(reg)); } - else if (RegCache.Mapping[reg] == INVALID_REG) + else { - assert(reg != 15); - - POP(RSCRATCH); - SaveReg(reg, RSCRATCH); + LoadReg(reg, RSCRATCH); + MOV(32, mem, R(RSCRATCH)); + } + } + else + { + if (RegCache.LoadedRegs & (1 << reg)) + { + MOV(32, MapReg(reg), mem); } else { - if (reg != 15) - RegCache.DirtyRegs |= (1 << reg); - POP(MapReg(reg).GetSimpleReg()); + MOV(32, R(RSCRATCH), mem); + SaveReg(reg, RSCRATCH); } } + if (!preinc) + offset += 4; } - if (regsCount & 1) - POP(RSCRATCH); + SwitchToFarCode(); + SetJumpTarget(slowPath); + } + + if (!store) + { + MOV(32, R(ABI_PARAM1), R(RSCRATCH4)); + MOV(32, R(ABI_PARAM3), Imm32(regsCount)); + SUB(64, R(RSP), stackAlloc <= INT8_MAX ? 
Imm8(stackAlloc) : Imm32(stackAlloc)); + if (allocOffset == 0) + MOV(64, R(ABI_PARAM2), R(RSP)); + else + LEA(64, ABI_PARAM2, MDisp(RSP, allocOffset)); + + if (Num == 0) + MOV(64, R(ABI_PARAM4), R(RCPU)); - if (regs[15]) + switch (Num * 2 | preinc) { - if (Num == 1) - { - if (Thumb) - OR(32, MapReg(15), Imm8(1)); - else - AND(32, MapReg(15), Imm8(0xFE)); - } - Comp_JumpTo(MapReg(15).GetSimpleReg(), usermode); + case 0: CALL((void*)&SlowBlockTransfer9); break; + case 1: CALL((void*)&SlowBlockTransfer9); break; + case 2: CALL((void*)&SlowBlockTransfer7); break; + case 3: CALL((void*)&SlowBlockTransfer7); break; } - } - else - { - Comp_AddCycles_CD(); - if (regsCount & 1) - PUSH(RSCRATCH); + if (allocOffset) + ADD(64, R(RSP), Imm8(allocOffset)); bool firstUserMode = true; for (int reg : regs) { - if (usermode && reg >= 8 && reg < 15) + if (usermode && !regs[15] && reg >= 8 && reg < 15) { if (firstUserMode) { @@ -599,43 +594,107 @@ s32 Compiler::Comp_MemAccessBlock(int rn, BitSet16 regs, bool store, bool preinc AND(32, R(RSCRATCH), Imm8(0x1F)); firstUserMode = false; } - if (RegCache.Mapping[reg] == INVALID_REG) - LoadReg(reg, RSCRATCH3); - else - MOV(32, R(RSCRATCH3), R(RegCache.Mapping[reg])); MOV(32, R(RSCRATCH2), Imm32(reg - 8)); - CALL(ReadBanked); - PUSH(RSCRATCH3); + POP(RSCRATCH3); + CALL(WriteBanked); + FixupBranch sucessfulWritten = J_CC(CC_NC); + if (RegCache.LoadedRegs & (1 << reg)) + MOV(32, R(RegCache.Mapping[reg]), R(RSCRATCH3)); + else + SaveReg(reg, RSCRATCH3); + SetJumpTarget(sucessfulWritten); } - else if (RegCache.Mapping[reg] == INVALID_REG) + else if (!(RegCache.LoadedRegs & (1 << reg))) { - LoadReg(reg, RSCRATCH); - PUSH(RSCRATCH); + assert(reg != 15); + + POP(RSCRATCH); + SaveReg(reg, RSCRATCH); } else { - PUSH(MapReg(reg).GetSimpleReg()); + POP(MapReg(reg).GetSimpleReg()); } } - - if (decrement) + } + else + { + bool firstUserMode = true; + for (int reg = 15; reg >= 0; reg--) { - MOV_sum(32, ABI_PARAM1, MapReg(rn), Imm32(-regsCount * 4)); - preinc ^= true; + if (regs[reg]) + { + if (usermode && reg >= 8 && reg < 15) + { + if (firstUserMode) + { + MOV(32, R(RSCRATCH), R(RCPSR)); + AND(32, R(RSCRATCH), Imm8(0x1F)); + firstUserMode = false; + } + if (RegCache.Mapping[reg] == INVALID_REG) + LoadReg(reg, RSCRATCH3); + else + MOV(32, R(RSCRATCH3), R(RegCache.Mapping[reg])); + MOV(32, R(RSCRATCH2), Imm32(reg - 8)); + CALL(ReadBanked); + PUSH(RSCRATCH3); + } + else if (!(RegCache.LoadedRegs & (1 << reg))) + { + LoadReg(reg, RSCRATCH); + PUSH(RSCRATCH); + } + else + { + PUSH(MapReg(reg).GetSimpleReg()); + } + } } - else - MOV(32, R(ABI_PARAM1), MapReg(rn)); - MOV(64, R(ABI_PARAM2), R(RSP)); + if (allocOffset) + SUB(64, R(RSP), Imm8(allocOffset)); + + MOV(32, R(ABI_PARAM1), R(RSCRATCH4)); + if (allocOffset) + LEA(64, ABI_PARAM2, MDisp(RSP, allocOffset)); + else + MOV(64, R(ABI_PARAM2), R(RSP)); + MOV(32, R(ABI_PARAM3), Imm32(regsCount)); + if (Num == 0) + MOV(64, R(ABI_PARAM4), R(RCPU)); - CALL(Num == 0 - ? MemoryFuncsSeq9[1][preinc] - : MemoryFuncsSeq7[1][preinc][CodeRegion == 0x02]); + switch (Num * 2 | preinc) + { + case 0: CALL((void*)&SlowBlockTransfer9); break; + case 1: CALL((void*)&SlowBlockTransfer9); break; + case 2: CALL((void*)&SlowBlockTransfer7); break; + case 3: CALL((void*)&SlowBlockTransfer7); break; + } ADD(64, R(RSP), stackAlloc <= INT8_MAX ? 
Imm8(stackAlloc) : Imm32(stackAlloc)); } + if (compileFastPath) + { + FixupBranch ret = J(true); + SwitchToNearCode(); + SetJumpTarget(ret); + } + + if (!store && regs[15]) + { + if (Num == 1) + { + if (Thumb) + OR(32, MapReg(15), Imm8(1)); + else + AND(32, MapReg(15), Imm8(0xFE)); + } + Comp_JumpTo(MapReg(15).GetSimpleReg(), usermode); + } + return offset; } @@ -786,9 +845,7 @@ void Compiler::T_Comp_LoadPCRel() { u32 offset = (CurInstr.Instr & 0xFF) << 2; u32 addr = (R15 & ~0x2) + offset; - if (Config::JIT_LiteralOptimisations) - Comp_MemLoadLiteral(32, CurInstr.T_Reg(8), addr); - else + if (!Config::JIT_LiteralOptimisations || !Comp_MemLoadLiteral(32, CurInstr.T_Reg(8), addr)) Comp_MemAccess(CurInstr.T_Reg(8), 15, ComplexOperand(offset), 32, 0); } diff --git a/src/ARM_InstrInfo.cpp b/src/ARM_InstrInfo.cpp index 28362d9..b50e821 100644 --- a/src/ARM_InstrInfo.cpp +++ b/src/ARM_InstrInfo.cpp @@ -373,16 +373,16 @@ Info Decode(bool thumb, u32 num, u32 instr) if (res.Kind == tk_LDMIA || res.Kind == tk_POP) { - u32 set = (instr & 0xFF) & ~(res.DstRegs|res.SrcRegs); - res.NotStrictlyNeeded |= set; + u32 set = (instr & 0xFF); + res.NotStrictlyNeeded |= set & ~(res.DstRegs|res.SrcRegs); res.DstRegs |= set; } if (res.Kind == tk_STMIA || res.Kind == tk_PUSH) { - u32 set = (instr & 0xFF) & ~(res.DstRegs|res.SrcRegs); + u32 set = (instr & 0xFF); if (res.Kind == tk_PUSH && instr & (1 << 8)) set |= (1 << 14); - res.NotStrictlyNeeded |= set; + res.NotStrictlyNeeded |= set & ~(res.DstRegs|res.SrcRegs); res.SrcRegs |= set; } @@ -495,15 +495,15 @@ Info Decode(bool thumb, u32 num, u32 instr) if (res.Kind == ak_LDM) { - u16 set = (instr & 0xFFFF) & ~(res.SrcRegs|res.DstRegs|(1<<15)); + u16 set = (instr & 0xFFFF); + res.NotStrictlyNeeded |= set & ~(res.SrcRegs|res.DstRegs|(1<<15)); res.DstRegs |= set; - res.NotStrictlyNeeded |= set; } if (res.Kind == ak_STM) { - u16 set = (instr & 0xFFFF) & ~(res.SrcRegs|res.DstRegs|(1<<15)); + u16 set = (instr & 0xFFFF); + res.NotStrictlyNeeded |= set & ~(res.SrcRegs|res.DstRegs|(1<<15)); res.SrcRegs |= set; - res.NotStrictlyNeeded |= set; } if ((instr >> 28) < 0xE) diff --git a/src/CP15.cpp b/src/CP15.cpp index 62258e9..e665dbd 100644 --- a/src/CP15.cpp +++ b/src/CP15.cpp @@ -97,6 +97,10 @@ void ARMv5::CP15DoSavestate(Savestate* file) void ARMv5::UpdateDTCMSetting() { +#ifdef JIT_ENABLED + u32 oldDTCMBase = DTCMBase; + u32 oldDTCMSize = DTCMSize; +#endif if (CP15Control & (1<<16)) { DTCMBase = DTCMSetting & 0xFFFFF000; @@ -109,10 +113,20 @@ void ARMv5::UpdateDTCMSetting() DTCMSize = 0; //printf("DTCM disabled\n"); } +#ifdef JIT_ENABLED + if (oldDTCMBase != DTCMBase || oldDTCMSize != DTCMSize) + { + ARMJIT::UpdateMemoryStatus9(oldDTCMBase, oldDTCMBase + oldDTCMSize); + ARMJIT::UpdateMemoryStatus9(DTCMBase, DTCMBase + DTCMSize); + } +#endif } void ARMv5::UpdateITCMSetting() { +#ifdef JIT_ENABLED + u32 oldITCMSize = ITCMSize; +#endif if (CP15Control & (1<<18)) { ITCMSize = 0x200 << ((ITCMSetting >> 1) & 0x1F); @@ -123,6 +137,10 @@ void ARMv5::UpdateITCMSetting() ITCMSize = 0; //printf("ITCM disabled\n"); } +#ifdef JIT_ENABLED + if (oldITCMSize != ITCMSize) + ARMJIT::UpdateMemoryStatus9(0, std::max(oldITCMSize, ITCMSize)); +#endif } @@ -561,15 +579,9 @@ void ARMv5::CP15Write(u32 id, u32 val) case 0x750: -#ifdef JIT_ENABLED - ARMJIT::InvalidateAll(); -#endif ICacheInvalidateAll(); return; case 0x751: -#ifdef JIT_ENABLED - ARMJIT::InvalidateByAddr(ARMJIT::TranslateAddr<0>(val)); -#endif ICacheInvalidateByAddr(val); return; case 0x752: @@ -732,7 +744,7 @@ u32 ARMv5::CodeRead32(u32 
addr, bool branch) void ARMv5::DataRead8(u32 addr, u32* val) { - DataRegion = addr >> 12; + DataRegion = addr; if (addr < ITCMSize) { @@ -753,7 +765,7 @@ void ARMv5::DataRead8(u32 addr, u32* val) void ARMv5::DataRead16(u32 addr, u32* val) { - DataRegion = addr >> 12; + DataRegion = addr; addr &= ~1; @@ -776,7 +788,7 @@ void ARMv5::DataRead16(u32 addr, u32* val) void ARMv5::DataRead32(u32 addr, u32* val) { - DataRegion = addr >> 12; + DataRegion = addr; addr &= ~3; @@ -820,14 +832,14 @@ void ARMv5::DataRead32S(u32 addr, u32* val) void ARMv5::DataWrite8(u32 addr, u8 val) { - DataRegion = addr >> 12; + DataRegion = addr; if (addr < ITCMSize) { DataCycles = 1; *(u8*)&ITCM[addr & 0x7FFF] = val; #ifdef JIT_ENABLED - ARMJIT::InvalidateITCM(addr & 0x7FFF); + ARMJIT::InvalidateITCMIfNecessary(addr); #endif return; } @@ -844,7 +856,7 @@ void ARMv5::DataWrite8(u32 addr, u8 val) void ARMv5::DataWrite16(u32 addr, u16 val) { - DataRegion = addr >> 12; + DataRegion = addr; addr &= ~1; @@ -853,7 +865,7 @@ void ARMv5::DataWrite16(u32 addr, u16 val) DataCycles = 1; *(u16*)&ITCM[addr & 0x7FFF] = val; #ifdef JIT_ENABLED - ARMJIT::InvalidateITCM(addr & 0x7FFF); + ARMJIT::InvalidateITCMIfNecessary(addr); #endif return; } @@ -870,7 +882,7 @@ void ARMv5::DataWrite16(u32 addr, u16 val) void ARMv5::DataWrite32(u32 addr, u32 val) { - DataRegion = addr >> 12; + DataRegion = addr; addr &= ~3; @@ -879,7 +891,7 @@ void ARMv5::DataWrite32(u32 addr, u32 val) DataCycles = 1; *(u32*)&ITCM[addr & 0x7FFF] = val; #ifdef JIT_ENABLED - ARMJIT::InvalidateITCM(addr & 0x7FFF); + ARMJIT::InvalidateITCMIfNecessary(addr); #endif return; } @@ -903,7 +915,7 @@ void ARMv5::DataWrite32S(u32 addr, u32 val) DataCycles += 1; *(u32*)&ITCM[addr & 0x7FFF] = val; #ifdef JIT_ENABLED - ARMJIT::InvalidateITCM(addr & 0x7FFF); + ARMJIT::InvalidateITCMIfNecessary(addr); #endif return; } diff --git a/src/NDS.cpp b/src/NDS.cpp index 141c565..6e989a8 100644 --- a/src/NDS.cpp +++ b/src/NDS.cpp @@ -535,10 +535,6 @@ void Reset() KeyCnt = 0; RCnt = 0; -#ifdef JIT_ENABLED - ARMJIT::ResetBlockCache(); -#endif - NDSCart::Reset(); GBACart::Reset(); GPU::Reset(); @@ -548,6 +544,10 @@ void Reset() Wifi::Reset(); AREngine::Reset(); + +#ifdef JIT_ENABLED + ARMJIT::Reset(); +#endif } void Stop() @@ -1058,6 +1058,9 @@ void Halt() void MapSharedWRAM(u8 val) { + if (val == WRAMCnt) + return; + WRAMCnt = val; switch (WRAMCnt & 0x3) @@ -1090,6 +1093,11 @@ void MapSharedWRAM(u8 val) SWRAM_ARM7Mask = 0x7FFF; break; } + +#ifdef JIT_ENABLED + ARMJIT::UpdateMemoryStatus9(0x3000000, 0x3000000 + 0x1000000); + ARMJIT::UpdateMemoryStatus7(0x3000000, 0x3000000 + 0x1000000); +#endif } @@ -1873,12 +1881,18 @@ void ARM9Write8(u32 addr, u8 val) switch (addr & 0xFF000000) { case 0x02000000: +#ifdef JIT_ENABLED + ARMJIT::InvalidateMainRAMIfNecessary(addr); +#endif *(u8*)&MainRAM[addr & (MAIN_RAM_SIZE - 1)] = val; return; case 0x03000000: if (SWRAM_ARM9) { +#ifdef JIT_ENABLED + ARMJIT::InvalidateSWRAM9IfNecessary(addr); +#endif *(u8*)&SWRAM_ARM9[addr & SWRAM_ARM9Mask] = val; } return; @@ -1923,12 +1937,18 @@ void ARM9Write16(u32 addr, u16 val) switch (addr & 0xFF000000) { case 0x02000000: +#ifdef JIT_ENABLED + ARMJIT::InvalidateMainRAMIfNecessary(addr); +#endif *(u16*)&MainRAM[addr & (MAIN_RAM_SIZE - 1)] = val; return; case 0x03000000: if (SWRAM_ARM9) { +#ifdef JIT_ENABLED + ARMJIT::InvalidateSWRAM9IfNecessary(addr); +#endif *(u16*)&SWRAM_ARM9[addr & SWRAM_ARM9Mask] = val; } return; @@ -1949,7 +1969,12 @@ void ARM9Write16(u32 addr, u16 val) case 0x00200000: GPU::WriteVRAM_BBG(addr, val); 
return; case 0x00400000: GPU::WriteVRAM_AOBJ(addr, val); return; case 0x00600000: GPU::WriteVRAM_BOBJ(addr, val); return; - default: GPU::WriteVRAM_LCDC(addr, val); return; + default: +#ifdef JIT_ENABLED + ARMJIT::InvalidateLCDCIfNecessary(addr); +#endif + GPU::WriteVRAM_LCDC(addr, val); + return; } case 0x07000000: @@ -1989,12 +2014,18 @@ void ARM9Write32(u32 addr, u32 val) switch (addr & 0xFF000000) { case 0x02000000: +#ifdef JIT_ENABLED + ARMJIT::InvalidateMainRAMIfNecessary(addr); +#endif *(u32*)&MainRAM[addr & (MAIN_RAM_SIZE - 1)] = val; return ; case 0x03000000: if (SWRAM_ARM9) { +#ifdef JIT_ENABLED + ARMJIT::InvalidateSWRAM9IfNecessary(addr); +#endif *(u32*)&SWRAM_ARM9[addr & SWRAM_ARM9Mask] = val; } return; @@ -2015,7 +2046,12 @@ void ARM9Write32(u32 addr, u32 val) case 0x00200000: GPU::WriteVRAM_BBG(addr, val); return; case 0x00400000: GPU::WriteVRAM_AOBJ(addr, val); return; case 0x00600000: GPU::WriteVRAM_BOBJ(addr, val); return; - default: GPU::WriteVRAM_LCDC(addr, val); return; + default: +#ifdef JIT_ENABLED + ARMJIT::InvalidateLCDCIfNecessary(addr); +#endif + GPU::WriteVRAM_LCDC(addr, val); + return; } case 0x07000000: @@ -2279,30 +2315,38 @@ u32 ARM7Read32(u32 addr) void ARM7Write8(u32 addr, u8 val) { -#ifdef JIT_ENABLED - ARMJIT::InvalidateByAddr7(addr); -#endif - switch (addr & 0xFF800000) { case 0x02000000: case 0x02800000: +#ifdef JIT_ENABLED + ARMJIT::InvalidateMainRAMIfNecessary(addr); +#endif *(u8*)&MainRAM[addr & (MAIN_RAM_SIZE - 1)] = val; return; case 0x03000000: if (SWRAM_ARM7) { +#ifdef JIT_ENABLED + ARMJIT::InvalidateSWRAM7IfNecessary(addr); +#endif *(u8*)&SWRAM_ARM7[addr & SWRAM_ARM7Mask] = val; return; } else { +#ifdef JIT_ENABLED + ARMJIT::InvalidateARM7WRAMIfNecessary(addr); +#endif *(u8*)&ARM7WRAM[addr & 0xFFFF] = val; return; } case 0x03800000: +#ifdef JIT_ENABLED + ARMJIT::InvalidateARM7WRAMIfNecessary(addr); +#endif *(u8*)&ARM7WRAM[addr & 0xFFFF] = val; return; @@ -2312,6 +2356,9 @@ void ARM7Write8(u32 addr, u8 val) case 0x06000000: case 0x06800000: +#ifdef JIT_ENABLED + ARMJIT::InvalidateARM7WVRAMIfNecessary(addr); +#endif GPU::WriteVRAM_ARM7(addr, val); return; @@ -2342,30 +2389,38 @@ void ARM7Write8(u32 addr, u8 val) void ARM7Write16(u32 addr, u16 val) { -#ifdef JIT_ENABLED - ARMJIT::InvalidateByAddr7(addr); -#endif - switch (addr & 0xFF800000) { case 0x02000000: case 0x02800000: +#ifdef JIT_ENABLED + ARMJIT::InvalidateMainRAMIfNecessary(addr); +#endif *(u16*)&MainRAM[addr & (MAIN_RAM_SIZE - 1)] = val; return; case 0x03000000: if (SWRAM_ARM7) { +#ifdef JIT_ENABLED + ARMJIT::InvalidateSWRAM7IfNecessary(addr); +#endif *(u16*)&SWRAM_ARM7[addr & SWRAM_ARM7Mask] = val; return; } else { +#ifdef JIT_ENABLED + ARMJIT::InvalidateARM7WRAMIfNecessary(addr); +#endif *(u16*)&ARM7WRAM[addr & 0xFFFF] = val; return; } case 0x03800000: +#ifdef JIT_ENABLED + ARMJIT::InvalidateARM7WRAMIfNecessary(addr); +#endif *(u16*)&ARM7WRAM[addr & 0xFFFF] = val; return; @@ -2383,6 +2438,9 @@ void ARM7Write16(u32 addr, u16 val) case 0x06000000: case 0x06800000: +#ifdef JIT_ENABLED + ARMJIT::InvalidateARM7WVRAMIfNecessary(addr); +#endif GPU::WriteVRAM_ARM7(addr, val); return; @@ -2415,30 +2473,38 @@ void ARM7Write16(u32 addr, u16 val) void ARM7Write32(u32 addr, u32 val) { -#ifdef JIT_ENABLED - ARMJIT::InvalidateByAddr7(addr); -#endif - switch (addr & 0xFF800000) { case 0x02000000: case 0x02800000: +#ifdef JIT_ENABLED + ARMJIT::InvalidateMainRAMIfNecessary(addr); +#endif *(u32*)&MainRAM[addr & (MAIN_RAM_SIZE - 1)] = val; return; case 0x03000000: if (SWRAM_ARM7) { +#ifdef JIT_ENABLED + 
ARMJIT::InvalidateSWRAM7IfNecessary(addr); +#endif *(u32*)&SWRAM_ARM7[addr & SWRAM_ARM7Mask] = val; return; } else { +#ifdef JIT_ENABLED + ARMJIT::InvalidateARM7WRAMIfNecessary(addr); +#endif *(u32*)&ARM7WRAM[addr & 0xFFFF] = val; return; } case 0x03800000: +#ifdef JIT_ENABLED + ARMJIT::InvalidateARM7WRAMIfNecessary(addr); +#endif *(u32*)&ARM7WRAM[addr & 0xFFFF] = val; return; @@ -2457,6 +2523,9 @@ void ARM7Write32(u32 addr, u32 val) case 0x06000000: case 0x06800000: +#ifdef JIT_ENABLED + ARMJIT::InvalidateARM7WVRAMIfNecessary(addr); +#endif GPU::WriteVRAM_ARM7(addr, val); return; diff --git a/src/NDS.h b/src/NDS.h index c7b455e..163260b 100644 --- a/src/NDS.h +++ b/src/NDS.h @@ -120,6 +120,14 @@ extern u8 ROMSeed1[2*8]; extern u8 ARM9BIOS[0x1000]; extern u8 ARM7BIOS[0x4000]; +extern u8 SharedWRAM[0x8000]; +extern u8* SWRAM_ARM9; +extern u8* SWRAM_ARM7; +extern u32 SWRAM_ARM9Mask; +extern u32 SWRAM_ARM7Mask; + +extern u8 ARM7WRAM[0x10000]; + #define MAIN_RAM_SIZE 0x400000 extern u8 MainRAM[MAIN_RAM_SIZE]; -- cgit v1.2.3 From e335a8ca7615c702cfa2dcdb71deb69468088fd8 Mon Sep 17 00:00:00 2001 From: RSDuck Date: Sun, 14 Jun 2020 21:04:25 +0200 Subject: first steps in bringing over the JIT refactor/fastmem --- src/ARM.cpp | 43 +- src/ARM.h | 15 +- src/ARMJIT.cpp | 771 ++++++++++----------------------- src/ARMJIT.h | 64 +-- src/ARMJIT_A64/ARMJIT_ALU.cpp | 123 +++++- src/ARMJIT_A64/ARMJIT_Branch.cpp | 99 ++--- src/ARMJIT_A64/ARMJIT_Compiler.cpp | 383 ++++++++++++----- src/ARMJIT_A64/ARMJIT_Compiler.h | 71 +++- src/ARMJIT_A64/ARMJIT_Linkage.s | 68 +++ src/ARMJIT_A64/ARMJIT_LoadStore.cpp | 790 ++++++++++++++++------------------ src/ARMJIT_Compiler.h | 12 + src/ARMJIT_Internal.h | 70 +-- src/ARMJIT_Memory.cpp | 822 ++++++++++++++++++++++++++++++++++++ src/ARMJIT_Memory.h | 53 +++ src/ARMJIT_x64/ARMJIT_Compiler.cpp | 92 +--- src/ARMJIT_x64/ARMJIT_Compiler.h | 11 +- src/ARMJIT_x64/ARMJIT_LoadStore.cpp | 45 +- src/ARM_InstrInfo.cpp | 73 ++-- src/ARM_InstrInfo.h | 1 + src/CMakeLists.txt | 6 +- src/CP15.cpp | 84 ++-- src/Config.cpp | 6 +- src/Config.h | 1 + src/NDS.cpp | 220 +++++----- src/NDS.h | 17 +- 25 files changed, 2342 insertions(+), 1598 deletions(-) create mode 100644 src/ARMJIT_A64/ARMJIT_Linkage.s create mode 100644 src/ARMJIT_Compiler.h create mode 100644 src/ARMJIT_Memory.cpp create mode 100644 src/ARMJIT_Memory.h (limited to 'src/ARM_InstrInfo.cpp') diff --git a/src/ARM.cpp b/src/ARM.cpp index 92a3a9e..e529be8 100644 --- a/src/ARM.cpp +++ b/src/ARM.cpp @@ -21,6 +21,8 @@ #include "DSi.h" #include "ARM.h" #include "ARMInterpreter.h" +#include "ARMJIT.h" +#include "Config.h" #include "AREngine.h" #include "ARMJIT.h" #include "Config.h" @@ -74,7 +76,9 @@ ARM::~ARM() ARMv5::ARMv5() : ARM(0) { - // +#ifndef JIT_ENABLED + DTCM = new u8[DTCMSize]; +#endif } ARMv4::ARMv4() : ARM(1) @@ -82,6 +86,13 @@ ARMv4::ARMv4() : ARM(1) // } +ARMv5::~ARMv5() +{ +#ifndef JIT_ENABLED + delete[] DTCM; +#endif +} + void ARM::Reset() { Cycles = 0; @@ -622,24 +633,26 @@ void ARMv5::ExecuteJIT() while (NDS::ARM9Timestamp < NDS::ARM9Target) { u32 instrAddr = R[15] - ((CPSR&0x20)?2:4); - u32 translatedAddr = ARMJIT::TranslateAddr9(instrAddr); - if (!translatedAddr) + + // hack so Cycles <= 0 becomes Cycles < 0 + Cycles = NDS::ARM9Target - NDS::ARM9Timestamp - 1; + + if ((instrAddr < FastBlockLookupStart || instrAddr >= (FastBlockLookupStart + FastBlockLookupSize)) + && !ARMJIT::SetupExecutableRegion(0, instrAddr, FastBlockLookup, FastBlockLookupStart, FastBlockLookupSize)) { NDS::ARM9Timestamp = NDS::ARM9Target; 
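// Reaching this point means R15 sits in memory that can never hold
// executable code, so the timestamp is pushed all the way to the target and
// the timeslice is abandoned. In the common case the branch above is never
// taken at all: FastBlockLookupStart/Size cache the bounds of the current
// region's lookup table, and SetupExecutableRegion() only runs when R15
// crosses into a different region, replacing the per-block TranslateAddr9/7
// translation the old loop performed on every dispatch.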
printf("ARMv5 PC in non executable region %08X\n", R[15]); return; } - // hack so Cycles <= 0 becomes Cycles < 0 - Cycles = NDS::ARM9Target - NDS::ARM9Timestamp - 1; - - ARMJIT::JitBlockEntry block = ARMJIT::LookUpBlockEntry<0>(translatedAddr); + ARMJIT::JitBlockEntry block = ARMJIT::LookUpBlock(0, FastBlockLookup, + instrAddr - FastBlockLookupStart, instrAddr); if (block) ARM_Dispatch(this, block); else ARMJIT::CompileBlock(this); - NDS::ARM9Timestamp = NDS::ARM9Target - (Cycles + 1); + NDS::ARM9Timestamp = NDS::ARM9Target - Cycles - 1; if (StopExecution) { @@ -766,23 +779,25 @@ void ARMv4::ExecuteJIT() while (NDS::ARM7Timestamp < NDS::ARM7Target) { u32 instrAddr = R[15] - ((CPSR&0x20)?2:4); - u32 translatedAddr = ARMJIT::TranslateAddr7(instrAddr); - if (!translatedAddr) + + Cycles = NDS::ARM7Target - NDS::ARM7Timestamp - 1; + + if ((instrAddr < FastBlockLookupStart || instrAddr >= (FastBlockLookupStart + FastBlockLookupSize)) + && !ARMJIT::SetupExecutableRegion(1, instrAddr, FastBlockLookup, FastBlockLookupStart, FastBlockLookupSize)) { NDS::ARM7Timestamp = NDS::ARM7Target; printf("ARMv4 PC in non executable region %08X\n", R[15]); return; } - Cycles = NDS::ARM7Target - NDS::ARM7Timestamp - 1; - - ARMJIT::JitBlockEntry block = ARMJIT::LookUpBlockEntry<1>(translatedAddr); + ARMJIT::JitBlockEntry block = ARMJIT::LookUpBlock(1, FastBlockLookup, + instrAddr - FastBlockLookupStart, instrAddr); if (block) ARM_Dispatch(this, block); else ARMJIT::CompileBlock(this); - NDS::ARM7Timestamp = NDS::ARM7Target - (Cycles + 1); + NDS::ARM7Timestamp = NDS::ARM7Target - Cycles - 1; // TODO optimize this shit!!! if (StopExecution) diff --git a/src/ARM.h b/src/ARM.h index b1e8053..b7f16d6 100644 --- a/src/ARM.h +++ b/src/ARM.h @@ -32,11 +32,14 @@ enum RWFlags_ForceUser = (1<<21), }; +const u32 ITCMPhysicalSize = 0x8000; +const u32 DTCMPhysicalSize = 0x4000; + class ARM { public: ARM(u32 num); - ~ARM(); // destroy shit + virtual ~ARM(); // destroy shit virtual void Reset(); @@ -143,6 +146,11 @@ public: NDS::MemRegion CodeMem; +#ifdef JIT_ENABLED + u32 FastBlockLookupStart = 0, FastBlockLookupSize = 0; + u64* FastBlockLookup; +#endif + static u32 ConditionTable[16]; protected: @@ -158,6 +166,7 @@ class ARMv5 : public ARM { public: ARMv5(); + ~ARMv5(); void Reset(); @@ -260,8 +269,8 @@ public: u32 DTCMBase, DTCMSize; s32 RegionCodeCycles; - u8 ITCM[0x8000]; - u8 DTCM[0x4000]; + u8 ITCM[ITCMPhysicalSize]; + u8* DTCM; u8 ICache[0x2000]; u32 ICacheTags[64*4]; diff --git a/src/ARMJIT.cpp b/src/ARMJIT.cpp index 8d87c76..53b28c1 100644 --- a/src/ARMJIT.cpp +++ b/src/ARMJIT.cpp @@ -10,13 +10,8 @@ #include "Config.h" #include "ARMJIT_Internal.h" -#if defined(__x86_64__) -#include "ARMJIT_x64/ARMJIT_Compiler.h" -#elif defined(__aarch64__) -#include "ARMJIT_A64/ARMJIT_Compiler.h" -#else -#error "The current target platform doesn't have a JIT backend" -#endif +#include "ARMJIT_Memory.h" +#include "ARMJIT_Compiler.h" #include "ARMInterpreter_ALU.h" #include "ARMInterpreter_LoadStore.h" @@ -29,6 +24,11 @@ #include "Wifi.h" #include "NDSCart.h" +#include "ARMJIT_x64/ARMJIT_Offsets.h" +static_assert(offsetof(ARM, CPSR) == ARM_CPSR_offset); +static_assert(offsetof(ARM, Cycles) == ARM_Cycles_offset); +static_assert(offsetof(ARM, StopExecution) == ARM_StopExecution_offset); + namespace ARMJIT { @@ -37,281 +37,100 @@ namespace ARMJIT Compiler* JITCompiler; -const u32 ExeMemRegionSizes[] = -{ - 0x8000, // Unmapped Region (dummy) - 0x8000, // ITCM - 4*1024*1024, // Main RAM - 0x8000, // SWRAM - 0xA4000, // LCDC - 0x8000, // ARM9 
BIOS - 0x4000, // ARM7 BIOS - 0x10000, // ARM7 WRAM - 0x40000 // ARM7 WVRAM -}; - -const u32 ExeMemRegionOffsets[] = -{ - 0, - 0x8000, - 0x10000, - 0x410000, - 0x418000, - 0x4BC000, - 0x4C4000, - 0x4C8000, - 0x4D8000, - 0x518000, -}; - -/* - translates address to pseudo physical address - - more compact, eliminates mirroring, everything comes in a row - - we only need one translation table -*/ - -u32 TranslateAddr9(u32 addr) -{ - switch (ClassifyAddress9(addr)) - { - case memregion_MainRAM: return ExeMemRegionOffsets[exeMem_MainRAM] + (addr & (MAIN_RAM_SIZE - 1)); - case memregion_SWRAM9: - if (NDS::SWRAM_ARM9) - return ExeMemRegionOffsets[exeMem_SWRAM] + (NDS::SWRAM_ARM9 - NDS::SharedWRAM) + (addr & NDS::SWRAM_ARM9Mask); - else - return 0; - case memregion_ITCM: return ExeMemRegionOffsets[exeMem_ITCM] + (addr & 0x7FFF); - case memregion_VRAM: return (addr >= 0x6800000 && addr < 0x68A4000) ? ExeMemRegionOffsets[exeMem_LCDC] + (addr - 0x6800000) : 0; - case memregion_BIOS9: return ExeMemRegionOffsets[exeMem_ARM9_BIOS] + (addr & 0xFFF); - default: return 0; - } -} - -u32 TranslateAddr7(u32 addr) -{ - switch (ClassifyAddress7(addr)) - { - case memregion_MainRAM: return ExeMemRegionOffsets[exeMem_MainRAM] + (addr & (MAIN_RAM_SIZE - 1)); - case memregion_SWRAM7: - if (NDS::SWRAM_ARM7) - return ExeMemRegionOffsets[exeMem_SWRAM] + (NDS::SWRAM_ARM7 - NDS::SharedWRAM) + (addr & NDS::SWRAM_ARM7Mask); - else - return 0; - case memregion_BIOS7: return ExeMemRegionOffsets[exeMem_ARM7_BIOS] + addr; - case memregion_WRAM7: return ExeMemRegionOffsets[exeMem_ARM7_WRAM] + (addr & 0xFFFF); - case memregion_VWRAM: return ExeMemRegionOffsets[exeMem_ARM7_WVRAM] + (addr & 0x1FFFF); - default: return 0; - } -} - -AddressRange CodeRanges[ExeMemSpaceSize / 512]; - -TinyVector InvalidLiterals; +AddressRange CodeIndexITCM[ITCMPhysicalSize / 512]; +AddressRange CodeIndexMainRAM[NDS::MainRAMSize / 512]; +AddressRange CodeIndexSWRAM[NDS::SharedWRAMSize / 512]; +AddressRange CodeIndexVRAM[0x100000 / 512]; +AddressRange CodeIndexARM9BIOS[sizeof(NDS::ARM9BIOS) / 512]; +AddressRange CodeIndexARM7BIOS[sizeof(NDS::ARM7BIOS) / 512]; +AddressRange CodeIndexARM7WRAM[NDS::ARM7WRAMSize / 512]; +AddressRange CodeIndexARM7WVRAM[0x40000 / 512]; std::unordered_map JitBlocks9; std::unordered_map JitBlocks7; -u8 MemoryStatus9[0x800000]; -u8 MemoryStatus7[0x800000]; +u64 FastBlockLookupITCM[ITCMPhysicalSize / 2]; +u64 FastBlockLookupMainRAM[NDS::MainRAMSize / 2]; +u64 FastBlockLookupSWRAM[NDS::SharedWRAMSize / 2]; +u64 FastBlockLookupVRAM[0x100000 / 2]; +u64 FastBlockLookupARM9BIOS[sizeof(NDS::ARM9BIOS) / 2]; +u64 FastBlockLookupARM7BIOS[sizeof(NDS::ARM7BIOS) / 2]; +u64 FastBlockLookupARM7WRAM[NDS::ARM7WRAMSize / 2]; +u64 FastBlockLookupARM7WVRAM[0x40000 / 2]; -int ClassifyAddress9(u32 addr) +const u32 CodeRegionSizes[ARMJIT_Memory::memregions_Count] = { - if (addr < NDS::ARM9->ITCMSize) - return memregion_ITCM; - else if (addr >= NDS::ARM9->DTCMBase && addr < (NDS::ARM9->DTCMBase + NDS::ARM9->DTCMSize)) - return memregion_DTCM; - else if ((addr & 0xFFFFF000) == 0xFFFF0000) - return memregion_BIOS9; - else - { - switch (addr & 0xFF000000) - { - case 0x02000000: - return memregion_MainRAM; - case 0x03000000: - return memregion_SWRAM9; - case 0x04000000: - return memregion_IO9; - case 0x06000000: - return memregion_VRAM; - } - } - return memregion_Other; -} + 0, + ITCMPhysicalSize, + 0, + sizeof(NDS::ARM9BIOS), + NDS::MainRAMSize, + NDS::SharedWRAMSize, + 0, + 0x100000, + sizeof(NDS::ARM7BIOS), + NDS::ARM7WRAMSize, + 0, + 0, + 0x40000, +}; 
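For reference, each u64 slot in the fast block lookup tables declared above packs the block identity into the high half and the compiled entry point into the low half: the high 32 bits hold the block's virtual start address ORed with the CPU number (bit 0 is free, since code addresses are at least halfword-aligned), and the low 32 bits hold the entry point's offset into the JIT code buffer. A minimal self-contained sketch of that pack/probe logic, with hypothetical helper names (the patch itself inlines the equivalent code in CompileBlock and LookUpBlock):

#include <stdint.h>

typedef uint32_t u32;
typedef uint64_t u64;

// Pack one lookup slot: block identity in the high half, code-buffer offset in the low half.
static inline u64 PackBlockEntry(u32 blockAddr, u32 num, u32 entryOffset)
{
    return ((u64)(blockAddr | num) << 32) | entryOffset;
}

// Probe a slot: the whole high half must match, so a slot reset to all ones
// (as ResetBlockCache and InvalidateByAddr do) should never match a valid code address.
static inline bool ProbeBlockEntry(u64 entry, u32 addr, u32 num, u32* entryOffset)
{
    if ((u32)(entry >> 32) != (addr | num))
        return false;
    *entryOffset = (u32)entry; // feed this to Compiler::AddEntryOffset
    return true;
}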
-int ClassifyAddress7(u32 addr) +AddressRange* const CodeMemRegions[ARMJIT_Memory::memregions_Count] = { - if (addr < 0x00004000) - return memregion_BIOS7; - else - { - switch (addr & 0xFF800000) - { - case 0x02000000: - case 0x02800000: - return memregion_MainRAM; - case 0x03000000: - if (NDS::SWRAM_ARM7) - return memregion_SWRAM7; - else - return memregion_WRAM7; - case 0x03800000: - return memregion_WRAM7; - case 0x04000000: - return memregion_IO7; - case 0x04800000: - return memregion_Wifi; - case 0x06000000: - case 0x06800000: - return memregion_VWRAM; - } - } - return memregion_Other; -} + NULL, + CodeIndexITCM, + NULL, + CodeIndexARM9BIOS, + CodeIndexMainRAM, + CodeIndexSWRAM, + NULL, + CodeIndexVRAM, + CodeIndexARM7BIOS, + CodeIndexARM7WRAM, + NULL, + NULL, + CodeIndexARM7WVRAM, +}; -void UpdateMemoryStatus9(u32 start, u32 end) +u64* const FastBlockLookupRegions[ARMJIT_Memory::memregions_Count] = { - start >>= 12; - end >>= 12; - - if (end == 0xFFFFF) - end++; - - for (u32 i = start; i < end; i++) - { - u32 addr = i << 12; - - int region = ClassifyAddress9(addr); - u32 pseudoPhyisical = TranslateAddr9(addr); - - for (u32 j = 0; j < 8; j++) - { - u8 val = region; - if (CodeRanges[(pseudoPhyisical + (j << 12)) / 512].Blocks.Length) - val |= 0x80; - MemoryStatus9[i * 8 + j] = val; - } - } -} + NULL, + FastBlockLookupITCM, + NULL, + FastBlockLookupARM9BIOS, + FastBlockLookupMainRAM, + FastBlockLookupSWRAM, + NULL, + FastBlockLookupVRAM, + FastBlockLookupARM7BIOS, + FastBlockLookupARM7WRAM, + NULL, + NULL, + FastBlockLookupARM7WVRAM +}; -void UpdateMemoryStatus7(u32 start, u32 end) +u32 LocaliseCodeAddress(u32 num, u32 addr) { - start >>= 12; - end >>= 12; - - if (end == 0xFFFFF) - end++; - - for (u32 i = start; i < end; i++) + int region = num == 0 + ? 
ARMJIT_Memory::ClassifyAddress9(addr) + : ARMJIT_Memory::ClassifyAddress7(addr); + + u32 mappingStart, mappingSize, memoryOffset, memorySize; + if (ARMJIT_Memory::GetRegionMapping(region, num, mappingStart, + mappingSize, memoryOffset, memorySize) + && CodeMemRegions[region]) { - u32 addr = i << 12; - - int region = ClassifyAddress7(addr); - u32 pseudoPhyisical = TranslateAddr7(addr); - - for (u32 j = 0; j < 8; j++) - { - u8 val = region; - if (CodeRanges[(pseudoPhyisical + (j << 12)) / 512].Blocks.Length) - val |= 0x80; - MemoryStatus7[i * 8 + j] = val; - } + addr = ((addr - mappingStart) & (memorySize - 1)) + memoryOffset; + addr |= (u32)region << 28; + return addr; } + return 0; } -void UpdateRegionByPseudoPhyiscal(u32 addr, bool invalidate) -{ - for (u32 i = 1; i < exeMem_Count; i++) - { - if (addr >= ExeMemRegionOffsets[i] && addr < ExeMemRegionOffsets[i] + ExeMemRegionSizes[i]) - { - for (u32 num = 0; num < 2; num++) - { - u32 physSize = ExeMemRegionSizes[i]; - u32 mapSize = 0; - u32 mapStart = 0; - switch (i) - { - case exeMem_ITCM: - if (num == 0) - mapStart = 0; mapSize = NDS::ARM9->ITCMSize; - break; - case exeMem_MainRAM: mapStart = 0x2000000; mapSize = 0x1000000; break; - case exeMem_SWRAM: - if (num == 0) - { - if (NDS::SWRAM_ARM9) - mapStart = 0x3000000, mapSize = 0x1000000; - else - mapStart = mapSize = 0; - } - else - { - if (NDS::SWRAM_ARM7) - mapStart = 0x3000000, mapSize = 0x800000; - else - mapStart = mapSize = 0; - } - break; - case exeMem_LCDC: - if (num == 0) - mapStart = 0x6800000, mapSize = 0xA4000; - break; - case exeMem_ARM9_BIOS: - if (num == 0) - mapStart = 0xFFFF0000, mapSize = 0x10000; - break; - case exeMem_ARM7_BIOS: - if (num == 1) - mapStart = 0; mapSize = 0x4000; - break; - case exeMem_ARM7_WRAM: - if (num == 1) - { - if (NDS::SWRAM_ARM7) - mapStart = 0x3800000, mapSize = 0x800000; - else - mapStart = 0x3000000, mapSize = 0x1000000; - } - break; - case exeMem_ARM7_WVRAM: - if (num == 1) - mapStart = 0x6000000, mapSize = 0x1000000; - break; - } - - for (u32 j = 0; j < mapSize / physSize; j++) - { - u32 virtAddr = mapStart + physSize * j + (addr - ExeMemRegionOffsets[i]); - if (num == 0 - && virtAddr >= NDS::ARM9->DTCMBase && virtAddr < (NDS::ARM9->DTCMBase + NDS::ARM9->DTCMSize)) - continue; - if (invalidate) - { - if (num == 0) - MemoryStatus9[virtAddr / 512] |= 0x80; - else - MemoryStatus7[virtAddr / 512] |= 0x80; - } - else - { - if (num == 0) - MemoryStatus9[virtAddr / 512] &= ~0x80; - else - MemoryStatus7[virtAddr / 512] &= ~0x80; - } - } - - } - return; - } - } - - assert(false); -} +TinyVector InvalidLiterals; template -T SlowRead9(ARMv5* cpu, u32 addr) +T SlowRead9(u32 addr, ARMv5* cpu) { u32 offset = addr & 0x3; addr &= ~(sizeof(T) - 1); @@ -335,13 +154,13 @@ T SlowRead9(ARMv5* cpu, u32 addr) } template -void SlowWrite9(ARMv5* cpu, u32 addr, T val) +void SlowWrite9(u32 addr, ARMv5* cpu, T val) { addr &= ~(sizeof(T) - 1); if (addr < cpu->ITCMSize) { - InvalidateITCMIfNecessary(addr); + CheckAndInvalidate<0, ARMJIT_Memory::memregion_ITCM>(addr); *(T*)&cpu->ITCM[addr & 0x7FFF] = val; } else if (addr >= cpu->DTCMBase && addr < (cpu->DTCMBase + cpu->DTCMSize)) @@ -362,13 +181,13 @@ void SlowWrite9(ARMv5* cpu, u32 addr, T val) } } -template void SlowWrite9(ARMv5*, u32, u32); -template void SlowWrite9(ARMv5*, u32, u16); -template void SlowWrite9(ARMv5*, u32, u8); +template void SlowWrite9(u32, ARMv5*, u32); +template void SlowWrite9(u32, ARMv5*, u16); +template void SlowWrite9(u32, ARMv5*, u8); -template u32 SlowRead9(ARMv5*, u32); -template u16 
SlowRead9(ARMv5*, u32); -template u8 SlowRead9(ARMv5*, u32); +template u32 SlowRead9(u32, ARMv5*); +template u16 SlowRead9(u32, ARMv5*); +template u8 SlowRead9(u32, ARMv5*); template T SlowRead7(u32 addr) @@ -407,14 +226,15 @@ template void SlowBlockTransfer9(u32 addr, u64* data, u32 num, ARMv5* cpu) { addr &= ~0x3; + if (PreInc) + addr += 4; for (int i = 0; i < num; i++) { - addr += PreInc * 4; if (Write) - SlowWrite9(cpu, addr, data[i]); + SlowWrite9(addr, cpu, data[i]); else - data[i] = SlowRead9(cpu, addr); - addr += !PreInc * 4; + data[i] = SlowRead9(addr, cpu); + addr += 4; } } @@ -422,14 +242,15 @@ template void SlowBlockTransfer7(u32 addr, u64* data, u32 num) { addr &= ~0x3; + if (PreInc) + addr += 4; for (int i = 0; i < num; i++) { - addr += PreInc * 4; if (Write) SlowWrite7(addr, data[i]); else data[i] = SlowRead7(addr); - addr += !PreInc * 4; + addr += 4; } } @@ -540,16 +361,18 @@ struct UnreliableHashTable }; UnreliableHashTable RestoreCandidates; -UnreliableHashTable FastBlockLookUp9; -UnreliableHashTable FastBlockLookUp7; void Init() { JITCompiler = new Compiler(); + + ARMJIT_Memory::Init(); } void DeInit() { + ARMJIT_Memory::DeInit(); + delete JITCompiler; } @@ -557,8 +380,7 @@ void Reset() { ResetBlockCache(); - UpdateMemoryStatus9(0, 0xFFFFFFFF); - UpdateMemoryStatus7(0, 0xFFFFFFFF); + ARMJIT_Memory::Reset(); } void FloodFillSetFlags(FetchedInstr instrs[], int start, u8 flags) @@ -673,11 +495,12 @@ bool IsIdleLoop(FetchedInstr* instrs, int instrsCount) // it basically checks if one iteration of a loop depends on another // the rules are quite simple + JIT_DEBUGPRINT("checking potential idle loop\n"); u16 regsWrittenTo = 0; u16 regsDisallowedToWrite = 0; for (int i = 0; i < instrsCount; i++) { - //printf("instr %d %x regs(%x %x) %x %x\n", i, instrs[i].Instr, instrs[i].Info.DstRegs, instrs[i].Info.SrcRegs, regsWrittenTo, regsDisallowedToWrite); + JIT_DEBUGPRINT("instr %d %x regs(%x %x) %x %x\n", i, instrs[i].Instr, instrs[i].Info.DstRegs, instrs[i].Info.SrcRegs, regsWrittenTo, regsDisallowedToWrite); if (instrs[i].Info.SpecialKind == ARMInstrInfo::special_WriteMem) return false; if (i < instrsCount - 1 && instrs[i].Info.Branches()) @@ -782,8 +605,6 @@ InterpreterFunc InterpretTHUMB[ARMInstrInfo::tk_Count] = }; #undef F - -extern u32 literalsPerBlock; void CompileBlock(ARM* cpu) { bool thumb = cpu->CPSR & 0x20; @@ -794,14 +615,28 @@ void CompileBlock(ARM* cpu) Config::JIT_MaxBlockSize = 32; u32 blockAddr = cpu->R[15] - (thumb ? 2 : 4); - u32 pseudoPhysicalAddr = cpu->Num == 0 - ? TranslateAddr9(blockAddr) - : TranslateAddr7(blockAddr); - if (pseudoPhysicalAddr < ExeMemRegionSizes[exeMem_Unmapped]) - { - printf("Trying to compile a block in unmapped memory: %x\n", blockAddr); - } - + + auto& map = cpu->Num == 0 ? JitBlocks9 : JitBlocks7; + auto existingBlockIt = map.find(blockAddr); + if (existingBlockIt != map.end()) + { + // there's already a block, though it's not inside the fast map + // could be that there are two blocks at the same physical addr + // but different mirrors + u32 localAddr = existingBlockIt->second->StartAddrLocal; + + u64* entry = &FastBlockLookupRegions[localAddr >> 28][localAddr & 0xFFFFFFF]; + *entry = ((u64)blockAddr | cpu->Num) << 32; + *entry |= JITCompiler->SubEntryOffset(existingBlockIt->second->EntryPoint); + return; + } + + u32 localAddr = LocaliseCodeAddress(cpu->Num, blockAddr); + if (!localAddr) + { + printf("trying to compile non executable code? 
%x\n", blockAddr); + } + FetchedInstr instrs[Config::JIT_MaxBlockSize]; int i = 0; u32 r15 = cpu->R[15]; @@ -842,9 +677,8 @@ void CompileBlock(ARM* cpu) instrValues[i] = instrs[i].Instr; - u32 translatedAddr = cpu->Num == 0 - ? TranslateAddr9(instrs[i].Addr) - : TranslateAddr7(instrs[i].Addr); + u32 translatedAddr = LocaliseCodeAddress(cpu->Num, instrs[i].Addr); + assert(translatedAddr); u32 translatedAddrRounded = translatedAddr & ~0x1FF; if (i == 0 || translatedAddrRounded != addressRanges[numAddressRanges - 1]) { @@ -928,9 +762,11 @@ void CompileBlock(ARM* cpu) && instrs[i].Info.SpecialKind == ARMInstrInfo::special_LoadLiteral && DecodeLiteral(thumb, instrs[i], literalAddr)) { - u32 translatedAddr = cpu->Num == 0 - ? TranslateAddr9(literalAddr) - : TranslateAddr7(literalAddr); + u32 translatedAddr = LocaliseCodeAddress(cpu->Num, literalAddr); + if (!translatedAddr) + { + printf("literal in non executable memory?\n"); + } u32 translatedAddrRounded = translatedAddr & ~0x1FF; u32 j = 0; @@ -994,9 +830,7 @@ void CompileBlock(ARM* cpu) } else if (hasBranched && !isBackJump && i + 1 < Config::JIT_MaxBlockSize) { - u32 targetPseudoPhysical = cpu->Num == 0 - ? TranslateAddr9(target) - : TranslateAddr7(target); + u32 targetLocalised = LocaliseCodeAddress(cpu->Num, target); if (link) { @@ -1048,7 +882,7 @@ void CompileBlock(ARM* cpu) { RestoreCandidates.Remove(instrHash); - mayRestore = prevBlock->PseudoPhysicalAddr == pseudoPhysicalAddr && prevBlock->LiteralHash == literalHash; + mayRestore = prevBlock->StartAddr == blockAddr && prevBlock->LiteralHash == literalHash; if (mayRestore && prevBlock->NumAddresses == numAddressRanges) { @@ -1087,11 +921,12 @@ void CompileBlock(ARM* cpu) for (int j = 0; j < numLiterals; j++) block->Literals()[j] = literalLoadAddrs[j]; - block->PseudoPhysicalAddr = pseudoPhysicalAddr; + block->StartAddr = blockAddr; + block->StartAddrLocal = localAddr; FloodFillSetFlags(instrs, i - 1, 0xF); - block->EntryPoint = JITCompiler->CompileBlock(pseudoPhysicalAddr, cpu, thumb, instrs, i); + block->EntryPoint = JITCompiler->CompileBlock(cpu, thumb, instrs, i); } else { @@ -1104,30 +939,34 @@ void CompileBlock(ARM* cpu) assert(addressRanges[j] == block->AddressRanges()[j]); assert(addressMasks[j] == block->AddressMasks()[j]); assert(addressMasks[j] != 0); - CodeRanges[addressRanges[j] / 512].Code |= addressMasks[j]; - CodeRanges[addressRanges[j] / 512].Blocks.Add(block); - UpdateRegionByPseudoPhyiscal(addressRanges[j], true); + AddressRange* region = CodeMemRegions[addressRanges[j] >> 28]; + + if (!PageContainsCode(®ion[(addressRanges[j] & 0xFFFF000) / 512])) + ARMJIT_Memory::SetCodeProtection(addressRanges[j] >> 28, addressRanges[j] & 0xFFFFFFF, true); + + AddressRange* range = ®ion[(addressRanges[j] & 0xFFFFFFF) / 512]; + range->Code |= addressMasks[j]; + range->Blocks.Add(block); } if (cpu->Num == 0) - { - JitBlocks9[pseudoPhysicalAddr] = block; - FastBlockLookUp9.Insert(pseudoPhysicalAddr, JITCompiler->SubEntryOffset(block->EntryPoint)); - } + JitBlocks9[blockAddr] = block; else - { - JitBlocks7[pseudoPhysicalAddr] = block; - FastBlockLookUp7.Insert(pseudoPhysicalAddr, JITCompiler->SubEntryOffset(block->EntryPoint)); - } + JitBlocks7[blockAddr] = block; + + u64* entry = &FastBlockLookupRegions[(localAddr >> 28)][(localAddr & 0xFFFFFFF) / 2]; + *entry = ((u64)blockAddr | cpu->Num) << 32; + *entry |= JITCompiler->SubEntryOffset(block->EntryPoint); } -void InvalidateByAddr(u32 pseudoPhysical) +void InvalidateByAddr(u32 localAddr) { - JIT_DEBUGPRINT("invalidating by addr %x\n", 
pseudoPhysical); + JIT_DEBUGPRINT("invalidating by addr %x\n", localAddr); - AddressRange* range = &CodeRanges[pseudoPhysical / 512]; - u32 mask = 1 << ((pseudoPhysical & 0x1FF) / 16); + AddressRange* region = CodeMemRegions[localAddr >> 28]; + AddressRange* range = ®ion[(localAddr & 0xFFFFFFF) / 512]; + u32 mask = 1 << ((localAddr & 0x1FF) / 16); range->Code = 0; for (int i = 0; i < range->Blocks.Length;) @@ -1138,7 +977,7 @@ void InvalidateByAddr(u32 pseudoPhysical) u32 mask = 0; for (int j = 0; j < block->NumAddresses; j++) { - if (block->AddressRanges()[j] == (pseudoPhysical & ~0x1FF)) + if (block->AddressRanges()[j] == (localAddr & ~0x1FF)) { mask = block->AddressMasks()[j]; invalidated = block->AddressMasks()[j] & mask; @@ -1154,15 +993,21 @@ void InvalidateByAddr(u32 pseudoPhysical) } range->Blocks.Remove(i); + if (range->Blocks.Length == 0 + && !PageContainsCode(®ion[(localAddr & 0xFFFF000) / 512])) + { + ARMJIT_Memory::SetCodeProtection(localAddr >> 28, localAddr & 0xFFFFFFF, false); + } + bool literalInvalidation = false; for (int j = 0; j < block->NumLiterals; j++) { u32 addr = block->Literals()[j]; - if (addr == pseudoPhysical) + if (addr == localAddr) { - if (InvalidLiterals.Find(pseudoPhysical) != -1) + if (InvalidLiterals.Find(localAddr) != -1) { - InvalidLiterals.Add(pseudoPhysical); + InvalidLiterals.Add(localAddr); JIT_DEBUGPRINT("found invalid literal %d\n", InvalidLiterals.Length); } literalInvalidation = true; @@ -1172,35 +1017,30 @@ void InvalidateByAddr(u32 pseudoPhysical) for (int j = 0; j < block->NumAddresses; j++) { u32 addr = block->AddressRanges()[j]; - if ((addr / 512) != (pseudoPhysical / 512)) + if ((addr / 512) != (localAddr / 512)) { - AddressRange* otherRange = &CodeRanges[addr / 512]; + AddressRange* otherRegion = CodeMemRegions[addr >> 28]; + AddressRange* otherRange = &otherRegion[(addr & 0xFFFFFFF) / 512]; assert(otherRange != range); + bool removed = otherRange->Blocks.RemoveByValue(block); assert(removed); if (otherRange->Blocks.Length == 0) { + if (!PageContainsCode(&otherRegion[(addr & 0xFFFF000) / 512])) + ARMJIT_Memory::SetCodeProtection(addr >> 28, addr & 0xFFFFFFF, false); + otherRange->Code = 0; - UpdateRegionByPseudoPhyiscal(addr, false); } } } - for (int j = 0; j < block->NumLinks(); j++) - JITCompiler->UnlinkBlock(block->Links()[j]); - block->ResetLinks(); - + FastBlockLookupRegions[block->StartAddrLocal >> 28][(block->StartAddrLocal & 0xFFFFFFF) / 2] = (u64)UINT32_MAX << 32; if (block->Num == 0) - { - JitBlocks9.erase(block->PseudoPhysicalAddr); - FastBlockLookUp9.Remove(block->PseudoPhysicalAddr); - } + JitBlocks9.erase(block->StartAddr); else - { - JitBlocks7.erase(block->PseudoPhysicalAddr); - FastBlockLookUp7.Remove(block->PseudoPhysicalAddr); - } + JitBlocks7.erase(block->StartAddr); if (!literalInvalidation) { @@ -1213,24 +1053,66 @@ void InvalidateByAddr(u32 pseudoPhysical) delete block; } } +} - if (range->Blocks.Length == 0) - UpdateRegionByPseudoPhyiscal(pseudoPhysical, false); +template +void CheckAndInvalidate(u32 addr) +{ + // let's hope this gets all properly inlined + u32 mappingStart, mappingSize, memoryOffset, memorySize; + if (ARMJIT_Memory::GetRegionMapping(region, num, mappingStart, mappingSize, memoryOffset, memorySize)) + { + u32 localAddr = ((addr - mappingStart) & (memorySize - 1)) + memoryOffset; + if (CodeMemRegions[region][localAddr / 512].Code & (1 << ((localAddr & 0x1FF) / 16))) + InvalidateByAddr(localAddr | (region << 28)); + } +} + +JitBlockEntry LookUpBlock(u32 num, u64* entries, u32 offset, u32 addr) +{ + 
u64* entry = &entries[offset / 2]; + if (*entry >> 32 == (addr | num)) + return JITCompiler->AddEntryOffset((u32)*entry); + return NULL; } -void InvalidateRegionIfNecessary(u32 pseudoPhyisical) +bool SetupExecutableRegion(u32 num, u32 blockAddr, u64*& entry, u32& start, u32& size) { - if (CodeRanges[pseudoPhyisical / 512].Code & (1 << ((pseudoPhyisical & 0x1FF) / 16))) - InvalidateByAddr(pseudoPhyisical); + int region = num == 0 + ? ARMJIT_Memory::ClassifyAddress9(blockAddr) + : ARMJIT_Memory::ClassifyAddress7(blockAddr); + + u32 mappingStart, mappingSize, memoryOffset, memorySize; + if (CodeMemRegions[region] + && ARMJIT_Memory::GetRegionMapping(region, num, mappingStart, + mappingSize, memoryOffset, memorySize)) + { + entry = FastBlockLookupRegions[region] + memoryOffset / 2; + // evil, though it should work for everything except DTCM which is not relevant here + start = blockAddr & ~(memorySize - 1); + size = memorySize; + return true; + } + else + return false; } +template void CheckAndInvalidate<0, ARMJIT_Memory::memregion_MainRAM>(u32); +template void CheckAndInvalidate<1, ARMJIT_Memory::memregion_MainRAM>(u32); +template void CheckAndInvalidate<0, ARMJIT_Memory::memregion_SWRAM>(u32); +template void CheckAndInvalidate<1, ARMJIT_Memory::memregion_SWRAM>(u32); +template void CheckAndInvalidate<1, ARMJIT_Memory::memregion_WRAM7>(u32); +template void CheckAndInvalidate<1, ARMJIT_Memory::memregion_VWRAM>(u32); +template void CheckAndInvalidate<0, ARMJIT_Memory::memregion_VRAM>(u32); +template void CheckAndInvalidate<0, ARMJIT_Memory::memregion_ITCM>(u32); + void ResetBlockCache() { printf("Resetting JIT block cache...\n"); InvalidLiterals.Clear(); - FastBlockLookUp9.Reset(); - FastBlockLookUp7.Reset(); + for (int i = 0; i < ARMJIT_Memory::memregions_Count; i++) + memset(FastBlockLookupRegions[i], 0xFF, CodeRegionSizes[i] * sizeof(u64) / 2); RestoreCandidates.Reset(); for (int i = 0; i < sizeof(RestoreCandidates.Table)/sizeof(RestoreCandidates.Table[0]); i++) { @@ -1251,8 +1133,9 @@ void ResetBlockCache() for (int j = 0; j < block->NumAddresses; j++) { u32 addr = block->AddressRanges()[j]; - CodeRanges[addr / 512].Blocks.Clear(); - CodeRanges[addr / 512].Code = 0; + AddressRange* range = &CodeMemRegions[addr >> 28][(addr & 0xFFFFFFF) / 512]; + range->Blocks.Clear(); + range->Code = 0; } delete block; } @@ -1262,8 +1145,9 @@ void ResetBlockCache() for (int j = 0; j < block->NumAddresses; j++) { u32 addr = block->AddressRanges()[j]; - CodeRanges[addr / 512].Blocks.Clear(); - CodeRanges[addr / 512].Code = 0; + AddressRange* range = &CodeMemRegions[addr >> 28][(addr & 0xFFFFFFF) / 512]; + range->Blocks.Clear(); + range->Code = 0; } } JitBlocks9.clear(); @@ -1272,191 +1156,4 @@ void ResetBlockCache() JITCompiler->Reset(); } -template -JitBlockEntry LookUpBlockEntry(u32 addr) -{ - auto& fastMap = Num == 0 ? FastBlockLookUp9 : FastBlockLookUp7; - u32 entryOffset = fastMap.LookUp(addr); - if (entryOffset != UINT32_MAX) - return JITCompiler->AddEntryOffset(entryOffset); - - auto& slowMap = Num == 0 ? JitBlocks9 : JitBlocks7; - auto block = slowMap.find(addr); - if (block != slowMap.end()) - { - fastMap.Insert(addr, JITCompiler->SubEntryOffset(block->second->EntryPoint)); - return block->second->EntryPoint; - } - return NULL; -} - -template JitBlockEntry LookUpBlockEntry<0>(u32); -template JitBlockEntry LookUpBlockEntry<1>(u32); - -template -void LinkBlock(ARM* cpu, u32 codeOffset) -{ - auto& blockMap = Num == 0 ? 
JitBlocks9 : JitBlocks7; - u32 instrAddr = cpu->R[15] - ((cpu->CPSR&0x20)?2:4); - u32 targetPseudoPhys = Num == 0 ? TranslateAddr9(instrAddr) : TranslateAddr7(instrAddr); - auto block = blockMap.find(targetPseudoPhys); - if (block == blockMap.end()) - { - CompileBlock(cpu); - block = blockMap.find(targetPseudoPhys); - } - - JIT_DEBUGPRINT("linking to block %08x\n", targetPseudoPhys); - - block->second->AddLink(codeOffset); - JITCompiler->LinkBlock(codeOffset, block->second->EntryPoint); -} - -template void LinkBlock<0>(ARM*, u32); -template void LinkBlock<1>(ARM*, u32); - -void WifiWrite32(u32 addr, u32 val) -{ - Wifi::Write(addr, val & 0xFFFF); - Wifi::Write(addr + 2, val >> 16); -} - -u32 WifiRead32(u32 addr) -{ - return Wifi::Read(addr) | (Wifi::Read(addr + 2) << 16); -} - -template -void VRAMWrite(u32 addr, T val) -{ - switch (addr & 0x00E00000) - { - case 0x00000000: GPU::WriteVRAM_ABG(addr, val); return; - case 0x00200000: GPU::WriteVRAM_BBG(addr, val); return; - case 0x00400000: GPU::WriteVRAM_AOBJ(addr, val); return; - case 0x00600000: GPU::WriteVRAM_BOBJ(addr, val); return; - default: GPU::WriteVRAM_LCDC(addr, val); return; - } -} -template -T VRAMRead(u32 addr) -{ - switch (addr & 0x00E00000) - { - case 0x00000000: return GPU::ReadVRAM_ABG(addr); - case 0x00200000: return GPU::ReadVRAM_BBG(addr); - case 0x00400000: return GPU::ReadVRAM_AOBJ(addr); - case 0x00600000: return GPU::ReadVRAM_BOBJ(addr); - default: return GPU::ReadVRAM_LCDC(addr); - } -} - -void* GetFuncForAddr(ARM* cpu, u32 addr, bool store, int size) -{ - if (cpu->Num == 0) - { - switch (addr & 0xFF000000) - { - case 0x04000000: - if (!store && size == 32 && addr == 0x04100010 && NDS::ExMemCnt[0] & (1<<11)) - return (void*)NDSCart::ReadROMData; - - /* - unfortunately we can't map GPU2D this way - since it's hidden inside an object - - though GPU3D registers are accessed much more intensive - */ - if (addr >= 0x04000320 && addr < 0x040006A4) - { - switch (size | store) - { - case 8: return (void*)GPU3D::Read8; - case 9: return (void*)GPU3D::Write8; - case 16: return (void*)GPU3D::Read16; - case 17: return (void*)GPU3D::Write16; - case 32: return (void*)GPU3D::Read32; - case 33: return (void*)GPU3D::Write32; - } - } - - switch (size | store) - { - case 8: return (void*)NDS::ARM9IORead8; - case 9: return (void*)NDS::ARM9IOWrite8; - case 16: return (void*)NDS::ARM9IORead16; - case 17: return (void*)NDS::ARM9IOWrite16; - case 32: return (void*)NDS::ARM9IORead32; - case 33: return (void*)NDS::ARM9IOWrite32; - } - break; - case 0x06000000: - switch (size | store) - { - case 8: return (void*)VRAMRead; - case 9: return NULL; - case 16: return (void*)VRAMRead; - case 17: return (void*)VRAMWrite; - case 32: return (void*)VRAMRead; - case 33: return (void*)VRAMWrite; - } - break; - } - } - else - { - switch (addr & 0xFF800000) - { - case 0x04000000: - if (addr >= 0x04000400 && addr < 0x04000520) - { - switch (size | store) - { - case 8: return (void*)SPU::Read8; - case 9: return (void*)SPU::Write8; - case 16: return (void*)SPU::Read16; - case 17: return (void*)SPU::Write16; - case 32: return (void*)SPU::Read32; - case 33: return (void*)SPU::Write32; - } - } - - switch (size | store) - { - case 8: return (void*)NDS::ARM7IORead8; - case 9: return (void*)NDS::ARM7IOWrite8; - case 16: return (void*)NDS::ARM7IORead16; - case 17: return (void*)NDS::ARM7IOWrite16; - case 32: return (void*)NDS::ARM7IORead32; - case 33: return (void*)NDS::ARM7IOWrite32; - } - break; - case 0x04800000: - if (addr < 0x04810000 && size >= 16) - { - switch 
(size | store) - { - case 16: return (void*)Wifi::Read; - case 17: return (void*)Wifi::Write; - case 32: return (void*)WifiRead32; - case 33: return (void*)WifiWrite32; - } - } - break; - case 0x06000000: - case 0x06800000: - switch (size | store) - { - case 8: return (void*)GPU::ReadVRAM_ARM7; - case 9: return (void*)GPU::WriteVRAM_ARM7; - case 16: return (void*)GPU::ReadVRAM_ARM7; - case 17: return (void*)GPU::WriteVRAM_ARM7; - case 32: return (void*)GPU::ReadVRAM_ARM7; - case 33: return (void*)GPU::WriteVRAM_ARM7; - } - } - } - return NULL; -} - } diff --git a/src/ARMJIT.h b/src/ARMJIT.h index 44a6140..2320b7b 100644 --- a/src/ARMJIT.h +++ b/src/ARMJIT.h @@ -9,32 +9,7 @@ namespace ARMJIT { -enum ExeMemKind -{ - exeMem_Unmapped = 0, - exeMem_ITCM, - exeMem_MainRAM, - exeMem_SWRAM, - exeMem_LCDC, - exeMem_ARM9_BIOS, - exeMem_ARM7_BIOS, - exeMem_ARM7_WRAM, - exeMem_ARM7_WVRAM, - exeMem_Count -}; - -extern const u32 ExeMemRegionOffsets[]; -extern const u32 ExeMemRegionSizes[]; - -typedef u32 (*JitBlockEntry)(); - -const u32 ExeMemSpaceSize = 0x518000; // I hate you C++, sometimes I really hate you... - -u32 TranslateAddr9(u32 addr); -u32 TranslateAddr7(u32 addr); - -template -JitBlockEntry LookUpBlockEntry(u32 addr); +typedef void (*JitBlockEntry)(); void Init(); void DeInit(); @@ -43,44 +18,15 @@ void Reset(); void InvalidateByAddr(u32 pseudoPhysical); -void InvalidateRegionIfNecessary(u32 addr); - -inline void InvalidateMainRAMIfNecessary(u32 addr) -{ - InvalidateRegionIfNecessary(ExeMemRegionOffsets[exeMem_MainRAM] + (addr & (MAIN_RAM_SIZE - 1))); -} -inline void InvalidateITCMIfNecessary(u32 addr) -{ - InvalidateRegionIfNecessary(ExeMemRegionOffsets[exeMem_ITCM] + (addr & 0x7FFF)); -} -inline void InvalidateLCDCIfNecessary(u32 addr) -{ - if (addr < 0x68A3FFF) - InvalidateRegionIfNecessary(ExeMemRegionOffsets[exeMem_LCDC] + (addr - 0x6800000)); -} -inline void InvalidateSWRAM7IfNecessary(u32 addr) -{ - InvalidateRegionIfNecessary(ExeMemRegionOffsets[exeMem_SWRAM] + (NDS::SWRAM_ARM7 - NDS::SharedWRAM) + (addr & NDS::SWRAM_ARM7Mask)); -} -inline void InvalidateSWRAM9IfNecessary(u32 addr) -{ - InvalidateRegionIfNecessary(ExeMemRegionOffsets[exeMem_SWRAM] + (NDS::SWRAM_ARM9 - NDS::SharedWRAM) + (addr & NDS::SWRAM_ARM9Mask)); -} -inline void InvalidateARM7WRAMIfNecessary(u32 addr) -{ - InvalidateRegionIfNecessary(ExeMemRegionOffsets[exeMem_ARM7_WRAM] + (addr & 0xFFFF)); -} -inline void InvalidateARM7WVRAMIfNecessary(u32 addr) -{ - InvalidateRegionIfNecessary(ExeMemRegionOffsets[exeMem_ARM7_WVRAM] + (addr & 0x1FFFF)); -} +template +void CheckAndInvalidate(u32 addr); void CompileBlock(ARM* cpu); void ResetBlockCache(); -void UpdateMemoryStatus9(u32 start, u32 end); -void UpdateMemoryStatus7(u32 start, u32 end); +JitBlockEntry LookUpBlock(u32 num, u64* entries, u32 offset, u32 addr); +bool SetupExecutableRegion(u32 num, u32 blockAddr, u64*& entry, u32& start, u32& size); } diff --git a/src/ARMJIT_A64/ARMJIT_ALU.cpp b/src/ARMJIT_A64/ARMJIT_ALU.cpp index 0fe6a97..5f021a0 100644 --- a/src/ARMJIT_A64/ARMJIT_ALU.cpp +++ b/src/ARMJIT_A64/ARMJIT_ALU.cpp @@ -243,7 +243,7 @@ void Compiler::Comp_Arithmetic(int op, bool S, ARM64Reg rd, ARM64Reg rn, Op2 op2 if (S && !CurInstr.SetFlags) S = false; - bool CVInGP = false; + bool CVInGPR = false; switch (op) { case 0x2: // SUB @@ -306,7 +306,7 @@ void Compiler::Comp_Arithmetic(int op, bool S, ARM64Reg rd, ARM64Reg rn, Op2 op2 UBFX(W2, RCPSR, 29, 1); if (S) { - CVInGP = true; + CVInGPR = true; ADDS(W1, rn, W2); CSET(W2, CC_CS); CSET(W3, CC_VS); @@ -335,7 +335,7 
@@ void Compiler::Comp_Arithmetic(int op, bool S, ARM64Reg rd, ARM64Reg rn, Op2 op2 ORN(W1, WZR, op2.Reg.Rm, op2.ToArithOption()); if (S) { - CVInGP = true; + CVInGPR = true; ADDS(W1, W2, W1); CSET(W2, CC_CS); CSET(W3, CC_VS); @@ -355,7 +355,7 @@ void Compiler::Comp_Arithmetic(int op, bool S, ARM64Reg rd, ARM64Reg rn, Op2 op2 MVN(W1, rn); if (S) { - CVInGP = true; + CVInGPR = true; ADDS(W1, W2, W1); CSET(W2, CC_CS); CSET(W3, CC_VS); @@ -379,12 +379,12 @@ void Compiler::Comp_Arithmetic(int op, bool S, ARM64Reg rd, ARM64Reg rn, Op2 op2 if (S) { - if (CVInGP) + if (CVInGPR) { BFI(RCPSR, W2, 29, 1); BFI(RCPSR, W3, 28, 1); } - Comp_RetriveFlags(!CVInGP); + Comp_RetriveFlags(!CVInGPR); } } @@ -501,7 +501,23 @@ void Compiler::A_Comp_ALUMovOp() MOVI2R(rd, op2.Imm); } else - MOV(rd, op2.Reg.Rm, op2.ToArithOption()); + { + // MOV with a shifted operand is an alias of ORR, and ORR with a shifted operand has extra cycles of latency, so emit the dedicated shift instructions instead + if (op2.Reg.ShiftAmount > 0) + { + switch (op2.Reg.ShiftType) + { + case ST_LSL: LSL(rd, op2.Reg.Rm, op2.Reg.ShiftAmount); break; + case ST_LSR: LSR(rd, op2.Reg.Rm, op2.Reg.ShiftAmount); break; + case ST_ASR: ASR(rd, op2.Reg.Rm, op2.Reg.ShiftAmount); break; + case ST_ROR: ROR_(rd, op2.Reg.Rm, op2.Reg.ShiftAmount); break; + } + } + else + { + MOV(rd, op2.Reg.Rm, op2.ToArithOption()); + } + } } if (S) @@ -558,10 +574,7 @@ void Compiler::Comp_Mul_Mla(bool S, bool mla, ARM64Reg rd, ARM64Reg rm, ARM64Reg } else { - CLZ(W0, rs); - CLS(W1, rs); - CMP(W0, W1); - CSEL(W0, W0, W1, CC_GT); + CLS(W0, rs); Comp_AddCycles_CI(mla ? 1 : 0, W0, ArithOption(W0, ST_LSR, 3)); } @@ -594,10 +607,10 @@ void Compiler::A_Comp_Mul_Long() } else { - CLZ(W0, rs); - CLS(W1, rs); - CMP(W0, W1); - CSEL(W0, W0, W1, CC_GT); + if (sign) + CLS(W0, rs); + else + CLZ(W0, rs); Comp_AddCycles_CI(0, W0, ArithOption(W0, ST_LSR, 3)); } @@ -628,6 +641,86 @@ void Compiler::A_Comp_Mul_Long() Comp_RetriveFlags(false); } +void Compiler::A_Comp_Mul_Short() +{ + ARM64Reg rd = MapReg(CurInstr.A_Reg(16)); + ARM64Reg rm = MapReg(CurInstr.A_Reg(0)); + ARM64Reg rs = MapReg(CurInstr.A_Reg(8)); + u32 op = (CurInstr.Instr >> 21) & 0xF; + + bool x = CurInstr.Instr & (1 << 5); + bool y = CurInstr.Instr & (1 << 6); + + SBFX(W1, rs, y ? 16 : 0, 16); + + if (op == 0b1000) + { + // SMLAxy + + SBFX(W0, rm, x ? 16 : 0, 16); + + MUL(W0, W0, W1); + + ORRI2R(W1, RCPSR, 0x08000000); + + ARM64Reg rn = MapReg(CurInstr.A_Reg(12)); + ADDS(rd, W0, rn); + + CSEL(RCPSR, W1, RCPSR, CC_VS); + + CPSRDirty = true; + + Comp_AddCycles_C(); + } + else if (op == 0b1011) + { + // SMULxy + + SBFX(W0, rm, x ? 16 : 0, 16); + + MUL(rd, W0, W1); + + Comp_AddCycles_C(); + } + else if (op == 0b1010) + { + // SMLALxy + + ARM64Reg rn = MapReg(CurInstr.A_Reg(12)); + + MOV(W2, rn); + BFI(X2, rd, 32, 32); + + SBFX(W0, rm, x ? 16 : 0, 16); + + SMADDL(EncodeRegTo64(rn), W0, W1, X2); + + UBFX(EncodeRegTo64(rd), EncodeRegTo64(rn), 32, 32); + + Comp_AddCycles_CI(1); + } + else if (op == 0b1001) + { + // SMLAWy/SMULWy + SMULL(X0, rm, W1); + ASR(x ? 
EncodeRegTo64(rd) : X0, X0, 16); + + if (!x) + { + ORRI2R(W1, RCPSR, 0x08000000); + + ARM64Reg rn = MapReg(CurInstr.A_Reg(12)); + ADDS(rd, W0, rn); + + CSEL(RCPSR, W1, RCPSR, CC_VS); + + CPSRDirty = true; + } + + Comp_AddCycles_C(); + } +} + void Compiler::A_Comp_Mul() { ARM64Reg rd = MapReg(CurInstr.A_Reg(16)); diff --git a/src/ARMJIT_A64/ARMJIT_Branch.cpp b/src/ARMJIT_A64/ARMJIT_Branch.cpp index 542f0b7..f130938 100644 --- a/src/ARMJIT_A64/ARMJIT_Branch.cpp +++ b/src/ARMJIT_A64/ARMJIT_Branch.cpp @@ -143,7 +143,7 @@ void Compiler::Comp_JumpTo(u32 addr, bool forceNonConstantCycles) if ((Thumb || CurInstr.Cond() >= 0xE) && !forceNonConstantCycles) ConstantCycles += cycles; else - ADD(RCycles, RCycles, cycles); + SUB(RCycles, RCycles, cycles); } @@ -152,23 +152,19 @@ void* Compiler::Gen_JumpTo9(int kind) AlignCode16(); void* res = GetRXPtr(); - MOVI2R(W2, kCodeCacheTiming); - // W1 - code cycles non branch - // W2 - branch code cycles LSR(W1, W0, 12); - LSL(W1, W1, 2); ADDI2R(W1, W1, offsetof(ARMv5, MemTimings), W2); LDRB(W1, RCPU, W1); - LDR(INDEX_UNSIGNED, W3, RCPU, offsetof(ARMv5, ITCMSize)); + LDR(INDEX_UNSIGNED, W2, RCPU, offsetof(ARMv5, ITCMSize)); STR(INDEX_UNSIGNED, W1, RCPU, offsetof(ARMv5, RegionCodeCycles)); - CMP(W0, W3); - FixupBranch outsideITCM = B(CC_LO); - MOVI2R(W1, 1); - MOVI2R(W2, 1); - SetJumpTarget(outsideITCM); + CMP(W1, 0xFF); + MOVI2R(W3, kCodeCacheTiming); + CSEL(W1, W3, W1, CC_EQ); + CMP(W0, W2); + CSINC(W1, W1, WZR, CC_HS); FixupBranch switchToThumb; if (kind == 0) @@ -176,40 +172,36 @@ void* Compiler::Gen_JumpTo9(int kind) if (kind == 0 || kind == 1) { - ANDI2R(W0, W0, ~3); - + // ARM if (kind == 0) ANDI2R(RCPSR, RCPSR, ~0x20); - ADD(W3, W0, 4); - STR(INDEX_UNSIGNED, W3, RCPU, offsetof(ARM, R[15])); - - ADD(W1, W1, W2); - ADD(RCycles, RCycles, W1); + ANDI2R(W0, W0, ~3); + ADD(W0, W0, 4); + STR(INDEX_UNSIGNED, W0, RCPU, offsetof(ARMv5, R[15])); + ADD(W1, W1, W1); + SUB(RCycles, RCycles, W1); RET(); } + if (kind == 0 || kind == 2) { + // Thumb if (kind == 0) { SetJumpTarget(switchToThumb); - ORRI2R(RCPSR, RCPSR, 0x20); } ANDI2R(W0, W0, ~1); + ADD(W0, W0, 2); + STR(INDEX_UNSIGNED, W0, RCPU, offsetof(ARMv5, R[15])); - ADD(W3, W0, 2); - STR(INDEX_UNSIGNED, W3, RCPU, offsetof(ARM, R[15])); - - FixupBranch halfwordLoc = TBZ(W0, 1); - ADD(W1, W1, W2); - ADD(RCycles, RCycles, W1); - RET(); - - SetJumpTarget(halfwordLoc); - ADD(RCycles, RCycles, W2); + ADD(W2, W1, W1); + TSTI2R(W0, 0x2); + CSEL(W1, W1, W2, CC_EQ); + SUB(RCycles, RCycles, W1); RET(); } @@ -237,7 +229,7 @@ void* Compiler::Gen_JumpTo7(int kind) UBFX(W2, W3, 0, 8); UBFX(W3, W3, 8, 8); ADD(W2, W3, W2); - ADD(RCycles, RCycles, W2); + SUB(RCycles, RCycles, W2); ANDI2R(W0, W0, ~3); @@ -261,7 +253,7 @@ void* Compiler::Gen_JumpTo7(int kind) UBFX(W2, W3, 16, 8); UBFX(W3, W3, 24, 8); ADD(W2, W3, W2); - ADD(RCycles, RCycles, W2); + SUB(RCycles, RCycles, W2); ANDI2R(W0, W0, ~1); @@ -287,22 +279,11 @@ void Compiler::Comp_JumpTo(Arm64Gen::ARM64Reg addr, bool switchThumb, bool resto } else { - BitSet16 hiRegsLoaded(RegCache.DirtyRegs & 0xFF00); - bool previouslyDirty = CPSRDirty; + + bool cpsrDirty = CPSRDirty; SaveCPSR(); - - if (restoreCPSR) - { - if (Thumb || CurInstr.Cond() >= 0xE) - RegCache.Flush(); - else - { - // the ugly way... 
- we only save them, to load and save them again - for (int reg : hiRegsLoaded) - SaveReg(reg, RegCache.Mapping[reg]); - } - } + SaveCycles(); + PushRegs(restoreCPSR); if (switchThumb) MOV(W1, addr); @@ -319,16 +300,12 @@ void Compiler::Comp_JumpTo(Arm64Gen::ARM64Reg addr, bool switchThumb, bool resto QuickCallFunction(X3, jumpToTrampoline<ARMv5>); else QuickCallFunction(X3, jumpToTrampoline<ARMv4>); - - if (!Thumb && restoreCPSR && CurInstr.Cond() < 0xE) - { - for (int reg : hiRegsLoaded) - LoadReg(reg, RegCache.Mapping[reg]); - } - if (previouslyDirty) - LoadCPSR(); - CPSRDirty = previouslyDirty; + PopRegs(restoreCPSR); + LoadCycles(); + LoadCPSR(); + if (CurInstr.Cond() < 0xE) + CPSRDirty = cpsrDirty; } } @@ -368,21 +345,13 @@ void Compiler::T_Comp_BCOND() s32 offset = (s32)(CurInstr.Instr << 24) >> 23; Comp_JumpTo(R15 + offset + 1, true); - Comp_BranchSpecialBehaviour(); + Comp_BranchSpecialBehaviour(true); FixupBranch skipFailed = B(); SetJumpTarget(skipExecute); Comp_AddCycles_C(true); - if (CurInstr.BranchFlags & branch_FollowCondTaken) - { - SaveCPSR(false); - RegCache.PrepareExit(); - - ADD(W0, RCycles, ConstantCycles); - ABI_PopRegisters(SavedRegs); - RET(); - } + Comp_BranchSpecialBehaviour(false); SetJumpTarget(skipFailed); } diff --git a/src/ARMJIT_A64/ARMJIT_Compiler.cpp b/src/ARMJIT_A64/ARMJIT_Compiler.cpp index a67f357..42435ed 100644 --- a/src/ARMJIT_A64/ARMJIT_Compiler.cpp +++ b/src/ARMJIT_A64/ARMJIT_Compiler.cpp @@ -1,9 +1,3 @@ -#include "ARMJIT_Compiler.h" - -#include "../ARMInterpreter.h" - -#include "../ARMJIT_Internal.h" - #ifdef __SWITCH__ #include "../switch/compat_switch.h" extern char __start__; #else #include #endif +#include "ARMJIT_Compiler.h" + +#include "../ARMJIT_Internal.h" +#include "../ARMInterpreter.h" +#include "../Config.h" + #include using namespace Arm64Gen; +extern "C" void ARM_Ret(); namespace ARMJIT { @@ -28,7 +29,10 @@ namespace ARMJIT like x64. On one hand you can translate a lot of instructions directly. But at the same time, there are a ton of exceptions, like for example ADD and SUB can't have a RORed second operand on ARMv8. - */ + + One thing to keep in mind while writing a JIT: when an instruction is recompiled + into multiple ones, don't write back a result until you've read all the other operands! +*/ template <> const ARM64Reg RegisterCache<Compiler, ARM64Reg>::NativeRegAllocOrder[] = @@ -46,6 +50,132 @@ void Compiler::MovePC() ADD(MapReg(15), MapReg(15), Thumb ? 
2 : 4); } +void Compiler::A_Comp_MRS() +{ + Comp_AddCycles_C(); + + ARM64Reg rd = MapReg(CurInstr.A_Reg(12)); + + if (CurInstr.Instr & (1 << 22)) + { + ANDI2R(W5, RCPSR, 0x1F); + MOVI2R(W3, 0); + MOVI2R(W1, 15 - 8); + BL(ReadBanked); + MOV(rd, W3); + } + else + MOV(rd, RCPSR); +} + +void Compiler::A_Comp_MSR() +{ + Comp_AddCycles_C(); + + ARM64Reg val; + if (CurInstr.Instr & (1 << 25)) + { + val = W0; + MOVI2R(val, ROR((CurInstr.Instr & 0xFF), ((CurInstr.Instr >> 7) & 0x1E))); + } + else + { + val = MapReg(CurInstr.A_Reg(0)); + } + + u32 mask = 0; + if (CurInstr.Instr & (1<<16)) mask |= 0x000000FF; + if (CurInstr.Instr & (1<<17)) mask |= 0x0000FF00; + if (CurInstr.Instr & (1<<18)) mask |= 0x00FF0000; + if (CurInstr.Instr & (1<<19)) mask |= 0xFF000000; + + if (CurInstr.Instr & (1 << 22)) + { + ANDI2R(W5, RCPSR, 0x1F); + MOVI2R(W3, 0); + MOVI2R(W1, 15 - 8); + BL(ReadBanked); + + MOVI2R(W1, mask); + MOVI2R(W2, mask & 0xFFFFFF00); + ANDI2R(W5, RCPSR, 0x1F); + CMP(W5, 0x10); + CSEL(W1, W2, W1, CC_EQ); + + BIC(W3, W3, W1); + AND(W0, val, W1); + ORR(W3, W3, W0); + + MOVI2R(W1, 15 - 8); + + BL(WriteBanked); + } + else + { + mask &= 0xFFFFFFDF; + CPSRDirty = true; + + if ((mask & 0xFF) == 0) + { + ANDI2R(RCPSR, RCPSR, ~mask); + ANDI2R(W0, val, mask); + ORR(RCPSR, RCPSR, W0); + } + else + { + MOVI2R(W2, mask); + MOVI2R(W3, mask & 0xFFFFFF00); + ANDI2R(W1, RCPSR, 0x1F); + // W1 = first argument + CMP(W1, 0x10); + CSEL(W2, W3, W2, CC_EQ); + + BIC(RCPSR, RCPSR, W2); + AND(W0, val, W2); + ORR(RCPSR, RCPSR, W0); + + MOV(W2, RCPSR); + MOV(X0, RCPU); + + PushRegs(true); + + QuickCallFunction(X3, (void*)&ARM::UpdateMode); + + PopRegs(true); + } + } +} + +void Compiler::PushRegs(bool saveHiRegs) +{ + if (saveHiRegs) + { + if (Thumb || CurInstr.Cond() == 0xE) + { + BitSet16 hiRegsLoaded(RegCache.LoadedRegs & 0x7F00); + for (int reg : hiRegsLoaded) + RegCache.UnloadRegister(reg); + } + else + { + BitSet16 hiRegsDirty(RegCache.LoadedRegs & 0x7F00); + for (int reg : hiRegsDirty) + SaveReg(reg, RegCache.Mapping[reg]); + } + } +} + +void Compiler::PopRegs(bool saveHiRegs) +{ + if (saveHiRegs) + { + BitSet16 hiRegsLoaded(RegCache.LoadedRegs & 0x7F00); + + for (int reg : hiRegsLoaded) + LoadReg(reg, RegCache.Mapping[reg]); + } +} + Compiler::Compiler() { #ifdef __SWITCH__ @@ -80,8 +210,7 @@ Compiler::Compiler() assert(succeded); SetCodeBase((u8*)JitRWStart, (u8*)JitRXStart); - JitMemUseableSize = JitMemSize; - Reset(); + JitMemMainSize = JitMemSize; #else u64 pageSize = sysconf(_SC_PAGE_SIZE); u8* pageAligned = (u8*)(((u64)JitMem & ~(pageSize - 1)) + pageSize); @@ -90,31 +219,8 @@ Compiler::Compiler() SetCodeBase(pageAligned, pageAligned); JitMemUseableSize = alignedSize; - Reset(); #endif - - for (int i = 0; i < 3; i++) - { - for (int j = 0; j < 2; j++) - { - MemFunc9[i][j] = Gen_MemoryRoutine9(8 << i, j); - } - } - MemFunc7[0][0] = (void*)NDS::ARM7Read8; - MemFunc7[1][0] = (void*)NDS::ARM7Read16; - MemFunc7[2][0] = (void*)NDS::ARM7Read32; - MemFunc7[0][1] = (void*)NDS::ARM7Write8; - MemFunc7[1][1] = (void*)NDS::ARM7Write16; - MemFunc7[2][1] = (void*)NDS::ARM7Write32; - - for (int i = 0; i < 2; i++) - { - for (int j = 0; j < 2; j++) - { - MemFuncsSeq9[i][j] = Gen_MemoryRoutine9Seq(i, j); - MemFuncsSeq7[i][j] = Gen_MemoryRoutine7Seq(i, j); - } - } + SetCodePtr(0); for (int i = 0; i < 3; i++) { @@ -123,26 +229,26 @@ Compiler::Compiler() } /* - W0 - mode + W5 - mode W1 - reg num W3 - in/out value of reg */ { ReadBanked = GetRXPtr(); - ADD(X2, RCPU, X1, ArithOption(X1, ST_LSL, 2)); - CMP(W0, 0x11); + ADD(X2, RCPU, X1, 
ArithOption(X2, ST_LSL, 2)); + CMP(W5, 0x11); FixupBranch fiq = B(CC_EQ); SUBS(W1, W1, 13 - 8); - ADD(X2, RCPU, X1, ArithOption(X1, ST_LSL, 2)); + ADD(X2, RCPU, X1, ArithOption(X2, ST_LSL, 2)); FixupBranch notEverything = B(CC_LT); - CMP(W0, 0x12); + CMP(W5, 0x12); FixupBranch irq = B(CC_EQ); - CMP(W0, 0x13); + CMP(W5, 0x13); FixupBranch svc = B(CC_EQ); - CMP(W0, 0x17); + CMP(W5, 0x17); FixupBranch abt = B(CC_EQ); - CMP(W0, 0x1B); + CMP(W5, 0x1B); FixupBranch und = B(CC_EQ); SetJumpTarget(notEverything); RET(); @@ -166,19 +272,19 @@ Compiler::Compiler() { WriteBanked = GetRXPtr(); - ADD(X2, RCPU, X1, ArithOption(X1, ST_LSL, 2)); - CMP(W0, 0x11); + ADD(X2, RCPU, X1, ArithOption(X2, ST_LSL, 2)); + CMP(W5, 0x11); FixupBranch fiq = B(CC_EQ); SUBS(W1, W1, 13 - 8); - ADD(X2, RCPU, X1, ArithOption(X1, ST_LSL, 2)); + ADD(X2, RCPU, X1, ArithOption(X2, ST_LSL, 2)); FixupBranch notEverything = B(CC_LT); - CMP(W0, 0x12); + CMP(W5, 0x12); FixupBranch irq = B(CC_EQ); - CMP(W0, 0x13); + CMP(W5, 0x13); FixupBranch svc = B(CC_EQ); - CMP(W0, 0x17); + CMP(W5, 0x17); FixupBranch abt = B(CC_EQ); - CMP(W0, 0x1B); + CMP(W5, 0x1B); FixupBranch und = B(CC_EQ); SetJumpTarget(notEverything); MOVI2R(W4, 0); @@ -206,9 +312,71 @@ Compiler::Compiler() RET(); } - //FlushIcache(); + for (int num = 0; num < 2; num++) + { + for (int size = 0; size < 3; size++) + { + for (int reg = 0; reg < 8; reg++) + { + ARM64Reg rdMapped = (ARM64Reg)(W19 + reg); + PatchedStoreFuncs[num][size][reg] = GetRXPtr(); + if (num == 0) + { + MOV(X1, RCPU); + MOV(W2, rdMapped); + } + else + { + MOV(W1, rdMapped); + } + ABI_PushRegisters({30}); + switch ((8 << size) | num) + { + case 32: QuickCallFunction(X3, SlowWrite9); break; + case 33: QuickCallFunction(X3, SlowWrite7); break; + case 16: QuickCallFunction(X3, SlowWrite9); break; + case 17: QuickCallFunction(X3, SlowWrite7); break; + case 8: QuickCallFunction(X3, SlowWrite9); break; + case 9: QuickCallFunction(X3, SlowWrite7); break; + } + ABI_PopRegisters({30}); + RET(); + + for (int signextend = 0; signextend < 2; signextend++) + { + PatchedLoadFuncs[num][size][signextend][reg] = GetRXPtr(); + if (num == 0) + MOV(X1, RCPU); + ABI_PushRegisters({30}); + switch ((8 << size) | num) + { + case 32: QuickCallFunction(X3, SlowRead9); break; + case 33: QuickCallFunction(X3, SlowRead7); break; + case 16: QuickCallFunction(X3, SlowRead9); break; + case 17: QuickCallFunction(X3, SlowRead7); break; + case 8: QuickCallFunction(X3, SlowRead9); break; + case 9: QuickCallFunction(X3, SlowRead7); break; + } + ABI_PopRegisters({30}); + if (size == 32) + MOV(rdMapped, W0); + else if (signextend) + SBFX(rdMapped, W0, 0, 8 << size); + else + UBFX(rdMapped, W0, 0, 8 << size); + RET(); + } + } + } + } + + FlushIcache(); + + JitMemSecondarySize = 1024*1024*4; + + JitMemMainSize -= GetCodeOffset(); + JitMemMainSize -= JitMemSecondarySize; - JitMemUseableSize -= GetCodeOffset(); SetCodeBase((u8*)GetRWPtr(), (u8*)GetRXPtr()); } @@ -227,6 +395,16 @@ Compiler::~Compiler() #endif } +void Compiler::LoadCycles() +{ + LDR(INDEX_UNSIGNED, RCycles, RCPU, offsetof(ARM, Cycles)); +} + +void Compiler::SaveCycles() +{ + STR(INDEX_UNSIGNED, RCycles, RCPU, offsetof(ARM, Cycles)); +} + void Compiler::LoadReg(int reg, ARM64Reg nativeReg) { if (reg == 15) @@ -325,7 +503,7 @@ const Compiler::CompileFunc A_Comp[ARMInstrInfo::ak_Count] = // CMN F(ALUCmpOp), F(ALUCmpOp), F(ALUCmpOp), F(ALUCmpOp), F(ALUCmpOp), F(ALUCmpOp), F(ALUCmpOp), F(ALUCmpOp), F(ALUCmpOp), // Mul - F(Mul), F(Mul), F(Mul_Long), F(Mul_Long), F(Mul_Long), F(Mul_Long), NULL, 
NULL, NULL, NULL, NULL, + F(Mul), F(Mul), F(Mul_Long), F(Mul_Long), F(Mul_Long), F(Mul_Long), F(Mul_Short), F(Mul_Short), F(Mul_Short), F(Mul_Short), F(Mul_Short), // ARMv5 exclusives F(Clz), NULL, NULL, NULL, NULL, @@ -356,7 +534,7 @@ const Compiler::CompileFunc A_Comp[ARMInstrInfo::ak_Count] = // Branch F(BranchImm), F(BranchImm), F(BranchImm), F(BranchXchangeReg), F(BranchXchangeReg), // Special - NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, F(MSR), F(MSR), F(MRS), NULL, NULL, NULL, &Compiler::Nop }; #undef F @@ -404,29 +582,34 @@ bool Compiler::CanCompile(bool thumb, u16 kind) return (thumb ? T_Comp[kind] : A_Comp[kind]) != NULL; } -void Compiler::Comp_BranchSpecialBehaviour() +void Compiler::Comp_BranchSpecialBehaviour(bool taken) { - if (CurInstr.BranchFlags & branch_IdleBranch) + if (taken && CurInstr.BranchFlags & branch_IdleBranch) { MOVI2R(W0, 1); STRB(INDEX_UNSIGNED, W0, RCPU, offsetof(ARM, IdleLoop)); } - if (CurInstr.BranchFlags & branch_FollowCondNotTaken) + if ((CurInstr.BranchFlags & branch_FollowCondNotTaken && taken) + || (CurInstr.BranchFlags & branch_FollowCondTaken && !taken)) { - SaveCPSR(false); RegCache.PrepareExit(); - ADD(W0, RCycles, ConstantCycles); - ABI_PopRegisters(SavedRegs); - RET(); + + SUB(RCycles, RCycles, ConstantCycles); + QuickTailCall(X0, ARM_Ret); } } JitBlockEntry Compiler::CompileBlock(ARM* cpu, bool thumb, FetchedInstr instrs[], int instrsCount) { - if (JitMemUseableSize - GetCodeOffset() < 1024 * 16) + if (JitMemMainSize - GetCodeOffset() < 1024 * 16) + { + printf("JIT near memory full, resetting...\n"); + ResetBlockCache(); + } + if ((JitMemMainSize + JitMemSecondarySize) - OtherCodeRegion < 1024 * 8) { - printf("JIT memory full, resetting...\n"); + printf("JIT far memory full, resetting...\n"); ResetBlockCache(); } @@ -437,21 +620,7 @@ JitBlockEntry Compiler::CompileBlock(ARM* cpu, bool thumb, FetchedInstr instrs[] CurCPU = cpu; ConstantCycles = 0; RegCache = RegisterCache(this, instrs, instrsCount, true); - - //printf("compiling block at %x\n", R15 - (Thumb ? 
2 : 4)); - const u32 ALL_CALLEE_SAVED = 0x7FF80000; - - SavedRegs = BitSet32((RegCache.GetPushRegs() | BitSet32(0x78000000)) & BitSet32(ALL_CALLEE_SAVED)); - - //if (Num == 1) - { - ABI_PushRegisters(SavedRegs); - - MOVP2R(RCPU, CurCPU); - MOVI2R(RCycles, 0); - - LoadCPSR(); - } + CPSRDirty = false; for (int i = 0; i < instrsCount; i++) { @@ -486,6 +655,7 @@ JitBlockEntry Compiler::CompileBlock(ARM* cpu, bool thumb, FetchedInstr instrs[] if (comp == NULL) { + SaveCycles(); SaveCPSR(); RegCache.Flush(); } @@ -535,25 +705,18 @@ JitBlockEntry Compiler::CompileBlock(ARM* cpu, bool thumb, FetchedInstr instrs[] (this->*comp)(); } - Comp_BranchSpecialBehaviour(); + Comp_BranchSpecialBehaviour(true); if (cond < 0xE) { - if (IrregularCycles) + if (IrregularCycles || (CurInstr.BranchFlags & branch_FollowCondTaken)) { FixupBranch skipNop = B(); SetJumpTarget(skipExecute); Comp_AddCycles_C(); - if (CurInstr.BranchFlags & branch_FollowCondTaken) - { - SaveCPSR(false); - RegCache.PrepareExit(); - ADD(W0, RCycles, ConstantCycles); - ABI_PopRegisters(SavedRegs); - RET(); - } + Comp_BranchSpecialBehaviour(false); SetJumpTarget(skipNop); } @@ -565,76 +728,74 @@ JitBlockEntry Compiler::CompileBlock(ARM* cpu, bool thumb, FetchedInstr instrs[] } if (comp == NULL) + { + LoadCycles(); LoadCPSR(); + } } RegCache.Flush(); - //if (Num == 1) - { - SaveCPSR(); - - ADD(W0, RCycles, ConstantCycles); - - ABI_PopRegisters(SavedRegs); - } - //else - // ADD(RCycles, RCycles, ConstantCycles); - - RET(); + SUB(RCycles, RCycles, ConstantCycles); + QuickTailCall(X0, ARM_Ret); FlushIcache(); - //printf("finished\n"); - return res; } void Compiler::Reset() { + LoadStorePatches.clear(); + SetCodePtr(0); + OtherCodeRegion = JitMemMainSize; const u32 brk_0 = 0xD4200000; - for (int i = 0; i < JitMemUseableSize / 4; i++) + for (int i = 0; i < (JitMemMainSize + JitMemSecondarySize) / 4; i++) *(((u32*)GetRWPtr()) + i) = brk_0; } -void Compiler::Comp_AddCycles_C(bool nonConst) +void Compiler::Comp_AddCycles_C(bool forceNonConstant) { s32 cycles = Num ? NDS::ARM7MemTimings[CurInstr.CodeCycles][Thumb ? 1 : 3] : ((R15 & 0x2) ? 0 : CurInstr.CodeCycles); - if (!nonConst && !CurInstr.Info.Branches()) + if (forceNonConstant) ConstantCycles += cycles; else - ADD(RCycles, RCycles, cycles); + SUB(RCycles, RCycles, cycles); } void Compiler::Comp_AddCycles_CI(u32 numI) { + IrregularCycles = true; + s32 cycles = (Num ? NDS::ARM7MemTimings[CurInstr.CodeCycles][Thumb ? 0 : 2] : ((R15 & 0x2) ? 0 : CurInstr.CodeCycles)) + numI; - if (Thumb || CurInstr.Cond() >= 0xE) + if (Thumb || CurInstr.Cond() == 0xE) ConstantCycles += cycles; else - ADD(RCycles, RCycles, cycles); + SUB(RCycles, RCycles, cycles); } void Compiler::Comp_AddCycles_CI(u32 c, ARM64Reg numI, ArithOption shift) { + IrregularCycles = true; + s32 cycles = (Num ? NDS::ARM7MemTimings[CurInstr.CodeCycles][Thumb ? 0 : 2] : ((R15 & 0x2) ? 
0 : CurInstr.CodeCycles)) + c; - ADD(RCycles, RCycles, numI, shift); + SUB(RCycles, RCycles, numI, shift); if (Thumb || CurInstr.Cond() >= 0xE) - ConstantCycles += c; + ConstantCycles += cycles; else - ADD(RCycles, RCycles, cycles); + SUB(RCycles, RCycles, cycles); } void Compiler::Comp_AddCycles_CDI() @@ -671,7 +832,7 @@ void Compiler::Comp_AddCycles_CDI() } if (!Thumb && CurInstr.Cond() < 0xE) - ADD(RCycles, RCycles, cycles); + SUB(RCycles, RCycles, cycles); else ConstantCycles += cycles; } @@ -715,7 +876,7 @@ void Compiler::Comp_AddCycles_CD() } if ((!Thumb && CurInstr.Cond() < 0xE) && IrregularCycles) - ADD(RCycles, RCycles, cycles); + SUB(RCycles, RCycles, cycles); else ConstantCycles += cycles; } diff --git a/src/ARMJIT_A64/ARMJIT_Compiler.h b/src/ARMJIT_A64/ARMJIT_Compiler.h index 5c9ef41..e4ffc63 100644 --- a/src/ARMJIT_A64/ARMJIT_Compiler.h +++ b/src/ARMJIT_A64/ARMJIT_Compiler.h @@ -9,6 +9,8 @@ #include "../ARMJIT_Internal.h" #include "../ARMJIT_RegisterCache.h" +#include <unordered_map> + namespace ARMJIT { @@ -64,7 +66,14 @@ struct Op2 }; }; -class Compiler : Arm64Gen::ARM64XEmitter +struct LoadStorePatch +{ + void* PatchFunc; + s32 PatchOffset; + u32 PatchSize; +}; + +class Compiler : public Arm64Gen::ARM64XEmitter { public: typedef void (Compiler::*CompileFunc)(); @@ -72,6 +81,9 @@ public: Compiler(); ~Compiler(); + void PushRegs(bool saveHiRegs); + void PopRegs(bool saveHiRegs); + Arm64Gen::ARM64Reg MapReg(int reg) { assert(RegCache.Mapping[reg] != Arm64Gen::INVALID_REG); @@ -89,7 +101,7 @@ public: void Reset(); - void Comp_AddCycles_C(bool forceNonConst = false); + void Comp_AddCycles_C(bool forceNonConstant = false); void Comp_AddCycles_CI(u32 numI); void Comp_AddCycles_CI(u32 c, Arm64Gen::ARM64Reg numI, Arm64Gen::ArithOption shift); void Comp_AddCycles_CD(); @@ -103,6 +115,9 @@ public: void LoadCPSR(); void SaveCPSR(bool markClean = true); + void LoadCycles(); + void SaveCycles(); + void Nop() {} void A_Comp_ALUTriOp(); @@ -111,6 +126,7 @@ public: void A_Comp_Mul(); void A_Comp_Mul_Long(); + void A_Comp_Mul_Short(); void A_Comp_Clz(); @@ -122,6 +138,8 @@ public: void A_Comp_BranchImm(); void A_Comp_BranchXchangeReg(); + void A_Comp_MRS(); + void A_Comp_MSR(); void T_Comp_ShiftImm(); void T_Comp_AddSub_(); @@ -168,7 +186,7 @@ public: void Comp_RegShiftImm(int op, int amount, bool S, Op2& op2, Arm64Gen::ARM64Reg tmp = Arm64Gen::W0); void Comp_RegShiftReg(int op, bool S, Op2& op2, Arm64Gen::ARM64Reg rs); - void Comp_MemLoadLiteral(int size, bool signExtend, int rd, u32 addr); + bool Comp_MemLoadLiteral(int size, bool signExtend, int rd, u32 addr); enum { memop_Writeback = 1 << 0, @@ -179,16 +197,33 @@ public: }; void Comp_MemAccess(int rd, int rn, Op2 offset, int size, int flags); - void* Gen_MemoryRoutine9(int size, bool store); - - void* Gen_MemoryRoutine9Seq(bool store, bool preinc); - void* Gen_MemoryRoutine7Seq(bool store, bool preinc); - // 0 = switch mode, 1 = stay arm, 2 = stay thumb void* Gen_JumpTo9(int kind); void* Gen_JumpTo7(int kind); - void Comp_BranchSpecialBehaviour(); + void Comp_BranchSpecialBehaviour(bool taken); + + JitBlockEntry AddEntryOffset(u32 offset) + { + return (JitBlockEntry)(GetRXBase() + offset); + } + + u32 SubEntryOffset(JitBlockEntry entry) + { + return (u8*)entry - GetRXBase(); + } + + bool IsJITFault(u64 pc); + s64 RewriteMemAccess(u64 pc); + + void SwapCodeRegion() + { + ptrdiff_t offset = GetCodeOffset(); + SetCodePtrUnsafe(OtherCodeRegion); + OtherCodeRegion = offset; + } + + ptrdiff_t OtherCodeRegion; bool Exit; @@ -202,22 +237,20 @@ public: BitSet32 
SavedRegs; - u32 JitMemUseableSize; + u32 JitMemSecondarySize; + u32 JitMemMainSize; void* ReadBanked, *WriteBanked; - // [size][store] - void* MemFunc9[3][2]; - void* MemFunc7[3][2]; - - // [store][pre increment] - void* MemFuncsSeq9[2][2]; - // "[code in main ram] - void* MemFuncsSeq7[2][2]; - void* JumpToFuncs9[3]; void* JumpToFuncs7[3]; + std::unordered_map LoadStorePatches; + + // [Num][Size][Sign Extend][Output register] + void* PatchedLoadFuncs[2][3][2][8]; + void* PatchedStoreFuncs[2][3][8]; + RegisterCache RegCache; bool CPSRDirty = false; diff --git a/src/ARMJIT_A64/ARMJIT_Linkage.s b/src/ARMJIT_A64/ARMJIT_Linkage.s new file mode 100644 index 0000000..536a478 --- /dev/null +++ b/src/ARMJIT_A64/ARMJIT_Linkage.s @@ -0,0 +1,68 @@ +#include "../ARMJIT_x64/ARMJIT_Offsets.h" + +.text + +#define RCPSR W27 +#define RCycles W28 +#define RCPU X29 + +.p2align 4,,15 + +.global ARM_Dispatch +ARM_Dispatch: + stp x19, x20, [sp, #-96]! + stp x21, x22, [sp, #16] + stp x23, x24, [sp, #32] + stp x25, x26, [sp, #48] + stp x27, x28, [sp, #64] + stp x29, x30, [sp, #80] + + mov RCPU, x0 + ldr RCycles, [RCPU, ARM_Cycles_offset] + ldr RCPSR, [RCPU, ARM_CPSR_offset] + + br x1 + +.p2align 4,,15 + +.global ARM_Ret +ARM_Ret: + str RCycles, [RCPU, ARM_Cycles_offset] + str RCPSR, [RCPU, ARM_CPSR_offset] + + ldp x29, x30, [sp, #80] + ldp x27, x28, [sp, #64] + ldp x25, x26, [sp, #48] + ldp x23, x24, [sp, #32] + ldp x21, x22, [sp, #16] + ldp x19, x20, [sp], #96 + + ret + +.p2align 4,,15 + +.global ARM_RestoreContext +ARM_RestoreContext: + mov sp, x0 + + ldp x0, x1, [sp] + ldp x2, x3, [sp, #16] + ldp x4, x5, [sp, #32] + ldp x6, x7, [sp, #48] + ldp x8, x9, [sp, #64] + ldp x10, x11, [sp, #80] + ldp x12, x13, [sp, #96] + ldp x14, x15, [sp, #112] + ldp x16, x17, [sp, #128] + ldp x18, x19, [sp, #144] + ldp x20, x21, [sp, #160] + ldp x22, x23, [sp, #176] + ldp x24, x25, [sp, #192] + ldp x26, x27, [sp, #208] + ldp x28, x29, [sp, #224] + ldr x30, [sp, #240] + + ldp x17, x18, [sp, #248] + mov sp, x17 + + br x18 \ No newline at end of file diff --git a/src/ARMJIT_A64/ARMJIT_LoadStore.cpp b/src/ARMJIT_A64/ARMJIT_LoadStore.cpp index 6cf710b..b307d0e 100644 --- a/src/ARMJIT_A64/ARMJIT_LoadStore.cpp +++ b/src/ARMJIT_A64/ARMJIT_LoadStore.cpp @@ -2,286 +2,62 @@ #include "../Config.h" +#include "../ARMJIT_Memory.h" + using namespace Arm64Gen; namespace ARMJIT { -// W0 - address -// (if store) W1 - value to store -// W2 - code cycles -void* Compiler::Gen_MemoryRoutine9(int size, bool store) +bool Compiler::IsJITFault(u64 pc) { - AlignCode16(); - void* res = GetRXPtr(); - - u32 addressMask; - switch (size) - { - case 32: addressMask = ~3; break; - case 16: addressMask = ~1; break; - case 8: addressMask = ~0; break; - } - - LDR(INDEX_UNSIGNED, W3, RCPU, offsetof(ARMv5, DTCMBase)); - LDR(INDEX_UNSIGNED, W4, RCPU, offsetof(ARMv5, DTCMSize)); - SUB(W3, W0, W3); - CMP(W3, W4); - FixupBranch insideDTCM = B(CC_LO); - - UBFX(W4, W0, 24, 8); - CMP(W4, 0x02); - FixupBranch outsideMainRAM = B(CC_NEQ); - ANDI2R(W3, W0, addressMask & (MAIN_RAM_SIZE - 1)); - MOVP2R(X4, NDS::MainRAM); - if (!store && size == 32) - { - LDR(W3, X3, X4); - ANDI2R(W0, W0, 3); - LSL(W0, W0, 3); - RORV(W0, W3, W0); - } - else if (store) - STRGeneric(size, W1, X3, X4); - else - LDRGeneric(size, false, W0, X3, X4); - RET(); - - SetJumpTarget(outsideMainRAM); - - LDR(INDEX_UNSIGNED, W3, RCPU, offsetof(ARMv5, ITCMSize)); - CMP(W0, W3); - FixupBranch insideITCM = B(CC_LO); - - if (store) - { - if (size > 8) - ANDI2R(W0, W0, addressMask); - - switch (size) - { - case 32: 
QuickTailCall(X4, NDS::ARM9Write32); break; - case 16: QuickTailCall(X4, NDS::ARM9Write16); break; - case 8: QuickTailCall(X4, NDS::ARM9Write8); break; - } - } - else - { - if (size == 32) - ABI_PushRegisters({0, 30}); - if (size > 8) - ANDI2R(W0, W0, addressMask); - - switch (size) - { - case 32: QuickCallFunction(X4, NDS::ARM9Read32); break; - case 16: QuickTailCall (X4, NDS::ARM9Read16); break; - case 8: QuickTailCall (X4, NDS::ARM9Read8 ); break; - } - if (size == 32) - { - ABI_PopRegisters({1, 30}); - ANDI2R(W1, W1, 3); - LSL(W1, W1, 3); - RORV(W0, W0, W1); - RET(); - } - } - - SetJumpTarget(insideDTCM); - ANDI2R(W3, W3, 0x3FFF & addressMask); - ADDI2R(W3, W3, offsetof(ARMv5, DTCM), W4); - if (!store && size == 32) - { - ANDI2R(W4, W0, 3); - LDR(W0, RCPU, W3); - LSL(W4, W4, 3); - RORV(W0, W0, W4); - } - else if (store) - STRGeneric(size, W1, RCPU, W3); - else - LDRGeneric(size, false, W0, RCPU, W3); - - RET(); - - SetJumpTarget(insideITCM); - ANDI2R(W3, W0, 0x7FFF & addressMask); - if (store) - { - ADDI2R(W0, W3, ExeMemRegionOffsets[exeMem_ITCM], W4); - LSR(W5, W0, 9); - MOVP2R(X4, CodeRanges); - ADD(X4, X4, X5, ArithOption(X5, ST_LSL, 4)); - static_assert(sizeof(AddressRange) == 16); - LDRH(INDEX_UNSIGNED, W4, X4, offsetof(AddressRange, Blocks.Length)); - FixupBranch null = CBZ(W4); - ABI_PushRegisters({1, 3, 30}); - QuickCallFunction(X4, InvalidateByAddr); - ABI_PopRegisters({1, 3, 30}); - SetJumpTarget(null); - } - ADDI2R(W3, W3, offsetof(ARMv5, ITCM), W4); - if (!store && size == 32) - { - ANDI2R(W4, W0, 3); - LDR(W0, RCPU, W3); - LSL(W4, W4, 3); - RORV(W0, W0, W4); - } - else if (store) - STRGeneric(size, W1, RCPU, W3); - else - LDRGeneric(size, false, W0, RCPU, W3); - RET(); - - return res; + return pc >= (u64)GetRXBase() && pc - (u64)GetRXBase() < (JitMemMainSize + JitMemSecondarySize); } -/* - W0 - base address - X1 - stack space - W2 - values count -*/ -void* Compiler::Gen_MemoryRoutine9Seq(bool store, bool preinc) +s64 Compiler::RewriteMemAccess(u64 pc) { - AlignCode16(); - void* res = GetRXPtr(); - - void* loopStart = GetRXPtr(); - SUB(W2, W2, 1); - - if (preinc) - ADD(W0, W0, 4); + ptrdiff_t pcOffset = pc - (u64)GetRXBase(); - LDR(INDEX_UNSIGNED, W4, RCPU, offsetof(ARMv5, DTCMBase)); - LDR(INDEX_UNSIGNED, W5, RCPU, offsetof(ARMv5, DTCMSize)); - SUB(W4, W0, W4); - CMP(W4, W5); - FixupBranch insideDTCM = B(CC_LO); + auto it = LoadStorePatches.find(pcOffset); - LDR(INDEX_UNSIGNED, W4, RCPU, offsetof(ARMv5, ITCMSize)); - CMP(W0, W4); - FixupBranch insideITCM = B(CC_LO); - - ABI_PushRegisters({0, 1, 2, 30}); // TODO: move SP only once - if (store) + if (it != LoadStorePatches.end()) { - LDR(X1, X1, ArithOption(X2, true)); - QuickCallFunction(X4, NDS::ARM9Write32); + LoadStorePatch patch = it->second; - ABI_PopRegisters({0, 1, 2, 30}); - } - else - { - QuickCallFunction(X4, NDS::ARM9Read32); - MOV(W4, W0); + ptrdiff_t curCodeOffset = GetCodeOffset(); - ABI_PopRegisters({0, 1, 2, 30}); + SetCodePtrUnsafe(pcOffset + patch.PatchOffset); - STR(X4, X1, ArithOption(X2, true)); - } + BL(patch.PatchFunc); - if (!preinc) - ADD(W0, W0, 4); - CBNZ(W2, loopStart); - RET(); + for (int i = 0; i < patch.PatchSize / 4 - 1; i++) + HINT(HINT_NOP); - SetJumpTarget(insideDTCM); + FlushIcacheSection((u8*)pc + patch.PatchOffset, (u8*)GetRXPtr()); - ANDI2R(W4, W4, ~3 & 0x3FFF); - ADDI2R(X4, X4, offsetof(ARMv5, DTCM)); - if (store) - { - LDR(X5, X1, ArithOption(X2, true)); - STR(W5, RCPU, X4); - } - else - { - LDR(W5, RCPU, X4); - STR(X5, X1, ArithOption(X2, true)); - } + 
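
(A minimal standalone sketch of the rewrite step RewriteMemAccess performs above, reduced to plain C++. The map key, the BL-plus-NOP-padding scheme and the AArch64 encodings mirror the surrounding code; every name here is an illustrative stand-in, and the caller is assumed to flush the instruction cache afterwards, as the FlushIcacheSection call above does.)

#include <cstdint>
#include <cstddef>
#include <unordered_map>

struct Patch { void* Handler; int32_t Offset; uint32_t Size; };
static std::unordered_map<ptrdiff_t, Patch> gPatches; // keyed by code offset of the access

// Returns the PC adjustment needed to re-enter at the rewritten code, 0 if unknown.
static int32_t RewriteAt(uint32_t* codeBase, ptrdiff_t faultOffset)
{
    auto it = gPatches.find(faultOffset);
    if (it == gPatches.end())
        return 0;

    // replace the inlined fastmem access with a call to the slow-path routine
    uint32_t* insn = codeBase + (faultOffset + it->second.Offset) / 4;
    intptr_t delta = (uint8_t*)it->second.Handler - (uint8_t*)insn;
    insn[0] = 0x94000000u | (((uint64_t)delta >> 2) & 0x03FFFFFFu); // BL Handler
    for (uint32_t i = 1; i < it->second.Size / 4; i++)
        insn[i] = 0xD503201Fu;                                      // NOP

    int32_t adjust = it->second.Offset;
    gPatches.erase(it);  // each access is only ever rewritten once
    return adjust;
}
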
SetCodePtrUnsafe(curCodeOffset); - if (!preinc) - ADD(W0, W0, 4); - CBNZ(W2, loopStart); - RET(); - - SetJumpTarget(insideITCM); - - ANDI2R(W4, W0, ~3 & 0x7FFF); - - ADDI2R(W6, W4, offsetof(ARMv5, ITCM), W5); - if (store) - { - LDR(X5, X1, ArithOption(X2, true)); - STR(W5, RCPU, X6); - } - else - { - LDR(W5, RCPU, X6); - STR(X5, X1, ArithOption(X2, true)); - } + LoadStorePatches.erase(it); - if (store) - { - ADDI2R(W4, W4, ExeMemRegionOffsets[exeMem_ITCM], W5); - LSR(W6, W4, 9); - MOVP2R(X5, CodeRanges); - ADD(X5, X5, X6, ArithOption(X6, ST_LSL, 4)); - static_assert(sizeof(AddressRange) == 16); - LDRH(INDEX_UNSIGNED, W5, X5, offsetof(AddressRange, Blocks.Length)); - FixupBranch null = CBZ(W5); - ABI_PushRegisters({0, 1, 2, 4, 30}); - MOV(W0, W4); - QuickCallFunction(X5, InvalidateByAddr); - ABI_PopRegisters({0, 1, 2, 4, 30}); - SetJumpTarget(null); + return patch.PatchOffset; } - - if (!preinc) - ADD(W0, W0, 4); - CBNZ(W2, loopStart); - RET(); - return res; + printf("this is a JIT bug! %08x\n", __builtin_bswap32(*(u32*)pc)); + assert(false); } -void* Compiler::Gen_MemoryRoutine7Seq(bool store, bool preinc) +bool Compiler::Comp_MemLoadLiteral(int size, bool signExtend, int rd, u32 addr) { - AlignCode16(); - void* res = GetRXPtr(); + u32 localAddr = LocaliseCodeAddress(Num, addr); - void* loopStart = GetRXPtr(); - SUB(W2, W2, 1); - - if (preinc) - ADD(W0, W0, 4); - - ABI_PushRegisters({0, 1, 2, 30}); - if (store) + int invalidLiteralIdx = InvalidLiterals.Find(localAddr); + if (invalidLiteralIdx != -1) { - LDR(X1, X1, ArithOption(X2, true)); - QuickCallFunction(X4, NDS::ARM7Write32); - ABI_PopRegisters({0, 1, 2, 30}); + InvalidLiterals.Remove(invalidLiteralIdx); + return false; } - else - { - QuickCallFunction(X4, NDS::ARM7Read32); - MOV(W4, W0); - ABI_PopRegisters({0, 1, 2, 30}); - STR(X4, X1, ArithOption(X2, true)); - } - - if (!preinc) - ADD(W0, W0, 4); - CBNZ(W2, loopStart); - RET(); - return res; -} + Comp_AddCycles_CDI(); -void Compiler::Comp_MemLoadLiteral(int size, bool signExtend, int rd, u32 addr) -{ u32 val; // make sure arm7 bios is accessible u32 tmpR15 = CurCPU->R[15]; @@ -309,6 +85,8 @@ void Compiler::Comp_MemLoadLiteral(int size, bool signExtend, int rd, u32 addr) if (Thumb || CurInstr.Cond() == 0xE) RegCache.PutLiteral(rd, val); + + return true; } void Compiler::Comp_MemAccess(int rd, int rn, Op2 offset, int size, int flags) @@ -318,163 +96,209 @@ void Compiler::Comp_MemAccess(int rd, int rn, Op2 offset, int size, int flags) addressMask = ~3; if (size == 16) addressMask = ~1; + + if (Config::JIT_LiteralOptimisations && rn == 15 && rd != 15 && offset.IsImm && !(flags & (memop_Post|memop_Store|memop_Writeback))) + { + u32 addr = R15 + offset.Imm * ((flags & memop_SubtractOffset) ? -1 : 1); + + if (Comp_MemLoadLiteral(size, flags & memop_SignExtend, rd, addr)) + return; + } if (flags & memop_Store) Comp_AddCycles_CD(); else Comp_AddCycles_CDI(); - if (Config::JIT_LiteralOptimisations && rn == 15 && rd != 15 && offset.IsImm && !(flags & (memop_Post|memop_Store|memop_Writeback))) - { - u32 addr = R15 + offset.Imm * ((flags & memop_SubtractOffset) ? -1 : 1); - u32 translatedAddr = Num == 0 ? 
TranslateAddr<0>(addr) : TranslateAddr<1>(addr); + ARM64Reg rdMapped = MapReg(rd); + ARM64Reg rnMapped = MapReg(rn); - if (!(CodeRanges[translatedAddr / 512].InvalidLiterals & (1 << ((translatedAddr & 0x1FF) / 16)))) - { - Comp_MemLoadLiteral(size, flags & memop_SignExtend, rd, addr); - return; - } + if (Thumb && rn == 15) + { + ANDI2R(W3, rnMapped, ~2); + rnMapped = W3; } + ARM64Reg finalAddr = W0; + if (flags & memop_Post) { - ARM64Reg rdMapped = MapReg(rd); - ARM64Reg rnMapped = MapReg(rn); - - bool inlinePreparation = Num == 1; - u32 constLocalROR32 = 4; + finalAddr = rnMapped; + MOV(W0, rnMapped); + } - void* memFunc = Num == 0 - ? MemFunc9[size >> 4][!!(flags & memop_Store)] - : MemFunc7[size >> 4][!!((flags & memop_Store))]; + bool addrIsStatic = Config::JIT_LiteralOptimisations + && RegCache.IsLiteral(rn) && offset.IsImm && !(flags & (memop_Writeback|memop_Post)); + u32 staticAddress; + if (addrIsStatic) + staticAddress = RegCache.LiteralValues[rn] + offset.Imm * ((flags & memop_SubtractOffset) ? -1 : 1); - if (Config::JIT_LiteralOptimisations && (rd != 15 || (flags & memop_Store)) && offset.IsImm && RegCache.IsLiteral(rn)) + if (!offset.IsImm) + Comp_RegShiftImm(offset.Reg.ShiftType, offset.Reg.ShiftAmount, false, offset, W2); + // offset might has become an immediate + if (offset.IsImm) + { + if (offset.Imm) + { + if (flags & memop_SubtractOffset) + SUB(finalAddr, rnMapped, offset.Imm); + else + ADD(finalAddr, rnMapped, offset.Imm); + } + else if (finalAddr != rnMapped) + MOV(finalAddr, rnMapped); + } + else + { + if (offset.Reg.ShiftType == ST_ROR) { - u32 addr = RegCache.LiteralValues[rn] + offset.Imm * ((flags & memop_SubtractOffset) ? -1 : 1); + ROR_(W0, offset.Reg.Rm, offset.Reg.ShiftAmount); + offset = Op2(W0); + } - NDS::MemRegion region; - region.Mem = NULL; - if (Num == 0) - { - ARMv5* cpu5 = (ARMv5*)CurCPU; + if (flags & memop_SubtractOffset) + SUB(finalAddr, rnMapped, offset.Reg.Rm, offset.ToArithOption()); + else + ADD(finalAddr, rnMapped, offset.Reg.Rm, offset.ToArithOption()); + } - // stupid dtcm... - if (addr >= cpu5->DTCMBase && addr < (cpu5->DTCMBase + cpu5->DTCMSize)) - { - region.Mem = cpu5->DTCM; - region.Mask = 0x3FFF; - } - else - { - NDS::ARM9GetMemRegion(addr, flags & memop_Store, ®ion); - } - } - else - NDS::ARM7GetMemRegion(addr, flags & memop_Store, ®ion); + if (!(flags & memop_Post) && (flags & memop_Writeback)) + MOV(rnMapped, W0); - if (region.Mem != NULL) - { - void* ptr = ®ion.Mem[addr & addressMask & region.Mask]; + u32 expectedTarget = Num == 0 + ? ARMJIT_Memory::ClassifyAddress9(addrIsStatic ? staticAddress : CurInstr.DataRegion) + : ARMJIT_Memory::ClassifyAddress7(addrIsStatic ? staticAddress : CurInstr.DataRegion); - MOVP2R(X0, ptr); - if (flags & memop_Store) - STRGeneric(size, INDEX_UNSIGNED, rdMapped, X0, 0); - else - { - LDRGeneric(size, flags & memop_SignExtend, INDEX_UNSIGNED, rdMapped, X0, 0); - if (size == 32 && addr & ~0x3) - ROR_(rdMapped, rdMapped, (addr & 0x3) << 3); - } - return; - } + if (Config::JIT_FastMemory && ((!Thumb && CurInstr.Cond() != 0xE) || ARMJIT_Memory::IsMappable(expectedTarget))) + { + ptrdiff_t memopStart = GetCodeOffset(); + LoadStorePatch patch; - void* specialFunc = GetFuncForAddr(CurCPU, addr, flags & memop_Store, size); - if (specialFunc) - { - memFunc = specialFunc; - inlinePreparation = true; - constLocalROR32 = addr & 0x3; - } - } + patch.PatchFunc = flags & memop_Store + ? 
          PatchedStoreFuncs[Num][__builtin_ctz(size) - 3][rdMapped - W19]
+            : PatchedLoadFuncs[Num][__builtin_ctz(size) - 3][!!(flags & memop_SignExtend)][rdMapped - W19];
+        assert(rdMapped - W19 >= 0 && rdMapped - W19 < 8);
 
-    ARM64Reg finalAddr = W0;
-    if (flags & memop_Post)
-    {
-        finalAddr = rnMapped;
-        MOV(W0, rnMapped);
-    }
+        MOVP2R(X7, Num == 0 ? ARMJIT_Memory::FastMem9Start : ARMJIT_Memory::FastMem7Start);
+        // take a chance at fastmem
+        if (size > 8)
+            ANDI2R(W1, W0, addressMask);
+
+        ptrdiff_t loadStorePosition = GetCodeOffset();
         if (flags & memop_Store)
-        MOV(W1, rdMapped);
-
-    if (!offset.IsImm)
-        Comp_RegShiftImm(offset.Reg.ShiftType, offset.Reg.ShiftAmount, false, offset, W2);
-    // offset might become an immediate
-    if (offset.IsImm)
         {
-        if (flags & memop_SubtractOffset)
-            SUB(finalAddr, rnMapped, offset.Imm);
-        else
-            ADD(finalAddr, rnMapped, offset.Imm);
+            STRGeneric(size, rdMapped, size > 8 ? X1 : X0, X7);
         }
         else
         {
-        if (offset.Reg.ShiftType == ST_ROR)
+            LDRGeneric(size, flags & memop_SignExtend, rdMapped, size > 8 ? X1 : X0, X7);
+            if (size == 32)
             {
-            ROR_(W0, offset.Reg.Rm, offset.Reg.ShiftAmount);
-            offset = Op2(W0);
+                UBFIZ(W0, W0, 3, 2);
+                RORV(rdMapped, rdMapped, W0);
             }
-
-        if (flags & memop_SubtractOffset)
-            SUB(finalAddr, rnMapped, offset.Reg.Rm, offset.ToArithOption());
-        else
-            ADD(finalAddr, rnMapped, offset.Reg.Rm, offset.ToArithOption());
         }
 
-    if (!(flags & memop_Post) && (flags & memop_Writeback))
-        MOV(rnMapped, W0);
+        patch.PatchOffset = memopStart - loadStorePosition;
+        patch.PatchSize = GetCodeOffset() - memopStart;
+        LoadStorePatches[loadStorePosition] = patch;
+    }
+    else
+    {
+        void* func = NULL;
+        if (addrIsStatic)
+            func = ARMJIT_Memory::GetFuncForAddr(CurCPU, staticAddress, flags & memop_Store, size);
 
-    if (inlinePreparation)
+        if (func)
         {
-        if (size == 32 && !(flags & memop_Store) && constLocalROR32 == 4)
-            ANDI2R(rdMapped, W0, 3);
-        if (size > 8)
-            ANDI2R(W0, W0, addressMask);
+            if (flags & memop_Store)
+                MOV(W1, rdMapped);
+            QuickCallFunction(X2, (void (*)())func);
+
+            if (!(flags & memop_Store))
+            {
+                if (size == 32)
+                {
+                    if (staticAddress & 0x3)
+                        ROR_(rdMapped, W0, (staticAddress & 0x3) << 3);
+                    else
+                        MOV(rdMapped, W0);
+                }
+                else
+                {
+                    if (flags & memop_SignExtend)
+                        SBFX(rdMapped, W0, 0, size);
+                    else
+                        UBFX(rdMapped, W0, 0, size);
+                }
+            }
         }
-    QuickCallFunction(X2, memFunc);
-    if (!(flags & memop_Store))
+        else
         {
-        if (inlinePreparation && !(flags & memop_Store) && size == 32)
+            if (Num == 0)
             {
-            if (constLocalROR32 == 4)
+                MOV(X1, RCPU);
+                if (flags & memop_Store)
                 {
-                LSL(rdMapped, rdMapped, 3);
-                RORV(rdMapped, W0, rdMapped);
+                    MOV(W2, rdMapped);
+                    switch (size)
+                    {
+                    case 32: QuickCallFunction(X3, SlowWrite9<u32>); break;
+                    case 16: QuickCallFunction(X3, SlowWrite9<u16>); break;
+                    case 8: QuickCallFunction(X3, SlowWrite9<u8>); break;
+                    }
                 }
-            else if (constLocalROR32 > 0)
-                ROR_(rdMapped, W0, constLocalROR32 << 3);
                 else
-                MOV(rdMapped, W0);
+                {
+                    switch (size)
+                    {
+                    case 32: QuickCallFunction(X3, SlowRead9<u32>); break;
+                    case 16: QuickCallFunction(X3, SlowRead9<u16>); break;
+                    case 8: QuickCallFunction(X3, SlowRead9<u8>); break;
+                    }
+                }
             }
-        else if (flags & memop_SignExtend)
+            else
             {
-            if (size == 16)
-                SXTH(rdMapped, W0);
-            else if (size == 8)
-                SXTB(rdMapped, W0);
+                if (flags & memop_Store)
+                {
+                    MOV(W1, rdMapped);
+                    switch (size)
+                    {
+                    case 32: QuickCallFunction(X3, SlowWrite7<u32>); break;
+                    case 16: QuickCallFunction(X3, SlowWrite7<u16>); break;
+                    case 8: QuickCallFunction(X3, SlowWrite7<u8>); break;
+                    }
+                }
                 else
-                assert("What's wrong with you?");
+                {
+                    switch (size)
+                    {
+                    case 32: QuickCallFunction(X3, SlowRead7<u32>); break;
+                    case 16: QuickCallFunction(X3, SlowRead7<u16>); break;
+                    case 8: QuickCallFunction(X3, SlowRead7<u8>); break;
+                    }
+                }
             }
-        else
-            MOV(rdMapped, W0);
-
-        if (CurInstr.Info.Branches())
+
+            if (!(flags & memop_Store))
             {
-            if (size < 32)
-                printf("LDR size < 32 branching?\n");
-            Comp_JumpTo(rdMapped, Num == 0, false);
+                if (size == 32)
+                    MOV(rdMapped, W0);
+                else if (flags & memop_SignExtend)
+                    SBFX(rdMapped, W0, 0, size);
+                else
+                    UBFX(rdMapped, W0, 0, size);
             }
         }
     }
+
+    if (CurInstr.Info.Branches())
+    {
+        if (size < 32)
+            printf("LDR size < 32 branching?\n");
+        Comp_JumpTo(rdMapped, Num == 0, false);
+    }
 }
 
 void Compiler::A_Comp_MemWB()
@@ -589,19 +413,11 @@ void Compiler::T_Comp_MemImmHalf()
 
 void Compiler::T_Comp_LoadPCRel()
 {
-    u32 addr = (R15 & ~0x2) + ((CurInstr.Instr & 0xFF) << 2);
+    u32 offset = ((CurInstr.Instr & 0xFF) << 2);
+    u32 addr = (R15 & ~0x2) + offset;
 
-    if (Config::JIT_LiteralOptimisations)
-    {
-        Comp_MemLoadLiteral(32, false, CurInstr.T_Reg(8), addr);
-        Comp_AddCycles_CDI();
-    }
-    else
-    {
-        bool negative = addr < R15;
-        u32 abs = negative ? R15 - addr : addr - R15;
-        Comp_MemAccess(CurInstr.T_Reg(8), 15, Op2(abs), 32, negative ? memop_SubtractOffset : 0);
-    }
+    if (!Config::JIT_LiteralOptimisations || !Comp_MemLoadLiteral(32, false, CurInstr.T_Reg(8), addr))
+        Comp_MemAccess(CurInstr.T_Reg(8), 15, Op2(offset), 32, 0);
 }
 
 void Compiler::T_Comp_MemSPRel()
@@ -621,15 +437,138 @@ s32 Compiler::Comp_MemAccessBlock(int rn, BitSet16 regs, bool store, bool preinc
     if (regsCount == 0)
         return 0; // actually not the right behaviour TODO: fix me
 
-    SUB(SP, SP, ((regsCount + 1) & ~1) * 8);
-    if (store)
+    if (regsCount == 1 && !usermode && RegCache.LoadedRegs & (1 << *regs.begin()))
     {
+        int flags = 0;
+        if (store)
+            flags |= memop_Store;
+        if (decrement)
+            flags |= memop_SubtractOffset;
+        Op2 offset = preinc ? Op2(4) : Op2(0);
+
+        Comp_MemAccess(*regs.begin(), rn, offset, 32, flags);
+
+        return decrement ? -4 : 4;
+    }
+
+    if (store)
         Comp_AddCycles_CD();
+    else
+        Comp_AddCycles_CDI();
 
-        if (usermode && (regs & BitSet16(0x7f00)))
-            UBFX(W0, RCPSR, 0, 5);
+    int expectedTarget = Num == 0
+        ? ARMJIT_Memory::ClassifyAddress9(CurInstr.DataRegion)
+        : ARMJIT_Memory::ClassifyAddress7(CurInstr.DataRegion);
+
+    bool compileFastPath = Config::JIT_FastMemory
+        && store && !usermode && (CurInstr.Cond() < 0xE || ARMJIT_Memory::IsMappable(expectedTarget));
+
+    if (decrement)
+    {
+        SUB(W0, MapReg(rn), regsCount * 4);
+        ANDI2R(W0, W0, ~3);
+        preinc ^= true;
+    }
+    else
+    {
+        ANDI2R(W0, MapReg(rn), ~3);
+    }
+
+    LoadStorePatch patch;
+    if (compileFastPath)
+    {
+        ptrdiff_t fastPathStart = GetCodeOffset();
+        ptrdiff_t firstLoadStoreOffset;
+
+        bool firstLoadStore = true;
+
+        MOVP2R(X1, Num == 0 ? ARMJIT_Memory::FastMem9Start : ARMJIT_Memory::FastMem7Start);
+        ADD(X1, X1, X0);
+
+        u32 offset = preinc ?
4 : 0; + BitSet16::Iterator it = regs.begin(); + + if (regsCount & 1) + { + int reg = *it; + it++; + + ARM64Reg first = W3; + if (RegCache.LoadedRegs & (1 << reg)) + first = MapReg(reg); + else if (store) + LoadReg(reg, first); + + if (firstLoadStore) + { + firstLoadStoreOffset = GetCodeOffset(); + firstLoadStore = false; + } + + if (store) + STR(INDEX_UNSIGNED, first, X1, offset); + else + LDR(INDEX_UNSIGNED, first, X1, offset); + + if (!(RegCache.LoadedRegs & (1 << reg)) && !store) + SaveReg(reg, first); + + offset += 4; + } + + while (it != regs.end()) + { + int reg = *it; + it++; + int nextReg = *it; + it++; - int i = regsCount - 1; + ARM64Reg first = W3, second = W4; + if (RegCache.LoadedRegs & (1 << reg)) + first = MapReg(reg); + else if (store) + LoadReg(reg, first); + if (RegCache.LoadedRegs & (1 << nextReg)) + second = MapReg(nextReg); + else if (store) + LoadReg(nextReg, second); + + if (firstLoadStore) + { + firstLoadStoreOffset = GetCodeOffset(); + firstLoadStore = false; + } + + if (store) + STP(INDEX_SIGNED, first, second, X1, offset); + else + LDP(INDEX_SIGNED, first, second, X1, offset); + + if (!(RegCache.LoadedRegs & (1 << reg)) && !store) + SaveReg(reg, first); + if (!(RegCache.LoadedRegs & (1 << nextReg)) && !store) + SaveReg(nextReg, second); + + offset += 8; + } + + patch.PatchSize = GetCodeOffset() - fastPathStart; + patch.PatchOffset = fastPathStart - firstLoadStoreOffset; + SwapCodeRegion(); + patch.PatchFunc = GetRXPtr(); + + LoadStorePatches[firstLoadStoreOffset] = patch; + + ABI_PushRegisters({30}); + } + + int i = 0; + + SUB(SP, SP, ((regsCount + 1) & ~1) * 8); + if (store) + { + if (usermode && (regs & BitSet16(0x7f00))) + UBFX(W5, RCPSR, 0, 5); BitSet16::Iterator it = regs.begin(); while (it != regs.end()) @@ -641,7 +580,7 @@ s32 Compiler::Comp_MemAccessBlock(int rn, BitSet16 regs, bool store, bool preinc if (usermode && reg >= 8 && reg < 15) { - if (RegCache.Mapping[reg] != INVALID_REG) + if (RegCache.LoadedRegs & (1 << reg)) MOV(W3, MapReg(reg)); else LoadReg(reg, W3); @@ -651,55 +590,67 @@ s32 Compiler::Comp_MemAccessBlock(int rn, BitSet16 regs, bool store, bool preinc } else if (!usermode && nextReg != regs.end()) { - ARM64Reg first = W3; - ARM64Reg second = W4; + ARM64Reg first = W3, second = W4; - if (RegCache.Mapping[reg] != INVALID_REG) + if (RegCache.LoadedRegs & (1 << reg)) first = MapReg(reg); else LoadReg(reg, W3); - if (RegCache.Mapping[*nextReg] != INVALID_REG) + if (RegCache.LoadedRegs & (1 << *nextReg)) second = MapReg(*nextReg); else LoadReg(*nextReg, W4); - STP(INDEX_SIGNED, EncodeRegTo64(second), EncodeRegTo64(first), SP, i * 8 - 8); + STP(INDEX_SIGNED, EncodeRegTo64(first), EncodeRegTo64(second), SP, i * 8); - i--; + i++; it++; } - else if (RegCache.Mapping[reg] != INVALID_REG) + else if (RegCache.LoadedRegs & (1 << reg)) + { STR(INDEX_UNSIGNED, MapReg(reg), SP, i * 8); + } else { LoadReg(reg, W3); STR(INDEX_UNSIGNED, W3, SP, i * 8); } - i--; + i++; it++; } } - if (decrement) - { - SUB(W0, MapReg(rn), regsCount * 4); - preinc ^= true; - } - else - MOV(W0, MapReg(rn)); + ADD(X1, SP, 0); MOVI2R(W2, regsCount); - BL(Num ? 
              MemFuncsSeq7[store][preinc] : MemFuncsSeq9[store][preinc]);
+    if (Num == 0)
+    {
+        MOV(X3, RCPU);
+        switch (preinc * 2 | store)
+        {
+        case 0: QuickCallFunction(X4, SlowBlockTransfer9<false, false>); break;
+        case 1: QuickCallFunction(X4, SlowBlockTransfer9<false, true>); break;
+        case 2: QuickCallFunction(X4, SlowBlockTransfer9<true, false>); break;
+        case 3: QuickCallFunction(X4, SlowBlockTransfer9<true, true>); break;
+        }
+    }
+    else
+    {
+        switch (preinc * 2 | store)
+        {
+        case 0: QuickCallFunction(X4, SlowBlockTransfer7<false, false>); break;
+        case 1: QuickCallFunction(X4, SlowBlockTransfer7<false, true>); break;
+        case 2: QuickCallFunction(X4, SlowBlockTransfer7<true, false>); break;
+        case 3: QuickCallFunction(X4, SlowBlockTransfer7<true, true>); break;
+        }
+    }
 
     if (!store)
     {
-        Comp_AddCycles_CDI();
-
         if (usermode && !regs[15] && (regs & BitSet16(0x7f00)))
-            UBFX(W0, RCPSR, 0, 5);
+            UBFX(W5, RCPSR, 0, 5);
 
-        int i = regsCount - 1;
         BitSet16::Iterator it = regs.begin();
         while (it != regs.end())
         {
@@ -714,11 +665,8 @@ s32 Compiler::Comp_MemAccessBlock(int rn, BitSet16 regs, bool store, bool preinc
                 MOVI2R(W1, reg - 8);
                 BL(WriteBanked);
                 FixupBranch alreadyWritten = CBNZ(W4);
-                if (RegCache.Mapping[reg] != INVALID_REG)
-                {
+                if (RegCache.LoadedRegs & (1 << reg))
                     MOV(MapReg(reg), W3);
-                    RegCache.DirtyRegs |= 1 << reg;
-                }
                 else
                     SaveReg(reg, W3);
                 SetJumpTarget(alreadyWritten);
@@ -727,20 +675,12 @@
             {
                 ARM64Reg first = W3, second = W4;
 
-                if (RegCache.Mapping[reg] != INVALID_REG)
-                {
+                if (RegCache.LoadedRegs & (1 << reg))
                     first = MapReg(reg);
-                    if (reg != 15)
-                        RegCache.DirtyRegs |= 1 << reg;
-                }
-                if (RegCache.Mapping[*nextReg] != INVALID_REG)
-                {
+                if (RegCache.LoadedRegs & (1 << *nextReg))
                     second = MapReg(*nextReg);
-                    if (*nextReg != 15)
-                        RegCache.DirtyRegs |= 1 << *nextReg;
-                }
 
-                LDP(INDEX_SIGNED, EncodeRegTo64(second), EncodeRegTo64(first), SP, i * 8 - 8);
+                LDP(INDEX_SIGNED, EncodeRegTo64(first), EncodeRegTo64(second), SP, i * 8);
 
                 if (first == W3)
                     SaveReg(reg, W3);
@@ -748,15 +688,12 @@
                     SaveReg(*nextReg, W4);
 
                 it++;
-                i--;
+                i++;
             }
-            else if (RegCache.Mapping[reg] != INVALID_REG)
+            else if (RegCache.LoadedRegs & (1 << reg))
             {
                 ARM64Reg mapped = MapReg(reg);
                 LDR(INDEX_UNSIGNED, mapped, SP, i * 8);
-
-                if (reg != 15)
-                    RegCache.DirtyRegs |= 1 << reg;
             }
             else
             {
@@ -765,11 +702,20 @@
             }
 
             it++;
-            i--;
+            i++;
         }
     }
 
     ADD(SP, SP, ((regsCount + 1) & ~1) * 8);
 
+    if (compileFastPath)
+    {
+        ABI_PopRegisters({30});
+        RET();
+
+        FlushIcacheSection((u8*)patch.PatchFunc, (u8*)GetRXPtr());
+        SwapCodeRegion();
+    }
+
     if (!store && regs[15])
     {
         ARM64Reg mapped = MapReg(15);
diff --git a/src/ARMJIT_Compiler.h b/src/ARMJIT_Compiler.h
new file mode 100644
index 0000000..513c103
--- /dev/null
+++ b/src/ARMJIT_Compiler.h
@@ -0,0 +1,12 @@
+#if defined(__x86_64__)
+#include "ARMJIT_x64/ARMJIT_Compiler.h"
+#elif defined(__aarch64__)
+#include "ARMJIT_A64/ARMJIT_Compiler.h"
+#else
+#error "The current target platform doesn't have a JIT backend"
+#endif
+
+namespace ARMJIT
+{
+extern Compiler* JITCompiler;
+}
\ No newline at end of file
diff --git a/src/ARMJIT_Internal.h b/src/ARMJIT_Internal.h
index 4e45760..19684c4 100644
--- a/src/ARMJIT_Internal.h
+++ b/src/ARMJIT_Internal.h
@@ -3,8 +3,11 @@
 #include "types.h"
 #include
+#include
+#include
 
 #include "ARMJIT.h"
+#include "ARMJIT_Memory.h"
 
 // here lands everything which doesn't fit into ARMJIT.h
 // where it would be included by pretty much
 everything
@@ -160,8 +163,8 @@ public:
         Data.SetLength(numAddresses * 2 + numLiterals);
     }
 
-    u32 PseudoPhysicalAddr;
-
+    u32 StartAddr;
+    u32 StartAddrLocal;
     u32 InstrHash, LiteralHash;
     u8 Num;
     u16 NumAddresses;
@@ -175,28 +178,8 @@ public:
     { return &Data[NumAddresses]; }
     u32* Literals()
     { return &Data[NumAddresses * 2]; }
-    u32* Links()
-    { return &Data[NumAddresses * 2 + NumLiterals]; }
-
-    u32 NumLinks()
-    { return Data.Length - NumAddresses * 2 - NumLiterals; }
-
-    void AddLink(u32 link)
-    {
-        Data.Add(link);
-    }
-
-    void ResetLinks()
-    {
-        Data.SetLength(NumAddresses * 2 + NumLiterals);
-    }
 
 private:
-    /*
-    0..
     Data;
 };
@@ -207,45 +190,32 @@ struct __attribute__((packed)) AddressRange
     u32 Code;
 };
 
-extern AddressRange CodeRanges[ExeMemSpaceSize / 512];
 
 typedef void (*InterpreterFunc)(ARM* cpu);
 extern InterpreterFunc InterpretARM[];
 extern InterpreterFunc InterpretTHUMB[];
 
-extern u8 MemoryStatus9[0x800000];
-extern u8 MemoryStatus7[0x800000];
-
 extern TinyVector<u32> InvalidLiterals;
 
-void* GetFuncForAddr(ARM* cpu, u32 addr, bool store, int size);
-
-template <u32 Num>
-void LinkBlock(ARM* cpu, u32 codeOffset);
+extern AddressRange* const CodeMemRegions[ARMJIT_Memory::memregions_Count];
 
-enum
+inline bool PageContainsCode(AddressRange* range)
 {
-    memregion_Other = 0,
-    memregion_ITCM,
-    memregion_DTCM,
-    memregion_BIOS9,
-    memregion_MainRAM,
-    memregion_SWRAM9,
-    memregion_SWRAM7,
-    memregion_IO9,
-    memregion_VRAM,
-    memregion_BIOS7,
-    memregion_WRAM7,
-    memregion_IO7,
-    memregion_Wifi,
-    memregion_VWRAM,
-};
+    for (int i = 0; i < 8; i++)
+    {
+        if (range[i].Blocks.Length > 0)
+            return true;
+    }
+    return false;
+}
+
+u32 LocaliseCodeAddress(u32 num, u32 addr);
 
-int ClassifyAddress9(u32 addr);
-int ClassifyAddress7(u32 addr);
+template <u32 Num>
+void LinkBlock(ARM* cpu, u32 codeOffset);
 
-template <typename T> T SlowRead9(ARMv5* cpu, u32 addr);
-template <typename T> void SlowWrite9(ARMv5* cpu, u32 addr, T val);
+template <typename T> T SlowRead9(u32 addr, ARMv5* cpu);
+template <typename T> void SlowWrite9(u32 addr, ARMv5* cpu, T val);
 template <typename T> T SlowRead7(u32 addr);
 template <typename T> void SlowWrite7(u32 addr, T val);
diff --git a/src/ARMJIT_Memory.cpp b/src/ARMJIT_Memory.cpp
new file mode 100644
index 0000000..162827d
--- /dev/null
+++ b/src/ARMJIT_Memory.cpp
@@ -0,0 +1,822 @@
+#ifdef __SWITCH__
+#include "switch/compat_switch.h"
+#endif
+
+#include "ARMJIT_Memory.h"
+
+#include "ARMJIT_Internal.h"
+#include "ARMJIT_Compiler.h"
+
+#include "GPU.h"
+#include "GPU3D.h"
+#include "Wifi.h"
+#include "NDSCart.h"
+#include "SPU.h"
+
+#include
+
+/*
+    We're handling fastmem here.
+
+    Basically we're repurposing a big piece of virtual memory
+    and mapping the memory regions into it,
+    laid out as they are on the DS.
+
+    On most systems you have a single piece of main ram,
+    maybe some video ram and faster cache RAM and that's about it.
+    Here we have not only many more distinct memory regions,
+    but also two address spaces. Not only that but they all have
+    mirrors (the worst case is 16kb SWRAM which is mirrored 1024x).
+
+    We handle this by only mapping those regions which are actually
+    used and by praying the games don't go wild.
+
+    Beware, this file is full of platform-specific code.
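
    To make the mirror bookkeeping concrete, a worked example using the
    main RAM figures from GetRegionMapping further down (4 MB of physical
    memory mirrored across the 16 MB region at 0x02000000): a faulting
    access to 0x02E01234 belongs to the mirror that MapAtAddress computes as

        mirrorStart = (addr - mappingStart) / memorySize * memorySize + mappingStart
                    = (0x02E01234 - 0x02000000) / 0x400000 * 0x400000 + 0x02000000
                    = 0x02C00000

    so the whole 4 MB mirror around the faulting address can be mapped in
    one go, page states permitting.
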
+ +*/ + +namespace ARMJIT_Memory +{ +#ifdef __aarch64__ +struct FaultDescription +{ + u64 IntegerRegisters[33]; + u64 FaultAddr; + + u32 GetEmulatedAddr() + { + // now this is podracing + return (u32)IntegerRegisters[0]; + } + u64 RealAddr() + { + return FaultAddr; + } + + u64 GetPC() + { + return IntegerRegisters[32]; + } + + void RestoreAndRepeat(s64 offset); +}; +#else +struct FaultDescription +{ + u64 GetPC() + { + return 0; + } + + u32 GetEmulatedAddr() + { + return 0; + } + u64 RealAddr() + { + return 0; + } + + void RestoreAndRepeat(s64 offset); +}; +#endif + +void FaultHandler(FaultDescription* faultDesc); +} + + +#ifdef __aarch64__ + +extern "C" void ARM_RestoreContext(u64* registers) __attribute__((noreturn)); + +#endif + +#ifdef __SWITCH__ +// with LTO the symbols seem to be not properly overriden +// if they're somewhere else + +extern "C" +{ +extern char __start__; +extern char __rodata_start; + +alignas(16) u8 __nx_exception_stack[0x8000]; +u64 __nx_exception_stack_size = 0x8000; + +void __libnx_exception_handler(ThreadExceptionDump* ctx) +{ + ARMJIT_Memory::FaultDescription desc; + memcpy(desc.IntegerRegisters, &ctx->cpu_gprs[0].x, 8*29); + desc.IntegerRegisters[29] = ctx->fp.x; + desc.IntegerRegisters[30] = ctx->lr.x; + desc.IntegerRegisters[31] = ctx->sp.x; + desc.IntegerRegisters[32] = ctx->pc.x; + + ARMJIT_Memory::FaultHandler(&desc); + + if (ctx->pc.x >= (u64)&__start__ && ctx->pc.x < (u64)&__rodata_start) + { + printf("non JIT fault in .text at 0x%x (type %d) (trying to access 0x%x?)\n", + ctx->pc.x - (u64)&__start__, ctx->error_desc, ctx->far.x); + } + else + { + printf("non JIT fault somewhere in deep (address) space at %x (type %d)\n", ctx->pc.x, ctx->error_desc); + } +} + +} +#endif + +namespace ARMJIT_Memory +{ + +#ifdef __aarch64__ +void FaultDescription::RestoreAndRepeat(s64 offset) +{ + IntegerRegisters[32] += offset; + + ARM_RestoreContext(IntegerRegisters); +} +#else +void FaultDescription::RestoreAndRepeat(s64 offset) +{ + +} +#endif + +void* FastMem9Start, *FastMem7Start; + +const u32 MemoryTotalSize = + NDS::MainRAMSize + + NDS::SharedWRAMSize + + NDS::ARM7WRAMSize + + DTCMPhysicalSize; + +const u32 MemBlockMainRAMOffset = 0; +const u32 MemBlockSWRAMOffset = NDS::MainRAMSize; +const u32 MemBlockARM7WRAMOffset = NDS::MainRAMSize + NDS::SharedWRAMSize; +const u32 MemBlockDTCMOffset = NDS::MainRAMSize + NDS::SharedWRAMSize + NDS::ARM7WRAMSize; + +const u32 OffsetsPerRegion[memregions_Count] = +{ + UINT32_MAX, + UINT32_MAX, + MemBlockDTCMOffset, + UINT32_MAX, + MemBlockMainRAMOffset, + MemBlockSWRAMOffset, + UINT32_MAX, + UINT32_MAX, + UINT32_MAX, + MemBlockARM7WRAMOffset, + UINT32_MAX, + UINT32_MAX, + UINT32_MAX, +}; + +enum +{ + memstate_Unmapped, + memstate_MappedRW, + // on switch this is unmapped as well + memstate_MappedProtected, +}; + +u8 MappingStatus9[1 << (32-12)]; +u8 MappingStatus7[1 << (32-12)]; + +#ifdef __SWITCH__ +u8* MemoryBase; +u8* MemoryBaseCodeMem; +#else +u8* MemoryBase; +#endif + +bool MapIntoRange(u32 addr, u32 num, u32 offset, u32 size) +{ + u8* dst = (u8*)(num == 0 ? FastMem9Start : FastMem7Start) + addr; +#ifdef __SWITCH__ + Result r = (svcMapProcessMemory(dst, envGetOwnProcessHandle(), + (u64)(MemoryBaseCodeMem + offset), size)); + return R_SUCCEEDED(r); +#endif +} + +bool UnmapFromRange(u32 addr, u32 num, u32 offset, u32 size) +{ + u8* dst = (u8*)(num == 0 ? 
FastMem9Start : FastMem7Start) + addr; +#ifdef __SWITCH__ + Result r = svcUnmapProcessMemory(dst, envGetOwnProcessHandle(), + (u64)(MemoryBaseCodeMem + offset), size); + printf("%x\n", r); + return R_SUCCEEDED(r); +#endif +} + +struct Mapping +{ + u32 Addr; + u32 Size, LocalOffset; + u32 Num; + + void Unmap(int region) + { + bool skipDTCM = Num == 0 && region != memregion_DTCM; + u8* statuses = Num == 0 ? MappingStatus9 : MappingStatus7; + u32 offset = 0; + while (offset < Size) + { + if (skipDTCM && Addr + offset == NDS::ARM9->DTCMBase) + { + offset += NDS::ARM9->DTCMSize; + printf("%x skip\n", NDS::ARM9->DTCMSize); + } + else + { + u32 segmentOffset = offset; + u8 status = statuses[(Addr + offset) >> 12]; + while (statuses[(Addr + offset) >> 12] == status + && offset < Size + && (!skipDTCM || Addr + offset != NDS::ARM9->DTCMBase)) + { + assert(statuses[(Addr + offset) >> 12] != memstate_Unmapped); + statuses[(Addr + offset) >> 12] = memstate_Unmapped; + offset += 0x1000; + } + + if (status == memstate_MappedRW) + { + u32 segmentSize = offset - segmentOffset; + printf("unmapping %x %x %x %x\n", Addr + segmentOffset, Num, segmentOffset + LocalOffset + OffsetsPerRegion[region], segmentSize); + bool success = UnmapFromRange(Addr + segmentOffset, Num, segmentOffset + LocalOffset + OffsetsPerRegion[region], segmentSize); + assert(success); + } + } + } + } +}; +ARMJIT::TinyVector Mappings[memregions_Count]; + +void SetCodeProtection(int region, u32 offset, bool protect) +{ + offset &= ~0xFFF; + printf("set code protection %d %x %d\n", region, offset, protect); + + for (int i = 0; i < Mappings[region].Length; i++) + { + Mapping& mapping = Mappings[region][i]; + + u32 effectiveAddr = mapping.Addr + (offset - mapping.LocalOffset); + if (mapping.Num == 0 + && region != memregion_DTCM + && effectiveAddr >= NDS::ARM9->DTCMBase + && effectiveAddr < (NDS::ARM9->DTCMBase + NDS::ARM9->DTCMSize)) + continue; + + u8* states = (u8*)(mapping.Num == 0 ? MappingStatus9 : MappingStatus7); + + printf("%d %x %d\n", states[effectiveAddr >> 12], effectiveAddr, mapping.Num); + assert(states[effectiveAddr >> 12] == (protect ? memstate_MappedRW : memstate_MappedProtected)); + states[effectiveAddr >> 12] = protect ? 
memstate_MappedProtected : memstate_MappedRW; + + bool success; + if (protect) + success = UnmapFromRange(effectiveAddr, mapping.Num, OffsetsPerRegion[region] + offset, 0x1000); + else + success = MapIntoRange(effectiveAddr, mapping.Num, OffsetsPerRegion[region] + offset, 0x1000); + assert(success); + } +} + +void RemapDTCM(u32 newBase, u32 newSize) +{ + // this first part could be made more efficient + // by unmapping DTCM first and then map the holes + u32 oldDTCMBase = NDS::ARM9->DTCMBase; + u32 oldDTCBEnd = oldDTCMBase + NDS::ARM9->DTCMSize; + + u32 newEnd = newBase + newSize; + + printf("remapping DTCM %x %x %x %x\n", newBase, newEnd, oldDTCMBase, oldDTCBEnd); + // unmap all regions containing the old or the current DTCM mapping + for (int region = 0; region < memregions_Count; region++) + { + if (region == memregion_DTCM) + continue; + + for (int i = 0; i < Mappings[region].Length;) + { + Mapping& mapping = Mappings[region][i]; + + u32 start = mapping.Addr; + u32 end = mapping.Addr + mapping.Size; + + printf("mapping %d %x %x %x %x\n", region, mapping.Addr, mapping.Size, mapping.Num, mapping.LocalOffset); + + bool oldOverlap = NDS::ARM9->DTCMSize > 0 && ((oldDTCMBase >= start && oldDTCMBase < end) || (oldDTCBEnd >= start && oldDTCBEnd < end)); + bool newOverlap = newSize > 0 && ((newBase >= start && newBase < end) || (newEnd >= start && newEnd < end)); + + if (mapping.Num == 0 && (oldOverlap || newOverlap)) + { + mapping.Unmap(region); + Mappings[region].Remove(i); + } + else + { + i++; + } + } + } + + for (int i = 0; i < Mappings[memregion_DTCM].Length; i++) + { + Mappings[memregion_DTCM][i].Unmap(memregion_DTCM); + } + Mappings[memregion_DTCM].Clear(); +} + +void RemapSWRAM() +{ + printf("remapping SWRAM\n"); + for (int i = 0; i < Mappings[memregion_SWRAM].Length; i++) + { + Mappings[memregion_SWRAM][i].Unmap(memregion_SWRAM); + } + Mappings[memregion_SWRAM].Clear(); + for (int i = 0; i < Mappings[memregion_WRAM7].Length; i++) + { + Mappings[memregion_WRAM7][i].Unmap(memregion_WRAM7); + } + Mappings[memregion_WRAM7].Clear(); +} + +bool MapAtAddress(u32 addr) +{ + u32 num = NDS::CurCPU; + + int region = num == 0 + ? ClassifyAddress9(addr) + : ClassifyAddress7(addr); + + if (!IsMappable(region)) + return false; + + u32 mappingStart, mappingSize, memoryOffset, memorySize; + bool isMapped = GetRegionMapping(region, num, mappingStart, mappingSize, memoryOffset, memorySize); + + if (!isMapped) + return false; + + // this calculation even works with DTCM + // which doesn't have to be aligned to it's own size + u32 mirrorStart = (addr - mappingStart) / memorySize * memorySize + mappingStart; + + u8* states = num == 0 ? 
MappingStatus9 : MappingStatus7; + printf("trying to create mapping %08x %d %x %d %x\n", addr, num, memorySize, region, memoryOffset); + bool isExecutable = ARMJIT::CodeMemRegions[region]; + + ARMJIT::AddressRange* range = ARMJIT::CodeMemRegions[region] + memoryOffset; + + // this overcomplicated piece of code basically just finds whole pieces of code memory + // which can be mapped + u32 offset = 0; + bool skipDTCM = num == 0 && region != memregion_DTCM; + while (offset < memorySize) + { + if (skipDTCM && mirrorStart + offset == NDS::ARM9->DTCMBase) + { + offset += NDS::ARM9->DTCMSize; + } + else + { + u32 sectionOffset = offset; + bool hasCode = isExecutable && ARMJIT::PageContainsCode(&range[offset / 512]); + while ((!isExecutable || ARMJIT::PageContainsCode(&range[offset / 512]) == hasCode) + && offset < memorySize + && (!skipDTCM || mirrorStart + offset != NDS::ARM9->DTCMBase)) + { + assert(states[(mirrorStart + offset) >> 12] == memstate_Unmapped); + states[(mirrorStart + offset) >> 12] = hasCode ? memstate_MappedProtected : memstate_MappedRW; + offset += 0x1000; + } + + u32 sectionSize = offset - sectionOffset; + + if (!hasCode) + { + printf("trying to map %x (size: %x) from %x\n", mirrorStart + sectionOffset, sectionSize, sectionOffset + memoryOffset + OffsetsPerRegion[region]); + bool succeded = MapIntoRange(mirrorStart + sectionOffset, num, sectionOffset + memoryOffset + OffsetsPerRegion[region], sectionSize); + assert(succeded); + } + } + } + + Mapping mapping{mirrorStart, memorySize, memoryOffset, num}; + Mappings[region].Add(mapping); + + printf("mapped mirror at %08x-%08x\n", mirrorStart, mirrorStart + memorySize - 1); + + return true; +} + +void FaultHandler(FaultDescription* faultDesc) +{ + if (ARMJIT::JITCompiler->IsJITFault(faultDesc->GetPC())) + { + bool rewriteToSlowPath = true; + + u32 addr = faultDesc->GetEmulatedAddr(); + + if ((NDS::CurCPU == 0 ? MappingStatus9 : MappingStatus7)[addr >> 12] == memstate_Unmapped) + rewriteToSlowPath = !MapAtAddress(faultDesc->GetEmulatedAddr()); + + s64 offset = 0; + if (rewriteToSlowPath) + { + offset = ARMJIT::JITCompiler->RewriteMemAccess(faultDesc->GetPC()); + } + faultDesc->RestoreAndRepeat(offset); + } +} + +void Init() +{ +#if defined(__SWITCH__) + MemoryBase = (u8*)memalign(0x1000, MemoryTotalSize); + MemoryBaseCodeMem = (u8*)virtmemReserve(MemoryTotalSize); + + bool succeded = R_SUCCEEDED(svcMapProcessCodeMemory(envGetOwnProcessHandle(), (u64)MemoryBaseCodeMem, + (u64)MemoryBase, MemoryTotalSize)); + assert(succeded); + succeded = R_SUCCEEDED(svcSetProcessMemoryPermission(envGetOwnProcessHandle(), (u64)MemoryBaseCodeMem, + MemoryTotalSize, Perm_Rw)); + assert(succeded); + + // 8 GB of address space, just don't ask... 
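
(A minimal sketch of the decision tree FaultHandler above implements. IsJITFault, MapAtAddress and RewriteMemAccess mirror functions in this patch; PageIsUnmapped and ResumeAt are illustrative placeholders for the MappingStatus9/7 lookup and ARM_RestoreContext.)

#include <cstdint>

bool IsJITFault(uint64_t pc);            // assumed: does pc lie inside JIT code?
bool MapAtAddress(uint32_t emuAddr);     // assumed: lazily map the faulting mirror
int64_t RewriteMemAccess(uint64_t pc);   // assumed: patch the access to the slow path
bool PageIsUnmapped(uint32_t emuAddr);   // assumed: MappingStatus9/7 lookup
void ResumeAt(uint64_t pc);              // assumed: restore context and continue

void OnFault(uint64_t pc, uint32_t emuAddr)
{
    if (!IsJITFault(pc))
        return;                          // a real crash, not a guest memory access

    int64_t adjust = 0;
    if (!PageIsUnmapped(emuAddr) || !MapAtAddress(emuAddr))
        adjust = RewriteMemAccess(pc);   // demote this access to the slow path
    ResumeAt(pc + adjust);               // re-run the (possibly rewritten) access
}
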
+ FastMem9Start = virtmemReserve(0x100000000); + assert(FastMem9Start); + FastMem7Start = virtmemReserve(0x100000000); + assert(FastMem7Start); + + NDS::MainRAM = MemoryBaseCodeMem + MemBlockMainRAMOffset; + NDS::SharedWRAM = MemoryBaseCodeMem + MemBlockSWRAMOffset; + NDS::ARM7WRAM = MemoryBaseCodeMem + MemBlockARM7WRAMOffset; + NDS::ARM9->DTCM = MemoryBaseCodeMem + MemBlockDTCMOffset; +#else + MemoryBase = new u8[MemoryTotalSize]; + + NDS::MainRAM = MemoryBase + MemBlockMainRAMOffset; + NDS::SharedWRAM = MemoryBase + MemBlockSWRAMOffset; + NDS::ARM7WRAM = MemoryBase + MemBlockARM7WRAMOffset; + NDS::ARM9->DTCM = MemoryBase + MemBlockDTCMOffset; +#endif +} + +void DeInit() +{ +#if defined(__SWITCH__) + virtmemFree(FastMem9Start, 0x100000000); + virtmemFree(FastMem7Start, 0x100000000); + + svcUnmapProcessCodeMemory(envGetOwnProcessHandle(), (u64)MemoryBaseCodeMem, (u64)MemoryBase, MemoryTotalSize); + virtmemFree(MemoryBaseCodeMem, MemoryTotalSize); + free(MemoryBase); +#else + delete[] MemoryBase; +#endif +} + +void Reset() +{ + for (int region = 0; region < memregions_Count; region++) + { + for (int i = 0; i < Mappings[region].Length; i++) + Mappings[region][i].Unmap(region); + Mappings[region].Clear(); + } + + for (int i = 0; i < sizeof(MappingStatus9); i++) + { + assert(MappingStatus9[i] == memstate_Unmapped); + assert(MappingStatus7[i] == memstate_Unmapped); + } + + printf("done resetting jit mem\n"); +} + +bool IsMappable(int region) +{ + return OffsetsPerRegion[region] != UINT32_MAX; +} + +bool GetRegionMapping(int region, u32 num, u32& mappingStart, u32& mappingSize, u32& memoryOffset, u32& memorySize) +{ + memoryOffset = 0; + switch (region) + { + case memregion_ITCM: + if (num == 0) + { + mappingStart = 0; + mappingSize = NDS::ARM9->ITCMSize; + memorySize = ITCMPhysicalSize; + return true; + } + return false; + case memregion_DTCM: + if (num == 0) + { + mappingStart = NDS::ARM9->DTCMBase; + mappingSize = NDS::ARM9->DTCMSize; + memorySize = DTCMPhysicalSize; + return true; + } + return false; + case memregion_BIOS9: + if (num == 0) + { + mappingStart = 0xFFFF0000; + mappingSize = 0x10000; + memorySize = 0x1000; + return true; + } + return false; + case memregion_MainRAM: + mappingStart = 0x2000000; + mappingSize = 0x1000000; + memorySize = NDS::MainRAMSize; + return true; + case memregion_SWRAM: + mappingStart = 0x3000000; + if (num == 0 && NDS::SWRAM_ARM9.Mem) + { + mappingSize = 0x1000000; + memoryOffset = NDS::SWRAM_ARM9.Mem - NDS::SharedWRAM; + memorySize = NDS::SWRAM_ARM9.Mask + 1; + return true; + } + else if (num == 1 && NDS::SWRAM_ARM7.Mem) + { + mappingSize = 0x800000; + memoryOffset = NDS::SWRAM_ARM7.Mem - NDS::SharedWRAM; + memorySize = NDS::SWRAM_ARM7.Mask + 1; + return true; + } + return false; + case memregion_VRAM: + if (num == 0) + { + // this is a gross simplification + // mostly to make code on vram working + // it doesn't take any of the actual VRAM mappings into account + mappingStart = 0x6000000; + mappingSize = 0x1000000; + memorySize = 0x100000; + return true; + } + return false; + case memregion_BIOS7: + if (num == 1) + { + mappingStart = 0; + mappingSize = 0x4000; + memorySize = 0x4000; + return true; + } + return false; + case memregion_WRAM7: + if (num == 1) + { + if (NDS::SWRAM_ARM7.Mem) + { + mappingStart = 0x3800000; + mappingSize = 0x800000; + } + else + { + mappingStart = 0x3000000; + mappingSize = 0x1000000; + } + memorySize = NDS::ARM7WRAMSize; + return true; + } + return false; + case memregion_VWRAM: + if (num == 1) + { + mappingStart = 0x6000000; 
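
(A small sketch of how the four values GetRegionMapping fills in relate to each other, with illustrative types: a region of mappingSize bytes at mappingStart mirrors a physical block of memorySize bytes located memoryOffset bytes into the shared memory block.)

#include <cstdint>

struct RegionDesc { uint32_t mappingStart, mappingSize, memoryOffset, memorySize; };

// byte offset into the backing block for an emulated address inside the region
inline uint32_t PhysicalOffset(const RegionDesc& r, uint32_t addr)
{
    return r.memoryOffset + (addr - r.mappingStart) % r.memorySize;
}

// e.g. main RAM as described above, {0x2000000, 0x1000000, 0, 0x400000}:
// PhysicalOffset(r, 0x02E01234) == 0x201234 — the same bytes in all four mirrors
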
+ mappingSize = 0x1000000; + memorySize = 0x20000; + return true; + } + return false; + default: + // for the JIT we don't are about the rest + return false; + } +} + +int ClassifyAddress9(u32 addr) +{ + if (addr < NDS::ARM9->ITCMSize) + return memregion_ITCM; + else if (addr >= NDS::ARM9->DTCMBase && addr < (NDS::ARM9->DTCMBase + NDS::ARM9->DTCMSize)) + return memregion_DTCM; + else if ((addr & 0xFFFFF000) == 0xFFFF0000) + return memregion_BIOS9; + else + { + switch (addr & 0xFF000000) + { + case 0x02000000: + return memregion_MainRAM; + case 0x03000000: + if (NDS::SWRAM_ARM9.Mem) + return memregion_SWRAM; + else + return memregion_Other; + case 0x04000000: + return memregion_IO9; + case 0x06000000: + return memregion_VRAM; + } + } + return memregion_Other; +} + +int ClassifyAddress7(u32 addr) +{ + if (addr < 0x00004000) + return memregion_BIOS7; + else + { + switch (addr & 0xFF800000) + { + case 0x02000000: + case 0x02800000: + return memregion_MainRAM; + case 0x03000000: + if (NDS::SWRAM_ARM7.Mem) + return memregion_SWRAM; + else + return memregion_WRAM7; + case 0x03800000: + return memregion_WRAM7; + case 0x04000000: + return memregion_IO7; + case 0x04800000: + return memregion_Wifi; + case 0x06000000: + case 0x06800000: + return memregion_VWRAM; + } + } + return memregion_Other; +} + +void WifiWrite32(u32 addr, u32 val) +{ + Wifi::Write(addr, val & 0xFFFF); + Wifi::Write(addr + 2, val >> 16); +} + +u32 WifiRead32(u32 addr) +{ + return Wifi::Read(addr) | (Wifi::Read(addr + 2) << 16); +} + +template +void VRAMWrite(u32 addr, T val) +{ + switch (addr & 0x00E00000) + { + case 0x00000000: GPU::WriteVRAM_ABG(addr, val); return; + case 0x00200000: GPU::WriteVRAM_BBG(addr, val); return; + case 0x00400000: GPU::WriteVRAM_AOBJ(addr, val); return; + case 0x00600000: GPU::WriteVRAM_BOBJ(addr, val); return; + default: GPU::WriteVRAM_LCDC(addr, val); return; + } +} +template +T VRAMRead(u32 addr) +{ + switch (addr & 0x00E00000) + { + case 0x00000000: return GPU::ReadVRAM_ABG(addr); + case 0x00200000: return GPU::ReadVRAM_BBG(addr); + case 0x00400000: return GPU::ReadVRAM_AOBJ(addr); + case 0x00600000: return GPU::ReadVRAM_BOBJ(addr); + default: return GPU::ReadVRAM_LCDC(addr); + } +} + +void* GetFuncForAddr(ARM* cpu, u32 addr, bool store, int size) +{ + if (cpu->Num == 0) + { + switch (addr & 0xFF000000) + { + case 0x04000000: + if (!store && size == 32 && addr == 0x04100010 && NDS::ExMemCnt[0] & (1<<11)) + return (void*)NDSCart::ReadROMData; + + /* + unfortunately we can't map GPU2D this way + since it's hidden inside an object + + though GPU3D registers are accessed much more intensive + */ + if (addr >= 0x04000320 && addr < 0x040006A4) + { + switch (size | store) + { + case 8: return (void*)GPU3D::Read8; + case 9: return (void*)GPU3D::Write8; + case 16: return (void*)GPU3D::Read16; + case 17: return (void*)GPU3D::Write16; + case 32: return (void*)GPU3D::Read32; + case 33: return (void*)GPU3D::Write32; + } + } + + switch (size | store) + { + case 8: return (void*)NDS::ARM9IORead8; + case 9: return (void*)NDS::ARM9IOWrite8; + case 16: return (void*)NDS::ARM9IORead16; + case 17: return (void*)NDS::ARM9IOWrite16; + case 32: return (void*)NDS::ARM9IORead32; + case 33: return (void*)NDS::ARM9IOWrite32; + } + break; + case 0x06000000: + switch (size | store) + { + case 8: return (void*)VRAMRead; + case 9: return NULL; + case 16: return (void*)VRAMRead; + case 17: return (void*)VRAMWrite; + case 32: return (void*)VRAMRead; + case 33: return (void*)VRAMWrite; + } + break; + } + } + else + { + switch 
(addr & 0xFF800000) + { + case 0x04000000: + if (addr >= 0x04000400 && addr < 0x04000520) + { + switch (size | store) + { + case 8: return (void*)SPU::Read8; + case 9: return (void*)SPU::Write8; + case 16: return (void*)SPU::Read16; + case 17: return (void*)SPU::Write16; + case 32: return (void*)SPU::Read32; + case 33: return (void*)SPU::Write32; + } + } + + switch (size | store) + { + case 8: return (void*)NDS::ARM7IORead8; + case 9: return (void*)NDS::ARM7IOWrite8; + case 16: return (void*)NDS::ARM7IORead16; + case 17: return (void*)NDS::ARM7IOWrite16; + case 32: return (void*)NDS::ARM7IORead32; + case 33: return (void*)NDS::ARM7IOWrite32; + } + break; + case 0x04800000: + if (addr < 0x04810000 && size >= 16) + { + switch (size | store) + { + case 16: return (void*)Wifi::Read; + case 17: return (void*)Wifi::Write; + case 32: return (void*)WifiRead32; + case 33: return (void*)WifiWrite32; + } + } + break; + case 0x06000000: + case 0x06800000: + switch (size | store) + { + case 8: return (void*)GPU::ReadVRAM_ARM7; + case 9: return (void*)GPU::WriteVRAM_ARM7; + case 16: return (void*)GPU::ReadVRAM_ARM7; + case 17: return (void*)GPU::WriteVRAM_ARM7; + case 32: return (void*)GPU::ReadVRAM_ARM7; + case 33: return (void*)GPU::WriteVRAM_ARM7; + } + } + } + return NULL; +} + +} \ No newline at end of file diff --git a/src/ARMJIT_Memory.h b/src/ARMJIT_Memory.h new file mode 100644 index 0000000..1a59d98 --- /dev/null +++ b/src/ARMJIT_Memory.h @@ -0,0 +1,53 @@ +#ifndef ARMJIT_MEMORY +#define ARMJIT_MEMORY + +#include "types.h" + +#include "ARM.h" + +namespace ARMJIT_Memory +{ + +extern void* FastMem9Start; +extern void* FastMem7Start; + +void Init(); +void DeInit(); + +void Reset(); + +enum +{ + memregion_Other = 0, + memregion_ITCM, + memregion_DTCM, + memregion_BIOS9, + memregion_MainRAM, + memregion_SWRAM, + memregion_IO9, + memregion_VRAM, + memregion_BIOS7, + memregion_WRAM7, + memregion_IO7, + memregion_Wifi, + memregion_VWRAM, + memregions_Count +}; + +int ClassifyAddress9(u32 addr); +int ClassifyAddress7(u32 addr); + +bool GetRegionMapping(int region, u32 num, u32& mappingStart, u32& mappingSize, u32& memoryOffset, u32& memorySize); + +bool IsMappable(int region); + +void RemapDTCM(u32 newBase, u32 newSize); +void RemapSWRAM(); + +void SetCodeProtection(int region, u32 offset, bool protect); + +void* GetFuncForAddr(ARM* cpu, u32 addr, bool store, int size); + +} + +#endif \ No newline at end of file diff --git a/src/ARMJIT_x64/ARMJIT_Compiler.cpp b/src/ARMJIT_x64/ARMJIT_Compiler.cpp index fd3fb70..34c1c91 100644 --- a/src/ARMJIT_x64/ARMJIT_Compiler.cpp +++ b/src/ARMJIT_x64/ARMJIT_Compiler.cpp @@ -301,24 +301,6 @@ Compiler::Compiler() RET(); } - { - CPSRDirty = true; - BranchStub[0] = GetWritableCodePtr(); - SaveCPSR(); - MOV(64, R(ABI_PARAM1), R(RCPU)); - CALL((u8*)ARMJIT::LinkBlock<0>); - LoadCPSR(); - JMP((u8*)ARM_Ret, true); - - CPSRDirty = true; - BranchStub[1] = GetWritableCodePtr(); - SaveCPSR(); - MOV(64, R(ABI_PARAM1), R(RCPU)); - CALL((u8*)ARMJIT::LinkBlock<1>); - LoadCPSR(); - JMP((u8*)ARM_Ret, true); - } - // move the region forward to prevent overwriting the generated functions CodeMemSize -= GetWritableCodePtr() - ResetStart; ResetStart = GetWritableCodePtr(); @@ -520,6 +502,11 @@ void Compiler::Reset() FarCode = FarStart; } +bool Compiler::IsJITFault(u64 addr) +{ + return addr >= (u64)CodeMemory && addr < (u64)CodeMemory + sizeof(CodeMemory); +} + void Compiler::Comp_SpecialBranchBehaviour(bool taken) { if (taken && CurInstr.BranchFlags & branch_IdleBranch) @@ -531,32 +518,11 
@@ void Compiler::Comp_SpecialBranchBehaviour(bool taken) RegCache.PrepareExit(); SUB(32, MDisp(RCPU, offsetof(ARM, Cycles)), Imm32(ConstantCycles)); - - if (Config::JIT_BrancheOptimisations == 2 && !(CurInstr.BranchFlags & branch_IdleBranch) - && (!taken || (CurInstr.BranchFlags & branch_StaticTarget))) - { - FixupBranch ret = J_CC(CC_S); - CMP(32, MDisp(RCPU, offsetof(ARM, StopExecution)), Imm8(0)); - FixupBranch ret2 = J_CC(CC_NZ); - - u8* rewritePart = GetWritableCodePtr(); - NOP(5); - - MOV(32, R(ABI_PARAM2), Imm32(rewritePart - ResetStart)); - JMP((u8*)BranchStub[Num], true); - - SetJumpTarget(ret); - SetJumpTarget(ret2); - JMP((u8*)ARM_Ret, true); - } - else - { - JMP((u8*)&ARM_Ret, true); - } + JMP((u8*)&ARM_Ret, true); } } -JitBlockEntry Compiler::CompileBlock(u32 translatedAddr, ARM* cpu, bool thumb, FetchedInstr instrs[], int instrsCount) +JitBlockEntry Compiler::CompileBlock(ARM* cpu, bool thumb, FetchedInstr instrs[], int instrsCount) { if (NearSize - (NearCode - NearStart) < 1024 * 32) // guess... { @@ -575,7 +541,7 @@ JitBlockEntry Compiler::CompileBlock(u32 translatedAddr, ARM* cpu, bool thumb, F CodeRegion = instrs[0].Addr >> 24; CurCPU = cpu; // CPSR might have been modified in a previous block - CPSRDirty = Config::JIT_BrancheOptimisations == 2; + CPSRDirty = false; JitBlockEntry res = (JitBlockEntry)GetWritableCodePtr(); @@ -685,31 +651,7 @@ JitBlockEntry Compiler::CompileBlock(u32 translatedAddr, ARM* cpu, bool thumb, F RegCache.Flush(); SUB(32, MDisp(RCPU, offsetof(ARM, Cycles)), Imm32(ConstantCycles)); - - if (Config::JIT_BrancheOptimisations == 2 - && !(instrs[instrsCount - 1].BranchFlags & branch_IdleBranch) - && (!instrs[instrsCount - 1].Info.Branches() - || instrs[instrsCount - 1].BranchFlags & branch_FollowCondNotTaken - || (instrs[instrsCount - 1].BranchFlags & branch_FollowCondTaken && instrs[instrsCount - 1].BranchFlags & branch_StaticTarget))) - { - FixupBranch ret = J_CC(CC_S); - CMP(32, MDisp(RCPU, offsetof(ARM, StopExecution)), Imm8(0)); - FixupBranch ret2 = J_CC(CC_NZ); - - u8* rewritePart = GetWritableCodePtr(); - NOP(5); - - MOV(32, R(ABI_PARAM2), Imm32(rewritePart - ResetStart)); - JMP((u8*)BranchStub[Num], true); - - SetJumpTarget(ret); - SetJumpTarget(ret2); - JMP((u8*)ARM_Ret, true); - } - else - { - JMP((u8*)ARM_Ret, true); - } + JMP((u8*)ARM_Ret, true); /*FILE* codeout = fopen("codeout", "a"); fprintf(codeout, "beginning block argargarg__ %x!!!", instrs[0].Addr); @@ -720,22 +662,6 @@ JitBlockEntry Compiler::CompileBlock(u32 translatedAddr, ARM* cpu, bool thumb, F return res; } -void Compiler::LinkBlock(u32 offset, JitBlockEntry entry) -{ - u8* curPtr = GetWritableCodePtr(); - SetCodePtr(ResetStart + offset); - JMP((u8*)entry, true); - SetCodePtr(curPtr); -} - -void Compiler::UnlinkBlock(u32 offset) -{ - u8* curPtr = GetWritableCodePtr(); - SetCodePtr(ResetStart + offset); - NOP(5); - SetCodePtr(curPtr); -} - void Compiler::Comp_AddCycles_C(bool forceNonConstant) { s32 cycles = Num ? 
diff --git a/src/ARMJIT_x64/ARMJIT_Compiler.h b/src/ARMJIT_x64/ARMJIT_Compiler.h
index f2fc301..09ac257 100644
--- a/src/ARMJIT_x64/ARMJIT_Compiler.h
+++ b/src/ARMJIT_x64/ARMJIT_Compiler.h
@@ -52,10 +52,7 @@ public:
 
     void Reset();
 
-    void LinkBlock(u32 offset, JitBlockEntry entry);
-    void UnlinkBlock(u32 offset);
-
-    JitBlockEntry CompileBlock(u32 translatedAddr, ARM* cpu, bool thumb, FetchedInstr instrs[], int instrsCount);
+    JitBlockEntry CompileBlock(ARM* cpu, bool thumb, FetchedInstr instrs[], int instrsCount);
 
     void LoadReg(int reg, Gen::X64Reg nativeReg);
     void SaveReg(int reg, Gen::X64Reg nativeReg);
@@ -202,6 +199,10 @@ public:
         SetCodePtr(FarCode);
     }
 
+    bool IsJITFault(u64 addr);
+
+    s32 RewriteMemAccess(u64 pc);
+
     u8* FarCode;
     u8* NearCode;
     u32 FarSize;
@@ -216,8 +217,6 @@ public:
     bool Exit;
     bool IrregularCycles;
 
-    void* BranchStub[2];
-
     void* ReadBanked;
     void* WriteBanked;
 
diff --git a/src/ARMJIT_x64/ARMJIT_LoadStore.cpp b/src/ARMJIT_x64/ARMJIT_LoadStore.cpp
index cf0bd23..0bf2f83 100644
--- a/src/ARMJIT_x64/ARMJIT_LoadStore.cpp
+++ b/src/ARMJIT_x64/ARMJIT_LoadStore.cpp
@@ -15,6 +15,11 @@ int squeezePointer(T* ptr)
     return truncated;
 }
 
+s32 Compiler::RewriteMemAccess(u64 pc)
+{
+    return 0;
+}
+
 /*
     According to DeSmuME and my own research, approx. 99% (seriously, that's an empirical number)
     of all memory load and store instructions always access addresses in the same region as
@@ -27,14 +32,15 @@ int squeezePointer(T* ptr)
 
 bool Compiler::Comp_MemLoadLiteral(int size, int rd, u32 addr)
 {
-    u32 translatedAddr = Num == 0 ? TranslateAddr9(addr) : TranslateAddr7(addr);
+    return false;
+    //u32 translatedAddr = Num == 0 ? TranslateAddr9(addr) : TranslateAddr7(addr);
 
-    int invalidLiteralIdx = InvalidLiterals.Find(translatedAddr);
+    /*int invalidLiteralIdx = InvalidLiterals.Find(translatedAddr);
     if (invalidLiteralIdx != -1)
     {
         InvalidLiterals.Remove(invalidLiteralIdx);
         return false;
-    }
+    }*/
 
     u32 val;
     // make sure arm7 bios is accessible
@@ -95,7 +101,7 @@ void Compiler::Comp_MemAccess(int rd, int rn, const ComplexOperand& op2, int siz
             staticAddress = RegCache.LiteralValues[rn] + op2.Imm * ((flags & memop_SubtractOffset) ? -1 : 1);
     OpArg rdMapped = MapReg(rd);
 
-    if (!addrIsStatic)
+    if (true)
     {
         OpArg rnMapped = MapReg(rn);
         if (Thumb && rn == 15)
@@ -145,7 +151,7 @@ void Compiler::Comp_MemAccess(int rd, int rn, const ComplexOperand& op2, int siz
             MOV(32, rnMapped, R(finalAddr));
     }
 
-    int expectedTarget = Num == 0
+    /*int expectedTarget = Num == 0
         ? ClassifyAddress9(addrIsStatic ? staticAddress : CurInstr.DataRegion)
         : ClassifyAddress7(addrIsStatic ? staticAddress : CurInstr.DataRegion);
 
     if (CurInstr.Cond() < 0xE)
@@ -184,8 +190,8 @@ void Compiler::Comp_MemAccess(int rd, int rn, const ComplexOperand& op2, int siz
 
         if (addrIsStatic && compileSlowPath)
             MOV(32, R(RSCRATCH3), Imm32(staticAddress));
-
-    if (compileFastPath)
+*/
+    /*if (compileFastPath)
     {
         FixupBranch slowPath;
         if (compileSlowPath)
@@ -357,15 +363,16 @@ void Compiler::Comp_MemAccess(int rd, int rn, const ComplexOperand& op2, int siz
             SetJumpTarget(slowPath);
         }
     }
-
-    if (compileSlowPath)
+*/
+    if (true)
    {
        PushRegs(false);
 
        if (Num == 0)
        {
-            MOV(32, R(ABI_PARAM2), R(RSCRATCH3));
-            MOV(64, R(ABI_PARAM1), R(RCPU));
+            MOV(64, R(ABI_PARAM2), R(RCPU));
+            if (ABI_PARAM1 != RSCRATCH3)
+                MOV(32, R(ABI_PARAM1), R(RSCRATCH3));
             if (flags & memop_Store)
             {
                 MOV(32, R(ABI_PARAM3), rdMapped);
@@ -423,13 +430,13 @@ void Compiler::Comp_MemAccess(int rd, int rn, const ComplexOperand& op2, int siz
                 MOVZX(32, size, rdMapped.GetSimpleReg(), R(RSCRATCH));
         }
     }
-
+/*
     if (compileFastPath && compileSlowPath)
     {
         FixupBranch ret = J(true);
         SwitchToNearCode();
         SetJumpTarget(ret);
-    }
+    }*/
 
     if (!(flags & memop_Store) && rd == 15)
     {
@@ -458,7 +465,7 @@ s32 Compiler::Comp_MemAccessBlock(int rn, BitSet16 regs, bool store, bool preinc
     u32 stackAlloc = ((regsCount + 1) & ~1) * 8;
 #endif
     u32 allocOffset = stackAlloc - regsCount * 8;
-
+/*
     int expectedTarget = Num == 0
         ? ClassifyAddress9(CurInstr.DataRegion)
         : ClassifyAddress7(CurInstr.DataRegion);
@@ -479,7 +486,7 @@ s32 Compiler::Comp_MemAccessBlock(int rn, BitSet16 regs, bool store, bool preinc
     default:
         break;
     }
-
+*/
     if (!store)
         Comp_AddCycles_CDI();
     else
@@ -492,7 +499,7 @@ s32 Compiler::Comp_MemAccessBlock(int rn, BitSet16 regs, bool store, bool preinc
     }
     else
         MOV(32, R(RSCRATCH4), MapReg(rn));
-
+/*
     if (compileFastPath)
     {
         assert(!usermode);
@@ -570,7 +577,7 @@ s32 Compiler::Comp_MemAccessBlock(int rn, BitSet16 regs, bool store, bool preinc
 
         SwitchToFarCode();
         SetJumpTarget(slowPath);
-    }
+    }*/
 
     if (!store)
     {
@@ -696,13 +703,13 @@ s32 Compiler::Comp_MemAccessBlock(int rn, BitSet16 regs, bool store, bool preinc
 
         PopRegs(false);
     }
-
+/*
     if (compileFastPath)
     {
         FixupBranch ret = J(true);
         SwitchToNearCode();
         SetJumpTarget(ret);
-    }
+    }*/
 
     if (!store && regs[15])
     {
diff --git a/src/ARM_InstrInfo.cpp b/src/ARM_InstrInfo.cpp
index b50e821..ccec951 100644
--- a/src/ARM_InstrInfo.cpp
+++ b/src/ARM_InstrInfo.cpp
@@ -206,15 +206,14 @@ enum {
     T_ReadR14 = 1 << 13,
     T_WriteR14 = 1 << 14,
 
-    T_PopPC = 1 << 15,
-
-    T_SetNZ = 1 << 16,
-    T_SetCV = 1 << 17,
-    T_SetMaybeC = 1 << 18,
-    T_ReadC = 1 << 19,
-    T_SetC = 1 << 20,
+    T_SetNZ = 1 << 15,
+    T_SetCV = 1 << 16,
+    T_SetMaybeC = 1 << 17,
+    T_ReadC = 1 << 18,
+    T_SetC = 1 << 19,
 
-    T_WriteMem = 1 << 21,
+    T_WriteMem = 1 << 20,
+    T_LoadMem = 1 << 21,
 };
 
 const u32 T_LSL_IMM = T_SetNZ | T_SetMaybeC | T_Write0 | T_Read3 | tk(tk_LSL_IMM);
@@ -256,31 +255,31 @@ const u32 T_ADD_PCREL = T_Write8 | tk(tk_ADD_PCREL);
 const u32 T_ADD_SPREL = T_Write8 | T_ReadR13 | tk(tk_ADD_SPREL);
 const u32 T_ADD_SP = T_WriteR13 | T_ReadR13 | tk(tk_ADD_SP);
 
-const u32 T_LDR_PCREL = T_Write8 | tk(tk_LDR_PCREL);
+const u32 T_LDR_PCREL = T_Write8 | T_LoadMem | tk(tk_LDR_PCREL);
 
 const u32 T_STR_REG = T_Read0 | T_Read3 | T_Read6 | T_WriteMem | tk(tk_STR_REG);
 const u32 T_STRB_REG = T_Read0 | T_Read3 | T_Read6 | T_WriteMem | tk(tk_STRB_REG);
-const u32 T_LDR_REG = T_Write0 | T_Read3 | T_Read6 | tk(tk_LDR_REG);
-const u32 T_LDRB_REG = T_Write0 | T_Read3 | T_Read6 | tk(tk_LDRB_REG);
+const u32 T_LDR_REG = T_Write0 | T_Read3 | T_Read6 | T_LoadMem | tk(tk_LDR_REG);
+const u32 T_LDRB_REG = T_Write0 | T_Read3 | T_Read6 | T_LoadMem | tk(tk_LDRB_REG);
 
 const u32 T_STRH_REG = T_Read0 | T_Read3 | T_Read6 | T_WriteMem | tk(tk_STRH_REG);
-const u32 T_LDRSB_REG = T_Write0 | T_Read3 | T_Read6 | tk(tk_LDRSB_REG);
-const u32 T_LDRH_REG = T_Write0 | T_Read3 | T_Read6 | tk(tk_LDRH_REG);
-const u32 T_LDRSH_REG = T_Write0 | T_Read3 | T_Read6 | tk(tk_LDRSH_REG);
+const u32 T_LDRSB_REG = T_Write0 | T_Read3 | T_Read6 | T_LoadMem | tk(tk_LDRSB_REG);
+const u32 T_LDRH_REG = T_Write0 | T_Read3 | T_Read6 | T_LoadMem | tk(tk_LDRH_REG);
+const u32 T_LDRSH_REG = T_Write0 | T_Read3 | T_Read6 | T_LoadMem | tk(tk_LDRSH_REG);
 
 const u32 T_STR_IMM = T_Read0 | T_Read3 | T_WriteMem | tk(tk_STR_IMM);
-const u32 T_LDR_IMM = T_Write0 | T_Read3 | tk(tk_LDR_IMM);
+const u32 T_LDR_IMM = T_Write0 | T_Read3 | T_LoadMem | tk(tk_LDR_IMM);
 const u32 T_STRB_IMM = T_Read0 | T_Read3 | T_WriteMem | tk(tk_STRB_IMM);
-const u32 T_LDRB_IMM = T_Write0 | T_Read3 | tk(tk_LDRB_IMM);
+const u32 T_LDRB_IMM = T_Write0 | T_Read3 | T_LoadMem | tk(tk_LDRB_IMM);
 const u32 T_STRH_IMM = T_Read0 | T_Read3 | T_WriteMem | tk(tk_STRH_IMM);
-const u32 T_LDRH_IMM = T_Write0 | T_Read3 | tk(tk_LDRH_IMM);
+const u32 T_LDRH_IMM = T_Write0 | T_Read3 | T_LoadMem | tk(tk_LDRH_IMM);
 
 const u32 T_STR_SPREL = T_Read8 | T_ReadR13 | T_WriteMem | tk(tk_STR_SPREL);
-const u32 T_LDR_SPREL = T_Write8 | T_ReadR13 | tk(tk_LDR_SPREL);
+const u32 T_LDR_SPREL = T_Write8 | T_ReadR13 | T_LoadMem | tk(tk_LDR_SPREL);
 
 const u32 T_PUSH = T_ReadR13 | T_WriteR13 | T_WriteMem | tk(tk_PUSH);
-const u32 T_POP = T_PopPC | T_ReadR13 | T_WriteR13 | tk(tk_POP);
+const u32 T_POP = T_ReadR13 | T_WriteR13 | T_LoadMem | tk(tk_POP);
 
-const u32 T_LDMIA = T_Read8 | T_Write8 | tk(tk_LDMIA);
+const u32 T_LDMIA = T_Read8 | T_Write8 | T_LoadMem | tk(tk_LDMIA);
 const u32 T_STMIA = T_Read8 | T_Write8 | T_WriteMem | tk(tk_STMIA);
 
 const u32 T_BCOND = T_BranchAlways | tk(tk_BCOND);
@@ -347,7 +346,7 @@ Info Decode(bool thumb, u32 num, u32 instr)
         if (data & T_BranchAlways)
             res.DstRegs |= (1 << 15);
 
-        if (data & T_PopPC && instr & (1 << 8))
+        if (res.Kind == tk_POP && instr & (1 << 8))
             res.DstRegs |= 1 << 15;
 
         if (data & T_SetNZ)
@@ -364,11 +363,18 @@ Info Decode(bool thumb, u32 num, u32 instr)
         if (data & T_WriteMem)
             res.SpecialKind = special_WriteMem;
 
-        if (res.Kind == ARMInstrInfo::tk_LDR_PCREL)
+        if (data & T_LoadMem)
         {
-            if (!Config::JIT_LiteralOptimisations)
-                res.SrcRegs |= 1 << 15;
-            res.SpecialKind = special_LoadLiteral;
+            if (res.Kind == tk_LDR_PCREL)
+            {
+                if (!Config::JIT_LiteralOptimisations)
+                    res.SrcRegs |= 1 << 15;
+                res.SpecialKind = special_LoadLiteral;
+            }
+            else
+            {
+                res.SpecialKind = special_LoadMem;
+            }
         }
 
         if (res.Kind == tk_LDMIA || res.Kind == tk_POP)
@@ -401,11 +407,17 @@ Info Decode(bool thumb, u32 num, u32 instr)
     else if ((instr >> 28) == 0xF)
         data = ak(ak_Nop);
 
-    if (data & A_UnkOnARM7 && num != 0)
+    if (data & A_UnkOnARM7 && num == 1)
         data = A_UNK;
 
     res.Kind = (data >> 22) & 0x1FF;
 
+    if (res.Kind >= ak_SMLAxy && res.Kind <= ak_SMULxy && num == 1)
+    {
+        data = ak(ak_Nop);
+        res.Kind = ak_Nop;
+    }
+
     if (res.Kind == ak_MCR)
     {
         u32 cn = (instr >> 16) & 0xF;
@@ -490,8 +502,13 @@ Info Decode(bool thumb, u32 num, u32 instr)
     if (data & A_WriteMem)
         res.SpecialKind = special_WriteMem;
 
-    if ((data & A_LoadMem) && res.SrcRegs == (1 << 15))
-        res.SpecialKind = special_LoadLiteral;
+    if (data & A_LoadMem)
+    {
+        if (res.SrcRegs == (1 << 15))
+            res.SpecialKind = special_LoadLiteral;
+        else
+            res.SpecialKind = special_LoadMem;
+    }
 
     if (res.Kind == ak_LDM)
     {
diff --git a/src/ARM_InstrInfo.h b/src/ARM_InstrInfo.h
index 6ab4929..a702435 100644
--- a/src/ARM_InstrInfo.h
+++ b/src/ARM_InstrInfo.h
@@ -232,6 +232,7 @@ enum
 {
     special_NotSpecialAtAll = 0,
     special_WriteMem,
+    special_LoadMem,
     special_WaitForInterrupt,
     special_LoadLiteral
 };
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index f35b3e9..84bbc2b 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -55,9 +55,11 @@ if (ENABLE_JIT)
     enable_language(ASM)
 
     target_sources(core PRIVATE
-        ARMJIT.cpp
         ARM_InstrInfo.cpp
 
+        ARMJIT.cpp
+        ARMJIT_Memory.cpp
+
         dolphin/CommonFuncs.cpp
     )
@@ -85,6 +87,8 @@ if (ENABLE_JIT)
             ARMJIT_A64/ARMJIT_ALU.cpp
             ARMJIT_A64/ARMJIT_LoadStore.cpp
             ARMJIT_A64/ARMJIT_Branch.cpp
+
+            ARMJIT_A64/ARMJIT_Linkage.s
         )
     endif()
 endif()
diff --git a/src/CP15.cpp b/src/CP15.cpp
index 225847e..3d64259 100644
--- a/src/CP15.cpp
+++ b/src/CP15.cpp
@@ -22,6 +22,7 @@
 #include "DSi.h"
 #include "ARM.h"
 #include "ARMJIT.h"
+#include "ARMJIT_Memory.h"
 
 // access timing for cached regions
 
@@ -42,8 +43,8 @@ void ARMv5::CP15Reset()
     DTCMSetting = 0;
     ITCMSetting = 0;
 
-    memset(ITCM, 0, 0x8000);
-    memset(DTCM, 0, 0x4000);
+    memset(ITCM, 0, ITCMPhysicalSize);
+    memset(DTCM, 0, DTCMPhysicalSize);
 
     ITCMSize = 0;
     DTCMBase = 0xFFFFFFFF;
@@ -75,8 +76,8 @@ void ARMv5::CP15DoSavestate(Savestate* file)
     file->Var32(&DTCMSetting);
     file->Var32(&ITCMSetting);
 
-    file->VarArray(ITCM, 0x8000);
-    file->VarArray(DTCM, 0x4000);
+    file->VarArray(ITCM, ITCMPhysicalSize);
+    file->VarArray(DTCM, DTCMPhysicalSize);
 
     file->Var32(&PU_CodeCacheable);
     file->Var32(&PU_DataCacheable);
@@ -98,36 +99,30 @@ void ARMv5::CP15DoSavestate(Savestate* file)
 
 void ARMv5::UpdateDTCMSetting()
 {
-#ifdef JIT_ENABLED
-    u32 oldDTCMBase = DTCMBase;
-    u32 oldDTCMSize = DTCMSize;
-#endif
+    u32 newDTCMBase;
+    u32 newDTCMSize;
     if (CP15Control & (1<<16))
     {
-        DTCMBase = DTCMSetting & 0xFFFFF000;
-        DTCMSize = 0x200 << ((DTCMSetting >> 1) & 0x1F);
+        newDTCMBase = DTCMSetting & 0xFFFFF000;
+        newDTCMSize = 0x200 << ((DTCMSetting >> 1) & 0x1F);
         //printf("DTCM [%08X] enabled at %08X, size %X\n", DTCMSetting, DTCMBase, DTCMSize);
     }
     else
     {
-        DTCMBase = 0xFFFFFFFF;
-        DTCMSize = 0;
+        newDTCMBase = 0xFFFFFFFF;
+        newDTCMSize = 0;
         //printf("DTCM disabled\n");
     }
-#ifdef JIT_ENABLED
-    if (oldDTCMBase != DTCMBase || oldDTCMSize != DTCMSize)
+    if (newDTCMBase != DTCMBase || newDTCMSize != DTCMSize)
     {
-        ARMJIT::UpdateMemoryStatus9(oldDTCMBase, oldDTCMBase + oldDTCMSize);
-        ARMJIT::UpdateMemoryStatus9(DTCMBase, DTCMBase + DTCMSize);
+        ARMJIT_Memory::RemapDTCM(newDTCMBase, newDTCMSize);
+        DTCMBase = newDTCMBase;
+        DTCMSize = newDTCMSize;
     }
-#endif
 }
 
 void ARMv5::UpdateITCMSetting()
 {
-#ifdef JIT_ENABLED
-    u32 oldITCMSize = ITCMSize;
-#endif
     if (CP15Control & (1<<18))
     {
         ITCMSize = 0x200 << ((ITCMSetting >> 1) & 0x1F);
@@ -138,10 +133,6 @@ void ARMv5::UpdateITCMSetting()
         ITCMSize = 0;
         //printf("ITCM disabled\n");
     }
-#ifdef JIT_ENABLED
-    if (oldITCMSize != ITCMSize)
-        ARMJIT::UpdateMemoryStatus9(0, std::max(oldITCMSize, ITCMSize));
-#endif
 }
 
 
@@ -581,12 +572,15 @@ void ARMv5::CP15Write(u32 id, u32 val)
 
     case 0x750:
         ICacheInvalidateAll();
+        //Halt(255);
        return;
    case 0x751:
        ICacheInvalidateByAddr(val);
+        //Halt(255);
        return;
    case 0x752:
        printf("CP15: ICACHE INVALIDATE WEIRD. %08X\n", val);
+        //Halt(255);
        return;
 
 
@@ -723,7 +717,7 @@ u32 ARMv5::CodeRead32(u32 addr, bool branch)
     if (addr < ITCMSize)
     {
         CodeCycles = 1;
-        return *(u32*)&ITCM[addr & 0x7FFF];
+        return *(u32*)&ITCM[addr & (ITCMPhysicalSize - 1)];
     }
 
     CodeCycles = RegionCodeCycles;
@@ -750,13 +744,13 @@ void ARMv5::DataRead8(u32 addr, u32* val)
     if (addr < ITCMSize)
     {
         DataCycles = 1;
-        *val = *(u8*)&ITCM[addr & 0x7FFF];
+        *val = *(u8*)&ITCM[addr & (ITCMPhysicalSize - 1)];
         return;
     }
     if (addr >= DTCMBase && addr < (DTCMBase + DTCMSize))
     {
         DataCycles = 1;
-        *val = *(u8*)&DTCM[(addr - DTCMBase) & 0x3FFF];
+        *val = *(u8*)&DTCM[(addr - DTCMBase) & (DTCMPhysicalSize - 1)];
         return;
     }
 
@@ -773,13 +767,13 @@ void ARMv5::DataRead16(u32 addr, u32* val)
     if (addr < ITCMSize)
     {
         DataCycles = 1;
-        *val = *(u16*)&ITCM[addr & 0x7FFF];
+        *val = *(u16*)&ITCM[addr & (ITCMPhysicalSize - 1)];
         return;
     }
     if (addr >= DTCMBase && addr < (DTCMBase + DTCMSize))
     {
         DataCycles = 1;
-        *val = *(u16*)&DTCM[(addr - DTCMBase) & 0x3FFF];
+        *val = *(u16*)&DTCM[(addr - DTCMBase) & (DTCMPhysicalSize - 1)];
         return;
     }
 
@@ -796,13 +790,13 @@ void ARMv5::DataRead32(u32 addr, u32* val)
     if (addr < ITCMSize)
     {
         DataCycles = 1;
-        *val = *(u32*)&ITCM[addr & 0x7FFF];
+        *val = *(u32*)&ITCM[addr & (ITCMPhysicalSize - 1)];
         return;
     }
     if (addr >= DTCMBase && addr < (DTCMBase + DTCMSize))
     {
         DataCycles = 1;
-        *val = *(u32*)&DTCM[(addr - DTCMBase) & 0x3FFF];
+        *val = *(u32*)&DTCM[(addr - DTCMBase) & (DTCMPhysicalSize - 1)];
         return;
     }
 
@@ -817,13 +811,13 @@ void ARMv5::DataRead32S(u32 addr, u32* val)
     if (addr < ITCMSize)
     {
         DataCycles += 1;
-        *val = *(u32*)&ITCM[addr & 0x7FFF];
+        *val = *(u32*)&ITCM[addr & (ITCMPhysicalSize - 1)];
         return;
     }
     if (addr >= DTCMBase && addr < (DTCMBase + DTCMSize))
     {
         DataCycles += 1;
-        *val = *(u32*)&DTCM[(addr - DTCMBase) & 0x3FFF];
+        *val = *(u32*)&DTCM[(addr - DTCMBase) & (DTCMPhysicalSize - 1)];
         return;
     }
 
@@ -838,16 +832,16 @@ void ARMv5::DataWrite8(u32 addr, u8 val)
     if (addr < ITCMSize)
     {
         DataCycles = 1;
-        *(u8*)&ITCM[addr & 0x7FFF] = val;
+        *(u8*)&ITCM[addr & (ITCMPhysicalSize - 1)] = val;
 #ifdef JIT_ENABLED
-        ARMJIT::InvalidateITCMIfNecessary(addr);
+        ARMJIT::CheckAndInvalidate<0, ARMJIT_Memory::memregion_ITCM>(addr);
 #endif
         return;
     }
     if (addr >= DTCMBase && addr < (DTCMBase + DTCMSize))
     {
         DataCycles = 1;
-        *(u8*)&DTCM[(addr - DTCMBase) & 0x3FFF] = val;
+        *(u8*)&DTCM[(addr - DTCMBase) & (DTCMPhysicalSize - 1)] = val;
         return;
     }
 
@@ -864,16 +858,16 @@ void ARMv5::DataWrite16(u32 addr, u16 val)
     if (addr < ITCMSize)
     {
         DataCycles = 1;
-        *(u16*)&ITCM[addr & 0x7FFF] = val;
+        *(u16*)&ITCM[addr & (ITCMPhysicalSize - 1)] = val;
 #ifdef JIT_ENABLED
-        ARMJIT::InvalidateITCMIfNecessary(addr);
+        ARMJIT::CheckAndInvalidate<0, ARMJIT_Memory::memregion_ITCM>(addr);
 #endif
         return;
     }
     if (addr >= DTCMBase && addr < (DTCMBase + DTCMSize))
     {
         DataCycles = 1;
-        *(u16*)&DTCM[(addr - DTCMBase) & 0x3FFF] = val;
+        *(u16*)&DTCM[(addr - DTCMBase) & (DTCMPhysicalSize - 1)] = val;
         return;
     }
 
@@ -890,16 +884,16 @@ void ARMv5::DataWrite32(u32 addr, u32 val)
     if (addr < ITCMSize)
     {
         DataCycles = 1;
-        *(u32*)&ITCM[addr & 0x7FFF] = val;
+        *(u32*)&ITCM[addr & (ITCMPhysicalSize - 1)] = val;
 #ifdef JIT_ENABLED
-        ARMJIT::InvalidateITCMIfNecessary(addr);
+        ARMJIT::CheckAndInvalidate<0, ARMJIT_Memory::memregion_ITCM>(addr);
 #endif
         return;
     }
     if (addr >= DTCMBase && addr < (DTCMBase + DTCMSize))
     {
         DataCycles = 1;
-        *(u32*)&DTCM[(addr - DTCMBase) & 0x3FFF] = val;
+        *(u32*)&DTCM[(addr - DTCMBase) & (DTCMPhysicalSize - 1)] = val;
         return;
     }
 
@@ -914,16 +908,16 @@ void ARMv5::DataWrite32S(u32 addr, u32 val)
     if (addr < ITCMSize)
     {
         DataCycles += 1;
-        *(u32*)&ITCM[addr & 0x7FFF] = val;
+        *(u32*)&ITCM[addr & (ITCMPhysicalSize - 1)] = val;
 #ifdef JIT_ENABLED
-        ARMJIT::InvalidateITCMIfNecessary(addr);
+        ARMJIT::CheckAndInvalidate<0, ARMJIT_Memory::memregion_ITCM>(addr);
 #endif
         return;
     }
     if (addr >= DTCMBase && addr < (DTCMBase + DTCMSize))
     {
         DataCycles += 1;
-        *(u32*)&DTCM[(addr - DTCMBase) & 0x3FFF] = val;
+        *(u32*)&DTCM[(addr - DTCMBase) & (DTCMPhysicalSize - 1)] = val;
         return;
     }
 
diff --git a/src/Config.cpp b/src/Config.cpp
index 22e9c11..edf84f2 100644
--- a/src/Config.cpp
+++ b/src/Config.cpp
@@ -47,8 +47,9 @@ int JIT_LiteralOptimisations = true;
 #ifdef JIT_ENABLED
 int JIT_Enable = false;
 int JIT_MaxBlockSize = 32;
-int JIT_BrancheOptimisations = 2;
+int JIT_BrancheOptimisations = true;
 int JIT_LiteralOptimisations = true;
+int JIT_FastMemory = true;
 #endif
 
 ConfigEntry ConfigFile[] =
@@ -72,8 +73,9 @@ ConfigEntry ConfigFile[] =
 #ifdef JIT_ENABLED
     {"JIT_Enable", 0, &JIT_Enable, 0, NULL, 0},
     {"JIT_MaxBlockSize", 0, &JIT_MaxBlockSize, 32, NULL, 0},
-    {"JIT_BranchOptimisations", 0, &JIT_BrancheOptimisations, 2, NULL, 0},
+    {"JIT_BranchOptimisations", 0, &JIT_BrancheOptimisations, 1, NULL, 0},
     {"JIT_LiteralOptimisations", 0, &JIT_LiteralOptimisations, 1, NULL, 0},
+    {"JIT_FastMem", 0, &JIT_FastMemory, 1, NULL, 0},
 #endif
 
     {"", -1, NULL, 0, NULL, 0}
diff --git a/src/Config.h b/src/Config.h
index 31fa67a..7b19a4b 100644
--- a/src/Config.h
+++ b/src/Config.h
@@ -63,6 +63,7 @@ extern int JIT_Enable;
 extern int JIT_MaxBlockSize;
 extern int JIT_BrancheOptimisations;
 extern int JIT_LiteralOptimisations;
+extern int JIT_FastMemory;
 #endif
 
 }
diff --git a/src/NDS.cpp b/src/NDS.cpp
index 657241f..3d65482 100644
--- a/src/NDS.cpp
+++ b/src/NDS.cpp
@@ -33,6 +33,7 @@
 #include "AREngine.h"
 #include "Platform.h"
 #include "ARMJIT.h"
+#include "ARMJIT_Memory.h"
 
 #include "DSi.h"
 #include "DSi_SPI_TSC.h"
@@ -94,17 +95,17 @@ u32 CPUStop;
 u8 ARM9BIOS[0x1000];
 u8 ARM7BIOS[0x4000];
 
-u8 MainRAM[0x1000000];
+u8* MainRAM;
 u32 MainRAMMask;
 
-u8 SharedWRAM[0x8000];
+u8* SharedWRAM;
 u8 WRAMCnt;
-u8* SWRAM_ARM9;
-u8* SWRAM_ARM7;
-u32 SWRAM_ARM9Mask;
-u32 SWRAM_ARM7Mask;
 
-u8 ARM7WRAM[0x10000];
+// putting them together so they're always next to each other
+MemRegion SWRAM_ARM9;
+MemRegion SWRAM_ARM7;
+
+u8* ARM7WRAM;
 
 u16 ExMemCnt[2];
@@ -171,6 +172,10 @@ bool Init()
 
 #ifdef JIT_ENABLED
     ARMJIT::Init();
+#else
+    MainRAM = new u8[MainRAMSize];
+    ARM7WRAM = new u8[ARM7WRAMSize];
+    SharedWRAM = new u8[SharedWRAMSize];
 #endif
 
     DMAs[0] = new DMA(0, 0);
@@ -485,6 +490,10 @@ void Reset()
         printf("ARM7 BIOS loaded\n");
         fclose(f);
     }
+
+#ifdef JIT_ENABLED
+    ARMJIT::Reset();
+#endif
 
     if (ConsoleType == 1)
     {
@@ -510,7 +519,7 @@ void Reset()
 
     InitTimings();
 
-    memset(MainRAM, 0, 0x1000000);
+    memset(MainRAM, 0, MainRAMMask + 1);
     memset(SharedWRAM, 0, 0x8000);
     memset(ARM7WRAM, 0, 0x10000);
 
@@ -587,10 +596,6 @@ void Reset()
     }
 
     AREngine::Reset();
-
-#ifdef JIT_ENABLED
-    ARMJIT::Reset();
-#endif
 }
 
 void Stop()
@@ -705,7 +710,7 @@ bool DoSavestate(Savestate* file)
 
     file->VarArray(MainRAM, 0x400000);
     file->VarArray(SharedWRAM, 0x8000);
-    file->VarArray(ARM7WRAM, 0x10000);
+    file->VarArray(ARM7WRAM, ARM7WRAMSize);
 
     file->VarArray(ExMemCnt, 2*sizeof(u16));
     file->VarArray(ROMSeed0, 2*8);
@@ -1128,43 +1133,40 @@ void MapSharedWRAM(u8 val)
     if (val == WRAMCnt)
         return;
 
+    ARMJIT_Memory::RemapSWRAM();
+
     WRAMCnt = val;
 
     switch (WRAMCnt & 0x3)
     {
     case 0:
-        SWRAM_ARM9 = &SharedWRAM[0];
-        SWRAM_ARM9Mask = 0x7FFF;
-        SWRAM_ARM7 = NULL;
-        SWRAM_ARM7Mask = 0;
+        SWRAM_ARM9.Mem = &SharedWRAM[0];
+        SWRAM_ARM9.Mask = 0x7FFF;
+        SWRAM_ARM7.Mem = NULL;
+        SWRAM_ARM7.Mask = 0;
         break;
 
     case 1:
-        SWRAM_ARM9 = &SharedWRAM[0x4000];
-        SWRAM_ARM9Mask = 0x3FFF;
-        SWRAM_ARM7 = &SharedWRAM[0];
-        SWRAM_ARM7Mask = 0x3FFF;
+        SWRAM_ARM9.Mem = &SharedWRAM[0x4000];
+        SWRAM_ARM9.Mask = 0x3FFF;
+        SWRAM_ARM7.Mem = &SharedWRAM[0];
+        SWRAM_ARM7.Mask = 0x3FFF;
         break;
 
     case 2:
-        SWRAM_ARM9 = &SharedWRAM[0];
-        SWRAM_ARM9Mask = 0x3FFF;
-        SWRAM_ARM7 = &SharedWRAM[0x4000];
-        SWRAM_ARM7Mask = 0x3FFF;
+        SWRAM_ARM9.Mem = &SharedWRAM[0];
+        SWRAM_ARM9.Mask = 0x3FFF;
+        SWRAM_ARM7.Mem = &SharedWRAM[0x4000];
+        SWRAM_ARM7.Mask = 0x3FFF;
        break;
 
    case 3:
-        SWRAM_ARM9 = NULL;
-        SWRAM_ARM9Mask = 0;
-        SWRAM_ARM7 = &SharedWRAM[0];
-        SWRAM_ARM7Mask = 0x7FFF;
+        SWRAM_ARM9.Mem = NULL;
+        SWRAM_ARM9.Mask = 0;
+        SWRAM_ARM7.Mem = &SharedWRAM[0];
+        SWRAM_ARM7.Mask = 0x7FFF;
         break;
     }
-
-#ifdef JIT_ENABLED
-    ARMJIT::UpdateMemoryStatus9(0x3000000, 0x3000000 + 0x1000000);
-    ARMJIT::UpdateMemoryStatus7(0x3000000, 0x3000000 + 0x1000000);
-#endif
 }
@@ -1835,12 +1837,12 @@ u8 ARM9Read8(u32 addr)
     switch (addr & 0xFF000000)
     {
     case 0x02000000:
-        return *(u8*)&MainRAM[addr & MainRAMMask];
+        return *(u8*)&MainRAM[addr & (MainRAMSize - 1)];
 
     case 0x03000000:
-        if (SWRAM_ARM9)
+        if (SWRAM_ARM9.Mem)
         {
-            return *(u8*)&SWRAM_ARM9[addr & SWRAM_ARM9Mask];
+            return *(u8*)&SWRAM_ARM9.Mem[addr & SWRAM_ARM9.Mask];
         }
         else
         {
@@ -1900,12 +1902,12 @@ u16 ARM9Read16(u32 addr)
     switch (addr & 0xFF000000)
     {
     case 0x02000000:
-        return *(u16*)&MainRAM[addr & MainRAMMask];
+        return *(u16*)&MainRAM[addr & (MainRAMSize - 1)];
 
     case 0x03000000:
-        if (SWRAM_ARM9)
+        if (SWRAM_ARM9.Mem)
         {
-            return *(u16*)&SWRAM_ARM9[addr & SWRAM_ARM9Mask];
+            return *(u16*)&SWRAM_ARM9.Mem[addr & SWRAM_ARM9.Mask];
         }
         else
         {
@@ -1968,9 +1970,9 @@ u32 ARM9Read32(u32 addr)
         return *(u32*)&MainRAM[addr & MainRAMMask];
 
     case 0x03000000:
-        if (SWRAM_ARM9)
+        if (SWRAM_ARM9.Mem)
         {
-            return *(u32*)&SWRAM_ARM9[addr & SWRAM_ARM9Mask];
+            return *(u32*)&SWRAM_ARM9.Mem[addr & SWRAM_ARM9.Mask];
         }
         else
         {
@@ -2026,7 +2028,7 @@ void ARM9Write8(u32 addr, u8 val)
     {
     case 0x02000000:
 #ifdef JIT_ENABLED
-        ARMJIT::InvalidateMainRAMIfNecessary(addr);
+        ARMJIT::CheckAndInvalidate<0, ARMJIT_Memory::memregion_MainRAM>(addr);
 #endif
         *(u8*)&MainRAM[addr & MainRAMMask] = val;
 #ifdef JIT_ENABLED
@@ -2035,12 +2037,12 @@ void ARM9Write8(u32 addr, u8 val)
         return;
 
     case 0x03000000:
-        if (SWRAM_ARM9)
+        if (SWRAM_ARM9.Mem)
         {
 #ifdef JIT_ENABLED
-            ARMJIT::InvalidateSWRAM9IfNecessary(addr);
+            ARMJIT::CheckAndInvalidate<0, ARMJIT_Memory::memregion_SWRAM>(addr);
 #endif
-            *(u8*)&SWRAM_ARM9[addr & SWRAM_ARM9Mask] = val;
+            *(u8*)&SWRAM_ARM9.Mem[addr & SWRAM_ARM9.Mask] = val;
         }
         return;
 
@@ -2085,7 +2087,7 @@ void ARM9Write16(u32 addr, u16 val)
     {
     case 0x02000000:
 #ifdef JIT_ENABLED
-        ARMJIT::InvalidateMainRAMIfNecessary(addr);
+        ARMJIT::CheckAndInvalidate<0, ARMJIT_Memory::memregion_MainRAM>(addr);
 #endif
         *(u16*)&MainRAM[addr & MainRAMMask] = val;
 #ifdef JIT_ENABLED
@@ -2094,12 +2096,12 @@ void ARM9Write16(u32 addr, u16 val)
         return;
 
     case 0x03000000:
-        if (SWRAM_ARM9)
+        if (SWRAM_ARM9.Mem)
         {
 #ifdef JIT_ENABLED
-            ARMJIT::InvalidateSWRAM9IfNecessary(addr);
+            ARMJIT::CheckAndInvalidate<0, ARMJIT_Memory::memregion_SWRAM>(addr);
 #endif
-            *(u16*)&SWRAM_ARM9[addr & SWRAM_ARM9Mask] = val;
+            *(u16*)&SWRAM_ARM9.Mem[addr & SWRAM_ARM9.Mask] = val;
         }
         return;
 
@@ -2113,18 +2115,16 @@ void ARM9Write16(u32 addr, u16 val)
         return;
 
     case 0x06000000:
+#ifdef JIT_ENABLED
+        ARMJIT::CheckAndInvalidate<0, ARMJIT_Memory::memregion_VRAM>(addr);
+#endif
         switch (addr & 0x00E00000)
         {
         case 0x00000000: GPU::WriteVRAM_ABG(addr, val); return;
         case 0x00200000: GPU::WriteVRAM_BBG(addr, val); return;
         case 0x00400000: GPU::WriteVRAM_AOBJ(addr, val); return;
         case 0x00600000: GPU::WriteVRAM_BOBJ(addr, val); return;
-        default:
-#ifdef JIT_ENABLED
-            ARMJIT::InvalidateLCDCIfNecessary(addr);
-#endif
-            GPU::WriteVRAM_LCDC(addr, val);
-            return;
+        default: GPU::WriteVRAM_LCDC(addr, val); return;
         }
 
     case 0x07000000:
@@ -2165,7 +2165,7 @@ void ARM9Write32(u32 addr, u32 val)
     {
     case 0x02000000:
 #ifdef JIT_ENABLED
-        ARMJIT::InvalidateMainRAMIfNecessary(addr);
+        ARMJIT::CheckAndInvalidate<0, ARMJIT_Memory::memregion_MainRAM>(addr);
 #endif
         *(u32*)&MainRAM[addr & MainRAMMask] = val;
 #ifdef JIT_ENABLED
@@ -2174,12 +2174,12 @@ void ARM9Write32(u32 addr, u32 val)
         return ;
 
     case 0x03000000:
-        if (SWRAM_ARM9)
+        if (SWRAM_ARM9.Mem)
         {
 #ifdef JIT_ENABLED
-            ARMJIT::InvalidateSWRAM9IfNecessary(addr);
+            ARMJIT::CheckAndInvalidate<0, ARMJIT_Memory::memregion_SWRAM>(addr);
 #endif
-            *(u32*)&SWRAM_ARM9[addr & SWRAM_ARM9Mask] = val;
+            *(u32*)&SWRAM_ARM9.Mem[addr & SWRAM_ARM9.Mask] = val;
         }
         return;
 
@@ -2193,18 +2193,16 @@ void ARM9Write32(u32 addr, u32 val)
         return;
 
     case 0x06000000:
+#ifdef JIT_ENABLED
+        ARMJIT::CheckAndInvalidate<0, ARMJIT_Memory::memregion_VRAM>(addr);
+#endif
         switch (addr & 0x00E00000)
         {
         case 0x00000000: GPU::WriteVRAM_ABG(addr, val); return;
         case 0x00200000: GPU::WriteVRAM_BBG(addr, val); return;
         case 0x00400000: GPU::WriteVRAM_AOBJ(addr, val); return;
         case 0x00600000: GPU::WriteVRAM_BOBJ(addr, val); return;
-        default:
-#ifdef JIT_ENABLED
-            ARMJIT::InvalidateLCDCIfNecessary(addr);
-#endif
-            GPU::WriteVRAM_LCDC(addr, val);
-            return;
+        default: GPU::WriteVRAM_LCDC(addr, val); return;
         }
 
     case 0x07000000:
@@ -2250,10 +2248,10 @@ bool ARM9GetMemRegion(u32 addr, bool write, MemRegion* region)
         return true;
 
     case 0x03000000:
-        if (SWRAM_ARM9)
+        if (SWRAM_ARM9.Mem)
         {
-            region->Mem = SWRAM_ARM9;
-            region->Mask = SWRAM_ARM9Mask;
+            region->Mem = SWRAM_ARM9.Mem;
+            region->Mask = SWRAM_ARM9.Mask;
             return true;
         }
         break;
@@ -2292,17 +2290,17 @@ u8 ARM7Read8(u32 addr)
         return *(u8*)&MainRAM[addr & MainRAMMask];
 
     case 0x03000000:
-        if (SWRAM_ARM7)
+        if (SWRAM_ARM7.Mem)
         {
-            return *(u8*)&SWRAM_ARM7[addr & SWRAM_ARM7Mask];
+            return *(u8*)&SWRAM_ARM7.Mem[addr & SWRAM_ARM7.Mask];
         }
         else
         {
-            return *(u8*)&ARM7WRAM[addr & 0xFFFF];
+            return *(u8*)&ARM7WRAM[addr & (ARM7WRAMSize - 1)];
         }
 
     case 0x03800000:
-        return *(u8*)&ARM7WRAM[addr & 0xFFFF];
+        return *(u8*)&ARM7WRAM[addr & (ARM7WRAMSize - 1)];
 
     case 0x04000000:
         return ARM7IORead8(addr);
@@ -2352,17 +2350,17 @@ u16 ARM7Read16(u32 addr)
         return *(u16*)&MainRAM[addr & MainRAMMask];
 
     case 0x03000000:
-        if (SWRAM_ARM7)
+        if (SWRAM_ARM7.Mem)
         {
-            return *(u16*)&SWRAM_ARM7[addr & SWRAM_ARM7Mask];
+            return *(u16*)&SWRAM_ARM7.Mem[addr & SWRAM_ARM7.Mask];
         }
         else
        {
-            return *(u16*)&ARM7WRAM[addr & 0xFFFF];
+            return *(u16*)&ARM7WRAM[addr & (ARM7WRAMSize - 1)];
        }
 
     case 0x03800000:
-        return *(u16*)&ARM7WRAM[addr & 0xFFFF];
+        return *(u16*)&ARM7WRAM[addr & (ARM7WRAMSize - 1)];
 
     case 0x04000000:
         return ARM7IORead16(addr);
@@ -2419,17 +2417,17 @@ u32 ARM7Read32(u32 addr)
         return *(u32*)&MainRAM[addr & MainRAMMask];
 
     case 0x03000000:
-        if (SWRAM_ARM7)
+        if (SWRAM_ARM7.Mem)
         {
-            return *(u32*)&SWRAM_ARM7[addr & SWRAM_ARM7Mask];
+            return *(u32*)&SWRAM_ARM7.Mem[addr & SWRAM_ARM7.Mask];
         }
         else
         {
-            return *(u32*)&ARM7WRAM[addr & 0xFFFF];
+            return *(u32*)&ARM7WRAM[addr & (ARM7WRAMSize - 1)];
         }
 
     case 0x03800000:
-        return *(u32*)&ARM7WRAM[addr & 0xFFFF];
+        return *(u32*)&ARM7WRAM[addr & (ARM7WRAMSize - 1)];
 
     case 0x04000000:
         return ARM7IORead32(addr);
@@ -2474,7 +2472,7 @@ void ARM7Write8(u32 addr, u8 val)
     case 0x02000000:
     case 0x02800000:
 #ifdef JIT_ENABLED
-        ARMJIT::InvalidateMainRAMIfNecessary(addr);
+        ARMJIT::CheckAndInvalidate<1, ARMJIT_Memory::memregion_MainRAM>(addr);
 #endif
         *(u8*)&MainRAM[addr & MainRAMMask] = val;
 #ifdef JIT_ENABLED
@@ -2483,28 +2481,28 @@ void ARM7Write8(u32 addr, u8 val)
         return;
 
     case 0x03000000:
-        if (SWRAM_ARM7)
+        if (SWRAM_ARM7.Mem)
         {
 #ifdef JIT_ENABLED
-            ARMJIT::InvalidateSWRAM7IfNecessary(addr);
+            ARMJIT::CheckAndInvalidate<1, ARMJIT_Memory::memregion_SWRAM>(addr);
 #endif
-            *(u8*)&SWRAM_ARM7[addr & SWRAM_ARM7Mask] = val;
+            *(u8*)&SWRAM_ARM7.Mem[addr & SWRAM_ARM7.Mask] = val;
             return;
         }
         else
         {
 #ifdef JIT_ENABLED
-            ARMJIT::InvalidateARM7WRAMIfNecessary(addr);
+            ARMJIT::CheckAndInvalidate<1, ARMJIT_Memory::memregion_WRAM7>(addr);
 #endif
-            *(u8*)&ARM7WRAM[addr & 0xFFFF] = val;
+            *(u8*)&ARM7WRAM[addr & (ARM7WRAMSize - 1)] = val;
             return;
         }
 
     case 0x03800000:
 #ifdef JIT_ENABLED
-        ARMJIT::InvalidateARM7WRAMIfNecessary(addr);
+        ARMJIT::CheckAndInvalidate<1, ARMJIT_Memory::memregion_WRAM7>(addr);
 #endif
-        *(u8*)&ARM7WRAM[addr & 0xFFFF] = val;
+        *(u8*)&ARM7WRAM[addr & (ARM7WRAMSize - 1)] = val;
         return;
 
     case 0x04000000:
@@ -2514,7 +2512,7 @@ void ARM7Write8(u32 addr, u8 val)
     case 0x06000000:
     case 0x06800000:
 #ifdef JIT_ENABLED
-        ARMJIT::InvalidateARM7WVRAMIfNecessary(addr);
+        ARMJIT::CheckAndInvalidate<1, ARMJIT_Memory::memregion_VWRAM>(addr);
 #endif
         GPU::WriteVRAM_ARM7(addr, val);
         return;
@@ -2551,7 +2549,7 @@ void ARM7Write16(u32 addr, u16 val)
     case 0x02000000:
     case 0x02800000:
 #ifdef JIT_ENABLED
-        ARMJIT::InvalidateMainRAMIfNecessary(addr);
+        ARMJIT::CheckAndInvalidate<1, ARMJIT_Memory::memregion_MainRAM>(addr);
 #endif
         *(u16*)&MainRAM[addr & MainRAMMask] = val;
 #ifdef JIT_ENABLED
@@ -2560,28 +2558,28 @@ void ARM7Write16(u32 addr, u16 val)
         return;
 
     case 0x03000000:
-        if (SWRAM_ARM7)
+        if (SWRAM_ARM7.Mem)
         {
 #ifdef JIT_ENABLED
-            ARMJIT::InvalidateSWRAM7IfNecessary(addr);
+            ARMJIT::CheckAndInvalidate<1, ARMJIT_Memory::memregion_SWRAM>(addr);
 #endif
-            *(u16*)&SWRAM_ARM7[addr & SWRAM_ARM7Mask] = val;
+            *(u16*)&SWRAM_ARM7.Mem[addr & SWRAM_ARM7.Mask] = val;
             return;
         }
         else
        {
 #ifdef JIT_ENABLED
-            ARMJIT::InvalidateARM7WRAMIfNecessary(addr);
+            ARMJIT::CheckAndInvalidate<1, ARMJIT_Memory::memregion_WRAM7>(addr);
 #endif
-            *(u16*)&ARM7WRAM[addr & 0xFFFF] = val;
+            *(u16*)&ARM7WRAM[addr & (ARM7WRAMSize - 1)] = val;
             return;
        }
 
     case 0x03800000:
 #ifdef JIT_ENABLED
-        ARMJIT::InvalidateARM7WRAMIfNecessary(addr);
+        ARMJIT::CheckAndInvalidate<1, ARMJIT_Memory::memregion_WRAM7>(addr);
 #endif
-        *(u16*)&ARM7WRAM[addr & 0xFFFF] = val;
+        *(u16*)&ARM7WRAM[addr & (ARM7WRAMSize - 1)] = val;
         return;
 
     case 0x04000000:
@@ -2599,7 +2597,7 @@ void ARM7Write16(u32 addr, u16 val)
     case 0x06000000:
     case 0x06800000:
 #ifdef JIT_ENABLED
-        ARMJIT::InvalidateARM7WVRAMIfNecessary(addr);
+        ARMJIT::CheckAndInvalidate<1, ARMJIT_Memory::memregion_VWRAM>(addr);
 #endif
         GPU::WriteVRAM_ARM7(addr, val);
         return;
@@ -2638,7 +2636,7 @@ void ARM7Write32(u32 addr, u32 val)
     case 0x02000000:
     case 0x02800000:
 #ifdef JIT_ENABLED
-        ARMJIT::InvalidateMainRAMIfNecessary(addr);
+        ARMJIT::CheckAndInvalidate<1, ARMJIT_Memory::memregion_MainRAM>(addr);
 #endif
         *(u32*)&MainRAM[addr & MainRAMMask] = val;
 #ifdef JIT_ENABLED
@@ -2647,28 +2645,28 @@ void ARM7Write32(u32 addr, u32 val)
         return;
 
     case 0x03000000:
-        if (SWRAM_ARM7)
+        if (SWRAM_ARM7.Mem)
         {
 #ifdef JIT_ENABLED
-            ARMJIT::InvalidateSWRAM7IfNecessary(addr);
+            ARMJIT::CheckAndInvalidate<1, ARMJIT_Memory::memregion_SWRAM>(addr);
 #endif
-            *(u32*)&SWRAM_ARM7[addr & SWRAM_ARM7Mask] = val;
+            *(u32*)&SWRAM_ARM7.Mem[addr & SWRAM_ARM7.Mask] = val;
             return;
         }
         else
        {
 #ifdef JIT_ENABLED
-            ARMJIT::InvalidateARM7WRAMIfNecessary(addr);
+            ARMJIT::CheckAndInvalidate<1, ARMJIT_Memory::memregion_WRAM7>(addr);
 #endif
-            *(u32*)&ARM7WRAM[addr & 0xFFFF] = val;
+            *(u32*)&ARM7WRAM[addr & (ARM7WRAMSize - 1)] = val;
             return;
        }
 
     case 0x03800000:
 #ifdef JIT_ENABLED
-        ARMJIT::InvalidateARM7WRAMIfNecessary(addr);
+        ARMJIT::CheckAndInvalidate<1, ARMJIT_Memory::memregion_WRAM7>(addr);
 #endif
-        *(u32*)&ARM7WRAM[addr & 0xFFFF] = val;
+        *(u32*)&ARM7WRAM[addr & (ARM7WRAMSize - 1)] = val;
         return;
 
     case 0x04000000:
@@ -2687,7 +2685,7 @@ void ARM7Write32(u32 addr, u32 val)
     case 0x06000000:
     case 0x06800000:
 #ifdef JIT_ENABLED
-        ARMJIT::InvalidateARM7WVRAMIfNecessary(addr);
+        ARMJIT::CheckAndInvalidate<1, ARMJIT_Memory::memregion_VWRAM>(addr);
 #endif
         GPU::WriteVRAM_ARM7(addr, val);
         return;
@@ -2736,17 +2734,17 @@ bool ARM7GetMemRegion(u32 addr, bool write, MemRegion* region)
         // then access all the WRAM as one contiguous block starting at 0x037F8000
         // this case needs a bit of a hack to cover
         // it's not really worth bothering anyway
-        if (!SWRAM_ARM7)
+        if (!SWRAM_ARM7.Mem)
         {
             region->Mem = ARM7WRAM;
-            region->Mask = 0xFFFF;
+            region->Mask = ARM7WRAMSize-1;
             return true;
         }
         break;
 
     case 0x03800000:
         region->Mem = ARM7WRAM;
-        region->Mask = 0xFFFF;
+        region->Mask = ARM7WRAMSize-1;
         return true;
     }
 
diff --git a/src/NDS.h b/src/NDS.h
index e9b56da..4b4f9a1 100644
--- a/src/NDS.h
+++ b/src/NDS.h
@@ -134,6 +134,7 @@ typedef struct
 } MemRegion;
 
 extern int ConsoleType;
+extern int CurCPU;
 
 extern u8 ARM9MemTimings[0x40000][4];
 extern u8 ARM7MemTimings[0x20000][4];
@@ -161,20 +162,20 @@ extern u8 ARM9BIOS[0x1000];
 extern u8 ARM7BIOS[0x4000];
 extern u16 ARM7BIOSProt;
 
-extern u8 MainRAM[0x1000000];
+extern u8* MainRAM;
 extern u32 MainRAMMask;
 
-extern u8 SharedWRAM[0x8000];
-extern u8* SWRAM_ARM9;
-extern u8* SWRAM_ARM7;
-extern u32 SWRAM_ARM9Mask;
-extern u32 SWRAM_ARM7Mask;
-
-extern u8 ARM7WRAM[0x10000];
+const u32 SharedWRAMSize = 0x8000;
+extern u8* SharedWRAM;
+extern MemRegion SWRAM_ARM9;
+extern MemRegion SWRAM_ARM7;
 
 extern u32 KeyInput;
 
+const u32 ARM7WRAMSize = 0x10000;
+extern u8* ARM7WRAM;
+
 bool Init();
 void DeInit();
 void Reset();
--
cgit v1.2.3