--- a/src/sh4/ia32abi.h	Sun Jan 06 12:24:18 2008 +0000
+++ b/src/sh4/ia32abi.h	Mon Jan 14 09:08:58 2008 +0000
@@ -80,19 +80,16 @@
 {
     PUSH_r32(addr);
     call_func0(sh4_read_long);
-    POP_r32(addr);
+    POP_r32(R_ECX);
     PUSH_r32(R_EAX);
-    ADD_imm8s_r32( 4, addr );
-    PUSH_r32(addr);
+    ADD_imm8s_r32( 4, R_ECX );
+    PUSH_r32(R_ECX);
     call_func0(sh4_read_long);
     ADD_imm8s_r32( 4, R_ESP );
     MOV_r32_r32( R_EAX, arg2b );
     POP_r32(arg2a);
 }
 
-#define EXIT_BLOCK_SIZE 29
-
-
 /**
  * Emit the 'start of block' assembly. Sets up the stack frame and save
  * SI/DI as required
@@ -108,8 +105,9 @@
     sh4_x86.fpuen_checked = FALSE;
     sh4_x86.branch_taken = FALSE;
     sh4_x86.backpatch_posn = 0;
+    sh4_x86.recovery_posn = 0;
     sh4_x86.block_start_pc = pc;
-    sh4_x86.tlb_on = MMIO_READ(MMU,MMUCR)&MMUCR_AT;
+    sh4_x86.tlb_on = IS_MMU_ENABLED();
     sh4_x86.tstate = TSTATE_NONE;
 #ifdef STACK_ALIGN
     sh4_x86.stack_posn = 8;
@@ -134,6 +132,9 @@
     RET();
 }
 
+#define EXIT_BLOCK_SIZE(pc) (24 + (IS_IN_ICACHE(pc)?5:CALL_FUNC1_SIZE))
+
+
 /**
  * Exit the block to an absolute PC
  */
@@ -141,7 +142,37 @@
 {
     load_imm32( R_ECX, pc );                            // 5
     store_spreg( R_ECX, REG_OFFSET(pc) );               // 3
-    MOV_moff32_EAX( xlat_get_lut_entry(pc) ); // 5
+    if( IS_IN_ICACHE(pc) ) {
+        MOV_moff32_EAX( xlat_get_lut_entry(GET_ICACHE_PHYS(pc)) ); // 5
+    } else if( sh4_x86.tlb_on ) {
+        call_func1(xlat_get_code_by_vma,R_ECX);
+    } else {
+        call_func1(xlat_get_code,R_ECX);
+    }
+    AND_imm8s_r32( 0xFC, R_EAX ); // 3
+    load_imm32( R_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
+    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );     // 6
+    POP_r32(R_EBP);
+    RET();
+}
+
+#define EXIT_BLOCK_REL_SIZE(pc) (27 + (IS_IN_ICACHE(pc)?5:CALL_FUNC1_SIZE))
+
+/**
+ * Exit the block to a relative PC
+ */
+void exit_block_rel( sh4addr_t pc, sh4addr_t endpc )
+{
+    load_imm32( R_ECX, pc - sh4_x86.block_start_pc );   // 5
+    ADD_sh4r_r32( R_PC, R_ECX );
+    store_spreg( R_ECX, REG_OFFSET(pc) );               // 3
+    if( IS_IN_ICACHE(pc) ) {
+        MOV_moff32_EAX( xlat_get_lut_entry(GET_ICACHE_PHYS(pc)) ); // 5
+    } else if( sh4_x86.tlb_on ) {
+        call_func1(xlat_get_code_by_vma,R_ECX);
+    } else {
+        call_func1(xlat_get_code,R_ECX);
+    }
     AND_imm8s_r32( 0xFC, R_EAX ); // 3
     load_imm32( R_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
     ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );     // 6
@@ -155,23 +186,22 @@
 void sh4_translate_end_block( sh4addr_t pc ) {
     if( sh4_x86.branch_taken == FALSE ) {
         // Didn't exit unconditionally already, so write the termination here
-        exit_block( pc, pc );
+        exit_block_rel( pc, pc );
     }
     if( sh4_x86.backpatch_posn != 0 ) {
         unsigned int i;
         // Raise exception
         uint8_t *end_ptr = xlat_output;
-        load_spreg( R_ECX, REG_OFFSET(pc) );
+        MOV_r32_r32( R_EDX, R_ECX );
         ADD_r32_r32( R_EDX, R_ECX );
-        ADD_r32_r32( R_EDX, R_ECX );
-        store_spreg( R_ECX, REG_OFFSET(pc) );
+        ADD_r32_sh4r( R_ECX, R_PC );
         MOV_moff32_EAX( &sh4_cpu_period );
         MUL_r32( R_EDX );
        ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );
 
         call_func0( sh4_raise_exception );
         ADD_imm8s_r32( 4, R_ESP );
-        load_spreg( R_EAX, REG_OFFSET(pc) );
+        load_spreg( R_EAX, R_PC );
         if( sh4_x86.tlb_on ) {
             call_func1(xlat_get_code_by_vma,R_EAX);
         } else {
@@ -182,14 +212,13 @@
 
         // Exception already raised - just cleanup
         uint8_t *preexc_ptr = xlat_output;
-        load_imm32( R_ECX, sh4_x86.block_start_pc );
+        MOV_r32_r32( R_EDX, R_ECX );
         ADD_r32_r32( R_EDX, R_ECX );
-        ADD_r32_r32( R_EDX, R_ECX );
-        store_spreg( R_ECX, REG_OFFSET(spc) );
+        ADD_r32_sh4r( R_ECX, R_SPC );
         MOV_moff32_EAX( &sh4_cpu_period );
         MUL_r32( R_EDX );
         ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );
-        load_spreg( R_EAX, REG_OFFSET(pc) );
+        load_spreg( R_EAX, R_PC );
         if( sh4_x86.tlb_on ) {
             call_func1(xlat_get_code_by_vma,R_EAX);
         } else {
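
A note on the size bookkeeping in the patch, reconstructed from the byte-count
comments in the emitters (the "// 5", "// 3", "// 6" annotations): the old
fixed EXIT_BLOCK_SIZE of 29 covered an epilogue in which every instruction had
a known length, while the new epilogue swaps the 5-byte MOV_moff32_EAX for a
translation-time choice between that MOV and a call_func1 sequence, leaving a
24-byte fixed part. A minimal compile-and-run sketch of the arithmetic, using
only the counts quoted in the diff:

#include <assert.h>

/* Byte sizes of the emitted ia32 instructions, as annotated in the
 * emitter comments above ("// 5", "// 3", "// 6", ...). */
enum {
    LOAD_IMM32_SZ   = 5,  /* mov $imm32, %ecx    */
    STORE_SPREG_SZ  = 3,  /* mov %ecx, off(%ebp) */
    MOV_MOFF32_SZ   = 5,  /* mov moff32, %eax    */
    AND_IMM8S_SZ    = 3,  /* and $0xfc, %eax     */
    ADD_R32_SH4R_SZ = 6,  /* add %ecx, off(%ebp) */
    POP_SZ          = 1,
    RET_SZ          = 1
};

int main(void)
{
    /* Old exit_block: every instruction fixed-size, hence the constant 29. */
    assert( LOAD_IMM32_SZ + STORE_SPREG_SZ + MOV_MOFF32_SZ + AND_IMM8S_SZ
            + LOAD_IMM32_SZ + ADD_R32_SH4R_SZ + POP_SZ + RET_SZ == 29 );

    /* New exit_block: the 5-byte MOV becomes variable (MOV vs call_func1),
     * leaving 24 + (IS_IN_ICACHE(pc)?5:CALL_FUNC1_SIZE). */
    assert( LOAD_IMM32_SZ + STORE_SPREG_SZ + AND_IMM8S_SZ
            + LOAD_IMM32_SZ + ADD_R32_SH4R_SZ + POP_SZ + RET_SZ == 24 );
    return 0;
}

By the same accounting, the fixed part of 27 in EXIT_BLOCK_REL_SIZE would put
the extra ADD_sh4r_r32 at 3 bytes (24 + 3); that length is an inference from
the two macros, not something stated in the diff itself.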
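Both exit paths now pick the next-block lookup strategy when the block is
translated, rather than always indexing the flat LUT with the virtual PC: a
target still inside the validated icache page gets its physical LUT slot baked
into a 5-byte MOV, and anything else falls back to a runtime call through
xlat_get_code_by_vma (MMU on) or xlat_get_code (MMU off). Below is a
self-contained C sketch of that three-way decision; the helper names, the page
check, and the sample addresses are mock stand-ins for illustration, not
lxdream's actual API:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

typedef uint32_t sh4addr_t;

/* Mock translator state; stands in for sh4_x86.tlb_on (illustrative). */
static bool tlb_on = false;

/* Mock of IS_IN_ICACHE: in the real translator this checks whether the
 * target lies in the page already validated for the current block. */
static bool is_in_icache(sh4addr_t pc) { return (pc >> 12) == 0x8c010; }

/* Translation-time choice of the next-block lookup, mirroring the
 * if/else chain the patch adds to exit_block()/exit_block_rel(). */
static const char *next_block_lookup(sh4addr_t pc)
{
    if (is_in_icache(pc))
        return "MOV moff32, %eax          (5 bytes, LUT slot baked in)";
    else if (tlb_on)
        return "call xlat_get_code_by_vma (runtime VMA translation)";
    else
        return "call xlat_get_code        (direct physical lookup)";
}

int main(void)
{
    printf("%s\n", next_block_lookup(0x8c010204));  /* icache hit   */
    printf("%s\n", next_block_lookup(0x8c200000));  /* direct path  */
    tlb_on = true;
    printf("%s\n", next_block_lookup(0x8c200000));  /* via VMA path */
    return 0;
}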
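The remaining changes all make the PC arithmetic relative: exit_block_rel
stores R_PC plus (pc - block_start_pc) instead of an absolute constant, and
the backpatch epilogues derive the faulting PC by doubling the displacement
held in R_EDX (the MOV + ADD pair) and adding it onto R_PC or R_SPC, where the
old code reloaded and rewrote absolute values. A sketch of what the emitted
sequences compute at runtime, assuming a simplified register block and an
illustrative 5ns sh4_cpu_period:

#include <stdint.h>
#include <assert.h>

/* Simplified slice of the SH4 register block (illustrative layout). */
struct sh4_state {
    uint32_t pc, spc;
    uint32_t slice_cycle;
};

static const uint32_t sh4_cpu_period = 5; /* assumed: 5ns/cycle at 200MHz */

/* What the relative exit computes at runtime:
 *   ECX = (pc - block_start_pc); ECX += sh4r.pc; sh4r.pc = ECX
 * plus the cycle charge for the instructions the block covered. */
static void exit_block_rel_runtime(struct sh4_state *sh4r,
                                   uint32_t pc, uint32_t endpc,
                                   uint32_t block_start_pc)
{
    sh4r->pc += pc - block_start_pc;
    sh4r->slice_cycle += ((endpc - block_start_pc) >> 1) * sh4_cpu_period;
}

/* What the backpatched exception epilogue computes: EDX holds the
 * displacement in 16-bit instruction units; MOV+ADD doubles it into ECX,
 * which is added straight onto sh4r.pc (or sh4r.spc in the cleanup path). */
static void backpatch_epilogue_runtime(struct sh4_state *sh4r, uint32_t edx)
{
    sh4r->pc += 2 * edx;                        /* byte offset of faulting insn */
    sh4r->slice_cycle += edx * sh4_cpu_period;  /* cycles executed up to fault  */
}

int main(void)
{
    struct sh4_state sh4r = { .pc = 0x8c010000, .spc = 0, .slice_cycle = 0 };
    exit_block_rel_runtime(&sh4r, 0x8c010008, 0x8c01000c, 0x8c010000);
    assert(sh4r.pc == 0x8c010008);              /* absolute PC recovered  */
    assert(sh4r.slice_cycle == 6 * sh4_cpu_period);
    backpatch_epilogue_runtime(&sh4r, 3);       /* fault at 4th insn      */
    assert(sh4r.pc == 0x8c01000e);
    assert(sh4r.slice_cycle == 9 * sh4_cpu_period);
    return 0;
}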