--- a/src/sh4/ia32mac.h	Sun Jan 06 12:24:18 2008 +0000
+++ b/src/sh4/ia32mac.h	Thu Jan 10 08:28:37 2008 +0000
@@ -101,11 +101,11 @@
     load_imm32(R_EAX, (uint32_t)sh4_read_long);
     SUB_imm8s_r32( adj2-adj, R_ESP );
-    ADD_imm8s_r32( 4, addr );
+    ADD_imm8s_r32( 4, R_ECX );
     load_imm32(R_EAX, (uint32_t)sh4_read_long);
     ADD_imm8s_r32( 4, R_ESP );
     sh4_x86.stack_posn -= 4;
-#define EXIT_BLOCK_SIZE 29
  * Emit the 'start of block' assembly. Sets up the stack frame and save
@@ -133,9 +130,10 @@
     sh4_x86.fpuen_checked = FALSE;
     sh4_x86.branch_taken = FALSE;
     sh4_x86.backpatch_posn = 0;
+    sh4_x86.recovery_posn = 0;
     sh4_x86.block_start_pc = pc;
     sh4_x86.tstate = TSTATE_NONE;
-    sh4_x86.tlb_on = MMIO_READ(MMU,MMUCR)&MMUCR_AT;
+    sh4_x86.tlb_on = IS_MMU_ENABLED();
     sh4_x86.stack_posn = 8;
+#define EXIT_BLOCK_SIZE(pc) (24 + (IS_IN_ICACHE(pc)?5:CALL_FUNC1_SIZE))
  * Exit the block to an absolute PC
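The fixed EXIT_BLOCK_SIZE of 29 gives way to a pc-dependent macro because the translation-cache lookup emitted at block exit now has two possible encodings. A hedged accounting, using only the byte counts from the // comments in the hunks below (attributing the remaining fixed bytes to the stub's epilogue is an assumption, not something the patch states):

/* Sketch only -- not verbatim lxdream source.
 * Fixed portion of the absolute-PC exit stub:
 *   load_imm32( R_ECX, pc )                          5 bytes
 *   store_spreg( R_ECX, REG_OFFSET(pc) )             3 bytes
 *   AND_imm8s_r32( 0xFC, R_EAX )                     3 bytes
 *   load_imm32( R_ECX, cycles )                      5 bytes
 *   ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) )   6 bytes
 *   epilogue (assumed)                               2 bytes
 *                                                 => 24 bytes
 * Variable portion: 5 bytes for the absolute LUT load when the target
 * is in the ICACHE, else a CALL_FUNC1_SIZE-byte out-of-line call. The
 * _REL variant below carries 3 extra fixed bytes, presumably the
 * ADD_sh4r_r32 that rebases the relative pc. */
#define EXIT_BLOCK_SIZE(pc)     (24 + (IS_IN_ICACHE(pc) ? 5 : CALL_FUNC1_SIZE))
#define EXIT_BLOCK_REL_SIZE(pc) (27 + (IS_IN_ICACHE(pc) ? 5 : CALL_FUNC1_SIZE))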
@@ -164,7 +165,37 @@
     load_imm32( R_ECX, pc ); // 5
     store_spreg( R_ECX, REG_OFFSET(pc) ); // 3
-    MOV_moff32_EAX( xlat_get_lut_entry(pc) ); // 5
+    if( IS_IN_ICACHE(pc) ) {
+        MOV_moff32_EAX( xlat_get_lut_entry(GET_ICACHE_PHYS(pc)) ); // 5
+    } else if( sh4_x86.tlb_on ) {
+        call_func1(xlat_get_code_by_vma,R_ECX);
+    } else {
+        call_func1(xlat_get_code,R_ECX);
+    }
+    AND_imm8s_r32( 0xFC, R_EAX ); // 3
+    load_imm32( R_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
+    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) ); // 6
+#define EXIT_BLOCK_REL_SIZE(pc) (27 + (IS_IN_ICACHE(pc)?5:CALL_FUNC1_SIZE))
+ * Exit the block to a relative PC
+void exit_block_rel( sh4addr_t pc, sh4addr_t endpc )
+{
+    load_imm32( R_ECX, pc - sh4_x86.block_start_pc ); // 5
+    ADD_sh4r_r32( R_PC, R_ECX );
+    store_spreg( R_ECX, REG_OFFSET(pc) ); // 3
+    if( IS_IN_ICACHE(pc) ) {
+        MOV_moff32_EAX( xlat_get_lut_entry(GET_ICACHE_PHYS(pc)) ); // 5
+    } else if( sh4_x86.tlb_on ) {
+        call_func1(xlat_get_code_by_vma,R_ECX);
+    } else {
+        call_func1(xlat_get_code,R_ECX);
+    }
     AND_imm8s_r32( 0xFC, R_EAX ); // 3
     load_imm32( R_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
     ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) ); // 6
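Both exit_block and exit_block_rel now emit the same three-way lookup. Factored out purely for illustration (emit_xlat_lookup is a hypothetical name, not part of the patch; the calls are exactly the ones used above):

static void emit_xlat_lookup( sh4addr_t pc )
{
    if( IS_IN_ICACHE(pc) ) {
        /* Target page is covered by the current ICACHE mapping, so its
         * physical address is known at translation time: a 5-byte
         * absolute load of the LUT entry suffices. */
        MOV_moff32_EAX( xlat_get_lut_entry(GET_ICACHE_PHYS(pc)) );
    } else if( sh4_x86.tlb_on ) {
        /* MMU active (IS_MMU_ENABLED() at translation time) and target
         * not in the ICACHE: resolve the virtual address at run time. */
        call_func1( xlat_get_code_by_vma, R_ECX );
    } else {
        /* MMU off: direct lookup at run time. */
        call_func1( xlat_get_code, R_ECX );
    }
}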
@@ -178,23 +209,22 @@
 void sh4_translate_end_block( sh4addr_t pc ) {
     if( sh4_x86.branch_taken == FALSE ) {
         // Didn't exit unconditionally already, so write the termination here
-        exit_block( pc, pc );
+        exit_block_rel( pc, pc );
     if( sh4_x86.backpatch_posn != 0 ) {
         uint8_t *end_ptr = xlat_output;
-        load_spreg( R_ECX, REG_OFFSET(pc) );
+        MOV_r32_r32( R_EDX, R_ECX );
         ADD_r32_r32( R_EDX, R_ECX );
-        ADD_r32_r32( R_EDX, R_ECX );
-        store_spreg( R_ECX, REG_OFFSET(pc) );
+        ADD_r32_sh4r( R_ECX, R_PC );
         MOV_moff32_EAX( &sh4_cpu_period );
         ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );
         call_func1( sh4_raise_exception, R_EDX );
-        load_spreg( R_EAX, REG_OFFSET(pc) );
+        load_spreg( R_EAX, R_PC );
         if( sh4_x86.tlb_on ) {
             call_func1(xlat_get_code_by_vma,R_EAX);
@@ -205,14 +235,13 @@
         // Exception already raised - just cleanup
         uint8_t *preexc_ptr = xlat_output;
-        load_imm32( R_ECX, sh4_x86.block_start_pc );
+        MOV_r32_r32( R_EDX, R_ECX );
         ADD_r32_r32( R_EDX, R_ECX );
-        ADD_r32_r32( R_EDX, R_ECX );
-        store_spreg( R_ECX, REG_OFFSET(spc) );
+        ADD_r32_sh4r( R_ECX, R_SPC );
         MOV_moff32_EAX( &sh4_cpu_period );
         ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );
-        load_spreg( R_EAX, REG_OFFSET(pc) );
+        load_spreg( R_EAX, R_PC );
         if( sh4_x86.tlb_on ) {
             call_func1(xlat_get_code_by_vma,R_EAX);
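For reference, the effect of the backpatch-epilogue rewrite in the last two hunks, assuming R_EDX holds the faulting instruction's offset from the block start in SH4 instructions — a reading implied by the doubling, not stated in the patch:

/* Old sequence (4 emitted instructions, absolute PC round-trip):
 *   load_spreg( R_ECX, REG_OFFSET(pc) );  // ECX = sh4r.pc
 *   ADD_r32_r32( R_EDX, R_ECX );          // ECX += EDX
 *   ADD_r32_r32( R_EDX, R_ECX );          // ECX += EDX  => pc + 2*EDX
 *   store_spreg( R_ECX, REG_OFFSET(pc) ); // sh4r.pc = ECX
 *
 * New sequence (3 instructions, adds straight into the register file):
 *   MOV_r32_r32( R_EDX, R_ECX );          // ECX = EDX
 *   ADD_r32_r32( R_EDX, R_ECX );          // ECX = 2*EDX (bytes: SH4
 *                                         //  instructions are 16-bit)
 *   ADD_r32_sh4r( R_ECX, R_PC );          // sh4r.pc += 2*EDX
 *
 * The pre-exception variant mirrors this for sh4r.spc via R_SPC; it
 * previously seeded ECX with load_imm32(sh4_x86.block_start_pc), so
 * the new form presumably relies on spc already holding the block
 * start, in keeping with the relative-PC scheme that exit_block_rel
 * introduces. */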