lxdream.org :: lxdream/src/sh4/ia32mac.h :: diff
filename src/sh4/ia32mac.h
changeset 586:2a3ba82cf243
prev 539:75f3e594d4a7
next 590:4db6a084ca3c
author nkeynes
date Tue Jan 15 20:50:23 2008 +0000
permissions -rw-r--r--
last change Merged lxdream-mmu r570:596 to trunk
1.1 --- a/src/sh4/ia32mac.h Wed Nov 21 11:40:15 2007 +0000
1.2 +++ b/src/sh4/ia32mac.h Tue Jan 15 20:50:23 2008 +0000
1.3 @@ -1,5 +1,5 @@
1.4 /**
1.5 - * $Id: sh4x86.in,v 1.20 2007-11-08 11:54:16 nkeynes Exp $
1.6 + * $Id$
1.7 *
1.8 * Provides the implementation for the ia32 ABI (eg prologue, epilogue, and
1.9 * calling conventions)
1.10 @@ -101,11 +101,11 @@
1.11 PUSH_r32(addr);
1.12 load_imm32(R_EAX, (uint32_t)sh4_read_long);
1.13 CALL_r32(R_EAX);
1.14 - POP_r32(addr);
1.15 + POP_r32(R_ECX);
1.16 SUB_imm8s_r32( adj2-adj, R_ESP );
1.17 PUSH_r32(R_EAX);
1.18 - ADD_imm8s_r32( 4, addr );
1.19 - PUSH_r32(addr);
1.20 + ADD_imm8s_r32( 4, R_ECX );
1.21 + PUSH_r32(R_ECX);
1.22 load_imm32(R_EAX, (uint32_t)sh4_read_long);
1.23 CALL_r32(R_EAX);
1.24 ADD_imm8s_r32( 4, R_ESP );
1.25 @@ -115,9 +115,6 @@
1.26 sh4_x86.stack_posn -= 4;
1.27 }
1.28
1.29 -#define EXIT_BLOCK_SIZE 29
1.30 -
1.31 -
1.32 /**
1.33 * Emit the 'start of block' assembly. Sets up the stack frame and save
1.34 * SI/DI as required
1.35 @@ -133,8 +130,10 @@
1.36 sh4_x86.fpuen_checked = FALSE;
1.37 sh4_x86.branch_taken = FALSE;
1.38 sh4_x86.backpatch_posn = 0;
1.39 + sh4_x86.recovery_posn = 0;
1.40 sh4_x86.block_start_pc = pc;
1.41 sh4_x86.tstate = TSTATE_NONE;
1.42 + sh4_x86.tlb_on = IS_MMU_ENABLED();
1.43 sh4_x86.stack_posn = 8;
1.44 }
1.45
1.46 @@ -142,16 +141,23 @@
1.47 * Exit the block with sh4r.pc already written
1.48 * Bytes: 15
1.49 */
1.50 -void exit_block_pcset( pc )
1.51 +void exit_block_pcset( sh4addr_t pc )
1.52 {
1.53 load_imm32( R_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
1.54 ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) ); // 6
1.55 load_spreg( R_EAX, REG_OFFSET(pc) );
1.56 - call_func1(xlat_get_code,R_EAX);
1.57 + if( sh4_x86.tlb_on ) {
1.58 + call_func1(xlat_get_code_by_vma,R_EAX);
1.59 + } else {
1.60 + call_func1(xlat_get_code,R_EAX);
1.61 + }
1.62 POP_r32(R_EBP);
1.63 RET();
1.64 }
1.65
1.66 +#define EXIT_BLOCK_SIZE(pc) (24 + (IS_IN_ICACHE(pc)?5:CALL_FUNC1_SIZE))
1.67 +
1.68 +
1.69 /**
1.70 * Exit the block to an absolute PC
1.71 */
1.72 @@ -159,7 +165,37 @@
1.73 {
1.74 load_imm32( R_ECX, pc ); // 5
1.75 store_spreg( R_ECX, REG_OFFSET(pc) ); // 3
1.76 - MOV_moff32_EAX( xlat_get_lut_entry(pc) ); // 5
1.77 + if( IS_IN_ICACHE(pc) ) {
1.78 + MOV_moff32_EAX( xlat_get_lut_entry(GET_ICACHE_PHYS(pc)) ); // 5
1.79 + } else if( sh4_x86.tlb_on ) {
1.80 + call_func1(xlat_get_code_by_vma,R_ECX);
1.81 + } else {
1.82 + call_func1(xlat_get_code,R_ECX);
1.83 + }
1.84 + AND_imm8s_r32( 0xFC, R_EAX ); // 3
1.85 + load_imm32( R_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
1.86 + ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) ); // 6
1.87 + POP_r32(R_EBP);
1.88 + RET();
1.89 +}
1.90 +
1.91 +#define EXIT_BLOCK_REL_SIZE(pc) (27 + (IS_IN_ICACHE(pc)?5:CALL_FUNC1_SIZE))
1.92 +
1.93 +/**
1.94 + * Exit the block to a relative PC
1.95 + */
1.96 +void exit_block_rel( sh4addr_t pc, sh4addr_t endpc )
1.97 +{
1.98 + load_imm32( R_ECX, pc - sh4_x86.block_start_pc ); // 5
1.99 + ADD_sh4r_r32( R_PC, R_ECX );
1.100 + store_spreg( R_ECX, REG_OFFSET(pc) ); // 3
1.101 + if( IS_IN_ICACHE(pc) ) {
1.102 + MOV_moff32_EAX( xlat_get_lut_entry(GET_ICACHE_PHYS(pc)) ); // 5
1.103 + } else if( sh4_x86.tlb_on ) {
1.104 + call_func1(xlat_get_code_by_vma,R_ECX);
1.105 + } else {
1.106 + call_func1(xlat_get_code,R_ECX);
1.107 + }
1.108 AND_imm8s_r32( 0xFC, R_EAX ); // 3
1.109 load_imm32( R_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
1.110 ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) ); // 6
1.111 @@ -173,48 +209,88 @@
1.112 void sh4_translate_end_block( sh4addr_t pc ) {
1.113 if( sh4_x86.branch_taken == FALSE ) {
1.114 // Didn't exit unconditionally already, so write the termination here
1.115 - exit_block( pc, pc );
1.116 + exit_block_rel( pc, pc );
1.117 }
1.118 if( sh4_x86.backpatch_posn != 0 ) {
1.119 + unsigned int i;
1.120 + // Raise exception
1.121 uint8_t *end_ptr = xlat_output;
1.122 - // Exception termination. Jump block for various exception codes:
1.123 - PUSH_imm32( EXC_DATA_ADDR_READ );
1.124 - JMP_rel8( 33, target1 );
1.125 - PUSH_imm32( EXC_DATA_ADDR_WRITE );
1.126 - JMP_rel8( 26, target2 );
1.127 - PUSH_imm32( EXC_ILLEGAL );
1.128 - JMP_rel8( 19, target3 );
1.129 - PUSH_imm32( EXC_SLOT_ILLEGAL );
1.130 - JMP_rel8( 12, target4 );
1.131 - PUSH_imm32( EXC_FPU_DISABLED );
1.132 - JMP_rel8( 5, target5 );
1.133 - PUSH_imm32( EXC_SLOT_FPU_DISABLED );
1.134 - // target
1.135 - JMP_TARGET(target1);
1.136 - JMP_TARGET(target2);
1.137 - JMP_TARGET(target3);
1.138 - JMP_TARGET(target4);
1.139 - JMP_TARGET(target5);
1.140 - // Raise exception
1.141 - load_spreg( R_ECX, REG_OFFSET(pc) );
1.142 + MOV_r32_r32( R_EDX, R_ECX );
1.143 ADD_r32_r32( R_EDX, R_ECX );
1.144 - ADD_r32_r32( R_EDX, R_ECX );
1.145 - store_spreg( R_ECX, REG_OFFSET(pc) );
1.146 + ADD_r32_sh4r( R_ECX, R_PC );
1.147 MOV_moff32_EAX( &sh4_cpu_period );
1.148 MUL_r32( R_EDX );
1.149 ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );
1.150
1.151 - POP_r32(R_EDX);
1.152 - call_func1( sh4_raise_exception, R_EDX );
1.153 - load_spreg( R_EAX, REG_OFFSET(pc) );
1.154 - call_func1(xlat_get_code,R_EAX);
1.155 + POP_r32(R_EDX);
1.156 + call_func1( sh4_raise_exception, R_EDX );
1.157 + load_spreg( R_EAX, R_PC );
1.158 + if( sh4_x86.tlb_on ) {
1.159 + call_func1(xlat_get_code_by_vma,R_EAX);
1.160 + } else {
1.161 + call_func1(xlat_get_code,R_EAX);
1.162 + }
1.163 POP_r32(R_EBP);
1.164 RET();
1.165
1.166 - sh4_x86_do_backpatch( end_ptr );
1.167 + // Exception already raised - just cleanup
1.168 + uint8_t *preexc_ptr = xlat_output;
1.169 + MOV_r32_r32( R_EDX, R_ECX );
1.170 + ADD_r32_r32( R_EDX, R_ECX );
1.171 + ADD_r32_sh4r( R_ECX, R_SPC );
1.172 + MOV_moff32_EAX( &sh4_cpu_period );
1.173 + MUL_r32( R_EDX );
1.174 + ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );
1.175 + load_spreg( R_EAX, R_PC );
1.176 + if( sh4_x86.tlb_on ) {
1.177 + call_func1(xlat_get_code_by_vma,R_EAX);
1.178 + } else {
1.179 + call_func1(xlat_get_code,R_EAX);
1.180 + }
1.181 + POP_r32(R_EBP);
1.182 + RET();
1.183 +
1.184 + for( i=0; i< sh4_x86.backpatch_posn; i++ ) {
1.185 + *sh4_x86.backpatch_list[i].fixup_addr =
1.186 + xlat_output - ((uint8_t *)sh4_x86.backpatch_list[i].fixup_addr) - 4;
1.187 + if( sh4_x86.backpatch_list[i].exc_code == -1 ) {
1.188 + load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
1.189 + int rel = preexc_ptr - xlat_output;
1.190 + JMP_rel(rel);
1.191 + } else {
1.192 + PUSH_imm32( sh4_x86.backpatch_list[i].exc_code );
1.193 + load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
1.194 + int rel = end_ptr - xlat_output;
1.195 + JMP_rel(rel);
1.196 + }
1.197 + }
1.198 }
1.199 }
1.200
1.201 +void *xlat_get_native_pc()
1.202 +{
1.203 + void *result = NULL;
1.204 + asm(
1.205 + "mov %%ebp, %%eax\n\t"
1.206 + "mov $0x8, %%ecx\n\t"
1.207 + "mov %1, %%edx\n"
1.208 +"frame_loop: test %%eax, %%eax\n\t"
1.209 + "je frame_not_found\n\t"
1.210 + "cmp (%%eax), %%edx\n\t"
1.211 + "je frame_found\n\t"
1.212 + "sub $0x1, %%ecx\n\t"
1.213 + "je frame_not_found\n\t"
1.214 + "movl (%%eax), %%eax\n\t"
1.215 + "jmp frame_loop\n"
1.216 +"frame_found: movl 0x4(%%eax), %0\n"
1.217 +"frame_not_found:"
1.218 + : "=r" (result)
1.219 + : "r" (&sh4r)
1.220 + : "eax", "ecx", "edx" );
1.221 + return result;
1.222 +}
1.223 +
1.224 +
1.225 #endif
1.226
1.227
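A note on the recurring pattern in this change: everywhere the old code unconditionally emitted a call to xlat_get_code, the new code tests sh4_x86.tlb_on (cached from IS_MMU_ENABLED() at block start) and emits a call to xlat_get_code_by_vma instead when the MMU is active. At runtime the emitted epilogue therefore behaves roughly like the sketch below; next_block_for() is an invented name for illustration, not a function in the lxdream source.

    /* Hypothetical sketch of what the emitted block epilogue computes.
     * xlat_get_code() resolves a physical SH4 address to translated code;
     * xlat_get_code_by_vma() first translates the virtual address through
     * the ITLB. next_block_for() itself is not part of lxdream. */
    static void *next_block_for( sh4addr_t pc, int tlb_on )
    {
        if( tlb_on ) {
            return xlat_get_code_by_vma( pc );  /* MMU on: lookup via ITLB */
        } else {
            return xlat_get_code( pc );         /* MMU off: direct physical lookup */
        }
    }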
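The new fixup loop at the end of sh4_translate_end_block() patches each recorded jump so it lands on one of the two shared tails: entries with exc_code == -1 jump to the "already raised" cleanup at preexc_ptr, while everything else pushes its exception code and jumps to the raise path at end_ptr. The value written through fixup_addr is a rel32 displacement measured from the end of the 4-byte operand, hence the "- 4". Judging purely from the usage in this diff, a backpatch_list entry presumably carries something like the following fields; the actual declaration lives elsewhere in the tree and this layout is an assumption.

    /* Assumed shape of a backpatch_list entry, reconstructed from usage
     * in the fixup loop above -- not the actual declaration. */
    struct backpatch_record {
        uint32_t *fixup_addr;    /* points at the rel32 operand to patch */
        uint32_t fixup_icount;   /* instruction count, loaded into EDX for
                                  * the PC / slice_cycle correction */
        int32_t exc_code;        /* exception to raise, or -1 if the
                                  * exception was already raised */
    };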
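The new xlat_get_native_pc() walks the saved-EBP chain looking for a frame whose saved frame pointer equals &sh4r; translated blocks keep &sh4r in %ebp, so such a frame must have been pushed by a call out of translated code, and the return address beside it is the native PC inside the block. A rough C rendering of the same walk, assuming GCC and a conventional EBP chain, might read:

    /* Approximate C equivalent of the asm in xlat_get_native_pc(),
     * for illustration only (assumes frame pointers are not omitted). */
    void *xlat_get_native_pc_sketch( void )
    {
        void **frame = __builtin_frame_address(0);
        int i;
        for( i = 0; frame != NULL && i < 8; i++ ) {
            if( *frame == (void *)&sh4r ) {
                return frame[1];         /* return address into translated code */
            }
            frame = (void **)*frame;     /* follow the saved-EBP chain */
        }
        return NULL;                     /* no translated frame within 8 levels */
    }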