--- a/src/sh4/ia32mac.h	Mon May 12 10:00:13 2008 +0000
+++ b/src/sh4/ia32mac.h	Mon Aug 04 06:00:11 2008 +0000
@@ -1,8 +1,9 @@
 /**
  * $Id$
  *
- * Provides the implementation for the ia32 ABI (eg prologue, epilogue, and
- * calling conventions)
+ * Provides the implementation for the ia32 Mac OS X ABI variant
+ * (e.g. prologue, epilogue, and calling conventions). The main difference
+ * from ia32abi is that stack frames are aligned on 16-byte boundaries.
  *
  * Copyright (c) 2007 Nathan Keynes.
  *
@@ -17,8 +18,8 @@
  * GNU General Public License for more details.
  */
 
-#ifndef __lxdream_ia32abi_H
-#define __lxdream_ia32abi_H 1
+#ifndef lxdream_ia32mac_H
+#define lxdream_ia32mac_H 1
 
 #define load_ptr( reg, ptr ) load_imm32( reg, (uint32_t)ptr );
 
@@ -124,7 +125,7 @@
     PUSH_r32(R_EBP);
     /* mov &sh4r, ebp */
     load_ptr( R_EBP, ((uint8_t *)&sh4r) + 128 );
-    
+
     sh4_x86.in_delay_slot = FALSE;
     sh4_x86.priv_checked = FALSE;
     sh4_x86.fpuen_checked = FALSE;
@@ -145,9 +146,9 @@
     ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );     // 6
     load_spreg( R_EAX, R_PC );
     if( sh4_x86.tlb_on ) {
-	call_func1(xlat_get_code_by_vma,R_EAX);
+        call_func1(xlat_get_code_by_vma,R_EAX);
     } else {
-	call_func1(xlat_get_code,R_EAX);
+        call_func1(xlat_get_code,R_EAX);
     }
     POP_r32(R_EBP);
     RET();
@@ -163,9 +164,9 @@
     load_spreg( R_EAX, R_NEW_PC );
     store_spreg( R_EAX, R_PC );
     if( sh4_x86.tlb_on ) {
-	call_func1(xlat_get_code_by_vma,R_EAX);
+        call_func1(xlat_get_code_by_vma,R_EAX);
     } else {
-	call_func1(xlat_get_code,R_EAX);
+        call_func1(xlat_get_code,R_EAX);
     }
     POP_r32(R_EBP);
     RET();
@@ -183,11 +184,11 @@
     load_imm32( R_ECX, pc );                            // 5
     store_spreg( R_ECX, REG_OFFSET(pc) );               // 3
     if( IS_IN_ICACHE(pc) ) {
-	MOV_moff32_EAX( xlat_get_lut_entry(GET_ICACHE_PHYS(pc)) ); // 5
+        MOV_moff32_EAX( xlat_get_lut_entry(GET_ICACHE_PHYS(pc)) ); // 5
     } else if( sh4_x86.tlb_on ) {
-	call_func1(xlat_get_code_by_vma,R_ECX);
+        call_func1(xlat_get_code_by_vma,R_ECX);
     } else {
-	call_func1(xlat_get_code,R_ECX);
+        call_func1(xlat_get_code,R_ECX);
     }
     AND_imm8s_r32( 0xFC, R_EAX );                       // 3
     load_imm32( R_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
@@ -207,11 +208,11 @@
     ADD_sh4r_r32( R_PC, R_ECX );
     store_spreg( R_ECX, REG_OFFSET(pc) );               // 3
     if( IS_IN_ICACHE(pc) ) {
-	MOV_moff32_EAX( xlat_get_lut_entry(GET_ICACHE_PHYS(pc)) ); // 5
+        MOV_moff32_EAX( xlat_get_lut_entry(GET_ICACHE_PHYS(pc)) ); // 5
     } else if( sh4_x86.tlb_on ) {
-	call_func1(xlat_get_code_by_vma,R_ECX);
+        call_func1(xlat_get_code_by_vma,R_ECX);
     } else {
-	call_func1(xlat_get_code,R_ECX);
+        call_func1(xlat_get_code,R_ECX);
     }
     AND_imm8s_r32( 0xFC, R_EAX );                       // 3
     load_imm32( R_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
@@ -225,66 +226,66 @@
  */
 void sh4_translate_end_block( sh4addr_t pc ) {
     if( sh4_x86.branch_taken == FALSE ) {
-	// Didn't exit unconditionally already, so write the termination here
-	exit_block_rel( pc, pc );
+        // Didn't exit unconditionally already, so write the termination here
+        exit_block_rel( pc, pc );
     }
     if( sh4_x86.backpatch_posn != 0 ) {
-	unsigned int i;
-	// Raise exception
-	uint8_t *end_ptr = xlat_output;
-	MOV_r32_r32( R_EDX, R_ECX );
-	ADD_r32_r32( R_EDX, R_ECX );
-	ADD_r32_sh4r( R_ECX, R_PC );
-	MOV_moff32_EAX( &sh4_cpu_period );
-	MUL_r32( R_EDX );
-	ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );
+        unsigned int i;
+        // Raise exception
+        uint8_t *end_ptr = xlat_output;
+        MOV_r32_r32( R_EDX, R_ECX );
+        ADD_r32_r32( R_EDX, R_ECX );
+        ADD_r32_sh4r( R_ECX, R_PC );
+        MOV_moff32_EAX( &sh4_cpu_period );
+        MUL_r32( R_EDX );
+        ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );
 
         POP_r32(R_EDX);
         call_func1( sh4_raise_exception, R_EDX );
-	load_spreg( R_EAX, R_PC );
-	if( sh4_x86.tlb_on ) {
-	    call_func1(xlat_get_code_by_vma,R_EAX);
-	} else {
-	    call_func1(xlat_get_code,R_EAX);
-	}
-	POP_r32(R_EBP);
-	RET();
+        load_spreg( R_EAX, R_PC );
+        if( sh4_x86.tlb_on ) {
+            call_func1(xlat_get_code_by_vma,R_EAX);
+        } else {
+            call_func1(xlat_get_code,R_EAX);
+        }
+        POP_r32(R_EBP);
+        RET();
 
-	// Exception already raised - just cleanup
-	uint8_t *preexc_ptr = xlat_output;
-	MOV_r32_r32( R_EDX, R_ECX );
-	ADD_r32_r32( R_EDX, R_ECX );
-	ADD_r32_sh4r( R_ECX, R_SPC );
-	MOV_moff32_EAX( &sh4_cpu_period );
-	MUL_r32( R_EDX );
-	ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );
-	load_spreg( R_EAX, R_PC );
-	if( sh4_x86.tlb_on ) {
-	    call_func1(xlat_get_code_by_vma,R_EAX);
-	} else {
-	    call_func1(xlat_get_code,R_EAX);
-	}
-	POP_r32(R_EBP);
-	RET();
+        // Exception already raised - just cleanup
+        uint8_t *preexc_ptr = xlat_output;
+        MOV_r32_r32( R_EDX, R_ECX );
+        ADD_r32_r32( R_EDX, R_ECX );
+        ADD_r32_sh4r( R_ECX, R_SPC );
+        MOV_moff32_EAX( &sh4_cpu_period );
+        MUL_r32( R_EDX );
+        ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );
+        load_spreg( R_EAX, R_PC );
+        if( sh4_x86.tlb_on ) {
+            call_func1(xlat_get_code_by_vma,R_EAX);
+        } else {
+            call_func1(xlat_get_code,R_EAX);
+        }
+        POP_r32(R_EBP);
+        RET();
 
-	for( i=0; i< sh4_x86.backpatch_posn; i++ ) {
-	    uint32_t *fixup_addr = (uint32_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset];
-	    *fixup_addr = xlat_output - (uint8_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset] - 4;
-	    if( sh4_x86.backpatch_list[i].exc_code < 0 ) {
-		load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
-		int stack_adj = -1 - sh4_x86.backpatch_list[i].exc_code;
-		if( stack_adj > 0 ) {
-		    ADD_imm8s_r32( stack_adj, R_ESP );
-		}
-		int rel = preexc_ptr - xlat_output;
-		JMP_rel(rel);
-	    } else {
-		PUSH_imm32( sh4_x86.backpatch_list[i].exc_code );
-		load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
-		int rel = end_ptr - xlat_output;
-		JMP_rel(rel);
-	    }
-	}
+        for( i=0; i< sh4_x86.backpatch_posn; i++ ) {
+            uint32_t *fixup_addr = (uint32_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset];
+            *fixup_addr = xlat_output - (uint8_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset] - 4;
+            if( sh4_x86.backpatch_list[i].exc_code < 0 ) {
+                load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
+                int stack_adj = -1 - sh4_x86.backpatch_list[i].exc_code;
+                if( stack_adj > 0 ) {
+                    ADD_imm8s_r32( stack_adj, R_ESP );
+                }
+                int rel = preexc_ptr - xlat_output;
+                JMP_rel(rel);
+            } else {
+                PUSH_imm32( sh4_x86.backpatch_list[i].exc_code );
+                load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
+                int rel = end_ptr - xlat_output;
+                JMP_rel(rel);
+            }
+        }
     }
 }
 
@@ -292,26 +293,26 @@
 {
     void *result = NULL;
     asm(
-	"mov %%ebp, %%eax\n\t"
-	"mov $0x8, %%ecx\n\t"
-	"mov %1, %%edx\n"
-"frame_loop: test %%eax, %%eax\n\t"
-	"je frame_not_found\n\t"
-	"cmp (%%eax), %%edx\n\t"
-	"je frame_found\n\t"
-	"sub $0x1, %%ecx\n\t"
-	"je frame_not_found\n\t"
-	"movl (%%eax), %%eax\n\t"
-	"jmp frame_loop\n"
-"frame_found: movl 0x4(%%eax), %0\n"
-"frame_not_found:"
-	: "=r" (result)
-	: "r" (((uint8_t *)&sh4r) + 128 )
-	: "eax", "ecx", "edx" );
+        "mov %%ebp, %%eax\n\t"
+        "mov $0x8, %%ecx\n\t"
+        "mov %1, %%edx\n"
+        "frame_loop: test %%eax, %%eax\n\t"
+        "je frame_not_found\n\t"
+        "cmp (%%eax), %%edx\n\t"
+        "je frame_found\n\t"
+        "sub $0x1, %%ecx\n\t"
+        "je frame_not_found\n\t"
+        "movl (%%eax), %%eax\n\t"
+        "jmp frame_loop\n"
+        "frame_found: movl 0x4(%%eax), %0\n"
+        "frame_not_found:"
+        : "=r" (result)
+        : "r" (((uint8_t *)&sh4r) + 128 )
+        : "eax", "ecx", "edx" );
    return result;
 }
 
-#endif
+#endif /* !lxdream_ia32mac_H */