lxdream.org :: lxdream/src/sh4/ia64abi.h :: diff
filename src/sh4/ia64abi.h
changeset 926:68f3e0fe02f1
prev 908:a00debcf2600
next 927:17b6b9e245d8
author nkeynes
date Sun Dec 14 07:50:48 2008 +0000
permissions -rw-r--r--
last change Set up a 'proper' stack frame in translated blocks. This doesn't affect performance noticeably,
but does ensure that
a) The stack is aligned correctly on OS X with no extra effort, and
b) We can't mess up the stack and crash that way anymore.
Replace all PUSH/POP instructions (outside of the prologue/epilogue) with ESP-relative moves to
stack-local variables.
Finally, merge ia32mac and ia32abi together, since they're pretty much the same now anyway
(thereby simplifying maintenance a good deal).
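
The alignment in (a) follows from the x86-64 System V calling convention, which OS X shares: RSP must be 16-byte aligned at every CALL. The CALL into the block pushes an 8-byte return address and PUSH R_EBP pushes 8 more, so reserving a further multiple of 16 bytes keeps every call site inside the block aligned. A minimal sketch of that invariant, assuming standard x86-64 entry conditions (check_frame_alignment and the sample RSP value are illustrative, not part of the patch):

#include <stdint.h>
#include <assert.h>

/* Model the stack pointer through the new prologue in enter_block(). */
static void check_frame_alignment( uintptr_t rsp_at_entry )
{
    assert( rsp_at_entry % 16 == 8 );   /* CALL pushed an 8-byte return address */
    uintptr_t rsp = rsp_at_entry - 8;   /* PUSH_r32(R_EBP) */
    assert( rsp % 16 == 0 );            /* frame base is 16-byte aligned */
    rsp -= 16;                          /* REXW(); SUB_imm8s_r32( 16, R_ESP ) */
    assert( rsp % 16 == 0 );            /* still aligned at any CALL in the block */
}

int main(void)
{
    check_frame_alignment( 0x7fffffffe008ULL );  /* sample entry RSP, RSP % 16 == 8 */
    return 0;
}

The 16 bytes reserved by the SUB also provide the stack-local scratch space that the ESP-relative moves mentioned above target.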
--- a/src/sh4/ia64abi.h  Thu Oct 30 05:50:21 2008 +0000
+++ b/src/sh4/ia64abi.h  Sun Dec 14 07:50:48 2008 +0000
@@ -94,8 +94,16 @@
 void enter_block( )
 {
     PUSH_r32(R_EBP);
-    /* mov &sh4r, ebp */
     load_ptr( R_EBP, ((uint8_t *)&sh4r) + 128 );
+    // Minimum aligned allocation is 16 bytes
+    REXW(); SUB_imm8s_r32( 16, R_ESP );
+}
+
+static inline void exit_block( )
+{
+    REXW(); ADD_imm8s_r32( 16, R_ESP );
+    POP_r32(R_EBP);
+    RET();
 }

 /**
@@ -111,8 +119,7 @@
     } else {
         call_func1(xlat_get_code,R_EAX);
     }
-    POP_r32(R_EBP);
-    RET();
+    exit_block();
 }

 /**
@@ -129,30 +136,28 @@
     } else {
         call_func1(xlat_get_code,R_EAX);
     }
-    POP_r32(R_EBP);
-    RET();
+    exit_block();
 }

 #define EXIT_BLOCK_SIZE(pc) (25 + (IS_IN_ICACHE(pc)?10:CALL_FUNC1_SIZE))
 /**
  * Exit the block to an absolute PC
  */
-void exit_block( sh4addr_t pc, sh4addr_t endpc )
+void exit_block_abs( sh4addr_t pc, sh4addr_t endpc )
 {
     load_imm32( R_ECX, pc );                                 // 5
     store_spreg( R_ECX, REG_OFFSET(pc) );                    // 3
     if( IS_IN_ICACHE(pc) ) {
         REXW(); MOV_moff32_EAX( xlat_get_lut_entry(pc) );
+        REXW(); AND_imm8s_r32( 0xFC, R_EAX );                // 4
     } else if( sh4_x86.tlb_on ) {
         call_func1(xlat_get_code_by_vma, R_ECX);
     } else {
         call_func1(xlat_get_code,R_ECX);
     }
-    REXW(); AND_imm8s_r32( 0xFC, R_EAX );                    // 4
     load_imm32( R_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
     ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );          // 6
-    POP_r32(R_EBP);
-    RET();
+    exit_block();
 }


@@ -168,16 +173,15 @@
     store_spreg( R_ECX, REG_OFFSET(pc) );                    // 3
     if( IS_IN_ICACHE(pc) ) {
         REXW(); MOV_moff32_EAX( xlat_get_lut_entry(GET_ICACHE_PHYS(pc)) ); // 5
+        REXW(); AND_imm8s_r32( 0xFC, R_EAX );                // 4
     } else if( sh4_x86.tlb_on ) {
         call_func1(xlat_get_code_by_vma,R_ECX);
     } else {
         call_func1(xlat_get_code,R_ECX);
     }
-    REXW(); AND_imm8s_r32( 0xFC, R_EAX );                    // 4
     load_imm32( R_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
     ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );          // 6
-    POP_r32(R_EBP);
-    RET();
+    exit_block();
 }

 /**
@@ -206,9 +210,8 @@
     } else {
         call_func1(xlat_get_code,R_EAX);
     }
-    POP_r32(R_EBP);
-    RET();
-
+    exit_block();
+
     // Exception already raised - just cleanup
     uint8_t *preexc_ptr = xlat_output;
     MOV_r32_r32( R_EDX, R_ECX );
@@ -223,8 +226,7 @@
     } else {
         call_func0(xlat_get_code);
     }
-    POP_r32(R_EBP);
-    RET();
+    exit_block();

     for( i=0; i< sh4_x86.backpatch_posn; i++ ) {
         uint32_t *fixup_addr = (uint32_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset];
@@ -233,7 +235,7 @@
         load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
         int stack_adj = -1 - sh4_x86.backpatch_list[i].exc_code;
         if( stack_adj > 0 ) {
-            ADD_imm8s_r32( stack_adj*4, R_ESP );
+            REXW(); ADD_imm8s_r32( stack_adj*4, R_ESP );
         }
         int rel = preexc_ptr - xlat_output;
         JMP_rel(rel);
@@ -247,26 +249,35 @@
     }
 }

+struct UnwindInfo {
+    uintptr_t block_start;
+    uintptr_t block_end;
+    void *pc;
+};
+
 _Unwind_Reason_Code xlat_check_frame( struct _Unwind_Context *context, void *arg )
 {
-    void *rbp = (void *)_Unwind_GetGR(context, 6);
-    void *expect = (((uint8_t *)&sh4r) + 128 );
-    if( rbp == expect ) {
-        void **result = (void **)arg;
-        *result = (void *)_Unwind_GetIP(context);
+    struct UnwindInfo *info = arg;
+    void *pc = (void *)_Unwind_GetIP(context);
+    if( ((uintptr_t)pc) >= info->block_start && ((uintptr_t)pc) < info->block_end ) {
+        info->pc = pc;
         return _URC_NORMAL_STOP;
     }
-
+
     return _URC_NO_REASON;
 }

-void *xlat_get_native_pc( void *code, uint32_t size )
+void *xlat_get_native_pc( void *code, uint32_t code_size )
 {
     struct _Unwind_Exception exc;
+    struct UnwindInfo info;

+    info.pc = NULL;
+    info.block_start = (uintptr_t)code;
+    info.block_end = info.block_start + code_size;
     void *result = NULL;
-    _Unwind_Backtrace( xlat_check_frame, &result );
-    return result;
+    _Unwind_Backtrace( xlat_check_frame, &info );
+    return info.pc;
 }

 #endif /* !lxdream_ia64abi_H */
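
The reworked xlat_get_native_pc no longer matches frames by comparing RBP against &sh4r + 128 (via _Unwind_GetGR); it walks the stack and stops at the first return address that falls inside the translated block's [block_start, block_end) range. Reduced to a self-contained sketch against the standard <unwind.h> interface (names and the dummy range are illustrative, not lxdream's; the unwind routines are typically supplied by libgcc or libunwind):

#include <stdint.h>
#include <stdio.h>
#include <unwind.h>

struct range_info {
    uintptr_t start, end;   /* half-open range [start, end) of generated code */
    void *pc;               /* first frame PC found inside the range */
};

static _Unwind_Reason_Code find_pc_in_range( struct _Unwind_Context *context, void *arg )
{
    struct range_info *info = arg;
    uintptr_t ip = (uintptr_t)_Unwind_GetIP(context);
    if( ip >= info->start && ip < info->end ) {
        info->pc = (void *)ip;
        return _URC_NORMAL_STOP;    /* match found - stop the backtrace */
    }
    return _URC_NO_REASON;          /* keep walking toward older frames */
}

void *find_native_pc( void *code, uint32_t code_size )
{
    struct range_info info = { (uintptr_t)code, (uintptr_t)code + code_size, NULL };
    _Unwind_Backtrace( find_pc_in_range, &info );
    return info.pc;                 /* NULL if no frame was inside the block */
}

int main(void)
{
    /* No live frame lies inside this dummy buffer, so NULL is expected. */
    static char dummy[64];
    printf( "%p\n", find_native_pc( dummy, sizeof dummy ) );
    return 0;
}

The stop condition is what distinguishes the two versions: the old code keyed on a register value (RBP == &sh4r + 128), while the new code keys on whether a frame's return address lies within the translated block itself, needing nothing from the frame beyond the IP the unwinder already has.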