4 * Provides the implementation for the ia32 ABI variant
5 * (eg prologue, epilogue, and calling conventions). Stack frame is
6 * aligned on 16-byte boundaries for the benefit of OS X (which
9 * Copyright (c) 2007 Nathan Keynes.
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
22 #ifndef lxdream_ia32mac_H
23 #define lxdream_ia32mac_H 1
/* Load a host pointer constant into a register.  The cast truncates the
 * pointer to 32 bits, so this macro is only valid for a 32-bit (ia32) host. */
25 #define load_ptr( reg, ptr ) load_imm32( reg, (uint32_t)ptr );
/* Emit code that translates the SH4 address in addr_reg into a page-table
 * entry in ECX: the address is shifted right by 12 (4KB page index) and used
 * as a scaled (x4, i.e. 32-bit entry) index into the address-space table.
 * The table base is chosen at translation time from the current processor
 * mode (SR_MD set => privileged sh4_address_space, else user space).
 * NOTE(review): interior lines of this function are elided in this view of
 * the file; the visible code clobbers ECX. */
27 static inline void decode_address( int addr_reg )
29 uintptr_t base = (sh4r.xlat_sh4_mode&SR_MD) ? (uintptr_t)sh4_address_space : (uintptr_t)sh4_user_address_space;
30 MOV_r32_r32( addr_reg, R_ECX );
31 SHR_imm8_r32( 12, R_ECX );
32 MOV_r32disp32x4_r32( R_ECX, base, R_ECX );
/* Emit a call to a zero-argument function at a fixed host address.
 * The target address is materialized in a register and called indirectly
 * (the CALL itself is on a line elided from this view).
 * NOTE(review): the original comment says EAX is clobbered for the indirect
 * call, but the visible code loads ECX — presumably the comment predates a
 * register change; confirm against the full file. */
36 * Note: clobbers EAX to make the indirect call - this isn't usually
37 * a problem since the callee will usually clobber it anyway.
39 static inline void call_func0( void *ptr )
41 load_imm32(R_ECX, (uint32_t)ptr);
/* Emit a call to a one-argument function: the argument is moved into EAX
 * (register-argument convention) and the target address loaded into ECX for
 * an indirect call (CALL elided from this view).
 * NOTE(review): the guard that skips the MOV when arg1 == R_EAX (present in
 * the sibling call_func* emitters below) is on lines elided here. */
46 static inline void call_func1( void *ptr, int arg1 )
49 MOV_r32_r32( arg1, R_EAX );
51 load_imm32(R_ECX, (uint32_t)ptr);
/* As call_func1, but the callee address is already in addr_reg rather than
 * being a compile-time constant.  Argument goes in EAX; the indirect
 * CALL through addr_reg is on a line elided from this view. */
55 static inline void call_func1_r32( int addr_reg, int arg1 )
58 MOV_r32_r32( arg1, R_EAX );
/* One-argument call through a function pointer stored at [preg + disp8]
 * (e.g. a vtable-style dispatch).  Argument goes in EAX. */
63 static inline void call_func1_r32disp8( int preg, uint32_t disp8, int arg1 )
66 MOV_r32_r32( arg1, R_EAX );
68 CALL_r32disp8(preg, disp8);
/* As call_func1_r32disp8, but additionally records exception-recovery state
 * before the call: load_exc_backpatch(R_EDX) loads the backpatch address
 * into EDX so the callee/exception path can resume correctly.
 * NOTE(review): the pc parameter's use is on lines elided from this view. */
71 static inline void call_func1_r32disp8_exc( int preg, uint32_t disp8, int arg1, int pc )
74 MOV_r32_r32( arg1, R_EAX );
76 load_exc_backpatch(R_EDX);
77 CALL_r32disp8(preg, disp8);
/* Two-argument call to a fixed address: arg1 in EAX, arg2 in EDX, target in
 * ECX for the indirect call (CALL elided from this view).  arg2 is moved
 * first so a MOV into EDX cannot clobber arg1 if arg1 happens to be EDX. */
80 static inline void call_func2( void *ptr, int arg1, int arg2 )
83 MOV_r32_r32( arg2, R_EDX );
86 MOV_r32_r32( arg1, R_EAX );
88 load_imm32(R_ECX, (uint32_t)ptr);
/* Two-argument call through a register-held function pointer: arg1 in EAX,
 * arg2 in EDX (arg2 moved first to avoid clobbering), indirect CALL through
 * addr_reg on a line elided from this view. */
92 static inline void call_func2_r32( int addr_reg, int arg1, int arg2 )
95 MOV_r32_r32( arg2, R_EDX );
98 MOV_r32_r32( arg1, R_EAX );
/* Two-argument call through a function pointer at [preg + disp8].
 * Arguments go in EAX/EDX; each MOV is skipped when the value is already in
 * the target register.  arg2 is placed before arg1 so that moving into EDX
 * cannot destroy an arg1 that lives in EDX. */
103 static inline void call_func2_r32disp8( int preg, uint32_t disp8, int arg1, int arg2 )
105 if( arg2 != R_EDX ) {
106 MOV_r32_r32( arg2, R_EDX );
108 if( arg1 != R_EAX ) {
109 MOV_r32_r32( arg1, R_EAX );
111 CALL_r32disp8(preg, disp8);
/* As call_func2_r32disp8, with exception-recovery support: before the call,
 * MOV_backpatch_esp8(0) stores backpatch state at [ESP+0] (EDX is occupied
 * by arg2 here, so the stack slot is used instead of the register as in the
 * one-argument _exc variants).
 * NOTE(review): the pc parameter's use is on lines elided from this view. */
114 static inline void call_func2_r32disp8_exc( int preg, uint32_t disp8, int arg1, int arg2, int pc )
116 if( arg2 != R_EDX ) {
117 MOV_r32_r32( arg2, R_EDX );
119 if( arg1 != R_EAX ) {
120 MOV_r32_r32( arg1, R_EAX );
122 MOV_backpatch_esp8( 0 );
123 CALL_r32disp8(preg, disp8);
/* One-argument call to a fixed address with exception-recovery support:
 * arg1 in EAX (skipped if already there), backpatch address in EDX, target
 * in ECX for the indirect call (CALL elided from this view).
 * NOTE(review): the pc parameter's use is on lines elided from this view. */
128 static inline void call_func1_exc( void *ptr, int arg1, int pc )
130 if( arg1 != R_EAX ) {
131 MOV_r32_r32( arg1, R_EAX );
133 load_exc_backpatch(R_EDX);
134 load_imm32(R_ECX, (uint32_t)ptr);
/* Two-argument call to a fixed address with exception-recovery support.
 * Args in EAX/EDX (arg2 first to avoid clobbering); since EDX is in use,
 * the backpatch state is written to the stack slot [ESP+0] instead.
 * Target loaded into ECX; the CALL itself is elided from this view.
 * NOTE(review): the pc parameter's use is on lines elided from this view. */
138 static inline void call_func2_exc( void *ptr, int arg1, int arg2, int pc )
140 if( arg2 != R_EDX ) {
141 MOV_r32_r32( arg2, R_EDX );
143 if( arg1 != R_EAX ) {
144 MOV_r32_r32( arg1, R_EAX );
146 MOV_backpatch_esp8(0);
147 load_imm32(R_ECX, (uint32_t)ptr);
/* Alternate (stack-argument) definition of call_func1 — presumably the
 * other branch of an #ifdef selecting between register and stack calling
 * conventions; the #ifdef lines are elided from this view.  SUB 12 plus the
 * 4-byte pushed argument (push elided) keeps ESP 16-byte aligned at the
 * call, per the OS X ABI noted in the file header; ADD 16 restores ESP. */
152 static inline void call_func1( void *ptr, int arg1 )
154 SUB_imm8s_r32( 12, R_ESP );
156 load_imm32(R_ECX, (uint32_t)ptr);
158 ADD_imm8s_r32( 16, R_ESP );
/* Alternate (stack-argument) definition of call_func2.  SUB 8 plus two
 * 4-byte pushed arguments (pushes elided from this view) keeps ESP 16-byte
 * aligned at the call; ADD 16 pops padding and both arguments. */
161 static inline void call_func2( void *ptr, int arg1, int arg2 )
163 SUB_imm8s_r32( 8, R_ESP );
166 load_imm32(R_ECX, (uint32_t)ptr);
168 ADD_imm8s_r32( 16, R_ESP );
/* NOTE(review): the function signature for this prologue emitter is on a
 * line elided from this view.  EBP is anchored at &sh4r + 128 so that the
 * SH4 register file can be addressed with signed 8-bit displacements in
 * both directions; SUB 8 reserves the local/alignment space described in
 * the comment below. */
174 * Emit the 'start of block' assembly. Sets up the stack frame and save
176 * Allocates 8 bytes for local variables, which also has the convenient
177 * side-effect of aligning the stack.
182 load_ptr( R_EBP, ((uint8_t *)&sh4r) + 128 );
183 SUB_imm8s_r32( 8, R_ESP );
/* Emit the 'end of block' assembly: releases the 8 bytes of local space
 * reserved by the block prologue (the epilogue's remaining instructions,
 * e.g. the return, are on lines elided from this view). */
186 static inline void exit_block( )
188 ADD_imm8s_r32( 8, R_ESP );
/* Exit the block when the target pc is already stored in the PC register.
 * Accounts elapsed SH4 cycles ((instructions executed) * sh4_cpu_period)
 * into sh4r.slice_cycle, then looks up the next translated block for the
 * current PC — via the TLB-aware lookup when the MMU is enabled, else the
 * direct physical lookup.  The final jump/return is elided from this view. */
194 * Exit the block with sh4r.new_pc written with the target pc
196 void exit_block_pcset( sh4addr_t pc )
198 load_imm32( R_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
199 ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) ); // 6
200 load_spreg( R_EAX, R_PC );
201 if( sh4_x86.tlb_on ) {
202 call_func1(xlat_get_code_by_vma,R_EAX);
204 call_func1(xlat_get_code,R_EAX);
/* Exit the block via sh4r.new_pc (delayed-branch target): copies new_pc
 * into pc, accounts elapsed cycles into slice_cycle, and looks up the next
 * translated block (TLB-aware when the MMU is on).  The final jump/return
 * is elided from this view. */
210 * Exit the block with sh4r.new_pc written with the target pc
212 void exit_block_newpcset( sh4addr_t pc )
214 load_imm32( R_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
215 ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) ); // 6
216 load_spreg( R_EAX, R_NEW_PC );
217 store_spreg( R_EAX, R_PC );
218 if( sh4_x86.tlb_on ) {
219 call_func1(xlat_get_code_by_vma,R_EAX);
221 call_func1(xlat_get_code,R_EAX);
/* Exit the block to a known absolute PC.  Stores the new pc, then resolves
 * the next translated block: if the target is inside the current icache
 * region the lookup-table entry is read directly (AND ~3 masks the entry's
 * low tag/flag bits), otherwise it falls back to the function lookup
 * (TLB-aware when the MMU is on).  Cycle accounting uses endpc, the address
 * just past the block's last instruction.  Trailing jump elided from view. */
228 * Exit the block to an absolute PC
230 void exit_block_abs( sh4addr_t pc, sh4addr_t endpc )
232 load_imm32( R_ECX, pc ); // 5
233 store_spreg( R_ECX, REG_OFFSET(pc) ); // 3
234 if( IS_IN_ICACHE(pc) ) {
235 MOV_moff32_EAX( xlat_get_lut_entry(GET_ICACHE_PHYS(pc)) ); // 5
236 AND_imm8s_r32( 0xFC, R_EAX ); // 3
237 } else if( sh4_x86.tlb_on ) {
238 call_func1(xlat_get_code_by_vma,R_ECX);
240 call_func1(xlat_get_code,R_ECX);
242 load_imm32( R_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
243 ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) ); // 6
/* Exit the block to a PC expressed relative to the block start: the new pc
 * is computed at runtime as (pc - block_start_pc) + current sh4r.pc, then
 * stored.  Next-block resolution and cycle accounting are identical to
 * exit_block_abs (direct LUT read when in-icache, masked with ~3; else
 * TLB-aware or direct function lookup).  Trailing jump elided from view. */
250 void exit_block_rel( sh4addr_t pc, sh4addr_t endpc )
252 load_imm32( R_ECX, pc - sh4_x86.block_start_pc ); // 5
253 ADD_sh4r_r32( R_PC, R_ECX );
254 store_spreg( R_ECX, REG_OFFSET(pc) ); // 3
255 if( IS_IN_ICACHE(pc) ) {
256 MOV_moff32_EAX( xlat_get_lut_entry(GET_ICACHE_PHYS(pc)) ); // 5
257 AND_imm8s_r32( 0xFC, R_EAX ); // 3
258 } else if( sh4_x86.tlb_on ) {
259 call_func1(xlat_get_code_by_vma,R_ECX);
261 call_func1(xlat_get_code,R_ECX);
263 load_imm32( R_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
264 ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) ); // 6
/* Exit the block by unconditionally raising a general exception with the
 * given exception code.  Updates pc (relative to block start) and cycle
 * count, calls sh4_raise_exception(code), then re-reads the (now vectored)
 * pc and resolves the handler's translated block (TLB-aware when the MMU
 * is on).  Trailing jump elided from this view. */
269 * Exit unconditionally with a general exception
271 void exit_block_exc( int code, sh4addr_t pc )
273 load_imm32( R_ECX, pc - sh4_x86.block_start_pc ); // 5
274 ADD_r32_sh4r( R_ECX, R_PC );
275 load_imm32( R_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
276 ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) ); // 6
277 load_imm32( R_EAX, code );
278 call_func1( sh4_raise_exception, R_EAX );
280 load_spreg( R_EAX, R_PC );
281 if( sh4_x86.tlb_on ) {
282 call_func1(xlat_get_code_by_vma,R_EAX);
284 call_func1(xlat_get_code,R_EAX);
291 * Write the block trailer (exception handling block)
293 void sh4_translate_end_block( sh4addr_t pc ) {
294 if( sh4_x86.branch_taken == FALSE ) {
295 // Didn't exit unconditionally already, so write the termination here
296 exit_block_rel( pc, pc );
298 if( sh4_x86.backpatch_posn != 0 ) {
301 uint8_t *end_ptr = xlat_output;
302 MOV_r32_r32( R_EDX, R_ECX );
303 ADD_r32_r32( R_EDX, R_ECX );
304 ADD_r32_sh4r( R_ECX, R_PC );
305 MOV_moff32_EAX( &sh4_cpu_period );
307 ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );
310 call_func1( sh4_raise_exception, R_EAX );
311 load_spreg( R_EAX, R_PC );
312 if( sh4_x86.tlb_on ) {
313 call_func1(xlat_get_code_by_vma,R_EAX);
315 call_func1(xlat_get_code,R_EAX);
319 // Exception already raised - just cleanup
320 uint8_t *preexc_ptr = xlat_output;
321 MOV_r32_r32( R_EDX, R_ECX );
322 ADD_r32_r32( R_EDX, R_ECX );
323 ADD_r32_sh4r( R_ECX, R_SPC );
324 MOV_moff32_EAX( &sh4_cpu_period );
326 ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );
327 load_spreg( R_EAX, R_PC );
328 if( sh4_x86.tlb_on ) {
329 call_func1(xlat_get_code_by_vma,R_EAX);
331 call_func1(xlat_get_code,R_EAX);
335 for( i=0; i< sh4_x86.backpatch_posn; i++ ) {
336 uint32_t *fixup_addr = (uint32_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset];
337 if( sh4_x86.backpatch_list[i].exc_code < 0 ) {
338 if( sh4_x86.backpatch_list[i].exc_code == -2 ) {
339 *fixup_addr = (uint32_t)xlat_output;
341 *fixup_addr += xlat_output - (uint8_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset] - 4;
343 load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
344 int rel = preexc_ptr - xlat_output;
347 *fixup_addr += xlat_output - (uint8_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset] - 4;
348 PUSH_imm32( sh4_x86.backpatch_list[i].exc_code );
349 load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
350 int rel = end_ptr - xlat_output;
/* DWARF2-unwind path for locating the native pc inside a translated block.
 * xlat_check_frame is the _Unwind_Backtrace callback: it stops the walk
 * (_URC_NORMAL_STOP) when a frame's instruction pointer falls inside the
 * [block_start, block_end) range recorded in the UnwindInfo argument, and
 * continues (_URC_NO_REASON) otherwise.
 * NOTE(review): the rest of struct UnwindInfo (block_end, and presumably a
 * result pc field set before returning) is on lines elided from this view. */
359 * The unwind methods only work if we compiled with DWARF2 frame information
360 * (ie -fexceptions), otherwise we have to use the direct frame scan.
362 #ifdef HAVE_EXCEPTIONS
366 uintptr_t block_start;
371 _Unwind_Reason_Code xlat_check_frame( struct _Unwind_Context *context, void *arg )
373 struct UnwindInfo *info = arg;
374 void *pc = (void *)_Unwind_GetIP(context);
375 if( ((uintptr_t)pc) >= info->block_start && ((uintptr_t)pc) < info->block_end ) {
377 return _URC_NORMAL_STOP;
380 return _URC_NO_REASON;
/* Find the native pc within the translated block [code, code+code_size) by
 * walking the call stack with _Unwind_Backtrace and the xlat_check_frame
 * callback above.  The return statement (presumably yielding the pc the
 * callback captured) is on lines elided from this view. */
383 void *xlat_get_native_pc( void *code, uint32_t code_size )
385 struct _Unwind_Exception exc;
386 struct UnwindInfo info;
389 info.block_start = (uintptr_t)code;
390 info.block_end = info.block_start + code_size;
392 _Unwind_Backtrace( xlat_check_frame, &info );
/* Fallback (no-DWARF) version: directly scans up to 8 saved-EBP frame links
 * starting from the current EBP, looking for the frame whose saved EBP
 * equals the translator's sentinel value &sh4r + 128 (the EBP anchor set up
 * by the block prologue).  When found, the saved return address at [EBP+4]
 * is the native pc inside the translated block; if EBP is NULL or 8 frames
 * are exhausted, the search fails (frame_not_found label elided from view).
 * NOTE(review): the asm output operand list, the branch to frame_found, and
 * the surrounding function body are on lines elided from this view. */
396 void *xlat_get_native_pc( void *code, uint32_t code_size )
400 "mov %%ebp, %%eax\n\t"
401 "mov $0x8, %%ecx\n\t"
403 "frame_loop: test %%eax, %%eax\n\t"
404 "je frame_not_found\n\t"
405 "cmp (%%eax), %%edx\n\t"
407 "sub $0x1, %%ecx\n\t"
408 "je frame_not_found\n\t"
409 "movl (%%eax), %%eax\n\t"
411 "frame_found: movl 0x4(%%eax), %0\n"
414 : "r" (((uint8_t *)&sh4r) + 128 )
415 : "eax", "ecx", "edx" );
420 #endif /* !lxdream_ia32mac.h */
.