lxdream.org :: lxdream :: r992:7c15f8a71995
changeset 992:7c15f8a71995
parent 991:60c7fab9c880
child 993:f59ebcf41f5d
author nkeynes
date Wed Mar 04 23:27:59 2009 +0000
Move ABI headers to xlat/x86 as well (and finally rename erroneously named ia64abi to amd64abi)
src/sh4/ia32abi.h
src/sh4/ia64abi.h
src/sh4/sh4x86.in
src/xlat/x86/amd64abi.h
src/xlat/x86/ia32abi.h
1.1 --- a/src/sh4/ia32abi.h Wed Mar 04 23:12:21 2009 +0000
1.2 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000
1.3 @@ -1,426 +0,0 @@
1.4 -/**
1.5 - * $Id$
1.6 - *
1.7 - * Provides the implementation for the ia32 ABI variant
1.8 - * (eg prologue, epilogue, and calling conventions). Stack frame is
1.9 - * aligned on 16-byte boundaries for the benefit of OS X (which
1.10 - * requires it).
1.11 - *
1.12 - * Copyright (c) 2007 Nathan Keynes.
1.13 - *
1.14 - * This program is free software; you can redistribute it and/or modify
1.15 - * it under the terms of the GNU General Public License as published by
1.16 - * the Free Software Foundation; either version 2 of the License, or
1.17 - * (at your option) any later version.
1.18 - *
1.19 - * This program is distributed in the hope that it will be useful,
1.20 - * but WITHOUT ANY WARRANTY; without even the implied warranty of
1.21 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1.22 - * GNU General Public License for more details.
1.23 - */
1.24 -
1.25 -#ifndef lxdream_ia32mac_H
1.26 -#define lxdream_ia32mac_H 1
1.27 -
1.28 -#define load_ptr( reg, ptr ) load_imm32( reg, (uint32_t)ptr );
1.29 -
1.30 -static inline void decode_address( int addr_reg )
1.31 -{
1.32 - uintptr_t base = (sh4r.xlat_sh4_mode&SR_MD) ? (uintptr_t)sh4_address_space : (uintptr_t)sh4_user_address_space;
1.33 - MOVL_r32_r32( addr_reg, REG_ECX );
1.34 - SHRL_imm_r32( 12, REG_ECX );
1.35 - MOVP_sib_rptr( 2, REG_ECX, -1, base, REG_ECX );
1.36 -}
1.37 -
1.38 -/**
1.39 - * Note: clobbers EAX to make the indirect call - this isn't usually
1.40 - * a problem since the callee will usually clobber it anyway.
1.41 - */
1.42 -static inline void call_func0( void *ptr )
1.43 -{
1.44 - load_imm32(REG_ECX, (uint32_t)ptr);
1.45 - CALL_r32(REG_ECX);
1.46 -}
1.47 -
1.48 -#ifdef HAVE_FASTCALL
1.49 -static inline void call_func1( void *ptr, int arg1 )
1.50 -{
1.51 - if( arg1 != REG_EAX ) {
1.52 - MOVL_r32_r32( arg1, REG_EAX );
1.53 - }
1.54 - MOVP_immptr_rptr((uintptr_t)ptr, REG_ECX);
1.55 - CALL_r32(REG_ECX);
1.56 -}
1.57 -
1.58 -static inline void call_func1_r32( int addr_reg, int arg1 )
1.59 -{
1.60 - if( arg1 != REG_EAX ) {
1.61 - MOVL_r32_r32( arg1, REG_EAX );
1.62 - }
1.63 - CALL_r32(addr_reg);
1.64 -}
1.65 -
1.66 -static inline void call_func1_r32disp8( int preg, uint32_t disp8, int arg1 )
1.67 -{
1.68 - if( arg1 != REG_EAX ) {
1.69 - MOVL_r32_r32( arg1, REG_EAX );
1.70 - }
1.71 - CALL_r32disp(preg, disp8);
1.72 -}
1.73 -
1.74 -static inline void call_func1_r32disp8_exc( int preg, uint32_t disp8, int arg1, int pc )
1.75 -{
1.76 - if( arg1 != REG_EAX ) {
1.77 - MOVL_r32_r32( arg1, REG_EAX );
1.78 - }
1.79 - MOVP_immptr_rptr(0,REG_EDX);
1.80 - sh4_x86_add_backpatch(xlat_output, pc, -2);
1.81 - CALL_r32disp(preg, disp8);
1.82 -}
1.83 -
1.84 -static inline void call_func2( void *ptr, int arg1, int arg2 )
1.85 -{
1.86 - if( arg2 != REG_EDX ) {
1.87 - MOVL_r32_r32( arg2, REG_EDX );
1.88 - }
1.89 - if( arg1 != REG_EAX ) {
1.90 - MOVL_r32_r32( arg1, REG_EAX );
1.91 - }
1.92 - MOVP_immptr_rptr((uint32_t)ptr, REG_ECX);
1.93 - CALL_r32(REG_ECX);
1.94 -}
1.95 -
1.96 -static inline void call_func2_r32( int addr_reg, int arg1, int arg2 )
1.97 -{
1.98 - if( arg2 != REG_EDX ) {
1.99 - MOVL_r32_r32( arg2, REG_EDX );
1.100 - }
1.101 - if( arg1 != REG_EAX ) {
1.102 - MOVL_r32_r32( arg1, REG_EAX );
1.103 - }
1.104 - CALL_r32(addr_reg);
1.105 -}
1.106 -
1.107 -static inline void call_func2_r32disp8( int preg, uint32_t disp8, int arg1, int arg2 )
1.108 -{
1.109 - if( arg2 != REG_EDX ) {
1.110 - MOVL_r32_r32( arg2, REG_EDX );
1.111 - }
1.112 - if( arg1 != REG_EAX ) {
1.113 - MOVL_r32_r32( arg1, REG_EAX );
1.114 - }
1.115 - CALL_r32disp(preg, disp8);
1.116 -}
1.117 -
1.118 -static inline void call_func2_r32disp8_exc( int preg, uint32_t disp8, int arg1, int arg2, int pc )
1.119 -{
1.120 - if( arg2 != REG_EDX ) {
1.121 - MOVL_r32_r32( arg2, REG_EDX );
1.122 - }
1.123 - if( arg1 != REG_EAX ) {
1.124 - MOVL_r32_r32( arg1, REG_EAX );
1.125 - }
1.126 - MOVL_imm32_rspdisp(0,0);
1.127 - sh4_x86_add_backpatch(xlat_output, pc, -2);
1.128 - CALL_r32disp(preg, disp8);
1.129 -}
1.130 -
1.131 -
1.132 -
1.133 -static inline void call_func1_exc( void *ptr, int arg1, int pc )
1.134 -{
1.135 - if( arg1 != REG_EAX ) {
1.136 - MOVL_r32_r32( arg1, REG_EAX );
1.137 - }
1.138 - MOVP_immptr_rptr(0,REG_EDX);
1.139 - sh4_x86_add_backpatch(xlat_output, pc, -2);
1.140 - MOVP_immptr_rptr((uint32_t)ptr, REG_ECX);
1.141 - CALL_r32(REG_ECX);
1.142 -}
1.143 -
1.144 -static inline void call_func2_exc( void *ptr, int arg1, int arg2, int pc )
1.145 -{
1.146 - if( arg2 != REG_EDX ) {
1.147 - MOVL_r32_r32( arg2, REG_EDX );
1.148 - }
1.149 - if( arg1 != REG_EAX ) {
1.150 - MOVL_r32_r32( arg1, REG_EAX );
1.151 - }
1.152 - MOVL_imm32_rspdisp(0,0);
1.153 - sh4_x86_add_backpatch(xlat_output, pc, -2);
1.154 - MOVP_immptr_rptr((uint32_t)ptr, REG_ECX);
1.155 - CALL_r32(REG_ECX);
1.156 -}
1.157 -
1.158 -#else
1.159 -static inline void call_func1( void *ptr, int arg1 )
1.160 -{
1.161 - SUBL_imms_r32( 12, REG_ESP );
1.162 - PUSH_r32(arg1);
1.163 - MOVP_immptr_rptr((uint32_t)ptr, REG_ECX);
1.164 - CALL_r32(REG_ECX);
1.165 - ADDL_imms_r32( 16, REG_ESP );
1.166 -}
1.167 -
1.168 -static inline void call_func2( void *ptr, int arg1, int arg2 )
1.169 -{
1.170 - SUBL_imms_r32( 8, REG_ESP );
1.171 - PUSH_r32(arg2);
1.172 - PUSH_r32(arg1);
1.173 - MOVP_immptr_rptr((uint32_t)ptr, REG_ECX);
1.174 - CALL_r32(REG_ECX);
1.175 - ADDL_imms_r32( 16, REG_ESP );
1.176 -}
1.177 -
1.178 -#endif
1.179 -
1.180 -/**
1.181 - * Emit the 'start of block' assembly. Sets up the stack frame and save
1.182 - * SI/DI as required
1.183 - * Allocates 8 bytes for local variables, which also has the convenient
1.184 - * side-effect of aligning the stack.
1.185 - */
1.186 -void enter_block( )
1.187 -{
1.188 - PUSH_r32(REG_EBP);
1.189 - load_ptr( REG_EBP, ((uint8_t *)&sh4r) + 128 );
1.190 - SUBL_imms_r32( 8, REG_ESP );
1.191 -}
1.192 -
1.193 -static inline void exit_block( )
1.194 -{
1.195 - ADDL_imms_r32( 8, REG_ESP );
1.196 - POP_r32(REG_EBP);
1.197 - RET();
1.198 -}
1.199 -
1.200 -/**
1.201 - * Exit the block with sh4r.new_pc written with the target pc
1.202 - */
1.203 -void exit_block_pcset( sh4addr_t pc )
1.204 -{
1.205 - load_imm32( REG_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
1.206 - ADDL_r32_rbpdisp( REG_ECX, REG_OFFSET(slice_cycle) ); // 6
1.207 - load_spreg( REG_EAX, R_PC );
1.208 - if( sh4_x86.tlb_on ) {
1.209 - call_func1(xlat_get_code_by_vma,REG_EAX);
1.210 - } else {
1.211 - call_func1(xlat_get_code,REG_EAX);
1.212 - }
1.213 - exit_block();
1.214 -}
1.215 -
1.216 -/**
1.217 - * Exit the block with sh4r.new_pc written with the target pc
1.218 - */
1.219 -void exit_block_newpcset( sh4addr_t pc )
1.220 -{
1.221 - load_imm32( REG_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
1.222 - ADDL_r32_rbpdisp( REG_ECX, REG_OFFSET(slice_cycle) ); // 6
1.223 - load_spreg( REG_EAX, R_NEW_PC );
1.224 - store_spreg( REG_EAX, R_PC );
1.225 - if( sh4_x86.tlb_on ) {
1.226 - call_func1(xlat_get_code_by_vma,REG_EAX);
1.227 - } else {
1.228 - call_func1(xlat_get_code,REG_EAX);
1.229 - }
1.230 - exit_block();
1.231 -}
1.232 -
1.233 -
1.234 -/**
1.235 - * Exit the block to an absolute PC
1.236 - */
1.237 -void exit_block_abs( sh4addr_t pc, sh4addr_t endpc )
1.238 -{
1.239 - load_imm32( REG_ECX, pc ); // 5
1.240 - store_spreg( REG_ECX, REG_OFFSET(pc) ); // 3
1.241 - if( IS_IN_ICACHE(pc) ) {
1.242 - MOVP_moffptr_rax( xlat_get_lut_entry(GET_ICACHE_PHYS(pc)) ); // 5
1.243 - ANDL_imms_r32( 0xFFFFFFFC, REG_EAX ); // 3
1.244 - } else if( sh4_x86.tlb_on ) {
1.245 - call_func1(xlat_get_code_by_vma,REG_ECX);
1.246 - } else {
1.247 - call_func1(xlat_get_code,REG_ECX);
1.248 - }
1.249 - load_imm32( REG_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
1.250 - ADDL_r32_rbpdisp( REG_ECX, REG_OFFSET(slice_cycle) ); // 6
1.251 - exit_block();
1.252 -}
1.253 -
1.254 -/**
1.255 - * Exit the block to a relative PC
1.256 - */
1.257 -void exit_block_rel( sh4addr_t pc, sh4addr_t endpc )
1.258 -{
1.259 - load_imm32( REG_ECX, pc - sh4_x86.block_start_pc ); // 5
1.260 - ADDL_rbpdisp_r32( R_PC, REG_ECX );
1.261 - store_spreg( REG_ECX, R_PC ); // 3
1.262 - if( IS_IN_ICACHE(pc) ) {
1.263 - MOVP_moffptr_rax( xlat_get_lut_entry(GET_ICACHE_PHYS(pc)) ); // 5
1.264 - ANDL_imms_r32( 0xFFFFFFFC, REG_EAX ); // 3
1.265 - } else if( sh4_x86.tlb_on ) {
1.266 - call_func1(xlat_get_code_by_vma,REG_ECX);
1.267 - } else {
1.268 - call_func1(xlat_get_code,REG_ECX);
1.269 - }
1.270 - load_imm32( REG_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
1.271 - ADDL_r32_rbpdisp( REG_ECX, REG_OFFSET(slice_cycle) ); // 6
1.272 - exit_block();
1.273 -}
1.274 -
1.275 -/**
1.276 - * Exit unconditionally with a general exception
1.277 - */
1.278 -void exit_block_exc( int code, sh4addr_t pc )
1.279 -{
1.280 - load_imm32( REG_ECX, pc - sh4_x86.block_start_pc ); // 5
1.281 - ADDL_r32_rbpdisp( REG_ECX, R_PC );
1.282 - load_imm32( REG_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
1.283 - ADDL_r32_rbpdisp( REG_ECX, REG_OFFSET(slice_cycle) ); // 6
1.284 - load_imm32( REG_EAX, code );
1.285 - call_func1( sh4_raise_exception, REG_EAX );
1.286 -
1.287 - load_spreg( REG_EAX, R_PC );
1.288 - if( sh4_x86.tlb_on ) {
1.289 - call_func1(xlat_get_code_by_vma,REG_EAX);
1.290 - } else {
1.291 - call_func1(xlat_get_code,REG_EAX);
1.292 - }
1.293 -
1.294 - exit_block();
1.295 -}
1.296 -
1.297 -/**
1.298 - * Write the block trailer (exception handling block)
1.299 - */
1.300 -void sh4_translate_end_block( sh4addr_t pc ) {
1.301 - if( sh4_x86.branch_taken == FALSE ) {
1.302 - // Didn't exit unconditionally already, so write the termination here
1.303 - exit_block_rel( pc, pc );
1.304 - }
1.305 - if( sh4_x86.backpatch_posn != 0 ) {
1.306 - unsigned int i;
1.307 - // Raise exception
1.308 - uint8_t *end_ptr = xlat_output;
1.309 - MOVL_r32_r32( REG_EDX, REG_ECX );
1.310 - ADDL_r32_r32( REG_EDX, REG_ECX );
1.311 - ADDL_r32_rbpdisp( REG_ECX, R_PC );
1.312 - MOVL_moffptr_eax( &sh4_cpu_period );
1.313 - MULL_r32( REG_EDX );
1.314 - ADDL_r32_rbpdisp( REG_EAX, REG_OFFSET(slice_cycle) );
1.315 -
1.316 - POP_r32(REG_EAX);
1.317 - call_func1( sh4_raise_exception, REG_EAX );
1.318 - load_spreg( REG_EAX, R_PC );
1.319 - if( sh4_x86.tlb_on ) {
1.320 - call_func1(xlat_get_code_by_vma,REG_EAX);
1.321 - } else {
1.322 - call_func1(xlat_get_code,REG_EAX);
1.323 - }
1.324 - exit_block();
1.325 -
1.326 - // Exception already raised - just cleanup
1.327 - uint8_t *preexc_ptr = xlat_output;
1.328 - MOVL_r32_r32( REG_EDX, REG_ECX );
1.329 - ADDL_r32_r32( REG_EDX, REG_ECX );
1.330 - ADDL_r32_rbpdisp( REG_ECX, R_SPC );
1.331 - MOVL_moffptr_eax( &sh4_cpu_period );
1.332 - MULL_r32( REG_EDX );
1.333 - ADDL_r32_rbpdisp( REG_EAX, REG_OFFSET(slice_cycle) );
1.334 - load_spreg( REG_EAX, R_PC );
1.335 - if( sh4_x86.tlb_on ) {
1.336 - call_func1(xlat_get_code_by_vma,REG_EAX);
1.337 - } else {
1.338 - call_func1(xlat_get_code,REG_EAX);
1.339 - }
1.340 - exit_block();
1.341 -
1.342 - for( i=0; i< sh4_x86.backpatch_posn; i++ ) {
1.343 - uint32_t *fixup_addr = (uint32_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset];
1.344 - if( sh4_x86.backpatch_list[i].exc_code < 0 ) {
1.345 - if( sh4_x86.backpatch_list[i].exc_code == -2 ) {
1.346 - *fixup_addr = (uint32_t)xlat_output;
1.347 - } else {
1.348 - *fixup_addr += xlat_output - (uint8_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset] - 4;
1.349 - }
1.350 - load_imm32( REG_EDX, sh4_x86.backpatch_list[i].fixup_icount );
1.351 - int rel = preexc_ptr - xlat_output;
1.352 - JMP_prerel(rel);
1.353 - } else {
1.354 - *fixup_addr += xlat_output - (uint8_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset] - 4;
1.355 - PUSH_imm32( sh4_x86.backpatch_list[i].exc_code );
1.356 - load_imm32( REG_EDX, sh4_x86.backpatch_list[i].fixup_icount );
1.357 - int rel = end_ptr - xlat_output;
1.358 - JMP_prerel(rel);
1.359 - }
1.360 - }
1.361 - }
1.362 -}
1.363 -
1.364 -
1.365 -/**
1.366 - * The unwind methods only work if we compiled with DWARF2 frame information
1.367 - * (ie -fexceptions), otherwise we have to use the direct frame scan.
1.368 - */
1.369 -#ifdef HAVE_EXCEPTIONS
1.370 -#include <unwind.h>
1.371 -
1.372 -struct UnwindInfo {
1.373 - uintptr_t block_start;
1.374 - uintptr_t block_end;
1.375 - void *pc;
1.376 -};
1.377 -
1.378 -_Unwind_Reason_Code xlat_check_frame( struct _Unwind_Context *context, void *arg )
1.379 -{
1.380 - struct UnwindInfo *info = arg;
1.381 - void *pc = (void *)_Unwind_GetIP(context);
1.382 - if( ((uintptr_t)pc) >= info->block_start && ((uintptr_t)pc) < info->block_end ) {
1.383 - info->pc = pc;
1.384 - return _URC_NORMAL_STOP;
1.385 - }
1.386 -
1.387 - return _URC_NO_REASON;
1.388 -}
1.389 -
1.390 -void *xlat_get_native_pc( void *code, uint32_t code_size )
1.391 -{
1.392 - struct _Unwind_Exception exc;
1.393 - struct UnwindInfo info;
1.394 -
1.395 - info.pc = NULL;
1.396 - info.block_start = (uintptr_t)code;
1.397 - info.block_end = info.block_start + code_size;
1.398 - void *result = NULL;
1.399 - _Unwind_Backtrace( xlat_check_frame, &info );
1.400 - return info.pc;
1.401 -}
1.402 -#else
1.403 -void *xlat_get_native_pc( void *code, uint32_t code_size )
1.404 -{
1.405 - void *result = NULL;
1.406 - asm(
1.407 - "mov %%ebp, %%eax\n\t"
1.408 - "mov $0x8, %%ecx\n\t"
1.409 - "mov %1, %%edx\n"
1.410 - "frame_loop: test %%eax, %%eax\n\t"
1.411 - "je frame_not_found\n\t"
1.412 - "cmp (%%eax), %%edx\n\t"
1.413 - "je frame_found\n\t"
1.414 - "sub $0x1, %%ecx\n\t"
1.415 - "je frame_not_found\n\t"
1.416 - "movl (%%eax), %%eax\n\t"
1.417 - "jmp frame_loop\n"
1.418 - "frame_found: movl 0x4(%%eax), %0\n"
1.419 - "frame_not_found:"
1.420 - : "=r" (result)
1.421 - : "r" (((uint8_t *)&sh4r) + 128 )
1.422 - : "eax", "ecx", "edx" );
1.423 - return result;
1.424 -}
1.425 -#endif
1.426 -
1.427 -#endif /* !lxdream_ia32mac.h */
1.428 -
1.429 -
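The ia32 decode_address() removed above (and re-added verbatim as src/xlat/x86/ia32abi.h below) emits a three-instruction sequence whose runtime effect is roughly the following C. This is a sketch for orientation only, not part of the changeset; it borrows sh4r, SR_MD and the two address-space globals from the surrounding source, and the function name is invented:

    static void *sh4_page_entry( uint32_t addr )
    {
        /* Pick the privileged or user lookup table from the cached mode bits */
        void **base = (sh4r.xlat_sh4_mode & SR_MD) ? (void **)sh4_address_space
                                                   : (void **)sh4_user_address_space;
        /* MOVL/SHRL/MOVP_sib_rptr == index the table by 4KB page number;
           the SIB scale of 2 (x4) matches 32-bit pointers */
        return base[addr >> 12];
    }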
2.1 --- a/src/sh4/ia64abi.h Wed Mar 04 23:12:21 2009 +0000
2.2 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000
2.3 @@ -1,318 +0,0 @@
2.4 -/**
2.5 - * $Id$
2.6 - *
2.7 - * Provides the implementation for the AMD64 ABI (eg prologue, epilogue, and
2.8 - * calling conventions)
2.9 - *
2.10 - * Copyright (c) 2007 Nathan Keynes.
2.11 - *
2.12 - * This program is free software; you can redistribute it and/or modify
2.13 - * it under the terms of the GNU General Public License as published by
2.14 - * the Free Software Foundation; either version 2 of the License, or
2.15 - * (at your option) any later version.
2.16 - *
2.17 - * This program is distributed in the hope that it will be useful,
2.18 - * but WITHOUT ANY WARRANTY; without even the implied warranty of
2.19 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2.20 - * GNU General Public License for more details.
2.21 - */
2.22 -
2.23 -#ifndef lxdream_ia64abi_H
2.24 -#define lxdream_ia64abi_H 1
2.25 -
2.26 -#include <unwind.h>
2.27 -
2.28 -#define load_ptr( reg, ptr ) load_imm64( reg, (uint64_t)ptr );
2.29 -
2.30 -static inline void decode_address( int addr_reg )
2.31 -{
2.32 - uintptr_t base = (sh4r.xlat_sh4_mode&SR_MD) ? (uintptr_t)sh4_address_space : (uintptr_t)sh4_user_address_space;
2.33 - MOVL_r32_r32( addr_reg, REG_RCX );
2.34 - SHRL_imm_r32( 12, REG_RCX );
2.35 - MOVP_immptr_rptr( base, REG_RDI );
2.36 - MOVP_sib_rptr(3, REG_RCX, REG_RDI, 0, REG_RCX);
2.37 -}
2.38 -
2.39 -/**
2.40 - * Note: clobbers EAX to make the indirect call - this isn't usually
2.41 - * a problem since the callee will usually clobber it anyway.
2.42 - * Size: 12 bytes
2.43 - */
2.44 -#define CALL_FUNC0_SIZE 12
2.45 -static inline void call_func0( void *ptr )
2.46 -{
2.47 - MOVQ_imm64_r64((uint64_t)ptr, REG_RAX);
2.48 - CALL_r32(REG_RAX);
2.49 -}
2.50 -
2.51 -static inline void call_func1( void *ptr, int arg1 )
2.52 -{
2.53 - MOVQ_r64_r64(arg1, REG_RDI);
2.54 - call_func0(ptr);
2.55 -}
2.56 -
2.57 -static inline void call_func1_exc( void *ptr, int arg1, int pc )
2.58 -{
2.59 - MOVQ_r64_r64(arg1, REG_RDI);
2.60 - MOVP_immptr_rptr(0, REG_RSI);
2.61 - sh4_x86_add_backpatch( xlat_output, pc, -2 );
2.62 - call_func0(ptr);
2.63 -}
2.64 -
2.65 -static inline void call_func1_r32disp8( int preg, uint32_t disp8, int arg1 )
2.66 -{
2.67 - MOVQ_r64_r64(arg1, REG_RDI);
2.68 - CALL_r32disp(preg, disp8);
2.69 -}
2.70 -
2.71 -static inline void call_func1_r32disp8_exc( int preg, uint32_t disp8, int arg1, int pc )
2.72 -{
2.73 - MOVQ_r64_r64(arg1, REG_RDI);
2.74 - MOVP_immptr_rptr(0, REG_RSI);
2.75 - sh4_x86_add_backpatch( xlat_output, pc, -2 );
2.76 - CALL_r32disp(preg, disp8);
2.77 -}
2.78 -
2.79 -static inline void call_func2( void *ptr, int arg1, int arg2 )
2.80 -{
2.81 - MOVQ_r64_r64(arg1, REG_RDI);
2.82 - MOVQ_r64_r64(arg2, REG_RSI);
2.83 - call_func0(ptr);
2.84 -}
2.85 -
2.86 -static inline void call_func2_r32disp8( int preg, uint32_t disp8, int arg1, int arg2 )
2.87 -{
2.88 - MOVQ_r64_r64(arg1, REG_RDI);
2.89 - MOVQ_r64_r64(arg2, REG_RSI);
2.90 - CALL_r32disp(preg, disp8);
2.91 -}
2.92 -
2.93 -static inline void call_func2_r32disp8_exc( int preg, uint32_t disp8, int arg1, int arg2, int pc )
2.94 -{
2.95 - MOVQ_r64_r64(arg1, REG_RDI);
2.96 - MOVQ_r64_r64(arg2, REG_RSI);
2.97 - MOVP_immptr_rptr(0, REG_RDX);
2.98 - sh4_x86_add_backpatch( xlat_output, pc, -2 );
2.99 - CALL_r32disp(preg, disp8);
2.100 -}
2.101 -
2.102 -
2.103 -
2.104 -/**
2.105 - * Emit the 'start of block' assembly. Sets up the stack frame and save
2.106 - * SI/DI as required
2.107 - */
2.108 -void enter_block( )
2.109 -{
2.110 - PUSH_r32(REG_RBP);
2.111 - load_ptr( REG_RBP, ((uint8_t *)&sh4r) + 128 );
2.112 - // Minimum aligned allocation is 16 bytes
2.113 - SUBQ_imms_r64( 16, REG_RSP );
2.114 -}
2.115 -
2.116 -static inline void exit_block( )
2.117 -{
2.118 - ADDQ_imms_r64( 16, REG_RSP );
2.119 - POP_r32(REG_RBP);
2.120 - RET();
2.121 -}
2.122 -
2.123 -/**
2.124 - * Exit the block with sh4r.pc already written
2.125 - */
2.126 -void exit_block_pcset( sh4addr_t pc )
2.127 -{
2.128 - load_imm32( REG_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
2.129 - ADDL_r32_rbpdisp( REG_ECX, REG_OFFSET(slice_cycle) ); // 6
2.130 - load_spreg( REG_RAX, R_PC );
2.131 - if( sh4_x86.tlb_on ) {
2.132 - call_func1(xlat_get_code_by_vma,REG_RAX);
2.133 - } else {
2.134 - call_func1(xlat_get_code,REG_RAX);
2.135 - }
2.136 - exit_block();
2.137 -}
2.138 -
2.139 -/**
2.140 - * Exit the block with sh4r.new_pc written with the target address
2.141 - */
2.142 -void exit_block_newpcset( sh4addr_t pc )
2.143 -{
2.144 - load_imm32( REG_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
2.145 - ADDL_r32_rbpdisp( REG_ECX, REG_OFFSET(slice_cycle) ); // 6
2.146 - load_spreg( REG_RAX, R_NEW_PC );
2.147 - store_spreg( REG_RAX, R_PC );
2.148 - if( sh4_x86.tlb_on ) {
2.149 - call_func1(xlat_get_code_by_vma,REG_RAX);
2.150 - } else {
2.151 - call_func1(xlat_get_code,REG_RAX);
2.152 - }
2.153 - exit_block();
2.154 -}
2.155 -
2.156 -#define EXIT_BLOCK_SIZE(pc) (25 + (IS_IN_ICACHE(pc)?10:CALL_FUNC1_SIZE))
2.157 -/**
2.158 - * Exit the block to an absolute PC
2.159 - */
2.160 -void exit_block_abs( sh4addr_t pc, sh4addr_t endpc )
2.161 -{
2.162 - load_imm32( REG_RCX, pc ); // 5
2.163 - store_spreg( REG_RCX, REG_OFFSET(pc) ); // 3
2.164 - if( IS_IN_ICACHE(pc) ) {
2.165 - MOVP_moffptr_rax( xlat_get_lut_entry(pc) );
2.166 - ANDQ_imms_r64( 0xFFFFFFFC, REG_RAX ); // 4
2.167 - } else if( sh4_x86.tlb_on ) {
2.168 - call_func1(xlat_get_code_by_vma, REG_RCX);
2.169 - } else {
2.170 - call_func1(xlat_get_code,REG_RCX);
2.171 - }
2.172 - load_imm32( REG_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
2.173 - ADDL_r32_rbpdisp( REG_ECX, REG_OFFSET(slice_cycle) ); // 6
2.174 - exit_block();
2.175 -}
2.176 -
2.177 -
2.178 -#define EXIT_BLOCK_REL_SIZE(pc) (28 + (IS_IN_ICACHE(pc)?10:CALL_FUNC1_SIZE))
2.179 -
2.180 -/**
2.181 - * Exit the block to a relative PC
2.182 - */
2.183 -void exit_block_rel( sh4addr_t pc, sh4addr_t endpc )
2.184 -{
2.185 - load_imm32( REG_ECX, pc - sh4_x86.block_start_pc ); // 5
2.186 - ADDL_rbpdisp_r32( R_PC, REG_ECX );
2.187 - store_spreg( REG_ECX, REG_OFFSET(pc) ); // 3
2.188 - if( IS_IN_ICACHE(pc) ) {
2.189 - MOVP_moffptr_rax( xlat_get_lut_entry(GET_ICACHE_PHYS(pc)) ); // 5
2.190 - ANDQ_imms_r64( 0xFFFFFFFC, REG_RAX ); // 4
2.191 - } else if( sh4_x86.tlb_on ) {
2.192 - call_func1(xlat_get_code_by_vma,REG_RCX);
2.193 - } else {
2.194 - call_func1(xlat_get_code,REG_RCX);
2.195 - }
2.196 - load_imm32( REG_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
2.197 - ADDL_r32_rbpdisp( REG_ECX, REG_OFFSET(slice_cycle) ); // 6
2.198 - exit_block();
2.199 -}
2.200 -
2.201 -/**
2.202 - * Exit unconditionally with a general exception
2.203 - */
2.204 -void exit_block_exc( int code, sh4addr_t pc )
2.205 -{
2.206 - load_imm32( REG_ECX, pc - sh4_x86.block_start_pc ); // 5
2.207 - ADDL_r32_rbpdisp( REG_ECX, R_PC );
2.208 - load_imm32( REG_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
2.209 - ADDL_r32_rbpdisp( REG_ECX, REG_OFFSET(slice_cycle) ); // 6
2.210 - load_imm32( REG_RAX, code );
2.211 - call_func1( sh4_raise_exception, REG_RAX );
2.212 -
2.213 - load_spreg( REG_RAX, R_PC );
2.214 - if( sh4_x86.tlb_on ) {
2.215 - call_func1(xlat_get_code_by_vma,REG_RAX);
2.216 - } else {
2.217 - call_func1(xlat_get_code,REG_RAX);
2.218 - }
2.219 -
2.220 - exit_block();
2.221 -}
2.222 -
2.223 -
2.224 -/**
2.225 - * Write the block trailer (exception handling block)
2.226 - */
2.227 -void sh4_translate_end_block( sh4addr_t pc ) {
2.228 - if( sh4_x86.branch_taken == FALSE ) {
2.229 - // Didn't exit unconditionally already, so write the termination here
2.230 - exit_block_rel( pc, pc );
2.231 - }
2.232 - if( sh4_x86.backpatch_posn != 0 ) {
2.233 - unsigned int i;
2.234 - // Raise exception
2.235 - uint8_t *end_ptr = xlat_output;
2.236 - MOVL_r32_r32( REG_RDX, REG_RCX );
2.237 - ADDL_r32_r32( REG_RDX, REG_RCX );
2.238 - ADDL_r32_rbpdisp( REG_RCX, R_PC );
2.239 - MOVL_moffptr_eax( &sh4_cpu_period );
2.240 - MULL_r32( REG_RDX );
2.241 - ADDL_r32_rbpdisp( REG_RAX, REG_OFFSET(slice_cycle) );
2.242 -
2.243 - call_func0( sh4_raise_exception );
2.244 - load_spreg( REG_RAX, R_PC );
2.245 - if( sh4_x86.tlb_on ) {
2.246 - call_func1(xlat_get_code_by_vma,REG_RAX);
2.247 - } else {
2.248 - call_func1(xlat_get_code,REG_RAX);
2.249 - }
2.250 - exit_block();
2.251 -
2.252 - // Exception already raised - just cleanup
2.253 - uint8_t *preexc_ptr = xlat_output;
2.254 - MOVL_r32_r32( REG_EDX, REG_ECX );
2.255 - ADDL_r32_r32( REG_EDX, REG_ECX );
2.256 - ADDL_r32_rbpdisp( REG_ECX, R_SPC );
2.257 - MOVL_moffptr_eax( &sh4_cpu_period );
2.258 - MULL_r32( REG_EDX );
2.259 - ADDL_r32_rbpdisp( REG_EAX, REG_OFFSET(slice_cycle) );
2.260 - load_spreg( REG_RDI, R_PC );
2.261 - if( sh4_x86.tlb_on ) {
2.262 - call_func0(xlat_get_code_by_vma);
2.263 - } else {
2.264 - call_func0(xlat_get_code);
2.265 - }
2.266 - exit_block();
2.267 -
2.268 - for( i=0; i< sh4_x86.backpatch_posn; i++ ) {
2.269 - uint32_t *fixup_addr = (uint32_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset];
2.270 - if( sh4_x86.backpatch_list[i].exc_code < 0 ) {
2.271 - if( sh4_x86.backpatch_list[i].exc_code == -2 ) {
2.272 - *((uintptr_t *)fixup_addr) = (uintptr_t)xlat_output;
2.273 - } else {
2.274 - *fixup_addr = xlat_output - (uint8_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset] - 4;
2.275 - }
2.276 - load_imm32( REG_RDX, sh4_x86.backpatch_list[i].fixup_icount );
2.277 - int rel = preexc_ptr - xlat_output;
2.278 - JMP_prerel(rel);
2.279 - } else {
2.280 - *fixup_addr = xlat_output - (uint8_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset] - 4;
2.281 - load_imm32( REG_RDI, sh4_x86.backpatch_list[i].exc_code );
2.282 - load_imm32( REG_RDX, sh4_x86.backpatch_list[i].fixup_icount );
2.283 - int rel = end_ptr - xlat_output;
2.284 - JMP_prerel(rel);
2.285 - }
2.286 - }
2.287 - }
2.288 -}
2.289 -
2.290 -struct UnwindInfo {
2.291 - uintptr_t block_start;
2.292 - uintptr_t block_end;
2.293 - void *pc;
2.294 -};
2.295 -
2.296 -_Unwind_Reason_Code xlat_check_frame( struct _Unwind_Context *context, void *arg )
2.297 -{
2.298 - struct UnwindInfo *info = arg;
2.299 - void *pc = (void *)_Unwind_GetIP(context);
2.300 - if( ((uintptr_t)pc) >= info->block_start && ((uintptr_t)pc) < info->block_end ) {
2.301 - info->pc = pc;
2.302 - return _URC_NORMAL_STOP;
2.303 - }
2.304 -
2.305 - return _URC_NO_REASON;
2.306 -}
2.307 -
2.308 -void *xlat_get_native_pc( void *code, uint32_t code_size )
2.309 -{
2.310 - struct _Unwind_Exception exc;
2.311 - struct UnwindInfo info;
2.312 -
2.313 - info.pc = NULL;
2.314 - info.block_start = (uintptr_t)code;
2.315 - info.block_end = info.block_start + code_size;
2.316 - void *result = NULL;
2.317 - _Unwind_Backtrace( xlat_check_frame, &info );
2.318 - return info.pc;
2.319 -}
2.320 -
2.321 -#endif /* !lxdream_ia64abi_H */
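The AMD64 variant needs no HAVE_FASTCALL split: the System V AMD64 ABI already passes the first integer arguments in registers, which is why the helpers above simply move arg1 into RDI and arg2 into RSI before the call, with the exception-backpatch placeholder taking the next argument register (RSI or RDX). As a sketch, call_func2(ptr, a, b) emits code equivalent to this plain C call (illustrative function type and names; not part of the changeset):

    typedef uint32_t (*target_fn2)( uint32_t arg1, uint32_t arg2 );

    static uint32_t call_func2_effect( void *ptr, uint32_t a, uint32_t b )
    {
        /* a travels in RDI, b in RSI, per the System V AMD64 convention */
        return ((target_fn2)ptr)( a, b );
    }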
3.1 --- a/src/sh4/sh4x86.in Wed Mar 04 23:12:21 2009 +0000
3.2 +++ b/src/sh4/sh4x86.in Wed Mar 04 23:27:59 2009 +0000
3.3 @@ -322,9 +322,9 @@
3.4
3.5 /****** Import appropriate calling conventions ******/
3.6 #if SIZEOF_VOID_P == 8
3.7 -#include "sh4/ia64abi.h"
3.8 +#include "xlat/x86/amd64abi.h"
3.9 #else /* 32-bit system */
3.10 -#include "sh4/ia32abi.h"
3.11 +#include "xlat/x86/ia32abi.h"
3.12 #endif
3.13
3.14 void sh4_translate_begin_block( sh4addr_t pc )
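Both ABI headers share the same exception backpatch protocol: each *_exc helper emits a zero placeholder immediate, calls sh4_x86_add_backpatch(xlat_output, pc, -2), and relies on sh4_translate_end_block() to rewrite the placeholder once the recovery stubs exist. A minimal reconstruction of the record each call site registers (illustrative; the real definition lives with the sh4_x86 state and the field widths are guesses, but the three field names are the ones the trailer code reads):

    struct backpatch_record {
        uint32_t fixup_offset;   /* byte offset of the placeholder in the block */
        uint32_t fixup_icount;   /* instruction count, used for cycle accounting */
        int32_t  exc_code;       /* exception code; negative selects the
                                    "already raised" cleanup path, and -2
                                    additionally means the placeholder is
                                    patched with an absolute address rather
                                    than a relative displacement */
    };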
4.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
4.2 +++ b/src/xlat/x86/amd64abi.h Wed Mar 04 23:27:59 2009 +0000
4.3 @@ -0,0 +1,318 @@
4.4 +/**
4.5 + * $Id$
4.6 + *
4.7 + * Provides the implementation for the AMD64 ABI (eg prologue, epilogue, and
4.8 + * calling conventions)
4.9 + *
4.10 + * Copyright (c) 2007 Nathan Keynes.
4.11 + *
4.12 + * This program is free software; you can redistribute it and/or modify
4.13 + * it under the terms of the GNU General Public License as published by
4.14 + * the Free Software Foundation; either version 2 of the License, or
4.15 + * (at your option) any later version.
4.16 + *
4.17 + * This program is distributed in the hope that it will be useful,
4.18 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
4.19 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
4.20 + * GNU General Public License for more details.
4.21 + */
4.22 +
4.23 +#ifndef lxdream_ia64abi_H
4.24 +#define lxdream_ia64abi_H 1
4.25 +
4.26 +#include <unwind.h>
4.27 +
4.28 +#define load_ptr( reg, ptr ) load_imm64( reg, (uint64_t)ptr );
4.29 +
4.30 +static inline void decode_address( int addr_reg )
4.31 +{
4.32 + uintptr_t base = (sh4r.xlat_sh4_mode&SR_MD) ? (uintptr_t)sh4_address_space : (uintptr_t)sh4_user_address_space;
4.33 + MOVL_r32_r32( addr_reg, REG_RCX );
4.34 + SHRL_imm_r32( 12, REG_RCX );
4.35 + MOVP_immptr_rptr( base, REG_RDI );
4.36 + MOVP_sib_rptr(3, REG_RCX, REG_RDI, 0, REG_RCX);
4.37 +}
4.38 +
4.39 +/**
4.40 + * Note: clobbers EAX to make the indirect call - this isn't usually
4.41 + * a problem since the callee will usually clobber it anyway.
4.42 + * Size: 12 bytes
4.43 + */
4.44 +#define CALL_FUNC0_SIZE 12
4.45 +static inline void call_func0( void *ptr )
4.46 +{
4.47 + MOVQ_imm64_r64((uint64_t)ptr, REG_RAX);
4.48 + CALL_r32(REG_RAX);
4.49 +}
4.50 +
4.51 +static inline void call_func1( void *ptr, int arg1 )
4.52 +{
4.53 + MOVQ_r64_r64(arg1, REG_RDI);
4.54 + call_func0(ptr);
4.55 +}
4.56 +
4.57 +static inline void call_func1_exc( void *ptr, int arg1, int pc )
4.58 +{
4.59 + MOVQ_r64_r64(arg1, REG_RDI);
4.60 + MOVP_immptr_rptr(0, REG_RSI);
4.61 + sh4_x86_add_backpatch( xlat_output, pc, -2 );
4.62 + call_func0(ptr);
4.63 +}
4.64 +
4.65 +static inline void call_func1_r32disp8( int preg, uint32_t disp8, int arg1 )
4.66 +{
4.67 + MOVQ_r64_r64(arg1, REG_RDI);
4.68 + CALL_r32disp(preg, disp8);
4.69 +}
4.70 +
4.71 +static inline void call_func1_r32disp8_exc( int preg, uint32_t disp8, int arg1, int pc )
4.72 +{
4.73 + MOVQ_r64_r64(arg1, REG_RDI);
4.74 + MOVP_immptr_rptr(0, REG_RSI);
4.75 + sh4_x86_add_backpatch( xlat_output, pc, -2 );
4.76 + CALL_r32disp(preg, disp8);
4.77 +}
4.78 +
4.79 +static inline void call_func2( void *ptr, int arg1, int arg2 )
4.80 +{
4.81 + MOVQ_r64_r64(arg1, REG_RDI);
4.82 + MOVQ_r64_r64(arg2, REG_RSI);
4.83 + call_func0(ptr);
4.84 +}
4.85 +
4.86 +static inline void call_func2_r32disp8( int preg, uint32_t disp8, int arg1, int arg2 )
4.87 +{
4.88 + MOVQ_r64_r64(arg1, REG_RDI);
4.89 + MOVQ_r64_r64(arg2, REG_RSI);
4.90 + CALL_r32disp(preg, disp8);
4.91 +}
4.92 +
4.93 +static inline void call_func2_r32disp8_exc( int preg, uint32_t disp8, int arg1, int arg2, int pc )
4.94 +{
4.95 + MOVQ_r64_r64(arg1, REG_RDI);
4.96 + MOVQ_r64_r64(arg2, REG_RSI);
4.97 + MOVP_immptr_rptr(0, REG_RDX);
4.98 + sh4_x86_add_backpatch( xlat_output, pc, -2 );
4.99 + CALL_r32disp(preg, disp8);
4.100 +}
4.101 +
4.102 +
4.103 +
4.104 +/**
4.105 + * Emit the 'start of block' assembly. Sets up the stack frame and save
4.106 + * SI/DI as required
4.107 + */
4.108 +void enter_block( )
4.109 +{
4.110 + PUSH_r32(REG_RBP);
4.111 + load_ptr( REG_RBP, ((uint8_t *)&sh4r) + 128 );
4.112 + // Minimum aligned allocation is 16 bytes
4.113 + SUBQ_imms_r64( 16, REG_RSP );
4.114 +}
4.115 +
4.116 +static inline void exit_block( )
4.117 +{
4.118 + ADDQ_imms_r64( 16, REG_RSP );
4.119 + POP_r32(REG_RBP);
4.120 + RET();
4.121 +}
4.122 +
4.123 +/**
4.124 + * Exit the block with sh4r.pc already written
4.125 + */
4.126 +void exit_block_pcset( sh4addr_t pc )
4.127 +{
4.128 + load_imm32( REG_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
4.129 + ADDL_r32_rbpdisp( REG_ECX, REG_OFFSET(slice_cycle) ); // 6
4.130 + load_spreg( REG_RAX, R_PC );
4.131 + if( sh4_x86.tlb_on ) {
4.132 + call_func1(xlat_get_code_by_vma,REG_RAX);
4.133 + } else {
4.134 + call_func1(xlat_get_code,REG_RAX);
4.135 + }
4.136 + exit_block();
4.137 +}
4.138 +
4.139 +/**
4.140 + * Exit the block with sh4r.new_pc written with the target address
4.141 + */
4.142 +void exit_block_newpcset( sh4addr_t pc )
4.143 +{
4.144 + load_imm32( REG_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
4.145 + ADDL_r32_rbpdisp( REG_ECX, REG_OFFSET(slice_cycle) ); // 6
4.146 + load_spreg( REG_RAX, R_NEW_PC );
4.147 + store_spreg( REG_RAX, R_PC );
4.148 + if( sh4_x86.tlb_on ) {
4.149 + call_func1(xlat_get_code_by_vma,REG_RAX);
4.150 + } else {
4.151 + call_func1(xlat_get_code,REG_RAX);
4.152 + }
4.153 + exit_block();
4.154 +}
4.155 +
4.156 +#define EXIT_BLOCK_SIZE(pc) (25 + (IS_IN_ICACHE(pc)?10:CALL_FUNC1_SIZE))
4.157 +/**
4.158 + * Exit the block to an absolute PC
4.159 + */
4.160 +void exit_block_abs( sh4addr_t pc, sh4addr_t endpc )
4.161 +{
4.162 + load_imm32( REG_RCX, pc ); // 5
4.163 + store_spreg( REG_RCX, REG_OFFSET(pc) ); // 3
4.164 + if( IS_IN_ICACHE(pc) ) {
4.165 + MOVP_moffptr_rax( xlat_get_lut_entry(pc) );
4.166 + ANDQ_imms_r64( 0xFFFFFFFC, REG_RAX ); // 4
4.167 + } else if( sh4_x86.tlb_on ) {
4.168 + call_func1(xlat_get_code_by_vma, REG_RCX);
4.169 + } else {
4.170 + call_func1(xlat_get_code,REG_RCX);
4.171 + }
4.172 + load_imm32( REG_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
4.173 + ADDL_r32_rbpdisp( REG_ECX, REG_OFFSET(slice_cycle) ); // 6
4.174 + exit_block();
4.175 +}
4.176 +
4.177 +
4.178 +#define EXIT_BLOCK_REL_SIZE(pc) (28 + (IS_IN_ICACHE(pc)?10:CALL_FUNC1_SIZE))
4.179 +
4.180 +/**
4.181 + * Exit the block to a relative PC
4.182 + */
4.183 +void exit_block_rel( sh4addr_t pc, sh4addr_t endpc )
4.184 +{
4.185 + load_imm32( REG_ECX, pc - sh4_x86.block_start_pc ); // 5
4.186 + ADDL_rbpdisp_r32( R_PC, REG_ECX );
4.187 + store_spreg( REG_ECX, REG_OFFSET(pc) ); // 3
4.188 + if( IS_IN_ICACHE(pc) ) {
4.189 + MOVP_moffptr_rax( xlat_get_lut_entry(GET_ICACHE_PHYS(pc)) ); // 5
4.190 + ANDQ_imms_r64( 0xFFFFFFFC, REG_RAX ); // 4
4.191 + } else if( sh4_x86.tlb_on ) {
4.192 + call_func1(xlat_get_code_by_vma,REG_RCX);
4.193 + } else {
4.194 + call_func1(xlat_get_code,REG_RCX);
4.195 + }
4.196 + load_imm32( REG_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
4.197 + ADDL_r32_rbpdisp( REG_ECX, REG_OFFSET(slice_cycle) ); // 6
4.198 + exit_block();
4.199 +}
4.200 +
4.201 +/**
4.202 + * Exit unconditionally with a general exception
4.203 + */
4.204 +void exit_block_exc( int code, sh4addr_t pc )
4.205 +{
4.206 + load_imm32( REG_ECX, pc - sh4_x86.block_start_pc ); // 5
4.207 + ADDL_r32_rbpdisp( REG_ECX, R_PC );
4.208 + load_imm32( REG_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
4.209 + ADDL_r32_rbpdisp( REG_ECX, REG_OFFSET(slice_cycle) ); // 6
4.210 + load_imm32( REG_RAX, code );
4.211 + call_func1( sh4_raise_exception, REG_RAX );
4.212 +
4.213 + load_spreg( REG_RAX, R_PC );
4.214 + if( sh4_x86.tlb_on ) {
4.215 + call_func1(xlat_get_code_by_vma,REG_RAX);
4.216 + } else {
4.217 + call_func1(xlat_get_code,REG_RAX);
4.218 + }
4.219 +
4.220 + exit_block();
4.221 +}
4.222 +
4.223 +
4.224 +/**
4.225 + * Write the block trailer (exception handling block)
4.226 + */
4.227 +void sh4_translate_end_block( sh4addr_t pc ) {
4.228 + if( sh4_x86.branch_taken == FALSE ) {
4.229 + // Didn't exit unconditionally already, so write the termination here
4.230 + exit_block_rel( pc, pc );
4.231 + }
4.232 + if( sh4_x86.backpatch_posn != 0 ) {
4.233 + unsigned int i;
4.234 + // Raise exception
4.235 + uint8_t *end_ptr = xlat_output;
4.236 + MOVL_r32_r32( REG_RDX, REG_RCX );
4.237 + ADDL_r32_r32( REG_RDX, REG_RCX );
4.238 + ADDL_r32_rbpdisp( REG_RCX, R_PC );
4.239 + MOVL_moffptr_eax( &sh4_cpu_period );
4.240 + MULL_r32( REG_RDX );
4.241 + ADDL_r32_rbpdisp( REG_RAX, REG_OFFSET(slice_cycle) );
4.242 +
4.243 + call_func0( sh4_raise_exception );
4.244 + load_spreg( REG_RAX, R_PC );
4.245 + if( sh4_x86.tlb_on ) {
4.246 + call_func1(xlat_get_code_by_vma,REG_RAX);
4.247 + } else {
4.248 + call_func1(xlat_get_code,REG_RAX);
4.249 + }
4.250 + exit_block();
4.251 +
4.252 + // Exception already raised - just cleanup
4.253 + uint8_t *preexc_ptr = xlat_output;
4.254 + MOVL_r32_r32( REG_EDX, REG_ECX );
4.255 + ADDL_r32_r32( REG_EDX, REG_ECX );
4.256 + ADDL_r32_rbpdisp( REG_ECX, R_SPC );
4.257 + MOVL_moffptr_eax( &sh4_cpu_period );
4.258 + MULL_r32( REG_EDX );
4.259 + ADDL_r32_rbpdisp( REG_EAX, REG_OFFSET(slice_cycle) );
4.260 + load_spreg( REG_RDI, R_PC );
4.261 + if( sh4_x86.tlb_on ) {
4.262 + call_func0(xlat_get_code_by_vma);
4.263 + } else {
4.264 + call_func0(xlat_get_code);
4.265 + }
4.266 + exit_block();
4.267 +
4.268 + for( i=0; i< sh4_x86.backpatch_posn; i++ ) {
4.269 + uint32_t *fixup_addr = (uint32_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset];
4.270 + if( sh4_x86.backpatch_list[i].exc_code < 0 ) {
4.271 + if( sh4_x86.backpatch_list[i].exc_code == -2 ) {
4.272 + *((uintptr_t *)fixup_addr) = (uintptr_t)xlat_output;
4.273 + } else {
4.274 + *fixup_addr = xlat_output - (uint8_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset] - 4;
4.275 + }
4.276 + load_imm32( REG_RDX, sh4_x86.backpatch_list[i].fixup_icount );
4.277 + int rel = preexc_ptr - xlat_output;
4.278 + JMP_prerel(rel);
4.279 + } else {
4.280 + *fixup_addr = xlat_output - (uint8_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset] - 4;
4.281 + load_imm32( REG_RDI, sh4_x86.backpatch_list[i].exc_code );
4.282 + load_imm32( REG_RDX, sh4_x86.backpatch_list[i].fixup_icount );
4.283 + int rel = end_ptr - xlat_output;
4.284 + JMP_prerel(rel);
4.285 + }
4.286 + }
4.287 + }
4.288 +}
4.289 +
4.290 +struct UnwindInfo {
4.291 + uintptr_t block_start;
4.292 + uintptr_t block_end;
4.293 + void *pc;
4.294 +};
4.295 +
4.296 +_Unwind_Reason_Code xlat_check_frame( struct _Unwind_Context *context, void *arg )
4.297 +{
4.298 + struct UnwindInfo *info = arg;
4.299 + void *pc = (void *)_Unwind_GetIP(context);
4.300 + if( ((uintptr_t)pc) >= info->block_start && ((uintptr_t)pc) < info->block_end ) {
4.301 + info->pc = pc;
4.302 + return _URC_NORMAL_STOP;
4.303 + }
4.304 +
4.305 + return _URC_NO_REASON;
4.306 +}
4.307 +
4.308 +void *xlat_get_native_pc( void *code, uint32_t code_size )
4.309 +{
4.310 + struct _Unwind_Exception exc;
4.311 + struct UnwindInfo info;
4.312 +
4.313 + info.pc = NULL;
4.314 + info.block_start = (uintptr_t)code;
4.315 + info.block_end = info.block_start + code_size;
4.316 + void *result = NULL;
4.317 + _Unwind_Backtrace( xlat_check_frame, &info );
4.318 + return info.pc;
4.319 +}
4.320 +
4.321 +#endif /* !lxdream_ia64abi_H */
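One detail worth noting in exit_block_abs()/exit_block_rel(): when the target PC falls inside the current ICACHE region, the next code pointer is fetched straight from the translation lookup table and the low two bits are masked off before use, which suggests LUT entries carry tag bits there. A sketch of that fast path as plain C (illustrative wrapper; xlat_get_lut_entry and GET_ICACHE_PHYS are the source's own names, and the entry is assumed to hold a tagged code pointer):

    static void *icache_fast_path( sh4addr_t pc )
    {
        void **entry = (void **)xlat_get_lut_entry( GET_ICACHE_PHYS(pc) );
        /* ANDL/ANDQ 0xFFFFFFFC in the emitters == strip the low tag bits */
        return (void *)((uintptr_t)*entry & ~(uintptr_t)3);
    }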
5.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
5.2 +++ b/src/xlat/x86/ia32abi.h Wed Mar 04 23:27:59 2009 +0000
5.3 @@ -0,0 +1,426 @@
5.4 +/**
5.5 + * $Id$
5.6 + *
5.7 + * Provides the implementation for the ia32 ABI variant
5.8 + * (eg prologue, epilogue, and calling conventions). Stack frame is
5.9 + * aligned on 16-byte boundaries for the benefit of OS X (which
5.10 + * requires it).
5.11 + *
5.12 + * Copyright (c) 2007 Nathan Keynes.
5.13 + *
5.14 + * This program is free software; you can redistribute it and/or modify
5.15 + * it under the terms of the GNU General Public License as published by
5.16 + * the Free Software Foundation; either version 2 of the License, or
5.17 + * (at your option) any later version.
5.18 + *
5.19 + * This program is distributed in the hope that it will be useful,
5.20 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
5.21 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
5.22 + * GNU General Public License for more details.
5.23 + */
5.24 +
5.25 +#ifndef lxdream_ia32mac_H
5.26 +#define lxdream_ia32mac_H 1
5.27 +
5.28 +#define load_ptr( reg, ptr ) load_imm32( reg, (uint32_t)ptr );
5.29 +
5.30 +static inline void decode_address( int addr_reg )
5.31 +{
5.32 + uintptr_t base = (sh4r.xlat_sh4_mode&SR_MD) ? (uintptr_t)sh4_address_space : (uintptr_t)sh4_user_address_space;
5.33 + MOVL_r32_r32( addr_reg, REG_ECX );
5.34 + SHRL_imm_r32( 12, REG_ECX );
5.35 + MOVP_sib_rptr( 2, REG_ECX, -1, base, REG_ECX );
5.36 +}
5.37 +
5.38 +/**
5.39 + * Note: clobbers EAX to make the indirect call - this isn't usually
5.40 + * a problem since the callee will usually clobber it anyway.
5.41 + */
5.42 +static inline void call_func0( void *ptr )
5.43 +{
5.44 + load_imm32(REG_ECX, (uint32_t)ptr);
5.45 + CALL_r32(REG_ECX);
5.46 +}
5.47 +
5.48 +#ifdef HAVE_FASTCALL
5.49 +static inline void call_func1( void *ptr, int arg1 )
5.50 +{
5.51 + if( arg1 != REG_EAX ) {
5.52 + MOVL_r32_r32( arg1, REG_EAX );
5.53 + }
5.54 + MOVP_immptr_rptr((uintptr_t)ptr, REG_ECX);
5.55 + CALL_r32(REG_ECX);
5.56 +}
5.57 +
5.58 +static inline void call_func1_r32( int addr_reg, int arg1 )
5.59 +{
5.60 + if( arg1 != REG_EAX ) {
5.61 + MOVL_r32_r32( arg1, REG_EAX );
5.62 + }
5.63 + CALL_r32(addr_reg);
5.64 +}
5.65 +
5.66 +static inline void call_func1_r32disp8( int preg, uint32_t disp8, int arg1 )
5.67 +{
5.68 + if( arg1 != REG_EAX ) {
5.69 + MOVL_r32_r32( arg1, REG_EAX );
5.70 + }
5.71 + CALL_r32disp(preg, disp8);
5.72 +}
5.73 +
5.74 +static inline void call_func1_r32disp8_exc( int preg, uint32_t disp8, int arg1, int pc )
5.75 +{
5.76 + if( arg1 != REG_EAX ) {
5.77 + MOVL_r32_r32( arg1, REG_EAX );
5.78 + }
5.79 + MOVP_immptr_rptr(0,REG_EDX);
5.80 + sh4_x86_add_backpatch(xlat_output, pc, -2);
5.81 + CALL_r32disp(preg, disp8);
5.82 +}
5.83 +
5.84 +static inline void call_func2( void *ptr, int arg1, int arg2 )
5.85 +{
5.86 + if( arg2 != REG_EDX ) {
5.87 + MOVL_r32_r32( arg2, REG_EDX );
5.88 + }
5.89 + if( arg1 != REG_EAX ) {
5.90 + MOVL_r32_r32( arg1, REG_EAX );
5.91 + }
5.92 + MOVP_immptr_rptr((uint32_t)ptr, REG_ECX);
5.93 + CALL_r32(REG_ECX);
5.94 +}
5.95 +
5.96 +static inline void call_func2_r32( int addr_reg, int arg1, int arg2 )
5.97 +{
5.98 + if( arg2 != REG_EDX ) {
5.99 + MOVL_r32_r32( arg2, REG_EDX );
5.100 + }
5.101 + if( arg1 != REG_EAX ) {
5.102 + MOVL_r32_r32( arg1, REG_EAX );
5.103 + }
5.104 + CALL_r32(addr_reg);
5.105 +}
5.106 +
5.107 +static inline void call_func2_r32disp8( int preg, uint32_t disp8, int arg1, int arg2 )
5.108 +{
5.109 + if( arg2 != REG_EDX ) {
5.110 + MOVL_r32_r32( arg2, REG_EDX );
5.111 + }
5.112 + if( arg1 != REG_EAX ) {
5.113 + MOVL_r32_r32( arg1, REG_EAX );
5.114 + }
5.115 + CALL_r32disp(preg, disp8);
5.116 +}
5.117 +
5.118 +static inline void call_func2_r32disp8_exc( int preg, uint32_t disp8, int arg1, int arg2, int pc )
5.119 +{
5.120 + if( arg2 != REG_EDX ) {
5.121 + MOVL_r32_r32( arg2, REG_EDX );
5.122 + }
5.123 + if( arg1 != REG_EAX ) {
5.124 + MOVL_r32_r32( arg1, REG_EAX );
5.125 + }
5.126 + MOVL_imm32_rspdisp(0,0);
5.127 + sh4_x86_add_backpatch(xlat_output, pc, -2);
5.128 + CALL_r32disp(preg, disp8);
5.129 +}
5.130 +
5.131 +
5.132 +
5.133 +static inline void call_func1_exc( void *ptr, int arg1, int pc )
5.134 +{
5.135 + if( arg1 != REG_EAX ) {
5.136 + MOVL_r32_r32( arg1, REG_EAX );
5.137 + }
5.138 + MOVP_immptr_rptr(0,REG_EDX);
5.139 + sh4_x86_add_backpatch(xlat_output, pc, -2);
5.140 + MOVP_immptr_rptr((uint32_t)ptr, REG_ECX);
5.141 + CALL_r32(REG_ECX);
5.142 +}
5.143 +
5.144 +static inline void call_func2_exc( void *ptr, int arg1, int arg2, int pc )
5.145 +{
5.146 + if( arg2 != REG_EDX ) {
5.147 + MOVL_r32_r32( arg2, REG_EDX );
5.148 + }
5.149 + if( arg1 != REG_EAX ) {
5.150 + MOVL_r32_r32( arg1, REG_EAX );
5.151 + }
5.152 + MOVL_imm32_rspdisp(0,0);
5.153 + sh4_x86_add_backpatch(xlat_output, pc, -2);
5.154 + MOVP_immptr_rptr((uint32_t)ptr, REG_ECX);
5.155 + CALL_r32(REG_ECX);
5.156 +}
5.157 +
5.158 +#else
5.159 +static inline void call_func1( void *ptr, int arg1 )
5.160 +{
5.161 + SUBL_imms_r32( 12, REG_ESP );
5.162 + PUSH_r32(arg1);
5.163 + MOVP_immptr_rptr((uint32_t)ptr, REG_ECX);
5.164 + CALL_r32(REG_ECX);
5.165 + ADDL_imms_r32( 16, REG_ESP );
5.166 +}
5.167 +
5.168 +static inline void call_func2( void *ptr, int arg1, int arg2 )
5.169 +{
5.170 + SUBL_imms_r32( 8, REG_ESP );
5.171 + PUSH_r32(arg2);
5.172 + PUSH_r32(arg1);
5.173 + MOVP_immptr_rptr((uint32_t)ptr, REG_ECX);
5.174 + CALL_r32(REG_ECX);
5.175 + ADDL_imms_r32( 16, REG_ESP );
5.176 +}
5.177 +
5.178 +#endif
5.179 +
5.180 +/**
5.181 + * Emit the 'start of block' assembly. Sets up the stack frame and save
5.182 + * SI/DI as required
5.183 + * Allocates 8 bytes for local variables, which also has the convenient
5.184 + * side-effect of aligning the stack.
5.185 + */
5.186 +void enter_block( )
5.187 +{
5.188 + PUSH_r32(REG_EBP);
5.189 + load_ptr( REG_EBP, ((uint8_t *)&sh4r) + 128 );
5.190 + SUBL_imms_r32( 8, REG_ESP );
5.191 +}
5.192 +
5.193 +static inline void exit_block( )
5.194 +{
5.195 + ADDL_imms_r32( 8, REG_ESP );
5.196 + POP_r32(REG_EBP);
5.197 + RET();
5.198 +}
5.199 +
5.200 +/**
5.201 + * Exit the block with sh4r.new_pc written with the target pc
5.202 + */
5.203 +void exit_block_pcset( sh4addr_t pc )
5.204 +{
5.205 + load_imm32( REG_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
5.206 + ADDL_r32_rbpdisp( REG_ECX, REG_OFFSET(slice_cycle) ); // 6
5.207 + load_spreg( REG_EAX, R_PC );
5.208 + if( sh4_x86.tlb_on ) {
5.209 + call_func1(xlat_get_code_by_vma,REG_EAX);
5.210 + } else {
5.211 + call_func1(xlat_get_code,REG_EAX);
5.212 + }
5.213 + exit_block();
5.214 +}
5.215 +
5.216 +/**
5.217 + * Exit the block with sh4r.new_pc written with the target pc
5.218 + */
5.219 +void exit_block_newpcset( sh4addr_t pc )
5.220 +{
5.221 + load_imm32( REG_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
5.222 + ADDL_r32_rbpdisp( REG_ECX, REG_OFFSET(slice_cycle) ); // 6
5.223 + load_spreg( REG_EAX, R_NEW_PC );
5.224 + store_spreg( REG_EAX, R_PC );
5.225 + if( sh4_x86.tlb_on ) {
5.226 + call_func1(xlat_get_code_by_vma,REG_EAX);
5.227 + } else {
5.228 + call_func1(xlat_get_code,REG_EAX);
5.229 + }
5.230 + exit_block();
5.231 +}
5.232 +
5.233 +
5.234 +/**
5.235 + * Exit the block to an absolute PC
5.236 + */
5.237 +void exit_block_abs( sh4addr_t pc, sh4addr_t endpc )
5.238 +{
5.239 + load_imm32( REG_ECX, pc ); // 5
5.240 + store_spreg( REG_ECX, REG_OFFSET(pc) ); // 3
5.241 + if( IS_IN_ICACHE(pc) ) {
5.242 + MOVP_moffptr_rax( xlat_get_lut_entry(GET_ICACHE_PHYS(pc)) ); // 5
5.243 + ANDL_imms_r32( 0xFFFFFFFC, REG_EAX ); // 3
5.244 + } else if( sh4_x86.tlb_on ) {
5.245 + call_func1(xlat_get_code_by_vma,REG_ECX);
5.246 + } else {
5.247 + call_func1(xlat_get_code,REG_ECX);
5.248 + }
5.249 + load_imm32( REG_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
5.250 + ADDL_r32_rbpdisp( REG_ECX, REG_OFFSET(slice_cycle) ); // 6
5.251 + exit_block();
5.252 +}
5.253 +
5.254 +/**
5.255 + * Exit the block to a relative PC
5.256 + */
5.257 +void exit_block_rel( sh4addr_t pc, sh4addr_t endpc )
5.258 +{
5.259 + load_imm32( REG_ECX, pc - sh4_x86.block_start_pc ); // 5
5.260 + ADDL_rbpdisp_r32( R_PC, REG_ECX );
5.261 + store_spreg( REG_ECX, R_PC ); // 3
5.262 + if( IS_IN_ICACHE(pc) ) {
5.263 + MOVP_moffptr_rax( xlat_get_lut_entry(GET_ICACHE_PHYS(pc)) ); // 5
5.264 + ANDL_imms_r32( 0xFFFFFFFC, REG_EAX ); // 3
5.265 + } else if( sh4_x86.tlb_on ) {
5.266 + call_func1(xlat_get_code_by_vma,REG_ECX);
5.267 + } else {
5.268 + call_func1(xlat_get_code,REG_ECX);
5.269 + }
5.270 + load_imm32( REG_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
5.271 + ADDL_r32_rbpdisp( REG_ECX, REG_OFFSET(slice_cycle) ); // 6
5.272 + exit_block();
5.273 +}
5.274 +
5.275 +/**
5.276 + * Exit unconditionally with a general exception
5.277 + */
5.278 +void exit_block_exc( int code, sh4addr_t pc )
5.279 +{
5.280 + load_imm32( REG_ECX, pc - sh4_x86.block_start_pc ); // 5
5.281 + ADDL_r32_rbpdisp( REG_ECX, R_PC );
5.282 + load_imm32( REG_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
5.283 + ADDL_r32_rbpdisp( REG_ECX, REG_OFFSET(slice_cycle) ); // 6
5.284 + load_imm32( REG_EAX, code );
5.285 + call_func1( sh4_raise_exception, REG_EAX );
5.286 +
5.287 + load_spreg( REG_EAX, R_PC );
5.288 + if( sh4_x86.tlb_on ) {
5.289 + call_func1(xlat_get_code_by_vma,REG_EAX);
5.290 + } else {
5.291 + call_func1(xlat_get_code,REG_EAX);
5.292 + }
5.293 +
5.294 + exit_block();
5.295 +}
5.296 +
5.297 +/**
5.298 + * Write the block trailer (exception handling block)
5.299 + */
5.300 +void sh4_translate_end_block( sh4addr_t pc ) {
5.301 + if( sh4_x86.branch_taken == FALSE ) {
5.302 + // Didn't exit unconditionally already, so write the termination here
5.303 + exit_block_rel( pc, pc );
5.304 + }
5.305 + if( sh4_x86.backpatch_posn != 0 ) {
5.306 + unsigned int i;
5.307 + // Raise exception
5.308 + uint8_t *end_ptr = xlat_output;
5.309 + MOVL_r32_r32( REG_EDX, REG_ECX );
5.310 + ADDL_r32_r32( REG_EDX, REG_ECX );
5.311 + ADDL_r32_rbpdisp( REG_ECX, R_PC );
5.312 + MOVL_moffptr_eax( &sh4_cpu_period );
5.313 + MULL_r32( REG_EDX );
5.314 + ADDL_r32_rbpdisp( REG_EAX, REG_OFFSET(slice_cycle) );
5.315 +
5.316 + POP_r32(REG_EAX);
5.317 + call_func1( sh4_raise_exception, REG_EAX );
5.318 + load_spreg( REG_EAX, R_PC );
5.319 + if( sh4_x86.tlb_on ) {
5.320 + call_func1(xlat_get_code_by_vma,REG_EAX);
5.321 + } else {
5.322 + call_func1(xlat_get_code,REG_EAX);
5.323 + }
5.324 + exit_block();
5.325 +
5.326 + // Exception already raised - just cleanup
5.327 + uint8_t *preexc_ptr = xlat_output;
5.328 + MOVL_r32_r32( REG_EDX, REG_ECX );
5.329 + ADDL_r32_r32( REG_EDX, REG_ECX );
5.330 + ADDL_r32_rbpdisp( REG_ECX, R_SPC );
5.331 + MOVL_moffptr_eax( &sh4_cpu_period );
5.332 + MULL_r32( REG_EDX );
5.333 + ADDL_r32_rbpdisp( REG_EAX, REG_OFFSET(slice_cycle) );
5.334 + load_spreg( REG_EAX, R_PC );
5.335 + if( sh4_x86.tlb_on ) {
5.336 + call_func1(xlat_get_code_by_vma,REG_EAX);
5.337 + } else {
5.338 + call_func1(xlat_get_code,REG_EAX);
5.339 + }
5.340 + exit_block();
5.341 +
5.342 + for( i=0; i< sh4_x86.backpatch_posn; i++ ) {
5.343 + uint32_t *fixup_addr = (uint32_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset];
5.344 + if( sh4_x86.backpatch_list[i].exc_code < 0 ) {
5.345 + if( sh4_x86.backpatch_list[i].exc_code == -2 ) {
5.346 + *fixup_addr = (uint32_t)xlat_output;
5.347 + } else {
5.348 + *fixup_addr += xlat_output - (uint8_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset] - 4;
5.349 + }
5.350 + load_imm32( REG_EDX, sh4_x86.backpatch_list[i].fixup_icount );
5.351 + int rel = preexc_ptr - xlat_output;
5.352 + JMP_prerel(rel);
5.353 + } else {
5.354 + *fixup_addr += xlat_output - (uint8_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset] - 4;
5.355 + PUSH_imm32( sh4_x86.backpatch_list[i].exc_code );
5.356 + load_imm32( REG_EDX, sh4_x86.backpatch_list[i].fixup_icount );
5.357 + int rel = end_ptr - xlat_output;
5.358 + JMP_prerel(rel);
5.359 + }
5.360 + }
5.361 + }
5.362 +}
5.363 +
5.364 +
5.365 +/**
5.366 + * The unwind methods only work if we compiled with DWARF2 frame information
5.367 + * (ie -fexceptions), otherwise we have to use the direct frame scan.
5.368 + */
5.369 +#ifdef HAVE_EXCEPTIONS
5.370 +#include <unwind.h>
5.371 +
5.372 +struct UnwindInfo {
5.373 + uintptr_t block_start;
5.374 + uintptr_t block_end;
5.375 + void *pc;
5.376 +};
5.377 +
5.378 +_Unwind_Reason_Code xlat_check_frame( struct _Unwind_Context *context, void *arg )
5.379 +{
5.380 + struct UnwindInfo *info = arg;
5.381 + void *pc = (void *)_Unwind_GetIP(context);
5.382 + if( ((uintptr_t)pc) >= info->block_start && ((uintptr_t)pc) < info->block_end ) {
5.383 + info->pc = pc;
5.384 + return _URC_NORMAL_STOP;
5.385 + }
5.386 +
5.387 + return _URC_NO_REASON;
5.388 +}
5.389 +
5.390 +void *xlat_get_native_pc( void *code, uint32_t code_size )
5.391 +{
5.392 + struct _Unwind_Exception exc;
5.393 + struct UnwindInfo info;
5.394 +
5.395 + info.pc = NULL;
5.396 + info.block_start = (uintptr_t)code;
5.397 + info.block_end = info.block_start + code_size;
5.398 + void *result = NULL;
5.399 + _Unwind_Backtrace( xlat_check_frame, &info );
5.400 + return info.pc;
5.401 +}
5.402 +#else
5.403 +void *xlat_get_native_pc( void *code, uint32_t code_size )
5.404 +{
5.405 + void *result = NULL;
5.406 + asm(
5.407 + "mov %%ebp, %%eax\n\t"
5.408 + "mov $0x8, %%ecx\n\t"
5.409 + "mov %1, %%edx\n"
5.410 + "frame_loop: test %%eax, %%eax\n\t"
5.411 + "je frame_not_found\n\t"
5.412 + "cmp (%%eax), %%edx\n\t"
5.413 + "je frame_found\n\t"
5.414 + "sub $0x1, %%ecx\n\t"
5.415 + "je frame_not_found\n\t"
5.416 + "movl (%%eax), %%eax\n\t"
5.417 + "jmp frame_loop\n"
5.418 + "frame_found: movl 0x4(%%eax), %0\n"
5.419 + "frame_not_found:"
5.420 + : "=r" (result)
5.421 + : "r" (((uint8_t *)&sh4r) + 128 )
5.422 + : "eax", "ecx", "edx" );
5.423 + return result;
5.424 +}
5.425 +#endif
5.426 +
5.427 +#endif /* !lxdream_ia32mac.h */
5.428 +
5.429 +
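Where DWARF2 unwind tables are unavailable, xlat_get_native_pc() falls back to the inline-asm frame scan at the end of ia32abi.h: it walks up to eight saved-EBP links looking for a frame whose saved EBP equals &sh4r + 128 (the sentinel value enter_block() installs), then returns the word above it, i.e. the return address into translated code. The same logic in plain C, as a sketch (GCC-specific __builtin_frame_address; not part of the changeset):

    static void *native_pc_by_frame_scan( void )
    {
        void **frame = (void **)__builtin_frame_address(0);
        void *marker = ((uint8_t *)&sh4r) + 128;
        for( int i = 0; frame != NULL && i < 8; i++ ) {
            if( frame[0] == marker ) {
                return frame[1];           /* return address into the block */
            }
            frame = (void **)frame[0];     /* follow the saved-EBP chain */
        }
        return NULL;
    }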