Search
lxdream.org :: lxdream/src/sh4/ia64abi.h :: diff
lxdream 0.9.1
released Jun 29
Download Now
filename src/sh4/ia64abi.h
changeset 736:a02d1475ccfd
prev 670:5d277b7ad0df
next 800:0d1be79c9b33
author nkeynes
date Tue Jul 29 23:25:03 2008 +0000 (15 years ago)
permissions -rw-r--r--
last change Fix minor warnings
file annotate diff log raw
1.1 --- a/src/sh4/ia64abi.h Tue May 13 08:48:15 2008 +0000
1.2 +++ b/src/sh4/ia64abi.h Tue Jul 29 23:25:03 2008 +0000
1.3 @@ -1,7 +1,7 @@
1.4 /**
1.5 * $Id$
1.6 *
1.7 - * Provides the implementation for the ia32 ABI (eg prologue, epilogue, and
1.8 + * Provides the implementation for the AMD64 ABI (eg prologue, epilogue, and
1.9 * calling conventions)
1.10 *
1.11 * Copyright (c) 2007 Nathan Keynes.
1.12 @@ -17,13 +17,13 @@
1.13 * GNU General Public License for more details.
1.14 */
1.15
1.16 -#ifndef __lxdream_x86_64abi_H
1.17 -#define __lxdream_x86_64abi_H 1
1.18 +#ifndef lxdream_ia64abi_H
1.19 +#define lxdream_ia64abi_H 1
1.20
1.21 #include <unwind.h>
1.22
1.23 #define load_ptr( reg, ptr ) load_imm64( reg, (uint64_t)ptr );
1.24 -
1.25 +
1.26 /**
1.27 * Note: clobbers EAX to make the indirect call - this isn't usually
1.28 * a problem since the callee will usually clobber it anyway.
1.29 @@ -96,7 +96,7 @@
1.30 PUSH_r32(R_EBP);
1.31 /* mov &sh4r, ebp */
1.32 load_ptr( R_EBP, ((uint8_t *)&sh4r) + 128 );
1.33 -
1.34 +
1.35 sh4_x86.in_delay_slot = FALSE;
1.36 sh4_x86.priv_checked = FALSE;
1.37 sh4_x86.fpuen_checked = FALSE;
1.38 @@ -116,9 +116,9 @@
1.39 ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) ); // 6
1.40 load_spreg( R_EAX, R_PC );
1.41 if( sh4_x86.tlb_on ) {
1.42 - call_func1(xlat_get_code_by_vma,R_EAX);
1.43 + call_func1(xlat_get_code_by_vma,R_EAX);
1.44 } else {
1.45 - call_func1(xlat_get_code,R_EAX);
1.46 + call_func1(xlat_get_code,R_EAX);
1.47 }
1.48 POP_r32(R_EBP);
1.49 RET();
1.50 @@ -134,9 +134,9 @@
1.51 load_spreg( R_EAX, R_NEW_PC );
1.52 store_spreg( R_EAX, R_PC );
1.53 if( sh4_x86.tlb_on ) {
1.54 - call_func1(xlat_get_code_by_vma,R_EAX);
1.55 + call_func1(xlat_get_code_by_vma,R_EAX);
1.56 } else {
1.57 - call_func1(xlat_get_code,R_EAX);
1.58 + call_func1(xlat_get_code,R_EAX);
1.59 }
1.60 POP_r32(R_EBP);
1.61 RET();
1.62 @@ -151,11 +151,11 @@
1.63 load_imm32( R_ECX, pc ); // 5
1.64 store_spreg( R_ECX, REG_OFFSET(pc) ); // 3
1.65 if( IS_IN_ICACHE(pc) ) {
1.66 - REXW(); MOV_moff32_EAX( xlat_get_lut_entry(pc) );
1.67 + REXW(); MOV_moff32_EAX( xlat_get_lut_entry(pc) );
1.68 } else if( sh4_x86.tlb_on ) {
1.69 - call_func1(xlat_get_code_by_vma, R_ECX);
1.70 + call_func1(xlat_get_code_by_vma, R_ECX);
1.71 } else {
1.72 - call_func1(xlat_get_code,R_ECX);
1.73 + call_func1(xlat_get_code,R_ECX);
1.74 }
1.75 REXW(); AND_imm8s_r32( 0xFC, R_EAX ); // 4
1.76 load_imm32( R_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
1.77 @@ -176,11 +176,11 @@
1.78 ADD_sh4r_r32( R_PC, R_ECX );
1.79 store_spreg( R_ECX, REG_OFFSET(pc) ); // 3
1.80 if( IS_IN_ICACHE(pc) ) {
1.81 - REXW(); MOV_moff32_EAX( xlat_get_lut_entry(GET_ICACHE_PHYS(pc)) ); // 5
1.82 + REXW(); MOV_moff32_EAX( xlat_get_lut_entry(GET_ICACHE_PHYS(pc)) ); // 5
1.83 } else if( sh4_x86.tlb_on ) {
1.84 - call_func1(xlat_get_code_by_vma,R_ECX);
1.85 + call_func1(xlat_get_code_by_vma,R_ECX);
1.86 } else {
1.87 - call_func1(xlat_get_code,R_ECX);
1.88 + call_func1(xlat_get_code,R_ECX);
1.89 }
1.90 REXW(); AND_imm8s_r32( 0xFC, R_EAX ); // 4
1.91 load_imm32( R_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
1.92 @@ -194,65 +194,65 @@
1.93 */
1.94 void sh4_translate_end_block( sh4addr_t pc ) {
1.95 if( sh4_x86.branch_taken == FALSE ) {
1.96 - // Didn't exit unconditionally already, so write the termination here
1.97 - exit_block_rel( pc, pc );
1.98 + // Didn't exit unconditionally already, so write the termination here
1.99 + exit_block_rel( pc, pc );
1.100 }
1.101 if( sh4_x86.backpatch_posn != 0 ) {
1.102 - unsigned int i;
1.103 - // Raise exception
1.104 - uint8_t *end_ptr = xlat_output;
1.105 - MOV_r32_r32( R_EDX, R_ECX );
1.106 - ADD_r32_r32( R_EDX, R_ECX );
1.107 - ADD_r32_sh4r( R_ECX, R_PC );
1.108 - MOV_moff32_EAX( &sh4_cpu_period );
1.109 - MUL_r32( R_EDX );
1.110 - ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );
1.111 + unsigned int i;
1.112 + // Raise exception
1.113 + uint8_t *end_ptr = xlat_output;
1.114 + MOV_r32_r32( R_EDX, R_ECX );
1.115 + ADD_r32_r32( R_EDX, R_ECX );
1.116 + ADD_r32_sh4r( R_ECX, R_PC );
1.117 + MOV_moff32_EAX( &sh4_cpu_period );
1.118 + MUL_r32( R_EDX );
1.119 + ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );
1.120
1.121 - call_func0( sh4_raise_exception );
1.122 - load_spreg( R_EAX, R_PC );
1.123 - if( sh4_x86.tlb_on ) {
1.124 - call_func1(xlat_get_code_by_vma,R_EAX);
1.125 - } else {
1.126 - call_func1(xlat_get_code,R_EAX);
1.127 - }
1.128 - POP_r32(R_EBP);
1.129 - RET();
1.130 + call_func0( sh4_raise_exception );
1.131 + load_spreg( R_EAX, R_PC );
1.132 + if( sh4_x86.tlb_on ) {
1.133 + call_func1(xlat_get_code_by_vma,R_EAX);
1.134 + } else {
1.135 + call_func1(xlat_get_code,R_EAX);
1.136 + }
1.137 + POP_r32(R_EBP);
1.138 + RET();
1.139
1.140 - // Exception already raised - just cleanup
1.141 - uint8_t *preexc_ptr = xlat_output;
1.142 - MOV_r32_r32( R_EDX, R_ECX );
1.143 - ADD_r32_r32( R_EDX, R_ECX );
1.144 - ADD_r32_sh4r( R_ECX, R_SPC );
1.145 - MOV_moff32_EAX( &sh4_cpu_period );
1.146 - MUL_r32( R_EDX );
1.147 - ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );
1.148 - load_spreg( R_EDI, R_PC );
1.149 - if( sh4_x86.tlb_on ) {
1.150 - call_func0(xlat_get_code_by_vma);
1.151 - } else {
1.152 - call_func0(xlat_get_code);
1.153 - }
1.154 - POP_r32(R_EBP);
1.155 - RET();
1.156 + // Exception already raised - just cleanup
1.157 + uint8_t *preexc_ptr = xlat_output;
1.158 + MOV_r32_r32( R_EDX, R_ECX );
1.159 + ADD_r32_r32( R_EDX, R_ECX );
1.160 + ADD_r32_sh4r( R_ECX, R_SPC );
1.161 + MOV_moff32_EAX( &sh4_cpu_period );
1.162 + MUL_r32( R_EDX );
1.163 + ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );
1.164 + load_spreg( R_EDI, R_PC );
1.165 + if( sh4_x86.tlb_on ) {
1.166 + call_func0(xlat_get_code_by_vma);
1.167 + } else {
1.168 + call_func0(xlat_get_code);
1.169 + }
1.170 + POP_r32(R_EBP);
1.171 + RET();
1.172
1.173 - for( i=0; i< sh4_x86.backpatch_posn; i++ ) {
1.174 - uint32_t *fixup_addr = (uint32_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset];
1.175 - *fixup_addr = xlat_output - (uint8_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset] - 4;
1.176 - if( sh4_x86.backpatch_list[i].exc_code < 0 ) {
1.177 - load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
1.178 - int stack_adj = -1 - sh4_x86.backpatch_list[i].exc_code;
1.179 - if( stack_adj > 0 ) {
1.180 - ADD_imm8s_r32( stack_adj*4, R_ESP );
1.181 - }
1.182 - int rel = preexc_ptr - xlat_output;
1.183 - JMP_rel(rel);
1.184 - } else {
1.185 - load_imm32( R_EDI, sh4_x86.backpatch_list[i].exc_code );
1.186 - load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
1.187 - int rel = end_ptr - xlat_output;
1.188 - JMP_rel(rel);
1.189 - }
1.190 - }
1.191 + for( i=0; i< sh4_x86.backpatch_posn; i++ ) {
1.192 + uint32_t *fixup_addr = (uint32_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset];
1.193 + *fixup_addr = xlat_output - (uint8_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset] - 4;
1.194 + if( sh4_x86.backpatch_list[i].exc_code < 0 ) {
1.195 + load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
1.196 + int stack_adj = -1 - sh4_x86.backpatch_list[i].exc_code;
1.197 + if( stack_adj > 0 ) {
1.198 + ADD_imm8s_r32( stack_adj*4, R_ESP );
1.199 + }
1.200 + int rel = preexc_ptr - xlat_output;
1.201 + JMP_rel(rel);
1.202 + } else {
1.203 + load_imm32( R_EDI, sh4_x86.backpatch_list[i].exc_code );
1.204 + load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
1.205 + int rel = end_ptr - xlat_output;
1.206 + JMP_rel(rel);
1.207 + }
1.208 + }
1.209 }
1.210 }
1.211
1.212 @@ -265,17 +265,17 @@
1.213 *result = (void *)_Unwind_GetIP(context);
1.214 return _URC_NORMAL_STOP;
1.215 }
1.216 -
1.217 +
1.218 return _URC_NO_REASON;
1.219 }
1.220
1.221 void *xlat_get_native_pc()
1.222 {
1.223 struct _Unwind_Exception exc;
1.224 -
1.225 +
1.226 void *result = NULL;
1.227 _Unwind_Backtrace( xlat_check_frame, &result );
1.228 return result;
1.229 }
1.230
1.231 -#endif
1.232 +#endif /* !lxdream_ia64abi_H */
.