Search
lxdream.org :: lxdream/src/sh4/ia64abi.h :: diff
lxdream 0.9.1
released Jun 29
Download Now
filename src/sh4/ia64abi.h
changeset 586:2a3ba82cf243
prev 547:d6e00ffc4adc
next 590:4db6a084ca3c
author nkeynes
date Tue Jan 15 20:50:23 2008 +0000 (16 years ago)
permissions -rw-r--r--
last change Merged lxdream-mmu r570:596 to trunk
file annotate diff log raw
1.1 --- a/src/sh4/ia64abi.h Thu Dec 06 10:37:55 2007 +0000
1.2 +++ b/src/sh4/ia64abi.h Tue Jan 15 20:50:23 2008 +0000
1.3 @@ -1,5 +1,5 @@
1.4 /**
1.5 - * $Id: ia64abi.in,v 1.20 2007-11-08 11:54:16 nkeynes Exp $
1.6 + * $Id$
1.7 *
1.8 * Provides the implementation for the ia32 ABI (eg prologue, epilogue, and
1.9 * calling conventions)
1.10 @@ -20,6 +20,7 @@
1.11 #ifndef __lxdream_x86_64abi_H
1.12 #define __lxdream_x86_64abi_H 1
1.13
1.14 +#include <unwind.h>
1.15
1.16 #define load_ptr( reg, ptr ) load_imm64( reg, (uint64_t)ptr );
1.17
1.18 @@ -50,7 +51,7 @@
1.19 call_func0(ptr);
1.20 }
1.21
1.22 -#define MEM_WRITE_DOUBLE_SIZE 39
1.23 +#define MEM_WRITE_DOUBLE_SIZE 35
1.24 /**
1.25 * Write a double (64-bit) value into memory, with the first word in arg2a, and
1.26 * the second in arg2b
1.27 @@ -60,10 +61,10 @@
1.28 PUSH_r32(arg2b);
1.29 PUSH_r32(addr);
1.30 call_func2(sh4_write_long, addr, arg2a);
1.31 - POP_r32(addr);
1.32 - POP_r32(arg2b);
1.33 - ADD_imm8s_r32(4, addr);
1.34 - call_func2(sh4_write_long, addr, arg2b);
1.35 + POP_r32(R_EDI);
1.36 + POP_r32(R_ESI);
1.37 + ADD_imm8s_r32(4, R_EDI);
1.38 + call_func0(sh4_write_long);
1.39 }
1.40
1.41 #define MEM_READ_DOUBLE_SIZE 43
1.42 @@ -101,7 +102,9 @@
1.43 sh4_x86.fpuen_checked = FALSE;
1.44 sh4_x86.branch_taken = FALSE;
1.45 sh4_x86.backpatch_posn = 0;
1.46 + sh4_x86.recovery_posn = 0;
1.47 sh4_x86.block_start_pc = pc;
1.48 + sh4_x86.tlb_on = IS_MMU_ENABLED();
1.49 sh4_x86.tstate = TSTATE_NONE;
1.50 }
1.51
1.52 @@ -109,17 +112,21 @@
1.53 * Exit the block with sh4r.pc already written
1.54 * Bytes: 15
1.55 */
1.56 -void exit_block_pcset( pc )
1.57 +void exit_block_pcset( sh4addr_t pc )
1.58 {
1.59 load_imm32( R_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
1.60 ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) ); // 6
1.61 load_spreg( R_EAX, REG_OFFSET(pc) );
1.62 - call_func1(xlat_get_code,R_EAX);
1.63 + if( sh4_x86.tlb_on ) {
1.64 + call_func1(xlat_get_code_by_vma,R_EAX);
1.65 + } else {
1.66 + call_func1(xlat_get_code,R_EAX);
1.67 + }
1.68 POP_r32(R_EBP);
1.69 RET();
1.70 }
1.71
1.72 -#define EXIT_BLOCK_SIZE 35
1.73 +#define EXIT_BLOCK_SIZE(pc) (25 + (IS_IN_ICACHE(pc)?10:CALL_FUNC1_SIZE))
1.74 /**
1.75 * Exit the block to an absolute PC
1.76 */
1.77 @@ -127,8 +134,14 @@
1.78 {
1.79 load_imm32( R_ECX, pc ); // 5
1.80 store_spreg( R_ECX, REG_OFFSET(pc) ); // 3
1.81 - REXW(); MOV_moff32_EAX( xlat_get_lut_entry(pc) );
1.82 - REXW(); AND_imm8s_r32( 0xFC, R_EAX ); // 3
1.83 + if( IS_IN_ICACHE(pc) ) {
1.84 + REXW(); MOV_moff32_EAX( xlat_get_lut_entry(pc) );
1.85 + } else if( sh4_x86.tlb_on ) {
1.86 + call_func1(xlat_get_code_by_vma, R_ECX);
1.87 + } else {
1.88 + call_func1(xlat_get_code,R_ECX);
1.89 + }
1.90 + REXW(); AND_imm8s_r32( 0xFC, R_EAX ); // 4
1.91 load_imm32( R_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
1.92 ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) ); // 6
1.93 POP_r32(R_EBP);
1.94 @@ -136,51 +149,112 @@
1.95 }
1.96
1.97
1.98 +#define EXIT_BLOCK_REL_SIZE(pc) (28 + (IS_IN_ICACHE(pc)?10:CALL_FUNC1_SIZE))
1.99 +
1.100 +/**
1.101 + * Exit the block to a relative PC
1.102 + */
1.103 +void exit_block_rel( sh4addr_t pc, sh4addr_t endpc )
1.104 +{
1.105 + load_imm32( R_ECX, pc - sh4_x86.block_start_pc ); // 5
1.106 + ADD_sh4r_r32( R_PC, R_ECX );
1.107 + store_spreg( R_ECX, REG_OFFSET(pc) ); // 3
1.108 + if( IS_IN_ICACHE(pc) ) {
1.109 + REXW(); MOV_moff32_EAX( xlat_get_lut_entry(GET_ICACHE_PHYS(pc)) ); // 5
1.110 + } else if( sh4_x86.tlb_on ) {
1.111 + call_func1(xlat_get_code_by_vma,R_ECX);
1.112 + } else {
1.113 + call_func1(xlat_get_code,R_ECX);
1.114 + }
1.115 + REXW(); AND_imm8s_r32( 0xFC, R_EAX ); // 4
1.116 + load_imm32( R_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
1.117 + ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) ); // 6
1.118 + POP_r32(R_EBP);
1.119 + RET();
1.120 +}
1.121 +
1.122 /**
1.123 * Write the block trailer (exception handling block)
1.124 */
1.125 void sh4_translate_end_block( sh4addr_t pc ) {
1.126 if( sh4_x86.branch_taken == FALSE ) {
1.127 // Didn't exit unconditionally already, so write the termination here
1.128 - exit_block( pc, pc );
1.129 + exit_block_rel( pc, pc );
1.130 }
1.131 if( sh4_x86.backpatch_posn != 0 ) {
1.132 + unsigned int i;
1.133 + // Raise exception
1.134 uint8_t *end_ptr = xlat_output;
1.135 - // Exception termination. Jump block for various exception codes:
1.136 - load_imm32( R_EDI, EXC_DATA_ADDR_READ );
1.137 - JMP_rel8( 33, target1 );
1.138 - load_imm32( R_EDI, EXC_DATA_ADDR_WRITE );
1.139 - JMP_rel8( 26, target2 );
1.140 - load_imm32( R_EDI, EXC_ILLEGAL );
1.141 - JMP_rel8( 19, target3 );
1.142 - load_imm32( R_EDI, EXC_SLOT_ILLEGAL );
1.143 - JMP_rel8( 12, target4 );
1.144 - load_imm32( R_EDI, EXC_FPU_DISABLED );
1.145 - JMP_rel8( 5, target5 );
1.146 - load_imm32( R_EDI, EXC_SLOT_FPU_DISABLED );
1.147 - // target
1.148 - JMP_TARGET(target1);
1.149 - JMP_TARGET(target2);
1.150 - JMP_TARGET(target3);
1.151 - JMP_TARGET(target4);
1.152 - JMP_TARGET(target5);
1.153 - // Raise exception
1.154 - load_spreg( R_ECX, REG_OFFSET(pc) );
1.155 + MOV_r32_r32( R_EDX, R_ECX );
1.156 ADD_r32_r32( R_EDX, R_ECX );
1.157 - ADD_r32_r32( R_EDX, R_ECX );
1.158 - store_spreg( R_ECX, REG_OFFSET(pc) );
1.159 + ADD_r32_sh4r( R_ECX, R_PC );
1.160 MOV_moff32_EAX( &sh4_cpu_period );
1.161 MUL_r32( R_EDX );
1.162 ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );
1.163
1.164 call_func0( sh4_raise_exception );
1.165 - load_spreg( R_EAX, REG_OFFSET(pc) );
1.166 - call_func1(xlat_get_code,R_EAX);
1.167 + load_spreg( R_EAX, R_PC );
1.168 + if( sh4_x86.tlb_on ) {
1.169 + call_func1(xlat_get_code_by_vma,R_EAX);
1.170 + } else {
1.171 + call_func1(xlat_get_code,R_EAX);
1.172 + }
1.173 POP_r32(R_EBP);
1.174 RET();
1.175
1.176 - sh4_x86_do_backpatch( end_ptr );
1.177 + // Exception already raised - just cleanup
1.178 + uint8_t *preexc_ptr = xlat_output;
1.179 + MOV_r32_r32( R_EDX, R_ECX );
1.180 + ADD_r32_r32( R_EDX, R_ECX );
1.181 + ADD_r32_sh4r( R_ECX, R_SPC );
1.182 + MOV_moff32_EAX( &sh4_cpu_period );
1.183 + MUL_r32( R_EDX );
1.184 + ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );
1.185 + load_spreg( R_EDI, R_PC );
1.186 + if( sh4_x86.tlb_on ) {
1.187 + call_func0(xlat_get_code_by_vma);
1.188 + } else {
1.189 + call_func0(xlat_get_code);
1.190 + }
1.191 + POP_r32(R_EBP);
1.192 + RET();
1.193 +
1.194 + for( i=0; i< sh4_x86.backpatch_posn; i++ ) {
1.195 + *sh4_x86.backpatch_list[i].fixup_addr =
1.196 + xlat_output - ((uint8_t *)sh4_x86.backpatch_list[i].fixup_addr) - 4;
1.197 + if( sh4_x86.backpatch_list[i].exc_code == -1 ) {
1.198 + load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
1.199 + int rel = preexc_ptr - xlat_output;
1.200 + JMP_rel(rel);
1.201 + } else {
1.202 + load_imm32( R_EDI, sh4_x86.backpatch_list[i].exc_code );
1.203 + load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
1.204 + int rel = end_ptr - xlat_output;
1.205 + JMP_rel(rel);
1.206 + }
1.207 + }
1.208 }
1.209 }
1.210
1.211 +_Unwind_Reason_Code xlat_check_frame( struct _Unwind_Context *context, void *arg )
1.212 +{
1.213 + void *rbp = (void *)_Unwind_GetGR(context, 6);
1.214 + if( rbp == (void *)&sh4r ) {
1.215 + void **result = (void **)arg;
1.216 + *result = (void *)_Unwind_GetIP(context);
1.217 + return _URC_NORMAL_STOP;
1.218 + }
1.219 +
1.220 + return _URC_NO_REASON;
1.221 +}
1.222 +
1.223 +void *xlat_get_native_pc()
1.224 +{
1.225 + struct _Unwind_Exception exc;
1.226 +
1.227 + void *result = NULL;
1.228 + _Unwind_Backtrace( xlat_check_frame, &result );
1.229 + return result;
1.230 +}
1.231 +
1.232 #endif
.