Search
lxdream.org :: lxdream :: r927:17b6b9e245d8
lxdream 0.9.1
released Jun 29
Download Now
changeset 927:17b6b9e245d8
parent 926:68f3e0fe02f1
child 928:bf87fbdcc9a5
child 953:f4a156508ad1
author nkeynes
date Mon Dec 15 10:44:56 2008 +0000 (15 years ago)
Add return-address-modifying exception return code to mmu TLB lookups (a little bit faster)
acinclude.m4
config.h.in
configure
configure.in
src/sh4/ia32abi.h
src/sh4/ia64abi.h
src/sh4/mmu.c
src/sh4/sh4core.h
src/sh4/sh4core.in
src/sh4/sh4mem.c
src/sh4/sh4x86.in
src/sh4/x86op.h
1.1 --- a/acinclude.m4 Sun Dec 14 07:50:48 2008 +0000
1.2 +++ b/acinclude.m4 Mon Dec 15 10:44:56 2008 +0000
1.3 @@ -18,3 +18,37 @@
1.4 $2 ])
1.5 ])
1.6
1.7 +# AC_CHECK_FRAME_ADDRESS([if-ok],[if-notok])
1.8 +# Test if the compiler will let us modify the return address on the stack
1.9 +# via __builtin_frame_address()
1.10 +# -----------------------
1.11 +AC_DEFUN([AC_CHECK_FRAME_ADDRESS], [
1.12 +AC_MSG_CHECKING([if we have a working __builtin_frame_address()]);
1.13 +AC_RUN_IFELSE([
1.14 + AC_LANG_SOURCE([[
1.15 +void * __attribute__((noinline)) first_arg( void *x, void *y ) { return x; }
1.16 +int __attribute__((noinline)) foo( int arg, void *exc )
1.17 +{
1.18 + if( arg < 2 ) {
1.19 + *(((void **)__builtin_frame_address(0))+1) = exc;
1.20 + }
1.21 + return 0;
1.22 +}
1.23 +
1.24 +int main(int argc, char *argv[])
1.25 +{
1.26 + goto *first_arg(&&start, &&except);
1.27 +
1.28 +start:
1.29 + return foo( argc, &&except ) + 1;
1.30 +
1.31 +except:
1.32 + return 0;
1.33 +}]])], [
1.34 + AC_MSG_RESULT([yes])
1.35 + $1 ], [
1.36 + AC_MSG_RESULT([no])
1.37 + $2 ])
1.38 +])
1.39 +
1.40 +
2.1 --- a/config.h.in Sun Dec 14 07:50:48 2008 +0000
2.2 +++ b/config.h.in Mon Dec 15 10:44:56 2008 +0000
2.3 @@ -49,6 +49,9 @@
2.4 /* Use fast register-passing calling conventions */
2.5 #undef HAVE_FASTCALL
2.6
2.7 +/* Define if we have a working builtin frame_address */
2.8 +#undef HAVE_FRAME_ADDRESS
2.9 +
2.10 /* Define if the GNU gettext() function is already present or preinstalled. */
2.11 #undef HAVE_GETTEXT
2.12
3.1 --- a/configure Sun Dec 14 07:50:48 2008 +0000
3.2 +++ b/configure Mon Dec 15 10:44:56 2008 +0000
3.3 @@ -6745,6 +6745,94 @@
3.4
3.5
3.6
3.7 +{ $as_echo "$as_me:$LINENO: checking if we have a working __builtin_frame_address()" >&5
3.8 +$as_echo_n "checking if we have a working __builtin_frame_address()... " >&6; };
3.9 +if test "$cross_compiling" = yes; then
3.10 + { { $as_echo "$as_me:$LINENO: error: cannot run test program while cross compiling
3.11 +See \`config.log' for more details." >&5
3.12 +$as_echo "$as_me: error: cannot run test program while cross compiling
3.13 +See \`config.log' for more details." >&2;}
3.14 + { (exit 1); exit 1; }; }
3.15 +else
3.16 + cat >conftest.$ac_ext <<_ACEOF
3.17 +
3.18 + /* confdefs.h. */
3.19 +_ACEOF
3.20 +cat confdefs.h >>conftest.$ac_ext
3.21 +cat >>conftest.$ac_ext <<_ACEOF
3.22 +/* end confdefs.h. */
3.23 +
3.24 +void * __attribute__((noinline)) first_arg( void *x, void *y ) { return x; }
3.25 +int __attribute__((noinline)) foo( int arg, void *exc )
3.26 +{
3.27 + if( arg < 2 ) {
3.28 + *(((void **)__builtin_frame_address(0))+1) = exc;
3.29 + }
3.30 + return 0;
3.31 +}
3.32 +
3.33 +int main(int argc, char *argv[])
3.34 +{
3.35 + goto *first_arg(&&start, &&except);
3.36 +
3.37 +start:
3.38 + return foo( argc, &&except ) + 1;
3.39 +
3.40 +except:
3.41 + return 0;
3.42 +}
3.43 +_ACEOF
3.44 +rm -f conftest$ac_exeext
3.45 +if { (ac_try="$ac_link"
3.46 +case "(($ac_try" in
3.47 + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
3.48 + *) ac_try_echo=$ac_try;;
3.49 +esac
3.50 +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
3.51 +$as_echo "$ac_try_echo") >&5
3.52 + (eval "$ac_link") 2>&5
3.53 + ac_status=$?
3.54 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
3.55 + (exit $ac_status); } && { ac_try='./conftest$ac_exeext'
3.56 + { (case "(($ac_try" in
3.57 + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
3.58 + *) ac_try_echo=$ac_try;;
3.59 +esac
3.60 +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\""
3.61 +$as_echo "$ac_try_echo") >&5
3.62 + (eval "$ac_try") 2>&5
3.63 + ac_status=$?
3.64 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5
3.65 + (exit $ac_status); }; }; then
3.66 +
3.67 + { $as_echo "$as_me:$LINENO: result: yes" >&5
3.68 +$as_echo "yes" >&6; }
3.69 +
3.70 +
3.71 +cat >>confdefs.h <<\_ACEOF
3.72 +#define HAVE_FRAME_ADDRESS 1
3.73 +_ACEOF
3.74 +
3.75 +
3.76 +else
3.77 + $as_echo "$as_me: program exited with status $ac_status" >&5
3.78 +$as_echo "$as_me: failed program was:" >&5
3.79 +sed 's/^/| /' conftest.$ac_ext >&5
3.80 +
3.81 +( exit $ac_status )
3.82 +
3.83 + { $as_echo "$as_me:$LINENO: result: no" >&5
3.84 +$as_echo "no" >&6; }
3.85 + { $as_echo "$as_me:$LINENO: WARNING: Memory exceptions will be slow" >&5
3.86 +$as_echo "$as_me: WARNING: Memory exceptions will be slow" >&2;}
3.87 +fi
3.88 +rm -rf conftest.dSYM
3.89 +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext
3.90 +fi
3.91 +
3.92 +
3.93 +
3.94 +
3.95
3.96
3.97 lxdream_save_cppflags="$CPPFLAGS"
4.1 --- a/configure.in Sun Dec 14 07:50:48 2008 +0000
4.2 +++ b/configure.in Mon Dec 15 10:44:56 2008 +0000
4.3 @@ -83,6 +83,9 @@
4.4 AC_CHECK_FASTCALL([
4.5 AC_DEFINE(HAVE_FASTCALL, [1], [Use fast register-passing calling conventions])
4.6 ])
4.7 +AC_CHECK_FRAME_ADDRESS( [
4.8 + AC_DEFINE(HAVE_FRAME_ADDRESS, [1], [Define if we have a working builtin frame_address])
4.9 +], [ AC_MSG_WARN([Memory exceptions will be slow]) ])
4.10
4.11
4.12 dnl ------------ Check if we're building on Darwin --------------
5.1 --- a/src/sh4/ia32abi.h Sun Dec 14 07:50:48 2008 +0000
5.2 +++ b/src/sh4/ia32abi.h Mon Dec 15 10:44:56 2008 +0000
5.3 @@ -53,6 +53,27 @@
5.4 CALL_ptr(ptr);
5.5 }
5.6
5.7 +static inline void call_func1_exc( void *ptr, int arg1, int pc )
5.8 +{
5.9 + if( arg1 != R_EAX ) {
5.10 + MOV_r32_r32( arg1, R_EAX );
5.11 + }
5.12 + load_exc_backpatch(R_EDX);
5.13 + CALL_ptr(ptr);
5.14 +}
5.15 +
5.16 +static inline void call_func2_exc( void *ptr, int arg1, int arg2, int pc )
5.17 +{
5.18 + if( arg2 != R_EDX ) {
5.19 + MOV_r32_r32( arg2, R_EDX );
5.20 + }
5.21 + if( arg1 != R_EAX ) {
5.22 + MOV_r32_r32( arg1, R_EAX );
5.23 + }
5.24 + load_exc_backpatch(R_ECX);
5.25 + CALL_ptr(ptr);
5.26 +}
5.27 +
5.28 /**
5.29 * Write a double (64-bit) value into memory, with the first word in arg2a, and
5.30 * the second in arg2b
5.31 @@ -285,16 +306,17 @@
5.32
5.33 for( i=0; i< sh4_x86.backpatch_posn; i++ ) {
5.34 uint32_t *fixup_addr = (uint32_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset];
5.35 - *fixup_addr = xlat_output - (uint8_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset] - 4;
5.36 if( sh4_x86.backpatch_list[i].exc_code < 0 ) {
5.37 + if( sh4_x86.backpatch_list[i].exc_code == -2 ) {
5.38 + *fixup_addr = (uint32_t)xlat_output;
5.39 + } else {
5.40 + *fixup_addr += xlat_output - (uint8_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset] - 4;
5.41 + }
5.42 load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
5.43 - int stack_adj = -1 - sh4_x86.backpatch_list[i].exc_code;
5.44 - if( stack_adj > 0 ) {
5.45 - ADD_imm8s_r32( stack_adj, R_ESP );
5.46 - }
5.47 int rel = preexc_ptr - xlat_output;
5.48 JMP_rel(rel);
5.49 } else {
5.50 + *fixup_addr += xlat_output - (uint8_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset] - 4;
5.51 PUSH_imm32( sh4_x86.backpatch_list[i].exc_code );
5.52 load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
5.53 int rel = end_ptr - xlat_output;
6.1 --- a/src/sh4/ia64abi.h Sun Dec 14 07:50:48 2008 +0000
6.2 +++ b/src/sh4/ia64abi.h Mon Dec 15 10:44:56 2008 +0000
6.3 @@ -43,6 +43,13 @@
6.4 call_func0(ptr);
6.5 }
6.6
6.7 +static inline void call_func1_exc( void *ptr, int arg1, int pc )
6.8 +{
6.9 + REXW(); MOV_r32_r32(arg1, R_EDI);
6.10 + load_exc_backpatch(R_ESI);
6.11 + call_func0(ptr);
6.12 +}
6.13 +
6.14 #define CALL_FUNC2_SIZE 16
6.15 static inline void call_func2( void *ptr, int arg1, int arg2 )
6.16 {
6.17 @@ -230,16 +237,17 @@
6.18
6.19 for( i=0; i< sh4_x86.backpatch_posn; i++ ) {
6.20 uint32_t *fixup_addr = (uint32_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset];
6.21 - *fixup_addr = xlat_output - (uint8_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset] - 4;
6.22 if( sh4_x86.backpatch_list[i].exc_code < 0 ) {
6.23 + if( sh4_x86.backpatch_list[i].exc_code == -2 ) {
6.24 + *((uintptr_t *)fixup_addr) = (uintptr_t)xlat_output;
6.25 + } else {
6.26 + *fixup_addr = xlat_output - (uint8_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset] - 4;
6.27 + }
6.28 load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
6.29 - int stack_adj = -1 - sh4_x86.backpatch_list[i].exc_code;
6.30 - if( stack_adj > 0 ) {
6.31 - REXW(); ADD_imm8s_r32( stack_adj*4, R_ESP );
6.32 - }
6.33 int rel = preexc_ptr - xlat_output;
6.34 JMP_rel(rel);
6.35 } else {
6.36 + *fixup_addr = xlat_output - (uint8_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset] - 4;
6.37 load_imm32( R_EDI, sh4_x86.backpatch_list[i].exc_code );
6.38 load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
6.39 int rel = end_ptr - xlat_output;
7.1 --- a/src/sh4/mmu.c Sun Dec 14 07:50:48 2008 +0000
7.2 +++ b/src/sh4/mmu.c Mon Dec 15 10:44:56 2008 +0000
7.3 @@ -24,6 +24,12 @@
7.4 #include "sh4/sh4trans.h"
7.5 #include "mem.h"
7.6
7.7 +#ifdef HAVE_FRAME_ADDRESS
7.8 +#define RETURN_VIA(exc) do{ *(((void **)__builtin_frame_address(0))+1) = exc; return; } while(0)
7.9 +#else
7.10 +#define RETURN_VIA(exc) return MMU_VMA_ERROR
7.11 +#endif
7.12 +
7.13 #define VMA_TO_EXT_ADDR(vma) ((vma)&0x1FFFFFFF)
7.14
7.15 /* The MMU (practically unique in the system) is allowed to raise exceptions
7.16 @@ -817,8 +823,12 @@
7.17
7.18 return result;
7.19 }
7.20 -
7.21 +
7.22 +#ifdef HAVE_FRAME_ADDRESS
7.23 +sh4addr_t FASTCALL mmu_vma_to_phys_read( sh4vma_t addr, void *exc )
7.24 +#else
7.25 sh4addr_t FASTCALL mmu_vma_to_phys_read( sh4vma_t addr )
7.26 +#endif
7.27 {
7.28 uint32_t mmucr = MMIO_READ(MMU,MMUCR);
7.29 if( addr & 0x80000000 ) {
7.30 @@ -836,7 +846,7 @@
7.31 return addr;
7.32 }
7.33 MMU_READ_ADDR_ERROR();
7.34 - return MMU_VMA_ERROR;
7.35 + RETURN_VIA(exc);
7.36 }
7.37 }
7.38
7.39 @@ -855,16 +865,16 @@
7.40 switch(entryNo) {
7.41 case -1:
7.42 MMU_TLB_READ_MISS_ERROR(addr);
7.43 - return MMU_VMA_ERROR;
7.44 + RETURN_VIA(exc);
7.45 case -2:
7.46 MMU_TLB_MULTI_HIT_ERROR(addr);
7.47 - return MMU_VMA_ERROR;
7.48 + RETURN_VIA(exc);
7.49 default:
7.50 if( (mmu_utlb[entryNo].flags & TLB_USERMODE) == 0 &&
7.51 !IS_SH4_PRIVMODE() ) {
7.52 /* protection violation */
7.53 MMU_TLB_READ_PROT_ERROR(addr);
7.54 - return MMU_VMA_ERROR;
7.55 + RETURN_VIA(exc);
7.56 }
7.57
7.58 /* finally generate the target address */
7.59 @@ -873,7 +883,11 @@
7.60 }
7.61 }
7.62
7.63 +#ifdef HAVE_FRAME_ADDRESS
7.64 +sh4addr_t FASTCALL mmu_vma_to_phys_write( sh4vma_t addr, void *exc )
7.65 +#else
7.66 sh4addr_t FASTCALL mmu_vma_to_phys_write( sh4vma_t addr )
7.67 +#endif
7.68 {
7.69 uint32_t mmucr = MMIO_READ(MMU,MMUCR);
7.70 if( addr & 0x80000000 ) {
7.71 @@ -891,7 +905,7 @@
7.72 return addr;
7.73 }
7.74 MMU_WRITE_ADDR_ERROR();
7.75 - return MMU_VMA_ERROR;
7.76 + RETURN_VIA(exc);
7.77 }
7.78 }
7.79
7.80 @@ -910,21 +924,21 @@
7.81 switch(entryNo) {
7.82 case -1:
7.83 MMU_TLB_WRITE_MISS_ERROR(addr);
7.84 - return MMU_VMA_ERROR;
7.85 + RETURN_VIA(exc);
7.86 case -2:
7.87 MMU_TLB_MULTI_HIT_ERROR(addr);
7.88 - return MMU_VMA_ERROR;
7.89 + RETURN_VIA(exc);
7.90 default:
7.91 if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
7.92 : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
7.93 /* protection violation */
7.94 MMU_TLB_WRITE_PROT_ERROR(addr);
7.95 - return MMU_VMA_ERROR;
7.96 + RETURN_VIA(exc);
7.97 }
7.98
7.99 if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
7.100 MMU_TLB_INITIAL_WRITE_ERROR(addr);
7.101 - return MMU_VMA_ERROR;
7.102 + RETURN_VIA(exc);
7.103 }
7.104
7.105 /* finally generate the target address */
8.1 --- a/src/sh4/sh4core.h Sun Dec 14 07:50:48 2008 +0000
8.2 +++ b/src/sh4/sh4core.h Mon Dec 15 10:44:56 2008 +0000
8.3 @@ -199,8 +199,13 @@
8.4 * @return An external address (0x00000000-0x1FFFFFFF), a P4 address
8.5 * (0xE0000000 - 0xFFFFFFFF), or MMU_VMA_ERROR.
8.6 */
8.7 +#ifdef HAVE_FRAME_ADDRESS
8.8 +sh4addr_t FASTCALL mmu_vma_to_phys_read( sh4vma_t addr, void *exc );
8.9 +sh4addr_t FASTCALL mmu_vma_to_phys_write( sh4vma_t addr, void *exc );
8.10 +#else
8.11 sh4addr_t FASTCALL mmu_vma_to_phys_read( sh4vma_t addr );
8.12 sh4addr_t FASTCALL mmu_vma_to_phys_write( sh4vma_t addr );
8.13 +#endif
8.14 sh4addr_t FASTCALL mmu_vma_to_phys_disasm( sh4vma_t addr );
8.15
8.16 int64_t FASTCALL sh4_read_quad( sh4addr_t addr );
9.1 --- a/src/sh4/sh4core.in Sun Dec 14 07:50:48 2008 +0000
9.2 +++ b/src/sh4/sh4core.in Mon Dec 15 10:44:56 2008 +0000
9.3 @@ -164,62 +164,58 @@
9.4 #define CHECKDEST(p) if( (p) == 0 ) { ERROR( "%08X: Branch/jump to NULL, CPU halted", sh4r.pc ); sh4_core_exit(CORE_EXIT_HALT); return FALSE; }
9.5 #define CHECKSLOTILLEGAL() if(sh4r.in_delay_slot) return sh4_raise_exception(EXC_SLOT_ILLEGAL)
9.6
9.7 -#define MEM_READ_BYTE( addr, val ) memtmp = mmu_vma_to_phys_read(addr); if( memtmp == MMU_VMA_ERROR ) { return TRUE; } else { val = sh4_read_byte(memtmp); }
9.8 -#define MEM_READ_WORD( addr, val ) memtmp = mmu_vma_to_phys_read(addr); if( memtmp == MMU_VMA_ERROR ) { return TRUE; } else { val = sh4_read_word(memtmp); }
9.9 -#define MEM_READ_LONG( addr, val ) memtmp = mmu_vma_to_phys_read(addr); if( memtmp == MMU_VMA_ERROR ) { return TRUE; } else { val = sh4_read_long(memtmp); }
9.10 -#define MEM_WRITE_BYTE( addr, val ) memtmp = mmu_vma_to_phys_write(addr); if( memtmp == MMU_VMA_ERROR ) { return TRUE; } else { sh4_write_byte(memtmp, val); }
9.11 -#define MEM_WRITE_WORD( addr, val ) memtmp = mmu_vma_to_phys_write(addr); if( memtmp == MMU_VMA_ERROR ) { return TRUE; } else { sh4_write_word(memtmp, val); }
9.12 -#define MEM_WRITE_LONG( addr, val ) memtmp = mmu_vma_to_phys_write(addr); if( memtmp == MMU_VMA_ERROR ) { return TRUE; } else { sh4_write_long(memtmp, val); }
9.13 +#ifdef HAVE_FRAME_ADDRESS
9.14 +static FASTCALL __attribute__((noinline)) void *__first_arg(void *a, void *b) { return a; }
9.15 +#define INIT_EXCEPTIONS(label) goto *__first_arg(&&fnstart,&&label); fnstart:
9.16 +#define MMU_TRANSLATE_READ( addr ) memtmp = mmu_vma_to_phys_read(addr, &&except )
9.17 +#define MMU_TRANSLATE_WRITE( addr ) memtmp = mmu_vma_to_phys_write(addr, &&except )
9.18 +#else
9.19 +#define INIT_EXCEPTIONS(label)
9.20 +#define MMU_TRANSLATE_READ( addr ) if( (memtmp = mmu_vma_to_phys_read(addr)) == MMU_VMA_ERROR ) { return TRUE; }
9.21 +#define MMU_TRANSLATE_WRITE( addr ) if( (memtmp = mmu_vma_to_phys_write(addr)) == MMU_VMA_ERROR ) { return TRUE; }
9.22 +#endif
9.23 +
9.24 +#define MEM_READ_BYTE( addr, val ) MMU_TRANSLATE_READ(addr); val = sh4_read_byte(memtmp)
9.25 +#define MEM_READ_WORD( addr, val ) MMU_TRANSLATE_READ(addr); val = sh4_read_word(memtmp)
9.26 +#define MEM_READ_LONG( addr, val ) MMU_TRANSLATE_READ(addr); val = sh4_read_long(memtmp)
9.27 +#define MEM_WRITE_BYTE( addr, val ) MMU_TRANSLATE_WRITE(addr); sh4_write_byte(memtmp, val)
9.28 +#define MEM_WRITE_WORD( addr, val ) MMU_TRANSLATE_WRITE(addr); sh4_write_word(memtmp, val)
9.29 +#define MEM_WRITE_LONG( addr, val ) MMU_TRANSLATE_WRITE(addr); sh4_write_long(memtmp, val)
9.30 +
9.31
9.32 #define FP_WIDTH (IS_FPU_DOUBLESIZE() ? 8 : 4)
9.33
9.34 #define MEM_FP_READ( addr, reg ) \
9.35 if( IS_FPU_DOUBLESIZE() ) { \
9.36 CHECKRALIGN64(addr); \
9.37 - memtmp = mmu_vma_to_phys_read(addr); \
9.38 - if( memtmp == MMU_VMA_ERROR ) { \
9.39 - return TRUE; \
9.40 - } else { \
9.41 - if( reg & 1 ) { \
9.42 - *((uint32_t *)&XF((reg) & 0x0E)) = sh4_read_long(memtmp); \
9.43 - *((uint32_t *)&XF(reg)) = sh4_read_long(memtmp+4); \
9.44 - } else { \
9.45 - *((uint32_t *)&FR(reg)) = sh4_read_long(memtmp); \
9.46 - *((uint32_t *)&FR((reg) | 0x01)) = sh4_read_long(memtmp+4); \
9.47 - } \
9.48 + MMU_TRANSLATE_READ(addr); \
9.49 + if( reg & 1 ) { \
9.50 + *((uint32_t *)&XF((reg) & 0x0E)) = sh4_read_long(memtmp); \
9.51 + *((uint32_t *)&XF(reg)) = sh4_read_long(memtmp+4); \
9.52 + } else { \
9.53 + *((uint32_t *)&FR(reg)) = sh4_read_long(memtmp); \
9.54 + *((uint32_t *)&FR((reg) | 0x01)) = sh4_read_long(memtmp+4); \
9.55 } \
9.56 } else { \
9.57 CHECKRALIGN32(addr); \
9.58 - memtmp = mmu_vma_to_phys_read(addr); \
9.59 - if( memtmp == MMU_VMA_ERROR ) { \
9.60 - return TRUE; \
9.61 - } else { \
9.62 - *((uint32_t *)&FR(reg)) = sh4_read_long(memtmp); \
9.63 - } \
9.64 + MMU_TRANSLATE_READ(addr); \
9.65 + *((uint32_t *)&FR(reg)) = sh4_read_long(memtmp); \
9.66 }
9.67 #define MEM_FP_WRITE( addr, reg ) \
9.68 if( IS_FPU_DOUBLESIZE() ) { \
9.69 CHECKWALIGN64(addr); \
9.70 - memtmp = mmu_vma_to_phys_write(addr); \
9.71 - if( memtmp == MMU_VMA_ERROR ) { \
9.72 - return TRUE; \
9.73 - } else { \
9.74 - if( reg & 1 ) { \
9.75 - sh4_write_long( memtmp, *((uint32_t *)&XF((reg)&0x0E)) ); \
9.76 - sh4_write_long( memtmp+4, *((uint32_t *)&XF(reg)) ); \
9.77 - } else { \
9.78 - sh4_write_long( memtmp, *((uint32_t *)&FR(reg)) ); \
9.79 - sh4_write_long( memtmp+4, *((uint32_t *)&FR((reg)|0x01)) ); \
9.80 - } \
9.81 + MMU_TRANSLATE_WRITE(addr); \
9.82 + if( reg & 1 ) { \
9.83 + sh4_write_long( memtmp, *((uint32_t *)&XF((reg)&0x0E)) ); \
9.84 + sh4_write_long( memtmp+4, *((uint32_t *)&XF(reg)) ); \
9.85 + } else { \
9.86 + sh4_write_long( memtmp, *((uint32_t *)&FR(reg)) ); \
9.87 + sh4_write_long( memtmp+4, *((uint32_t *)&FR((reg)|0x01)) ); \
9.88 } \
9.89 } else { \
9.90 CHECKWALIGN32(addr); \
9.91 - memtmp = mmu_vma_to_phys_write(addr); \
9.92 - if( memtmp == MMU_VMA_ERROR ) { \
9.93 - return TRUE; \
9.94 - } else { \
9.95 - sh4_write_long( memtmp, *((uint32_t *)&FR((reg))) ); \
9.96 - } \
9.97 + MMU_TRANSLATE_WRITE(addr); \
9.98 + sh4_write_long( memtmp, *((uint32_t *)&FR((reg))) ); \
9.99 }
9.100
9.101 gboolean sh4_execute_instruction( void )
9.102 @@ -230,6 +226,8 @@
9.103 float ftmp;
9.104 double dtmp;
9.105 int64_t memtmp; // temporary holder for memory reads
9.106 +
9.107 + INIT_EXCEPTIONS(except)
9.108
9.109 #define R0 sh4r.r[0]
9.110 pc = sh4r.pc;
9.111 @@ -1178,6 +1176,8 @@
9.112 %%
9.113 sh4r.pc = sh4r.new_pc;
9.114 sh4r.new_pc += 2;
9.115 +
9.116 +except:
9.117 sh4r.in_delay_slot = 0;
9.118 return TRUE;
9.119 }
10.1 --- a/src/sh4/sh4mem.c Sun Dec 14 07:50:48 2008 +0000
10.2 +++ b/src/sh4/sh4mem.c Mon Dec 15 10:44:56 2008 +0000
10.3 @@ -432,18 +432,3 @@
10.4 }
10.5 }
10.6
10.7 -sh4ptr_t sh4_get_region_by_vma( sh4addr_t vma )
10.8 -{
10.9 - sh4addr_t addr = mmu_vma_to_phys_read(vma);
10.10 - if( addr == MMU_VMA_ERROR ) {
10.11 - return NULL;
10.12 - }
10.13 -
10.14 - sh4ptr_t page = page_map[ (addr & 0x1FFFFFFF) >> 12 ];
10.15 - if( ((uintptr_t)page) < MAX_IO_REGIONS ) { /* IO Region */
10.16 - return NULL;
10.17 - } else {
10.18 - return page+(addr&0xFFF);
10.19 - }
10.20 -}
10.21 -
11.1 --- a/src/sh4/sh4x86.in Sun Dec 14 07:50:48 2008 +0000
11.2 +++ b/src/sh4/sh4x86.in Mon Dec 15 10:44:56 2008 +0000
11.3 @@ -296,18 +296,22 @@
11.4 #define MEM_WRITE_WORD( addr_reg, value_reg ) call_func2(sh4_write_word, addr_reg, value_reg)
11.5 #define MEM_WRITE_LONG( addr_reg, value_reg ) call_func2(sh4_write_long, addr_reg, value_reg)
11.6
11.7 +#ifdef HAVE_FRAME_ADDRESS
11.8 /**
11.9 * Perform MMU translation on the address in addr_reg for a read operation, iff the TLB is turned
11.10 * on, otherwise do nothing. Clobbers EAX, ECX and EDX. May raise a TLB exception or address error.
11.11 */
11.12 -#define MMU_TRANSLATE_READ( addr_reg ) if( sh4_x86.tlb_on ) { call_func1(mmu_vma_to_phys_read, addr_reg); CMP_imm32_r32(MMU_VMA_ERROR, R_EAX); JE_exc(-1); MEM_RESULT(addr_reg); }
11.13 +#define MMU_TRANSLATE_READ( addr_reg ) if( sh4_x86.tlb_on ) { call_func1_exc(mmu_vma_to_phys_read, addr_reg, pc); MEM_RESULT(addr_reg); }
11.14
11.15 -#define MMU_TRANSLATE_READ_EXC( addr_reg, exc_code ) if( sh4_x86.tlb_on ) { call_func1(mmu_vma_to_phys_read, addr_reg); CMP_imm32_r32(MMU_VMA_ERROR, R_EAX); JE_exc(exc_code); MEM_RESULT(addr_reg) }
11.16 /**
11.17 * Perform MMU translation on the address in addr_reg for a write operation, iff the TLB is turned
11.18 * on, otherwise do nothing. Clobbers EAX, ECX and EDX. May raise a TLB exception or address error.
11.19 */
11.20 +#define MMU_TRANSLATE_WRITE( addr_reg ) if( sh4_x86.tlb_on ) { call_func1_exc(mmu_vma_to_phys_write, addr_reg, pc); MEM_RESULT(addr_reg); }
11.21 +#else
11.22 +#define MMU_TRANSLATE_READ( addr_reg ) if( sh4_x86.tlb_on ) { call_func1(mmu_vma_to_phys_read, addr_reg); CMP_imm32_r32(MMU_VMA_ERROR, R_EAX); JE_exc(-1); MEM_RESULT(addr_reg); }
11.23 #define MMU_TRANSLATE_WRITE( addr_reg ) if( sh4_x86.tlb_on ) { call_func1(mmu_vma_to_phys_write, addr_reg); CMP_imm32_r32(MMU_VMA_ERROR, R_EAX); JE_exc(-1); MEM_RESULT(addr_reg); }
11.24 +#endif
11.25
11.26 #define SLOTILLEGAL() JMP_exc(EXC_SLOT_ILLEGAL); sh4_x86.in_delay_slot = DELAY_NONE; return 1;
11.27
11.28 @@ -320,7 +324,7 @@
11.29
11.30 void sh4_translate_begin_block( sh4addr_t pc )
11.31 {
11.32 - enter_block();
11.33 + enter_block();
11.34 sh4_x86.in_delay_slot = FALSE;
11.35 sh4_x86.priv_checked = FALSE;
11.36 sh4_x86.fpuen_checked = FALSE;
12.1 --- a/src/sh4/x86op.h Sun Dec 14 07:50:48 2008 +0000
12.2 +++ b/src/sh4/x86op.h Mon Dec 15 10:44:56 2008 +0000
12.3 @@ -54,11 +54,13 @@
12.4 #define AND_imm8s_rptr(imm, r1) REXW(); AND_imm8s_r32( imm, r1 )
12.5 #define LEA_sh4r_rptr(disp, r1) REXW(); LEA_sh4r_r32(disp,r1)
12.6 #define MOV_moffptr_EAX(offptr) REXW(); MOV_moff32_EAX( offptr )
12.7 +#define load_exc_backpatch( x86reg ) REXW(); OP(0xB8 + x86reg); sh4_x86_add_backpatch( xlat_output, pc, -2 ); OP64( 0 )
12.8 #else /* 32-bit system */
12.9 #define OPPTR(x) OP32((uint32_t)(x))
12.10 #define AND_imm8s_rptr(imm, r1) AND_imm8s_r32( imm, r1 )
12.11 #define LEA_sh4r_rptr(disp, r1) LEA_sh4r_r32(disp,r1)
12.12 #define MOV_moffptr_EAX(offptr) MOV_moff32_EAX( offptr )
12.13 +#define load_exc_backpatch( x86reg ) OP(0xB8 + x86reg); sh4_x86_add_backpatch( xlat_output, pc, -2 ); OP32( 0 )
12.14 #endif
12.15 #define STACK_ALIGN 16
12.16 #define POP_r32(r1) OP(0x58 + r1)
.