changeset  559:06714bc64271 lxdream-mmu
parent     558:428cf8528699
child      560:a7ed8e70d698
author     nkeynes
date       Tue Jan 01 04:58:57 2008 +0000
branch     lxdream-mmu
Commit first pass at full TLB support - still needs a lot more work
src/sh4/ia32abi.h
src/sh4/ia32mac.h
src/sh4/ia64abi.h
src/sh4/mmu.c
src/sh4/sh4.c
src/sh4/sh4core.c
src/sh4/sh4core.h
src/sh4/sh4core.in
src/sh4/sh4mem.c
src/sh4/sh4trans.c
src/sh4/sh4x86.c
src/sh4/sh4x86.in
src/sh4/x86op.h
1.1 --- a/src/sh4/ia32abi.h Tue Jan 01 04:56:52 2008 +0000
1.2 +++ b/src/sh4/ia32abi.h Tue Jan 01 04:58:57 2008 +0000
1.3 @@ -153,26 +153,9 @@
1.4 exit_block( pc, pc );
1.5 }
1.6 if( sh4_x86.backpatch_posn != 0 ) {
1.7 + unsigned int i;
1.8 + // Raise exception
1.9 uint8_t *end_ptr = xlat_output;
1.10 - // Exception termination. Jump block for various exception codes:
1.11 - PUSH_imm32( EXC_DATA_ADDR_READ );
1.12 - JMP_rel8( 33, target1 );
1.13 - PUSH_imm32( EXC_DATA_ADDR_WRITE );
1.14 - JMP_rel8( 26, target2 );
1.15 - PUSH_imm32( EXC_ILLEGAL );
1.16 - JMP_rel8( 19, target3 );
1.17 - PUSH_imm32( EXC_SLOT_ILLEGAL );
1.18 - JMP_rel8( 12, target4 );
1.19 - PUSH_imm32( EXC_FPU_DISABLED );
1.20 - JMP_rel8( 5, target5 );
1.21 - PUSH_imm32( EXC_SLOT_FPU_DISABLED );
1.22 - // target
1.23 - JMP_TARGET(target1);
1.24 - JMP_TARGET(target2);
1.25 - JMP_TARGET(target3);
1.26 - JMP_TARGET(target4);
1.27 - JMP_TARGET(target5);
1.28 - // Raise exception
1.29 load_spreg( R_ECX, REG_OFFSET(pc) );
1.30 ADD_r32_r32( R_EDX, R_ECX );
1.31 ADD_r32_r32( R_EDX, R_ECX );
1.32 @@ -188,7 +171,34 @@
1.33 POP_r32(R_EBP);
1.34 RET();
1.35
1.36 - sh4_x86_do_backpatch( end_ptr );
1.37 + // Exception already raised - just cleanup
1.38 + uint8_t *preexc_ptr = xlat_output;
1.39 + load_imm32( R_ECX, sh4_x86.block_start_pc );
1.40 + ADD_r32_r32( R_EDX, R_ECX );
1.41 + ADD_r32_r32( R_EDX, R_ECX );
1.42 + store_spreg( R_ECX, REG_OFFSET(spc) );
1.43 + MOV_moff32_EAX( &sh4_cpu_period );
1.44 + MUL_r32( R_EDX );
1.45 + ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );
1.46 + load_spreg( R_EAX, REG_OFFSET(pc) );
1.47 + call_func1(xlat_get_code,R_EAX);
1.48 + POP_r32(R_EBP);
1.49 + RET();
1.50 +
1.51 + for( i=0; i< sh4_x86.backpatch_posn; i++ ) {
1.52 + *sh4_x86.backpatch_list[i].fixup_addr =
1.53 + xlat_output - ((uint8_t *)sh4_x86.backpatch_list[i].fixup_addr) - 4;
1.54 + if( sh4_x86.backpatch_list[i].exc_code == -1 ) {
1.55 + load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
1.56 + int rel = preexc_ptr - xlat_output;
1.57 + JMP_rel(rel);
1.58 + } else {
1.59 + PUSH_imm32( sh4_x86.backpatch_list[i].exc_code );
1.60 + load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
1.61 + int rel = end_ptr - xlat_output;
1.62 + JMP_rel(rel);
1.63 + }
1.64 + }
1.65 }
1.66 }
1.67
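The rewritten trailer replaces the old fixed jump table (one stub per exception code) with per-entry backpatching: each record in backpatch_list carries its own exception code, with -1 meaning the exception has already been raised and only cleanup is needed. A minimal sketch (not part of the patch) of the rel32 fixup arithmetic used above, assuming fixup_addr points at the 4-byte displacement field of a previously emitted jump:

    #include <stdint.h>

    /* Sketch only. An x86 rel32 displacement is measured from the first
     * byte after the 4-byte field, hence the "+ 4" (the patch writes the
     * same quantity as "xlat_output - fixup_addr - 4"). */
    static void patch_rel32( uint32_t *fixup_addr, uint8_t *target )
    {
        *fixup_addr = (uint32_t)(target - ((uint8_t *)fixup_addr + 4));
    }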
2.1 --- a/src/sh4/ia32mac.h Tue Jan 01 04:56:52 2008 +0000
2.2 +++ b/src/sh4/ia32mac.h Tue Jan 01 04:58:57 2008 +0000
2.3 @@ -176,26 +176,9 @@
2.4 exit_block( pc, pc );
2.5 }
2.6 if( sh4_x86.backpatch_posn != 0 ) {
2.7 + unsigned int i;
2.8 + // Raise exception
2.9 uint8_t *end_ptr = xlat_output;
2.10 - // Exception termination. Jump block for various exception codes:
2.11 - PUSH_imm32( EXC_DATA_ADDR_READ );
2.12 - JMP_rel8( 33, target1 );
2.13 - PUSH_imm32( EXC_DATA_ADDR_WRITE );
2.14 - JMP_rel8( 26, target2 );
2.15 - PUSH_imm32( EXC_ILLEGAL );
2.16 - JMP_rel8( 19, target3 );
2.17 - PUSH_imm32( EXC_SLOT_ILLEGAL );
2.18 - JMP_rel8( 12, target4 );
2.19 - PUSH_imm32( EXC_FPU_DISABLED );
2.20 - JMP_rel8( 5, target5 );
2.21 - PUSH_imm32( EXC_SLOT_FPU_DISABLED );
2.22 - // target
2.23 - JMP_TARGET(target1);
2.24 - JMP_TARGET(target2);
2.25 - JMP_TARGET(target3);
2.26 - JMP_TARGET(target4);
2.27 - JMP_TARGET(target5);
2.28 - // Raise exception
2.29 load_spreg( R_ECX, REG_OFFSET(pc) );
2.30 ADD_r32_r32( R_EDX, R_ECX );
2.31 ADD_r32_r32( R_EDX, R_ECX );
2.32 @@ -204,14 +187,41 @@
2.33 MUL_r32( R_EDX );
2.34 ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );
2.35
2.36 - POP_r32(R_EDX);
2.37 - call_func1( sh4_raise_exception, R_EDX );
2.38 + POP_r32(R_EDX);
2.39 + call_func1( sh4_raise_exception, R_EDX );
2.40 load_spreg( R_EAX, REG_OFFSET(pc) );
2.41 call_func1(xlat_get_code,R_EAX);
2.42 POP_r32(R_EBP);
2.43 RET();
2.44
2.45 - sh4_x86_do_backpatch( end_ptr );
2.46 + // Exception already raised - just cleanup
2.47 + uint8_t *preexc_ptr = xlat_output;
2.48 + load_imm32( R_ECX, sh4_x86.block_start_pc );
2.49 + ADD_r32_r32( R_EDX, R_ECX );
2.50 + ADD_r32_r32( R_EDX, R_ECX );
2.51 + store_spreg( R_ECX, REG_OFFSET(spc) );
2.52 + MOV_moff32_EAX( &sh4_cpu_period );
2.53 + MUL_r32( R_EDX );
2.54 + ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );
2.55 + load_spreg( R_EAX, REG_OFFSET(pc) );
2.56 + call_func1(xlat_get_code,R_EAX);
2.57 + POP_r32(R_EBP);
2.58 + RET();
2.59 +
2.60 + for( i=0; i< sh4_x86.backpatch_posn; i++ ) {
2.61 + *sh4_x86.backpatch_list[i].fixup_addr =
2.62 + xlat_output - ((uint8_t *)sh4_x86.backpatch_list[i].fixup_addr) - 4;
2.63 + if( sh4_x86.backpatch_list[i].exc_code == -1 ) {
2.64 + load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
2.65 + int rel = preexc_ptr - xlat_output;
2.66 + JMP_rel(rel);
2.67 + } else {
2.68 + PUSH_imm32( sh4_x86.backpatch_list[i].exc_code );
2.69 + load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
2.70 + int rel = end_ptr - xlat_output;
2.71 + JMP_rel(rel);
2.72 + }
2.73 + }
2.74 }
2.75 }
2.76
3.1 --- a/src/sh4/ia64abi.h Tue Jan 01 04:56:52 2008 +0000
3.2 +++ b/src/sh4/ia64abi.h Tue Jan 01 04:58:57 2008 +0000
3.3 @@ -135,7 +135,6 @@
3.4 RET();
3.5 }
3.6
3.7 -
3.8 /**
3.9 * Write the block trailer (exception handling block)
3.10 */
3.11 @@ -145,26 +144,9 @@
3.12 exit_block( pc, pc );
3.13 }
3.14 if( sh4_x86.backpatch_posn != 0 ) {
3.15 + unsigned int i;
3.16 + // Raise exception
3.17 uint8_t *end_ptr = xlat_output;
3.18 - // Exception termination. Jump block for various exception codes:
3.19 - load_imm32( R_EDI, EXC_DATA_ADDR_READ );
3.20 - JMP_rel8( 33, target1 );
3.21 - load_imm32( R_EDI, EXC_DATA_ADDR_WRITE );
3.22 - JMP_rel8( 26, target2 );
3.23 - load_imm32( R_EDI, EXC_ILLEGAL );
3.24 - JMP_rel8( 19, target3 );
3.25 - load_imm32( R_EDI, EXC_SLOT_ILLEGAL );
3.26 - JMP_rel8( 12, target4 );
3.27 - load_imm32( R_EDI, EXC_FPU_DISABLED );
3.28 - JMP_rel8( 5, target5 );
3.29 - load_imm32( R_EDI, EXC_SLOT_FPU_DISABLED );
3.30 - // target
3.31 - JMP_TARGET(target1);
3.32 - JMP_TARGET(target2);
3.33 - JMP_TARGET(target3);
3.34 - JMP_TARGET(target4);
3.35 - JMP_TARGET(target5);
3.36 - // Raise exception
3.37 load_spreg( R_ECX, REG_OFFSET(pc) );
3.38 ADD_r32_r32( R_EDX, R_ECX );
3.39 ADD_r32_r32( R_EDX, R_ECX );
3.40 @@ -179,7 +161,34 @@
3.41 POP_r32(R_EBP);
3.42 RET();
3.43
3.44 - sh4_x86_do_backpatch( end_ptr );
3.45 + // Exception already raised - just cleanup
3.46 + uint8_t *preexc_ptr = xlat_output;
3.47 + load_imm32( R_ECX, sh4_x86.block_start_pc );
3.48 + ADD_r32_r32( R_EDX, R_ECX );
3.49 + ADD_r32_r32( R_EDX, R_ECX );
3.50 + store_spreg( R_ECX, REG_OFFSET(spc) );
3.51 + MOV_moff32_EAX( &sh4_cpu_period );
3.52 + MUL_r32( R_EDX );
3.53 + ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );
3.54 + load_spreg( R_EAX, REG_OFFSET(pc) );
3.55 + call_func1(xlat_get_code,R_EAX);
3.56 + POP_r32(R_EBP);
3.57 + RET();
3.58 +
3.59 + for( i=0; i< sh4_x86.backpatch_posn; i++ ) {
3.60 + *sh4_x86.backpatch_list[i].fixup_addr =
3.61 + xlat_output - ((uint8_t *)sh4_x86.backpatch_list[i].fixup_addr) - 4;
3.62 + if( sh4_x86.backpatch_list[i].exc_code == -1 ) {
3.63 + load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
3.64 + int rel = preexc_ptr - xlat_output;
3.65 + JMP_rel(rel);
3.66 + } else {
3.67 + load_imm32( R_EDI, sh4_x86.backpatch_list[i].exc_code );
3.68 + load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
3.69 + int rel = end_ptr - xlat_output;
3.70 + JMP_rel(rel);
3.71 + }
3.72 + }
3.73 }
3.74 }
3.75
4.1 --- a/src/sh4/mmu.c Tue Jan 01 04:56:52 2008 +0000
4.2 +++ b/src/sh4/mmu.c Tue Jan 01 04:58:57 2008 +0000
4.3 @@ -32,6 +32,7 @@
4.4 #define TLB_VALID 0x00000100
4.5 #define TLB_USERMODE 0x00000040
4.6 #define TLB_WRITABLE 0x00000020
4.7 +#define TLB_USERWRITABLE (TLB_WRITABLE|TLB_USERMODE)
4.8 #define TLB_SIZE_MASK 0x00000090
4.9 #define TLB_SIZE_1K 0x00000000
4.10 #define TLB_SIZE_4K 0x00000010
4.11 @@ -42,16 +43,22 @@
4.12 #define TLB_SHARE 0x00000002
4.13 #define TLB_WRITETHRU 0x00000001
4.14
4.15 +#define MASK_1K 0xFFFFFC00
4.16 +#define MASK_4K 0xFFFFF000
4.17 +#define MASK_64K 0xFFFF0000
4.18 +#define MASK_1M 0xFFF00000
4.19
4.20 struct itlb_entry {
4.21 sh4addr_t vpn; // Virtual Page Number
4.22 uint32_t asid; // Process ID
4.23 + uint32_t mask;
4.24 sh4addr_t ppn; // Physical Page Number
4.25 uint32_t flags;
4.26 };
4.27
4.28 struct utlb_entry {
4.29 sh4addr_t vpn; // Virtual Page Number
4.30 + uint32_t mask; // Page size mask
4.31 uint32_t asid; // Process ID
4.32 sh4addr_t ppn; // Physical Page Number
4.33 uint32_t flags;
4.34 @@ -69,6 +76,16 @@
4.35 static void mmu_invalidate_tlb();
4.36
4.37
4.38 +static uint32_t get_mask_for_flags( uint32_t flags )
4.39 +{
4.40 + switch( flags & TLB_SIZE_MASK ) {
4.41 + case TLB_SIZE_1K: return MASK_1K;
4.42 + case TLB_SIZE_4K: return MASK_4K;
4.43 + case TLB_SIZE_64K: return MASK_64K;
4.44 + case TLB_SIZE_1M: return MASK_1M;
4.45 + }
4.46 +}
4.47 +
4.48 int32_t mmio_region_MMU_read( uint32_t reg )
4.49 {
4.50 switch( reg ) {
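The new mask field caches the page-size mask implied by an entry's size flags, so lookups can test for a match with a single XOR/AND instead of re-decoding the size bits each time. Illustration only (values taken from the #defines above); note that TLB_SIZE_MASK (0x90) covers exactly the two size bits, so the switch is logically exhaustive even though the compiler cannot prove it and may warn about the missing return:

    uint32_t flags = TLB_VALID | TLB_SIZE_64K;
    uint32_t mask  = get_mask_for_flags( flags );  /* == MASK_64K == 0xFFFF0000 */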
4.51 @@ -125,6 +142,9 @@
4.52 fwrite( cache, 4096, 2, f );
4.53 fwrite( &mmu_itlb, sizeof(mmu_itlb), 1, f );
4.54 fwrite( &mmu_utlb, sizeof(mmu_utlb), 1, f );
4.55 + fwrite( &mmu_urc, sizeof(mmu_urc), 1, f );
4.56 + fwrite( &mmu_urb, sizeof(mmu_urb), 1, f );
4.57 + fwrite( &mmu_lrui, sizeof(mmu_lrui), 1, f );
4.58 }
4.59
4.60 int MMU_load_state( FILE *f )
4.61 @@ -142,6 +162,15 @@
4.62 if( fread( &mmu_utlb, sizeof(mmu_utlb), 1, f ) != 1 ) {
4.63 return 1;
4.64 }
4.65 + if( fread( &mmu_urc, sizeof(mmu_urc), 1, f ) != 1 ) {
4.66 + return 1;
4.67 + }
4.68 +    if( fread( &mmu_urb, sizeof(mmu_urb), 1, f ) != 1 ) {
4.69 + return 1;
4.70 + }
4.71 + if( fread( &mmu_lrui, sizeof(mmu_lrui), 1, f ) != 1 ) {
4.72 + return 1;
4.73 + }
4.74 return 0;
4.75 }
4.76
4.77 @@ -177,41 +206,389 @@
4.78 mmu_utlb[mmu_urc].ppn = MMIO_READ(MMU, PTEL) & 0x1FFFFC00;
4.79 mmu_utlb[mmu_urc].flags = MMIO_READ(MMU, PTEL) & 0x00001FF;
4.80 mmu_utlb[mmu_urc].pcmcia = MMIO_READ(MMU, PTEA);
4.81 + mmu_utlb[mmu_urc].mask = get_mask_for_flags(mmu_utlb[mmu_urc].flags);
4.82 }
4.83
4.84 -uint64_t mmu_translate_read( sh4addr_t addr )
4.85 +static inline void mmu_flush_pages( struct utlb_entry *ent )
4.86 {
4.87 - uint32_t mmucr = MMIO_READ(MMU,MMUCR);
4.88 - if( IS_SH4_PRIVMODE() ) {
4.89 - switch( addr & 0xE0000000 ) {
4.90 - case 0x80000000: case 0xA0000000:
4.91 - /* Non-translated read P1,P2 */
4.92 - break;
4.93 - case 0xE0000000:
4.94 - /* Non-translated read P4 */
4.95 - break;
4.96 - default:
4.97 - if( mmucr&MMUCR_AT ) {
4.98 - } else {
4.99 - // direct read
4.100 + unsigned int vpn;
4.101 + switch( ent->flags & TLB_SIZE_MASK ) {
4.102 + case TLB_SIZE_1K: xlat_flush_page( ent->vpn ); break;
4.103 + case TLB_SIZE_4K: xlat_flush_page( ent->vpn ); break;
4.104 + case TLB_SIZE_64K:
4.105 + for( vpn = ent->vpn; vpn < ent->vpn + 0x10000; vpn += 0x1000 ) {
4.106 + xlat_flush_page( vpn );
4.107 + }
4.108 + break;
4.109 + case TLB_SIZE_1M:
4.110 + for( vpn = ent->vpn; vpn < ent->vpn + 0x100000; vpn += 0x1000 ) {
4.111 + xlat_flush_page( vpn );
4.112 + }
4.113 + break;
4.114 + }
4.115 +}
4.116 +
4.117 +/**
4.118 + * The translations are excessively complicated, but unfortunately it's a
4.119 + * complicated system. It can undoubtedly be better optimized too.
4.120 + */
4.121 +
4.122 +/**
4.123 + * Perform the actual utlb lookup.
4.124 + * Possible outcomes are:
4.125 + * 0..63 Single match - good, return entry found
4.126 + * -1 No match - raise a tlb data miss exception
4.127 + * -2 Multiple matches - raise a multi-hit exception (reset)
4.128 + * @param vpn virtual address to resolve
4.129 + * @param asid Address space identifier
4.130 + * @param use_asid whether to require an asid match on non-shared pages.
4.131 + * @return the index of the matching UTLB entry, or a negative error code.
4.132 + */
4.133 +static inline int mmu_utlb_lookup_vpn( uint32_t vpn, uint32_t asid, int use_asid )
4.134 +{
4.135 + int result = -1;
4.136 + unsigned int i;
4.137 +
4.138 + mmu_urc++;
4.139 + if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
4.140 + mmu_urc = 0;
4.141 + }
4.142 +
4.143 + if( use_asid ) {
4.144 + for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
4.145 + if( (mmu_utlb[i].flags & TLB_VALID) &&
4.146 + ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
4.147 + ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
4.148 + if( result != -1 ) {
4.149 + return -2;
4.150 + }
4.151 + result = i;
4.152 }
4.153 }
4.154 } else {
4.155 - if( addr & 0x80000000 ) {
4.156 - if( ((addr&0xFC000000) == 0xE0000000 ) &&
4.157 - ((mmucr&MMUCR_SQMD) == 0) ) {
4.158 - // Store queue
4.159 - return 0;
4.160 + for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
4.161 + if( (mmu_utlb[i].flags & TLB_VALID) &&
4.162 + ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
4.163 + if( result != -1 ) {
4.164 + return -2;
4.165 + }
4.166 + result = i;
4.167 }
4.168 -// MMU_READ_ADDR_ERROR();
4.169 - }
4.170 - if( mmucr&MMUCR_AT ) {
4.171 - uint32_t vpn = addr & 0xFFFFFC00;
4.172 - uint32_t asid = MMIO_READ(MMU,PTEH)&0xFF;
4.173 - } else {
4.174 - // direct read
4.175 }
4.176 }
4.177 + return result;
4.178 +}
4.179 +
4.180 +/**
4.181 + * Find a UTLB entry for the associative TLB write - same as the normal
4.182 + * lookup but ignores the valid bit.
4.183 + */
4.184 +static inline int mmu_utlb_lookup_assoc( uint32_t vpn, uint32_t asid )
4.185 +{
4.186 + int result = -1;
4.187 + unsigned int i;
4.188 + for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
4.189 + if( ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
4.190 + ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
4.191 + if( result != -1 ) {
4.192 + return -2;
4.193 + }
4.194 + result = i;
4.195 + }
4.196 + }
4.197 + return result;
4.198 +}
4.199 +
4.200 +/**
4.201 + * Perform the actual itlb lookup.
4.202 + * Possible outcomes are:
4.203 + *   0..3 Single match - good, return entry found
4.204 + * -1 No match - raise a tlb data miss exception
4.205 + * -2 Multiple matches - raise a multi-hit exception (reset)
4.206 + * @param vpn virtual address to resolve
4.207 + * @param asid Address space identifier
4.208 + * @param use_asid whether to require an asid match on non-shared pages.
4.209 + * @return the index of the matching ITLB entry, or a negative error code.
4.210 + */
4.211 +static inline int mmu_itlb_lookup_vpn( uint32_t vpn, uint32_t asid, int use_asid )
4.212 +{
4.213 + int result = -1;
4.214 + unsigned int i;
4.215 + if( use_asid ) {
4.216 + for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
4.217 + if( (mmu_itlb[i].flags & TLB_VALID) &&
4.218 + ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
4.219 + ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
4.220 + if( result != -1 ) {
4.221 + return -2;
4.222 + }
4.223 + result = i;
4.224 + }
4.225 + }
4.226 + } else {
4.227 + for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
4.228 + if( (mmu_itlb[i].flags & TLB_VALID) &&
4.229 + ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
4.230 + if( result != -1 ) {
4.231 + return -2;
4.232 + }
4.233 + result = i;
4.234 + }
4.235 + }
4.236 + }
4.237 +
4.238 + switch( result ) {
4.239 + case 0: mmu_lrui = (mmu_lrui & 0x07); break;
4.240 + case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
4.241 + case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
4.242 + case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
4.243 + }
4.244 +
4.245 + return result;
4.246 +}
4.247 +
4.248 +static inline int mmu_itlb_update_from_utlb( int entryNo )
4.249 +{
4.250 + int replace;
4.251 + /* Determine entry to replace based on lrui */
4.252 +    if( (mmu_lrui & 0x38) == 0x38 ) {
4.253 + replace = 0;
4.254 + mmu_lrui = mmu_lrui & 0x07;
4.255 + } else if( (mmu_lrui & 0x26) == 0x06 ) {
4.256 + replace = 1;
4.257 + mmu_lrui = (mmu_lrui & 0x19) | 0x20;
4.258 + } else if( (mmu_lrui & 0x15) == 0x01 ) {
4.259 + replace = 2;
4.260 + mmu_lrui = (mmu_lrui & 0x3E) | 0x14;
4.261 + } else { // Note - gets invalid entries too
4.262 + replace = 3;
4.263 + mmu_lrui = (mmu_lrui | 0x0B);
4.264 + }
4.265 +
4.266 + mmu_itlb[replace].vpn = mmu_utlb[entryNo].vpn;
4.267 + mmu_itlb[replace].mask = mmu_utlb[entryNo].mask;
4.268 + mmu_itlb[replace].ppn = mmu_utlb[entryNo].ppn;
4.269 + mmu_itlb[replace].asid = mmu_utlb[entryNo].asid;
4.270 + mmu_itlb[replace].flags = mmu_utlb[entryNo].flags & 0x01DA;
4.271 + return replace;
4.272 +}
4.273 +
4.274 +/**
4.275 + * Find a ITLB entry for the associative TLB write - same as the normal
4.276 + * lookup but ignores the valid bit.
4.277 + */
4.278 +static inline int mmu_itlb_lookup_assoc( uint32_t vpn, uint32_t asid )
4.279 +{
4.280 + int result = -1;
4.281 + unsigned int i;
4.282 + for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
4.283 + if( ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
4.284 + ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
4.285 + if( result != -1 ) {
4.286 + return -2;
4.287 + }
4.288 + result = i;
4.289 + }
4.290 + }
4.291 + return result;
4.292 +}
4.293 +
4.294 +#define RAISE_TLB_ERROR(code, vpn) \
4.295 + MMIO_WRITE(MMU, TEA, vpn); \
4.296 + MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
4.297 + sh4_raise_tlb_exception(code); \
4.298 + return (((uint64_t)code)<<32)
4.299 +
4.300 +#define RAISE_MEM_ERROR(code, vpn) \
4.301 + MMIO_WRITE(MMU, TEA, vpn); \
4.302 + MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
4.303 + sh4_raise_exception(code); \
4.304 + return (((uint64_t)code)<<32)
4.305 +
4.306 +#define RAISE_OTHER_ERROR(code) \
4.307 + sh4_raise_exception(code); \
4.308 + return (((uint64_t)EXV_EXCEPTION)<<32)
4.309 +
4.310 +/**
4.311 + * Abort with a non-MMU address error. Caused by user-mode code attempting
4.312 + * to access privileged regions, or alignment faults.
4.313 + */
4.314 +#define MMU_READ_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_READ)
4.315 +#define MMU_WRITE_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_WRITE)
4.316 +
4.317 +#define MMU_TLB_READ_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_READ, vpn)
4.318 +#define MMU_TLB_WRITE_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, vpn)
4.319 +#define MMU_TLB_INITIAL_WRITE_ERROR(vpn) RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, vpn)
4.320 +#define MMU_TLB_READ_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_READ, vpn)
4.321 +#define MMU_TLB_WRITE_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, vpn)
4.322 +#define MMU_TLB_MULTI_HIT_ERROR(vpn) sh4_raise_reset(EXC_TLB_MULTI_HIT); \
4.323 + MMIO_WRITE(MMU, TEA, vpn); \
4.324 + MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
4.325 + return (((uint64_t)EXC_TLB_MULTI_HIT)<<32)
4.326 +
4.327 +uint64_t mmu_vma_to_phys_write( sh4addr_t addr )
4.328 +{
4.329 + uint32_t mmucr = MMIO_READ(MMU,MMUCR);
4.330 + if( addr & 0x80000000 ) {
4.331 + if( IS_SH4_PRIVMODE() ) {
4.332 + if( addr < 0xC0000000 || addr >= 0xE0000000 ) {
4.333 + /* P1, P2 and P4 regions are pass-through (no translation) */
4.334 + return (uint64_t)addr;
4.335 + }
4.336 + } else {
4.337 + if( addr >= 0xE0000000 && addr < 0xE4000000 &&
4.338 + ((mmucr&MMUCR_SQMD) == 0) ) {
4.339 + /* Conditional user-mode access to the store-queue (no translation) */
4.340 + return (uint64_t)addr;
4.341 + }
4.342 + MMU_WRITE_ADDR_ERROR();
4.343 + }
4.344 + }
4.345 +
4.346 + if( (mmucr & MMUCR_AT) == 0 ) {
4.347 + return (uint64_t)addr;
4.348 + }
4.349 +
4.350 + /* If we get this far, translation is required */
4.351 +
4.352 + int use_asid = ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE();
4.353 + uint32_t asid = MMIO_READ( MMU, PTEH ) & 0xFF;
4.354 +
4.355 + int entryNo = mmu_utlb_lookup_vpn( addr, asid, use_asid );
4.356 +
4.357 + switch(entryNo) {
4.358 + case -1:
4.359 + MMU_TLB_WRITE_MISS_ERROR(addr);
4.360 + break;
4.361 + case -2:
4.362 + MMU_TLB_MULTI_HIT_ERROR(addr);
4.363 + break;
4.364 + default:
4.365 + if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
4.366 + : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
4.367 + /* protection violation */
4.368 + MMU_TLB_WRITE_PROT_ERROR(addr);
4.369 + }
4.370 +
4.371 + if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
4.372 + MMU_TLB_INITIAL_WRITE_ERROR(addr);
4.373 + }
4.374 +
4.375 + /* finally generate the target address */
4.376 + return (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
4.377 + (addr & (~mmu_utlb[entryNo].mask));
4.378 + }
4.379 + return -1;
4.380 +
4.381 +}
4.382 +
4.383 +uint64_t mmu_vma_to_phys_exec( sh4addr_t addr )
4.384 +{
4.385 + uint32_t mmucr = MMIO_READ(MMU,MMUCR);
4.386 + if( addr & 0x80000000 ) {
4.387 + if( IS_SH4_PRIVMODE() ) {
4.388 + if( addr < 0xC0000000 ) {
4.389 + /* P1, P2 and P4 regions are pass-through (no translation) */
4.390 + return (uint64_t)addr;
4.391 + } else if( addr >= 0xE0000000 ) {
4.392 + MMU_READ_ADDR_ERROR();
4.393 + }
4.394 + } else {
4.395 + MMU_READ_ADDR_ERROR();
4.396 + }
4.397 + }
4.398 +
4.399 + if( (mmucr & MMUCR_AT) == 0 ) {
4.400 + return (uint64_t)addr;
4.401 + }
4.402 +
4.403 + /* If we get this far, translation is required */
4.404 + int use_asid = ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE();
4.405 + uint32_t asid = MMIO_READ( MMU, PTEH ) & 0xFF;
4.406 +
4.407 + int entryNo = mmu_itlb_lookup_vpn( addr, asid, use_asid );
4.408 + if( entryNo == -1 ) {
4.409 + entryNo = mmu_utlb_lookup_vpn( addr, asid, use_asid );
4.410 + if( entryNo >= 0 ) {
4.411 + entryNo = mmu_itlb_update_from_utlb( entryNo );
4.412 + }
4.413 + }
4.414 + switch(entryNo) {
4.415 + case -1:
4.416 + MMU_TLB_READ_MISS_ERROR(addr);
4.417 + break;
4.418 + case -2:
4.419 + MMU_TLB_MULTI_HIT_ERROR(addr);
4.420 + break;
4.421 + default:
4.422 + if( (mmu_itlb[entryNo].flags & TLB_USERMODE) == 0 &&
4.423 + !IS_SH4_PRIVMODE() ) {
4.424 + /* protection violation */
4.425 + MMU_TLB_READ_PROT_ERROR(addr);
4.426 + }
4.427 +
4.428 + /* finally generate the target address */
4.429 + return (mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask) |
4.430 + (addr & (~mmu_itlb[entryNo].mask));
4.431 + }
4.432 + return -1;
4.433 +}
4.434 +
4.435 +uint64_t mmu_vma_to_phys_read_noexc( sh4addr_t addr ) {
4.436 +
4.437 +
4.438 +}
4.439 +
4.440 +
4.441 +uint64_t mmu_vma_to_phys_read( sh4addr_t addr )
4.442 +{
4.443 + uint32_t mmucr = MMIO_READ(MMU,MMUCR);
4.444 + if( addr & 0x80000000 ) {
4.445 + if( IS_SH4_PRIVMODE() ) {
4.446 + if( addr < 0xC0000000 || addr >= 0xE0000000 ) {
4.447 + /* P1, P2 and P4 regions are pass-through (no translation) */
4.448 + return (uint64_t)addr;
4.449 + }
4.450 + } else {
4.451 + if( addr >= 0xE0000000 && addr < 0xE4000000 &&
4.452 + ((mmucr&MMUCR_SQMD) == 0) ) {
4.453 + /* Conditional user-mode access to the store-queue (no translation) */
4.454 + return (uint64_t)addr;
4.455 + }
4.456 + MMU_READ_ADDR_ERROR();
4.457 + }
4.458 + }
4.459 +
4.460 + if( (mmucr & MMUCR_AT) == 0 ) {
4.461 + return (uint64_t)addr;
4.462 + }
4.463 +
4.464 + /* If we get this far, translation is required */
4.465 +
4.466 + int use_asid = ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE();
4.467 + uint32_t asid = MMIO_READ( MMU, PTEH ) & 0xFF;
4.468 +
4.469 + int entryNo = mmu_utlb_lookup_vpn( addr, asid, use_asid );
4.470 +
4.471 + switch(entryNo) {
4.472 + case -1:
4.473 + MMU_TLB_READ_MISS_ERROR(addr);
4.474 + break;
4.475 + case -2:
4.476 + MMU_TLB_MULTI_HIT_ERROR(addr);
4.477 + break;
4.478 + default:
4.479 + if( (mmu_utlb[entryNo].flags & TLB_USERMODE) == 0 &&
4.480 + !IS_SH4_PRIVMODE() ) {
4.481 + /* protection violation */
4.482 + MMU_TLB_READ_PROT_ERROR(addr);
4.483 + }
4.484 +
4.485 + /* finally generate the target address */
4.486 + return (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
4.487 + (addr & (~mmu_utlb[entryNo].mask));
4.488 + }
4.489 + return -1;
4.490 }
4.491
4.492 static void mmu_invalidate_tlb()
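Every successful translation above ends with the same computation: the entry's PPN supplies the page frame and the untranslated low bits of the virtual address supply the offset. A worked example with assumed values for a 64kB page:

    #include <stdint.h>
    #include <assert.h>

    int main(void)
    {
        uint32_t mask = 0xFFFF0000;            /* MASK_64K */
        uint32_t vpn  = 0x7C000000;            /* entry's virtual page */
        uint32_t ppn  = 0x0C100000;            /* entry's physical page */
        uint32_t addr = 0x7C003210;            /* address being translated */

        assert( ((vpn ^ addr) & mask) == 0 );  /* the lookup's match test */
        uint32_t phys = (ppn & mask) | (addr & ~mask);
        assert( phys == 0x0C103210 );          /* frame 0x0C10, offset 0x3210 */
        return 0;
    }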
4.493 @@ -251,6 +628,7 @@
4.494 struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
4.495 ent->ppn = val & 0x1FFFFC00;
4.496 ent->flags = val & 0x00001DA;
4.497 + ent->mask = get_mask_for_flags(val);
4.498 }
4.499
4.500 #define UTLB_ENTRY(addr) ((addr>>8)&0x3F)
4.501 @@ -276,6 +654,16 @@
4.502 void mmu_utlb_addr_write( sh4addr_t addr, uint32_t val )
4.503 {
4.504 if( UTLB_ASSOC(addr) ) {
4.505 + uint32_t asid = MMIO_READ( MMU, PTEH ) & 0xFF;
4.506 + int entryNo = mmu_utlb_lookup_assoc( val, asid );
4.507 + if( entryNo >= 0 ) {
4.508 + struct utlb_entry *ent = &mmu_utlb[entryNo];
4.509 + ent->flags = ent->flags & ~(TLB_DIRTY|TLB_VALID);
4.510 + ent->flags |= (val & TLB_VALID);
4.511 + ent->flags |= ((val & 0x200)>>7);
4.512 + } else if( entryNo == -2 ) {
4.513 + MMU_TLB_MULTI_HIT_ERROR(addr);
4.514 + }
4.515 } else {
4.516 struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
4.517 ent->vpn = (val & 0xFFFFFC00);
4.518 @@ -294,6 +682,7 @@
4.519 } else {
4.520 ent->ppn = (val & 0x1FFFFC00);
4.521 ent->flags = (val & 0x000001FF);
4.522 + ent->mask = get_mask_for_flags(val);
4.523 }
4.524 }
4.525
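Taken together, the RAISE_* macros and the translation functions establish one convention: a 64-bit return whose low word is the physical address, with a clear high word on success and a nonzero high word when an exception was raised. A sketch of the caller side, mirroring the sh4mem.c changes below:

    uint64_t ppa = mmu_vma_to_phys_read( vma );
    if( ppa >> 32 ) {
        return ppa;                  /* exception raised: propagate upward */
    }
    sh4addr_t addr = (sh4addr_t)ppa; /* success: use the translated address */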
5.1 --- a/src/sh4/sh4.c Tue Jan 01 04:56:52 2008 +0000
5.2 +++ b/src/sh4/sh4.c Tue Jan 01 04:58:57 2008 +0000
5.3 @@ -30,10 +30,6 @@
5.4 #include "clock.h"
5.5 #include "syscall.h"
5.6
5.7 -#define EXV_EXCEPTION 0x100 /* General exception vector */
5.8 -#define EXV_TLBMISS 0x400 /* TLB-miss exception vector */
5.9 -#define EXV_INTERRUPT 0x600 /* External interrupt vector */
5.10 -
5.11 void sh4_init( void );
5.12 void sh4_xlat_init( void );
5.13 void sh4_reset( void );
5.14 @@ -254,6 +250,21 @@
5.15 RAISE( code, EXV_EXCEPTION );
5.16 }
5.17
5.18 +/**
5.19 + * Raise a CPU reset exception with the specified exception code.
5.20 + */
5.21 +gboolean sh4_raise_reset( int code )
5.22 +{
5.23 + // FIXME: reset modules as per "manual reset"
5.24 + sh4_reset();
5.25 + MMIO_WRITE(MMU,EXPEVT,code);
5.26 + sh4r.vbr = 0;
5.27 + sh4r.pc = 0xA0000000;
5.28 + sh4r.new_pc = sh4r.pc + 2;
5.29 + sh4_write_sr( (sh4r.sr|SR_MD|SR_BL|SR_RB|SR_IMASK)
5.30 + &(~SR_FD) );
5.31 +}
5.32 +
5.33 gboolean sh4_raise_trap( int trap )
5.34 {
5.35 MMIO_WRITE( MMU, TRA, trap<<2 );
6.1 --- a/src/sh4/sh4core.c Tue Jan 01 04:56:52 2008 +0000
6.2 +++ b/src/sh4/sh4core.c Tue Jan 01 04:58:57 2008 +0000
6.3 @@ -164,12 +164,12 @@
6.4 #define TRACE_RETURN( source, dest )
6.5 #endif
6.6
6.7 -#define MEM_READ_BYTE( addr ) sh4_read_byte(addr)
6.8 -#define MEM_READ_WORD( addr ) sh4_read_word(addr)
6.9 -#define MEM_READ_LONG( addr ) sh4_read_long(addr)
6.10 -#define MEM_WRITE_BYTE( addr, val ) sh4_write_byte(addr, val)
6.11 -#define MEM_WRITE_WORD( addr, val ) sh4_write_word(addr, val)
6.12 -#define MEM_WRITE_LONG( addr, val ) sh4_write_long(addr, val)
6.13 +#define MEM_READ_BYTE( addr, val ) memtmp = sh4_read_byte(addr); if( memtmp >> 32 ) { return TRUE; } else { val = ((uint32_t)memtmp); }
6.14 +#define MEM_READ_WORD( addr, val ) memtmp = sh4_read_word(addr); if( memtmp >> 32 ) { return TRUE; } else { val = ((uint32_t)memtmp); }
6.15 +#define MEM_READ_LONG( addr, val ) memtmp = sh4_read_long(addr); if( memtmp >> 32 ) { return TRUE; } else { val = ((uint32_t)memtmp); }
6.16 +#define MEM_WRITE_BYTE( addr, val ) if( sh4_write_byte(addr, val) ) { return TRUE; }
6.17 +#define MEM_WRITE_WORD( addr, val ) if( sh4_write_word(addr, val) ) { return TRUE; }
6.18 +#define MEM_WRITE_LONG( addr, val ) if( sh4_write_long(addr, val) ) { return TRUE; }
6.19
6.20 #define FP_WIDTH (IS_FPU_DOUBLESIZE() ? 8 : 4)
6.21
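The MEM_READ_* macros are now statements rather than expressions: each one reads through the MMU, aborts sh4_execute_instruction() if the high word of the result signals an exception, and otherwise assigns the low 32 bits to the destination. Expanded by hand, MEM_READ_LONG( addr, val ) behaves like:

    memtmp = sh4_read_long( addr );  /* memtmp: the function-local int64_t */
    if( memtmp >> 32 ) {
        return TRUE;                 /* exception raised; abandon the instruction */
    } else {
        val = (uint32_t)memtmp;      /* normal case: take the low 32 bits */
    }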
6.22 @@ -223,6 +223,7 @@
6.23 uint32_t tmp;
6.24 float ftmp;
6.25 double dtmp;
6.26 + int64_t memtmp; // temporary holder for memory reads
6.27
6.28 #define R0 sh4r.r[0]
6.29 pc = sh4r.pc;
6.30 @@ -246,7 +247,7 @@
6.31 * region, fallback on the full-blown memory read
6.32 */
6.33 sh4_icache = NULL;
6.34 - ir = MEM_READ_WORD(pc);
6.35 + MEM_READ_WORD(pc, ir);
6.36 } else {
6.37 sh4_icache_addr = pageaddr;
6.38 ir = sh4_icache[(pc&0xFFF)>>1];
6.39 @@ -551,21 +552,21 @@
6.40 case 0xC:
6.41 { /* MOV.B @(R0, Rm), Rn */
6.42 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
6.43 - sh4r.r[Rn] = MEM_READ_BYTE( R0 + sh4r.r[Rm] );
6.44 + MEM_READ_BYTE( R0 + sh4r.r[Rm], sh4r.r[Rn] );
6.45 }
6.46 break;
6.47 case 0xD:
6.48 { /* MOV.W @(R0, Rm), Rn */
6.49 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
6.50 CHECKRALIGN16( R0 + sh4r.r[Rm] );
6.51 - sh4r.r[Rn] = MEM_READ_WORD( R0 + sh4r.r[Rm] );
6.52 + MEM_READ_WORD( R0 + sh4r.r[Rm], sh4r.r[Rn] );
6.53 }
6.54 break;
6.55 case 0xE:
6.56 { /* MOV.L @(R0, Rm), Rn */
6.57 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
6.58 CHECKRALIGN32( R0 + sh4r.r[Rm] );
6.59 - sh4r.r[Rn] = MEM_READ_LONG( R0 + sh4r.r[Rm] );
6.60 + MEM_READ_LONG( R0 + sh4r.r[Rm], sh4r.r[Rn] );
6.61 }
6.62 break;
6.63 case 0xF:
6.64 @@ -573,9 +574,11 @@
6.65 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
6.66 CHECKRALIGN32( sh4r.r[Rm] );
6.67 CHECKRALIGN32( sh4r.r[Rn] );
6.68 - int64_t tmpl = SIGNEXT32(MEM_READ_LONG(sh4r.r[Rn]));
6.69 + MEM_READ_LONG(sh4r.r[Rn], tmp);
6.70 + int64_t tmpl = SIGNEXT32(tmp);
6.71 sh4r.r[Rn] += 4;
6.72 - tmpl = tmpl * SIGNEXT32(MEM_READ_LONG(sh4r.r[Rm])) + sh4r.mac;
6.73 + MEM_READ_LONG(sh4r.r[Rm], tmp);
6.74 + tmpl = tmpl * SIGNEXT32(tmp) + sh4r.mac;
6.75 sh4r.r[Rm] += 4;
6.76 if( sh4r.s ) {
6.77 /* 48-bit Saturation. Yuch */
6.78 @@ -1058,8 +1061,9 @@
6.79 { /* LDS.L @Rm+, MACH */
6.80 uint32_t Rm = ((ir>>8)&0xF);
6.81 CHECKRALIGN32( sh4r.r[Rm] );
6.82 + MEM_READ_LONG(sh4r.r[Rm], tmp);
6.83 sh4r.mac = (sh4r.mac & 0x00000000FFFFFFFF) |
6.84 - (((uint64_t)MEM_READ_LONG(sh4r.r[Rm]))<<32);
6.85 + (((uint64_t)tmp)<<32);
6.86 sh4r.r[Rm] += 4;
6.87 }
6.88 break;
6.89 @@ -1067,8 +1071,9 @@
6.90 { /* LDS.L @Rm+, MACL */
6.91 uint32_t Rm = ((ir>>8)&0xF);
6.92 CHECKRALIGN32( sh4r.r[Rm] );
6.93 + MEM_READ_LONG(sh4r.r[Rm], tmp);
6.94 sh4r.mac = (sh4r.mac & 0xFFFFFFFF00000000LL) |
6.95 - (uint64_t)((uint32_t)MEM_READ_LONG(sh4r.r[Rm]));
6.96 + (uint64_t)((uint32_t)tmp);
6.97 sh4r.r[Rm] += 4;
6.98 }
6.99 break;
6.100 @@ -1076,7 +1081,7 @@
6.101 { /* LDS.L @Rm+, PR */
6.102 uint32_t Rm = ((ir>>8)&0xF);
6.103 CHECKRALIGN32( sh4r.r[Rm] );
6.104 - sh4r.pr = MEM_READ_LONG( sh4r.r[Rm] );
6.105 + MEM_READ_LONG( sh4r.r[Rm], sh4r.pr );
6.106 sh4r.r[Rm] += 4;
6.107 }
6.108 break;
6.109 @@ -1085,7 +1090,7 @@
6.110 uint32_t Rm = ((ir>>8)&0xF);
6.111 CHECKPRIV();
6.112 CHECKRALIGN32( sh4r.r[Rm] );
6.113 - sh4r.sgr = MEM_READ_LONG(sh4r.r[Rm]);
6.114 + MEM_READ_LONG(sh4r.r[Rm], sh4r.sgr);
6.115 sh4r.r[Rm] +=4;
6.116 }
6.117 break;
6.118 @@ -1093,7 +1098,7 @@
6.119 { /* LDS.L @Rm+, FPUL */
6.120 uint32_t Rm = ((ir>>8)&0xF);
6.121 CHECKRALIGN32( sh4r.r[Rm] );
6.122 - sh4r.fpul = MEM_READ_LONG(sh4r.r[Rm]);
6.123 + MEM_READ_LONG(sh4r.r[Rm], sh4r.fpul);
6.124 sh4r.r[Rm] +=4;
6.125 }
6.126 break;
6.127 @@ -1101,7 +1106,7 @@
6.128 { /* LDS.L @Rm+, FPSCR */
6.129 uint32_t Rm = ((ir>>8)&0xF);
6.130 CHECKRALIGN32( sh4r.r[Rm] );
6.131 - sh4r.fpscr = MEM_READ_LONG(sh4r.r[Rm]);
6.132 + MEM_READ_LONG(sh4r.r[Rm], sh4r.fpscr);
6.133 sh4r.r[Rm] +=4;
6.134 sh4r.fr_bank = &sh4r.fr[(sh4r.fpscr&FPSCR_FR)>>21][0];
6.135 }
6.136 @@ -1111,7 +1116,7 @@
6.137 uint32_t Rm = ((ir>>8)&0xF);
6.138 CHECKPRIV();
6.139 CHECKRALIGN32( sh4r.r[Rm] );
6.140 - sh4r.dbr = MEM_READ_LONG(sh4r.r[Rm]);
6.141 + MEM_READ_LONG(sh4r.r[Rm], sh4r.dbr);
6.142 sh4r.r[Rm] +=4;
6.143 }
6.144 break;
6.145 @@ -1130,7 +1135,8 @@
6.146 CHECKSLOTILLEGAL();
6.147 CHECKPRIV();
6.148 CHECKWALIGN32( sh4r.r[Rm] );
6.149 - sh4_write_sr( MEM_READ_LONG(sh4r.r[Rm]) );
6.150 + MEM_READ_LONG(sh4r.r[Rm], tmp);
6.151 + sh4_write_sr( tmp );
6.152 sh4r.r[Rm] +=4;
6.153 }
6.154 break;
6.155 @@ -1138,7 +1144,7 @@
6.156 { /* LDC.L @Rm+, GBR */
6.157 uint32_t Rm = ((ir>>8)&0xF);
6.158 CHECKRALIGN32( sh4r.r[Rm] );
6.159 - sh4r.gbr = MEM_READ_LONG(sh4r.r[Rm]);
6.160 + MEM_READ_LONG(sh4r.r[Rm], sh4r.gbr);
6.161 sh4r.r[Rm] +=4;
6.162 }
6.163 break;
6.164 @@ -1147,7 +1153,7 @@
6.165 uint32_t Rm = ((ir>>8)&0xF);
6.166 CHECKPRIV();
6.167 CHECKRALIGN32( sh4r.r[Rm] );
6.168 - sh4r.vbr = MEM_READ_LONG(sh4r.r[Rm]);
6.169 + MEM_READ_LONG(sh4r.r[Rm], sh4r.vbr);
6.170 sh4r.r[Rm] +=4;
6.171 }
6.172 break;
6.173 @@ -1156,7 +1162,7 @@
6.174 uint32_t Rm = ((ir>>8)&0xF);
6.175 CHECKPRIV();
6.176 CHECKRALIGN32( sh4r.r[Rm] );
6.177 - sh4r.ssr = MEM_READ_LONG(sh4r.r[Rm]);
6.178 + MEM_READ_LONG(sh4r.r[Rm], sh4r.ssr);
6.179 sh4r.r[Rm] +=4;
6.180 }
6.181 break;
6.182 @@ -1165,7 +1171,7 @@
6.183 uint32_t Rm = ((ir>>8)&0xF);
6.184 CHECKPRIV();
6.185 CHECKRALIGN32( sh4r.r[Rm] );
6.186 - sh4r.spc = MEM_READ_LONG(sh4r.r[Rm]);
6.187 + MEM_READ_LONG(sh4r.r[Rm], sh4r.spc);
6.188 sh4r.r[Rm] +=4;
6.189 }
6.190 break;
6.191 @@ -1179,7 +1185,7 @@
6.192 uint32_t Rm = ((ir>>8)&0xF); uint32_t Rn_BANK = ((ir>>4)&0x7);
6.193 CHECKPRIV();
6.194 CHECKRALIGN32( sh4r.r[Rm] );
6.195 - sh4r.r_bank[Rn_BANK] = MEM_READ_LONG( sh4r.r[Rm] );
6.196 + MEM_READ_LONG( sh4r.r[Rm], sh4r.r_bank[Rn_BANK] );
6.197 sh4r.r[Rm] += 4;
6.198 }
6.199 break;
6.200 @@ -1307,7 +1313,7 @@
6.201 case 0x1:
6.202 { /* TAS.B @Rn */
6.203 uint32_t Rn = ((ir>>8)&0xF);
6.204 - tmp = MEM_READ_BYTE( sh4r.r[Rn] );
6.205 + MEM_READ_BYTE( sh4r.r[Rn], tmp );
6.206 sh4r.t = ( tmp == 0 ? 1 : 0 );
6.207 MEM_WRITE_BYTE( sh4r.r[Rn], tmp | 0x80 );
6.208 }
6.209 @@ -1406,9 +1412,11 @@
6.210 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
6.211 CHECKRALIGN16( sh4r.r[Rn] );
6.212 CHECKRALIGN16( sh4r.r[Rm] );
6.213 - int32_t stmp = SIGNEXT16(MEM_READ_WORD(sh4r.r[Rn]));
6.214 + MEM_READ_WORD(sh4r.r[Rn], tmp);
6.215 + int32_t stmp = SIGNEXT16(tmp);
6.216 sh4r.r[Rn] += 2;
6.217 - stmp = stmp * SIGNEXT16(MEM_READ_WORD(sh4r.r[Rm]));
6.218 + MEM_READ_WORD(sh4r.r[Rm], tmp);
6.219 + stmp = stmp * SIGNEXT16(tmp);
6.220 sh4r.r[Rm] += 2;
6.221 if( sh4r.s ) {
6.222 int64_t tmpl = (int64_t)((int32_t)sh4r.mac) + (int64_t)stmp;
6.223 @@ -1432,7 +1440,7 @@
6.224 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF); uint32_t disp = (ir&0xF)<<2;
6.225 tmp = sh4r.r[Rm] + disp;
6.226 CHECKRALIGN32( tmp );
6.227 - sh4r.r[Rn] = MEM_READ_LONG( tmp );
6.228 + MEM_READ_LONG( tmp, sh4r.r[Rn] );
6.229 }
6.230 break;
6.231 case 0x6:
6.232 @@ -1440,19 +1448,19 @@
6.233 case 0x0:
6.234 { /* MOV.B @Rm, Rn */
6.235 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
6.236 - sh4r.r[Rn] = MEM_READ_BYTE( sh4r.r[Rm] );
6.237 + MEM_READ_BYTE( sh4r.r[Rm], sh4r.r[Rn] );
6.238 }
6.239 break;
6.240 case 0x1:
6.241 { /* MOV.W @Rm, Rn */
6.242 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
6.243 - CHECKRALIGN16( sh4r.r[Rm] ); sh4r.r[Rn] = MEM_READ_WORD( sh4r.r[Rm] );
6.244 + CHECKRALIGN16( sh4r.r[Rm] ); MEM_READ_WORD( sh4r.r[Rm], sh4r.r[Rn] );
6.245 }
6.246 break;
6.247 case 0x2:
6.248 { /* MOV.L @Rm, Rn */
6.249 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
6.250 - CHECKRALIGN32( sh4r.r[Rm] ); sh4r.r[Rn] = MEM_READ_LONG( sh4r.r[Rm] );
6.251 + CHECKRALIGN32( sh4r.r[Rm] ); MEM_READ_LONG( sh4r.r[Rm], sh4r.r[Rn] );
6.252 }
6.253 break;
6.254 case 0x3:
6.255 @@ -1464,19 +1472,19 @@
6.256 case 0x4:
6.257 { /* MOV.B @Rm+, Rn */
6.258 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
6.259 - sh4r.r[Rn] = MEM_READ_BYTE( sh4r.r[Rm] ); sh4r.r[Rm] ++;
6.260 + MEM_READ_BYTE( sh4r.r[Rm], sh4r.r[Rn] ); sh4r.r[Rm] ++;
6.261 }
6.262 break;
6.263 case 0x5:
6.264 { /* MOV.W @Rm+, Rn */
6.265 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
6.266 - CHECKRALIGN16( sh4r.r[Rm] ); sh4r.r[Rn] = MEM_READ_WORD( sh4r.r[Rm] ); sh4r.r[Rm] += 2;
6.267 + CHECKRALIGN16( sh4r.r[Rm] ); MEM_READ_WORD( sh4r.r[Rm], sh4r.r[Rn] ); sh4r.r[Rm] += 2;
6.268 }
6.269 break;
6.270 case 0x6:
6.271 { /* MOV.L @Rm+, Rn */
6.272 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
6.273 - CHECKRALIGN32( sh4r.r[Rm] ); sh4r.r[Rn] = MEM_READ_LONG( sh4r.r[Rm] ); sh4r.r[Rm] += 4;
6.274 + CHECKRALIGN32( sh4r.r[Rm] ); MEM_READ_LONG( sh4r.r[Rm], sh4r.r[Rn] ); sh4r.r[Rm] += 4;
6.275 }
6.276 break;
6.277 case 0x7:
6.278 @@ -1562,7 +1570,7 @@
6.279 case 0x4:
6.280 { /* MOV.B @(disp, Rm), R0 */
6.281 uint32_t Rm = ((ir>>4)&0xF); uint32_t disp = (ir&0xF);
6.282 - R0 = MEM_READ_BYTE( sh4r.r[Rm] + disp );
6.283 + MEM_READ_BYTE( sh4r.r[Rm] + disp, R0 );
6.284 }
6.285 break;
6.286 case 0x5:
6.287 @@ -1570,7 +1578,7 @@
6.288 uint32_t Rm = ((ir>>4)&0xF); uint32_t disp = (ir&0xF)<<1;
6.289 tmp = sh4r.r[Rm] + disp;
6.290 CHECKRALIGN16( tmp );
6.291 - R0 = MEM_READ_WORD( tmp );
6.292 + MEM_READ_WORD( tmp, R0 );
6.293 }
6.294 break;
6.295 case 0x8:
6.296 @@ -1640,7 +1648,7 @@
6.297 uint32_t Rn = ((ir>>8)&0xF); uint32_t disp = (ir&0xFF)<<1;
6.298 CHECKSLOTILLEGAL();
6.299 tmp = pc + 4 + disp;
6.300 - sh4r.r[Rn] = MEM_READ_WORD( tmp );
6.301 + MEM_READ_WORD( tmp, sh4r.r[Rn] );
6.302 }
6.303 break;
6.304 case 0xA:
6.305 @@ -1703,7 +1711,7 @@
6.306 case 0x4:
6.307 { /* MOV.B @(disp, GBR), R0 */
6.308 uint32_t disp = (ir&0xFF);
6.309 - R0 = MEM_READ_BYTE( sh4r.gbr + disp );
6.310 + MEM_READ_BYTE( sh4r.gbr + disp, R0 );
6.311 }
6.312 break;
6.313 case 0x5:
6.314 @@ -1711,7 +1719,7 @@
6.315 uint32_t disp = (ir&0xFF)<<1;
6.316 tmp = sh4r.gbr + disp;
6.317 CHECKRALIGN16( tmp );
6.318 - R0 = MEM_READ_WORD( tmp );
6.319 + MEM_READ_WORD( tmp, R0 );
6.320 }
6.321 break;
6.322 case 0x6:
6.323 @@ -1719,7 +1727,7 @@
6.324 uint32_t disp = (ir&0xFF)<<2;
6.325 tmp = sh4r.gbr + disp;
6.326 CHECKRALIGN32( tmp );
6.327 - R0 = MEM_READ_LONG( tmp );
6.328 + MEM_READ_LONG( tmp, R0 );
6.329 }
6.330 break;
6.331 case 0x7:
6.332 @@ -1756,25 +1764,25 @@
6.333 case 0xC:
6.334 { /* TST.B #imm, @(R0, GBR) */
6.335 uint32_t imm = (ir&0xFF);
6.336 - sh4r.t = ( MEM_READ_BYTE(R0 + sh4r.gbr) & imm ? 0 : 1 );
6.337 + MEM_READ_BYTE(R0+sh4r.gbr, tmp); sh4r.t = ( tmp & imm ? 0 : 1 );
6.338 }
6.339 break;
6.340 case 0xD:
6.341 { /* AND.B #imm, @(R0, GBR) */
6.342 uint32_t imm = (ir&0xFF);
6.343 - MEM_WRITE_BYTE( R0 + sh4r.gbr, imm & MEM_READ_BYTE(R0 + sh4r.gbr) );
6.344 + MEM_READ_BYTE(R0+sh4r.gbr, tmp); MEM_WRITE_BYTE( R0 + sh4r.gbr, imm & tmp );
6.345 }
6.346 break;
6.347 case 0xE:
6.348 { /* XOR.B #imm, @(R0, GBR) */
6.349 uint32_t imm = (ir&0xFF);
6.350 - MEM_WRITE_BYTE( R0 + sh4r.gbr, imm ^ MEM_READ_BYTE(R0 + sh4r.gbr) );
6.351 + MEM_READ_BYTE(R0+sh4r.gbr, tmp); MEM_WRITE_BYTE( R0 + sh4r.gbr, imm ^ tmp );
6.352 }
6.353 break;
6.354 case 0xF:
6.355 { /* OR.B #imm, @(R0, GBR) */
6.356 uint32_t imm = (ir&0xFF);
6.357 - MEM_WRITE_BYTE( R0 + sh4r.gbr, imm | MEM_READ_BYTE(R0 + sh4r.gbr) );
6.358 + MEM_READ_BYTE(R0+sh4r.gbr, tmp); MEM_WRITE_BYTE( R0 + sh4r.gbr, imm | tmp );
6.359 }
6.360 break;
6.361 }
6.362 @@ -1784,7 +1792,7 @@
6.363 uint32_t Rn = ((ir>>8)&0xF); uint32_t disp = (ir&0xFF)<<2;
6.364 CHECKSLOTILLEGAL();
6.365 tmp = (pc&0xFFFFFFFC) + disp + 4;
6.366 - sh4r.r[Rn] = MEM_READ_LONG( tmp );
6.367 + MEM_READ_LONG( tmp, sh4r.r[Rn] );
6.368 }
6.369 break;
6.370 case 0xE:
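Because the read macros are statements that can return, they can no longer be nested inside an expression, which explains the recurring rewrite pattern in this file of reading into tmp first:

    /* Old (expression) style - no longer possible:
     *   int64_t tmpl = SIGNEXT32( MEM_READ_LONG( sh4r.r[Rn] ) );
     * New (statement) style, as in the MAC.L handler above: */
    MEM_READ_LONG( sh4r.r[Rn], tmp );
    int64_t tmpl = SIGNEXT32( tmp );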
7.1 --- a/src/sh4/sh4core.h Tue Jan 01 04:56:52 2008 +0000
7.2 +++ b/src/sh4/sh4core.h Tue Jan 01 04:58:57 2008 +0000
7.3 @@ -102,6 +102,7 @@
7.4
7.5 gboolean sh4_execute_instruction( void );
7.6 gboolean sh4_raise_exception( int );
7.7 +gboolean sh4_raise_reset( int );
7.8 gboolean sh4_raise_trap( int );
7.9 gboolean sh4_raise_slot_exception( int, int );
7.10 gboolean sh4_raise_tlb_exception( int );
7.11 @@ -114,16 +115,21 @@
7.12 #define BREAK_PERM 2
7.13
7.14 /* SH4 Memory */
7.15 +uint64_t mmu_vma_to_phys_read( sh4addr_t addr );
7.16 +uint64_t mmu_vma_to_phys_write( sh4addr_t addr );
7.17 +uint64_t mmu_vma_to_phys_exec( sh4addr_t addr );
7.18 +
7.19 int64_t sh4_read_quad( sh4addr_t addr );
7.20 -int32_t sh4_read_long( sh4addr_t addr );
7.21 -int32_t sh4_read_word( sh4addr_t addr );
7.22 -int32_t sh4_read_byte( sh4addr_t addr );
7.23 +int64_t sh4_read_long( sh4addr_t addr );
7.24 +int64_t sh4_read_word( sh4addr_t addr );
7.25 +int64_t sh4_read_byte( sh4addr_t addr );
7.26 void sh4_write_quad( sh4addr_t addr, uint64_t val );
7.27 -void sh4_write_long( sh4addr_t addr, uint32_t val );
7.28 -void sh4_write_word( sh4addr_t addr, uint32_t val );
7.29 -void sh4_write_byte( sh4addr_t addr, uint32_t val );
7.30 +int32_t sh4_write_long( sh4addr_t addr, uint32_t val );
7.31 +int32_t sh4_write_word( sh4addr_t addr, uint32_t val );
7.32 +int32_t sh4_write_byte( sh4addr_t addr, uint32_t val );
7.33 int32_t sh4_read_phys_word( sh4addr_t addr );
7.34 void sh4_flush_store_queue( sh4addr_t addr );
7.35 +sh4ptr_t sh4_get_region_by_vma( sh4addr_t addr );
7.36
7.37 /* SH4 Support methods */
7.38 uint32_t sh4_read_sr(void);
7.39 @@ -160,6 +166,7 @@
7.40 #define SIGNEXT16(n) ((int32_t)((int16_t)(n)))
7.41 #define SIGNEXT32(n) ((int64_t)((int32_t)(n)))
7.42 #define SIGNEXT48(n) ((((int64_t)(n))<<16)>>16)
7.43 +#define ZEROEXT32(n) ((int64_t)((uint64_t)((uint32_t)(n))))
7.44
7.45 /* Status Register (SR) bits */
7.46 #define SR_MD 0x40000000 /* Processor mode ( User=0, Privileged=1 ) */
7.47 @@ -201,16 +208,26 @@
7.48 #define FPULi (sh4r.fpul)
7.49
7.50 /* CPU-generated exception code/vector pairs */
7.51 -#define EXC_POWER_RESET 0x000 /* vector special */
7.52 -#define EXC_MANUAL_RESET 0x020
7.53 -#define EXC_DATA_ADDR_READ 0x0E0
7.54 +#define EXC_POWER_RESET 0x000 /* vector special */
7.55 +#define EXC_MANUAL_RESET 0x020
7.56 +#define EXC_TLB_MISS_READ 0x040
7.57 +#define EXC_TLB_MISS_WRITE 0x060
7.58 +#define EXC_INIT_PAGE_WRITE 0x080
7.59 +#define EXC_TLB_PROT_READ 0x0A0
7.60 +#define EXC_TLB_PROT_WRITE 0x0C0
7.61 +#define EXC_DATA_ADDR_READ 0x0E0
7.62 #define EXC_DATA_ADDR_WRITE 0x100
7.63 -#define EXC_SLOT_ILLEGAL 0x1A0
7.64 -#define EXC_ILLEGAL 0x180
7.65 -#define EXC_TRAP 0x160
7.66 -#define EXC_FPU_DISABLED 0x800
7.67 +#define EXC_TLB_MULTI_HIT 0x140
7.68 +#define EXC_SLOT_ILLEGAL 0x1A0
7.69 +#define EXC_ILLEGAL 0x180
7.70 +#define EXC_TRAP 0x160
7.71 +#define EXC_FPU_DISABLED 0x800
7.72 #define EXC_SLOT_FPU_DISABLED 0x820
7.73
7.74 +#define EXV_EXCEPTION 0x100 /* General exception vector */
7.75 +#define EXV_TLBMISS 0x400 /* TLB-miss exception vector */
7.76 +#define EXV_INTERRUPT 0x600 /* External interrupt vector */
7.77 +
7.78 /* Exceptions (for use with sh4_raise_exception) */
7.79
7.80 #define EX_ILLEGAL_INSTRUCTION 0x180, 0x100
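The widened read prototypes depend on the new ZEROEXT32 macro: a successful read still sign-extends into the low 32 bits for the instruction set's benefit, but the high 32 bits must stay clear so the result is not mistaken for an exception. For example:

    /* SIGNEXT8(0x80) is 0xFFFFFF80; ZEROEXT32 keeps the high word zero,
     * so (result >> 32) == 0 still reads as "no exception". */
    int64_t result = ZEROEXT32( SIGNEXT8( 0x80 ) );  /* 0x00000000FFFFFF80 */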
8.1 --- a/src/sh4/sh4core.in Tue Jan 01 04:56:52 2008 +0000
8.2 +++ b/src/sh4/sh4core.in Tue Jan 01 04:58:57 2008 +0000
8.3 @@ -164,12 +164,12 @@
8.4 #define TRACE_RETURN( source, dest )
8.5 #endif
8.6
8.7 -#define MEM_READ_BYTE( addr ) sh4_read_byte(addr)
8.8 -#define MEM_READ_WORD( addr ) sh4_read_word(addr)
8.9 -#define MEM_READ_LONG( addr ) sh4_read_long(addr)
8.10 -#define MEM_WRITE_BYTE( addr, val ) sh4_write_byte(addr, val)
8.11 -#define MEM_WRITE_WORD( addr, val ) sh4_write_word(addr, val)
8.12 -#define MEM_WRITE_LONG( addr, val ) sh4_write_long(addr, val)
8.13 +#define MEM_READ_BYTE( addr, val ) memtmp = sh4_read_byte(addr); if( memtmp >> 32 ) { return TRUE; } else { val = ((uint32_t)memtmp); }
8.14 +#define MEM_READ_WORD( addr, val ) memtmp = sh4_read_word(addr); if( memtmp >> 32 ) { return TRUE; } else { val = ((uint32_t)memtmp); }
8.15 +#define MEM_READ_LONG( addr, val ) memtmp = sh4_read_long(addr); if( memtmp >> 32 ) { return TRUE; } else { val = ((uint32_t)memtmp); }
8.16 +#define MEM_WRITE_BYTE( addr, val ) if( sh4_write_byte(addr, val) ) { return TRUE; }
8.17 +#define MEM_WRITE_WORD( addr, val ) if( sh4_write_word(addr, val) ) { return TRUE; }
8.18 +#define MEM_WRITE_LONG( addr, val ) if( sh4_write_long(addr, val) ) { return TRUE; }
8.19
8.20 #define FP_WIDTH (IS_FPU_DOUBLESIZE() ? 8 : 4)
8.21
8.22 @@ -223,6 +223,7 @@
8.23 uint32_t tmp;
8.24 float ftmp;
8.25 double dtmp;
8.26 + int64_t memtmp; // temporary holder for memory reads
8.27
8.28 #define R0 sh4r.r[0]
8.29 pc = sh4r.pc;
8.30 @@ -246,7 +247,7 @@
8.31 * region, fallback on the full-blown memory read
8.32 */
8.33 sh4_icache = NULL;
8.34 - ir = MEM_READ_WORD(pc);
8.35 + MEM_READ_WORD(pc, ir);
8.36 } else {
8.37 sh4_icache_addr = pageaddr;
8.38 ir = sh4_icache[(pc&0xFFF)>>1];
8.39 @@ -255,22 +256,22 @@
8.40 %%
8.41 AND Rm, Rn {: sh4r.r[Rn] &= sh4r.r[Rm]; :}
8.42 AND #imm, R0 {: R0 &= imm; :}
8.43 -AND.B #imm, @(R0, GBR) {: MEM_WRITE_BYTE( R0 + sh4r.gbr, imm & MEM_READ_BYTE(R0 + sh4r.gbr) ); :}
8.44 + AND.B #imm, @(R0, GBR) {: MEM_READ_BYTE(R0+sh4r.gbr, tmp); MEM_WRITE_BYTE( R0 + sh4r.gbr, imm & tmp ); :}
8.45 NOT Rm, Rn {: sh4r.r[Rn] = ~sh4r.r[Rm]; :}
8.46 OR Rm, Rn {: sh4r.r[Rn] |= sh4r.r[Rm]; :}
8.47 OR #imm, R0 {: R0 |= imm; :}
8.48 -OR.B #imm, @(R0, GBR) {: MEM_WRITE_BYTE( R0 + sh4r.gbr, imm | MEM_READ_BYTE(R0 + sh4r.gbr) ); :}
8.49 + OR.B #imm, @(R0, GBR) {: MEM_READ_BYTE(R0+sh4r.gbr, tmp); MEM_WRITE_BYTE( R0 + sh4r.gbr, imm | tmp ); :}
8.50 TAS.B @Rn {:
8.51 - tmp = MEM_READ_BYTE( sh4r.r[Rn] );
8.52 + MEM_READ_BYTE( sh4r.r[Rn], tmp );
8.53 sh4r.t = ( tmp == 0 ? 1 : 0 );
8.54 MEM_WRITE_BYTE( sh4r.r[Rn], tmp | 0x80 );
8.55 :}
8.56 TST Rm, Rn {: sh4r.t = (sh4r.r[Rn]&sh4r.r[Rm] ? 0 : 1); :}
8.57 TST #imm, R0 {: sh4r.t = (R0 & imm ? 0 : 1); :}
8.58 -TST.B #imm, @(R0, GBR) {: sh4r.t = ( MEM_READ_BYTE(R0 + sh4r.gbr) & imm ? 0 : 1 ); :}
8.59 + TST.B #imm, @(R0, GBR) {: MEM_READ_BYTE(R0+sh4r.gbr, tmp); sh4r.t = ( tmp & imm ? 0 : 1 ); :}
8.60 XOR Rm, Rn {: sh4r.r[Rn] ^= sh4r.r[Rm]; :}
8.61 XOR #imm, R0 {: R0 ^= imm; :}
8.62 -XOR.B #imm, @(R0, GBR) {: MEM_WRITE_BYTE( R0 + sh4r.gbr, imm ^ MEM_READ_BYTE(R0 + sh4r.gbr) ); :}
8.63 + XOR.B #imm, @(R0, GBR) {: MEM_READ_BYTE(R0+sh4r.gbr, tmp); MEM_WRITE_BYTE( R0 + sh4r.gbr, imm ^ tmp ); :}
8.64 XTRCT Rm, Rn {: sh4r.r[Rn] = (sh4r.r[Rn]>>16) | (sh4r.r[Rm]<<16); :}
8.65
8.66 ROTL Rn {:
8.67 @@ -365,12 +366,12 @@
8.68 CHECKWALIGN32( R0 + sh4r.r[Rn] );
8.69 MEM_WRITE_LONG( R0 + sh4r.r[Rn], sh4r.r[Rm] );
8.70 :}
8.71 -MOV.B @(R0, Rm), Rn {: sh4r.r[Rn] = MEM_READ_BYTE( R0 + sh4r.r[Rm] ); :}
8.72 +MOV.B @(R0, Rm), Rn {: MEM_READ_BYTE( R0 + sh4r.r[Rm], sh4r.r[Rn] ); :}
8.73 MOV.W @(R0, Rm), Rn {: CHECKRALIGN16( R0 + sh4r.r[Rm] );
8.74 - sh4r.r[Rn] = MEM_READ_WORD( R0 + sh4r.r[Rm] );
8.75 + MEM_READ_WORD( R0 + sh4r.r[Rm], sh4r.r[Rn] );
8.76 :}
8.77 MOV.L @(R0, Rm), Rn {: CHECKRALIGN32( R0 + sh4r.r[Rm] );
8.78 - sh4r.r[Rn] = MEM_READ_LONG( R0 + sh4r.r[Rm] );
8.79 + MEM_READ_LONG( R0 + sh4r.r[Rm], sh4r.r[Rn] );
8.80 :}
8.81 MOV.L Rm, @(disp, Rn) {:
8.82 tmp = sh4r.r[Rn] + disp;
8.83 @@ -386,19 +387,19 @@
8.84 MOV.L @(disp, Rm), Rn {:
8.85 tmp = sh4r.r[Rm] + disp;
8.86 CHECKRALIGN32( tmp );
8.87 - sh4r.r[Rn] = MEM_READ_LONG( tmp );
8.88 + MEM_READ_LONG( tmp, sh4r.r[Rn] );
8.89 :}
8.90 -MOV.B @Rm, Rn {: sh4r.r[Rn] = MEM_READ_BYTE( sh4r.r[Rm] ); :}
8.91 -MOV.W @Rm, Rn {: CHECKRALIGN16( sh4r.r[Rm] ); sh4r.r[Rn] = MEM_READ_WORD( sh4r.r[Rm] ); :}
8.92 -MOV.L @Rm, Rn {: CHECKRALIGN32( sh4r.r[Rm] ); sh4r.r[Rn] = MEM_READ_LONG( sh4r.r[Rm] ); :}
8.93 +MOV.B @Rm, Rn {: MEM_READ_BYTE( sh4r.r[Rm], sh4r.r[Rn] ); :}
8.94 + MOV.W @Rm, Rn {: CHECKRALIGN16( sh4r.r[Rm] ); MEM_READ_WORD( sh4r.r[Rm], sh4r.r[Rn] ); :}
8.95 + MOV.L @Rm, Rn {: CHECKRALIGN32( sh4r.r[Rm] ); MEM_READ_LONG( sh4r.r[Rm], sh4r.r[Rn] ); :}
8.96 MOV Rm, Rn {: sh4r.r[Rn] = sh4r.r[Rm]; :}
8.97 -MOV.B @Rm+, Rn {: sh4r.r[Rn] = MEM_READ_BYTE( sh4r.r[Rm] ); sh4r.r[Rm] ++; :}
8.98 -MOV.W @Rm+, Rn {: CHECKRALIGN16( sh4r.r[Rm] ); sh4r.r[Rn] = MEM_READ_WORD( sh4r.r[Rm] ); sh4r.r[Rm] += 2; :}
8.99 -MOV.L @Rm+, Rn {: CHECKRALIGN32( sh4r.r[Rm] ); sh4r.r[Rn] = MEM_READ_LONG( sh4r.r[Rm] ); sh4r.r[Rm] += 4; :}
8.100 + MOV.B @Rm+, Rn {: MEM_READ_BYTE( sh4r.r[Rm], sh4r.r[Rn] ); sh4r.r[Rm] ++; :}
8.101 + MOV.W @Rm+, Rn {: CHECKRALIGN16( sh4r.r[Rm] ); MEM_READ_WORD( sh4r.r[Rm], sh4r.r[Rn] ); sh4r.r[Rm] += 2; :}
8.102 + MOV.L @Rm+, Rn {: CHECKRALIGN32( sh4r.r[Rm] ); MEM_READ_LONG( sh4r.r[Rm], sh4r.r[Rn] ); sh4r.r[Rm] += 4; :}
8.103 MOV.L @(disp, PC), Rn {:
8.104 CHECKSLOTILLEGAL();
8.105 tmp = (pc&0xFFFFFFFC) + disp + 4;
8.106 - sh4r.r[Rn] = MEM_READ_LONG( tmp );
8.107 + MEM_READ_LONG( tmp, sh4r.r[Rn] );
8.108 :}
8.109 MOV.B R0, @(disp, GBR) {: MEM_WRITE_BYTE( sh4r.gbr + disp, R0 ); :}
8.110 MOV.W R0, @(disp, GBR) {:
8.111 @@ -411,16 +412,16 @@
8.112 CHECKWALIGN32( tmp );
8.113 MEM_WRITE_LONG( tmp, R0 );
8.114 :}
8.115 -MOV.B @(disp, GBR), R0 {: R0 = MEM_READ_BYTE( sh4r.gbr + disp ); :}
8.116 + MOV.B @(disp, GBR), R0 {: MEM_READ_BYTE( sh4r.gbr + disp, R0 ); :}
8.117 MOV.W @(disp, GBR), R0 {:
8.118 tmp = sh4r.gbr + disp;
8.119 CHECKRALIGN16( tmp );
8.120 - R0 = MEM_READ_WORD( tmp );
8.121 + MEM_READ_WORD( tmp, R0 );
8.122 :}
8.123 MOV.L @(disp, GBR), R0 {:
8.124 tmp = sh4r.gbr + disp;
8.125 CHECKRALIGN32( tmp );
8.126 - R0 = MEM_READ_LONG( tmp );
8.127 + MEM_READ_LONG( tmp, R0 );
8.128 :}
8.129 MOV.B R0, @(disp, Rn) {: MEM_WRITE_BYTE( sh4r.r[Rn] + disp, R0 ); :}
8.130 MOV.W R0, @(disp, Rn) {:
8.131 @@ -428,16 +429,16 @@
8.132 CHECKWALIGN16( tmp );
8.133 MEM_WRITE_WORD( tmp, R0 );
8.134 :}
8.135 -MOV.B @(disp, Rm), R0 {: R0 = MEM_READ_BYTE( sh4r.r[Rm] + disp ); :}
8.136 + MOV.B @(disp, Rm), R0 {: MEM_READ_BYTE( sh4r.r[Rm] + disp, R0 ); :}
8.137 MOV.W @(disp, Rm), R0 {:
8.138 tmp = sh4r.r[Rm] + disp;
8.139 CHECKRALIGN16( tmp );
8.140 - R0 = MEM_READ_WORD( tmp );
8.141 + MEM_READ_WORD( tmp, R0 );
8.142 :}
8.143 MOV.W @(disp, PC), Rn {:
8.144 CHECKSLOTILLEGAL();
8.145 tmp = pc + 4 + disp;
8.146 - sh4r.r[Rn] = MEM_READ_WORD( tmp );
8.147 + MEM_READ_WORD( tmp, sh4r.r[Rn] );
8.148 :}
8.149 MOVA @(disp, PC), R0 {:
8.150 CHECKSLOTILLEGAL();
8.151 @@ -506,9 +507,11 @@
8.152 MAC.W @Rm+, @Rn+ {:
8.153 CHECKRALIGN16( sh4r.r[Rn] );
8.154 CHECKRALIGN16( sh4r.r[Rm] );
8.155 - int32_t stmp = SIGNEXT16(MEM_READ_WORD(sh4r.r[Rn]));
8.156 + MEM_READ_WORD(sh4r.r[Rn], tmp);
8.157 + int32_t stmp = SIGNEXT16(tmp);
8.158 sh4r.r[Rn] += 2;
8.159 - stmp = stmp * SIGNEXT16(MEM_READ_WORD(sh4r.r[Rm]));
8.160 + MEM_READ_WORD(sh4r.r[Rm], tmp);
8.161 + stmp = stmp * SIGNEXT16(tmp);
8.162 sh4r.r[Rm] += 2;
8.163 if( sh4r.s ) {
8.164 int64_t tmpl = (int64_t)((int32_t)sh4r.mac) + (int64_t)stmp;
8.165 @@ -527,9 +530,11 @@
8.166 MAC.L @Rm+, @Rn+ {:
8.167 CHECKRALIGN32( sh4r.r[Rm] );
8.168 CHECKRALIGN32( sh4r.r[Rn] );
8.169 - int64_t tmpl = SIGNEXT32(MEM_READ_LONG(sh4r.r[Rn]));
8.170 + MEM_READ_LONG(sh4r.r[Rn], tmp);
8.171 + int64_t tmpl = SIGNEXT32(tmp);
8.172 sh4r.r[Rn] += 4;
8.173 - tmpl = tmpl * SIGNEXT32(MEM_READ_LONG(sh4r.r[Rm])) + sh4r.mac;
8.174 + MEM_READ_LONG(sh4r.r[Rm], tmp);
8.175 + tmpl = tmpl * SIGNEXT32(tmp) + sh4r.mac;
8.176 sh4r.r[Rm] += 4;
8.177 if( sh4r.s ) {
8.178 /* 48-bit Saturation. Yuch */
8.179 @@ -703,15 +708,17 @@
8.180 :}
8.181 LDS.L @Rm+, MACH {:
8.182 CHECKRALIGN32( sh4r.r[Rm] );
8.183 + MEM_READ_LONG(sh4r.r[Rm], tmp);
8.184 sh4r.mac = (sh4r.mac & 0x00000000FFFFFFFF) |
8.185 - (((uint64_t)MEM_READ_LONG(sh4r.r[Rm]))<<32);
8.186 + (((uint64_t)tmp)<<32);
8.187 sh4r.r[Rm] += 4;
8.188 :}
8.189 LDC.L @Rm+, SR {:
8.190 CHECKSLOTILLEGAL();
8.191 CHECKPRIV();
8.192 CHECKWALIGN32( sh4r.r[Rm] );
8.193 - sh4_write_sr( MEM_READ_LONG(sh4r.r[Rm]) );
8.194 + MEM_READ_LONG(sh4r.r[Rm], tmp);
8.195 + sh4_write_sr( tmp );
8.196 sh4r.r[Rm] +=4;
8.197 :}
8.198 LDS Rm, MACH {:
8.199 @@ -730,7 +737,7 @@
8.200 LDC.L @Rm+, SGR {:
8.201 CHECKPRIV();
8.202 CHECKRALIGN32( sh4r.r[Rm] );
8.203 - sh4r.sgr = MEM_READ_LONG(sh4r.r[Rm]);
8.204 + MEM_READ_LONG(sh4r.r[Rm], sh4r.sgr);
8.205 sh4r.r[Rm] +=4;
8.206 :}
8.207 STS MACL, Rn {: sh4r.r[Rn] = (uint32_t)sh4r.mac; :}
8.208 @@ -746,13 +753,14 @@
8.209 :}
8.210 LDS.L @Rm+, MACL {:
8.211 CHECKRALIGN32( sh4r.r[Rm] );
8.212 + MEM_READ_LONG(sh4r.r[Rm], tmp);
8.213 sh4r.mac = (sh4r.mac & 0xFFFFFFFF00000000LL) |
8.214 - (uint64_t)((uint32_t)MEM_READ_LONG(sh4r.r[Rm]));
8.215 + (uint64_t)((uint32_t)tmp);
8.216 sh4r.r[Rm] += 4;
8.217 :}
8.218 LDC.L @Rm+, GBR {:
8.219 CHECKRALIGN32( sh4r.r[Rm] );
8.220 - sh4r.gbr = MEM_READ_LONG(sh4r.r[Rm]);
8.221 + MEM_READ_LONG(sh4r.r[Rm], sh4r.gbr);
8.222 sh4r.r[Rm] +=4;
8.223 :}
8.224 LDS Rm, MACL {:
8.225 @@ -774,13 +782,13 @@
8.226 :}
8.227 LDS.L @Rm+, PR {:
8.228 CHECKRALIGN32( sh4r.r[Rm] );
8.229 - sh4r.pr = MEM_READ_LONG( sh4r.r[Rm] );
8.230 + MEM_READ_LONG( sh4r.r[Rm], sh4r.pr );
8.231 sh4r.r[Rm] += 4;
8.232 :}
8.233 LDC.L @Rm+, VBR {:
8.234 CHECKPRIV();
8.235 CHECKRALIGN32( sh4r.r[Rm] );
8.236 - sh4r.vbr = MEM_READ_LONG(sh4r.r[Rm]);
8.237 + MEM_READ_LONG(sh4r.r[Rm], sh4r.vbr);
8.238 sh4r.r[Rm] +=4;
8.239 :}
8.240 LDS Rm, PR {: sh4r.pr = sh4r.r[Rm]; :}
8.241 @@ -807,7 +815,7 @@
8.242 LDC.L @Rm+, SSR {:
8.243 CHECKPRIV();
8.244 CHECKRALIGN32( sh4r.r[Rm] );
8.245 - sh4r.ssr = MEM_READ_LONG(sh4r.r[Rm]);
8.246 + MEM_READ_LONG(sh4r.r[Rm], sh4r.ssr);
8.247 sh4r.r[Rm] +=4;
8.248 :}
8.249 LDC Rm, SSR {:
8.250 @@ -823,7 +831,7 @@
8.251 LDC.L @Rm+, SPC {:
8.252 CHECKPRIV();
8.253 CHECKRALIGN32( sh4r.r[Rm] );
8.254 - sh4r.spc = MEM_READ_LONG(sh4r.r[Rm]);
8.255 + MEM_READ_LONG(sh4r.r[Rm], sh4r.spc);
8.256 sh4r.r[Rm] +=4;
8.257 :}
8.258 LDC Rm, SPC {:
8.259 @@ -838,7 +846,7 @@
8.260 :}
8.261 LDS.L @Rm+, FPUL {:
8.262 CHECKRALIGN32( sh4r.r[Rm] );
8.263 - sh4r.fpul = MEM_READ_LONG(sh4r.r[Rm]);
8.264 + MEM_READ_LONG(sh4r.r[Rm], sh4r.fpul);
8.265 sh4r.r[Rm] +=4;
8.266 :}
8.267 LDS Rm, FPUL {: sh4r.fpul = sh4r.r[Rm]; :}
8.268 @@ -850,7 +858,7 @@
8.269 :}
8.270 LDS.L @Rm+, FPSCR {:
8.271 CHECKRALIGN32( sh4r.r[Rm] );
8.272 - sh4r.fpscr = MEM_READ_LONG(sh4r.r[Rm]);
8.273 + MEM_READ_LONG(sh4r.r[Rm], sh4r.fpscr);
8.274 sh4r.r[Rm] +=4;
8.275 sh4r.fr_bank = &sh4r.fr[(sh4r.fpscr&FPSCR_FR)>>21][0];
8.276 :}
8.277 @@ -868,7 +876,7 @@
8.278 LDC.L @Rm+, DBR {:
8.279 CHECKPRIV();
8.280 CHECKRALIGN32( sh4r.r[Rm] );
8.281 - sh4r.dbr = MEM_READ_LONG(sh4r.r[Rm]);
8.282 + MEM_READ_LONG(sh4r.r[Rm], sh4r.dbr);
8.283 sh4r.r[Rm] +=4;
8.284 :}
8.285 LDC Rm, DBR {:
8.286 @@ -884,7 +892,7 @@
8.287 LDC.L @Rm+, Rn_BANK {:
8.288 CHECKPRIV();
8.289 CHECKRALIGN32( sh4r.r[Rm] );
8.290 - sh4r.r_bank[Rn_BANK] = MEM_READ_LONG( sh4r.r[Rm] );
8.291 + MEM_READ_LONG( sh4r.r[Rm], sh4r.r_bank[Rn_BANK] );
8.292 sh4r.r[Rm] += 4;
8.293 :}
8.294 LDC Rm, Rn_BANK {:
9.1 --- a/src/sh4/sh4mem.c Tue Jan 01 04:56:52 2008 +0000
9.2 +++ b/src/sh4/sh4mem.c Tue Jan 01 04:58:57 2008 +0000
9.3 @@ -156,16 +156,22 @@
9.4 (((int64_t)((uint32_t)sh4_read_long(addr+4))) << 32);
9.5 }
9.6
9.7 -int32_t sh4_read_long( sh4addr_t addr )
9.8 +int64_t sh4_read_long( sh4addr_t vma )
9.9 {
9.10 sh4ptr_t page;
9.11
9.12 CHECK_READ_WATCH(addr,4);
9.13
9.14 + uint64_t ppa = mmu_vma_to_phys_read(vma);
9.15 + if( ppa>>32 ) {
9.16 + return ppa;
9.17 + }
9.18 + sh4addr_t addr = (sh4addr_t)ppa;
9.19 +
9.20 if( addr >= 0xE0000000 ) { /* P4 Area, handled specially */
9.21 - return sh4_read_p4( addr );
9.22 + return ZEROEXT32(sh4_read_p4( addr ));
9.23 } else if( (addr&0x1C000000) == 0x0C000000 ) {
9.24 - return *(int32_t *)(sh4_main_ram + (addr&0x00FFFFFF));
9.25 + return ZEROEXT32(*(int32_t *)(sh4_main_ram + (addr&0x00FFFFFF)));
9.26 } else if( (addr&0x1F800000) == 0x04000000 ) {
9.27 addr = TRANSLATE_VIDEO_64BIT_ADDRESS(addr);
9.28 pvr2_render_buffer_invalidate(addr, FALSE);
9.29 @@ -182,22 +188,28 @@
9.30 }
9.31 val = io_rgn[(uintptr_t)page]->io_read(addr&0xFFF);
9.32 TRACE_IO( "Long read %08X <= %08X", page, (addr&0xFFF), val, addr );
9.33 - return val;
9.34 + return ZEROEXT32(val);
9.35 } else {
9.36 - return *(int32_t *)(page+(addr&0xFFF));
9.37 + return ZEROEXT32(*(int32_t *)(page+(addr&0xFFF)));
9.38 }
9.39 }
9.40
9.41 -int32_t sh4_read_word( sh4addr_t addr )
9.42 +int64_t sh4_read_word( sh4addr_t vma )
9.43 {
9.44 sh4ptr_t page;
9.45
9.46 CHECK_READ_WATCH(addr,2);
9.47
9.48 + uint64_t ppa = mmu_vma_to_phys_read(vma);
9.49 + if( ppa>>32 ) {
9.50 + return ppa;
9.51 + }
9.52 + sh4addr_t addr = (sh4addr_t)ppa;
9.53 +
9.54 if( addr >= 0xE0000000 ) { /* P4 Area, handled specially */
9.55 - return SIGNEXT16(sh4_read_p4( addr ));
9.56 + return ZEROEXT32(SIGNEXT16(sh4_read_p4( addr )));
9.57 } else if( (addr&0x1C000000) == 0x0C000000 ) {
9.58 - return SIGNEXT16(*(int16_t *)(sh4_main_ram + (addr&0x00FFFFFF)));
9.59 + return ZEROEXT32(SIGNEXT16(*(int16_t *)(sh4_main_ram + (addr&0x00FFFFFF))));
9.60 } else if( (addr&0x1F800000) == 0x04000000 ) {
9.61 addr = TRANSLATE_VIDEO_64BIT_ADDRESS(addr);
9.62 pvr2_render_buffer_invalidate(addr, FALSE);
9.63 @@ -214,22 +226,28 @@
9.64 }
9.65 val = SIGNEXT16(io_rgn[(uintptr_t)page]->io_read(addr&0xFFF));
9.66 TRACE_IO( "Word read %04X <= %08X", page, (addr&0xFFF), val&0xFFFF, addr );
9.67 - return val;
9.68 + return ZEROEXT32(val);
9.69 } else {
9.70 - return SIGNEXT16(*(int16_t *)(page+(addr&0xFFF)));
9.71 + return ZEROEXT32(SIGNEXT16(*(int16_t *)(page+(addr&0xFFF))));
9.72 }
9.73 }
9.74
9.75 -int32_t sh4_read_byte( sh4addr_t addr )
9.76 +int64_t sh4_read_byte( sh4addr_t vma )
9.77 {
9.78 sh4ptr_t page;
9.79
9.80 CHECK_READ_WATCH(addr,1);
9.81
9.82 + uint64_t ppa = mmu_vma_to_phys_read(vma);
9.83 + if( ppa>>32 ) {
9.84 + return ppa;
9.85 + }
9.86 + sh4addr_t addr = (sh4addr_t)ppa;
9.87 +
9.88 if( addr >= 0xE0000000 ) { /* P4 Area, handled specially */
9.89 - return SIGNEXT8(sh4_read_p4( addr ));
9.90 + return ZEROEXT32(SIGNEXT8(sh4_read_p4( addr )));
9.91 } else if( (addr&0x1C000000) == 0x0C000000 ) {
9.92 - return SIGNEXT8(*(int8_t *)(sh4_main_ram + (addr&0x00FFFFFF)));
9.93 + return ZEROEXT32(SIGNEXT8(*(int8_t *)(sh4_main_ram + (addr&0x00FFFFFF))));
9.94 } else if( (addr&0x1F800000) == 0x04000000 ) {
9.95 addr = TRANSLATE_VIDEO_64BIT_ADDRESS(addr);
9.96 pvr2_render_buffer_invalidate(addr, FALSE);
9.97 @@ -247,9 +265,9 @@
9.98 }
9.99 val = SIGNEXT8(io_rgn[(uintptr_t)page]->io_read(addr&0xFFF));
9.100 TRACE_IO( "Byte read %02X <= %08X", page, (addr&0xFFF), val&0xFF, addr );
9.101 - return val;
9.102 + return ZEROEXT32(val);
9.103 } else {
9.104 - return SIGNEXT8(*(int8_t *)(page+(addr&0xFFF)));
9.105 + return ZEROEXT32(SIGNEXT8(*(int8_t *)(page+(addr&0xFFF))));
9.106 }
9.107 }
9.108
9.109 @@ -262,19 +280,25 @@
9.110 sh4_write_long( addr+4, (uint32_t)(val>>32) );
9.111 }
9.112
9.113 -void sh4_write_long( sh4addr_t addr, uint32_t val )
9.114 +int32_t sh4_write_long( sh4addr_t vma, uint32_t val )
9.115 {
9.116 sh4ptr_t page;
9.117
9.118 + uint64_t ppa = mmu_vma_to_phys_write(vma);
9.119 + if( ppa>>32 ) {
9.120 + return ppa>>32;
9.121 + }
9.122 + sh4addr_t addr = (sh4addr_t)ppa;
9.123 +
9.124 CHECK_WRITE_WATCH(addr,4,val);
9.125
9.126 if( addr >= 0xE0000000 ) {
9.127 sh4_write_p4( addr, val );
9.128 - return;
9.129 + return 0;
9.130 } else if( (addr&0x1C000000) == 0x0C000000 ) {
9.131 *(uint32_t *)(sh4_main_ram + (addr&0x00FFFFFF)) = val;
9.132 xlat_invalidate_long(addr);
9.133 - return;
9.134 + return 0;
9.135 } else if( (addr&0x1F800000) == 0x04000000 ||
9.136 (addr&0x1F800000) == 0x11000000 ) {
9.137 texcache_invalidate_page(addr& 0x7FFFFF);
9.138 @@ -287,7 +311,7 @@
9.139 if( (addr&0x1FFFFFFF) < 0x200000 ) {
9.140 WARN( "Attempted write to read-only memory: %08X => %08X", val, addr);
9.141 sh4_stop();
9.142 - return;
9.143 + return 0;
9.144 }
9.145 if( (addr&0x1F800000) == 0x00800000 )
9.146 asic_g2_write_word();
9.147 @@ -297,30 +321,37 @@
9.148 if( page == NULL ) {
9.149 if( (addr & 0x1F000000) >= 0x04000000 &&
9.150 (addr & 0x1F000000) < 0x07000000 )
9.151 - return;
9.152 + return 0;
9.153 WARN( "Long write to missing page: %08X => %08X", val, addr );
9.154 - return;
9.155 + return 0;
9.156 }
9.157 TRACE_IO( "Long write %08X => %08X", page, (addr&0xFFF), val, addr );
9.158 io_rgn[(uintptr_t)page]->io_write(addr&0xFFF, val);
9.159 } else {
9.160 *(uint32_t *)(page+(addr&0xFFF)) = val;
9.161 }
9.162 + return 0;
9.163 }
9.164
9.165 -void sh4_write_word( sh4addr_t addr, uint32_t val )
9.166 +int32_t sh4_write_word( sh4addr_t vma, uint32_t val )
9.167 {
9.168 sh4ptr_t page;
9.169
9.170 + uint64_t ppa = mmu_vma_to_phys_write(vma);
9.171 + if( ppa>>32 ) {
9.172 + return ppa>>32;
9.173 + }
9.174 + sh4addr_t addr = (sh4addr_t)ppa;
9.175 +
9.176 CHECK_WRITE_WATCH(addr,2,val);
9.177
9.178 if( addr >= 0xE0000000 ) {
9.179 sh4_write_p4( addr, (int16_t)val );
9.180 - return;
9.181 + return 0;
9.182 } else if( (addr&0x1C000000) == 0x0C000000 ) {
9.183 *(uint16_t *)(sh4_main_ram + (addr&0x00FFFFFF)) = val;
9.184 xlat_invalidate_word(addr);
9.185 - return;
9.186 + return 0;
9.187 } else if( (addr&0x1F800000) == 0x04000000 ||
9.188 (addr&0x1F800000) == 0x11000000 ) {
9.189 texcache_invalidate_page(addr& 0x7FFFFF);
9.190 @@ -333,34 +364,41 @@
9.191 if( (addr&0x1FFFFFFF) < 0x200000 ) {
9.192 WARN( "Attempted write to read-only memory: %08X => %08X", val, addr);
9.193 sh4_stop();
9.194 - return;
9.195 + return 0;
9.196 }
9.197 page = page_map[ (addr & 0x1FFFFFFF) >> 12 ];
9.198 if( ((uintptr_t)page) < MAX_IO_REGIONS ) { /* IO Region */
9.199 if( page == NULL ) {
9.200 WARN( "Attempted word write to missing page: %08X", addr );
9.201 - return;
9.202 + return 0;
9.203 }
9.204 TRACE_IO( "Word write %04X => %08X", page, (addr&0xFFF), val&0xFFFF, addr );
9.205 io_rgn[(uintptr_t)page]->io_write(addr&0xFFF, val);
9.206 } else {
9.207 *(uint16_t *)(page+(addr&0xFFF)) = val;
9.208 }
9.209 + return 0;
9.210 }
9.211
9.212 -void sh4_write_byte( sh4addr_t addr, uint32_t val )
9.213 +int32_t sh4_write_byte( sh4addr_t vma, uint32_t val )
9.214 {
9.215 sh4ptr_t page;
9.216 +
9.217 + uint64_t ppa = mmu_vma_to_phys_write(vma);
9.218 + if( ppa>>32 ) {
9.219 + return ppa>>32;
9.220 + }
9.221 + sh4addr_t addr = (sh4addr_t)ppa;
9.222
9.223 CHECK_WRITE_WATCH(addr,1,val);
9.224
9.225 if( addr >= 0xE0000000 ) {
9.226 sh4_write_p4( addr, (int8_t)val );
9.227 - return;
9.228 + return 0;
9.229 } else if( (addr&0x1C000000) == 0x0C000000 ) {
9.230 *(uint8_t *)(sh4_main_ram + (addr&0x00FFFFFF)) = val;
9.231 xlat_invalidate_word(addr);
9.232 - return;
9.233 + return 0;
9.234 } else if( (addr&0x1F800000) == 0x04000000 ||
9.235 (addr&0x1F800000) == 0x11000000 ) {
9.236 texcache_invalidate_page(addr& 0x7FFFFF);
9.237 @@ -373,19 +411,20 @@
9.238 if( (addr&0x1FFFFFFF) < 0x200000 ) {
9.239 WARN( "Attempted write to read-only memory: %08X => %08X", val, addr);
9.240 sh4_stop();
9.241 - return;
9.242 + return 0;
9.243 }
9.244 page = page_map[ (addr & 0x1FFFFFFF) >> 12 ];
9.245 if( ((uintptr_t)page) < MAX_IO_REGIONS ) { /* IO Region */
9.246 if( page == NULL ) {
9.247 WARN( "Attempted byte write to missing page: %08X", addr );
9.248 - return;
9.249 + return 0;
9.250 }
9.251 TRACE_IO( "Byte write %02X => %08X", page, (addr&0xFFF), val&0xFF, addr );
9.252 io_rgn[(uintptr_t)page]->io_write( (addr&0xFFF), val);
9.253 } else {
9.254 *(uint8_t *)(page+(addr&0xFFF)) = val;
9.255 }
9.256 + return 0;
9.257 }
9.258
9.259
9.260 @@ -434,3 +473,20 @@
9.261 uint32_t target = (addr&0x03FFFFE0) | hi;
9.262 mem_copy_to_sh4( target, src, 32 );
9.263 }
9.264 +
9.265 +sh4ptr_t sh4_get_region_by_vma( sh4addr_t vma )
9.266 +{
9.267 + uint64_t ppa = mmu_vma_to_phys_read(vma);
9.268 + if( ppa>>32 ) {
9.269 + return 0;
9.270 + }
9.271 +
9.272 + sh4addr_t addr = (sh4addr_t)ppa;
9.273 + sh4ptr_t page = page_map[ (addr & 0x1FFFFFFF) >> 12 ];
9.274 + if( ((uintptr_t)page) < MAX_IO_REGIONS ) { /* IO Region */
9.275 + return NULL;
9.276 + } else {
9.277 + return page+(addr&0xFFF);
9.278 + }
9.279 +}
9.280 +
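Taken together, the sh4mem.c hunks establish the error-signalling convention the rest of the changeset builds on: the read helpers widen to int64_t and hand back the loaded value zero-extended into the low word (hence ZEROEXT32 wrapped around the existing SIGNEXT* results), leaving a non-zero high word free to flag a fault that mmu_vma_to_phys_read has already raised, while the write helpers now return a plain status, 0 on success. A minimal caller-side sketch of that contract; copy_long is hypothetical, and plain uint32_t stands in for sh4addr_t:

#include <stdint.h>

extern int64_t sh4_read_long( uint32_t vma );
extern int32_t sh4_write_long( uint32_t vma, uint32_t val );

/* Hypothetical helper: copy one longword between two virtual addresses,
 * honouring the fault convention described above. */
int copy_long( uint32_t src_vma, uint32_t dst_vma )
{
    int64_t v = sh4_read_long( src_vma );
    if( v >> 32 ) {
        return -1;    /* MMU exception already raised; abandon the access */
    }
    if( sh4_write_long( dst_vma, (uint32_t)v ) != 0 ) {
        return -1;    /* write-side TLB miss or protection fault */
    }
    return 0;
}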
10.1 --- a/src/sh4/sh4trans.c Tue Jan 01 04:56:52 2008 +0000
10.2 +++ b/src/sh4/sh4trans.c Tue Jan 01 04:58:57 2008 +0000
10.3 @@ -23,6 +23,9 @@
10.4 #include "sh4/sh4trans.h"
10.5 #include "sh4/xltcache.h"
10.6
10.7 +
10.8 +uint32_t last_pc;
10.9 +void *last_code;
10.10 /**
10.11 * Execute a timeslice using translated code only (ie translate/execute loop)
10.12 * Note this version does not support breakpoints
10.13 @@ -60,9 +63,22 @@
10.14
10.15 code = xlat_get_code(sh4r.pc);
10.16 if( code == NULL ) {
10.17 + uint64_t ppa = mmu_vma_to_phys_exec( sh4r.pc );
10.18 + if( ppa>>32 ) {
10.19 + // not found, exception
10.20 + ppa = mmu_vma_to_phys_exec( sh4r.pc );
10.21 + if( ppa>>32 ) {
10.22 + // double fault - halt
10.23 + dreamcast_stop();
10.24 + ERROR( "Double fault - halting" );
10.25 + return nanosecs;
10.26 + }
10.27 + }
10.28 code = sh4_translate_basic_block( sh4r.pc );
10.29 }
10.30 }
10.31 + last_pc = sh4r.pc;
10.32 + last_code = code;
10.33 code = code();
10.34 }
10.35
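The retry in the translate/execute loop encodes an assumption worth stating: a failing mmu_vma_to_phys_exec() is expected to raise the TLB-miss exception itself as a side effect, redirecting sh4r.pc to the exception vector, so the second lookup translates the handler's address, and a second failure means even the vector is unmappable. (last_pc and last_code are presumably debugging breadcrumbs recording the most recently dispatched block.) As a sketch, with resolve_exec_addr and the cut-down register struct purely illustrative:

#include <stdint.h>

extern uint64_t mmu_vma_to_phys_exec( uint32_t vma );
extern void dreamcast_stop( void );

struct sh4_regs { uint32_t pc; };   /* stand-in for the real register file */
extern struct sh4_regs sh4r;

static int resolve_exec_addr( uint32_t *phys_out )
{
    uint64_t ppa = mmu_vma_to_phys_exec( sh4r.pc ); /* may raise + move pc */
    if( ppa >> 32 ) {
        ppa = mmu_vma_to_phys_exec( sh4r.pc );      /* retry at the vector */
        if( ppa >> 32 ) {
            dreamcast_stop();                       /* double fault: halt */
            return 0;
        }
    }
    *phys_out = (uint32_t)ppa;
    return 1;
}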
11.1 --- a/src/sh4/sh4x86.c Tue Jan 01 04:56:52 2008 +0000
11.2 +++ b/src/sh4/sh4x86.c Tue Jan 01 04:58:57 2008 +0000
11.3 @@ -34,6 +34,12 @@
11.4
11.5 #define DEFAULT_BACKPATCH_SIZE 4096
11.6
11.7 +struct backpatch_record {
11.8 + uint32_t *fixup_addr;
11.9 + uint32_t fixup_icount;
11.10 + uint32_t exc_code;
11.11 +};
11.12 +
11.13 /**
11.14 * Struct to manage internal translation state. This state is not saved -
11.15 * it is only valid between calls to sh4_translate_begin_block() and
11.16 @@ -49,7 +55,7 @@
11.17 int tstate;
11.18
11.19 /* Allocated memory for the (block-wide) back-patch list */
11.20 - uint32_t **backpatch_list;
11.21 + struct backpatch_record *backpatch_list;
11.22 uint32_t backpatch_posn;
11.23 uint32_t backpatch_size;
11.24 };
11.25 @@ -75,14 +81,6 @@
11.26 OP(0x70+ (sh4_x86.tstate^1)); OP(rel8); \
11.27 MARK_JMP(rel8, label)
11.28
11.29 -
11.30 -#define EXIT_DATA_ADDR_READ 0
11.31 -#define EXIT_DATA_ADDR_WRITE 7
11.32 -#define EXIT_ILLEGAL 14
11.33 -#define EXIT_SLOT_ILLEGAL 21
11.34 -#define EXIT_FPU_DISABLED 28
11.35 -#define EXIT_SLOT_FPU_DISABLED 35
11.36 -
11.37 static struct sh4_x86_state sh4_x86;
11.38
11.39 static uint32_t max_int = 0x7FFFFFFF;
11.40 @@ -93,26 +91,25 @@
11.41 void sh4_x86_init()
11.42 {
11.43 sh4_x86.backpatch_list = malloc(DEFAULT_BACKPATCH_SIZE);
11.44 - sh4_x86.backpatch_size = DEFAULT_BACKPATCH_SIZE / sizeof(uint32_t *);
11.45 + sh4_x86.backpatch_size = DEFAULT_BACKPATCH_SIZE / sizeof(struct backpatch_record);
11.46 }
11.47
11.48
11.49 -static void sh4_x86_add_backpatch( uint8_t *ptr )
11.50 +static void sh4_x86_add_backpatch( uint8_t *fixup_addr, uint32_t fixup_pc, uint32_t exc_code )
11.51 {
11.52 if( sh4_x86.backpatch_posn == sh4_x86.backpatch_size ) {
11.53 sh4_x86.backpatch_size <<= 1;
11.54 - sh4_x86.backpatch_list = realloc( sh4_x86.backpatch_list, sh4_x86.backpatch_size * sizeof(uint32_t *) );
11.55 + sh4_x86.backpatch_list = realloc( sh4_x86.backpatch_list,
11.56 + sh4_x86.backpatch_size * sizeof(struct backpatch_record));
11.57 assert( sh4_x86.backpatch_list != NULL );
11.58 }
11.59 - sh4_x86.backpatch_list[sh4_x86.backpatch_posn++] = (uint32_t *)ptr;
11.60 -}
11.61 -
11.62 -static void sh4_x86_do_backpatch( uint8_t *reloc_base )
11.63 -{
11.64 - unsigned int i;
11.65 - for( i=0; i<sh4_x86.backpatch_posn; i++ ) {
11.66 - *sh4_x86.backpatch_list[i] += (reloc_base - ((uint8_t *)sh4_x86.backpatch_list[i]) - 4);
11.67 + if( sh4_x86.in_delay_slot ) {
11.68 + fixup_pc -= 2;
11.69 }
11.70 + sh4_x86.backpatch_list[sh4_x86.backpatch_posn].fixup_addr = (uint32_t *)fixup_addr;
11.71 + sh4_x86.backpatch_list[sh4_x86.backpatch_posn].fixup_icount = (fixup_pc - sh4_x86.block_start_pc)>>1;
11.72 + sh4_x86.backpatch_list[sh4_x86.backpatch_posn].exc_code = exc_code;
11.73 + sh4_x86.backpatch_posn++;
11.74 }
11.75
11.76 /**
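The backpatch list now records structured entries rather than raw fixup pointers, and the fixup_icount field is the key to precise exceptions: SH4 instructions are two bytes, so the byte offset from the block start is halved, and a faulting delay-slot instruction is charged to the branch that owns it (the fixup_pc -= 2 adjustment). A worked example with hypothetical addresses:

#include <assert.h>
#include <stdint.h>

/* fixup_icount encoding used by sh4_x86_add_backpatch(): half the byte
 * offset from the block start; the exception epilogue can then recover the
 * faulting PC as block_start + 2*icount. Addresses here are made up. */
int main(void)
{
    uint32_t block_start_pc = 0x8C001000;  /* hypothetical block start */
    uint32_t fixup_pc       = 0x8C001006;  /* 4th instruction faults */
    uint32_t icount = (fixup_pc - block_start_pc) >> 1;
    assert( icount == 3 );
    assert( block_start_pc + icount * 2 == fixup_pc );
    return 0;
}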
11.77 @@ -266,97 +263,60 @@
11.78 }
11.79
11.80 /* Exception checks - Note that all exception checks will clobber EAX */
11.81 -#define precheck() load_imm32(R_EDX, (pc-sh4_x86.block_start_pc-(sh4_x86.in_delay_slot?2:0))>>1)
11.82
11.83 #define check_priv( ) \
11.84 if( !sh4_x86.priv_checked ) { \
11.85 sh4_x86.priv_checked = TRUE;\
11.86 - precheck();\
11.87 load_spreg( R_EAX, R_SR );\
11.88 AND_imm32_r32( SR_MD, R_EAX );\
11.89 if( sh4_x86.in_delay_slot ) {\
11.90 - JE_exit( EXIT_SLOT_ILLEGAL );\
11.91 + JE_exc( EXC_SLOT_ILLEGAL );\
11.92 } else {\
11.93 - JE_exit( EXIT_ILLEGAL );\
11.94 + JE_exc( EXC_ILLEGAL );\
11.95 }\
11.96 }\
11.97
11.98 -
11.99 -static void check_priv_no_precheck()
11.100 -{
11.101 - if( !sh4_x86.priv_checked ) {
11.102 - sh4_x86.priv_checked = TRUE;
11.103 - load_spreg( R_EAX, R_SR );
11.104 - AND_imm32_r32( SR_MD, R_EAX );
11.105 - if( sh4_x86.in_delay_slot ) {
11.106 - JE_exit( EXIT_SLOT_ILLEGAL );
11.107 - } else {
11.108 - JE_exit( EXIT_ILLEGAL );
11.109 - }
11.110 - }
11.111 -}
11.112 -
11.113 #define check_fpuen( ) \
11.114 if( !sh4_x86.fpuen_checked ) {\
11.115 sh4_x86.fpuen_checked = TRUE;\
11.116 - precheck();\
11.117 load_spreg( R_EAX, R_SR );\
11.118 AND_imm32_r32( SR_FD, R_EAX );\
11.119 if( sh4_x86.in_delay_slot ) {\
11.120 - JNE_exit(EXIT_SLOT_FPU_DISABLED);\
11.121 + JNE_exc(EXC_SLOT_FPU_DISABLED);\
11.122 } else {\
11.123 - JNE_exit(EXIT_FPU_DISABLED);\
11.124 + JNE_exc(EXC_FPU_DISABLED);\
11.125 }\
11.126 }
11.127
11.128 -static void check_fpuen_no_precheck()
11.129 -{
11.130 - if( !sh4_x86.fpuen_checked ) {
11.131 - sh4_x86.fpuen_checked = TRUE;
11.132 - load_spreg( R_EAX, R_SR );
11.133 - AND_imm32_r32( SR_FD, R_EAX );
11.134 - if( sh4_x86.in_delay_slot ) {
11.135 - JNE_exit(EXIT_SLOT_FPU_DISABLED);
11.136 - } else {
11.137 - JNE_exit(EXIT_FPU_DISABLED);
11.138 - }
11.139 - }
11.140 +#define check_ralign16( x86reg ) \
11.141 + TEST_imm32_r32( 0x00000001, x86reg ); \
11.142 + JNE_exc(EXC_DATA_ADDR_READ)
11.143
11.144 -}
11.145 +#define check_walign16( x86reg ) \
11.146 + TEST_imm32_r32( 0x00000001, x86reg ); \
11.147 + JNE_exc(EXC_DATA_ADDR_WRITE);
11.148
11.149 -static void check_ralign16( int x86reg )
11.150 -{
11.151 - TEST_imm32_r32( 0x00000001, x86reg );
11.152 - JNE_exit(EXIT_DATA_ADDR_READ);
11.153 -}
11.154 +#define check_ralign32( x86reg ) \
11.155 + TEST_imm32_r32( 0x00000003, x86reg ); \
11.156 + JNE_exc(EXC_DATA_ADDR_READ)
11.157
11.158 -static void check_walign16( int x86reg )
11.159 -{
11.160 - TEST_imm32_r32( 0x00000001, x86reg );
11.161 - JNE_exit(EXIT_DATA_ADDR_WRITE);
11.162 -}
11.163 -
11.164 -static void check_ralign32( int x86reg )
11.165 -{
11.166 - TEST_imm32_r32( 0x00000003, x86reg );
11.167 - JNE_exit(EXIT_DATA_ADDR_READ);
11.168 -}
11.169 -static void check_walign32( int x86reg )
11.170 -{
11.171 - TEST_imm32_r32( 0x00000003, x86reg );
11.172 - JNE_exit(EXIT_DATA_ADDR_WRITE);
11.173 -}
11.174 +#define check_walign32( x86reg ) \
11.175 + TEST_imm32_r32( 0x00000003, x86reg ); \
11.176 + JNE_exc(EXC_DATA_ADDR_WRITE);
11.177
11.178 #define UNDEF()
11.179 #define MEM_RESULT(value_reg) if(value_reg != R_EAX) { MOV_r32_r32(R_EAX,value_reg); }
11.180 -#define MEM_READ_BYTE( addr_reg, value_reg ) call_func1(sh4_read_byte, addr_reg ); MEM_RESULT(value_reg)
11.181 -#define MEM_READ_WORD( addr_reg, value_reg ) call_func1(sh4_read_word, addr_reg ); MEM_RESULT(value_reg)
11.182 -#define MEM_READ_LONG( addr_reg, value_reg ) call_func1(sh4_read_long, addr_reg ); MEM_RESULT(value_reg)
11.183 -#define MEM_WRITE_BYTE( addr_reg, value_reg ) call_func2(sh4_write_byte, addr_reg, value_reg)
11.184 -#define MEM_WRITE_WORD( addr_reg, value_reg ) call_func2(sh4_write_word, addr_reg, value_reg)
11.185 -#define MEM_WRITE_LONG( addr_reg, value_reg ) call_func2(sh4_write_long, addr_reg, value_reg)
11.186 +#define MEM_READ_BYTE( addr_reg, value_reg ) call_func1(sh4_read_byte, addr_reg ); TEST_r32_r32( R_EDX, R_EDX ); JNE_exc(-1); MEM_RESULT(value_reg)
11.187 +#define MEM_READ_WORD( addr_reg, value_reg ) call_func1(sh4_read_word, addr_reg ); TEST_r32_r32( R_EDX, R_EDX ); JNE_exc(-1); MEM_RESULT(value_reg)
11.188 +#define MEM_READ_LONG( addr_reg, value_reg ) call_func1(sh4_read_long, addr_reg ); TEST_r32_r32( R_EDX, R_EDX ); JNE_exc(-1); MEM_RESULT(value_reg)
11.189 +#define MEM_WRITE_BYTE( addr_reg, value_reg ) call_func2(sh4_write_byte, addr_reg, value_reg); TEST_r32_r32( R_EAX, R_EAX ); JNE_exc(-1);
11.190 +#define MEM_WRITE_WORD( addr_reg, value_reg ) call_func2(sh4_write_word, addr_reg, value_reg); TEST_r32_r32( R_EAX, R_EAX ); JNE_exc(-1);
11.191 +#define MEM_WRITE_LONG( addr_reg, value_reg ) call_func2(sh4_write_long, addr_reg, value_reg); TEST_r32_r32( R_EAX, R_EAX ); JNE_exc(-1);
11.192
11.193 -#define SLOTILLEGAL() precheck(); JMP_exit(EXIT_SLOT_ILLEGAL); sh4_x86.in_delay_slot = FALSE; return 1;
11.194 +#define MEM_READ_SIZE (CALL_FUNC1_SIZE+8)
11.195 +#define MEM_WRITE_SIZE (CALL_FUNC2_SIZE+8)
11.196 +
11.197 +#define SLOTILLEGAL() JMP_exc(EXC_SLOT_ILLEGAL); sh4_x86.in_delay_slot = FALSE; return 1;
11.198
11.199 extern uint16_t *sh4_icache;
11.200 extern uint32_t sh4_icache_addr;
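The rewritten MEM_* macros lean on the ia32 calling convention: a 64-bit return value comes back in EDX:EAX, so TEST R_EDX, R_EDX on the read path is exactly the high-word fault test for the widened sh4_read_* helpers, and the writes' 32-bit status is tested in EAX. The exc_code of -1 passed to JNE_exc appears to mark a fault the C helper has already raised, so the backpatched branch only has to clean up instead of pushing a fresh exception code. The +8 in MEM_READ_SIZE/MEM_WRITE_SIZE is the cost of the added test-and-branch pair, assuming the usual encodings:

#include <assert.h>

/* Size accounting behind MEM_READ_SIZE/MEM_WRITE_SIZE, assuming TEST
 * r32,r32 encodes as 2 bytes (85 /r) and long-form JNE as 6 bytes
 * (0F 85 rel32): each MEM_* macro grows by 8 bytes over the bare call,
 * which is why the FMOV emitters below switch their skip distances from
 * CALL_FUNC*_SIZE to MEM_READ_SIZE/MEM_WRITE_SIZE. */
int main(void)
{
    const int test_r32_r32_len = 2;   /* 85 /r */
    const int jne_rel32_len    = 6;   /* 0F 85 imm32 */
    assert( test_r32_r32_len + jne_rel32_len == 8 );
    return 0;
}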
11.201 @@ -389,7 +349,8 @@
11.202 if( sh4_icache != NULL && pageaddr == sh4_icache_addr ) {
11.203 ir = sh4_icache[(pc&0xFFF)>>1];
11.204 } else {
11.205 - sh4_icache = (uint16_t *)mem_get_page(pc);
11.206 + uint64_t phys = mmu_vma_to_phys_exec(pc);
11.207 + sh4_icache = (uint16_t *)mem_get_page((uint32_t)phys);
11.208 if( ((uintptr_t)sh4_icache) < MAX_IO_REGIONS ) {
11.209 /* If someone's actually been so daft as to try to execute out of an IO
11.210 * region, fallback on the full-blown memory read
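One caveat on the decode path above: unlike the data accessors, the fetch never tests phys>>32 before truncating, so a faulting instruction fetch would index mem_get_page() with a bogus low word. A fault-checked variant under the same convention, purely as a sketch (fetch_page_checked is hypothetical and the mem_get_page signature is approximated):

#include <stdint.h>

extern uint64_t mmu_vma_to_phys_exec( uint32_t vma );
extern void *mem_get_page( uint32_t addr );   /* signature approximated */

static uint16_t *fetch_page_checked( uint32_t pc )
{
    uint64_t phys = mmu_vma_to_phys_exec( pc );
    if( phys >> 32 ) {
        return NULL;          /* ITLB fault already raised by the lookup */
    }
    return (uint16_t *)mem_get_page( (uint32_t)phys );
}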
11.211 @@ -540,7 +501,6 @@
11.212 uint32_t Rn = ((ir>>8)&0xF);
11.213 load_reg( R_EAX, 0 );
11.214 load_reg( R_ECX, Rn );
11.215 - precheck();
11.216 check_walign32( R_ECX );
11.217 MEM_WRITE_LONG( R_ECX, R_EAX );
11.218 sh4_x86.tstate = TSTATE_NONE;
11.219 @@ -568,7 +528,6 @@
11.220 load_reg( R_EAX, 0 );
11.221 load_reg( R_ECX, Rn );
11.222 ADD_r32_r32( R_EAX, R_ECX );
11.223 - precheck();
11.224 check_walign16( R_ECX );
11.225 load_reg( R_EAX, Rm );
11.226 MEM_WRITE_WORD( R_ECX, R_EAX );
11.227 @@ -581,7 +540,6 @@
11.228 load_reg( R_EAX, 0 );
11.229 load_reg( R_ECX, Rn );
11.230 ADD_r32_r32( R_EAX, R_ECX );
11.231 - precheck();
11.232 check_walign32( R_ECX );
11.233 load_reg( R_EAX, Rm );
11.234 MEM_WRITE_LONG( R_ECX, R_EAX );
11.235 @@ -803,7 +761,6 @@
11.236 load_reg( R_EAX, 0 );
11.237 load_reg( R_ECX, Rm );
11.238 ADD_r32_r32( R_EAX, R_ECX );
11.239 - precheck();
11.240 check_ralign16( R_ECX );
11.241 MEM_READ_WORD( R_ECX, R_EAX );
11.242 store_reg( R_EAX, Rn );
11.243 @@ -816,7 +773,6 @@
11.244 load_reg( R_EAX, 0 );
11.245 load_reg( R_ECX, Rm );
11.246 ADD_r32_r32( R_EAX, R_ECX );
11.247 - precheck();
11.248 check_ralign32( R_ECX );
11.249 MEM_READ_LONG( R_ECX, R_EAX );
11.250 store_reg( R_EAX, Rn );
11.251 @@ -827,7 +783,6 @@
11.252 { /* MAC.L @Rm+, @Rn+ */
11.253 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
11.254 load_reg( R_ECX, Rm );
11.255 - precheck();
11.256 check_ralign32( R_ECX );
11.257 load_reg( R_ECX, Rn );
11.258 check_ralign32( R_ECX );
11.259 @@ -861,7 +816,6 @@
11.260 load_reg( R_ECX, Rn );
11.261 load_reg( R_EAX, Rm );
11.262 ADD_imm32_r32( disp, R_ECX );
11.263 - precheck();
11.264 check_walign32( R_ECX );
11.265 MEM_WRITE_LONG( R_ECX, R_EAX );
11.266 sh4_x86.tstate = TSTATE_NONE;
11.267 @@ -882,7 +836,6 @@
11.268 { /* MOV.W Rm, @Rn */
11.269 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
11.270 load_reg( R_ECX, Rn );
11.271 - precheck();
11.272 check_walign16( R_ECX );
11.273 load_reg( R_EAX, Rm );
11.274 MEM_WRITE_WORD( R_ECX, R_EAX );
11.275 @@ -894,7 +847,6 @@
11.276 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
11.277 load_reg( R_EAX, Rm );
11.278 load_reg( R_ECX, Rn );
11.279 - precheck();
11.280 check_walign32(R_ECX);
11.281 MEM_WRITE_LONG( R_ECX, R_EAX );
11.282 sh4_x86.tstate = TSTATE_NONE;
11.283 @@ -915,7 +867,6 @@
11.284 { /* MOV.W Rm, @-Rn */
11.285 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
11.286 load_reg( R_ECX, Rn );
11.287 - precheck();
11.288 check_walign16( R_ECX );
11.289 load_reg( R_EAX, Rm );
11.290 ADD_imm8s_r32( -2, R_ECX );
11.291 @@ -929,7 +880,6 @@
11.292 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
11.293 load_reg( R_EAX, Rm );
11.294 load_reg( R_ECX, Rn );
11.295 - precheck();
11.296 check_walign32( R_ECX );
11.297 ADD_imm8s_r32( -4, R_ECX );
11.298 store_reg( R_ECX, Rn );
11.299 @@ -1307,7 +1257,6 @@
11.300 { /* STS.L MACH, @-Rn */
11.301 uint32_t Rn = ((ir>>8)&0xF);
11.302 load_reg( R_ECX, Rn );
11.303 - precheck();
11.304 check_walign32( R_ECX );
11.305 ADD_imm8s_r32( -4, R_ECX );
11.306 store_reg( R_ECX, Rn );
11.307 @@ -1320,7 +1269,6 @@
11.308 { /* STS.L MACL, @-Rn */
11.309 uint32_t Rn = ((ir>>8)&0xF);
11.310 load_reg( R_ECX, Rn );
11.311 - precheck();
11.312 check_walign32( R_ECX );
11.313 ADD_imm8s_r32( -4, R_ECX );
11.314 store_reg( R_ECX, Rn );
11.315 @@ -1333,7 +1281,6 @@
11.316 { /* STS.L PR, @-Rn */
11.317 uint32_t Rn = ((ir>>8)&0xF);
11.318 load_reg( R_ECX, Rn );
11.319 - precheck();
11.320 check_walign32( R_ECX );
11.321 ADD_imm8s_r32( -4, R_ECX );
11.322 store_reg( R_ECX, Rn );
11.323 @@ -1345,8 +1292,7 @@
11.324 case 0x3:
11.325 { /* STC.L SGR, @-Rn */
11.326 uint32_t Rn = ((ir>>8)&0xF);
11.327 - precheck();
11.328 - check_priv_no_precheck();
11.329 + check_priv();
11.330 load_reg( R_ECX, Rn );
11.331 check_walign32( R_ECX );
11.332 ADD_imm8s_r32( -4, R_ECX );
11.333 @@ -1360,7 +1306,6 @@
11.334 { /* STS.L FPUL, @-Rn */
11.335 uint32_t Rn = ((ir>>8)&0xF);
11.336 load_reg( R_ECX, Rn );
11.337 - precheck();
11.338 check_walign32( R_ECX );
11.339 ADD_imm8s_r32( -4, R_ECX );
11.340 store_reg( R_ECX, Rn );
11.341 @@ -1373,7 +1318,6 @@
11.342 { /* STS.L FPSCR, @-Rn */
11.343 uint32_t Rn = ((ir>>8)&0xF);
11.344 load_reg( R_ECX, Rn );
11.345 - precheck();
11.346 check_walign32( R_ECX );
11.347 ADD_imm8s_r32( -4, R_ECX );
11.348 store_reg( R_ECX, Rn );
11.349 @@ -1385,8 +1329,7 @@
11.350 case 0xF:
11.351 { /* STC.L DBR, @-Rn */
11.352 uint32_t Rn = ((ir>>8)&0xF);
11.353 - precheck();
11.354 - check_priv_no_precheck();
11.355 + check_priv();
11.356 load_reg( R_ECX, Rn );
11.357 check_walign32( R_ECX );
11.358 ADD_imm8s_r32( -4, R_ECX );
11.359 @@ -1408,8 +1351,7 @@
11.360 case 0x0:
11.361 { /* STC.L SR, @-Rn */
11.362 uint32_t Rn = ((ir>>8)&0xF);
11.363 - precheck();
11.364 - check_priv_no_precheck();
11.365 + check_priv();
11.366 call_func0( sh4_read_sr );
11.367 load_reg( R_ECX, Rn );
11.368 check_walign32( R_ECX );
11.369 @@ -1423,7 +1365,6 @@
11.370 { /* STC.L GBR, @-Rn */
11.371 uint32_t Rn = ((ir>>8)&0xF);
11.372 load_reg( R_ECX, Rn );
11.373 - precheck();
11.374 check_walign32( R_ECX );
11.375 ADD_imm8s_r32( -4, R_ECX );
11.376 store_reg( R_ECX, Rn );
11.377 @@ -1435,8 +1376,7 @@
11.378 case 0x2:
11.379 { /* STC.L VBR, @-Rn */
11.380 uint32_t Rn = ((ir>>8)&0xF);
11.381 - precheck();
11.382 - check_priv_no_precheck();
11.383 + check_priv();
11.384 load_reg( R_ECX, Rn );
11.385 check_walign32( R_ECX );
11.386 ADD_imm8s_r32( -4, R_ECX );
11.387 @@ -1449,8 +1389,7 @@
11.388 case 0x3:
11.389 { /* STC.L SSR, @-Rn */
11.390 uint32_t Rn = ((ir>>8)&0xF);
11.391 - precheck();
11.392 - check_priv_no_precheck();
11.393 + check_priv();
11.394 load_reg( R_ECX, Rn );
11.395 check_walign32( R_ECX );
11.396 ADD_imm8s_r32( -4, R_ECX );
11.397 @@ -1463,8 +1402,7 @@
11.398 case 0x4:
11.399 { /* STC.L SPC, @-Rn */
11.400 uint32_t Rn = ((ir>>8)&0xF);
11.401 - precheck();
11.402 - check_priv_no_precheck();
11.403 + check_priv();
11.404 load_reg( R_ECX, Rn );
11.405 check_walign32( R_ECX );
11.406 ADD_imm8s_r32( -4, R_ECX );
11.407 @@ -1482,8 +1420,7 @@
11.408 case 0x1:
11.409 { /* STC.L Rm_BANK, @-Rn */
11.410 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm_BANK = ((ir>>4)&0x7);
11.411 - precheck();
11.412 - check_priv_no_precheck();
11.413 + check_priv();
11.414 load_reg( R_ECX, Rn );
11.415 check_walign32( R_ECX );
11.416 ADD_imm8s_r32( -4, R_ECX );
11.417 @@ -1570,7 +1507,6 @@
11.418 { /* LDS.L @Rm+, MACH */
11.419 uint32_t Rm = ((ir>>8)&0xF);
11.420 load_reg( R_EAX, Rm );
11.421 - precheck();
11.422 check_ralign32( R_EAX );
11.423 MOV_r32_r32( R_EAX, R_ECX );
11.424 ADD_imm8s_r32( 4, R_EAX );
11.425 @@ -1584,7 +1520,6 @@
11.426 { /* LDS.L @Rm+, MACL */
11.427 uint32_t Rm = ((ir>>8)&0xF);
11.428 load_reg( R_EAX, Rm );
11.429 - precheck();
11.430 check_ralign32( R_EAX );
11.431 MOV_r32_r32( R_EAX, R_ECX );
11.432 ADD_imm8s_r32( 4, R_EAX );
11.433 @@ -1598,7 +1533,6 @@
11.434 { /* LDS.L @Rm+, PR */
11.435 uint32_t Rm = ((ir>>8)&0xF);
11.436 load_reg( R_EAX, Rm );
11.437 - precheck();
11.438 check_ralign32( R_EAX );
11.439 MOV_r32_r32( R_EAX, R_ECX );
11.440 ADD_imm8s_r32( 4, R_EAX );
11.441 @@ -1611,8 +1545,7 @@
11.442 case 0x3:
11.443 { /* LDC.L @Rm+, SGR */
11.444 uint32_t Rm = ((ir>>8)&0xF);
11.445 - precheck();
11.446 - check_priv_no_precheck();
11.447 + check_priv();
11.448 load_reg( R_EAX, Rm );
11.449 check_ralign32( R_EAX );
11.450 MOV_r32_r32( R_EAX, R_ECX );
11.451 @@ -1627,7 +1560,6 @@
11.452 { /* LDS.L @Rm+, FPUL */
11.453 uint32_t Rm = ((ir>>8)&0xF);
11.454 load_reg( R_EAX, Rm );
11.455 - precheck();
11.456 check_ralign32( R_EAX );
11.457 MOV_r32_r32( R_EAX, R_ECX );
11.458 ADD_imm8s_r32( 4, R_EAX );
11.459 @@ -1641,7 +1573,6 @@
11.460 { /* LDS.L @Rm+, FPSCR */
11.461 uint32_t Rm = ((ir>>8)&0xF);
11.462 load_reg( R_EAX, Rm );
11.463 - precheck();
11.464 check_ralign32( R_EAX );
11.465 MOV_r32_r32( R_EAX, R_ECX );
11.466 ADD_imm8s_r32( 4, R_EAX );
11.467 @@ -1655,8 +1586,7 @@
11.468 case 0xF:
11.469 { /* LDC.L @Rm+, DBR */
11.470 uint32_t Rm = ((ir>>8)&0xF);
11.471 - precheck();
11.472 - check_priv_no_precheck();
11.473 + check_priv();
11.474 load_reg( R_EAX, Rm );
11.475 check_ralign32( R_EAX );
11.476 MOV_r32_r32( R_EAX, R_ECX );
11.477 @@ -1682,8 +1612,7 @@
11.478 if( sh4_x86.in_delay_slot ) {
11.479 SLOTILLEGAL();
11.480 } else {
11.481 - precheck();
11.482 - check_priv_no_precheck();
11.483 + check_priv();
11.484 load_reg( R_EAX, Rm );
11.485 check_ralign32( R_EAX );
11.486 MOV_r32_r32( R_EAX, R_ECX );
11.487 @@ -1701,7 +1630,6 @@
11.488 { /* LDC.L @Rm+, GBR */
11.489 uint32_t Rm = ((ir>>8)&0xF);
11.490 load_reg( R_EAX, Rm );
11.491 - precheck();
11.492 check_ralign32( R_EAX );
11.493 MOV_r32_r32( R_EAX, R_ECX );
11.494 ADD_imm8s_r32( 4, R_EAX );
11.495 @@ -1714,8 +1642,7 @@
11.496 case 0x2:
11.497 { /* LDC.L @Rm+, VBR */
11.498 uint32_t Rm = ((ir>>8)&0xF);
11.499 - precheck();
11.500 - check_priv_no_precheck();
11.501 + check_priv();
11.502 load_reg( R_EAX, Rm );
11.503 check_ralign32( R_EAX );
11.504 MOV_r32_r32( R_EAX, R_ECX );
11.505 @@ -1729,8 +1656,7 @@
11.506 case 0x3:
11.507 { /* LDC.L @Rm+, SSR */
11.508 uint32_t Rm = ((ir>>8)&0xF);
11.509 - precheck();
11.510 - check_priv_no_precheck();
11.511 + check_priv();
11.512 load_reg( R_EAX, Rm );
11.513 check_ralign32( R_EAX );
11.514 MOV_r32_r32( R_EAX, R_ECX );
11.515 @@ -1744,8 +1670,7 @@
11.516 case 0x4:
11.517 { /* LDC.L @Rm+, SPC */
11.518 uint32_t Rm = ((ir>>8)&0xF);
11.519 - precheck();
11.520 - check_priv_no_precheck();
11.521 + check_priv();
11.522 load_reg( R_EAX, Rm );
11.523 check_ralign32( R_EAX );
11.524 MOV_r32_r32( R_EAX, R_ECX );
11.525 @@ -1764,8 +1689,7 @@
11.526 case 0x1:
11.527 { /* LDC.L @Rm+, Rn_BANK */
11.528 uint32_t Rm = ((ir>>8)&0xF); uint32_t Rn_BANK = ((ir>>4)&0x7);
11.529 - precheck();
11.530 - check_priv_no_precheck();
11.531 + check_priv();
11.532 load_reg( R_EAX, Rm );
11.533 check_ralign32( R_EAX );
11.534 MOV_r32_r32( R_EAX, R_ECX );
11.535 @@ -2090,7 +2014,6 @@
11.536 { /* MAC.W @Rm+, @Rn+ */
11.537 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
11.538 load_reg( R_ECX, Rm );
11.539 - precheck();
11.540 check_ralign16( R_ECX );
11.541 load_reg( R_ECX, Rn );
11.542 check_ralign16( R_ECX );
11.543 @@ -2137,7 +2060,6 @@
11.544 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF); uint32_t disp = (ir&0xF)<<2;
11.545 load_reg( R_ECX, Rm );
11.546 ADD_imm8s_r32( disp, R_ECX );
11.547 - precheck();
11.548 check_ralign32( R_ECX );
11.549 MEM_READ_LONG( R_ECX, R_EAX );
11.550 store_reg( R_EAX, Rn );
11.551 @@ -2159,7 +2081,6 @@
11.552 { /* MOV.W @Rm, Rn */
11.553 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
11.554 load_reg( R_ECX, Rm );
11.555 - precheck();
11.556 check_ralign16( R_ECX );
11.557 MEM_READ_WORD( R_ECX, R_EAX );
11.558 store_reg( R_EAX, Rn );
11.559 @@ -2170,7 +2091,6 @@
11.560 { /* MOV.L @Rm, Rn */
11.561 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
11.562 load_reg( R_ECX, Rm );
11.563 - precheck();
11.564 check_ralign32( R_ECX );
11.565 MEM_READ_LONG( R_ECX, R_EAX );
11.566 store_reg( R_EAX, Rn );
11.567 @@ -2200,7 +2120,6 @@
11.568 { /* MOV.W @Rm+, Rn */
11.569 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
11.570 load_reg( R_EAX, Rm );
11.571 - precheck();
11.572 check_ralign16( R_EAX );
11.573 MOV_r32_r32( R_EAX, R_ECX );
11.574 ADD_imm8s_r32( 2, R_EAX );
11.575 @@ -2214,7 +2133,6 @@
11.576 { /* MOV.L @Rm+, Rn */
11.577 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
11.578 load_reg( R_EAX, Rm );
11.579 - precheck();
11.580 check_ralign32( R_EAX );
11.581 MOV_r32_r32( R_EAX, R_ECX );
11.582 ADD_imm8s_r32( 4, R_EAX );
11.583 @@ -2335,7 +2253,6 @@
11.584 load_reg( R_ECX, Rn );
11.585 load_reg( R_EAX, 0 );
11.586 ADD_imm32_r32( disp, R_ECX );
11.587 - precheck();
11.588 check_walign16( R_ECX );
11.589 MEM_WRITE_WORD( R_ECX, R_EAX );
11.590 sh4_x86.tstate = TSTATE_NONE;
11.591 @@ -2356,7 +2273,6 @@
11.592 uint32_t Rm = ((ir>>4)&0xF); uint32_t disp = (ir&0xF)<<1;
11.593 load_reg( R_ECX, Rm );
11.594 ADD_imm32_r32( disp, R_ECX );
11.595 - precheck();
11.596 check_ralign16( R_ECX );
11.597 MEM_READ_WORD( R_ECX, R_EAX );
11.598 store_reg( R_EAX, 0 );
11.599 @@ -2506,7 +2422,6 @@
11.600 load_spreg( R_ECX, R_GBR );
11.601 load_reg( R_EAX, 0 );
11.602 ADD_imm32_r32( disp, R_ECX );
11.603 - precheck();
11.604 check_walign16( R_ECX );
11.605 MEM_WRITE_WORD( R_ECX, R_EAX );
11.606 sh4_x86.tstate = TSTATE_NONE;
11.607 @@ -2518,7 +2433,6 @@
11.608 load_spreg( R_ECX, R_GBR );
11.609 load_reg( R_EAX, 0 );
11.610 ADD_imm32_r32( disp, R_ECX );
11.611 - precheck();
11.612 check_walign32( R_ECX );
11.613 MEM_WRITE_LONG( R_ECX, R_EAX );
11.614 sh4_x86.tstate = TSTATE_NONE;
11.615 @@ -2556,7 +2470,6 @@
11.616 uint32_t disp = (ir&0xFF)<<1;
11.617 load_spreg( R_ECX, R_GBR );
11.618 ADD_imm32_r32( disp, R_ECX );
11.619 - precheck();
11.620 check_ralign16( R_ECX );
11.621 MEM_READ_WORD( R_ECX, R_EAX );
11.622 store_reg( R_EAX, 0 );
11.623 @@ -2568,7 +2481,6 @@
11.624 uint32_t disp = (ir&0xFF)<<2;
11.625 load_spreg( R_ECX, R_GBR );
11.626 ADD_imm32_r32( disp, R_ECX );
11.627 - precheck();
11.628 check_ralign32( R_ECX );
11.629 MEM_READ_LONG( R_ECX, R_EAX );
11.630 store_reg( R_EAX, 0 );
11.631 @@ -2685,7 +2597,7 @@
11.632 SLOTILLEGAL();
11.633 } else {
11.634 uint32_t target = (pc & 0xFFFFFFFC) + disp + 4;
11.635 - sh4ptr_t ptr = mem_get_region(target);
11.636 + sh4ptr_t ptr = sh4_get_region_by_vma(target);
11.637 if( ptr != NULL ) {
11.638 MOV_moff32_EAX( ptr );
11.639 } else {
11.640 @@ -2839,14 +2751,13 @@
11.641 case 0x6:
11.642 { /* FMOV @(R0, Rm), FRn */
11.643 uint32_t FRn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
11.644 - precheck();
11.645 - check_fpuen_no_precheck();
11.646 + check_fpuen();
11.647 load_reg( R_ECX, Rm );
11.648 ADD_sh4r_r32( REG_OFFSET(r[0]), R_ECX );
11.649 check_ralign32( R_ECX );
11.650 load_spreg( R_EDX, R_FPSCR );
11.651 TEST_imm32_r32( FPSCR_SZ, R_EDX );
11.652 - JNE_rel8(8 + CALL_FUNC1_SIZE, doublesize);
11.653 + JNE_rel8(8 + MEM_READ_SIZE, doublesize);
11.654 MEM_READ_LONG( R_ECX, R_EAX );
11.655 load_fr_bank( R_EDX );
11.656 store_fr( R_EDX, R_EAX, FRn );
11.657 @@ -2874,14 +2785,13 @@
11.658 case 0x7:
11.659 { /* FMOV FRm, @(R0, Rn) */
11.660 uint32_t Rn = ((ir>>8)&0xF); uint32_t FRm = ((ir>>4)&0xF);
11.661 - precheck();
11.662 - check_fpuen_no_precheck();
11.663 + check_fpuen();
11.664 load_reg( R_ECX, Rn );
11.665 ADD_sh4r_r32( REG_OFFSET(r[0]), R_ECX );
11.666 check_walign32( R_ECX );
11.667 load_spreg( R_EDX, R_FPSCR );
11.668 TEST_imm32_r32( FPSCR_SZ, R_EDX );
11.669 - JNE_rel8(8 + CALL_FUNC2_SIZE, doublesize);
11.670 + JNE_rel8(8 + MEM_WRITE_SIZE, doublesize);
11.671 load_fr_bank( R_EDX );
11.672 load_fr( R_EDX, R_EAX, FRm );
11.673 MEM_WRITE_LONG( R_ECX, R_EAX ); // 12
11.674 @@ -2908,13 +2818,12 @@
11.675 case 0x8:
11.676 { /* FMOV @Rm, FRn */
11.677 uint32_t FRn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
11.678 - precheck();
11.679 - check_fpuen_no_precheck();
11.680 + check_fpuen();
11.681 load_reg( R_ECX, Rm );
11.682 check_ralign32( R_ECX );
11.683 load_spreg( R_EDX, R_FPSCR );
11.684 TEST_imm32_r32( FPSCR_SZ, R_EDX );
11.685 - JNE_rel8(8 + CALL_FUNC1_SIZE, doublesize);
11.686 + JNE_rel8(8 + MEM_READ_SIZE, doublesize);
11.687 MEM_READ_LONG( R_ECX, R_EAX );
11.688 load_fr_bank( R_EDX );
11.689 store_fr( R_EDX, R_EAX, FRn );
11.690 @@ -2942,14 +2851,13 @@
11.691 case 0x9:
11.692 { /* FMOV @Rm+, FRn */
11.693 uint32_t FRn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
11.694 - precheck();
11.695 - check_fpuen_no_precheck();
11.696 + check_fpuen();
11.697 load_reg( R_ECX, Rm );
11.698 check_ralign32( R_ECX );
11.699 MOV_r32_r32( R_ECX, R_EAX );
11.700 load_spreg( R_EDX, R_FPSCR );
11.701 TEST_imm32_r32( FPSCR_SZ, R_EDX );
11.702 - JNE_rel8(14 + CALL_FUNC1_SIZE, doublesize);
11.703 + JNE_rel8(14 + MEM_READ_SIZE, doublesize);
11.704 ADD_imm8s_r32( 4, R_EAX );
11.705 store_reg( R_EAX, Rm );
11.706 MEM_READ_LONG( R_ECX, R_EAX );
11.707 @@ -2982,13 +2890,12 @@
11.708 case 0xA:
11.709 { /* FMOV FRm, @Rn */
11.710 uint32_t Rn = ((ir>>8)&0xF); uint32_t FRm = ((ir>>4)&0xF);
11.711 - precheck();
11.712 - check_fpuen_no_precheck();
11.713 + check_fpuen();
11.714 load_reg( R_ECX, Rn );
11.715 check_walign32( R_ECX );
11.716 load_spreg( R_EDX, R_FPSCR );
11.717 TEST_imm32_r32( FPSCR_SZ, R_EDX );
11.718 - JNE_rel8(8 + CALL_FUNC2_SIZE, doublesize);
11.719 + JNE_rel8(8 + MEM_WRITE_SIZE, doublesize);
11.720 load_fr_bank( R_EDX );
11.721 load_fr( R_EDX, R_EAX, FRm );
11.722 MEM_WRITE_LONG( R_ECX, R_EAX ); // 12
11.723 @@ -3015,13 +2922,12 @@
11.724 case 0xB:
11.725 { /* FMOV FRm, @-Rn */
11.726 uint32_t Rn = ((ir>>8)&0xF); uint32_t FRm = ((ir>>4)&0xF);
11.727 - precheck();
11.728 - check_fpuen_no_precheck();
11.729 + check_fpuen();
11.730 load_reg( R_ECX, Rn );
11.731 check_walign32( R_ECX );
11.732 load_spreg( R_EDX, R_FPSCR );
11.733 TEST_imm32_r32( FPSCR_SZ, R_EDX );
11.734 - JNE_rel8(14 + CALL_FUNC2_SIZE, doublesize);
11.735 + JNE_rel8(14 + MEM_WRITE_SIZE, doublesize);
11.736 load_fr_bank( R_EDX );
11.737 load_fr( R_EDX, R_EAX, FRm );
11.738 ADD_imm8s_r32(-4,R_ECX);
11.739 @@ -3405,8 +3311,7 @@
11.740 if( sh4_x86.in_delay_slot ) {
11.741 SLOTILLEGAL();
11.742 } else {
11.743 - precheck();
11.744 - JMP_exit(EXIT_ILLEGAL);
11.745 + JMP_exc(EXC_ILLEGAL);
11.746 return 2;
11.747 }
11.748 }
12.1 --- a/src/sh4/sh4x86.in Tue Jan 01 04:56:52 2008 +0000
12.2 +++ b/src/sh4/sh4x86.in Tue Jan 01 04:58:57 2008 +0000
12.3 @@ -34,6 +34,12 @@
12.4
12.5 #define DEFAULT_BACKPATCH_SIZE 4096
12.6
12.7 +struct backpatch_record {
12.8 + uint32_t *fixup_addr;
12.9 + uint32_t fixup_icount;
12.10 + uint32_t exc_code;
12.11 +};
12.12 +
12.13 /**
12.14 * Struct to manage internal translation state. This state is not saved -
12.15 * it is only valid between calls to sh4_translate_begin_block() and
12.16 @@ -49,7 +55,7 @@
12.17 int tstate;
12.18
12.19 /* Allocated memory for the (block-wide) back-patch list */
12.20 - uint32_t **backpatch_list;
12.21 + struct backpatch_record *backpatch_list;
12.22 uint32_t backpatch_posn;
12.23 uint32_t backpatch_size;
12.24 };
12.25 @@ -75,14 +81,6 @@
12.26 OP(0x70+ (sh4_x86.tstate^1)); OP(rel8); \
12.27 MARK_JMP(rel8, label)
12.28
12.29 -
12.30 -#define EXIT_DATA_ADDR_READ 0
12.31 -#define EXIT_DATA_ADDR_WRITE 7
12.32 -#define EXIT_ILLEGAL 14
12.33 -#define EXIT_SLOT_ILLEGAL 21
12.34 -#define EXIT_FPU_DISABLED 28
12.35 -#define EXIT_SLOT_FPU_DISABLED 35
12.36 -
12.37 static struct sh4_x86_state sh4_x86;
12.38
12.39 static uint32_t max_int = 0x7FFFFFFF;
12.40 @@ -93,26 +91,25 @@
12.41 void sh4_x86_init()
12.42 {
12.43 sh4_x86.backpatch_list = malloc(DEFAULT_BACKPATCH_SIZE);
12.44 - sh4_x86.backpatch_size = DEFAULT_BACKPATCH_SIZE / sizeof(uint32_t *);
12.45 + sh4_x86.backpatch_size = DEFAULT_BACKPATCH_SIZE / sizeof(struct backpatch_record);
12.46 }
12.47
12.48
12.49 -static void sh4_x86_add_backpatch( uint8_t *ptr )
12.50 +static void sh4_x86_add_backpatch( uint8_t *fixup_addr, uint32_t fixup_pc, uint32_t exc_code )
12.51 {
12.52 if( sh4_x86.backpatch_posn == sh4_x86.backpatch_size ) {
12.53 sh4_x86.backpatch_size <<= 1;
12.54 - sh4_x86.backpatch_list = realloc( sh4_x86.backpatch_list, sh4_x86.backpatch_size * sizeof(uint32_t *) );
12.55 + sh4_x86.backpatch_list = realloc( sh4_x86.backpatch_list,
12.56 + sh4_x86.backpatch_size * sizeof(struct backpatch_record));
12.57 assert( sh4_x86.backpatch_list != NULL );
12.58 }
12.59 - sh4_x86.backpatch_list[sh4_x86.backpatch_posn++] = (uint32_t *)ptr;
12.60 -}
12.61 -
12.62 -static void sh4_x86_do_backpatch( uint8_t *reloc_base )
12.63 -{
12.64 - unsigned int i;
12.65 - for( i=0; i<sh4_x86.backpatch_posn; i++ ) {
12.66 - *sh4_x86.backpatch_list[i] += (reloc_base - ((uint8_t *)sh4_x86.backpatch_list[i]) - 4);
12.67 + if( sh4_x86.in_delay_slot ) {
12.68 + fixup_pc -= 2;
12.69 }
12.70 + sh4_x86.backpatch_list[sh4_x86.backpatch_posn].fixup_addr = (uint32_t *)fixup_addr;
12.71 + sh4_x86.backpatch_list[sh4_x86.backpatch_posn].fixup_icount = (fixup_pc - sh4_x86.block_start_pc)>>1;
12.72 + sh4_x86.backpatch_list[sh4_x86.backpatch_posn].exc_code = exc_code;
12.73 + sh4_x86.backpatch_posn++;
12.74 }
12.75
12.76 /**
12.77 @@ -266,97 +263,60 @@
12.78 }
12.79
12.80 /* Exception checks - Note that all exception checks will clobber EAX */
12.81 -#define precheck() load_imm32(R_EDX, (pc-sh4_x86.block_start_pc-(sh4_x86.in_delay_slot?2:0))>>1)
12.82
12.83 #define check_priv( ) \
12.84 if( !sh4_x86.priv_checked ) { \
12.85 sh4_x86.priv_checked = TRUE;\
12.86 - precheck();\
12.87 load_spreg( R_EAX, R_SR );\
12.88 AND_imm32_r32( SR_MD, R_EAX );\
12.89 if( sh4_x86.in_delay_slot ) {\
12.90 - JE_exit( EXIT_SLOT_ILLEGAL );\
12.91 + JE_exc( EXC_SLOT_ILLEGAL );\
12.92 } else {\
12.93 - JE_exit( EXIT_ILLEGAL );\
12.94 + JE_exc( EXC_ILLEGAL );\
12.95 }\
12.96 }\
12.97
12.98 -
12.99 -static void check_priv_no_precheck()
12.100 -{
12.101 - if( !sh4_x86.priv_checked ) {
12.102 - sh4_x86.priv_checked = TRUE;
12.103 - load_spreg( R_EAX, R_SR );
12.104 - AND_imm32_r32( SR_MD, R_EAX );
12.105 - if( sh4_x86.in_delay_slot ) {
12.106 - JE_exit( EXIT_SLOT_ILLEGAL );
12.107 - } else {
12.108 - JE_exit( EXIT_ILLEGAL );
12.109 - }
12.110 - }
12.111 -}
12.112 -
12.113 #define check_fpuen( ) \
12.114 if( !sh4_x86.fpuen_checked ) {\
12.115 sh4_x86.fpuen_checked = TRUE;\
12.116 - precheck();\
12.117 load_spreg( R_EAX, R_SR );\
12.118 AND_imm32_r32( SR_FD, R_EAX );\
12.119 if( sh4_x86.in_delay_slot ) {\
12.120 - JNE_exit(EXIT_SLOT_FPU_DISABLED);\
12.121 + JNE_exc(EXC_SLOT_FPU_DISABLED);\
12.122 } else {\
12.123 - JNE_exit(EXIT_FPU_DISABLED);\
12.124 + JNE_exc(EXC_FPU_DISABLED);\
12.125 }\
12.126 }
12.127
12.128 -static void check_fpuen_no_precheck()
12.129 -{
12.130 - if( !sh4_x86.fpuen_checked ) {
12.131 - sh4_x86.fpuen_checked = TRUE;
12.132 - load_spreg( R_EAX, R_SR );
12.133 - AND_imm32_r32( SR_FD, R_EAX );
12.134 - if( sh4_x86.in_delay_slot ) {
12.135 - JNE_exit(EXIT_SLOT_FPU_DISABLED);
12.136 - } else {
12.137 - JNE_exit(EXIT_FPU_DISABLED);
12.138 - }
12.139 - }
12.140 +#define check_ralign16( x86reg ) \
12.141 + TEST_imm32_r32( 0x00000001, x86reg ); \
12.142 + JNE_exc(EXC_DATA_ADDR_READ)
12.143
12.144 -}
12.145 +#define check_walign16( x86reg ) \
12.146 + TEST_imm32_r32( 0x00000001, x86reg ); \
12.147 + JNE_exc(EXC_DATA_ADDR_WRITE);
12.148
12.149 -static void check_ralign16( int x86reg )
12.150 -{
12.151 - TEST_imm32_r32( 0x00000001, x86reg );
12.152 - JNE_exit(EXIT_DATA_ADDR_READ);
12.153 -}
12.154 +#define check_ralign32( x86reg ) \
12.155 + TEST_imm32_r32( 0x00000003, x86reg ); \
12.156 + JNE_exc(EXC_DATA_ADDR_READ)
12.157
12.158 -static void check_walign16( int x86reg )
12.159 -{
12.160 - TEST_imm32_r32( 0x00000001, x86reg );
12.161 - JNE_exit(EXIT_DATA_ADDR_WRITE);
12.162 -}
12.163 -
12.164 -static void check_ralign32( int x86reg )
12.165 -{
12.166 - TEST_imm32_r32( 0x00000003, x86reg );
12.167 - JNE_exit(EXIT_DATA_ADDR_READ);
12.168 -}
12.169 -static void check_walign32( int x86reg )
12.170 -{
12.171 - TEST_imm32_r32( 0x00000003, x86reg );
12.172 - JNE_exit(EXIT_DATA_ADDR_WRITE);
12.173 -}
12.174 +#define check_walign32( x86reg ) \
12.175 + TEST_imm32_r32( 0x00000003, x86reg ); \
12.176 + JNE_exc(EXC_DATA_ADDR_WRITE);
12.177
12.178 #define UNDEF()
12.179 #define MEM_RESULT(value_reg) if(value_reg != R_EAX) { MOV_r32_r32(R_EAX,value_reg); }
12.180 -#define MEM_READ_BYTE( addr_reg, value_reg ) call_func1(sh4_read_byte, addr_reg ); MEM_RESULT(value_reg)
12.181 -#define MEM_READ_WORD( addr_reg, value_reg ) call_func1(sh4_read_word, addr_reg ); MEM_RESULT(value_reg)
12.182 -#define MEM_READ_LONG( addr_reg, value_reg ) call_func1(sh4_read_long, addr_reg ); MEM_RESULT(value_reg)
12.183 -#define MEM_WRITE_BYTE( addr_reg, value_reg ) call_func2(sh4_write_byte, addr_reg, value_reg)
12.184 -#define MEM_WRITE_WORD( addr_reg, value_reg ) call_func2(sh4_write_word, addr_reg, value_reg)
12.185 -#define MEM_WRITE_LONG( addr_reg, value_reg ) call_func2(sh4_write_long, addr_reg, value_reg)
12.186 +#define MEM_READ_BYTE( addr_reg, value_reg ) call_func1(sh4_read_byte, addr_reg ); TEST_r32_r32( R_EDX, R_EDX ); JNE_exc(-1); MEM_RESULT(value_reg)
12.187 +#define MEM_READ_WORD( addr_reg, value_reg ) call_func1(sh4_read_word, addr_reg ); TEST_r32_r32( R_EDX, R_EDX ); JNE_exc(-1); MEM_RESULT(value_reg)
12.188 +#define MEM_READ_LONG( addr_reg, value_reg ) call_func1(sh4_read_long, addr_reg ); TEST_r32_r32( R_EDX, R_EDX ); JNE_exc(-1); MEM_RESULT(value_reg)
12.189 +#define MEM_WRITE_BYTE( addr_reg, value_reg ) call_func2(sh4_write_byte, addr_reg, value_reg); TEST_r32_r32( R_EAX, R_EAX ); JNE_exc(-1);
12.190 +#define MEM_WRITE_WORD( addr_reg, value_reg ) call_func2(sh4_write_word, addr_reg, value_reg); TEST_r32_r32( R_EAX, R_EAX ); JNE_exc(-1);
12.191 +#define MEM_WRITE_LONG( addr_reg, value_reg ) call_func2(sh4_write_long, addr_reg, value_reg); TEST_r32_r32( R_EAX, R_EAX ); JNE_exc(-1);
12.192
12.193 -#define SLOTILLEGAL() precheck(); JMP_exit(EXIT_SLOT_ILLEGAL); sh4_x86.in_delay_slot = FALSE; return 1;
12.194 +#define MEM_READ_SIZE (CALL_FUNC1_SIZE+8)
12.195 +#define MEM_WRITE_SIZE (CALL_FUNC2_SIZE+8)
12.196 +
12.197 +#define SLOTILLEGAL() JMP_exc(EXC_SLOT_ILLEGAL); sh4_x86.in_delay_slot = FALSE; return 1;
12.198
12.199 extern uint16_t *sh4_icache;
12.200 extern uint32_t sh4_icache_addr;
12.201 @@ -389,7 +349,8 @@
12.202 if( sh4_icache != NULL && pageaddr == sh4_icache_addr ) {
12.203 ir = sh4_icache[(pc&0xFFF)>>1];
12.204 } else {
12.205 - sh4_icache = (uint16_t *)mem_get_page(pc);
12.206 + uint64_t phys = mmu_vma_to_phys_exec(pc);
12.207 + sh4_icache = (uint16_t *)mem_get_page((uint32_t)phys);
12.208 if( ((uintptr_t)sh4_icache) < MAX_IO_REGIONS ) {
12.209 /* If someone's actually been so daft as to try to execute out of an IO
12.210 * region, fallback on the full-blown memory read
12.211 @@ -619,7 +580,6 @@
12.212 :}
12.213 MAC.L @Rm+, @Rn+ {:
12.214 load_reg( R_ECX, Rm );
12.215 - precheck();
12.216 check_ralign32( R_ECX );
12.217 load_reg( R_ECX, Rn );
12.218 check_ralign32( R_ECX );
12.219 @@ -643,7 +603,6 @@
12.220 :}
12.221 MAC.W @Rm+, @Rn+ {:
12.222 load_reg( R_ECX, Rm );
12.223 - precheck();
12.224 check_ralign16( R_ECX );
12.225 load_reg( R_ECX, Rn );
12.226 check_ralign16( R_ECX );
12.227 @@ -1090,7 +1049,6 @@
12.228 MOV.L Rm, @Rn {:
12.229 load_reg( R_EAX, Rm );
12.230 load_reg( R_ECX, Rn );
12.231 - precheck();
12.232 check_walign32(R_ECX);
12.233 MEM_WRITE_LONG( R_ECX, R_EAX );
12.234 sh4_x86.tstate = TSTATE_NONE;
12.235 @@ -1098,7 +1056,6 @@
12.236 MOV.L Rm, @-Rn {:
12.237 load_reg( R_EAX, Rm );
12.238 load_reg( R_ECX, Rn );
12.239 - precheck();
12.240 check_walign32( R_ECX );
12.241 ADD_imm8s_r32( -4, R_ECX );
12.242 store_reg( R_ECX, Rn );
12.243 @@ -1109,7 +1066,6 @@
12.244 load_reg( R_EAX, 0 );
12.245 load_reg( R_ECX, Rn );
12.246 ADD_r32_r32( R_EAX, R_ECX );
12.247 - precheck();
12.248 check_walign32( R_ECX );
12.249 load_reg( R_EAX, Rm );
12.250 MEM_WRITE_LONG( R_ECX, R_EAX );
12.251 @@ -1119,7 +1075,6 @@
12.252 load_spreg( R_ECX, R_GBR );
12.253 load_reg( R_EAX, 0 );
12.254 ADD_imm32_r32( disp, R_ECX );
12.255 - precheck();
12.256 check_walign32( R_ECX );
12.257 MEM_WRITE_LONG( R_ECX, R_EAX );
12.258 sh4_x86.tstate = TSTATE_NONE;
12.259 @@ -1128,14 +1083,12 @@
12.260 load_reg( R_ECX, Rn );
12.261 load_reg( R_EAX, Rm );
12.262 ADD_imm32_r32( disp, R_ECX );
12.263 - precheck();
12.264 check_walign32( R_ECX );
12.265 MEM_WRITE_LONG( R_ECX, R_EAX );
12.266 sh4_x86.tstate = TSTATE_NONE;
12.267 :}
12.268 MOV.L @Rm, Rn {:
12.269 load_reg( R_ECX, Rm );
12.270 - precheck();
12.271 check_ralign32( R_ECX );
12.272 MEM_READ_LONG( R_ECX, R_EAX );
12.273 store_reg( R_EAX, Rn );
12.274 @@ -1143,7 +1096,6 @@
12.275 :}
12.276 MOV.L @Rm+, Rn {:
12.277 load_reg( R_EAX, Rm );
12.278 - precheck();
12.279 check_ralign32( R_EAX );
12.280 MOV_r32_r32( R_EAX, R_ECX );
12.281 ADD_imm8s_r32( 4, R_EAX );
12.282 @@ -1156,7 +1108,6 @@
12.283 load_reg( R_EAX, 0 );
12.284 load_reg( R_ECX, Rm );
12.285 ADD_r32_r32( R_EAX, R_ECX );
12.286 - precheck();
12.287 check_ralign32( R_ECX );
12.288 MEM_READ_LONG( R_ECX, R_EAX );
12.289 store_reg( R_EAX, Rn );
12.290 @@ -1165,7 +1116,6 @@
12.291 MOV.L @(disp, GBR), R0 {:
12.292 load_spreg( R_ECX, R_GBR );
12.293 ADD_imm32_r32( disp, R_ECX );
12.294 - precheck();
12.295 check_ralign32( R_ECX );
12.296 MEM_READ_LONG( R_ECX, R_EAX );
12.297 store_reg( R_EAX, 0 );
12.298 @@ -1176,7 +1126,7 @@
12.299 SLOTILLEGAL();
12.300 } else {
12.301 uint32_t target = (pc & 0xFFFFFFFC) + disp + 4;
12.302 - sh4ptr_t ptr = mem_get_region(target);
12.303 + sh4ptr_t ptr = sh4_get_region_by_vma(target);
12.304 if( ptr != NULL ) {
12.305 MOV_moff32_EAX( ptr );
12.306 } else {
12.307 @@ -1190,7 +1140,6 @@
12.308 MOV.L @(disp, Rm), Rn {:
12.309 load_reg( R_ECX, Rm );
12.310 ADD_imm8s_r32( disp, R_ECX );
12.311 - precheck();
12.312 check_ralign32( R_ECX );
12.313 MEM_READ_LONG( R_ECX, R_EAX );
12.314 store_reg( R_EAX, Rn );
12.315 @@ -1198,7 +1147,6 @@
12.316 :}
12.317 MOV.W Rm, @Rn {:
12.318 load_reg( R_ECX, Rn );
12.319 - precheck();
12.320 check_walign16( R_ECX );
12.321 load_reg( R_EAX, Rm );
12.322 MEM_WRITE_WORD( R_ECX, R_EAX );
12.323 @@ -1206,7 +1154,6 @@
12.324 :}
12.325 MOV.W Rm, @-Rn {:
12.326 load_reg( R_ECX, Rn );
12.327 - precheck();
12.328 check_walign16( R_ECX );
12.329 load_reg( R_EAX, Rm );
12.330 ADD_imm8s_r32( -2, R_ECX );
12.331 @@ -1218,7 +1165,6 @@
12.332 load_reg( R_EAX, 0 );
12.333 load_reg( R_ECX, Rn );
12.334 ADD_r32_r32( R_EAX, R_ECX );
12.335 - precheck();
12.336 check_walign16( R_ECX );
12.337 load_reg( R_EAX, Rm );
12.338 MEM_WRITE_WORD( R_ECX, R_EAX );
12.339 @@ -1228,7 +1174,6 @@
12.340 load_spreg( R_ECX, R_GBR );
12.341 load_reg( R_EAX, 0 );
12.342 ADD_imm32_r32( disp, R_ECX );
12.343 - precheck();
12.344 check_walign16( R_ECX );
12.345 MEM_WRITE_WORD( R_ECX, R_EAX );
12.346 sh4_x86.tstate = TSTATE_NONE;
12.347 @@ -1237,14 +1182,12 @@
12.348 load_reg( R_ECX, Rn );
12.349 load_reg( R_EAX, 0 );
12.350 ADD_imm32_r32( disp, R_ECX );
12.351 - precheck();
12.352 check_walign16( R_ECX );
12.353 MEM_WRITE_WORD( R_ECX, R_EAX );
12.354 sh4_x86.tstate = TSTATE_NONE;
12.355 :}
12.356 MOV.W @Rm, Rn {:
12.357 load_reg( R_ECX, Rm );
12.358 - precheck();
12.359 check_ralign16( R_ECX );
12.360 MEM_READ_WORD( R_ECX, R_EAX );
12.361 store_reg( R_EAX, Rn );
12.362 @@ -1252,7 +1195,6 @@
12.363 :}
12.364 MOV.W @Rm+, Rn {:
12.365 load_reg( R_EAX, Rm );
12.366 - precheck();
12.367 check_ralign16( R_EAX );
12.368 MOV_r32_r32( R_EAX, R_ECX );
12.369 ADD_imm8s_r32( 2, R_EAX );
12.370 @@ -1265,7 +1207,6 @@
12.371 load_reg( R_EAX, 0 );
12.372 load_reg( R_ECX, Rm );
12.373 ADD_r32_r32( R_EAX, R_ECX );
12.374 - precheck();
12.375 check_ralign16( R_ECX );
12.376 MEM_READ_WORD( R_ECX, R_EAX );
12.377 store_reg( R_EAX, Rn );
12.378 @@ -1274,7 +1215,6 @@
12.379 MOV.W @(disp, GBR), R0 {:
12.380 load_spreg( R_ECX, R_GBR );
12.381 ADD_imm32_r32( disp, R_ECX );
12.382 - precheck();
12.383 check_ralign16( R_ECX );
12.384 MEM_READ_WORD( R_ECX, R_EAX );
12.385 store_reg( R_EAX, 0 );
12.386 @@ -1293,7 +1233,6 @@
12.387 MOV.W @(disp, Rm), R0 {:
12.388 load_reg( R_ECX, Rm );
12.389 ADD_imm32_r32( disp, R_ECX );
12.390 - precheck();
12.391 check_ralign16( R_ECX );
12.392 MEM_READ_WORD( R_ECX, R_EAX );
12.393 store_reg( R_EAX, 0 );
12.394 @@ -1310,7 +1249,6 @@
12.395 MOVCA.L R0, @Rn {:
12.396 load_reg( R_EAX, 0 );
12.397 load_reg( R_ECX, Rn );
12.398 - precheck();
12.399 check_walign32( R_ECX );
12.400 MEM_WRITE_LONG( R_ECX, R_EAX );
12.401 sh4_x86.tstate = TSTATE_NONE;
12.402 @@ -1506,8 +1444,7 @@
12.403 if( sh4_x86.in_delay_slot ) {
12.404 SLOTILLEGAL();
12.405 } else {
12.406 - precheck();
12.407 - JMP_exit(EXIT_ILLEGAL);
12.408 + JMP_exc(EXC_ILLEGAL);
12.409 return 2;
12.410 }
12.411 :}
12.412 @@ -1591,13 +1528,12 @@
12.413 sh4_x86.tstate = TSTATE_NONE;
12.414 :}
12.415 FMOV FRm, @Rn {:
12.416 - precheck();
12.417 - check_fpuen_no_precheck();
12.418 + check_fpuen();
12.419 load_reg( R_ECX, Rn );
12.420 check_walign32( R_ECX );
12.421 load_spreg( R_EDX, R_FPSCR );
12.422 TEST_imm32_r32( FPSCR_SZ, R_EDX );
12.423 - JNE_rel8(8 + CALL_FUNC2_SIZE, doublesize);
12.424 + JNE_rel8(8 + MEM_WRITE_SIZE, doublesize);
12.425 load_fr_bank( R_EDX );
12.426 load_fr( R_EDX, R_EAX, FRm );
12.427 MEM_WRITE_LONG( R_ECX, R_EAX ); // 12
12.428 @@ -1621,13 +1557,12 @@
12.429 sh4_x86.tstate = TSTATE_NONE;
12.430 :}
12.431 FMOV @Rm, FRn {:
12.432 - precheck();
12.433 - check_fpuen_no_precheck();
12.434 + check_fpuen();
12.435 load_reg( R_ECX, Rm );
12.436 check_ralign32( R_ECX );
12.437 load_spreg( R_EDX, R_FPSCR );
12.438 TEST_imm32_r32( FPSCR_SZ, R_EDX );
12.439 - JNE_rel8(8 + CALL_FUNC1_SIZE, doublesize);
12.440 + JNE_rel8(8 + MEM_READ_SIZE, doublesize);
12.441 MEM_READ_LONG( R_ECX, R_EAX );
12.442 load_fr_bank( R_EDX );
12.443 store_fr( R_EDX, R_EAX, FRn );
12.444 @@ -1652,13 +1587,12 @@
12.445 sh4_x86.tstate = TSTATE_NONE;
12.446 :}
12.447 FMOV FRm, @-Rn {:
12.448 - precheck();
12.449 - check_fpuen_no_precheck();
12.450 + check_fpuen();
12.451 load_reg( R_ECX, Rn );
12.452 check_walign32( R_ECX );
12.453 load_spreg( R_EDX, R_FPSCR );
12.454 TEST_imm32_r32( FPSCR_SZ, R_EDX );
12.455 - JNE_rel8(14 + CALL_FUNC2_SIZE, doublesize);
12.456 + JNE_rel8(14 + MEM_WRITE_SIZE, doublesize);
12.457 load_fr_bank( R_EDX );
12.458 load_fr( R_EDX, R_EAX, FRm );
12.459 ADD_imm8s_r32(-4,R_ECX);
12.460 @@ -1688,14 +1622,13 @@
12.461 sh4_x86.tstate = TSTATE_NONE;
12.462 :}
12.463 FMOV @Rm+, FRn {:
12.464 - precheck();
12.465 - check_fpuen_no_precheck();
12.466 + check_fpuen();
12.467 load_reg( R_ECX, Rm );
12.468 check_ralign32( R_ECX );
12.469 MOV_r32_r32( R_ECX, R_EAX );
12.470 load_spreg( R_EDX, R_FPSCR );
12.471 TEST_imm32_r32( FPSCR_SZ, R_EDX );
12.472 - JNE_rel8(14 + CALL_FUNC1_SIZE, doublesize);
12.473 + JNE_rel8(14 + MEM_READ_SIZE, doublesize);
12.474 ADD_imm8s_r32( 4, R_EAX );
12.475 store_reg( R_EAX, Rm );
12.476 MEM_READ_LONG( R_ECX, R_EAX );
12.477 @@ -1725,14 +1658,13 @@
12.478 sh4_x86.tstate = TSTATE_NONE;
12.479 :}
12.480 FMOV FRm, @(R0, Rn) {:
12.481 - precheck();
12.482 - check_fpuen_no_precheck();
12.483 + check_fpuen();
12.484 load_reg( R_ECX, Rn );
12.485 ADD_sh4r_r32( REG_OFFSET(r[0]), R_ECX );
12.486 check_walign32( R_ECX );
12.487 load_spreg( R_EDX, R_FPSCR );
12.488 TEST_imm32_r32( FPSCR_SZ, R_EDX );
12.489 - JNE_rel8(8 + CALL_FUNC2_SIZE, doublesize);
12.490 + JNE_rel8(8 + MEM_WRITE_SIZE, doublesize);
12.491 load_fr_bank( R_EDX );
12.492 load_fr( R_EDX, R_EAX, FRm );
12.493 MEM_WRITE_LONG( R_ECX, R_EAX ); // 12
12.494 @@ -1756,14 +1688,13 @@
12.495 sh4_x86.tstate = TSTATE_NONE;
12.496 :}
12.497 FMOV @(R0, Rm), FRn {:
12.498 - precheck();
12.499 - check_fpuen_no_precheck();
12.500 + check_fpuen();
12.501 load_reg( R_ECX, Rm );
12.502 ADD_sh4r_r32( REG_OFFSET(r[0]), R_ECX );
12.503 check_ralign32( R_ECX );
12.504 load_spreg( R_EDX, R_FPSCR );
12.505 TEST_imm32_r32( FPSCR_SZ, R_EDX );
12.506 - JNE_rel8(8 + CALL_FUNC1_SIZE, doublesize);
12.507 + JNE_rel8(8 + MEM_READ_SIZE, doublesize);
12.508 MEM_READ_LONG( R_ECX, R_EAX );
12.509 load_fr_bank( R_EDX );
12.510 store_fr( R_EDX, R_EAX, FRn );
12.511 @@ -2222,7 +2153,6 @@
12.512 :}
12.513 LDC.L @Rm+, GBR {:
12.514 load_reg( R_EAX, Rm );
12.515 - precheck();
12.516 check_ralign32( R_EAX );
12.517 MOV_r32_r32( R_EAX, R_ECX );
12.518 ADD_imm8s_r32( 4, R_EAX );
12.519 @@ -2235,8 +2165,7 @@
12.520 if( sh4_x86.in_delay_slot ) {
12.521 SLOTILLEGAL();
12.522 } else {
12.523 - precheck();
12.524 - check_priv_no_precheck();
12.525 + check_priv();
12.526 load_reg( R_EAX, Rm );
12.527 check_ralign32( R_EAX );
12.528 MOV_r32_r32( R_EAX, R_ECX );
12.529 @@ -2250,8 +2179,7 @@
12.530 }
12.531 :}
12.532 LDC.L @Rm+, VBR {:
12.533 - precheck();
12.534 - check_priv_no_precheck();
12.535 + check_priv();
12.536 load_reg( R_EAX, Rm );
12.537 check_ralign32( R_EAX );
12.538 MOV_r32_r32( R_EAX, R_ECX );
12.539 @@ -2262,8 +2190,7 @@
12.540 sh4_x86.tstate = TSTATE_NONE;
12.541 :}
12.542 LDC.L @Rm+, SSR {:
12.543 - precheck();
12.544 - check_priv_no_precheck();
12.545 + check_priv();
12.546 load_reg( R_EAX, Rm );
12.547 check_ralign32( R_EAX );
12.548 MOV_r32_r32( R_EAX, R_ECX );
12.549 @@ -2274,8 +2201,7 @@
12.550 sh4_x86.tstate = TSTATE_NONE;
12.551 :}
12.552 LDC.L @Rm+, SGR {:
12.553 - precheck();
12.554 - check_priv_no_precheck();
12.555 + check_priv();
12.556 load_reg( R_EAX, Rm );
12.557 check_ralign32( R_EAX );
12.558 MOV_r32_r32( R_EAX, R_ECX );
12.559 @@ -2286,8 +2212,7 @@
12.560 sh4_x86.tstate = TSTATE_NONE;
12.561 :}
12.562 LDC.L @Rm+, SPC {:
12.563 - precheck();
12.564 - check_priv_no_precheck();
12.565 + check_priv();
12.566 load_reg( R_EAX, Rm );
12.567 check_ralign32( R_EAX );
12.568 MOV_r32_r32( R_EAX, R_ECX );
12.569 @@ -2298,8 +2223,7 @@
12.570 sh4_x86.tstate = TSTATE_NONE;
12.571 :}
12.572 LDC.L @Rm+, DBR {:
12.573 - precheck();
12.574 - check_priv_no_precheck();
12.575 + check_priv();
12.576 load_reg( R_EAX, Rm );
12.577 check_ralign32( R_EAX );
12.578 MOV_r32_r32( R_EAX, R_ECX );
12.579 @@ -2310,8 +2234,7 @@
12.580 sh4_x86.tstate = TSTATE_NONE;
12.581 :}
12.582 LDC.L @Rm+, Rn_BANK {:
12.583 - precheck();
12.584 - check_priv_no_precheck();
12.585 + check_priv();
12.586 load_reg( R_EAX, Rm );
12.587 check_ralign32( R_EAX );
12.588 MOV_r32_r32( R_EAX, R_ECX );
12.589 @@ -2329,7 +2252,6 @@
12.590 :}
12.591 LDS.L @Rm+, FPSCR {:
12.592 load_reg( R_EAX, Rm );
12.593 - precheck();
12.594 check_ralign32( R_EAX );
12.595 MOV_r32_r32( R_EAX, R_ECX );
12.596 ADD_imm8s_r32( 4, R_EAX );
12.597 @@ -2345,7 +2267,6 @@
12.598 :}
12.599 LDS.L @Rm+, FPUL {:
12.600 load_reg( R_EAX, Rm );
12.601 - precheck();
12.602 check_ralign32( R_EAX );
12.603 MOV_r32_r32( R_EAX, R_ECX );
12.604 ADD_imm8s_r32( 4, R_EAX );
12.605 @@ -2360,7 +2281,6 @@
12.606 :}
12.607 LDS.L @Rm+, MACH {:
12.608 load_reg( R_EAX, Rm );
12.609 - precheck();
12.610 check_ralign32( R_EAX );
12.611 MOV_r32_r32( R_EAX, R_ECX );
12.612 ADD_imm8s_r32( 4, R_EAX );
12.613 @@ -2375,7 +2295,6 @@
12.614 :}
12.615 LDS.L @Rm+, MACL {:
12.616 load_reg( R_EAX, Rm );
12.617 - precheck();
12.618 check_ralign32( R_EAX );
12.619 MOV_r32_r32( R_EAX, R_ECX );
12.620 ADD_imm8s_r32( 4, R_EAX );
12.621 @@ -2390,7 +2309,6 @@
12.622 :}
12.623 LDS.L @Rm+, PR {:
12.624 load_reg( R_EAX, Rm );
12.625 - precheck();
12.626 check_ralign32( R_EAX );
12.627 MOV_r32_r32( R_EAX, R_ECX );
12.628 ADD_imm8s_r32( 4, R_EAX );
12.629 @@ -2469,8 +2387,7 @@
12.630 sh4_x86.tstate = TSTATE_NONE;
12.631 :}
12.632 STC.L SR, @-Rn {:
12.633 - precheck();
12.634 - check_priv_no_precheck();
12.635 + check_priv();
12.636 call_func0( sh4_read_sr );
12.637 load_reg( R_ECX, Rn );
12.638 check_walign32( R_ECX );
12.639 @@ -2480,8 +2397,7 @@
12.640 sh4_x86.tstate = TSTATE_NONE;
12.641 :}
12.642 STC.L VBR, @-Rn {:
12.643 - precheck();
12.644 - check_priv_no_precheck();
12.645 + check_priv();
12.646 load_reg( R_ECX, Rn );
12.647 check_walign32( R_ECX );
12.648 ADD_imm8s_r32( -4, R_ECX );
12.649 @@ -2491,8 +2407,7 @@
12.650 sh4_x86.tstate = TSTATE_NONE;
12.651 :}
12.652 STC.L SSR, @-Rn {:
12.653 - precheck();
12.654 - check_priv_no_precheck();
12.655 + check_priv();
12.656 load_reg( R_ECX, Rn );
12.657 check_walign32( R_ECX );
12.658 ADD_imm8s_r32( -4, R_ECX );
12.659 @@ -2502,8 +2417,7 @@
12.660 sh4_x86.tstate = TSTATE_NONE;
12.661 :}
12.662 STC.L SPC, @-Rn {:
12.663 - precheck();
12.664 - check_priv_no_precheck();
12.665 + check_priv();
12.666 load_reg( R_ECX, Rn );
12.667 check_walign32( R_ECX );
12.668 ADD_imm8s_r32( -4, R_ECX );
12.669 @@ -2513,8 +2427,7 @@
12.670 sh4_x86.tstate = TSTATE_NONE;
12.671 :}
12.672 STC.L SGR, @-Rn {:
12.673 - precheck();
12.674 - check_priv_no_precheck();
12.675 + check_priv();
12.676 load_reg( R_ECX, Rn );
12.677 check_walign32( R_ECX );
12.678 ADD_imm8s_r32( -4, R_ECX );
12.679 @@ -2524,8 +2437,7 @@
12.680 sh4_x86.tstate = TSTATE_NONE;
12.681 :}
12.682 STC.L DBR, @-Rn {:
12.683 - precheck();
12.684 - check_priv_no_precheck();
12.685 + check_priv();
12.686 load_reg( R_ECX, Rn );
12.687 check_walign32( R_ECX );
12.688 ADD_imm8s_r32( -4, R_ECX );
12.689 @@ -2535,8 +2447,7 @@
12.690 sh4_x86.tstate = TSTATE_NONE;
12.691 :}
12.692 STC.L Rm_BANK, @-Rn {:
12.693 - precheck();
12.694 - check_priv_no_precheck();
12.695 + check_priv();
12.696 load_reg( R_ECX, Rn );
12.697 check_walign32( R_ECX );
12.698 ADD_imm8s_r32( -4, R_ECX );
12.699 @@ -2547,7 +2458,6 @@
12.700 :}
12.701 STC.L GBR, @-Rn {:
12.702 load_reg( R_ECX, Rn );
12.703 - precheck();
12.704 check_walign32( R_ECX );
12.705 ADD_imm8s_r32( -4, R_ECX );
12.706 store_reg( R_ECX, Rn );
12.707 @@ -2561,7 +2471,6 @@
12.708 :}
12.709 STS.L FPSCR, @-Rn {:
12.710 load_reg( R_ECX, Rn );
12.711 - precheck();
12.712 check_walign32( R_ECX );
12.713 ADD_imm8s_r32( -4, R_ECX );
12.714 store_reg( R_ECX, Rn );
12.715 @@ -2575,7 +2484,6 @@
12.716 :}
12.717 STS.L FPUL, @-Rn {:
12.718 load_reg( R_ECX, Rn );
12.719 - precheck();
12.720 check_walign32( R_ECX );
12.721 ADD_imm8s_r32( -4, R_ECX );
12.722 store_reg( R_ECX, Rn );
12.723 @@ -2589,7 +2497,6 @@
12.724 :}
12.725 STS.L MACH, @-Rn {:
12.726 load_reg( R_ECX, Rn );
12.727 - precheck();
12.728 check_walign32( R_ECX );
12.729 ADD_imm8s_r32( -4, R_ECX );
12.730 store_reg( R_ECX, Rn );
12.731 @@ -2603,7 +2510,6 @@
12.732 :}
12.733 STS.L MACL, @-Rn {:
12.734 load_reg( R_ECX, Rn );
12.735 - precheck();
12.736 check_walign32( R_ECX );
12.737 ADD_imm8s_r32( -4, R_ECX );
12.738 store_reg( R_ECX, Rn );
12.739 @@ -2617,7 +2523,6 @@
12.740 :}
12.741 STS.L PR, @-Rn {:
12.742 load_reg( R_ECX, Rn );
12.743 - precheck();
12.744 check_walign32( R_ECX );
12.745 ADD_imm8s_r32( -4, R_ECX );
12.746 store_reg( R_ECX, Rn );
13.1 --- a/src/sh4/x86op.h Tue Jan 01 04:56:52 2008 +0000
13.2 +++ b/src/sh4/x86op.h Tue Jan 01 04:58:57 2008 +0000
13.3 @@ -233,23 +233,27 @@
13.4 #define JNS_rel8(rel,label) OP(0x79); OP(rel); MARK_JMP(rel,label)
13.5 #define JS_rel8(rel,label) OP(0x78); OP(rel); MARK_JMP(rel,label)
13.6
13.7 +/** JMP relative 8 or 32 depending on size of rel. rel offset
13.8 + * from the start of the instruction (not end)
13.9 + */
13.10 +#define JMP_rel(rel) if((rel)<-126||(rel)>129) { OP(0xE9); OP32((rel)-5); } else { OP(0xEB); OP((rel)-2); }
13.11
13.12 -/* 32-bit long forms w/ backpatching to an exit routine */
13.13 -#define JMP_exit(rel) OP(0xE9); sh4_x86_add_backpatch(xlat_output); OP32(rel)
13.14 -#define JE_exit(rel) OP(0x0F); OP(0x84); sh4_x86_add_backpatch(xlat_output); OP32(rel)
13.15 -#define JA_exit(rel) OP(0x0F); OP(0x87); sh4_x86_add_backpatch(xlat_output); OP32(rel)
13.16 -#define JAE_exit(rel) OP(0x0F); OP(0x83); sh4_x86_add_backpatch(xlat_output); OP32(rel)
13.17 -#define JG_exit(rel) OP(0x0F); OP(0x8F); sh4_x86_add_backpatch(xlat_output); OP32(rel)
13.18 -#define JGE_exit(rel) OP(0x0F); OP(0x8D); sh4_x86_add_backpatch(xlat_output); OP32(rel)
13.19 -#define JC_exit(rel) OP(0x0F); OP(0x82); sh4_x86_add_backpatch(xlat_output); OP32(rel)
13.20 -#define JO_exit(rel) OP(0x0F); OP(0x80); sh4_x86_add_backpatch(xlat_output); OP32(rel)
13.21 -#define JNE_exit(rel) OP(0x0F); OP(0x85); sh4_x86_add_backpatch(xlat_output); OP32(rel)
13.22 -#define JNA_exit(rel) OP(0x0F); OP(0x86); sh4_x86_add_backpatch(xlat_output); OP32(rel)
13.23 -#define JNAE_exit(rel) OP(0x0F);OP(0x82); sh4_x86_add_backpatch(xlat_output); OP32(rel)
13.24 -#define JNG_exit(rel) OP(0x0F); OP(0x8E); sh4_x86_add_backpatch(xlat_output); OP32(rel)
13.25 -#define JNGE_exit(rel) OP(0x0F);OP(0x8C); sh4_x86_add_backpatch(xlat_output); OP32(rel)
13.26 -#define JNC_exit(rel) OP(0x0F); OP(0x83); sh4_x86_add_backpatch(xlat_output); OP32(rel)
13.27 -#define JNO_exit(rel) OP(0x0F); OP(0x81); sh4_x86_add_backpatch(xlat_output); OP32(rel)
13.28 +/* 32-bit long forms w/ backpatching to an exception routine */
13.29 +#define JMP_exc(exc) OP(0xE9); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
13.30 +#define JE_exc(exc) OP(0x0F); OP(0x84); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
13.31 +#define JA_exc(exc) OP(0x0F); OP(0x87); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
13.32 +#define JAE_exc(exc) OP(0x0F); OP(0x83); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
13.33 +#define JG_exc(exc) OP(0x0F); OP(0x8F); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
13.34 +#define JGE_exc(exc) OP(0x0F); OP(0x8D); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
13.35 +#define JC_exc(exc) OP(0x0F); OP(0x82); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
13.36 +#define JO_exc(exc) OP(0x0F); OP(0x80); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
13.37 +#define JNE_exc(exc) OP(0x0F); OP(0x85); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
13.38 +#define JNA_exc(exc) OP(0x0F); OP(0x86); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
13.39 +#define JNAE_exc(exc) OP(0x0F);OP(0x82); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
13.40 +#define JNG_exc(exc) OP(0x0F); OP(0x8E); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
13.41 +#define JNGE_exc(exc) OP(0x0F);OP(0x8C); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
13.42 +#define JNC_exc(exc) OP(0x0F); OP(0x83); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
13.43 +#define JNO_exc(exc) OP(0x0F); OP(0x81); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
13.44
13.45
13.46 /* Conditional moves ebp-rel */
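The bounds in the new JMP_rel are worth deriving: rel is measured from the start of the jump, a short JMP (EB disp8) is 2 bytes and a long JMP (E9 disp32) is 5, so the short form's displacement rel-2 must fit a signed byte, giving exactly the -126..129 window the macro tests. As a quick check:

#include <assert.h>

/* Short-form JMP: disp8 = rel - 2 must lie in [-128, 127], so rel must
 * lie in [-126, 129]; anything outside takes the 5-byte E9 form with
 * disp32 = rel - 5. */
int main(void)
{
    assert( -126 - 2 == -128 );   /* smallest rel still short-encodable */
    assert(  129 - 2 ==  127 );   /* largest rel still short-encodable */
    return 0;
}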