lxdream.org :: lxdream :: r570:d2893980fbf5
changeset   570:d2893980fbf5 lxdream-mmu
parent      569:a1c49e1e8776
child       571:9bc09948d0f2
author      nkeynes
date        Sun Jan 06 12:24:18 2008 +0000
branch      lxdream-mmu
Change to generate different code for mmu on/off cases
src/sh4/ia32abi.h
src/sh4/ia32mac.h
src/sh4/ia64abi.h
src/sh4/mmu.c
src/sh4/sh4core.c
src/sh4/sh4core.h
src/sh4/sh4core.in
src/sh4/sh4mem.c
src/sh4/sh4x86.c
src/sh4/sh4x86.in
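
In outline: sh4_translate_begin_block() now samples the MMUCR.AT bit once into sh4_x86.tlb_on, and every memory access and block-exit stub is then emitted for exactly one of the two modes. Because compiled blocks become mode-specific, the MMUCR write handler in mmu.c below flushes the translation cache whenever AT toggles. A minimal stand-alone sketch of the sampling step, with a plain variable standing in for the MMIO_READ(MMU,MMUCR) state (AT is bit 0 of MMUCR on the SH4):

#include <stdio.h>
#include <stdint.h>

#define MMUCR_AT 0x01                  /* AT = address translation enable */

static uint32_t fake_mmucr;            /* stand-in for MMIO_READ(MMU,MMUCR) */

struct block_ctx { int tlb_on; };      /* mirrors sh4_x86.tlb_on */

/* Mirrors the begin-block change: AT is read once, and the whole
 * block is then generated for that mode. */
static void begin_block( struct block_ctx *ctx )
{
    ctx->tlb_on = fake_mmucr & MMUCR_AT;
}

int main(void)
{
    struct block_ctx ctx;
    fake_mmucr = 0;        begin_block(&ctx); printf("tlb_on=%d\n", ctx.tlb_on);
    fake_mmucr = MMUCR_AT; begin_block(&ctx); printf("tlb_on=%d\n", ctx.tlb_on);
    return 0;
}
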
1.1 --- a/src/sh4/ia32abi.h Fri Jan 04 11:54:17 2008 +0000
1.2 +++ b/src/sh4/ia32abi.h Sun Jan 06 12:24:18 2008 +0000
1.3 @@ -109,6 +109,7 @@
1.4 sh4_x86.branch_taken = FALSE;
1.5 sh4_x86.backpatch_posn = 0;
1.6 sh4_x86.block_start_pc = pc;
1.7 + sh4_x86.tlb_on = MMIO_READ(MMU,MMUCR)&MMUCR_AT;
1.8 sh4_x86.tstate = TSTATE_NONE;
1.9 #ifdef STACK_ALIGN
1.10 sh4_x86.stack_posn = 8;
1.11 @@ -124,7 +125,11 @@
1.12 load_imm32( R_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
1.13 ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) ); // 6
1.14 load_spreg( R_EAX, REG_OFFSET(pc) );
1.15 - call_func1(xlat_get_code_by_vma,R_EAX);
1.16 + if( sh4_x86.tlb_on ) {
1.17 + call_func1(xlat_get_code_by_vma,R_EAX);
1.18 + } else {
1.19 + call_func1(xlat_get_code,R_EAX);
1.20 + }
1.21 POP_r32(R_EBP);
1.22 RET();
1.23 }
1.24 @@ -167,7 +172,11 @@
1.25 call_func0( sh4_raise_exception );
1.26 ADD_imm8s_r32( 4, R_ESP );
1.27 load_spreg( R_EAX, REG_OFFSET(pc) );
1.28 - call_func1(xlat_get_code_by_vma,R_EAX);
1.29 + if( sh4_x86.tlb_on ) {
1.30 + call_func1(xlat_get_code_by_vma,R_EAX);
1.31 + } else {
1.32 + call_func1(xlat_get_code,R_EAX);
1.33 + }
1.34 POP_r32(R_EBP);
1.35 RET();
1.36
1.37 @@ -181,7 +190,11 @@
1.38 MUL_r32( R_EDX );
1.39 ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );
1.40 load_spreg( R_EAX, REG_OFFSET(pc) );
1.41 - call_func1(xlat_get_code_by_vma,R_EAX);
1.42 + if( sh4_x86.tlb_on ) {
1.43 + call_func1(xlat_get_code_by_vma,R_EAX);
1.44 + } else {
1.45 + call_func1(xlat_get_code,R_EAX);
1.46 + }
1.47 POP_r32(R_EBP);
1.48 RET();
1.49
2.1 --- a/src/sh4/ia32mac.h Fri Jan 04 11:54:17 2008 +0000
2.2 +++ b/src/sh4/ia32mac.h Sun Jan 06 12:24:18 2008 +0000
2.3 @@ -135,6 +135,7 @@
2.4 sh4_x86.backpatch_posn = 0;
2.5 sh4_x86.block_start_pc = pc;
2.6 sh4_x86.tstate = TSTATE_NONE;
2.7 + sh4_x86.tlb_on = MMIO_READ(MMU,MMUCR)&MMUCR_AT;
2.8 sh4_x86.stack_posn = 8;
2.9 }
2.10
2.11 @@ -147,7 +148,11 @@
2.12 load_imm32( R_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
2.13 ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) ); // 6
2.14 load_spreg( R_EAX, REG_OFFSET(pc) );
2.15 - call_func1(xlat_get_code_by_vma,R_EAX);
2.16 + if( sh4_x86.tlb_on ) {
2.17 + call_func1(xlat_get_code_by_vma,R_EAX);
2.18 + } else {
2.19 + call_func1(xlat_get_code,R_EAX);
2.20 + }
2.21 POP_r32(R_EBP);
2.22 RET();
2.23 }
2.24 @@ -190,7 +195,11 @@
2.25 POP_r32(R_EDX);
2.26 call_func1( sh4_raise_exception, R_EDX );
2.27 load_spreg( R_EAX, REG_OFFSET(pc) );
2.28 - call_func1(xlat_get_code_by_vma,R_EAX);
2.29 + if( sh4_x86.tlb_on ) {
2.30 + call_func1(xlat_get_code_by_vma,R_EAX);
2.31 + } else {
2.32 + call_func1(xlat_get_code,R_EAX);
2.33 + }
2.34 POP_r32(R_EBP);
2.35 RET();
2.36
2.37 @@ -204,7 +213,11 @@
2.38 MUL_r32( R_EDX );
2.39 ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );
2.40 load_spreg( R_EAX, REG_OFFSET(pc) );
2.41 - call_func1(xlat_get_code_by_vma,R_EAX);
2.42 + if( sh4_x86.tlb_on ) {
2.43 + call_func1(xlat_get_code_by_vma,R_EAX);
2.44 + } else {
2.45 + call_func1(xlat_get_code,R_EAX);
2.46 + }
2.47 POP_r32(R_EBP);
2.48 RET();
2.49
3.1 --- a/src/sh4/ia64abi.h Fri Jan 04 11:54:17 2008 +0000
3.2 +++ b/src/sh4/ia64abi.h Sun Jan 06 12:24:18 2008 +0000
3.3 @@ -102,6 +102,7 @@
3.4 sh4_x86.branch_taken = FALSE;
3.5 sh4_x86.backpatch_posn = 0;
3.6 sh4_x86.block_start_pc = pc;
3.7 + sh4_x86.tlb_on = MMIO_READ(MMU,MMUCR)&MMUCR_AT;
3.8 sh4_x86.tstate = TSTATE_NONE;
3.9 }
3.10
3.11 @@ -114,7 +115,11 @@
3.12 load_imm32( R_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
3.13 ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) ); // 6
3.14 load_spreg( R_EAX, REG_OFFSET(pc) );
3.15 - call_func1(xlat_get_code_by_vma,R_EAX);
3.16 + if( sh4_x86.tlb_on ) {
3.17 + call_func1(xlat_get_code_by_vma,R_EAX);
3.18 + } else {
3.19 + call_func1(xlat_get_code,R_EAX);
3.20 + }
3.21 POP_r32(R_EBP);
3.22 RET();
3.23 }
3.24 @@ -157,7 +162,11 @@
3.25
3.26 call_func0( sh4_raise_exception );
3.27 load_spreg( R_EAX, REG_OFFSET(pc) );
3.28 - call_func1(xlat_get_code_by_vma,R_EAX);
3.29 + if( sh4_x86.tlb_on ) {
3.30 + call_func1(xlat_get_code_by_vma,R_EAX);
3.31 + } else {
3.32 + call_func1(xlat_get_code,R_EAX);
3.33 + }
3.34 POP_r32(R_EBP);
3.35 RET();
3.36
3.37 @@ -171,7 +180,11 @@
3.38 MUL_r32( R_EDX );
3.39 ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );
3.40 load_spreg( R_EAX, REG_OFFSET(pc) );
3.41 - call_func1(xlat_get_code_by_vma,R_EAX);
3.42 + if( sh4_x86.tlb_on ) {
3.43 + call_func1(xlat_get_code_by_vma,R_EAX);
3.44 + } else {
3.45 + call_func1(xlat_get_code,R_EAX);
3.46 + }
3.47 POP_r32(R_EBP);
3.48 RET();
3.49
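
The epilogue change above is applied identically to all three ABI variants (ia32abi.h, ia32mac.h, ia64abi.h): a block-exit stub calls xlat_get_code_by_vma only if the block was compiled with the TLB on, and plain xlat_get_code otherwise. The choice is made while emitting code, so the finished block contains a single direct call rather than a runtime test. A stand-alone illustration of that translation-time dispatch (the two lookup functions here are printing stubs, not lxdream's real ones):

#include <stdio.h>
#include <stdint.h>

typedef void *(*code_lookup_fn)(uint32_t pc);

static void *xlat_get_code(uint32_t pc)        { printf("phys lookup %08X\n", pc); return NULL; }
static void *xlat_get_code_by_vma(uint32_t pc) { printf("vma lookup  %08X\n", pc); return NULL; }

/* Translation-time selection: exactly one callee is baked into the
 * epilogue, mirroring if( sh4_x86.tlb_on ){...}else{...} above. */
static code_lookup_fn select_epilogue_target( int tlb_on )
{
    return tlb_on ? xlat_get_code_by_vma : xlat_get_code;
}

int main(void)
{
    select_epilogue_target(0)(0x8C0010A0);
    select_epilogue_target(1)(0x8C0010A0);
    return 0;
}
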
4.1 --- a/src/sh4/mmu.c Fri Jan 04 11:54:17 2008 +0000
4.2 +++ b/src/sh4/mmu.c Sun Jan 06 12:24:18 2008 +0000
4.3 @@ -22,6 +22,41 @@
4.4 #include "sh4/sh4core.h"
4.5 #include "mem.h"
4.6
4.7 +#define VMA_TO_EXT_ADDR(vma) ((vma)&0x1FFFFFFF)
4.8 +
4.9 +/* The MMU (practically unique in the system) is allowed to raise exceptions
4.10 + * directly, with a return code indicating that one was raised and the caller
4.11 + * had better behave appropriately.
4.12 + */
4.13 +#define RAISE_TLB_ERROR(code, vpn) \
4.14 + MMIO_WRITE(MMU, TEA, vpn); \
4.15 + MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
4.16 + sh4_raise_tlb_exception(code);
4.17 +
4.18 +#define RAISE_MEM_ERROR(code, vpn) \
4.19 + MMIO_WRITE(MMU, TEA, vpn); \
4.20 + MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
4.21 + sh4_raise_exception(code);
4.22 +
4.23 +#define RAISE_OTHER_ERROR(code) \
4.24 + sh4_raise_exception(code);
4.25 +/**
4.26 + * Abort with a non-MMU address error. Caused by user-mode code attempting
4.27 + * to access privileged regions, or alignment faults.
4.28 + */
4.29 +#define MMU_READ_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_READ)
4.30 +#define MMU_WRITE_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_WRITE)
4.31 +
4.32 +#define MMU_TLB_READ_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_READ, vpn)
4.33 +#define MMU_TLB_WRITE_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, vpn)
4.34 +#define MMU_TLB_INITIAL_WRITE_ERROR(vpn) RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, vpn)
4.35 +#define MMU_TLB_READ_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_READ, vpn)
4.36 +#define MMU_TLB_WRITE_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, vpn)
4.37 +#define MMU_TLB_MULTI_HIT_ERROR(vpn) sh4_raise_reset(EXC_TLB_MULTI_HIT); \
4.38 + MMIO_WRITE(MMU, TEA, vpn); \
4.39 + MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00)));
4.40 +
4.41 +
4.42 #define OCRAM_START (0x1C000000>>PAGE_BITS)
4.43 #define OCRAM_END (0x20000000>>PAGE_BITS)
4.44
4.45 @@ -99,6 +134,7 @@
4.46
4.47 void mmio_region_MMU_write( uint32_t reg, uint32_t val )
4.48 {
4.49 + uint32_t tmp;
4.50 switch(reg) {
4.51 case PTEH:
4.52 val &= 0xFFFFFCFF;
4.53 @@ -121,6 +157,13 @@
4.54 mmu_urb = (val >> 18) & 0x3F;
4.55 mmu_lrui = (val >> 26) & 0x3F;
4.56 val &= 0x00000301;
4.57 + tmp = MMIO_READ( MMU, MMUCR );
4.58 + if( ((val ^ tmp) & MMUCR_AT) ) {
4.59 + // AT flag has changed state - flush the xlat cache as all bets
4.60 + // are off now. We also need to force an immediate exit from the
4.61 + // current block
4.62 + xlat_flush_cache();
4.63 + }
4.64 break;
4.65 case CCR:
4.66 mmu_set_cache_mode( val & (CCR_OIX|CCR_ORA) );
4.67 @@ -150,6 +193,7 @@
4.68 fwrite( &mmu_urc, sizeof(mmu_urc), 1, f );
4.69 fwrite( &mmu_urb, sizeof(mmu_urb), 1, f );
4.70 fwrite( &mmu_lrui, sizeof(mmu_lrui), 1, f );
4.71 + fwrite( &mmu_asid, sizeof(mmu_asid), 1, f );
4.72 }
4.73
4.74 int MMU_load_state( FILE *f )
4.75 @@ -176,6 +220,9 @@
4.76 if( fread( &mmu_lrui, sizeof(mmu_lrui), 1, f ) != 1 ) {
4.77 return 1;
4.78 }
4.79 + if( fread( &mmu_asid, sizeof(mmu_asid), 1, f ) != 1 ) {
4.80 + return 1;
4.81 + }
4.82 return 0;
4.83 }
4.84
4.85 @@ -214,28 +261,192 @@
4.86 mmu_utlb[mmu_urc].mask = get_mask_for_flags(mmu_utlb[mmu_urc].flags);
4.87 }
4.88
4.89 -static inline void mmu_flush_pages( struct utlb_entry *ent )
4.90 +static void mmu_invalidate_tlb()
4.91 {
4.92 - unsigned int vpn;
4.93 - switch( ent->flags & TLB_SIZE_MASK ) {
4.94 - case TLB_SIZE_1K: xlat_flush_page( ent->vpn ); break;
4.95 - case TLB_SIZE_4K: xlat_flush_page( ent->vpn ); break;
4.96 - case TLB_SIZE_64K:
4.97 - for( vpn = ent->vpn; vpn < ent->vpn + 0x10000; vpn += 0x1000 ) {
4.98 - xlat_flush_page( vpn );
4.99 - }
4.100 - break;
4.101 - case TLB_SIZE_1M:
4.102 - for( vpn = ent->vpn; vpn < ent->vpn + 0x100000; vpn += 0x1000 ) {
4.103 - xlat_flush_page( vpn );
4.104 - }
4.105 - break;
4.106 + int i;
4.107 + for( i=0; i<ITLB_ENTRY_COUNT; i++ ) {
4.108 + mmu_itlb[i].flags &= (~TLB_VALID);
4.109 + }
4.110 + for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
4.111 + mmu_utlb[i].flags &= (~TLB_VALID);
4.112 + }
4.113 +}
4.114 +
4.115 +#define ITLB_ENTRY(addr) ((addr>>7)&0x03)
4.116 +
4.117 +int32_t mmu_itlb_addr_read( sh4addr_t addr )
4.118 +{
4.119 + struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
4.120 + return ent->vpn | ent->asid | (ent->flags & TLB_VALID);
4.121 +}
4.122 +int32_t mmu_itlb_data_read( sh4addr_t addr )
4.123 +{
4.124 + struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
4.125 + return ent->ppn | ent->flags;
4.126 +}
4.127 +
4.128 +void mmu_itlb_addr_write( sh4addr_t addr, uint32_t val )
4.129 +{
4.130 + struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
4.131 + ent->vpn = val & 0xFFFFFC00;
4.132 + ent->asid = val & 0x000000FF;
4.133 + ent->flags = (ent->flags & ~(TLB_VALID)) | (val&TLB_VALID);
4.134 +}
4.135 +
4.136 +void mmu_itlb_data_write( sh4addr_t addr, uint32_t val )
4.137 +{
4.138 + struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
4.139 + ent->ppn = val & 0x1FFFFC00;
4.140 + ent->flags = val & 0x00001DA;
4.141 + ent->mask = get_mask_for_flags(val);
4.142 +}
4.143 +
4.144 +#define UTLB_ENTRY(addr) ((addr>>8)&0x3F)
4.145 +#define UTLB_ASSOC(addr) (addr&0x80)
4.146 +#define UTLB_DATA2(addr) (addr&0x00800000)
4.147 +
4.148 +int32_t mmu_utlb_addr_read( sh4addr_t addr )
4.149 +{
4.150 + struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
4.151 + return ent->vpn | ent->asid | (ent->flags & TLB_VALID) |
4.152 + ((ent->flags & TLB_DIRTY)<<7);
4.153 +}
4.154 +int32_t mmu_utlb_data_read( sh4addr_t addr )
4.155 +{
4.156 + struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
4.157 + if( UTLB_DATA2(addr) ) {
4.158 + return ent->pcmcia;
4.159 + } else {
4.160 + return ent->ppn | ent->flags;
4.161 }
4.162 }
4.163
4.164 /**
4.165 + * Find a UTLB entry for the associative TLB write - same as the normal
4.166 + * lookup but ignores the valid bit.
4.167 + */
4.168 +static inline int mmu_utlb_lookup_assoc( uint32_t vpn, uint32_t asid )
4.169 +{
4.170 + int result = -1;
4.171 + unsigned int i;
4.172 + for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
4.173 + if( ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
4.174 + ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
4.175 + if( result != -1 ) {
4.176 + return -2;
4.177 + }
4.178 + result = i;
4.179 + }
4.180 + }
4.181 + return result;
4.182 +}
4.183 +
4.184 +/**
4.185 + * Find an ITLB entry for the associative TLB write - same as the normal
4.186 + * lookup but ignores the valid bit.
4.187 + */
4.188 +static inline int mmu_itlb_lookup_assoc( uint32_t vpn, uint32_t asid )
4.189 +{
4.190 + int result = -1;
4.191 + unsigned int i;
4.192 + for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
4.193 + if( ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
4.194 + ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
4.195 + if( result != -1 ) {
4.196 + return -2;
4.197 + }
4.198 + result = i;
4.199 + }
4.200 + }
4.201 + return result;
4.202 +}
4.203 +
4.204 +void mmu_utlb_addr_write( sh4addr_t addr, uint32_t val )
4.205 +{
4.206 + if( UTLB_ASSOC(addr) ) {
4.207 + uint32_t asid = MMIO_READ( MMU, PTEH ) & 0xFF;
4.208 + int utlb = mmu_utlb_lookup_assoc( val, asid );
4.209 + if( utlb >= 0 ) {
4.210 + struct utlb_entry *ent = &mmu_utlb[utlb];
4.211 + ent->flags = ent->flags & ~(TLB_DIRTY|TLB_VALID);
4.212 + ent->flags |= (val & TLB_VALID);
4.213 + ent->flags |= ((val & 0x200)>>7);
4.214 + }
4.215 +
4.216 + int itlb = mmu_itlb_lookup_assoc( val, asid );
4.217 + if( itlb >= 0 ) {
4.218 + struct itlb_entry *ent = &mmu_itlb[itlb];
4.219 + ent->flags = (ent->flags & (~TLB_VALID)) | (val & TLB_VALID);
4.220 + }
4.221 +
4.222 + if( itlb == -2 || utlb == -2 ) {
4.223 + MMU_TLB_MULTI_HIT_ERROR(addr);
4.224 + return;
4.225 + }
4.226 + } else {
4.227 + struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
4.228 + ent->vpn = (val & 0xFFFFFC00);
4.229 + ent->asid = (val & 0xFF);
4.230 + ent->flags = (ent->flags & ~(TLB_DIRTY|TLB_VALID));
4.231 + ent->flags |= (val & TLB_VALID);
4.232 + ent->flags |= ((val & 0x200)>>7);
4.233 + }
4.234 +}
4.235 +
4.236 +void mmu_utlb_data_write( sh4addr_t addr, uint32_t val )
4.237 +{
4.238 + struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
4.239 + if( UTLB_DATA2(addr) ) {
4.240 + ent->pcmcia = val & 0x0000000F;
4.241 + } else {
4.242 + ent->ppn = (val & 0x1FFFFC00);
4.243 + ent->flags = (val & 0x000001FF);
4.244 + ent->mask = get_mask_for_flags(val);
4.245 + }
4.246 +}
4.247 +
4.248 +/* Cache access - not implemented */
4.249 +
4.250 +int32_t mmu_icache_addr_read( sh4addr_t addr )
4.251 +{
4.252 + return 0; // not implemented
4.253 +}
4.254 +int32_t mmu_icache_data_read( sh4addr_t addr )
4.255 +{
4.256 + return 0; // not implemented
4.257 +}
4.258 +int32_t mmu_ocache_addr_read( sh4addr_t addr )
4.259 +{
4.260 + return 0; // not implemented
4.261 +}
4.262 +int32_t mmu_ocache_data_read( sh4addr_t addr )
4.263 +{
4.264 + return 0; // not implemented
4.265 +}
4.266 +
4.267 +void mmu_icache_addr_write( sh4addr_t addr, uint32_t val )
4.268 +{
4.269 +}
4.270 +
4.271 +void mmu_icache_data_write( sh4addr_t addr, uint32_t val )
4.272 +{
4.273 +}
4.274 +
4.275 +void mmu_ocache_addr_write( sh4addr_t addr, uint32_t val )
4.276 +{
4.277 +}
4.278 +
4.279 +void mmu_ocache_data_write( sh4addr_t addr, uint32_t val )
4.280 +{
4.281 +}
4.282 +
4.283 +/******************************************************************************/
4.284 +/* MMU TLB address translation */
4.285 +/******************************************************************************/
4.286 +
4.287 +/**
4.288 * The translations are excessively complicated, but unfortunately it's a
4.289 - * complicated system. It can undoubtedly be better optimized too.
4.290 + * complicated system. TODO: make this not be painfully slow.
4.291 */
4.292
4.293 /**
4.294 @@ -303,26 +514,6 @@
4.295 }
4.296
4.297 /**
4.298 - * Find a UTLB entry for the associative TLB write - same as the normal
4.299 - * lookup but ignores the valid bit.
4.300 - */
4.301 -static inline mmu_utlb_lookup_assoc( uint32_t vpn, uint32_t asid )
4.302 -{
4.303 - int result = -1;
4.304 - unsigned int i;
4.305 - for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
4.306 - if( ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
4.307 - ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
4.308 - if( result != -1 ) {
4.309 - return -2;
4.310 - }
4.311 - result = i;
4.312 - }
4.313 - }
4.314 - return result;
4.315 -}
4.316 -
4.317 -/**
4.318 * Update the ITLB by replacing the LRU entry with the specified UTLB entry.
4.319 * @return the number (0-3) of the replaced entry.
4.320 */
4.321 @@ -439,77 +630,85 @@
4.322 return result;
4.323 }
4.324
4.325 -/**
4.326 - * Find a ITLB entry for the associative TLB write - same as the normal
4.327 - * lookup but ignores the valid bit.
4.328 - */
4.329 -static inline mmu_itlb_lookup_assoc( uint32_t vpn, uint32_t asid )
4.330 -{
4.331 - int result = -1;
4.332 - unsigned int i;
4.333 - for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
4.334 - if( ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
4.335 - ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
4.336 - if( result != -1 ) {
4.337 - return -2;
4.338 - }
4.339 - result = i;
4.340 - }
4.341 - }
4.342 - return result;
4.343 -}
4.344 -
4.345 -#define RAISE_TLB_ERROR(code, vpn) \
4.346 - MMIO_WRITE(MMU, TEA, vpn); \
4.347 - MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
4.348 - sh4_raise_tlb_exception(code);
4.349 -
4.350 -#define RAISE_MEM_ERROR(code, vpn) \
4.351 - MMIO_WRITE(MMU, TEA, vpn); \
4.352 - MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
4.353 - sh4_raise_exception(code);
4.354 -
4.355 -#define RAISE_OTHER_ERROR(code) \
4.356 - sh4_raise_exception(code);
4.357 -
4.358 -/**
4.359 - * Abort with a non-MMU address error. Caused by user-mode code attempting
4.360 - * to access privileged regions, or alignment faults.
4.361 - */
4.362 -#define MMU_READ_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_READ)
4.363 -#define MMU_WRITE_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_WRITE)
4.364 -
4.365 -#define MMU_TLB_READ_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_READ, vpn)
4.366 -#define MMU_TLB_WRITE_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, vpn)
4.367 -#define MMU_TLB_INITIAL_WRITE_ERROR(vpn) RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, vpn)
4.368 -#define MMU_TLB_READ_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_READ, vpn)
4.369 -#define MMU_TLB_WRITE_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, vpn)
4.370 -#define MMU_TLB_MULTI_HIT_ERROR(vpn) sh4_raise_reset(EXC_TLB_MULTI_HIT); \
4.371 - MMIO_WRITE(MMU, TEA, vpn); \
4.372 - MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00)));
4.373 -
4.374 -uint64_t mmu_vma_to_phys_write( sh4addr_t addr )
4.375 +sh4addr_t mmu_vma_to_phys_read( sh4vma_t addr )
4.376 {
4.377 uint32_t mmucr = MMIO_READ(MMU,MMUCR);
4.378 if( addr & 0x80000000 ) {
4.379 if( IS_SH4_PRIVMODE() ) {
4.380 - if( addr < 0xC0000000 || addr >= 0xE0000000 ) {
4.381 - /* P1, P2 and P4 regions are pass-through (no translation) */
4.382 - return (uint64_t)addr;
4.383 + if( addr >= 0xE0000000 ) {
4.384 + return addr; /* P4 - passthrough */
4.385 + } else if( addr < 0xC0000000 ) {
4.386 + /* P1, P2 regions are pass-through (no translation) */
4.387 + return VMA_TO_EXT_ADDR(addr);
4.388 }
4.389 } else {
4.390 if( addr >= 0xE0000000 && addr < 0xE4000000 &&
4.391 ((mmucr&MMUCR_SQMD) == 0) ) {
4.392 /* Conditional user-mode access to the store-queue (no translation) */
4.393 - return (uint64_t)addr;
4.394 + return addr;
4.395 }
4.396 - MMU_WRITE_ADDR_ERROR();
4.397 - return 0x100000000LL;
4.398 + MMU_READ_ADDR_ERROR();
4.399 + return MMU_VMA_ERROR;
4.400 }
4.401 }
4.402
4.403 if( (mmucr & MMUCR_AT) == 0 ) {
4.404 - return (uint64_t)addr;
4.405 + return VMA_TO_EXT_ADDR(addr);
4.406 + }
4.407 +
4.408 + /* If we get this far, translation is required */
4.409 + int entryNo;
4.410 + if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
4.411 + entryNo = mmu_utlb_lookup_vpn_asid( addr );
4.412 + } else {
4.413 + entryNo = mmu_utlb_lookup_vpn( addr );
4.414 + }
4.415 +
4.416 + switch(entryNo) {
4.417 + case -1:
4.418 + MMU_TLB_READ_MISS_ERROR(addr);
4.419 + return MMU_VMA_ERROR;
4.420 + case -2:
4.421 + MMU_TLB_MULTI_HIT_ERROR(addr);
4.422 + return MMU_VMA_ERROR;
4.423 + default:
4.424 + if( (mmu_utlb[entryNo].flags & TLB_USERMODE) == 0 &&
4.425 + !IS_SH4_PRIVMODE() ) {
4.426 + /* protection violation */
4.427 + MMU_TLB_READ_PROT_ERROR(addr);
4.428 + return MMU_VMA_ERROR;
4.429 + }
4.430 +
4.431 + /* finally generate the target address */
4.432 + return (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
4.433 + (addr & (~mmu_utlb[entryNo].mask));
4.434 + }
4.435 +}
4.436 +
4.437 +sh4addr_t mmu_vma_to_phys_write( sh4vma_t addr )
4.438 +{
4.439 + uint32_t mmucr = MMIO_READ(MMU,MMUCR);
4.440 + if( addr & 0x80000000 ) {
4.441 + if( IS_SH4_PRIVMODE() ) {
4.442 + if( addr >= 0xE0000000 ) {
4.443 + return addr; /* P4 - passthrough */
4.444 + } else if( addr < 0xC0000000 ) {
4.445 + /* P1, P2 regions are pass-through (no translation) */
4.446 + return VMA_TO_EXT_ADDR(addr);
4.447 + }
4.448 + } else {
4.449 + if( addr >= 0xE0000000 && addr < 0xE4000000 &&
4.450 + ((mmucr&MMUCR_SQMD) == 0) ) {
4.451 + /* Conditional user-mode access to the store-queue (no translation) */
4.452 + return addr;
4.453 + }
4.454 + MMU_WRITE_ADDR_ERROR();
4.455 + return MMU_VMA_ERROR;
4.456 + }
4.457 + }
4.458 +
4.459 + if( (mmucr & MMUCR_AT) == 0 ) {
4.460 + return VMA_TO_EXT_ADDR(addr);
4.461 }
4.462
4.463 /* If we get this far, translation is required */
4.464 @@ -523,213 +722,27 @@
4.465 switch(entryNo) {
4.466 case -1:
4.467 MMU_TLB_WRITE_MISS_ERROR(addr);
4.468 - return 0x100000000LL;
4.469 + return MMU_VMA_ERROR;
4.470 case -2:
4.471 MMU_TLB_MULTI_HIT_ERROR(addr);
4.472 - return 0x100000000LL;
4.473 + return MMU_VMA_ERROR;
4.474 default:
4.475 if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
4.476 : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
4.477 /* protection violation */
4.478 MMU_TLB_WRITE_PROT_ERROR(addr);
4.479 - return 0x100000000LL;
4.480 + return MMU_VMA_ERROR;
4.481 }
4.482
4.483 if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
4.484 MMU_TLB_INITIAL_WRITE_ERROR(addr);
4.485 - return 0x100000000LL;
4.486 + return MMU_VMA_ERROR;
4.487 }
4.488
4.489 /* finally generate the target address */
4.490 return (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
4.491 (addr & (~mmu_utlb[entryNo].mask));
4.492 }
4.493 - return -1;
4.494 -
4.495 -}
4.496 -
4.497 -uint64_t mmu_vma_to_phys_read( sh4addr_t addr )
4.498 -{
4.499 - uint32_t mmucr = MMIO_READ(MMU,MMUCR);
4.500 - if( addr & 0x80000000 ) {
4.501 - if( IS_SH4_PRIVMODE() ) {
4.502 - if( addr < 0xC0000000 || addr >= 0xE0000000 ) {
4.503 - /* P1, P2 and P4 regions are pass-through (no translation) */
4.504 - return (uint64_t)addr;
4.505 - }
4.506 - } else {
4.507 - if( addr >= 0xE0000000 && addr < 0xE4000000 &&
4.508 - ((mmucr&MMUCR_SQMD) == 0) ) {
4.509 - /* Conditional user-mode access to the store-queue (no translation) */
4.510 - return (uint64_t)addr;
4.511 - }
4.512 - MMU_READ_ADDR_ERROR();
4.513 - return 0x100000000LL;
4.514 - }
4.515 - }
4.516 -
4.517 - if( (mmucr & MMUCR_AT) == 0 ) {
4.518 - return (uint64_t)addr;
4.519 - }
4.520 -
4.521 - /* If we get this far, translation is required */
4.522 - int entryNo;
4.523 - if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
4.524 - entryNo = mmu_utlb_lookup_vpn_asid( addr );
4.525 - } else {
4.526 - entryNo = mmu_utlb_lookup_vpn( addr );
4.527 - }
4.528 -
4.529 - switch(entryNo) {
4.530 - case -1:
4.531 - MMU_TLB_READ_MISS_ERROR(addr);
4.532 - return 0x100000000LL;
4.533 - case -2:
4.534 - MMU_TLB_MULTI_HIT_ERROR(addr);
4.535 - return 0x100000000LL;
4.536 - default:
4.537 - if( (mmu_utlb[entryNo].flags & TLB_USERMODE) == 0 &&
4.538 - !IS_SH4_PRIVMODE() ) {
4.539 - /* protection violation */
4.540 - MMU_TLB_READ_PROT_ERROR(addr);
4.541 - return 0x100000000LL;
4.542 - }
4.543 -
4.544 - /* finally generate the target address */
4.545 - return (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
4.546 - (addr & (~mmu_utlb[entryNo].mask));
4.547 - }
4.548 - return -1;
4.549 -}
4.550 -
4.551 -static void mmu_invalidate_tlb()
4.552 -{
4.553 - int i;
4.554 - for( i=0; i<ITLB_ENTRY_COUNT; i++ ) {
4.555 - mmu_itlb[i].flags &= (~TLB_VALID);
4.556 - }
4.557 - for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
4.558 - mmu_utlb[i].flags &= (~TLB_VALID);
4.559 - }
4.560 -}
4.561 -
4.562 -#define ITLB_ENTRY(addr) ((addr>>7)&0x03)
4.563 -
4.564 -int32_t mmu_itlb_addr_read( sh4addr_t addr )
4.565 -{
4.566 - struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
4.567 - return ent->vpn | ent->asid | (ent->flags & TLB_VALID);
4.568 -}
4.569 -int32_t mmu_itlb_data_read( sh4addr_t addr )
4.570 -{
4.571 - struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
4.572 - return ent->ppn | ent->flags;
4.573 -}
4.574 -
4.575 -void mmu_itlb_addr_write( sh4addr_t addr, uint32_t val )
4.576 -{
4.577 - struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
4.578 - ent->vpn = val & 0xFFFFFC00;
4.579 - ent->asid = val & 0x000000FF;
4.580 - ent->flags = (ent->flags & ~(TLB_VALID)) | (val&TLB_VALID);
4.581 -}
4.582 -
4.583 -void mmu_itlb_data_write( sh4addr_t addr, uint32_t val )
4.584 -{
4.585 - struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
4.586 - ent->ppn = val & 0x1FFFFC00;
4.587 - ent->flags = val & 0x00001DA;
4.588 - ent->mask = get_mask_for_flags(val);
4.589 -}
4.590 -
4.591 -#define UTLB_ENTRY(addr) ((addr>>8)&0x3F)
4.592 -#define UTLB_ASSOC(addr) (addr&0x80)
4.593 -#define UTLB_DATA2(addr) (addr&0x00800000)
4.594 -
4.595 -int32_t mmu_utlb_addr_read( sh4addr_t addr )
4.596 -{
4.597 - struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
4.598 - return ent->vpn | ent->asid | (ent->flags & TLB_VALID) |
4.599 - ((ent->flags & TLB_DIRTY)<<7);
4.600 -}
4.601 -int32_t mmu_utlb_data_read( sh4addr_t addr )
4.602 -{
4.603 - struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
4.604 - if( UTLB_DATA2(addr) ) {
4.605 - return ent->pcmcia;
4.606 - } else {
4.607 - return ent->ppn | ent->flags;
4.608 - }
4.609 -}
4.610 -
4.611 -void mmu_utlb_addr_write( sh4addr_t addr, uint32_t val )
4.612 -{
4.613 - if( UTLB_ASSOC(addr) ) {
4.614 - uint32_t asid = MMIO_READ( MMU, PTEH ) & 0xFF;
4.615 - int entryNo = mmu_utlb_lookup_assoc( val, asid );
4.616 - if( entryNo >= 0 ) {
4.617 - struct utlb_entry *ent = &mmu_utlb[entryNo];
4.618 - ent->flags = ent->flags & ~(TLB_DIRTY|TLB_VALID);
4.619 - ent->flags |= (val & TLB_VALID);
4.620 - ent->flags |= ((val & 0x200)>>7);
4.621 - } else if( entryNo == -2 ) {
4.622 - MMU_TLB_MULTI_HIT_ERROR(addr);
4.623 - }
4.624 - } else {
4.625 - struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
4.626 - ent->vpn = (val & 0xFFFFFC00);
4.627 - ent->asid = (val & 0xFF);
4.628 - ent->flags = (ent->flags & ~(TLB_DIRTY|TLB_VALID));
4.629 - ent->flags |= (val & TLB_VALID);
4.630 - ent->flags |= ((val & 0x200)>>7);
4.631 - }
4.632 -}
4.633 -
4.634 -void mmu_utlb_data_write( sh4addr_t addr, uint32_t val )
4.635 -{
4.636 - struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
4.637 - if( UTLB_DATA2(addr) ) {
4.638 - ent->pcmcia = val & 0x0000000F;
4.639 - } else {
4.640 - ent->ppn = (val & 0x1FFFFC00);
4.641 - ent->flags = (val & 0x000001FF);
4.642 - ent->mask = get_mask_for_flags(val);
4.643 - }
4.644 -}
4.645 -
4.646 -/* Cache access - not implemented */
4.647 -
4.648 -int32_t mmu_icache_addr_read( sh4addr_t addr )
4.649 -{
4.650 - return 0; // not implemented
4.651 -}
4.652 -int32_t mmu_icache_data_read( sh4addr_t addr )
4.653 -{
4.654 - return 0; // not implemented
4.655 -}
4.656 -int32_t mmu_ocache_addr_read( sh4addr_t addr )
4.657 -{
4.658 - return 0; // not implemented
4.659 -}
4.660 -int32_t mmu_ocache_data_read( sh4addr_t addr )
4.661 -{
4.662 - return 0; // not implemented
4.663 -}
4.664 -
4.665 -void mmu_icache_addr_write( sh4addr_t addr, uint32_t val )
4.666 -{
4.667 -}
4.668 -
4.669 -void mmu_icache_data_write( sh4addr_t addr, uint32_t val )
4.670 -{
4.671 -}
4.672 -
4.673 -void mmu_ocache_addr_write( sh4addr_t addr, uint32_t val )
4.674 -{
4.675 -}
4.676 -
4.677 -void mmu_ocache_data_write( sh4addr_t addr, uint32_t val )
4.678 -{
4.679 }
4.680
4.681 /**
4.682 @@ -743,7 +756,7 @@
4.683 sh4_icache.page_ppa = 0x0C000000;
4.684 sh4_icache.mask = 0xFF000000;
4.685 sh4_icache.page = sh4_main_ram;
4.686 - } else if( (addr & 0x1FE00000 == 0 ) ) {
4.687 + } else if( (addr & 0x1FE00000) == 0 ) {
4.688 /* BIOS ROM */
4.689 sh4_icache.page_vma = addr & 0xFFE00000;
4.690 sh4_icache.page_ppa = 0;
4.691 @@ -780,9 +793,6 @@
4.692 MMU_READ_ADDR_ERROR();
4.693 return FALSE;
4.694 }
4.695 - } else {
4.696 - MMU_READ_ADDR_ERROR();
4.697 - return FALSE;
4.698 }
4.699
4.700 uint32_t mmucr = MMIO_READ(MMU,MMUCR);
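
The new mmu_vma_to_phys_read/mmu_vma_to_phys_write pair above shares one shape: handle the fixed regions first (P4 passed through untouched, P1/P2 masked down to an external address with VMA_TO_EXT_ADDR), short-circuit when AT is off, and only then fall into the UTLB lookup with its miss/multi-hit/protection outcomes. A simplified, self-contained sketch of that control flow, with the UTLB lookup and exception raising stubbed out and the privilege/AT state passed as parameters rather than read from emulator globals (MMU_VMA_ERROR mirrors the sh4core.h definition):

#include <stdio.h>
#include <stdint.h>

#define VMA_TO_EXT_ADDR(vma) ((vma) & 0x1FFFFFFF)
#define MMU_VMA_ERROR 0x80000000

static uint32_t vma_to_phys_sketch( uint32_t addr, int privmode, int at )
{
    if( addr & 0x80000000 ) {
        if( privmode ) {
            if( addr >= 0xE0000000 )
                return addr;                  /* P4: passthrough */
            if( addr < 0xC0000000 )
                return VMA_TO_EXT_ADDR(addr); /* P1/P2: mask to external */
            /* P3 falls through to translation */
        } else {
            /* user mode: address error (store-queue window omitted) */
            return MMU_VMA_ERROR;
        }
    }
    if( !at )
        return VMA_TO_EXT_ADDR(addr);         /* MMU off: flat mapping */
    return MMU_VMA_ERROR;                     /* stub: real UTLB lookup here */
}

int main(void)
{
    printf("%08X\n", vma_to_phys_sketch(0x8C001000, 1, 0)); /* P1 -> 0C001000 */
    printf("%08X\n", vma_to_phys_sketch(0xE0000004, 1, 0)); /* P4 -> E0000004 */
    printf("%08X\n", vma_to_phys_sketch(0x0C001000, 0, 0)); /* P0 -> 0C001000 */
    return 0;
}
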
5.1 --- a/src/sh4/sh4core.c Fri Jan 04 11:54:17 2008 +0000
5.2 +++ b/src/sh4/sh4core.c Sun Jan 06 12:24:18 2008 +0000
5.3 @@ -161,12 +161,12 @@
5.4 #define TRACE_RETURN( source, dest )
5.5 #endif
5.6
5.7 -#define MEM_READ_BYTE( addr, val ) memtmp = sh4_read_byte(addr); if( memtmp >> 32 ) { return TRUE; } else { val = ((uint32_t)memtmp); }
5.8 -#define MEM_READ_WORD( addr, val ) memtmp = sh4_read_word(addr); if( memtmp >> 32 ) { return TRUE; } else { val = ((uint32_t)memtmp); }
5.9 -#define MEM_READ_LONG( addr, val ) memtmp = sh4_read_long(addr); if( memtmp >> 32 ) { return TRUE; } else { val = ((uint32_t)memtmp); }
5.10 -#define MEM_WRITE_BYTE( addr, val ) if( sh4_write_byte(addr, val) ) { return TRUE; }
5.11 -#define MEM_WRITE_WORD( addr, val ) if( sh4_write_word(addr, val) ) { return TRUE; }
5.12 -#define MEM_WRITE_LONG( addr, val ) if( sh4_write_long(addr, val) ) { return TRUE; }
5.13 +#define MEM_READ_BYTE( addr, val ) memtmp = mmu_vma_to_phys_read(addr); if( memtmp == MMU_VMA_ERROR ) { return TRUE; } else { val = sh4_read_byte(memtmp); }
5.14 +#define MEM_READ_WORD( addr, val ) memtmp = mmu_vma_to_phys_read(addr); if( memtmp == MMU_VMA_ERROR ) { return TRUE; } else { val = sh4_read_word(memtmp); }
5.15 +#define MEM_READ_LONG( addr, val ) memtmp = mmu_vma_to_phys_read(addr); if( memtmp == MMU_VMA_ERROR ) { return TRUE; } else { val = sh4_read_long(memtmp); }
5.16 +#define MEM_WRITE_BYTE( addr, val ) memtmp = mmu_vma_to_phys_write(addr); if( memtmp == MMU_VMA_ERROR ) { return TRUE; } else { sh4_write_byte(memtmp, val); }
5.17 +#define MEM_WRITE_WORD( addr, val ) memtmp = mmu_vma_to_phys_write(addr); if( memtmp == MMU_VMA_ERROR ) { return TRUE; } else { sh4_write_word(memtmp, val); }
5.18 +#define MEM_WRITE_LONG( addr, val ) memtmp = mmu_vma_to_phys_write(addr); if( memtmp == MMU_VMA_ERROR ) { return TRUE; } else { sh4_write_long(memtmp, val); }
5.19
5.20 #define FP_WIDTH (IS_FPU_DOUBLESIZE() ? 8 : 4)
5.21
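
The rewritten macros above establish the translate-then-access pattern: mmu_vma_to_phys_read/write runs first, a result of MMU_VMA_ERROR means the MMU already raised the exception and the instruction bails out with TRUE, and only on success is the physical accessor called. This is also why sh4_read_* can shrink back to int32_t in sh4core.h below: failure is now signalled before the read, so the error flag above bit 31 of the old int64_t return is no longer needed. The same pattern written out as a function, with stubs in place of the real translation and accessor (read_long_checked is illustrative, not an lxdream API):

#include <stdint.h>

#define MMU_VMA_ERROR 0x80000000

/* Stubs standing in for mmu_vma_to_phys_read and sh4_read_long. */
static uint32_t mmu_vma_to_phys_read_stub( uint32_t vma ) { return vma & 0x1FFFFFFF; }
static int32_t  sh4_read_long_stub( uint32_t addr )       { return (int32_t)addr; }

/* Returns nonzero if the MMU raised an exception, mirroring the
 * `return TRUE` branch of the MEM_READ_LONG macro above. */
static int read_long_checked( uint32_t vma, uint32_t *out )
{
    uint32_t pa = mmu_vma_to_phys_read_stub(vma);
    if( pa == MMU_VMA_ERROR )
        return 1;                      /* exception already pending */
    *out = (uint32_t)sh4_read_long_stub(pa);
    return 0;
}

int main(void)
{
    uint32_t val;
    return read_long_checked(0x8C001000, &val);
}
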
6.1 --- a/src/sh4/sh4core.h Fri Jan 04 11:54:17 2008 +0000
6.2 +++ b/src/sh4/sh4core.h Sun Jan 06 12:24:18 2008 +0000
6.3 @@ -105,22 +105,37 @@
6.4 void signsat48(void);
6.5
6.6 /* SH4 Memory */
6.7 +#define MMU_VMA_ERROR 0x80000000
6.8 +/**
6.9 + * Update the sh4_icache structure to contain the specified vma. If the vma
6.10 + * cannot be resolved, an MMU exception is raised and the function returns
6.11 + * FALSE. Otherwise, returns TRUE and updates sh4_icache accordingly.
6.12 + * Note: If the vma resolves to a non-memory area, sh4_icache will be
6.13 + * invalidated, but the function will still return TRUE.
6.14 + * @return FALSE if an MMU exception was raised, otherwise TRUE.
6.15 + */
6.16 gboolean mmu_update_icache( sh4vma_t addr );
6.17 -uint64_t mmu_vma_to_phys_read( sh4vma_t addr );
6.18 -uint64_t mmu_vma_to_phys_write( sh4vma_t addr );
6.19 -uint64_t mmu_vma_to_phys_exec( sh4vma_t addr );
6.20 +
6.21 +/**
6.22 + * Resolve a virtual address through the TLB for a read operation, returning
6.23 + * the resultant P4 or external address. If the resolution fails, the
6.24 + * appropriate MMU exception is raised and the value MMU_VMA_ERROR is returned.
6.25 + * @return An external address (0x00000000-0x1FFFFFFF), a P4 address
6.26 + * (0xE0000000 - 0xFFFFFFFF), or MMU_VMA_ERROR.
6.27 + */
6.28 +sh4addr_t mmu_vma_to_phys_read( sh4vma_t addr );
6.29 +sh4addr_t mmu_vma_to_phys_write( sh4vma_t addr );
6.30
6.31 int64_t sh4_read_quad( sh4addr_t addr );
6.32 -int64_t sh4_read_long( sh4addr_t addr );
6.33 -int64_t sh4_read_word( sh4addr_t addr );
6.34 -int64_t sh4_read_byte( sh4addr_t addr );
6.35 +int32_t sh4_read_long( sh4addr_t addr );
6.36 +int32_t sh4_read_word( sh4addr_t addr );
6.37 +int32_t sh4_read_byte( sh4addr_t addr );
6.38 void sh4_write_quad( sh4addr_t addr, uint64_t val );
6.39 -int32_t sh4_write_long( sh4addr_t addr, uint32_t val );
6.40 -int32_t sh4_write_word( sh4addr_t addr, uint32_t val );
6.41 -int32_t sh4_write_byte( sh4addr_t addr, uint32_t val );
6.42 +void sh4_write_long( sh4addr_t addr, uint32_t val );
6.43 +void sh4_write_word( sh4addr_t addr, uint32_t val );
6.44 +void sh4_write_byte( sh4addr_t addr, uint32_t val );
6.45 int32_t sh4_read_phys_word( sh4addr_t addr );
6.46 void sh4_flush_store_queue( sh4addr_t addr );
6.47 -sh4ptr_t sh4_get_region_by_vma( sh4addr_t addr );
6.48
6.49 /* SH4 Exceptions */
6.50 #define EXC_POWER_RESET 0x000 /* reset vector */
7.1 --- a/src/sh4/sh4core.in Fri Jan 04 11:54:17 2008 +0000
7.2 +++ b/src/sh4/sh4core.in Sun Jan 06 12:24:18 2008 +0000
7.3 @@ -161,12 +161,12 @@
7.4 #define TRACE_RETURN( source, dest )
7.5 #endif
7.6
7.7 -#define MEM_READ_BYTE( addr, val ) memtmp = sh4_read_byte(addr); if( memtmp >> 32 ) { return TRUE; } else { val = ((uint32_t)memtmp); }
7.8 -#define MEM_READ_WORD( addr, val ) memtmp = sh4_read_word(addr); if( memtmp >> 32 ) { return TRUE; } else { val = ((uint32_t)memtmp); }
7.9 -#define MEM_READ_LONG( addr, val ) memtmp = sh4_read_long(addr); if( memtmp >> 32 ) { return TRUE; } else { val = ((uint32_t)memtmp); }
7.10 -#define MEM_WRITE_BYTE( addr, val ) if( sh4_write_byte(addr, val) ) { return TRUE; }
7.11 -#define MEM_WRITE_WORD( addr, val ) if( sh4_write_word(addr, val) ) { return TRUE; }
7.12 -#define MEM_WRITE_LONG( addr, val ) if( sh4_write_long(addr, val) ) { return TRUE; }
7.13 +#define MEM_READ_BYTE( addr, val ) memtmp = mmu_vma_to_phys_read(addr); if( memtmp == MMU_VMA_ERROR ) { return TRUE; } else { val = sh4_read_byte(memtmp); }
7.14 +#define MEM_READ_WORD( addr, val ) memtmp = mmu_vma_to_phys_read(addr); if( memtmp == MMU_VMA_ERROR ) { return TRUE; } else { val = sh4_read_word(memtmp); }
7.15 +#define MEM_READ_LONG( addr, val ) memtmp = mmu_vma_to_phys_read(addr); if( memtmp == MMU_VMA_ERROR ) { return TRUE; } else { val = sh4_read_long(memtmp); }
7.16 +#define MEM_WRITE_BYTE( addr, val ) memtmp = mmu_vma_to_phys_write(addr); if( memtmp == MMU_VMA_ERROR ) { return TRUE; } else { sh4_write_byte(memtmp, val); }
7.17 +#define MEM_WRITE_WORD( addr, val ) memtmp = mmu_vma_to_phys_write(addr); if( memtmp == MMU_VMA_ERROR ) { return TRUE; } else { sh4_write_word(memtmp, val); }
7.18 +#define MEM_WRITE_LONG( addr, val ) memtmp = mmu_vma_to_phys_write(addr); if( memtmp == MMU_VMA_ERROR ) { return TRUE; } else { sh4_write_long(memtmp, val); }
7.19
7.20 #define FP_WIDTH (IS_FPU_DOUBLESIZE() ? 8 : 4)
7.21
8.1 --- a/src/sh4/sh4mem.c Fri Jan 04 11:54:17 2008 +0000
8.2 +++ b/src/sh4/sh4mem.c Sun Jan 06 12:24:18 2008 +0000
8.3 @@ -154,18 +154,12 @@
8.4 (((int64_t)((uint32_t)sh4_read_long(addr+4))) << 32);
8.5 }
8.6
8.7 -int64_t sh4_read_long( sh4addr_t vma )
8.8 +int32_t sh4_read_long( sh4addr_t addr )
8.9 {
8.10 sh4ptr_t page;
8.11
8.12 CHECK_READ_WATCH(addr,4);
8.13
8.14 - uint64_t ppa = mmu_vma_to_phys_read(vma);
8.15 - if( ppa>>32 ) {
8.16 - return ppa;
8.17 - }
8.18 - sh4addr_t addr = (sh4addr_t)ppa;
8.19 -
8.20 if( addr >= 0xE0000000 ) { /* P4 Area, handled specially */
8.21 return ZEROEXT32(sh4_read_p4( addr ));
8.22 } else if( (addr&0x1C000000) == 0x0C000000 ) {
8.23 @@ -192,18 +186,12 @@
8.24 }
8.25 }
8.26
8.27 -int64_t sh4_read_word( sh4addr_t vma )
8.28 +int32_t sh4_read_word( sh4addr_t addr )
8.29 {
8.30 sh4ptr_t page;
8.31
8.32 CHECK_READ_WATCH(addr,2);
8.33
8.34 - uint64_t ppa = mmu_vma_to_phys_read(vma);
8.35 - if( ppa>>32 ) {
8.36 - return ppa;
8.37 - }
8.38 - sh4addr_t addr = (sh4addr_t)ppa;
8.39 -
8.40 if( addr >= 0xE0000000 ) { /* P4 Area, handled specially */
8.41 return ZEROEXT32(SIGNEXT16(sh4_read_p4( addr )));
8.42 } else if( (addr&0x1C000000) == 0x0C000000 ) {
8.43 @@ -230,18 +218,12 @@
8.44 }
8.45 }
8.46
8.47 -int64_t sh4_read_byte( sh4addr_t vma )
8.48 +int32_t sh4_read_byte( sh4addr_t addr )
8.49 {
8.50 sh4ptr_t page;
8.51
8.52 CHECK_READ_WATCH(addr,1);
8.53
8.54 - uint64_t ppa = mmu_vma_to_phys_read(vma);
8.55 - if( ppa>>32 ) {
8.56 - return ppa;
8.57 - }
8.58 - sh4addr_t addr = (sh4addr_t)ppa;
8.59 -
8.60 if( addr >= 0xE0000000 ) { /* P4 Area, handled specially */
8.61 return ZEROEXT32(SIGNEXT8(sh4_read_p4( addr )));
8.62 } else if( (addr&0x1C000000) == 0x0C000000 ) {
8.63 @@ -278,25 +260,19 @@
8.64 sh4_write_long( addr+4, (uint32_t)(val>>32) );
8.65 }
8.66
8.67 -int32_t sh4_write_long( sh4addr_t vma, uint32_t val )
8.68 +void sh4_write_long( sh4addr_t addr, uint32_t val )
8.69 {
8.70 sh4ptr_t page;
8.71
8.72 - uint64_t ppa = mmu_vma_to_phys_write(vma);
8.73 - if( ppa>>32 ) {
8.74 - return ppa>>32;
8.75 - }
8.76 - sh4addr_t addr = (sh4addr_t)ppa;
8.77 -
8.78 CHECK_WRITE_WATCH(addr,4,val);
8.79
8.80 if( addr >= 0xE0000000 ) {
8.81 sh4_write_p4( addr, val );
8.82 - return 0;
8.83 + return;
8.84 } else if( (addr&0x1C000000) == 0x0C000000 ) {
8.85 *(uint32_t *)(sh4_main_ram + (addr&0x00FFFFFF)) = val;
8.86 xlat_invalidate_long(addr);
8.87 - return 0;
8.88 + return;
8.89 } else if( (addr&0x1F800000) == 0x04000000 ||
8.90 (addr&0x1F800000) == 0x11000000 ) {
8.91 texcache_invalidate_page(addr& 0x7FFFFF);
8.92 @@ -309,7 +285,7 @@
8.93 if( (addr&0x1FFFFFFF) < 0x200000 ) {
8.94 WARN( "Attempted write to read-only memory: %08X => %08X", val, addr);
8.95 sh4_stop();
8.96 - return 0;
8.97 + return;
8.98 }
8.99 if( (addr&0x1F800000) == 0x00800000 )
8.100 asic_g2_write_word();
8.101 @@ -319,37 +295,30 @@
8.102 if( page == NULL ) {
8.103 if( (addr & 0x1F000000) >= 0x04000000 &&
8.104 (addr & 0x1F000000) < 0x07000000 )
8.105 - return 0;
8.106 + return;
8.107 WARN( "Long write to missing page: %08X => %08X", val, addr );
8.108 - return 0;
8.109 + return;
8.110 }
8.111 TRACE_IO( "Long write %08X => %08X", page, (addr&0xFFF), val, addr );
8.112 io_rgn[(uintptr_t)page]->io_write(addr&0xFFF, val);
8.113 } else {
8.114 *(uint32_t *)(page+(addr&0xFFF)) = val;
8.115 }
8.116 - return 0;
8.117 }
8.118
8.119 -int32_t sh4_write_word( sh4addr_t vma, uint32_t val )
8.120 +void sh4_write_word( sh4addr_t addr, uint32_t val )
8.121 {
8.122 sh4ptr_t page;
8.123
8.124 - uint64_t ppa = mmu_vma_to_phys_write(vma);
8.125 - if( ppa>>32 ) {
8.126 - return ppa>>32;
8.127 - }
8.128 - sh4addr_t addr = (sh4addr_t)ppa;
8.129 -
8.130 CHECK_WRITE_WATCH(addr,2,val);
8.131
8.132 if( addr >= 0xE0000000 ) {
8.133 sh4_write_p4( addr, (int16_t)val );
8.134 - return 0;
8.135 + return;
8.136 } else if( (addr&0x1C000000) == 0x0C000000 ) {
8.137 *(uint16_t *)(sh4_main_ram + (addr&0x00FFFFFF)) = val;
8.138 xlat_invalidate_word(addr);
8.139 - return 0;
8.140 + return;
8.141 } else if( (addr&0x1F800000) == 0x04000000 ||
8.142 (addr&0x1F800000) == 0x11000000 ) {
8.143 texcache_invalidate_page(addr& 0x7FFFFF);
8.144 @@ -362,41 +331,34 @@
8.145 if( (addr&0x1FFFFFFF) < 0x200000 ) {
8.146 WARN( "Attempted write to read-only memory: %08X => %08X", val, addr);
8.147 sh4_stop();
8.148 - return 0;
8.149 + return;
8.150 }
8.151 page = page_map[ (addr & 0x1FFFFFFF) >> 12 ];
8.152 if( ((uintptr_t)page) < MAX_IO_REGIONS ) { /* IO Region */
8.153 if( page == NULL ) {
8.154 WARN( "Attempted word write to missing page: %08X", addr );
8.155 - return 0;
8.156 + return;
8.157 }
8.158 TRACE_IO( "Word write %04X => %08X", page, (addr&0xFFF), val&0xFFFF, addr );
8.159 io_rgn[(uintptr_t)page]->io_write(addr&0xFFF, val);
8.160 } else {
8.161 *(uint16_t *)(page+(addr&0xFFF)) = val;
8.162 }
8.163 - return 0;
8.164 }
8.165
8.166 -int32_t sh4_write_byte( sh4addr_t vma, uint32_t val )
8.167 +void sh4_write_byte( sh4addr_t addr, uint32_t val )
8.168 {
8.169 sh4ptr_t page;
8.170
8.171 - uint64_t ppa = mmu_vma_to_phys_write(vma);
8.172 - if( ppa>>32 ) {
8.173 - return ppa>>32;
8.174 - }
8.175 - sh4addr_t addr = (sh4addr_t)ppa;
8.176 -
8.177 CHECK_WRITE_WATCH(addr,1,val);
8.178
8.179 if( addr >= 0xE0000000 ) {
8.180 sh4_write_p4( addr, (int8_t)val );
8.181 - return 0;
8.182 + return;
8.183 } else if( (addr&0x1C000000) == 0x0C000000 ) {
8.184 *(uint8_t *)(sh4_main_ram + (addr&0x00FFFFFF)) = val;
8.185 xlat_invalidate_word(addr);
8.186 - return 0;
8.187 + return;
8.188 } else if( (addr&0x1F800000) == 0x04000000 ||
8.189 (addr&0x1F800000) == 0x11000000 ) {
8.190 texcache_invalidate_page(addr& 0x7FFFFF);
8.191 @@ -409,20 +371,19 @@
8.192 if( (addr&0x1FFFFFFF) < 0x200000 ) {
8.193 WARN( "Attempted write to read-only memory: %08X => %08X", val, addr);
8.194 sh4_stop();
8.195 - return 0;
8.196 + return;
8.197 }
8.198 page = page_map[ (addr & 0x1FFFFFFF) >> 12 ];
8.199 if( ((uintptr_t)page) < MAX_IO_REGIONS ) { /* IO Region */
8.200 if( page == NULL ) {
8.201 WARN( "Attempted byte write to missing page: %08X", addr );
8.202 - return 0;
8.203 + return;
8.204 }
8.205 TRACE_IO( "Byte write %02X => %08X", page, (addr&0xFFF), val&0xFF, addr );
8.206 io_rgn[(uintptr_t)page]->io_write( (addr&0xFFF), val);
8.207 } else {
8.208 *(uint8_t *)(page+(addr&0xFFF)) = val;
8.209 }
8.210 - return 0;
8.211 }
8.212
8.213
9.1 --- a/src/sh4/sh4x86.c Fri Jan 04 11:54:17 2008 +0000
9.2 +++ b/src/sh4/sh4x86.c Sun Jan 06 12:24:18 2008 +0000
9.3 @@ -54,6 +54,9 @@
9.4 uint32_t stack_posn; /* Trace stack height for alignment purposes */
9.5 int tstate;
9.6
9.7 + /* mode flags */
9.8 + gboolean tlb_on; /* True if tlb translation is active */
9.9 +
9.10 /* Allocated memory for the (block-wide) back-patch list */
9.11 struct backpatch_record *backpatch_list;
9.12 uint32_t backpatch_posn;
9.13 @@ -306,15 +309,34 @@
9.14
9.15 #define UNDEF()
9.16 #define MEM_RESULT(value_reg) if(value_reg != R_EAX) { MOV_r32_r32(R_EAX,value_reg); }
9.17 -#define MEM_READ_BYTE( addr_reg, value_reg ) call_func1(sh4_read_byte, addr_reg ); TEST_r32_r32( R_EDX, R_EDX ); JNE_exc(-1); MEM_RESULT(value_reg)
9.18 -#define MEM_READ_WORD( addr_reg, value_reg ) call_func1(sh4_read_word, addr_reg ); TEST_r32_r32( R_EDX, R_EDX ); JNE_exc(-1); MEM_RESULT(value_reg)
9.19 -#define MEM_READ_LONG( addr_reg, value_reg ) call_func1(sh4_read_long, addr_reg ); TEST_r32_r32( R_EDX, R_EDX ); JNE_exc(-1); MEM_RESULT(value_reg)
9.20 -#define MEM_WRITE_BYTE( addr_reg, value_reg ) call_func2(sh4_write_byte, addr_reg, value_reg); TEST_r32_r32( R_EAX, R_EAX ); JNE_exc(-1);
9.21 -#define MEM_WRITE_WORD( addr_reg, value_reg ) call_func2(sh4_write_word, addr_reg, value_reg); TEST_r32_r32( R_EAX, R_EAX ); JNE_exc(-1);
9.22 -#define MEM_WRITE_LONG( addr_reg, value_reg ) call_func2(sh4_write_long, addr_reg, value_reg); TEST_r32_r32( R_EAX, R_EAX ); JNE_exc(-1);
9.23 +#define MEM_READ_BYTE_PHYS( addr_reg, value_reg ) call_func1(sh4_read_byte, addr_reg ); MEM_RESULT(value_reg)
9.24 +#define MEM_READ_WORD_PHYS( addr_reg, value_reg ) call_func1(sh4_read_word, addr_reg ); MEM_RESULT(value_reg)
9.25 +#define MEM_READ_LONG_PHYS( addr_reg, value_reg ) call_func1(sh4_read_long, addr_reg ); MEM_RESULT(value_reg)
9.26 +#define MEM_WRITE_BYTE_PHYS( addr_reg, value_reg ) call_func2(sh4_write_byte, addr_reg, value_reg)
9.27 +#define MEM_WRITE_WORD_PHYS( addr_reg, value_reg ) call_func2(sh4_write_word, addr_reg, value_reg)
9.28 +#define MEM_WRITE_LONG_PHYS( addr_reg, value_reg ) call_func2(sh4_write_long, addr_reg, value_reg)
9.29
9.30 -#define MEM_READ_SIZE (CALL_FUNC1_SIZE+8)
9.31 -#define MEM_WRITE_SIZE (CALL_FUNC2_SIZE+8)
9.32 +#define MEM_READ_BYTE_VMA( addr_reg, value_reg ) call_func1(mmu_vma_to_phys_read, addr_reg); CMP_imm32_r32(MMU_VMA_ERROR, R_EAX); JE_exc(-1); call_func1(sh4_read_byte, R_EAX); MEM_RESULT(value_reg)
9.33 +#define MEM_READ_WORD_VMA( addr_reg, value_reg ) call_func1(mmu_vma_to_phys_read, addr_reg); CMP_imm32_r32(MMU_VMA_ERROR, R_EAX); JE_exc(-1); call_func1(sh4_read_word, R_EAX); MEM_RESULT(value_reg)
9.34 +#define MEM_READ_LONG_VMA( addr_reg, value_reg ) call_func1(mmu_vma_to_phys_read, addr_reg); CMP_imm32_r32(MMU_VMA_ERROR, R_EAX); JE_exc(-1); call_func1(sh4_read_long, R_EAX); MEM_RESULT(value_reg)
9.35 +#define MEM_WRITE_BYTE_VMA( addr_reg, value_reg ) call_func1(mmu_vma_to_phys_write, addr_reg); CMP_imm32_r32(MMU_VMA_ERROR, R_EAX); JE_exc(-1); call_func2(sh4_write_byte, R_EAX, value_reg)
9.36 +#define MEM_WRITE_WORD_VMA( addr_reg, value_reg ) call_func1(mmu_vma_to_phys_write, addr_reg); CMP_imm32_r32(MMU_VMA_ERROR, R_EAX); JE_exc(-1); call_func2(sh4_write_word, R_EAX, value_reg)
9.37 +#define MEM_WRITE_LONG_VMA( addr_reg, value_reg ) call_func1(mmu_vma_to_phys_write, addr_reg); CMP_imm32_r32(MMU_VMA_ERROR, R_EAX); JE_exc(-1); call_func2(sh4_write_long, R_EAX, value_reg)
9.38 +
9.39 +#define MEM_READ_BYTE( addr_reg, value_reg ) if(sh4_x86.tlb_on){MEM_READ_BYTE_VMA(addr_reg,value_reg);}else{MEM_READ_BYTE_PHYS(addr_reg, value_reg);}
9.40 +#define MEM_READ_WORD( addr_reg, value_reg ) if(sh4_x86.tlb_on){MEM_READ_WORD_VMA(addr_reg,value_reg);}else{MEM_READ_WORD_PHYS(addr_reg, value_reg);}
9.41 +#define MEM_READ_LONG( addr_reg, value_reg ) if(sh4_x86.tlb_on){MEM_READ_LONG_VMA(addr_reg,value_reg);}else{MEM_READ_LONG_PHYS(addr_reg, value_reg);}
9.42 +#define MEM_WRITE_BYTE( addr_reg, value_reg ) if(sh4_x86.tlb_on){MEM_WRITE_BYTE_VMA(addr_reg,value_reg);}else{MEM_WRITE_BYTE_PHYS(addr_reg, value_reg);}
9.43 +#define MEM_WRITE_WORD( addr_reg, value_reg ) if(sh4_x86.tlb_on){MEM_WRITE_WORD_VMA(addr_reg,value_reg);}else{MEM_WRITE_WORD_PHYS(addr_reg, value_reg);}
9.44 +#define MEM_WRITE_LONG( addr_reg, value_reg ) if(sh4_x86.tlb_on){MEM_WRITE_LONG_VMA(addr_reg,value_reg);}else{MEM_WRITE_LONG_PHYS(addr_reg, value_reg);}
9.45 +
9.46 +#define MEM_READ_SIZE_PHYS (CALL_FUNC1_SIZE)
9.47 +#define MEM_WRITE_SIZE_PHYS (CALL_FUNC2_SIZE)
9.48 +#define MEM_READ_SIZE_VMA (CALL_FUNC1_SIZE + CALL_FUNC1_SIZE + 12)
9.49 +#define MEM_WRITE_SIZE_VMA (CALL_FUNC1_SIZE + CALL_FUNC2_SIZE + 12)
9.50 +
9.51 +#define MEM_READ_SIZE (sh4_x86.tlb_on?MEM_READ_SIZE_VMA:MEM_READ_SIZE_PHYS)
9.52 +#define MEM_WRITE_SIZE (sh4_x86.tlb_on?MEM_WRITE_SIZE_VMA:MEM_WRITE_SIZE_PHYS)
9.53
9.54 #define SLOTILLEGAL() JMP_exc(EXC_SLOT_ILLEGAL); sh4_x86.in_delay_slot = FALSE; return 1;
9.55
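
Since the emitted sequences now differ by mode, the MEM_READ_SIZE/MEM_WRITE_SIZE constants used for backpatch offset accounting become tlb_on-dependent as well: the VMA variants add a second call plus the CMP/Jcc pair (the extra 12 bytes in the definitions above). A tiny check of that accounting, with placeholder call sizes because the real CALL_FUNC1_SIZE/CALL_FUNC2_SIZE values come from the per-ABI headers:

#include <stdio.h>

/* Placeholder sizes; the real values are defined per ABI header. */
#define CALL_FUNC1_SIZE 12
#define CALL_FUNC2_SIZE 17

static int tlb_on;                    /* stands in for sh4_x86.tlb_on */

#define MEM_READ_SIZE_PHYS (CALL_FUNC1_SIZE)
#define MEM_READ_SIZE_VMA  (CALL_FUNC1_SIZE + CALL_FUNC1_SIZE + 12)
#define MEM_READ_SIZE (tlb_on ? MEM_READ_SIZE_VMA : MEM_READ_SIZE_PHYS)

int main(void)
{
    /* The accounting must match the mode the block was compiled in,
     * hence the tlb_on-dependent definition. */
    tlb_on = 0; printf("phys read sequence: %d bytes\n", MEM_READ_SIZE);
    tlb_on = 1; printf("vma read sequence:  %d bytes\n", MEM_READ_SIZE);
    return 0;
}
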
10.1 --- a/src/sh4/sh4x86.in Fri Jan 04 11:54:17 2008 +0000
10.2 +++ b/src/sh4/sh4x86.in Sun Jan 06 12:24:18 2008 +0000
10.3 @@ -54,6 +54,9 @@
10.4 uint32_t stack_posn; /* Trace stack height for alignment purposes */
10.5 int tstate;
10.6
10.7 + /* mode flags */
10.8 + gboolean tlb_on; /* True if tlb translation is active */
10.9 +
10.10 /* Allocated memory for the (block-wide) back-patch list */
10.11 struct backpatch_record *backpatch_list;
10.12 uint32_t backpatch_posn;
10.13 @@ -306,15 +309,34 @@
10.14
10.15 #define UNDEF()
10.16 #define MEM_RESULT(value_reg) if(value_reg != R_EAX) { MOV_r32_r32(R_EAX,value_reg); }
10.17 -#define MEM_READ_BYTE( addr_reg, value_reg ) call_func1(sh4_read_byte, addr_reg ); TEST_r32_r32( R_EDX, R_EDX ); JNE_exc(-1); MEM_RESULT(value_reg)
10.18 -#define MEM_READ_WORD( addr_reg, value_reg ) call_func1(sh4_read_word, addr_reg ); TEST_r32_r32( R_EDX, R_EDX ); JNE_exc(-1); MEM_RESULT(value_reg)
10.19 -#define MEM_READ_LONG( addr_reg, value_reg ) call_func1(sh4_read_long, addr_reg ); TEST_r32_r32( R_EDX, R_EDX ); JNE_exc(-1); MEM_RESULT(value_reg)
10.20 -#define MEM_WRITE_BYTE( addr_reg, value_reg ) call_func2(sh4_write_byte, addr_reg, value_reg); TEST_r32_r32( R_EAX, R_EAX ); JNE_exc(-1);
10.21 -#define MEM_WRITE_WORD( addr_reg, value_reg ) call_func2(sh4_write_word, addr_reg, value_reg); TEST_r32_r32( R_EAX, R_EAX ); JNE_exc(-1);
10.22 -#define MEM_WRITE_LONG( addr_reg, value_reg ) call_func2(sh4_write_long, addr_reg, value_reg); TEST_r32_r32( R_EAX, R_EAX ); JNE_exc(-1);
10.23 +#define MEM_READ_BYTE_PHYS( addr_reg, value_reg ) call_func1(sh4_read_byte, addr_reg ); MEM_RESULT(value_reg)
10.24 +#define MEM_READ_WORD_PHYS( addr_reg, value_reg ) call_func1(sh4_read_word, addr_reg ); MEM_RESULT(value_reg)
10.25 +#define MEM_READ_LONG_PHYS( addr_reg, value_reg ) call_func1(sh4_read_long, addr_reg ); MEM_RESULT(value_reg)
10.26 +#define MEM_WRITE_BYTE_PHYS( addr_reg, value_reg ) call_func2(sh4_write_byte, addr_reg, value_reg)
10.27 +#define MEM_WRITE_WORD_PHYS( addr_reg, value_reg ) call_func2(sh4_write_word, addr_reg, value_reg)
10.28 +#define MEM_WRITE_LONG_PHYS( addr_reg, value_reg ) call_func2(sh4_write_long, addr_reg, value_reg)
10.29
10.30 -#define MEM_READ_SIZE (CALL_FUNC1_SIZE+8)
10.31 -#define MEM_WRITE_SIZE (CALL_FUNC2_SIZE+8)
10.32 +#define MEM_READ_BYTE_VMA( addr_reg, value_reg ) call_func1(mmu_vma_to_phys_read, addr_reg); CMP_imm32_r32(MMU_VMA_ERROR, R_EAX); JE_exc(-1); call_func1(sh4_read_byte, R_EAX); MEM_RESULT(value_reg)
10.33 +#define MEM_READ_WORD_VMA( addr_reg, value_reg ) call_func1(mmu_vma_to_phys_read, addr_reg); CMP_imm32_r32(MMU_VMA_ERROR, R_EAX); JE_exc(-1); call_func1(sh4_read_word, R_EAX); MEM_RESULT(value_reg)
10.34 +#define MEM_READ_LONG_VMA( addr_reg, value_reg ) call_func1(mmu_vma_to_phys_read, addr_reg); CMP_imm32_r32(MMU_VMA_ERROR, R_EAX); JE_exc(-1); call_func1(sh4_read_long, R_EAX); MEM_RESULT(value_reg)
10.35 +#define MEM_WRITE_BYTE_VMA( addr_reg, value_reg ) call_func1(mmu_vma_to_phys_write, addr_reg); CMP_imm32_r32(MMU_VMA_ERROR, R_EAX); JE_exc(-1); call_func2(sh4_write_byte, R_EAX, value_reg)
10.36 +#define MEM_WRITE_WORD_VMA( addr_reg, value_reg ) call_func1(mmu_vma_to_phys_write, addr_reg); CMP_imm32_r32(MMU_VMA_ERROR, R_EAX); JE_exc(-1); call_func2(sh4_write_word, R_EAX, value_reg)
10.37 +#define MEM_WRITE_LONG_VMA( addr_reg, value_reg ) call_func1(mmu_vma_to_phys_write, addr_reg); CMP_imm32_r32(MMU_VMA_ERROR, R_EAX); JE_exc(-1); call_func2(sh4_write_long, R_EAX, value_reg)
10.38 +
10.39 +#define MEM_READ_BYTE( addr_reg, value_reg ) if(sh4_x86.tlb_on){MEM_READ_BYTE_VMA(addr_reg,value_reg);}else{MEM_READ_BYTE_PHYS(addr_reg, value_reg);}
10.40 +#define MEM_READ_WORD( addr_reg, value_reg ) if(sh4_x86.tlb_on){MEM_READ_WORD_VMA(addr_reg,value_reg);}else{MEM_READ_WORD_PHYS(addr_reg, value_reg);}
10.41 +#define MEM_READ_LONG( addr_reg, value_reg ) if(sh4_x86.tlb_on){MEM_READ_LONG_VMA(addr_reg,value_reg);}else{MEM_READ_LONG_PHYS(addr_reg, value_reg);}
10.42 +#define MEM_WRITE_BYTE( addr_reg, value_reg ) if(sh4_x86.tlb_on){MEM_WRITE_BYTE_VMA(addr_reg,value_reg);}else{MEM_WRITE_BYTE_PHYS(addr_reg, value_reg);}
10.43 +#define MEM_WRITE_WORD( addr_reg, value_reg ) if(sh4_x86.tlb_on){MEM_WRITE_WORD_VMA(addr_reg,value_reg);}else{MEM_WRITE_WORD_PHYS(addr_reg, value_reg);}
10.44 +#define MEM_WRITE_LONG( addr_reg, value_reg ) if(sh4_x86.tlb_on){MEM_WRITE_LONG_VMA(addr_reg,value_reg);}else{MEM_WRITE_LONG_PHYS(addr_reg, value_reg);}
10.45 +
10.46 +#define MEM_READ_SIZE_PHYS (CALL_FUNC1_SIZE)
10.47 +#define MEM_WRITE_SIZE_PHYS (CALL_FUNC2_SIZE)
10.48 +#define MEM_READ_SIZE_VMA (CALL_FUNC1_SIZE + CALL_FUNC1_SIZE + 12)
10.49 +#define MEM_WRITE_SIZE_VMA (CALL_FUNC1_SIZE + CALL_FUNC2_SIZE + 12)
10.50 +
10.51 +#define MEM_READ_SIZE (sh4_x86.tlb_on?MEM_READ_SIZE_VMA:MEM_READ_SIZE_PHYS)
10.52 +#define MEM_WRITE_SIZE (sh4_x86.tlb_on?MEM_WRITE_SIZE_VMA:MEM_WRITE_SIZE_PHYS)
10.53
10.54 #define SLOTILLEGAL() JMP_exc(EXC_SLOT_ILLEGAL); sh4_x86.in_delay_slot = FALSE; return 1;
10.55