Search
lxdream.org :: lxdream/src/sh4/mmu.c :: diff
lxdream 0.9.1
released Jun 29
Download Now
filename src/sh4/mmu.c
changeset 570:d2893980fbf5
prev 569:a1c49e1e8776
next 571:9bc09948d0f2
author nkeynes
date Sun Jan 06 12:24:18 2008 +0000 (13 years ago)
branch lxdream-mmu
permissions -rw-r--r--
last change Change to generate different code for mmu on/off cases
file annotate diff log raw
1.1 --- a/src/sh4/mmu.c Fri Jan 04 11:54:17 2008 +0000
1.2 +++ b/src/sh4/mmu.c Sun Jan 06 12:24:18 2008 +0000
1.3 @@ -22,6 +22,41 @@
1.4 #include "sh4/sh4core.h"
1.5 #include "mem.h"
1.6
1.7 +#define VMA_TO_EXT_ADDR(vma) ((vma)&0x1FFFFFFF)
1.8 +
1.9 +/* The MMU (practically unique in the system) is allowed to raise exceptions
1.10 + * directly, with a return code indicating that one was raised and the caller
1.11 + * had better behave appropriately.
1.12 + */
1.13 +#define RAISE_TLB_ERROR(code, vpn) \
1.14 + MMIO_WRITE(MMU, TEA, vpn); \
1.15 + MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
1.16 + sh4_raise_tlb_exception(code);
1.17 +
1.18 +#define RAISE_MEM_ERROR(code, vpn) \
1.19 + MMIO_WRITE(MMU, TEA, vpn); \
1.20 + MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
1.21 + sh4_raise_exception(code);
1.22 +
1.23 +#define RAISE_OTHER_ERROR(code) \
1.24 + sh4_raise_exception(code);
1.25 +/**
1.26 + * Abort with a non-MMU address error. Caused by user-mode code attempting
1.27 + * to access privileged regions, or alignment faults.
1.28 + */
1.29 +#define MMU_READ_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_READ)
1.30 +#define MMU_WRITE_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_WRITE)
1.31 +
1.32 +#define MMU_TLB_READ_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_READ, vpn)
1.33 +#define MMU_TLB_WRITE_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, vpn)
1.34 +#define MMU_TLB_INITIAL_WRITE_ERROR(vpn) RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, vpn)
1.35 +#define MMU_TLB_READ_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_READ, vpn)
1.36 +#define MMU_TLB_WRITE_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, vpn)
1.37 +#define MMU_TLB_MULTI_HIT_ERROR(vpn) sh4_raise_reset(EXC_TLB_MULTI_HIT); \
1.38 + MMIO_WRITE(MMU, TEA, vpn); \
1.39 + MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00)));
1.40 +
1.41 +
1.42 #define OCRAM_START (0x1C000000>>PAGE_BITS)
1.43 #define OCRAM_END (0x20000000>>PAGE_BITS)
1.44
1.45 @@ -99,6 +134,7 @@
1.46
1.47 void mmio_region_MMU_write( uint32_t reg, uint32_t val )
1.48 {
1.49 + uint32_t tmp;
1.50 switch(reg) {
1.51 case PTEH:
1.52 val &= 0xFFFFFCFF;
1.53 @@ -121,6 +157,13 @@
1.54 mmu_urb = (val >> 18) & 0x3F;
1.55 mmu_lrui = (val >> 26) & 0x3F;
1.56 val &= 0x00000301;
1.57 + tmp = MMIO_READ( MMU, MMUCR );
1.58 + if( ((val ^ tmp) & MMUCR_AT) ) {
1.59 + // AT flag has changed state - flush the xlt cache as all bets
1.60 + // are off now. We also need to force an immediate exit from the
1.61 + // current block
1.62 + xlat_flush_cache();
1.63 + }
1.64 break;
1.65 case CCR:
1.66 mmu_set_cache_mode( val & (CCR_OIX|CCR_ORA) );
1.67 @@ -150,6 +193,7 @@
1.68 fwrite( &mmu_urc, sizeof(mmu_urc), 1, f );
1.69 fwrite( &mmu_urb, sizeof(mmu_urb), 1, f );
1.70 fwrite( &mmu_lrui, sizeof(mmu_lrui), 1, f );
1.71 + fwrite( &mmu_asid, sizeof(mmu_asid), 1, f );
1.72 }
1.73
1.74 int MMU_load_state( FILE *f )
1.75 @@ -176,6 +220,9 @@
1.76 if( fread( &mmu_lrui, sizeof(mmu_lrui), 1, f ) != 1 ) {
1.77 return 1;
1.78 }
1.79 + if( fread( &mmu_asid, sizeof(mmu_asid), 1, f ) != 1 ) {
1.80 + return 1;
1.81 + }
1.82 return 0;
1.83 }
1.84
1.85 @@ -214,28 +261,192 @@
1.86 mmu_utlb[mmu_urc].mask = get_mask_for_flags(mmu_utlb[mmu_urc].flags);
1.87 }
1.88
1.89 -static inline void mmu_flush_pages( struct utlb_entry *ent )
1.90 +static void mmu_invalidate_tlb()
1.91 {
1.92 - unsigned int vpn;
1.93 - switch( ent->flags & TLB_SIZE_MASK ) {
1.94 - case TLB_SIZE_1K: xlat_flush_page( ent->vpn ); break;
1.95 - case TLB_SIZE_4K: xlat_flush_page( ent->vpn ); break;
1.96 - case TLB_SIZE_64K:
1.97 - for( vpn = ent->vpn; vpn < ent->vpn + 0x10000; vpn += 0x1000 ) {
1.98 - xlat_flush_page( vpn );
1.99 - }
1.100 - break;
1.101 - case TLB_SIZE_1M:
1.102 - for( vpn = ent->vpn; vpn < ent->vpn + 0x100000; vpn += 0x1000 ) {
1.103 - xlat_flush_page( vpn );
1.104 - }
1.105 - break;
1.106 + int i;
1.107 + for( i=0; i<ITLB_ENTRY_COUNT; i++ ) {
1.108 + mmu_itlb[i].flags &= (~TLB_VALID);
1.109 + }
1.110 + for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
1.111 + mmu_utlb[i].flags &= (~TLB_VALID);
1.112 + }
1.113 +}
1.114 +
1.115 +#define ITLB_ENTRY(addr) ((addr>>7)&0x03)
1.116 +
1.117 +int32_t mmu_itlb_addr_read( sh4addr_t addr )
1.118 +{
1.119 + struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
1.120 + return ent->vpn | ent->asid | (ent->flags & TLB_VALID);
1.121 +}
1.122 +int32_t mmu_itlb_data_read( sh4addr_t addr )
1.123 +{
1.124 + struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
1.125 + return ent->ppn | ent->flags;
1.126 +}
1.127 +
1.128 +void mmu_itlb_addr_write( sh4addr_t addr, uint32_t val )
1.129 +{
1.130 + struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
1.131 + ent->vpn = val & 0xFFFFFC00;
1.132 + ent->asid = val & 0x000000FF;
1.133 + ent->flags = (ent->flags & ~(TLB_VALID)) | (val&TLB_VALID);
1.134 +}
1.135 +
1.136 +void mmu_itlb_data_write( sh4addr_t addr, uint32_t val )
1.137 +{
1.138 + struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
1.139 + ent->ppn = val & 0x1FFFFC00;
1.140 + ent->flags = val & 0x00001DA;
1.141 + ent->mask = get_mask_for_flags(val);
1.142 +}
1.143 +
1.144 +#define UTLB_ENTRY(addr) ((addr>>8)&0x3F)
1.145 +#define UTLB_ASSOC(addr) (addr&0x80)
1.146 +#define UTLB_DATA2(addr) (addr&0x00800000)
1.147 +
1.148 +int32_t mmu_utlb_addr_read( sh4addr_t addr )
1.149 +{
1.150 + struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
1.151 + return ent->vpn | ent->asid | (ent->flags & TLB_VALID) |
1.152 + ((ent->flags & TLB_DIRTY)<<7);
1.153 +}
1.154 +int32_t mmu_utlb_data_read( sh4addr_t addr )
1.155 +{
1.156 + struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
1.157 + if( UTLB_DATA2(addr) ) {
1.158 + return ent->pcmcia;
1.159 + } else {
1.160 + return ent->ppn | ent->flags;
1.161 }
1.162 }
1.163
1.164 /**
1.165 + * Find a UTLB entry for the associative TLB write - same as the normal
1.166 + * lookup but ignores the valid bit.
1.167 + */
1.168 +static inline mmu_utlb_lookup_assoc( uint32_t vpn, uint32_t asid )
1.169 +{
1.170 + int result = -1;
1.171 + unsigned int i;
1.172 + for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
1.173 + if( ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
1.174 + ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
1.175 + if( result != -1 ) {
1.176 + return -2;
1.177 + }
1.178 + result = i;
1.179 + }
1.180 + }
1.181 + return result;
1.182 +}
1.183 +
1.184 +/**
1.185 + * Find a ITLB entry for the associative TLB write - same as the normal
1.186 + * lookup but ignores the valid bit.
1.187 + */
1.188 +static inline mmu_itlb_lookup_assoc( uint32_t vpn, uint32_t asid )
1.189 +{
1.190 + int result = -1;
1.191 + unsigned int i;
1.192 + for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
1.193 + if( ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
1.194 + ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
1.195 + if( result != -1 ) {
1.196 + return -2;
1.197 + }
1.198 + result = i;
1.199 + }
1.200 + }
1.201 + return result;
1.202 +}
1.203 +
1.204 +void mmu_utlb_addr_write( sh4addr_t addr, uint32_t val )
1.205 +{
1.206 + if( UTLB_ASSOC(addr) ) {
1.207 + uint32_t asid = MMIO_READ( MMU, PTEH ) & 0xFF;
1.208 + int utlb = mmu_utlb_lookup_assoc( val, asid );
1.209 + if( utlb >= 0 ) {
1.210 + struct utlb_entry *ent = &mmu_utlb[utlb];
1.211 + ent->flags = ent->flags & ~(TLB_DIRTY|TLB_VALID);
1.212 + ent->flags |= (val & TLB_VALID);
1.213 + ent->flags |= ((val & 0x200)>>7);
1.214 + }
1.215 +
1.216 + int itlb = mmu_itlb_lookup_assoc( val, asid );
1.217 + if( itlb >= 0 ) {
1.218 + struct itlb_entry *ent = &mmu_itlb[itlb];
1.219 + ent->flags = (ent->flags & (~TLB_VALID)) | (val & TLB_VALID);
1.220 + }
1.221 +
1.222 + if( itlb == -2 || utlb == -2 ) {
1.223 + MMU_TLB_MULTI_HIT_ERROR(addr);
1.224 + return;
1.225 + }
1.226 + } else {
1.227 + struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
1.228 + ent->vpn = (val & 0xFFFFFC00);
1.229 + ent->asid = (val & 0xFF);
1.230 + ent->flags = (ent->flags & ~(TLB_DIRTY|TLB_VALID));
1.231 + ent->flags |= (val & TLB_VALID);
1.232 + ent->flags |= ((val & 0x200)>>7);
1.233 + }
1.234 +}
1.235 +
1.236 +void mmu_utlb_data_write( sh4addr_t addr, uint32_t val )
1.237 +{
1.238 + struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
1.239 + if( UTLB_DATA2(addr) ) {
1.240 + ent->pcmcia = val & 0x0000000F;
1.241 + } else {
1.242 + ent->ppn = (val & 0x1FFFFC00);
1.243 + ent->flags = (val & 0x000001FF);
1.244 + ent->mask = get_mask_for_flags(val);
1.245 + }
1.246 +}
1.247 +
1.248 +/* Cache access - not implemented */
1.249 +
1.250 +int32_t mmu_icache_addr_read( sh4addr_t addr )
1.251 +{
1.252 + return 0; // not implemented
1.253 +}
1.254 +int32_t mmu_icache_data_read( sh4addr_t addr )
1.255 +{
1.256 + return 0; // not implemented
1.257 +}
1.258 +int32_t mmu_ocache_addr_read( sh4addr_t addr )
1.259 +{
1.260 + return 0; // not implemented
1.261 +}
1.262 +int32_t mmu_ocache_data_read( sh4addr_t addr )
1.263 +{
1.264 + return 0; // not implemented
1.265 +}
1.266 +
1.267 +void mmu_icache_addr_write( sh4addr_t addr, uint32_t val )
1.268 +{
1.269 +}
1.270 +
1.271 +void mmu_icache_data_write( sh4addr_t addr, uint32_t val )
1.272 +{
1.273 +}
1.274 +
1.275 +void mmu_ocache_addr_write( sh4addr_t addr, uint32_t val )
1.276 +{
1.277 +}
1.278 +
1.279 +void mmu_ocache_data_write( sh4addr_t addr, uint32_t val )
1.280 +{
1.281 +}
1.282 +
1.283 +/******************************************************************************/
1.284 +/* MMU TLB address translation */
1.285 +/******************************************************************************/
1.286 +
1.287 +/**
1.288 * The translations are excessively complicated, but unfortunately it's a
1.289 - * complicated system. It can undoubtedly be better optimized too.
1.290 + * complicated system. TODO: make this not be painfully slow.
1.291 */
1.292
1.293 /**
1.294 @@ -303,26 +514,6 @@
1.295 }
1.296
1.297 /**
1.298 - * Find a UTLB entry for the associative TLB write - same as the normal
1.299 - * lookup but ignores the valid bit.
1.300 - */
1.301 -static inline mmu_utlb_lookup_assoc( uint32_t vpn, uint32_t asid )
1.302 -{
1.303 - int result = -1;
1.304 - unsigned int i;
1.305 - for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
1.306 - if( ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
1.307 - ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
1.308 - if( result != -1 ) {
1.309 - return -2;
1.310 - }
1.311 - result = i;
1.312 - }
1.313 - }
1.314 - return result;
1.315 -}
1.316 -
1.317 -/**
1.318 * Update the ITLB by replacing the LRU entry with the specified UTLB entry.
1.319 * @return the number (0-3) of the replaced entry.
1.320 */
1.321 @@ -439,77 +630,85 @@
1.322 return result;
1.323 }
1.324
1.325 -/**
1.326 - * Find a ITLB entry for the associative TLB write - same as the normal
1.327 - * lookup but ignores the valid bit.
1.328 - */
1.329 -static inline mmu_itlb_lookup_assoc( uint32_t vpn, uint32_t asid )
1.330 -{
1.331 - int result = -1;
1.332 - unsigned int i;
1.333 - for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
1.334 - if( ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
1.335 - ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
1.336 - if( result != -1 ) {
1.337 - return -2;
1.338 - }
1.339 - result = i;
1.340 - }
1.341 - }
1.342 - return result;
1.343 -}
1.344 -
1.345 -#define RAISE_TLB_ERROR(code, vpn) \
1.346 - MMIO_WRITE(MMU, TEA, vpn); \
1.347 - MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
1.348 - sh4_raise_tlb_exception(code);
1.349 -
1.350 -#define RAISE_MEM_ERROR(code, vpn) \
1.351 - MMIO_WRITE(MMU, TEA, vpn); \
1.352 - MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
1.353 - sh4_raise_exception(code);
1.354 -
1.355 -#define RAISE_OTHER_ERROR(code) \
1.356 - sh4_raise_exception(code);
1.357 -
1.358 -/**
1.359 - * Abort with a non-MMU address error. Caused by user-mode code attempting
1.360 - * to access privileged regions, or alignment faults.
1.361 - */
1.362 -#define MMU_READ_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_READ)
1.363 -#define MMU_WRITE_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_WRITE)
1.364 -
1.365 -#define MMU_TLB_READ_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_READ, vpn)
1.366 -#define MMU_TLB_WRITE_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, vpn)
1.367 -#define MMU_TLB_INITIAL_WRITE_ERROR(vpn) RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, vpn)
1.368 -#define MMU_TLB_READ_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_READ, vpn)
1.369 -#define MMU_TLB_WRITE_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, vpn)
1.370 -#define MMU_TLB_MULTI_HIT_ERROR(vpn) sh4_raise_reset(EXC_TLB_MULTI_HIT); \
1.371 - MMIO_WRITE(MMU, TEA, vpn); \
1.372 - MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00)));
1.373 -
1.374 -uint64_t mmu_vma_to_phys_write( sh4addr_t addr )
1.375 +sh4addr_t mmu_vma_to_phys_read( sh4vma_t addr )
1.376 {
1.377 uint32_t mmucr = MMIO_READ(MMU,MMUCR);
1.378 if( addr & 0x80000000 ) {
1.379 if( IS_SH4_PRIVMODE() ) {
1.380 - if( addr < 0xC0000000 || addr >= 0xE0000000 ) {
1.381 - /* P1, P2 and P4 regions are pass-through (no translation) */
1.382 - return (uint64_t)addr;
1.383 + if( addr >= 0xE0000000 ) {
1.384 + return addr; /* P4 - passthrough */
1.385 + } else if( addr < 0xC0000000 ) {
1.386 + /* P1, P2 regions are pass-through (no translation) */
1.387 + return VMA_TO_EXT_ADDR(addr);
1.388 }
1.389 } else {
1.390 if( addr >= 0xE0000000 && addr < 0xE4000000 &&
1.391 ((mmucr&MMUCR_SQMD) == 0) ) {
1.392 /* Conditional user-mode access to the store-queue (no translation) */
1.393 - return (uint64_t)addr;
1.394 + return addr;
1.395 }
1.396 - MMU_WRITE_ADDR_ERROR();
1.397 - return 0x100000000LL;
1.398 + MMU_READ_ADDR_ERROR();
1.399 + return MMU_VMA_ERROR;
1.400 }
1.401 }
1.402
1.403 if( (mmucr & MMUCR_AT) == 0 ) {
1.404 - return (uint64_t)addr;
1.405 + return VMA_TO_EXT_ADDR(addr);
1.406 + }
1.407 +
1.408 + /* If we get this far, translation is required */
1.409 + int entryNo;
1.410 + if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
1.411 + entryNo = mmu_utlb_lookup_vpn_asid( addr );
1.412 + } else {
1.413 + entryNo = mmu_utlb_lookup_vpn( addr );
1.414 + }
1.415 +
1.416 + switch(entryNo) {
1.417 + case -1:
1.418 + MMU_TLB_READ_MISS_ERROR(addr);
1.419 + return MMU_VMA_ERROR;
1.420 + case -2:
1.421 + MMU_TLB_MULTI_HIT_ERROR(addr);
1.422 + return MMU_VMA_ERROR;
1.423 + default:
1.424 + if( (mmu_utlb[entryNo].flags & TLB_USERMODE) == 0 &&
1.425 + !IS_SH4_PRIVMODE() ) {
1.426 + /* protection violation */
1.427 + MMU_TLB_READ_PROT_ERROR(addr);
1.428 + return MMU_VMA_ERROR;
1.429 + }
1.430 +
1.431 + /* finally generate the target address */
1.432 + return (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
1.433 + (addr & (~mmu_utlb[entryNo].mask));
1.434 + }
1.435 +}
1.436 +
1.437 +sh4addr_t mmu_vma_to_phys_write( sh4vma_t addr )
1.438 +{
1.439 + uint32_t mmucr = MMIO_READ(MMU,MMUCR);
1.440 + if( addr & 0x80000000 ) {
1.441 + if( IS_SH4_PRIVMODE() ) {
1.442 + if( addr >= 0xE0000000 ) {
1.443 + return addr; /* P4 - passthrough */
1.444 + } else if( addr < 0xC0000000 ) {
1.445 + /* P1, P2 regions are pass-through (no translation) */
1.446 + return VMA_TO_EXT_ADDR(addr);
1.447 + }
1.448 + } else {
1.449 + if( addr >= 0xE0000000 && addr < 0xE4000000 &&
1.450 + ((mmucr&MMUCR_SQMD) == 0) ) {
1.451 + /* Conditional user-mode access to the store-queue (no translation) */
1.452 + return addr;
1.453 + }
1.454 + MMU_WRITE_ADDR_ERROR();
1.455 + return MMU_VMA_ERROR;
1.456 + }
1.457 + }
1.458 +
1.459 + if( (mmucr & MMUCR_AT) == 0 ) {
1.460 + return VMA_TO_EXT_ADDR(addr);
1.461 }
1.462
1.463 /* If we get this far, translation is required */
1.464 @@ -523,213 +722,27 @@
1.465 switch(entryNo) {
1.466 case -1:
1.467 MMU_TLB_WRITE_MISS_ERROR(addr);
1.468 - return 0x100000000LL;
1.469 + return MMU_VMA_ERROR;
1.470 case -2:
1.471 MMU_TLB_MULTI_HIT_ERROR(addr);
1.472 - return 0x100000000LL;
1.473 + return MMU_VMA_ERROR;
1.474 default:
1.475 if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
1.476 : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
1.477 /* protection violation */
1.478 MMU_TLB_WRITE_PROT_ERROR(addr);
1.479 - return 0x100000000LL;
1.480 + return MMU_VMA_ERROR;
1.481 }
1.482
1.483 if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
1.484 MMU_TLB_INITIAL_WRITE_ERROR(addr);
1.485 - return 0x100000000LL;
1.486 + return MMU_VMA_ERROR;
1.487 }
1.488
1.489 /* finally generate the target address */
1.490 return (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
1.491 (addr & (~mmu_utlb[entryNo].mask));
1.492 }
1.493 - return -1;
1.494 -
1.495 -}
1.496 -
1.497 -uint64_t mmu_vma_to_phys_read( sh4addr_t addr )
1.498 -{
1.499 - uint32_t mmucr = MMIO_READ(MMU,MMUCR);
1.500 - if( addr & 0x80000000 ) {
1.501 - if( IS_SH4_PRIVMODE() ) {
1.502 - if( addr < 0xC0000000 || addr >= 0xE0000000 ) {
1.503 - /* P1, P2 and P4 regions are pass-through (no translation) */
1.504 - return (uint64_t)addr;
1.505 - }
1.506 - } else {
1.507 - if( addr >= 0xE0000000 && addr < 0xE4000000 &&
1.508 - ((mmucr&MMUCR_SQMD) == 0) ) {
1.509 - /* Conditional user-mode access to the store-queue (no translation) */
1.510 - return (uint64_t)addr;
1.511 - }
1.512 - MMU_READ_ADDR_ERROR();
1.513 - return 0x100000000LL;
1.514 - }
1.515 - }
1.516 -
1.517 - if( (mmucr & MMUCR_AT) == 0 ) {
1.518 - return (uint64_t)addr;
1.519 - }
1.520 -
1.521 - /* If we get this far, translation is required */
1.522 - int entryNo;
1.523 - if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
1.524 - entryNo = mmu_utlb_lookup_vpn_asid( addr );
1.525 - } else {
1.526 - entryNo = mmu_utlb_lookup_vpn( addr );
1.527 - }
1.528 -
1.529 - switch(entryNo) {
1.530 - case -1:
1.531 - MMU_TLB_READ_MISS_ERROR(addr);
1.532 - return 0x100000000LL;
1.533 - case -2:
1.534 - MMU_TLB_MULTI_HIT_ERROR(addr);
1.535 - return 0x100000000LL;
1.536 - default:
1.537 - if( (mmu_utlb[entryNo].flags & TLB_USERMODE) == 0 &&
1.538 - !IS_SH4_PRIVMODE() ) {
1.539 - /* protection violation */
1.540 - MMU_TLB_READ_PROT_ERROR(addr);
1.541 - return 0x100000000LL;
1.542 - }
1.543 -
1.544 - /* finally generate the target address */
1.545 - return (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
1.546 - (addr & (~mmu_utlb[entryNo].mask));
1.547 - }
1.548 - return -1;
1.549 -}
1.550 -
1.551 -static void mmu_invalidate_tlb()
1.552 -{
1.553 - int i;
1.554 - for( i=0; i<ITLB_ENTRY_COUNT; i++ ) {
1.555 - mmu_itlb[i].flags &= (~TLB_VALID);
1.556 - }
1.557 - for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
1.558 - mmu_utlb[i].flags &= (~TLB_VALID);
1.559 - }
1.560 -}
1.561 -
1.562 -#define ITLB_ENTRY(addr) ((addr>>7)&0x03)
1.563 -
1.564 -int32_t mmu_itlb_addr_read( sh4addr_t addr )
1.565 -{
1.566 - struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
1.567 - return ent->vpn | ent->asid | (ent->flags & TLB_VALID);
1.568 -}
1.569 -int32_t mmu_itlb_data_read( sh4addr_t addr )
1.570 -{
1.571 - struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
1.572 - return ent->ppn | ent->flags;
1.573 -}
1.574 -
1.575 -void mmu_itlb_addr_write( sh4addr_t addr, uint32_t val )
1.576 -{
1.577 - struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
1.578 - ent->vpn = val & 0xFFFFFC00;
1.579 - ent->asid = val & 0x000000FF;
1.580 - ent->flags = (ent->flags & ~(TLB_VALID)) | (val&TLB_VALID);
1.581 -}
1.582 -
1.583 -void mmu_itlb_data_write( sh4addr_t addr, uint32_t val )
1.584 -{
1.585 - struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
1.586 - ent->ppn = val & 0x1FFFFC00;
1.587 - ent->flags = val & 0x00001DA;
1.588 - ent->mask = get_mask_for_flags(val);
1.589 -}
1.590 -
1.591 -#define UTLB_ENTRY(addr) ((addr>>8)&0x3F)
1.592 -#define UTLB_ASSOC(addr) (addr&0x80)
1.593 -#define UTLB_DATA2(addr) (addr&0x00800000)
1.594 -
1.595 -int32_t mmu_utlb_addr_read( sh4addr_t addr )
1.596 -{
1.597 - struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
1.598 - return ent->vpn | ent->asid | (ent->flags & TLB_VALID) |
1.599 - ((ent->flags & TLB_DIRTY)<<7);
1.600 -}
1.601 -int32_t mmu_utlb_data_read( sh4addr_t addr )
1.602 -{
1.603 - struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
1.604 - if( UTLB_DATA2(addr) ) {
1.605 - return ent->pcmcia;
1.606 - } else {
1.607 - return ent->ppn | ent->flags;
1.608 - }
1.609 -}
1.610 -
1.611 -void mmu_utlb_addr_write( sh4addr_t addr, uint32_t val )
1.612 -{
1.613 - if( UTLB_ASSOC(addr) ) {
1.614 - uint32_t asid = MMIO_READ( MMU, PTEH ) & 0xFF;
1.615 - int entryNo = mmu_utlb_lookup_assoc( val, asid );
1.616 - if( entryNo >= 0 ) {
1.617 - struct utlb_entry *ent = &mmu_utlb[entryNo];
1.618 - ent->flags = ent->flags & ~(TLB_DIRTY|TLB_VALID);
1.619 - ent->flags |= (val & TLB_VALID);
1.620 - ent->flags |= ((val & 0x200)>>7);
1.621 - } else if( entryNo == -2 ) {
1.622 - MMU_TLB_MULTI_HIT_ERROR(addr);
1.623 - }
1.624 - } else {
1.625 - struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
1.626 - ent->vpn = (val & 0xFFFFFC00);
1.627 - ent->asid = (val & 0xFF);
1.628 - ent->flags = (ent->flags & ~(TLB_DIRTY|TLB_VALID));
1.629 - ent->flags |= (val & TLB_VALID);
1.630 - ent->flags |= ((val & 0x200)>>7);
1.631 - }
1.632 -}
1.633 -
1.634 -void mmu_utlb_data_write( sh4addr_t addr, uint32_t val )
1.635 -{
1.636 - struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
1.637 - if( UTLB_DATA2(addr) ) {
1.638 - ent->pcmcia = val & 0x0000000F;
1.639 - } else {
1.640 - ent->ppn = (val & 0x1FFFFC00);
1.641 - ent->flags = (val & 0x000001FF);
1.642 - ent->mask = get_mask_for_flags(val);
1.643 - }
1.644 -}
1.645 -
1.646 -/* Cache access - not implemented */
1.647 -
1.648 -int32_t mmu_icache_addr_read( sh4addr_t addr )
1.649 -{
1.650 - return 0; // not implemented
1.651 -}
1.652 -int32_t mmu_icache_data_read( sh4addr_t addr )
1.653 -{
1.654 - return 0; // not implemented
1.655 -}
1.656 -int32_t mmu_ocache_addr_read( sh4addr_t addr )
1.657 -{
1.658 - return 0; // not implemented
1.659 -}
1.660 -int32_t mmu_ocache_data_read( sh4addr_t addr )
1.661 -{
1.662 - return 0; // not implemented
1.663 -}
1.664 -
1.665 -void mmu_icache_addr_write( sh4addr_t addr, uint32_t val )
1.666 -{
1.667 -}
1.668 -
1.669 -void mmu_icache_data_write( sh4addr_t addr, uint32_t val )
1.670 -{
1.671 -}
1.672 -
1.673 -void mmu_ocache_addr_write( sh4addr_t addr, uint32_t val )
1.674 -{
1.675 -}
1.676 -
1.677 -void mmu_ocache_data_write( sh4addr_t addr, uint32_t val )
1.678 -{
1.679 }
1.680
1.681 /**
1.682 @@ -743,7 +756,7 @@
1.683 sh4_icache.page_ppa = 0x0C000000;
1.684 sh4_icache.mask = 0xFF000000;
1.685 sh4_icache.page = sh4_main_ram;
1.686 - } else if( (addr & 0x1FE00000 == 0 ) ) {
1.687 + } else if( (addr & 0x1FE00000) == 0 ) {
1.688 /* BIOS ROM */
1.689 sh4_icache.page_vma = addr & 0xFFE00000;
1.690 sh4_icache.page_ppa = 0;
1.691 @@ -780,9 +793,6 @@
1.692 MMU_READ_ADDR_ERROR();
1.693 return FALSE;
1.694 }
1.695 - } else {
1.696 - MMU_READ_ADDR_ERROR();
1.697 - return FALSE;
1.698 }
1.699
1.700 uint32_t mmucr = MMIO_READ(MMU,MMUCR);
.