lxdream.org :: lxdream/src/sh4/mmu.c :: diff
filename src/sh4/mmu.c
changeset 586:2a3ba82cf243
prev 550:a27e31340147
next 597:87cbdf62aa35
author nkeynes
date Thu Jan 17 10:11:37 2008 +0000
permissions -rw-r--r--
last change Add flag to skip breakpoints when it's the very first instruction of a run
(ie, so executing dreamcast_run() when the current pc is a breakpoint doesn't
just return immediately)
1.1 --- a/src/sh4/mmu.c Thu Dec 06 10:43:30 2007 +0000
1.2 +++ b/src/sh4/mmu.c Thu Jan 17 10:11:37 2008 +0000
1.3 @@ -1,5 +1,5 @@
1.4 /**
1.5 - * $Id: mmu.c,v 1.15 2007-11-08 11:54:16 nkeynes Exp $
1.6 + * $Id$
1.7 *
1.8 * MMU implementation
1.9 *
1.10 @@ -22,6 +22,41 @@
1.11 #include "sh4/sh4core.h"
1.12 #include "mem.h"
1.13
1.14 +#define VMA_TO_EXT_ADDR(vma) ((vma)&0x1FFFFFFF)
1.15 +
1.16 +/* The MMU (practically unique in the system) is allowed to raise exceptions
1.17 + * directly, with a return code indicating that one was raised and the caller
1.18 + * had better behave appropriately.
1.19 + */
1.20 +#define RAISE_TLB_ERROR(code, vpn) \
1.21 + MMIO_WRITE(MMU, TEA, vpn); \
1.22 + MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
1.23 + sh4_raise_tlb_exception(code);
1.24 +
1.25 +#define RAISE_MEM_ERROR(code, vpn) \
1.26 + MMIO_WRITE(MMU, TEA, vpn); \
1.27 + MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
1.28 + sh4_raise_exception(code);
1.29 +
1.30 +#define RAISE_OTHER_ERROR(code) \
1.31 + sh4_raise_exception(code);
1.32 +/**
1.33 + * Abort with a non-MMU address error. Caused by user-mode code attempting
1.34 + * to access privileged regions, or alignment faults.
1.35 + */
1.36 +#define MMU_READ_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_READ)
1.37 +#define MMU_WRITE_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_WRITE)
1.38 +
1.39 +#define MMU_TLB_READ_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_READ, vpn)
1.40 +#define MMU_TLB_WRITE_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, vpn)
1.41 +#define MMU_TLB_INITIAL_WRITE_ERROR(vpn) RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, vpn)
1.42 +#define MMU_TLB_READ_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_READ, vpn)
1.43 +#define MMU_TLB_WRITE_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, vpn)
1.44 +#define MMU_TLB_MULTI_HIT_ERROR(vpn) sh4_raise_reset(EXC_TLB_MULTI_HIT); \
1.45 + MMIO_WRITE(MMU, TEA, vpn); \
1.46 + MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00)));
1.47 +
1.48 +
1.49 #define OCRAM_START (0x1C000000>>PAGE_BITS)
1.50 #define OCRAM_END (0x20000000>>PAGE_BITS)
1.51
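A note on the convention introduced by the macros above: they record the faulting address in TEA and the VPN bits in PTEH, queue the exception with the core, and leave it to the caller to signal failure through its return value - the translation functions added later in this diff return MMU_VMA_ERROR for exactly this purpose. A minimal caller-side sketch (mem_read_long is a hypothetical physical-read helper; MMU_VMA_ERROR is defined elsewhere in lxdream):

    /* Sketch: consuming a translation result. The exception has already
     * been raised by the time MMU_VMA_ERROR is observed; the caller only
     * needs to unwind without touching memory. */
    int32_t sh4_read_long_example( sh4vma_t vma )
    {
        sh4addr_t phys = mmu_vma_to_phys_read( vma );
        if( phys == MMU_VMA_ERROR ) {
            return 0; /* exception pending - result is ignored */
        }
        return mem_read_long( phys ); /* hypothetical physical accessor */
    }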
1.52 @@ -32,6 +67,7 @@
1.53 #define TLB_VALID 0x00000100
1.54 #define TLB_USERMODE 0x00000040
1.55 #define TLB_WRITABLE 0x00000020
1.56 +#define TLB_USERWRITABLE (TLB_WRITABLE|TLB_USERMODE)
1.57 #define TLB_SIZE_MASK 0x00000090
1.58 #define TLB_SIZE_1K 0x00000000
1.59 #define TLB_SIZE_4K 0x00000010
1.60 @@ -42,16 +78,22 @@
1.61 #define TLB_SHARE 0x00000002
1.62 #define TLB_WRITETHRU 0x00000001
1.63
1.64 +#define MASK_1K 0xFFFFFC00
1.65 +#define MASK_4K 0xFFFFF000
1.66 +#define MASK_64K 0xFFFF0000
1.67 +#define MASK_1M 0xFFF00000
1.68
1.69 struct itlb_entry {
1.70 sh4addr_t vpn; // Virtual Page Number
1.71 uint32_t asid; // Process ID
1.72 + uint32_t mask;
1.73 sh4addr_t ppn; // Physical Page Number
1.74 uint32_t flags;
1.75 };
1.76
1.77 struct utlb_entry {
1.78 sh4addr_t vpn; // Virtual Page Number
1.79 + uint32_t mask; // Page size mask
1.80 uint32_t asid; // Process ID
1.81 sh4addr_t ppn; // Physical Page Number
1.82 uint32_t flags;
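The new mask field caches the page-size mask at entry-load time so that every lookup can test a match with one XOR-and-mask, independent of page size. A minimal sketch of the predicate used by all the lookup loops below:

    /* Sketch: entry matches vpn when all bits above the page offset agree.
     * E.g. a 64K entry (mask 0xFFFF0000) with vpn 0x7C010000 matches any
     * address in 0x7C010000..0x7C01FFFF. */
    static int tlb_vpn_match( uint32_t entry_vpn, uint32_t mask, uint32_t vpn )
    {
        return ((entry_vpn ^ vpn) & mask) == 0;
    }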
1.83 @@ -63,12 +105,23 @@
1.84 static uint32_t mmu_urc;
1.85 static uint32_t mmu_urb;
1.86 static uint32_t mmu_lrui;
1.87 +static uint32_t mmu_asid; // current asid
1.88
1.89 static sh4ptr_t cache = NULL;
1.90
1.91 static void mmu_invalidate_tlb();
1.92
1.93
1.94 +static uint32_t get_mask_for_flags( uint32_t flags )
1.95 +{
1.96 + switch( flags & TLB_SIZE_MASK ) {
1.97 + case TLB_SIZE_1K: return MASK_1K;
1.98 + case TLB_SIZE_4K: return MASK_4K;
1.99 + case TLB_SIZE_64K: return MASK_64K;
1.100 + default: return MASK_1M; /* TLB_SIZE_1M - only remaining size code */
1.101 + }
1.102 +}
1.103 +
1.104 int32_t mmio_region_MMU_read( uint32_t reg )
1.105 {
1.106 switch( reg ) {
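The page-size decode in get_mask_for_flags above is exhaustive because TLB_SIZE_MASK covers exactly the two size bits 0x80 and 0x10, giving the four codes 0x00/0x10/0x80/0x90. A small sketch showing how the mask encodes the page size (values from the defines above):

    /* Sketch: recover the page size in bytes from its mask,
     * e.g. ~0xFFFF0000 + 1 == 0x10000 == 64KB. */
    static uint32_t page_size_bytes( uint32_t flags )
    {
        return ~get_mask_for_flags( flags ) + 1;
    }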
1.107 @@ -81,9 +134,14 @@
1.108
1.109 void mmio_region_MMU_write( uint32_t reg, uint32_t val )
1.110 {
1.111 + uint32_t tmp;
1.112 switch(reg) {
1.113 case PTEH:
1.114 val &= 0xFFFFFCFF;
1.115 + if( (val & 0xFF) != mmu_asid ) {
1.116 + mmu_asid = val&0xFF;
1.117 + sh4_icache.page_vma = -1; // invalidate icache as asid has changed
1.118 + }
1.119 break;
1.120 case PTEL:
1.121 val &= 0x1FFFFDFF;
1.122 @@ -99,6 +157,14 @@
1.123 mmu_urb = (val >> 18) & 0x3F;
1.124 mmu_lrui = (val >> 26) & 0x3F;
1.125 val &= 0x00000301;
1.126 + tmp = MMIO_READ( MMU, MMUCR );
1.127 + if( ((val ^ tmp) & MMUCR_AT) && sh4_is_using_xlat() ) {
1.128 + // AT flag has changed state - flush the xlat cache as all bets
1.129 + // are off now. We also need to force an immediate exit from the
1.130 + // current block
1.131 + MMIO_WRITE( MMU, MMUCR, val );
1.132 + sh4_translate_flush_cache();
1.133 + }
1.134 break;
1.135 case CCR:
1.136 mmu_set_cache_mode( val & (CCR_OIX|CCR_ORA) );
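On the MMUCR path above: the XOR between the old and new register values is the usual changed-bit idiom, and the register is committed with MMIO_WRITE before sh4_translate_flush_cache() runs, presumably so the new AT state is already visible if the flush unwinds the currently-executing block. The test in isolation:

    /* Sketch: true iff `flag` differs between the old and new values */
    static int flag_toggled( uint32_t old_val, uint32_t new_val, uint32_t flag )
    {
        return ((old_val ^ new_val) & flag) != 0;
    }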
1.137 @@ -118,6 +184,7 @@
1.138 void MMU_reset()
1.139 {
1.140 mmio_region_MMU_write( CCR, 0 );
1.141 + mmio_region_MMU_write( MMUCR, 0 );
1.142 }
1.143
1.144 void MMU_save_state( FILE *f )
1.145 @@ -125,6 +192,10 @@
1.146 fwrite( cache, 4096, 2, f );
1.147 fwrite( &mmu_itlb, sizeof(mmu_itlb), 1, f );
1.148 fwrite( &mmu_utlb, sizeof(mmu_utlb), 1, f );
1.149 + fwrite( &mmu_urc, sizeof(mmu_urc), 1, f );
1.150 + fwrite( &mmu_urb, sizeof(mmu_urb), 1, f );
1.151 + fwrite( &mmu_lrui, sizeof(mmu_lrui), 1, f );
1.152 + fwrite( &mmu_asid, sizeof(mmu_asid), 1, f );
1.153 }
1.154
1.155 int MMU_load_state( FILE *f )
1.156 @@ -142,6 +213,18 @@
1.157 if( fread( &mmu_utlb, sizeof(mmu_utlb), 1, f ) != 1 ) {
1.158 return 1;
1.159 }
1.160 + if( fread( &mmu_urc, sizeof(mmu_urc), 1, f ) != 1 ) {
1.161 + return 1;
1.162 + }
1.163 + if( fread( &mmu_urb, sizeof(mmu_urb), 1, f ) != 1 ) {
1.164 + return 1;
1.165 + }
1.166 + if( fread( &mmu_lrui, sizeof(mmu_lrui), 1, f ) != 1 ) {
1.167 + return 1;
1.168 + }
1.169 + if( fread( &mmu_asid, sizeof(mmu_asid), 1, f ) != 1 ) {
1.170 + return 1;
1.171 + }
1.172 return 0;
1.173 }
1.174
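The save and load paths must stay byte-for-byte symmetric - the four new fields are written and read back in the same order, and a mismatched field or size in any fread corrupts every later field. A minimal round-trip sketch (the temp-file path is purely illustrative):

    /* Sketch: save, reload, and report whether the load path consumed
     * the state cleanly. */
    int mmu_state_roundtrip_ok()
    {
        FILE *f = fopen( "/tmp/mmu.sav", "w+b" );
        if( f == NULL ) return 0;
        MMU_save_state( f );
        rewind( f );
        int ok = (MMU_load_state( f ) == 0);
        fclose( f );
        return ok;
    }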
1.175 @@ -177,41 +260,7 @@
1.176 mmu_utlb[mmu_urc].ppn = MMIO_READ(MMU, PTEL) & 0x1FFFFC00;
1.177 mmu_utlb[mmu_urc].flags = MMIO_READ(MMU, PTEL) & 0x00001FF;
1.178 mmu_utlb[mmu_urc].pcmcia = MMIO_READ(MMU, PTEA);
1.179 -}
1.180 -
1.181 -uint64_t mmu_translate_read( sh4addr_t addr )
1.182 -{
1.183 - uint32_t mmucr = MMIO_READ(MMU,MMUCR);
1.184 - if( IS_SH4_PRIVMODE() ) {
1.185 - switch( addr & 0xE0000000 ) {
1.186 - case 0x80000000: case 0xA0000000:
1.187 - /* Non-translated read P1,P2 */
1.188 - break;
1.189 - case 0xE0000000:
1.190 - /* Non-translated read P4 */
1.191 - break;
1.192 - default:
1.193 - if( mmucr&MMUCR_AT ) {
1.194 - } else {
1.195 - // direct read
1.196 - }
1.197 - }
1.198 - } else {
1.199 - if( addr & 0x80000000 ) {
1.200 - if( ((addr&0xFC000000) == 0xE0000000 ) &&
1.201 - ((mmucr&MMUCR_SQMD) == 0) ) {
1.202 - // Store queue
1.203 - return 0;
1.204 - }
1.205 -// MMU_READ_ADDR_ERROR();
1.206 - }
1.207 - if( mmucr&MMUCR_AT ) {
1.208 - uint32_t vpn = addr & 0xFFFFFC00;
1.209 - uint32_t asid = MMIO_READ(MMU,PTEH)&0xFF;
1.210 - } else {
1.211 - // direct read
1.212 - }
1.213 - }
1.214 + mmu_utlb[mmu_urc].mask = get_mask_for_flags(mmu_utlb[mmu_urc].flags);
1.215 }
1.216
1.217 static void mmu_invalidate_tlb()
1.218 @@ -251,6 +300,7 @@
1.219 struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
1.220 ent->ppn = val & 0x1FFFFC00;
1.221 ent->flags = val & 0x00001DA;
1.222 + ent->mask = get_mask_for_flags(val);
1.223 }
1.224
1.225 #define UTLB_ENTRY(addr) ((addr>>8)&0x3F)
1.226 @@ -273,9 +323,70 @@
1.227 }
1.228 }
1.229
1.230 +/**
1.231 + * Find a UTLB entry for the associative TLB write - same as the normal
1.232 + * lookup, but without advancing the URC replacement counter.
1.233 + */
1.234 +static inline int mmu_utlb_lookup_assoc( uint32_t vpn, uint32_t asid )
1.235 +{
1.236 + int result = -1;
1.237 + unsigned int i;
1.238 + for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
1.239 + if( (mmu_utlb[i].flags & TLB_VALID) &&
1.240 + ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
1.241 + ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
1.242 + if( result != -1 ) {
1.243 + fprintf( stderr, "TLB Multi hit: %d %d\n", result, i );
1.244 + return -2;
1.245 + }
1.246 + result = i;
1.247 + }
1.248 + }
1.249 + return result;
1.250 +}
1.251 +
1.252 +/**
1.253 + * Find an ITLB entry for the associative TLB write - same as the normal
1.254 + * lookup, but without the UTLB fallback or LRUI update.
1.255 + */
1.256 +static inline int mmu_itlb_lookup_assoc( uint32_t vpn, uint32_t asid )
1.257 +{
1.258 + int result = -1;
1.259 + unsigned int i;
1.260 + for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
1.261 + if( (mmu_itlb[i].flags & TLB_VALID) &&
1.262 + ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
1.263 + ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
1.264 + if( result != -1 ) {
1.265 + return -2;
1.266 + }
1.267 + result = i;
1.268 + }
1.269 + }
1.270 + return result;
1.271 +}
1.272 +
1.273 void mmu_utlb_addr_write( sh4addr_t addr, uint32_t val )
1.274 {
1.275 if( UTLB_ASSOC(addr) ) {
1.276 + int utlb = mmu_utlb_lookup_assoc( val, mmu_asid );
1.277 + if( utlb >= 0 ) {
1.278 + struct utlb_entry *ent = &mmu_utlb[utlb];
1.279 + ent->flags = ent->flags & ~(TLB_DIRTY|TLB_VALID);
1.280 + ent->flags |= (val & TLB_VALID);
1.281 + ent->flags |= ((val & 0x200)>>7);
1.282 + }
1.283 +
1.284 + int itlb = mmu_itlb_lookup_assoc( val, mmu_asid );
1.285 + if( itlb >= 0 ) {
1.286 + struct itlb_entry *ent = &mmu_itlb[itlb];
1.287 + ent->flags = (ent->flags & (~TLB_VALID)) | (val & TLB_VALID);
1.288 + }
1.289 +
1.290 + if( itlb == -2 || utlb == -2 ) {
1.291 + MMU_TLB_MULTI_HIT_ERROR(addr);
1.292 + return;
1.293 + }
1.294 } else {
1.295 struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
1.296 ent->vpn = (val & 0xFFFFFC00);
1.297 @@ -294,6 +405,7 @@
1.298 } else {
1.299 ent->ppn = (val & 0x1FFFFC00);
1.300 ent->flags = (val & 0x000001FF);
1.301 + ent->mask = get_mask_for_flags(val);
1.302 }
1.303 }
1.304
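For context on the associative branch above: a store to the UTLB address array with the association bit set updates the V and D bits of whichever entry matches the written VPN, rather than addressing a slot directly, and a double match is fatal (multi-hit reset). A sketch of the guest-side write, assuming the standard SH7750 P4 layout (UTLB address array at 0xF6000000, association bit = address bit 7 - the same bits lxdream decodes with UTLB_ENTRY/UTLB_ASSOC):

    /* Sketch: associatively invalidate any mapping of `vpn` (guest view).
     * Writing with V (0x100) and D (0x200) clear invalidates the match. */
    #define UTLB_ADDR_ARRAY 0xF6000000
    #define UTLB_A_BIT      0x00000080
    static void guest_utlb_assoc_invalidate( uint32_t vpn )
    {
        volatile uint32_t *p =
            (volatile uint32_t *)(UTLB_ADDR_ARRAY | UTLB_A_BIT);
        *p = vpn & 0xFFFFFC00;
    }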
1.305 @@ -331,3 +443,455 @@
1.306 void mmu_ocache_data_write( sh4addr_t addr, uint32_t val )
1.307 {
1.308 }
1.309 +
1.310 +/******************************************************************************/
1.311 +/* MMU TLB address translation */
1.312 +/******************************************************************************/
1.313 +
1.314 +/**
1.315 + * The translations are excessively complicated, but unfortunately it's a
1.316 + * complicated system. TODO: make this not be painfully slow.
1.317 + */
1.318 +
1.319 +/**
1.320 + * Perform the actual utlb lookup w/ asid matching.
1.321 + * Possible outcomes are:
1.322 + * 0..63 Single match - good, return entry found
1.323 + * -1 No match - raise a tlb data miss exception
1.324 + * -2 Multiple matches - raise a multi-hit exception (reset)
1.325 + * @param vpn virtual address to resolve
1.326 + * @return the resultant UTLB entry, or an error.
1.327 + */
1.328 +static inline int mmu_utlb_lookup_vpn_asid( uint32_t vpn )
1.329 +{
1.330 + int result = -1;
1.331 + unsigned int i;
1.332 +
1.333 + mmu_urc++;
1.334 + if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
1.335 + mmu_urc = 0;
1.336 + }
1.337 +
1.338 + for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
1.339 + if( (mmu_utlb[i].flags & TLB_VALID) &&
1.340 + ((mmu_utlb[i].flags & TLB_SHARE) || mmu_asid == mmu_utlb[i].asid) &&
1.341 + ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
1.342 + if( result != -1 ) {
1.343 + return -2;
1.344 + }
1.345 + result = i;
1.346 + }
1.347 + }
1.348 + return result;
1.349 +}
1.350 +
1.351 +/**
1.352 + * Perform the actual utlb lookup matching on vpn only
1.353 + * Possible outcomes are:
1.354 + * 0..63 Single match - good, return entry found
1.355 + * -1 No match - raise a tlb data miss exception
1.356 + * -2 Multiple matches - raise a multi-hit exception (reset)
1.357 + * @param vpn virtual address to resolve
1.358 + * @return the resultant UTLB entry, or an error.
1.359 + */
1.360 +static inline int mmu_utlb_lookup_vpn( uint32_t vpn )
1.361 +{
1.362 + int result = -1;
1.363 + unsigned int i;
1.364 +
1.365 + mmu_urc++;
1.366 + if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
1.367 + mmu_urc = 0;
1.368 + }
1.369 +
1.370 + for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
1.371 + if( (mmu_utlb[i].flags & TLB_VALID) &&
1.372 + ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
1.373 + if( result != -1 ) {
1.374 + return -2;
1.375 + }
1.376 + result = i;
1.377 + }
1.378 + }
1.379 +
1.380 + return result;
1.381 +}
1.382 +
1.383 +/**
1.384 + * Update the ITLB by replacing the LRU entry with the specified UTLB entry.
1.385 + * @return the number (0-3) of the replaced entry.
1.386 + */
1.387 +static inline int mmu_itlb_update_from_utlb( int entryNo )
1.388 +{
1.389 + int replace;
1.390 + /* Determine entry to replace based on lrui */
1.391 + if( (mmu_lrui & 0x38) == 0x38 ) {
1.392 + replace = 0;
1.393 + mmu_lrui = mmu_lrui & 0x07;
1.394 + } else if( (mmu_lrui & 0x26) == 0x06 ) {
1.395 + replace = 1;
1.396 + mmu_lrui = (mmu_lrui & 0x19) | 0x20;
1.397 + } else if( (mmu_lrui & 0x15) == 0x01 ) {
1.398 + replace = 2;
1.399 + mmu_lrui = (mmu_lrui & 0x3E) | 0x14;
1.400 + } else { // Note - gets invalid entries too
1.401 + replace = 3;
1.402 + mmu_lrui = (mmu_lrui | 0x0B);
1.403 + }
1.404 +
1.405 + mmu_itlb[replace].vpn = mmu_utlb[entryNo].vpn;
1.406 + mmu_itlb[replace].mask = mmu_utlb[entryNo].mask;
1.407 + mmu_itlb[replace].ppn = mmu_utlb[entryNo].ppn;
1.408 + mmu_itlb[replace].asid = mmu_utlb[entryNo].asid;
1.409 + mmu_itlb[replace].flags = mmu_utlb[entryNo].flags & 0x01DA;
1.410 + return replace;
1.411 +}
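The bit patterns above implement the SH7750 pseudo-LRU scheme: each of the six LRUI bits records the relative age of one pair of the four ITLB entries, and each mask/value test asks whether a given entry is older than the other three. The same decision table in isolation:

    /* Sketch: which ITLB slot a given LRUI value selects for replacement
     * (mirrors the cascade in mmu_itlb_update_from_utlb). */
    static int itlb_replacement_slot( uint32_t lrui )
    {
        if( (lrui & 0x38) == 0x38 ) return 0; /* slot 0 is least recent */
        if( (lrui & 0x26) == 0x06 ) return 1; /* slot 1 is least recent */
        if( (lrui & 0x15) == 0x01 ) return 2; /* slot 2 is least recent */
        return 3;  /* slot 3, or an inconsistent LRUI value */
    }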
1.412 +
1.413 +/**
1.414 + * Perform the actual itlb lookup w/ asid protection
1.415 + * Possible outcomes are:
1.416 + * 0..3 Single match - good, return entry found
1.417 + * -1 No match - raise a tlb data miss exception
1.418 + * -2 Multiple matches - raise a multi-hit exception (reset)
1.419 + * @param vpn virtual address to resolve
1.420 + * @return the resultant ITLB entry, or an error.
1.421 + */
1.422 +static inline int mmu_itlb_lookup_vpn_asid( uint32_t vpn )
1.423 +{
1.424 + int result = -1;
1.425 + unsigned int i;
1.426 +
1.427 + for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
1.428 + if( (mmu_itlb[i].flags & TLB_VALID) &&
1.429 + ((mmu_itlb[i].flags & TLB_SHARE) || mmu_asid == mmu_itlb[i].asid) &&
1.430 + ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
1.431 + if( result != -1 ) {
1.432 + return -2;
1.433 + }
1.434 + result = i;
1.435 + }
1.436 + }
1.437 +
1.438 + if( result == -1 ) {
1.439 + int utlbEntry = mmu_utlb_lookup_vpn_asid( vpn );
1.440 + if( utlbEntry < 0 ) {
1.441 + return utlbEntry;
1.442 + } else {
1.443 + return mmu_itlb_update_from_utlb( utlbEntry );
1.444 + }
1.445 + }
1.446 +
1.447 + switch( result ) {
1.448 + case 0: mmu_lrui = (mmu_lrui & 0x07); break;
1.449 + case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
1.450 + case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
1.451 + case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
1.452 + }
1.453 +
1.454 + return result;
1.455 +}
1.456 +
1.457 +/**
1.458 + * Perform the actual itlb lookup on vpn only
1.459 + * Possible outcomes are:
1.460 + * 0..3 Single match - good, return entry found
1.461 + * -1 No match - raise a tlb data miss exception
1.462 + * -2 Multiple matches - raise a multi-hit exception (reset)
1.463 + * @param vpn virtual address to resolve
1.464 + * @return the resultant ITLB entry, or an error.
1.465 + */
1.466 +static inline int mmu_itlb_lookup_vpn( uint32_t vpn )
1.467 +{
1.468 + int result = -1;
1.469 + unsigned int i;
1.470 +
1.471 + for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
1.472 + if( (mmu_itlb[i].flags & TLB_VALID) &&
1.473 + ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
1.474 + if( result != -1 ) {
1.475 + return -2;
1.476 + }
1.477 + result = i;
1.478 + }
1.479 + }
1.480 +
1.481 + if( result == -1 ) {
1.482 + int utlbEntry = mmu_utlb_lookup_vpn( vpn );
1.483 + if( utlbEntry < 0 ) {
1.484 + return utlbEntry;
1.485 + } else {
1.486 + return mmu_itlb_update_from_utlb( utlbEntry );
1.487 + }
1.488 + }
1.489 +
1.490 + switch( result ) {
1.491 + case 0: mmu_lrui = (mmu_lrui & 0x07); break;
1.492 + case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
1.493 + case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
1.494 + case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
1.495 + }
1.496 +
1.497 + return result;
1.498 +}
1.499 +
1.500 +sh4addr_t mmu_vma_to_phys_read( sh4vma_t addr )
1.501 +{
1.502 + uint32_t mmucr = MMIO_READ(MMU,MMUCR);
1.503 + if( addr & 0x80000000 ) {
1.504 + if( IS_SH4_PRIVMODE() ) {
1.505 + if( addr >= 0xE0000000 ) {
1.506 + return addr; /* P4 - passthrough */
1.507 + } else if( addr < 0xC0000000 ) {
1.508 + /* P1, P2 regions are pass-through (no translation) */
1.509 + return VMA_TO_EXT_ADDR(addr);
1.510 + }
1.511 + } else {
1.512 + if( addr >= 0xE0000000 && addr < 0xE4000000 &&
1.513 + ((mmucr&MMUCR_SQMD) == 0) ) {
1.514 + /* Conditional user-mode access to the store-queue (no translation) */
1.515 + return addr;
1.516 + }
1.517 + MMU_READ_ADDR_ERROR();
1.518 + return MMU_VMA_ERROR;
1.519 + }
1.520 + }
1.521 +
1.522 + if( (mmucr & MMUCR_AT) == 0 ) {
1.523 + return VMA_TO_EXT_ADDR(addr);
1.524 + }
1.525 +
1.526 + /* If we get this far, translation is required */
1.527 + int entryNo;
1.528 + if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
1.529 + entryNo = mmu_utlb_lookup_vpn_asid( addr );
1.530 + } else {
1.531 + entryNo = mmu_utlb_lookup_vpn( addr );
1.532 + }
1.533 +
1.534 + switch(entryNo) {
1.535 + case -1:
1.536 + MMU_TLB_READ_MISS_ERROR(addr);
1.537 + return MMU_VMA_ERROR;
1.538 + case -2:
1.539 + MMU_TLB_MULTI_HIT_ERROR(addr);
1.540 + return MMU_VMA_ERROR;
1.541 + default:
1.542 + if( (mmu_utlb[entryNo].flags & TLB_USERMODE) == 0 &&
1.543 + !IS_SH4_PRIVMODE() ) {
1.544 + /* protection violation */
1.545 + MMU_TLB_READ_PROT_ERROR(addr);
1.546 + return MMU_VMA_ERROR;
1.547 + }
1.548 +
1.549 + /* finally generate the target address */
1.550 + return (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
1.551 + (addr & (~mmu_utlb[entryNo].mask));
1.552 + }
1.553 +}
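To make the region checks above concrete: P1 (0x80000000) and P2 (0xA0000000) are untranslated mirrors whose top three address bits are simply stripped by VMA_TO_EXT_ADDR, P4 (0xE0000000) passes through for privileged code, and everything else goes to the UTLB when AT is set. Worked examples on the untranslated paths (physical targets per the usual Dreamcast memory map):

    /* 0x8C0010A0 (P1, cached mirror)   & 0x1FFFFFFF -> 0x0C0010A0 (main RAM)
     * 0xA05F8000 (P2, uncached mirror) & 0x1FFFFFFF -> 0x005F8000 (hw regs)
     * 0xE0000000.. (P4)                returned unchanged in privileged mode */
    sh4addr_t example = VMA_TO_EXT_ADDR( 0x8C0010A0 ); /* == 0x0C0010A0 */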
1.554 +
1.555 +sh4addr_t mmu_vma_to_phys_write( sh4vma_t addr )
1.556 +{
1.557 + uint32_t mmucr = MMIO_READ(MMU,MMUCR);
1.558 + if( addr & 0x80000000 ) {
1.559 + if( IS_SH4_PRIVMODE() ) {
1.560 + if( addr >= 0xE0000000 ) {
1.561 + return addr; /* P4 - passthrough */
1.562 + } else if( addr < 0xC0000000 ) {
1.563 + /* P1, P2 regions are pass-through (no translation) */
1.564 + return VMA_TO_EXT_ADDR(addr);
1.565 + }
1.566 + } else {
1.567 + if( addr >= 0xE0000000 && addr < 0xE4000000 &&
1.568 + ((mmucr&MMUCR_SQMD) == 0) ) {
1.569 + /* Conditional user-mode access to the store-queue (no translation) */
1.570 + return addr;
1.571 + }
1.572 + MMU_WRITE_ADDR_ERROR();
1.573 + return MMU_VMA_ERROR;
1.574 + }
1.575 + }
1.576 +
1.577 + if( (mmucr & MMUCR_AT) == 0 ) {
1.578 + return VMA_TO_EXT_ADDR(addr);
1.579 + }
1.580 +
1.581 + /* If we get this far, translation is required */
1.582 + int entryNo;
1.583 + if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
1.584 + entryNo = mmu_utlb_lookup_vpn_asid( addr );
1.585 + } else {
1.586 + entryNo = mmu_utlb_lookup_vpn( addr );
1.587 + }
1.588 +
1.589 + switch(entryNo) {
1.590 + case -1:
1.591 + MMU_TLB_WRITE_MISS_ERROR(addr);
1.592 + return MMU_VMA_ERROR;
1.593 + case -2:
1.594 + MMU_TLB_MULTI_HIT_ERROR(addr);
1.595 + return MMU_VMA_ERROR;
1.596 + default:
1.597 + if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
1.598 + : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
1.599 + /* protection violation */
1.600 + MMU_TLB_WRITE_PROT_ERROR(addr);
1.601 + return MMU_VMA_ERROR;
1.602 + }
1.603 +
1.604 + if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
1.605 + MMU_TLB_INITIAL_WRITE_ERROR(addr);
1.606 + return MMU_VMA_ERROR;
1.607 + }
1.608 +
1.609 + /* finally generate the target address */
1.610 + return (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
1.611 + (addr & (~mmu_utlb[entryNo].mask));
1.612 + }
1.613 +}
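The TLB_DIRTY test above implements the SH4 "initial page write" exception: the first store through a clean mapping traps, so a guest OS can mark the page dirty in its page tables before the store is allowed to proceed. The full write-permission cascade, extracted as a sketch (TLB_DIRTY and the EXC_ codes are defined elsewhere in the file/headers):

    /* Sketch: fault selection for a data write through a matched entry.
     * Returns 0 when the write may proceed. */
    static int utlb_write_fault( uint32_t flags, int privmode )
    {
        if( privmode ? ((flags & TLB_WRITABLE) == 0)
                     : ((flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) )
            return EXC_TLB_PROT_WRITE;  /* protection violation */
        if( (flags & TLB_DIRTY) == 0 )
            return EXC_INIT_PAGE_WRITE; /* first write to a clean page */
        return 0;
    }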
1.614 +
1.615 +/**
1.616 + * Update the icache for an untranslated address
1.617 + */
1.618 +void mmu_update_icache_phys( sh4addr_t addr )
1.619 +{
1.620 + if( (addr & 0x1C000000) == 0x0C000000 ) {
1.621 + /* Main ram */
1.622 + sh4_icache.page_vma = addr & 0xFF000000;
1.623 + sh4_icache.page_ppa = 0x0C000000;
1.624 + sh4_icache.mask = 0xFF000000;
1.625 + sh4_icache.page = sh4_main_ram;
1.626 + } else if( (addr & 0x1FE00000) == 0 ) {
1.627 + /* BIOS ROM */
1.628 + sh4_icache.page_vma = addr & 0xFFE00000;
1.629 + sh4_icache.page_ppa = 0;
1.630 + sh4_icache.mask = 0xFFE00000;
1.631 + sh4_icache.page = mem_get_region(0);
1.632 + } else {
1.633 + /* not supported */
1.634 + sh4_icache.page_vma = -1;
1.635 + }
1.636 +}
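The sh4_icache fields set above form a one-entry fetch-translation cache: page_vma and mask name the virtual page, page_ppa and page the physical backing. A sketch of the fast-path test this structure is built for (the consuming code lives elsewhere in the core):

    /* Sketch: cheap icache hit test before calling mmu_update_icache.
     * On a hit the host pointer for the instruction is
     *   sh4_icache.page + (vma - sh4_icache.page_vma). */
    static int icache_hit( sh4vma_t vma )
    {
        return (vma & sh4_icache.mask) == sh4_icache.page_vma;
    }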
1.637 +
1.638 +/**
1.639 + * Update the sh4_icache structure to describe the page(s) containing the
1.640 + * given vma. If the address does not reference a RAM/ROM region, the icache
1.641 + * will be invalidated instead.
1.642 + * If AT is on, the lookup may raise TLB exceptions normally (hence this
1.643 + * method should only be used immediately prior to execution of code), and
1.644 + * on a successful match sets the icache according to the matching TLB entry.
1.645 + * If AT is off, this method maps the entire referenced RAM/ROM region into
1.646 + * the icache.
1.647 + * @return TRUE if the update completed (successfully or otherwise), FALSE
1.648 + * if an exception was raised.
1.649 + */
1.650 +gboolean mmu_update_icache( sh4vma_t addr )
1.651 +{
1.652 + int entryNo;
1.653 + if( IS_SH4_PRIVMODE() ) {
1.654 + if( addr & 0x80000000 ) {
1.655 + if( addr < 0xC0000000 ) {
1.656 + /* P1, P2 and P4 regions are pass-through (no translation) */
1.657 + mmu_update_icache_phys(addr);
1.658 + return TRUE;
1.659 + } else if( addr >= 0xE0000000 && addr < 0xFFFFFF00 ) {
1.660 + MMU_READ_ADDR_ERROR();
1.661 + return FALSE;
1.662 + }
1.663 + }
1.664 +
1.665 + uint32_t mmucr = MMIO_READ(MMU,MMUCR);
1.666 + if( (mmucr & MMUCR_AT) == 0 ) {
1.667 + mmu_update_icache_phys(addr);
1.668 + return TRUE;
1.669 + }
1.670 +
1.671 + entryNo = mmu_itlb_lookup_vpn( addr );
1.672 + } else {
1.673 + if( addr & 0x80000000 ) {
1.674 + MMU_READ_ADDR_ERROR();
1.675 + return FALSE;
1.676 + }
1.677 +
1.678 + uint32_t mmucr = MMIO_READ(MMU,MMUCR);
1.679 + if( (mmucr & MMUCR_AT) == 0 ) {
1.680 + mmu_update_icache_phys(addr);
1.681 + return TRUE;
1.682 + }
1.683 +
1.684 + if( mmucr & MMUCR_SV ) {
1.685 + entryNo = mmu_itlb_lookup_vpn( addr );
1.686 + } else {
1.687 + entryNo = mmu_itlb_lookup_vpn_asid( addr );
1.688 + }
1.689 + if( entryNo >= 0 && (mmu_itlb[entryNo].flags & TLB_USERMODE) == 0 ) {
1.690 + MMU_TLB_READ_PROT_ERROR(addr);
1.691 + return FALSE;
1.692 + }
1.693 + }
1.694 +
1.695 + switch(entryNo) {
1.696 + case -1:
1.697 + MMU_TLB_READ_MISS_ERROR(addr);
1.698 + return FALSE;
1.699 + case -2:
1.700 + MMU_TLB_MULTI_HIT_ERROR(addr);
1.701 + return FALSE;
1.702 + default:
1.703 + sh4_icache.page_ppa = mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask;
1.704 + sh4_icache.page = mem_get_region( sh4_icache.page_ppa );
1.705 + if( sh4_icache.page == NULL ) {
1.706 + sh4_icache.page_vma = -1;
1.707 + } else {
1.708 + sh4_icache.page_vma = mmu_itlb[entryNo].vpn & mmu_itlb[entryNo].mask;
1.709 + sh4_icache.mask = mmu_itlb[entryNo].mask;
1.710 + }
1.711 + return TRUE;
1.712 + }
1.713 +}
1.714 +
1.715 +gboolean sh4_flush_store_queue( sh4addr_t addr )
1.716 +{
1.717 + uint32_t mmucr = MMIO_READ(MMU,MMUCR);
1.718 + int queue = (addr&0x20)>>2;
1.719 + sh4ptr_t src = (sh4ptr_t)&sh4r.store_queue[queue];
1.720 + sh4addr_t target;
1.721 + /* Store queue operation */
1.722 + if( mmucr & MMUCR_AT ) {
1.723 + int entryNo;
1.724 + if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
1.725 + entryNo = mmu_utlb_lookup_vpn_asid( addr );
1.726 + } else {
1.727 + entryNo = mmu_utlb_lookup_vpn( addr );
1.728 + }
1.729 + switch(entryNo) {
1.730 + case -1:
1.731 + MMU_TLB_WRITE_MISS_ERROR(addr);
1.732 + return FALSE;
1.733 + case -2:
1.734 + MMU_TLB_MULTI_HIT_ERROR(addr);
1.735 + return FALSE;
1.736 + default:
1.737 + if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
1.738 + : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
1.739 + /* protection violation */
1.740 + MMU_TLB_WRITE_PROT_ERROR(addr);
1.741 + return FALSE;
1.742 + }
1.743 +
1.744 + if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
1.745 + MMU_TLB_INITIAL_WRITE_ERROR(addr);
1.746 + return FALSE;
1.747 + }
1.748 +
1.749 + /* finally generate the target address */
1.750 + target = ((mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
1.751 + (addr & (~mmu_utlb[entryNo].mask))) & 0xFFFFFFE0;
1.752 + }
1.753 + } else {
1.754 + uint32_t hi = (MMIO_READ( MMU, (queue == 0 ? QACR0 : QACR1) ) & 0x1C) << 24;
1.755 + target = (addr&0x03FFFFE0) | hi;
1.756 + }
1.757 + mem_copy_to_sh4( target, src, 32 );
1.758 + return TRUE;
1.759 +}
1.760 +
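On the store-queue flush: bit 5 of the prefetch address selects SQ0 or SQ1 ((addr&0x20)>>2 yields a word offset of 0 or 8 into sh4r.store_queue), and with address translation off the external target splices QACR bits 4:2 into address bits 28:26. A worked example with a hypothetical QACR1 value of 0x10:

    /* addr   = 0xE1000060               (bit 5 set -> queue 1)
     * queue  = (addr & 0x20) >> 2       = 8 (word offset of SQ1)
     * hi     = (0x10 & 0x1C) << 24      = 0x10000000
     * target = (addr & 0x03FFFFE0) | hi = 0x01000060 | 0x10000000
     *        = 0x11000060; the 32 bytes of SQ1 are copied there. */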