lxdream.org :: lxdream/src/sh4/mmu.c :: diff
filename     src/sh4/mmu.c
changeset    559:06714bc64271
prev         550:a27e31340147
next         561:533f6b478071
author       nkeynes
date         Tue Jan 01 04:58:57 2008 +0000
branch       lxdream-mmu
permissions  -rw-r--r--
last change  Commit first pass at full TLB support - still needs a lot more work
1.1 --- a/src/sh4/mmu.c Thu Dec 06 10:43:30 2007 +0000
1.2 +++ b/src/sh4/mmu.c Tue Jan 01 04:58:57 2008 +0000
1.3 @@ -32,6 +32,7 @@
1.4 #define TLB_VALID 0x00000100
1.5 #define TLB_USERMODE 0x00000040
1.6 #define TLB_WRITABLE 0x00000020
1.7 +#define TLB_USERWRITABLE (TLB_WRITABLE|TLB_USERMODE)
1.8 #define TLB_SIZE_MASK 0x00000090
1.9 #define TLB_SIZE_1K 0x00000000
1.10 #define TLB_SIZE_4K 0x00000010
1.11 @@ -42,16 +43,22 @@
1.12 #define TLB_SHARE 0x00000002
1.13 #define TLB_WRITETHRU 0x00000001
1.14
1.15 +#define MASK_1K 0xFFFFFC00
1.16 +#define MASK_4K 0xFFFFF000
1.17 +#define MASK_64K 0xFFFF0000
1.18 +#define MASK_1M 0xFFF00000
1.19
1.20 struct itlb_entry {
1.21 sh4addr_t vpn; // Virtual Page Number
1.22 uint32_t asid; // Process ID
1.23 + uint32_t mask;
1.24 sh4addr_t ppn; // Physical Page Number
1.25 uint32_t flags;
1.26 };
1.27
1.28 struct utlb_entry {
1.29 sh4addr_t vpn; // Virtual Page Number
1.30 + uint32_t mask; // Page size mask
1.31 uint32_t asid; // Process ID
1.32 sh4addr_t ppn; // Physical Page Number
1.33 uint32_t flags;
1.34 @@ -69,6 +76,16 @@
1.35 static void mmu_invalidate_tlb();
1.36
1.37
1.38 +static uint32_t get_mask_for_flags( uint32_t flags )
1.39 +{
1.40 + switch( flags & TLB_SIZE_MASK ) {
1.41 + case TLB_SIZE_1K: return MASK_1K;
1.42 + case TLB_SIZE_4K: return MASK_4K;
1.43 + case TLB_SIZE_64K: return MASK_64K;
1.44 + case TLB_SIZE_1M: return MASK_1M;
1.45 +    default: return MASK_4K; } /* default unreachable - TLB_SIZE_MASK admits only the four cases above */
1.46 +}
1.47 +
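
For illustration, the mask computed here drives every VPN comparison below: two addresses fall in the same page exactly when their bits agree under the page-size mask. A minimal standalone sketch of that test (hypothetical values, not part of the changeset):

    #include <stdint.h>
    #include <assert.h>

    /* Same-page test used by the TLB lookups: high bits must agree under the mask */
    static int same_page( uint32_t a, uint32_t b, uint32_t mask )
    {
        return ((a ^ b) & mask) == 0;
    }

    int main(void)
    {
        /* 64K page at 0x0C010000 covers 0x0C010000..0x0C01FFFF */
        assert( same_page( 0x0C010000, 0x0C01A5F4, 0xFFFF0000 ) );   /* hit  (MASK_64K) */
        assert( !same_page( 0x0C010000, 0x0C020000, 0xFFFF0000 ) );  /* miss */
        return 0;
    }
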
1.48 int32_t mmio_region_MMU_read( uint32_t reg )
1.49 {
1.50 switch( reg ) {
1.51 @@ -125,6 +142,9 @@
1.52 fwrite( cache, 4096, 2, f );
1.53 fwrite( &mmu_itlb, sizeof(mmu_itlb), 1, f );
1.54 fwrite( &mmu_utlb, sizeof(mmu_utlb), 1, f );
1.55 + fwrite( &mmu_urc, sizeof(mmu_urc), 1, f );
1.56 + fwrite( &mmu_urb, sizeof(mmu_urb), 1, f );
1.57 + fwrite( &mmu_lrui, sizeof(mmu_lrui), 1, f );
1.58 }
1.59
1.60 int MMU_load_state( FILE *f )
1.61 @@ -142,6 +162,15 @@
1.62 if( fread( &mmu_utlb, sizeof(mmu_utlb), 1, f ) != 1 ) {
1.63 return 1;
1.64 }
1.65 + if( fread( &mmu_urc, sizeof(mmu_urc), 1, f ) != 1 ) {
1.66 + return 1;
1.67 + }
1.68 +    if( fread( &mmu_urb, sizeof(mmu_urb), 1, f ) != 1 ) {
1.69 + return 1;
1.70 + }
1.71 + if( fread( &mmu_lrui, sizeof(mmu_lrui), 1, f ) != 1 ) {
1.72 + return 1;
1.73 + }
1.74 return 0;
1.75 }
1.76
1.77 @@ -177,41 +206,389 @@
1.78 mmu_utlb[mmu_urc].ppn = MMIO_READ(MMU, PTEL) & 0x1FFFFC00;
1.79 mmu_utlb[mmu_urc].flags = MMIO_READ(MMU, PTEL) & 0x00001FF;
1.80 mmu_utlb[mmu_urc].pcmcia = MMIO_READ(MMU, PTEA);
1.81 + mmu_utlb[mmu_urc].mask = get_mask_for_flags(mmu_utlb[mmu_urc].flags);
1.82 }
1.83
1.84 -uint64_t mmu_translate_read( sh4addr_t addr )
1.85 +static inline void mmu_flush_pages( struct utlb_entry *ent )
1.86 {
1.87 - uint32_t mmucr = MMIO_READ(MMU,MMUCR);
1.88 - if( IS_SH4_PRIVMODE() ) {
1.89 - switch( addr & 0xE0000000 ) {
1.90 - case 0x80000000: case 0xA0000000:
1.91 - /* Non-translated read P1,P2 */
1.92 - break;
1.93 - case 0xE0000000:
1.94 - /* Non-translated read P4 */
1.95 - break;
1.96 - default:
1.97 - if( mmucr&MMUCR_AT ) {
1.98 - } else {
1.99 - // direct read
1.100 + unsigned int vpn;
1.101 + switch( ent->flags & TLB_SIZE_MASK ) {
1.102 + case TLB_SIZE_1K: xlat_flush_page( ent->vpn ); break;
1.103 + case TLB_SIZE_4K: xlat_flush_page( ent->vpn ); break;
1.104 + case TLB_SIZE_64K:
1.105 + for( vpn = ent->vpn; vpn < ent->vpn + 0x10000; vpn += 0x1000 ) {
1.106 + xlat_flush_page( vpn );
1.107 + }
1.108 + break;
1.109 + case TLB_SIZE_1M:
1.110 + for( vpn = ent->vpn; vpn < ent->vpn + 0x100000; vpn += 0x1000 ) {
1.111 + xlat_flush_page( vpn );
1.112 + }
1.113 + break;
1.114 + }
1.115 +}
1.116 +
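
The switch above unrolls one loop per page size; since each entry now caches its mask, the same flush can be expressed generically. A sketch under the assumption that xlat_flush_page() invalidates one 4K translation-cache page (which is why the larger sizes step by 0x1000):

    /* sketch only - equivalent generic form of mmu_flush_pages */
    static inline void mmu_flush_pages_generic( struct utlb_entry *ent )
    {
        uint32_t size = ~ent->mask + 1;                    /* 0x400 .. 0x100000 */
        uint32_t vpn  = ent->vpn & ent->mask;
        uint32_t end  = vpn + (size < 0x1000 ? 0x1000 : size);
        for( ; vpn < end; vpn += 0x1000 )                  /* 4K translation-cache granularity */
            xlat_flush_page( vpn );
    }
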
1.117 +/**
1.118 + * The translations are excessively complicated, but unfortunately it's a
1.119 + * complicated system. It can undoubtedly be better optimized too.
1.120 + */
1.121 +
1.122 +/**
1.123 + * Perform the actual utlb lookup.
1.124 + * Possible outcomes are:
1.125 + * 0..63 Single match - good, return entry found
1.126 + * -1 No match - raise a tlb data miss exception
1.127 + * -2 Multiple matches - raise a multi-hit exception (reset)
1.128 + * @param vpn virtual address to resolve
1.129 + * @param asid Address space identifier
1.130 + * @param use_asid whether to require an asid match on non-shared pages.
1.131 + * @return the index of the matching UTLB entry, or an error code.
1.132 + */
1.133 +static inline int mmu_utlb_lookup_vpn( uint32_t vpn, uint32_t asid, int use_asid )
1.134 +{
1.135 + int result = -1;
1.136 + unsigned int i;
1.137 +
1.138 + mmu_urc++;
1.139 + if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
1.140 + mmu_urc = 0;
1.141 + }
1.142 +
1.143 + if( use_asid ) {
1.144 + for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
1.145 + if( (mmu_utlb[i].flags & TLB_VALID) &&
1.146 + ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
1.147 + ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
1.148 + if( result != -1 ) {
1.149 + return -2;
1.150 + }
1.151 + result = i;
1.152 }
1.153 }
1.154 } else {
1.155 - if( addr & 0x80000000 ) {
1.156 - if( ((addr&0xFC000000) == 0xE0000000 ) &&
1.157 - ((mmucr&MMUCR_SQMD) == 0) ) {
1.158 - // Store queue
1.159 - return 0;
1.160 + for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
1.161 + if( (mmu_utlb[i].flags & TLB_VALID) &&
1.162 + ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
1.163 + if( result != -1 ) {
1.164 + return -2;
1.165 + }
1.166 + result = i;
1.167 }
1.168 -// MMU_READ_ADDR_ERROR();
1.169 - }
1.170 - if( mmucr&MMUCR_AT ) {
1.171 - uint32_t vpn = addr & 0xFFFFFC00;
1.172 - uint32_t asid = MMIO_READ(MMU,PTEH)&0xFF;
1.173 - } else {
1.174 - // direct read
1.175 }
1.176 }
1.177 + return result;
1.178 +}
1.179 +
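
Note that mmu_urc doubles as the LDTLB replacement pointer, so every lookup advances it, wrapping either at the URB bound or at the 64-entry UTLB limit; mmu_ldtlb() then loads PTEH/PTEL into mmu_utlb[mmu_urc]. The wrap rule in isolation (illustration only):

    #include <stdint.h>
    #include <assert.h>

    static uint32_t urc_next( uint32_t urc, uint32_t urb )
    {
        urc++;
        if( urc == urb || urc == 0x40 )   /* wrap at URB, or at the hard 64-entry limit */
            urc = 0;
        return urc;
    }

    int main(void)
    {
        assert( urc_next( 5, 7 ) == 6 );
        assert( urc_next( 6, 7 ) == 0 );     /* hits URB */
        assert( urc_next( 0x3F, 0 ) == 0 );  /* URB=0: wraps at the 0x40 limit */
        return 0;
    }
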
1.180 +/**
1.181 + * Find a UTLB entry for the associative TLB write - same as the normal
1.182 + * lookup but ignores the valid bit.
1.183 + */
1.184 +static inline int mmu_utlb_lookup_assoc( uint32_t vpn, uint32_t asid )
1.185 +{
1.186 + int result = -1;
1.187 + unsigned int i;
1.188 + for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
1.189 + if( ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
1.190 + ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
1.191 + if( result != -1 ) {
1.192 + return -2;
1.193 + }
1.194 + result = i;
1.195 + }
1.196 + }
1.197 + return result;
1.198 +}
1.199 +
1.200 +/**
1.201 + * Perform the actual itlb lookup.
1.202 + * Possible outcomes are:
1.203 + * 0..3 Single match - good, return entry found
1.204 + * -1 No match - raise a tlb data miss exception
1.205 + * -2 Multiple matches - raise a multi-hit exception (reset)
1.206 + * @param vpn virtual address to resolve
1.207 + * @param asid Address space identifier
1.208 + * @param use_asid whether to require an asid match on non-shared pages.
1.209 + * @return the index of the matching ITLB entry, or an error code.
1.210 + */
1.211 +static inline int mmu_itlb_lookup_vpn( uint32_t vpn, uint32_t asid, int use_asid )
1.212 +{
1.213 + int result = -1;
1.214 + unsigned int i;
1.215 + if( use_asid ) {
1.216 + for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
1.217 + if( (mmu_itlb[i].flags & TLB_VALID) &&
1.218 + ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
1.219 + ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
1.220 + if( result != -1 ) {
1.221 + return -2;
1.222 + }
1.223 + result = i;
1.224 + }
1.225 + }
1.226 + } else {
1.227 + for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
1.228 + if( (mmu_itlb[i].flags & TLB_VALID) &&
1.229 + ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
1.230 + if( result != -1 ) {
1.231 + return -2;
1.232 + }
1.233 + result = i;
1.234 + }
1.235 + }
1.236 + }
1.237 +
1.238 + switch( result ) {
1.239 + case 0: mmu_lrui = (mmu_lrui & 0x07); break;
1.240 + case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
1.241 + case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
1.242 + case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
1.243 + }
1.244 +
1.245 + return result;
1.246 +}
1.247 +
1.248 +static inline int mmu_itlb_update_from_utlb( int entryNo )
1.249 +{
1.250 + int replace;
1.251 + /* Determine entry to replace based on lrui */
1.252 +    if( (mmu_lrui & 0x38) == 0x38 ) {
1.253 + replace = 0;
1.254 + mmu_lrui = mmu_lrui & 0x07;
1.255 + } else if( (mmu_lrui & 0x26) == 0x06 ) {
1.256 + replace = 1;
1.257 + mmu_lrui = (mmu_lrui & 0x19) | 0x20;
1.258 + } else if( (mmu_lrui & 0x15) == 0x01 ) {
1.259 + replace = 2;
1.260 + mmu_lrui = (mmu_lrui & 0x3E) | 0x14;
1.261 + } else { // Note - gets invalid entries too
1.262 + replace = 3;
1.263 + mmu_lrui = (mmu_lrui | 0x0B);
1.264 + }
1.265 +
1.266 + mmu_itlb[replace].vpn = mmu_utlb[entryNo].vpn;
1.267 + mmu_itlb[replace].mask = mmu_utlb[entryNo].mask;
1.268 + mmu_itlb[replace].ppn = mmu_utlb[entryNo].ppn;
1.269 + mmu_itlb[replace].asid = mmu_utlb[entryNo].asid;
1.270 + mmu_itlb[replace].flags = mmu_utlb[entryNo].flags & 0x01DA;
1.271 + return replace;
1.272 +}
1.273 +
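
The LRUI masks above implement the SH7750's pairwise-recency encoding: each of the six bits orders one pair of the four ITLB entries (bit 5: 0 vs 1, bit 4: 0 vs 2, bit 3: 0 vs 3, bit 2: 1 vs 2, bit 1: 1 vs 3, bit 0: 2 vs 3), and an entry is the replacement victim when every pairwise bit says it is the older one. A standalone self-test of the two halves, using the same constants as above (illustration only):

    #include <stdint.h>
    #include <assert.h>

    /* Mark one of the four entries most recently used */
    static uint32_t touch( uint32_t lrui, int entry )
    {
        switch( entry ) {
        case 0: return lrui & 0x07;
        case 1: return (lrui & 0x19) | 0x20;
        case 2: return (lrui & 0x3E) | 0x14;
        default: return lrui | 0x0B;
        }
    }

    /* Pick the replacement victim, as mmu_itlb_update_from_utlb does */
    static int victim( uint32_t lrui )
    {
        if( (lrui & 0x38) == 0x38 ) return 0;
        if( (lrui & 0x26) == 0x06 ) return 1;
        if( (lrui & 0x15) == 0x01 ) return 2;
        return 3;
    }

    int main(void)
    {
        uint32_t lrui = 0;
        /* touch 3,2,1,0 in order: entry 3 ends up least recently used */
        lrui = touch(lrui,3); lrui = touch(lrui,2);
        lrui = touch(lrui,1); lrui = touch(lrui,0);
        assert( victim(lrui) == 3 );
        return 0;
    }
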
1.274 +/**
1.275 + * Find a ITLB entry for the associative TLB write - same as the normal
1.276 + * lookup but ignores the valid bit.
1.277 + */
1.278 +static inline int mmu_itlb_lookup_assoc( uint32_t vpn, uint32_t asid )
1.279 +{
1.280 + int result = -1;
1.281 + unsigned int i;
1.282 + for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
1.283 + if( ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
1.284 + ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
1.285 + if( result != -1 ) {
1.286 + return -2;
1.287 + }
1.288 + result = i;
1.289 + }
1.290 + }
1.291 + return result;
1.292 +}
1.293 +
1.294 +#define RAISE_TLB_ERROR(code, vpn) \
1.295 + MMIO_WRITE(MMU, TEA, vpn); \
1.296 + MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
1.297 + sh4_raise_tlb_exception(code); \
1.298 + return (((uint64_t)code)<<32)
1.299 +
1.300 +#define RAISE_MEM_ERROR(code, vpn) \
1.301 + MMIO_WRITE(MMU, TEA, vpn); \
1.302 + MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
1.303 + sh4_raise_exception(code); \
1.304 + return (((uint64_t)code)<<32)
1.305 +
1.306 +#define RAISE_OTHER_ERROR(code) \
1.307 + sh4_raise_exception(code); \
1.308 + return (((uint64_t)EXV_EXCEPTION)<<32)
1.309 +
1.310 +/**
1.311 + * Abort with a non-MMU address error. Caused by user-mode code attempting
1.312 + * to access privileged regions, or alignment faults.
1.313 + */
1.314 +#define MMU_READ_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_READ)
1.315 +#define MMU_WRITE_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_WRITE)
1.316 +
1.317 +#define MMU_TLB_READ_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_READ, vpn)
1.318 +#define MMU_TLB_WRITE_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, vpn)
1.319 +#define MMU_TLB_INITIAL_WRITE_ERROR(vpn) RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, vpn)
1.320 +#define MMU_TLB_READ_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_READ, vpn)
1.321 +#define MMU_TLB_WRITE_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, vpn)
1.322 +#define MMU_TLB_MULTI_HIT_ERROR(vpn) sh4_raise_reset(EXC_TLB_MULTI_HIT); \
1.323 + MMIO_WRITE(MMU, TEA, vpn); \
1.324 + MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
1.325 + return (((uint64_t)EXC_TLB_MULTI_HIT)<<32)
1.326 +
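
These macros give all of the translation functions below a packed return convention: the low 32 bits carry the physical address, and a nonzero high word is the exception code that was just raised. A hypothetical caller (mmu_read_long and sh4_read_long are illustrative names, not part of this changeset) would unpack it like this:

    /* sketch of a caller under the packed-uint64_t convention above */
    int32_t mmu_read_long( sh4addr_t vma )
    {
        uint64_t result = mmu_vma_to_phys_read( vma );
        if( (uint32_t)(result >> 32) != 0 ) {
            return 0;   /* exception already raised - abort the access */
        }
        return sh4_read_long( (sh4addr_t)(uint32_t)result );   /* assumed physical accessor */
    }
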
1.327 +uint64_t mmu_vma_to_phys_write( sh4addr_t addr )
1.328 +{
1.329 + uint32_t mmucr = MMIO_READ(MMU,MMUCR);
1.330 + if( addr & 0x80000000 ) {
1.331 + if( IS_SH4_PRIVMODE() ) {
1.332 + if( addr < 0xC0000000 || addr >= 0xE0000000 ) {
1.333 + /* P1, P2 and P4 regions are pass-through (no translation) */
1.334 + return (uint64_t)addr;
1.335 + }
1.336 + } else {
1.337 + if( addr >= 0xE0000000 && addr < 0xE4000000 &&
1.338 + ((mmucr&MMUCR_SQMD) == 0) ) {
1.339 + /* Conditional user-mode access to the store-queue (no translation) */
1.340 + return (uint64_t)addr;
1.341 + }
1.342 + MMU_WRITE_ADDR_ERROR();
1.343 + }
1.344 + }
1.345 +
1.346 + if( (mmucr & MMUCR_AT) == 0 ) {
1.347 + return (uint64_t)addr;
1.348 + }
1.349 +
1.350 + /* If we get this far, translation is required */
1.351 +
1.352 + int use_asid = ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE();
1.353 + uint32_t asid = MMIO_READ( MMU, PTEH ) & 0xFF;
1.354 +
1.355 + int entryNo = mmu_utlb_lookup_vpn( addr, asid, use_asid );
1.356 +
1.357 + switch(entryNo) {
1.358 + case -1:
1.359 + MMU_TLB_WRITE_MISS_ERROR(addr);
1.360 + break;
1.361 + case -2:
1.362 + MMU_TLB_MULTI_HIT_ERROR(addr);
1.363 + break;
1.364 + default:
1.365 + if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
1.366 + : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
1.367 + /* protection violation */
1.368 + MMU_TLB_WRITE_PROT_ERROR(addr);
1.369 + }
1.370 +
1.371 + if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
1.372 + MMU_TLB_INITIAL_WRITE_ERROR(addr);
1.373 + }
1.374 +
1.375 + /* finally generate the target address */
1.376 + return (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
1.377 + (addr & (~mmu_utlb[entryNo].mask));
1.378 + }
1.379 + return -1;
1.380 +
1.381 +}
1.382 +
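
The region checks in mmu_vma_to_phys_write encode the standard SH4 address map: P0/U0 (0x00000000-0x7FFFFFFF) and P3 (0xC0000000-0xDFFFFFFF) are translated when MMUCR.AT is set; P1/P2 (0x80000000-0xBFFFFFFF) and P4 (0xE0000000-0xFFFFFFFF) are never translated; and user mode may touch only U0, plus the 0xE0000000-0xE3FFFFFF store-queue window when MMUCR.SQMD is clear. The same decode as a classifier sketch (illustration only, not part of the changeset):

    typedef enum { REGION_TRANSLATED, REGION_PASSTHROUGH, REGION_CONTROL } sh4_region_t;

    static sh4_region_t classify_region( uint32_t addr )
    {
        switch( addr >> 29 ) {
        case 4: case 5: return REGION_PASSTHROUGH;   /* P1 0x80-, P2 0xA0- */
        case 6:         return REGION_TRANSLATED;    /* P3 0xC0- */
        case 7:         return REGION_CONTROL;       /* P4 0xE0- (store queues at 0xE0-0xE3) */
        default:        return REGION_TRANSLATED;    /* P0/U0 0x00-0x7F */
        }
    }
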
1.383 +uint64_t mmu_vma_to_phys_exec( sh4addr_t addr )
1.384 +{
1.385 + uint32_t mmucr = MMIO_READ(MMU,MMUCR);
1.386 + if( addr & 0x80000000 ) {
1.387 + if( IS_SH4_PRIVMODE() ) {
1.388 + if( addr < 0xC0000000 ) {
1.389 + /* P1, P2 and P4 regions are pass-through (no translation) */
1.390 + return (uint64_t)addr;
1.391 + } else if( addr >= 0xE0000000 ) {
1.392 + MMU_READ_ADDR_ERROR();
1.393 + }
1.394 + } else {
1.395 + MMU_READ_ADDR_ERROR();
1.396 + }
1.397 + }
1.398 +
1.399 + if( (mmucr & MMUCR_AT) == 0 ) {
1.400 + return (uint64_t)addr;
1.401 + }
1.402 +
1.403 + /* If we get this far, translation is required */
1.404 + int use_asid = ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE();
1.405 + uint32_t asid = MMIO_READ( MMU, PTEH ) & 0xFF;
1.406 +
1.407 + int entryNo = mmu_itlb_lookup_vpn( addr, asid, use_asid );
1.408 + if( entryNo == -1 ) {
1.409 + entryNo = mmu_utlb_lookup_vpn( addr, asid, use_asid );
1.410 + if( entryNo >= 0 ) {
1.411 + entryNo = mmu_itlb_update_from_utlb( entryNo );
1.412 + }
1.413 + }
1.414 + switch(entryNo) {
1.415 + case -1:
1.416 + MMU_TLB_READ_MISS_ERROR(addr);
1.417 + break;
1.418 + case -2:
1.419 + MMU_TLB_MULTI_HIT_ERROR(addr);
1.420 + break;
1.421 + default:
1.422 + if( (mmu_itlb[entryNo].flags & TLB_USERMODE) == 0 &&
1.423 + !IS_SH4_PRIVMODE() ) {
1.424 + /* protection violation */
1.425 + MMU_TLB_READ_PROT_ERROR(addr);
1.426 + }
1.427 +
1.428 + /* finally generate the target address */
1.429 + return (mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask) |
1.430 + (addr & (~mmu_itlb[entryNo].mask));
1.431 + }
1.432 + return -1;
1.433 +}
1.434 +
1.435 +uint64_t mmu_vma_to_phys_read_noexc( sh4addr_t addr ) {
1.436 +    /* TODO: non-faulting translation not yet implemented in this first pass */
1.437 +
1.438 +}
1.439 +
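
The _noexc body is left empty in this changeset. One plausible shape, sketched here only for orientation, is the read path with the exception macros replaced by a flagged return (a real version would probably also avoid bumping mmu_urc for a speculative probe):

    /* sketch only - not the author's implementation */
    uint64_t mmu_vma_to_phys_read_noexc_sketch( sh4addr_t addr )
    {
        uint32_t mmucr = MMIO_READ(MMU,MMUCR);
        if( (addr & 0x80000000) || (mmucr & MMUCR_AT) == 0 ) {
            return (uint64_t)addr;                    /* pass-through, deliberately simplified */
        }
        int use_asid = ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE();
        uint32_t asid = MMIO_READ( MMU, PTEH ) & 0xFF;
        int entryNo = mmu_utlb_lookup_vpn( addr, asid, use_asid );
        if( entryNo < 0 ) {
            return ((uint64_t)1)<<32;                 /* miss or multi-hit: flag, raise nothing */
        }
        return (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
               (addr & (~mmu_utlb[entryNo].mask));
    }
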
1.440 +
1.441 +uint64_t mmu_vma_to_phys_read( sh4addr_t addr )
1.442 +{
1.443 + uint32_t mmucr = MMIO_READ(MMU,MMUCR);
1.444 + if( addr & 0x80000000 ) {
1.445 + if( IS_SH4_PRIVMODE() ) {
1.446 + if( addr < 0xC0000000 || addr >= 0xE0000000 ) {
1.447 + /* P1, P2 and P4 regions are pass-through (no translation) */
1.448 + return (uint64_t)addr;
1.449 + }
1.450 + } else {
1.451 + if( addr >= 0xE0000000 && addr < 0xE4000000 &&
1.452 + ((mmucr&MMUCR_SQMD) == 0) ) {
1.453 + /* Conditional user-mode access to the store-queue (no translation) */
1.454 + return (uint64_t)addr;
1.455 + }
1.456 + MMU_READ_ADDR_ERROR();
1.457 + }
1.458 + }
1.459 +
1.460 + if( (mmucr & MMUCR_AT) == 0 ) {
1.461 + return (uint64_t)addr;
1.462 + }
1.463 +
1.464 + /* If we get this far, translation is required */
1.465 +
1.466 + int use_asid = ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE();
1.467 + uint32_t asid = MMIO_READ( MMU, PTEH ) & 0xFF;
1.468 +
1.469 + int entryNo = mmu_utlb_lookup_vpn( addr, asid, use_asid );
1.470 +
1.471 + switch(entryNo) {
1.472 + case -1:
1.473 + MMU_TLB_READ_MISS_ERROR(addr);
1.474 + break;
1.475 + case -2:
1.476 + MMU_TLB_MULTI_HIT_ERROR(addr);
1.477 + break;
1.478 + default:
1.479 + if( (mmu_utlb[entryNo].flags & TLB_USERMODE) == 0 &&
1.480 + !IS_SH4_PRIVMODE() ) {
1.481 + /* protection violation */
1.482 + MMU_TLB_READ_PROT_ERROR(addr);
1.483 + }
1.484 +
1.485 + /* finally generate the target address */
1.486 + return (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
1.487 + (addr & (~mmu_utlb[entryNo].mask));
1.488 + }
1.489 + return -1;
1.490 }
1.491
1.492 static void mmu_invalidate_tlb()
1.493 @@ -251,6 +628,7 @@
1.494 struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
1.495 ent->ppn = val & 0x1FFFFC00;
1.496 ent->flags = val & 0x00001DA;
1.497 + ent->mask = get_mask_for_flags(val);
1.498 }
1.499
1.500 #define UTLB_ENTRY(addr) ((addr>>8)&0x3F)
1.501 @@ -276,6 +654,16 @@
1.502 void mmu_utlb_addr_write( sh4addr_t addr, uint32_t val )
1.503 {
1.504 if( UTLB_ASSOC(addr) ) {
1.505 + uint32_t asid = MMIO_READ( MMU, PTEH ) & 0xFF;
1.506 + int entryNo = mmu_utlb_lookup_assoc( val, asid );
1.507 + if( entryNo >= 0 ) {
1.508 + struct utlb_entry *ent = &mmu_utlb[entryNo];
1.509 + ent->flags = ent->flags & ~(TLB_DIRTY|TLB_VALID);
1.510 + ent->flags |= (val & TLB_VALID);
1.511 + ent->flags |= ((val & 0x200)>>7);
1.512 + } else if( entryNo == -2 ) {
1.513 +        { MMIO_WRITE(MMU, TEA, addr); MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (addr&0xFFFFFC00))); sh4_raise_reset(EXC_TLB_MULTI_HIT); } /* MMU_TLB_MULTI_HIT_ERROR returns a value - inlined here for this void function */
1.514 + }
1.515 } else {
1.516 struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
1.517 ent->vpn = (val & 0xFFFFFC00);
1.518 @@ -294,6 +682,7 @@
1.519 } else {
1.520 ent->ppn = (val & 0x1FFFFC00);
1.521 ent->flags = (val & 0x000001FF);
1.522 + ent->mask = get_mask_for_flags(val);
1.523 }
1.524 }
1.525
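
One detail worth calling out in the associative write path above: in the value written to the UTLB address array, V sits at bit 8 and D at bit 9, while in the entry flags (PTEL layout) the dirty bit lives at bit 2 - hence the `(val & 0x200)>>7`. A tiny worked check:

    #include <stdint.h>
    #include <assert.h>

    int main(void)
    {
        uint32_t val = 0x300;                       /* address-array write with V (bit 8) and D (bit 9) set */
        uint32_t flags = (val & 0x100)              /* V stays at bit 8 (TLB_VALID) */
                       | ((val & 0x200) >> 7);      /* D moves from bit 9 to bit 2 */
        assert( flags == 0x104 );
        return 0;
    }
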