1.1 --- a/src/sh4/mmu.c Thu Dec 06 10:43:30 2007 +0000
1.2 +++ b/src/sh4/mmu.c Tue Jan 01 04:58:57 2008 +0000
1.4 #define TLB_VALID 0x00000100
1.5 #define TLB_USERMODE 0x00000040
1.6 #define TLB_WRITABLE 0x00000020
1.7 +#define TLB_USERWRITABLE (TLB_WRITABLE|TLB_USERMODE)
1.8 #define TLB_SIZE_MASK 0x00000090
1.9 #define TLB_SIZE_1K 0x00000000
1.10 #define TLB_SIZE_4K 0x00000010
1.12 #define TLB_SHARE 0x00000002
1.13 #define TLB_WRITETHRU 0x00000001
1.15 +#define MASK_1K 0xFFFFFC00
1.16 +#define MASK_4K 0xFFFFF000
1.17 +#define MASK_64K 0xFFFF0000
1.18 +#define MASK_1M 0xFFF00000
1.21 sh4addr_t vpn; // Virtual Page Number
1.22 uint32_t asid; // Process ID
1.24 sh4addr_t ppn; // Physical Page Number
1.29 sh4addr_t vpn; // Virtual Page Number
1.30 + uint32_t mask; // Page size mask
1.31 uint32_t asid; // Process ID
1.32 sh4addr_t ppn; // Physical Page Number
1.35 static void mmu_invalidate_tlb();
1.38 +static uint32_t get_mask_for_flags( uint32_t flags )
1.40 + switch( flags & TLB_SIZE_MASK ) {
1.41 + case TLB_SIZE_1K: return MASK_1K;
1.42 + case TLB_SIZE_4K: return MASK_4K;
1.43 + case TLB_SIZE_64K: return MASK_64K;
1.44 + case TLB_SIZE_1M: return MASK_1M;
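
A sketch (not part of the patch) of how the cached mask is consumed: the lookup routines further down treat an entry as a hit when the address agrees with the entry's VPN on every bit covered by the mask. Values below are assumed for illustration only:

    /* Sketch of the match test used by mmu_utlb_lookup_vpn() below */
    static int mmu_vpn_matches( uint32_t entry_vpn, uint32_t entry_mask, uint32_t addr )
    {
        return ((entry_vpn ^ addr) & entry_mask) == 0;
    }
    /* e.g. a 64K entry with vpn 0x7C120000 (mask MASK_64K) matches addr 0x7C12ABCD */
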
1.48 int32_t mmio_region_MMU_read( uint32_t reg )
1.52 fwrite( cache, 4096, 2, f );
1.53 fwrite( &mmu_itlb, sizeof(mmu_itlb), 1, f );
1.54 fwrite( &mmu_utlb, sizeof(mmu_utlb), 1, f );
1.55 + fwrite( &mmu_urc, sizeof(mmu_urc), 1, f );
1.56 + fwrite( &mmu_urb, sizeof(mmu_urb), 1, f );
1.57 + fwrite( &mmu_lrui, sizeof(mmu_lrui), 1, f );
1.60 int MMU_load_state( FILE *f )
1.61 @@ -142,6 +162,15 @@
1.62 if( fread( &mmu_utlb, sizeof(mmu_utlb), 1, f ) != 1 ) {
1.65 + if( fread( &mmu_urc, sizeof(mmu_urc), 1, f ) != 1 ) {
1.68 + if( fread( &mmu_urb, sizeof(mmu_urb), 1, f ) != 1 ) {
1.71 + if( fread( &mmu_lrui, sizeof(mmu_lrui), 1, f ) != 1 ) {
1.77 @@ -177,41 +206,389 @@
1.78 mmu_utlb[mmu_urc].ppn = MMIO_READ(MMU, PTEL) & 0x1FFFFC00;
1.79 mmu_utlb[mmu_urc].flags = MMIO_READ(MMU, PTEL) & 0x00001FF;
1.80 mmu_utlb[mmu_urc].pcmcia = MMIO_READ(MMU, PTEA);
1.81 + mmu_utlb[mmu_urc].mask = get_mask_for_flags(mmu_utlb[mmu_urc].flags);
1.84 -uint64_t mmu_translate_read( sh4addr_t addr )
1.85 +static inline void mmu_flush_pages( struct utlb_entry *ent )
1.87 - uint32_t mmucr = MMIO_READ(MMU,MMUCR);
1.88 - if( IS_SH4_PRIVMODE() ) {
1.89 - switch( addr & 0xE0000000 ) {
1.90 - case 0x80000000: case 0xA0000000:
1.91 - /* Non-translated read P1,P2 */
1.94 - /* Non-translated read P4 */
1.97 - if( mmucr&MMUCR_AT ) {
1.100 + unsigned int vpn;
1.101 + switch( ent->flags & TLB_SIZE_MASK ) {
1.102 + case TLB_SIZE_1K: xlat_flush_page( ent->vpn ); break;
1.103 + case TLB_SIZE_4K: xlat_flush_page( ent->vpn ); break;
1.104 + case TLB_SIZE_64K:
1.105 + for( vpn = ent->vpn; vpn < ent->vpn + 0x10000; vpn += 0x1000 ) {
1.106 + xlat_flush_page( vpn );
1.109 + case TLB_SIZE_1M:
1.110 + for( vpn = ent->vpn; vpn < ent->vpn + 0x100000; vpn += 0x1000 ) {
1.111 + xlat_flush_page( vpn );
1.118 + * The translation functions below are fairly involved, but unfortunately the
1.119 + * underlying system is complicated. They can undoubtedly be optimized further.
1.123 + * Perform the actual UTLB lookup.
1.124 + * Possible outcomes are:
1.125 + * 0..63 Single match - good, return entry found
1.126 + * -1 No match - raise a tlb data miss exception
1.127 + * -2 Multiple matches - raise a multi-hit exception (reset)
1.128 + * @param vpn virtual address to resolve
1.129 + * @param asid Address space identifier
1.130 + * @param use_asid whether to require an asid match on non-shared pages.
1.131 + * @return the index of the matching UTLB entry, or an error code.
1.133 +static inline int mmu_utlb_lookup_vpn( uint32_t vpn, uint32_t asid, int use_asid )
1.139 + if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
1.144 + for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
1.145 + if( (mmu_utlb[i].flags & TLB_VALID) &&
1.146 + ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
1.147 + ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
1.148 + if( result != -1 ) {
1.155 - if( addr & 0x80000000 ) {
1.156 - if( ((addr&0xFC000000) == 0xE0000000 ) &&
1.157 - ((mmucr&MMUCR_SQMD) == 0) ) {
1.160 + for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
1.161 + if( (mmu_utlb[i].flags & TLB_VALID) &&
1.162 + ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
1.163 + if( result != -1 ) {
1.168 -// MMU_READ_ADDR_ERROR();
1.170 - if( mmucr&MMUCR_AT ) {
1.171 - uint32_t vpn = addr & 0xFFFFFC00;
1.172 - uint32_t asid = MMIO_READ(MMU,PTEH)&0xFF;
1.181 + * Find a UTLB entry for the associative TLB write - same as the normal
1.182 + * lookup but ignores the valid bit.
1.184 +static inline int mmu_utlb_lookup_assoc( uint32_t vpn, uint32_t asid )
1.188 + for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
1.189 + if( ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
1.190 + ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
1.191 + if( result != -1 ) {
1.201 + * Perform the actual ITLB lookup.
1.202 + * Possible outcomes are:
1.203 + * 0..63 Single match - good, return entry found
1.204 + * -1 No match - no ITLB hit; the caller falls back to a UTLB lookup
1.205 + * -2 Multiple matches - raise a multi-hit exception (reset)
1.206 + * @param vpn virtual address to resolve
1.207 + * @param asid Address space identifier
1.208 + * @param use_asid whether to require an asid match on non-shared pages.
1.209 + * @return the index of the matching ITLB entry, or an error code.
1.211 +static inline int mmu_itlb_lookup_vpn( uint32_t vpn, uint32_t asid, int use_asid )
1.216 + for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
1.217 + if( (mmu_itlb[i].flags & TLB_VALID) &&
1.218 + ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
1.219 + ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
1.220 + if( result != -1 ) {
1.227 + for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
1.228 + if( (mmu_itlb[i].flags & TLB_VALID) &&
1.229 + ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
1.230 + if( result != -1 ) {
1.238 + switch( result ) {
1.239 + case 0: mmu_lrui = (mmu_lrui & 0x07); break;
1.240 + case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
1.241 + case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
1.242 + case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
1.248 +static inline int mmu_itlb_update_from_utlb( int entryNo )
1.251 + /* Determine entry to replace based on lrui */
1.252 + if( (mmu_lrui & 0x38) == 0x38 ) {
1.254 + mmu_lrui = mmu_lrui & 0x07;
1.255 + } else if( (mmu_lrui & 0x26) == 0x06 ) {
1.257 + mmu_lrui = (mmu_lrui & 0x19) | 0x20;
1.258 + } else if( (mmu_lrui & 0x15) == 0x01 ) {
1.260 + mmu_lrui = (mmu_lrui & 0x3E) | 0x14;
1.261 + } else { // Note - gets invalid entries too
1.263 + mmu_lrui = (mmu_lrui | 0x0B);
1.266 + mmu_itlb[replace].vpn = mmu_utlb[entryNo].vpn;
1.267 + mmu_itlb[replace].mask = mmu_utlb[entryNo].mask;
1.268 + mmu_itlb[replace].ppn = mmu_utlb[entryNo].ppn;
1.269 + mmu_itlb[replace].asid = mmu_utlb[entryNo].asid;
1.270 + mmu_itlb[replace].flags = mmu_utlb[entryNo].flags & 0x01DA;
1.275 + * Find an ITLB entry for the associative TLB write - same as the normal
1.276 + * lookup but ignores the valid bit.
1.278 +static inline int mmu_itlb_lookup_assoc( uint32_t vpn, uint32_t asid )
1.282 + for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
1.283 + if( ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
1.284 + ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
1.285 + if( result != -1 ) {
1.294 +#define RAISE_TLB_ERROR(code, vpn) \
1.295 + MMIO_WRITE(MMU, TEA, vpn); \
1.296 + MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
1.297 + sh4_raise_tlb_exception(code); \
1.298 + return (((uint64_t)code)<<32)
1.300 +#define RAISE_MEM_ERROR(code, vpn) \
1.301 + MMIO_WRITE(MMU, TEA, vpn); \
1.302 + MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
1.303 + sh4_raise_exception(code); \
1.304 + return (((uint64_t)code)<<32)
1.306 +#define RAISE_OTHER_ERROR(code) \
1.307 + sh4_raise_exception(code); \
1.308 + return (((uint64_t)EXV_EXCEPTION)<<32)
1.311 + * Abort with a non-MMU address error. Caused by user-mode code attempting
1.312 + * to access privileged regions, or alignment faults.
1.314 +#define MMU_READ_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_READ)
1.315 +#define MMU_WRITE_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_WRITE)
1.317 +#define MMU_TLB_READ_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_READ, vpn)
1.318 +#define MMU_TLB_WRITE_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, vpn)
1.319 +#define MMU_TLB_INITIAL_WRITE_ERROR(vpn) RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, vpn)
1.320 +#define MMU_TLB_READ_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_READ, vpn)
1.321 +#define MMU_TLB_WRITE_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, vpn)
1.322 +#define MMU_TLB_MULTI_HIT_ERROR(vpn) sh4_raise_reset(EXC_TLB_MULTI_HIT); \
1.323 + MMIO_WRITE(MMU, TEA, vpn); \
1.324 + MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
1.325 + return (((uint64_t)EXC_TLB_MULTI_HIT)<<32)
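
These macros return a 64-bit value whose upper 32 bits carry the exception/vector code, so a zero upper half means the lower half is a usable physical address. A minimal caller sketch, not part of the patch; sh4_read_long_phys() is a hypothetical physical-access helper:

    static int32_t mmu_read_long_example( sh4addr_t vma )
    {
        uint64_t res = mmu_vma_to_phys_read( vma );
        if( (res >> 32) != 0 ) {
            return 0; /* exception was already raised inside the translation */
        }
        return sh4_read_long_phys( (sh4addr_t)res ); /* hypothetical helper */
    }
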
1.327 +uint64_t mmu_vma_to_phys_write( sh4addr_t addr )
1.329 + uint32_t mmucr = MMIO_READ(MMU,MMUCR);
1.330 + if( addr & 0x80000000 ) {
1.331 + if( IS_SH4_PRIVMODE() ) {
1.332 + if( addr < 0xC0000000 || addr >= 0xE0000000 ) {
1.333 + /* P1, P2 and P4 regions are pass-through (no translation) */
1.334 + return (uint64_t)addr;
1.337 + if( addr >= 0xE0000000 && addr < 0xE4000000 &&
1.338 + ((mmucr&MMUCR_SQMD) == 0) ) {
1.339 + /* Conditional user-mode access to the store-queue (no translation) */
1.340 + return (uint64_t)addr;
1.342 + MMU_WRITE_ADDR_ERROR();
1.346 + if( (mmucr & MMUCR_AT) == 0 ) {
1.347 + return (uint64_t)addr;
1.350 + /* If we get this far, translation is required */
1.352 + int use_asid = ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE();
1.353 + uint32_t asid = MMIO_READ( MMU, PTEH ) & 0xFF;
1.355 + int entryNo = mmu_utlb_lookup_vpn( addr, asid, use_asid );
1.357 + switch(entryNo) {
1.359 + MMU_TLB_WRITE_MISS_ERROR(addr);
1.362 + MMU_TLB_MULTI_HIT_ERROR(addr);
1.365 + if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
1.366 + : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
1.367 + /* protection violation */
1.368 + MMU_TLB_WRITE_PROT_ERROR(addr);
1.371 + if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
1.372 + MMU_TLB_INITIAL_WRITE_ERROR(addr);
1.375 + /* finally generate the target address */
1.376 + return (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
1.377 + (addr & (~mmu_utlb[entryNo].mask));
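
The return above keeps the page-number bits from the entry's PPN and the in-page offset bits from the virtual address. A worked example with assumed values, for a 4K entry (mask = MASK_4K):

    uint32_t ppn  = 0x0C001000;                          /* entry's physical page    */
    uint32_t addr = 0x7C123ABC;                          /* virtual address written  */
    uint32_t phys = (ppn & MASK_4K) | (addr & ~MASK_4K); /* = 0x0C001ABC             */
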
1.383 +uint64_t mmu_vma_to_phys_exec( sh4addr_t addr )
1.385 + uint32_t mmucr = MMIO_READ(MMU,MMUCR);
1.386 + if( addr & 0x80000000 ) {
1.387 + if( IS_SH4_PRIVMODE() ) {
1.388 + if( addr < 0xC0000000 ) {
1.389 + /* P1 and P2 regions are pass-through (no translation); P4 is not executable */
1.390 + return (uint64_t)addr;
1.391 + } else if( addr >= 0xE0000000 ) {
1.392 + MMU_READ_ADDR_ERROR();
1.395 + MMU_READ_ADDR_ERROR();
1.399 + if( (mmucr & MMUCR_AT) == 0 ) {
1.400 + return (uint64_t)addr;
1.403 + /* If we get this far, translation is required */
1.404 + int use_asid = ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE();
1.405 + uint32_t asid = MMIO_READ( MMU, PTEH ) & 0xFF;
1.407 + int entryNo = mmu_itlb_lookup_vpn( addr, asid, use_asid );
1.408 + if( entryNo == -1 ) {
1.409 + entryNo = mmu_utlb_lookup_vpn( addr, asid, use_asid );
1.410 + if( entryNo >= 0 ) {
1.411 + entryNo = mmu_itlb_update_from_utlb( entryNo );
1.414 + switch(entryNo) {
1.416 + MMU_TLB_READ_MISS_ERROR(addr);
1.419 + MMU_TLB_MULTI_HIT_ERROR(addr);
1.422 + if( (mmu_itlb[entryNo].flags & TLB_USERMODE) == 0 &&
1.423 + !IS_SH4_PRIVMODE() ) {
1.424 + /* protection violation */
1.425 + MMU_TLB_READ_PROT_ERROR(addr);
1.428 + /* finally generate the target address */
1.429 + return (mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask) |
1.430 + (addr & (~mmu_itlb[entryNo].mask));
1.435 +uint64_t mmu_vma_to_phys_read_noexc( sh4addr_t addr ) {
1.441 +uint64_t mmu_vma_to_phys_read( sh4addr_t addr )
1.443 + uint32_t mmucr = MMIO_READ(MMU,MMUCR);
1.444 + if( addr & 0x80000000 ) {
1.445 + if( IS_SH4_PRIVMODE() ) {
1.446 + if( addr < 0xC0000000 || addr >= 0xE0000000 ) {
1.447 + /* P1, P2 and P4 regions are pass-through (no translation) */
1.448 + return (uint64_t)addr;
1.451 + if( addr >= 0xE0000000 && addr < 0xE4000000 &&
1.452 + ((mmucr&MMUCR_SQMD) == 0) ) {
1.453 + /* Conditional user-mode access to the store-queue (no translation) */
1.454 + return (uint64_t)addr;
1.456 + MMU_READ_ADDR_ERROR();
1.460 + if( (mmucr & MMUCR_AT) == 0 ) {
1.461 + return (uint64_t)addr;
1.464 + /* If we get this far, translation is required */
1.466 + int use_asid = ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE();
1.467 + uint32_t asid = MMIO_READ( MMU, PTEH ) & 0xFF;
1.469 + int entryNo = mmu_utlb_lookup_vpn( addr, asid, use_asid );
1.471 + switch(entryNo) {
1.473 + MMU_TLB_READ_MISS_ERROR(addr);
1.476 + MMU_TLB_MULTI_HIT_ERROR(addr);
1.479 + if( (mmu_utlb[entryNo].flags & TLB_USERMODE) == 0 &&
1.480 + !IS_SH4_PRIVMODE() ) {
1.481 + /* protection violation */
1.482 + MMU_TLB_READ_PROT_ERROR(addr);
1.485 + /* finally generate the target address */
1.486 + return (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
1.487 + (addr & (~mmu_utlb[entryNo].mask));
1.492 static void mmu_invalidate_tlb()
1.493 @@ -251,6 +628,7 @@
1.494 struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
1.495 ent->ppn = val & 0x1FFFFC00;
1.496 ent->flags = val & 0x00001DA;
1.497 + ent->mask = get_mask_for_flags(val);
1.500 #define UTLB_ENTRY(addr) ((addr>>8)&0x3F)
1.501 @@ -276,6 +654,16 @@
1.502 void mmu_utlb_addr_write( sh4addr_t addr, uint32_t val )
1.504 if( UTLB_ASSOC(addr) ) {
1.505 + uint32_t asid = MMIO_READ( MMU, PTEH ) & 0xFF;
1.506 + int entryNo = mmu_utlb_lookup_assoc( val, asid );
1.507 + if( entryNo >= 0 ) {
1.508 + struct utlb_entry *ent = &mmu_utlb[entryNo];
1.509 + ent->flags = ent->flags & ~(TLB_DIRTY|TLB_VALID);
1.510 + ent->flags |= (val & TLB_VALID);
1.511 + ent->flags |= ((val & 0x200)>>7);
1.512 + } else if( entryNo == -2 ) {
1.513 + MMU_TLB_MULTI_HIT_ERROR(addr);
1.516 struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
1.517 ent->vpn = (val & 0xFFFFFC00);
1.518 @@ -294,6 +682,7 @@
1.520 ent->ppn = (val & 0x1FFFFC00);
1.521 ent->flags = (val & 0x000001FF);
1.522 + ent->mask = get_mask_for_flags(val);
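
With this change the mask is cached in three places (the PTEH/PTEL load near the top of the patch, the ITLB data write and this UTLB data write). A hypothetical debug check, not part of the patch, could assert that the cached masks stay consistent with the flags:

    #include <assert.h>

    static void mmu_check_cached_masks( void )
    {
        int i;
        for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
            assert( mmu_utlb[i].mask == get_mask_for_flags(mmu_utlb[i].flags) );
        }
        for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
            assert( mmu_itlb[i].mask == get_mask_for_flags(mmu_itlb[i].flags) );
        }
    }
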