/**
 * Copyright (c) 2005 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#define MODULE sh4_module

#include <stdio.h>

#include "sh4/sh4mmio.h"
#include "sh4/sh4core.h"
#include "sh4/sh4trans.h"
#include "mem.h" /* page_map, mem_alloc_pages, mem_get_region, mem_copy_to_sh4 */
#define VMA_TO_EXT_ADDR(vma) ((vma)&0x1FFFFFFF)
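/* Worked example: VMA_TO_EXT_ADDR(0x8C0010F0) == 0x0C0010F0 - dropping bits
 * [31:29] folds the P1/P2/P3 windows onto the 29-bit external address space. */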
/* The MMU (practically unique in the system) is allowed to raise exceptions
 * directly, with a return code indicating that one was raised and the caller
 * had better behave appropriately.
 */
#define RAISE_TLB_ERROR(code, vpn) \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
    sh4_raise_tlb_exception(code);

#define RAISE_MEM_ERROR(code, vpn) \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
    sh4_raise_exception(code);

#define RAISE_OTHER_ERROR(code) \
    sh4_raise_exception(code);
/**
 * Abort with a non-MMU address error. Caused by user-mode code attempting
 * to access privileged regions, or by alignment faults.
 */
#define MMU_READ_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_READ)
#define MMU_WRITE_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_WRITE)
#define MMU_TLB_READ_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_READ, vpn)
#define MMU_TLB_WRITE_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, vpn)
#define MMU_TLB_INITIAL_WRITE_ERROR(vpn) RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, vpn)
#define MMU_TLB_READ_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_READ, vpn)
#define MMU_TLB_WRITE_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, vpn)
#define MMU_TLB_MULTI_HIT_ERROR(vpn) sh4_raise_reset(EXC_TLB_MULTI_HIT); \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00)));
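/* Typical caller pattern (illustrative sketch only): a translation helper
 * raises one of the errors above and then returns MMU_VMA_ERROR, leaving
 * TEA/PTEH describing the fault:
 *
 *     sh4addr_t pa = mmu_vma_to_phys_read( vma );
 *     if( pa == MMU_VMA_ERROR )
 *         return; // exception already recorded via the macros above
 */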
#define OCRAM_START (0x1C000000>>LXDREAM_PAGE_BITS)
#define OCRAM_END   (0x20000000>>LXDREAM_PAGE_BITS)

#define ITLB_ENTRY_COUNT 4
#define UTLB_ENTRY_COUNT 64
#define TLB_VALID     0x00000100
#define TLB_USERMODE  0x00000040
#define TLB_WRITABLE  0x00000020
#define TLB_USERWRITABLE (TLB_WRITABLE|TLB_USERMODE)
#define TLB_SIZE_MASK 0x00000090
#define TLB_SIZE_1K   0x00000000
#define TLB_SIZE_4K   0x00000010
#define TLB_SIZE_64K  0x00000080
#define TLB_SIZE_1M   0x00000090
#define TLB_CACHEABLE 0x00000008
#define TLB_DIRTY     0x00000004
#define TLB_SHARE     0x00000002
#define TLB_WRITETHRU 0x00000001

#define MASK_1K  0xFFFFFC00
#define MASK_4K  0xFFFFF000
#define MASK_64K 0xFFFF0000
#define MASK_1M  0xFFF00000
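/* Worked example: a 4K entry has (flags & TLB_SIZE_MASK) == TLB_SIZE_4K, so
 * get_mask_for_flags() below returns MASK_4K (0xFFFFF000). The entry then
 * matches any address agreeing with its vpn in bits [31:12], and the low 12
 * bits pass through untranslated. */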
struct itlb_entry {
    sh4addr_t vpn; // Virtual Page Number
    uint32_t asid; // Process ID
    uint32_t mask; // Page size mask
    sh4addr_t ppn; // Physical Page Number
    uint32_t flags;
};
struct utlb_entry {
    sh4addr_t vpn; // Virtual Page Number
    uint32_t mask; // Page size mask
    uint32_t asid; // Process ID
    sh4addr_t ppn; // Physical Page Number
    uint32_t flags;
    uint32_t pcmcia; // extra pcmcia data - not used
};
static struct itlb_entry mmu_itlb[ITLB_ENTRY_COUNT];
static struct utlb_entry mmu_utlb[UTLB_ENTRY_COUNT];

static uint32_t mmu_urc;
static uint32_t mmu_urb;
static uint32_t mmu_lrui;
static uint32_t mmu_asid; // current asid

static sh4ptr_t cache = NULL;

static void mmu_invalidate_tlb();
static uint32_t get_mask_for_flags( uint32_t flags )
{
    switch( flags & TLB_SIZE_MASK ) {
    case TLB_SIZE_1K:  return MASK_1K;
    case TLB_SIZE_4K:  return MASK_4K;
    case TLB_SIZE_64K: return MASK_64K;
    case TLB_SIZE_1M:  return MASK_1M;
    default: return 0; /* Unreachable */
    }
}
int32_t mmio_region_MMU_read( uint32_t reg )
{
    switch( reg ) {
    case MMUCR:
        /* URC, URB and LRUI are maintained in local variables and folded
         * back into the register value on read. */
        return MMIO_READ( MMU, MMUCR ) | (mmu_urc<<10) | (mmu_urb<<18) | (mmu_lrui<<26);
    default:
        return MMIO_READ( MMU, reg );
    }
}
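/* Example (hypothetical state): with mmu_urc=5, mmu_urb=0x20, mmu_lrui=0 and
 * a stored MMUCR value of 0x00000001 (AT set), reading MMUCR returns
 * 0x00000001 | (5<<10) | (0x20<<18) = 0x00801401. */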
void mmio_region_MMU_write( uint32_t reg, uint32_t val )
{
    uint32_t tmp;
    switch( reg ) {
    case PTEH:
        if( (val & 0xFF) != mmu_asid ) {
            mmu_asid = val & 0xFF;
            sh4_icache.page_vma = -1; // invalidate icache as asid has changed
        }
        break;
    case MMUCR:
        if( val & MMUCR_TI ) {
            mmu_invalidate_tlb();
        }
        mmu_urc = (val >> 10) & 0x3F;
        mmu_urb = (val >> 18) & 0x3F;
        mmu_lrui = (val >> 26) & 0x3F;
        tmp = MMIO_READ( MMU, MMUCR );
        if( (val ^ tmp) & MMUCR_AT ) {
            // AT flag has changed state - flush the xlt cache as all bets
            // are off now. We also need to force an immediate exit from the
            // current block.
            MMIO_WRITE( MMU, MMUCR, val );
            sh4_flush_icache();
        }
        break;
    case CCR:
        mmu_set_cache_mode( val & (CCR_OIX|CCR_ORA|CCR_OCE) );
        break;
    default:
        break;
    }
    MMIO_WRITE( MMU, reg, val );
}
void MMU_init()
{
    cache = mem_alloc_pages(2);
}

void MMU_reset()
{
    mmio_region_MMU_write( CCR, 0 );
    mmio_region_MMU_write( MMUCR, 0 );
}
void MMU_save_state( FILE *f )
{
    fwrite( cache, 4096, 2, f );
    fwrite( &mmu_itlb, sizeof(mmu_itlb), 1, f );
    fwrite( &mmu_utlb, sizeof(mmu_utlb), 1, f );
    fwrite( &mmu_urc, sizeof(mmu_urc), 1, f );
    fwrite( &mmu_urb, sizeof(mmu_urb), 1, f );
    fwrite( &mmu_lrui, sizeof(mmu_lrui), 1, f );
    fwrite( &mmu_asid, sizeof(mmu_asid), 1, f );
}
int MMU_load_state( FILE *f )
{
    /* Set up the cache mode according to the saved register value
     * (mem_load runs before this point to load all MMIO data).
     */
    mmio_region_MMU_write( CCR, MMIO_READ(MMU, CCR) );
    if( fread( cache, 4096, 2, f ) != 2 ) {
        return 1;
    }
    if( fread( &mmu_itlb, sizeof(mmu_itlb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_utlb, sizeof(mmu_utlb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_urc, sizeof(mmu_urc), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_urb, sizeof(mmu_urb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_lrui, sizeof(mmu_lrui), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_asid, sizeof(mmu_asid), 1, f ) != 1 ) {
        return 1;
    }
    return 0;
}
void mmu_set_cache_mode( int mode )
{
    uint32_t i;
    switch( mode ) {
    case MEM_OC_INDEX0: /* OIX=0 - bank selected on address bit 13 */
        for( i=OCRAM_START; i<OCRAM_END; i++ )
            page_map[i] = cache + ((i&0x02)<<(LXDREAM_PAGE_BITS-1));
        break;
    case MEM_OC_INDEX1: /* OIX=1 - bank selected on address bit 25, i.e.
                         * bit (25-LXDREAM_PAGE_BITS) of the page number */
        for( i=OCRAM_START; i<OCRAM_END; i++ )
            page_map[i] = cache + (((i >> (25-LXDREAM_PAGE_BITS)) & 0x01) << LXDREAM_PAGE_BITS);
        break;
    default: /* disabled */
        for( i=OCRAM_START; i<OCRAM_END; i++ )
            page_map[i] = NULL;
        break;
    }
}
/* TLB maintenance */
/**
 * LDTLB instruction implementation. Copies PTEH, PTEL and PTEA into the UTLB
 * entry identified by MMUCR.URC. Does not modify MMUCR or the ITLB.
 */
void MMU_ldtlb()
{
    mmu_utlb[mmu_urc].vpn = MMIO_READ(MMU, PTEH) & 0xFFFFFC00;
    mmu_utlb[mmu_urc].asid = MMIO_READ(MMU, PTEH) & 0x000000FF;
    mmu_utlb[mmu_urc].ppn = MMIO_READ(MMU, PTEL) & 0x1FFFFC00;
    mmu_utlb[mmu_urc].flags = MMIO_READ(MMU, PTEL) & 0x000001FF;
    mmu_utlb[mmu_urc].pcmcia = MMIO_READ(MMU, PTEA);
    mmu_utlb[mmu_urc].mask = get_mask_for_flags(mmu_utlb[mmu_urc].flags);
}
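/* Example (hypothetical register values): with PTEH = 0x10000012 and
 * PTEL = 0x0C100134, MMU_ldtlb() loads entry MMUCR.URC with vpn=0x10000000,
 * asid=0x12, ppn=0x0C100000 and flags=0x134 (valid, writable, dirty, 4K),
 * so mask becomes MASK_4K. */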
static void mmu_invalidate_tlb()
{
    int i;
    for( i=0; i<ITLB_ENTRY_COUNT; i++ ) {
        mmu_itlb[i].flags &= (~TLB_VALID);
    }
    for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
        mmu_utlb[i].flags &= (~TLB_VALID);
    }
}
#define ITLB_ENTRY(addr) ((addr>>7)&0x03)
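/* Address-array decode: bits [8:7] of the P4 address select the ITLB entry,
 * e.g. ITLB_ENTRY(0x00000180) == 3. */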
int32_t mmu_itlb_addr_read( sh4addr_t addr )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    return ent->vpn | ent->asid | (ent->flags & TLB_VALID);
}

int32_t mmu_itlb_data_read( sh4addr_t addr )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    return ent->ppn | ent->flags;
}
void mmu_itlb_addr_write( sh4addr_t addr, uint32_t val )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    ent->vpn = val & 0xFFFFFC00;
    ent->asid = val & 0x000000FF;
    ent->flags = (ent->flags & ~(TLB_VALID)) | (val&TLB_VALID);
}

void mmu_itlb_data_write( sh4addr_t addr, uint32_t val )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    ent->ppn = val & 0x1FFFFC00;
    ent->flags = val & 0x000001DA;
    ent->mask = get_mask_for_flags(val);
}
#define UTLB_ENTRY(addr) ((addr>>8)&0x3F)
#define UTLB_ASSOC(addr) (addr&0x80)
#define UTLB_DATA2(addr) (addr&0x00800000)
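/* Address-array decode: bits [13:8] select one of the 64 UTLB entries, bit 7
 * requests an associative write, and bit 23 selects the DATA2 (PTEA) array,
 * e.g. UTLB_ENTRY(0x00003F00) == 63. */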
int32_t mmu_utlb_addr_read( sh4addr_t addr )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    return ent->vpn | ent->asid | (ent->flags & TLB_VALID) |
        ((ent->flags & TLB_DIRTY)<<7);
}

int32_t mmu_utlb_data_read( sh4addr_t addr )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    if( UTLB_DATA2(addr) ) {
        return ent->pcmcia;
    } else {
        return ent->ppn | ent->flags;
    }
}
/**
 * Find a UTLB entry for the associative TLB write - similar to the normal
 * lookup, but takes the ASID explicitly and leaves the URC counter untouched.
 */
static inline int mmu_utlb_lookup_assoc( uint32_t vpn, uint32_t asid )
{
    int result = -1;
    unsigned int i;
    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
            ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
            ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                fprintf( stderr, "TLB Multi hit: %d %d\n", result, i );
                return -2;
            }
            result = i;
        }
    }
    return result;
}
/**
 * Find an ITLB entry for the associative TLB write - similar to the normal
 * lookup, but takes the ASID explicitly and leaves the LRUI state untouched.
 */
static inline int mmu_itlb_lookup_assoc( uint32_t vpn, uint32_t asid )
{
    int result = -1;
    unsigned int i;
    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
            ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
            ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }
    return result;
}
void mmu_utlb_addr_write( sh4addr_t addr, uint32_t val )
{
    if( UTLB_ASSOC(addr) ) {
        int utlb = mmu_utlb_lookup_assoc( val, mmu_asid );
        if( utlb >= 0 ) {
            struct utlb_entry *ent = &mmu_utlb[utlb];
            ent->flags = ent->flags & ~(TLB_DIRTY|TLB_VALID);
            ent->flags |= (val & TLB_VALID);
            ent->flags |= ((val & 0x200)>>7);
        }

        int itlb = mmu_itlb_lookup_assoc( val, mmu_asid );
        if( itlb >= 0 ) {
            struct itlb_entry *ent = &mmu_itlb[itlb];
            ent->flags = (ent->flags & (~TLB_VALID)) | (val & TLB_VALID);
        }

        if( itlb == -2 || utlb == -2 ) {
            MMU_TLB_MULTI_HIT_ERROR(addr);
            return;
        }
    } else {
        struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
        ent->vpn = (val & 0xFFFFFC00);
        ent->asid = (val & 0xFF);
        ent->flags = (ent->flags & ~(TLB_DIRTY|TLB_VALID));
        ent->flags |= (val & TLB_VALID);
        ent->flags |= ((val & 0x200)>>7);
    }
}
void mmu_utlb_data_write( sh4addr_t addr, uint32_t val )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    if( UTLB_DATA2(addr) ) {
        ent->pcmcia = val & 0x0000000F;
    } else {
        ent->ppn = (val & 0x1FFFFC00);
        ent->flags = (val & 0x000001FF);
        ent->mask = get_mask_for_flags(val);
    }
}
/* Cache access - not implemented */

int32_t mmu_icache_addr_read( sh4addr_t addr )
{
    return 0; // not implemented
}

int32_t mmu_icache_data_read( sh4addr_t addr )
{
    return 0; // not implemented
}

int32_t mmu_ocache_addr_read( sh4addr_t addr )
{
    return 0; // not implemented
}

int32_t mmu_ocache_data_read( sh4addr_t addr )
{
    return 0; // not implemented
}

void mmu_icache_addr_write( sh4addr_t addr, uint32_t val )
{
}

void mmu_icache_data_write( sh4addr_t addr, uint32_t val )
{
}

void mmu_ocache_addr_write( sh4addr_t addr, uint32_t val )
{
}

void mmu_ocache_data_write( sh4addr_t addr, uint32_t val )
{
}
/******************************************************************************/
/* MMU TLB address translation                                                */
/******************************************************************************/

/* The translations are excessively complicated, but unfortunately it's a
 * complicated system. TODO: make this not be painfully slow.
 */
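/* Region split assumed throughout (standard SH4 memory map):
 *   P0/U0 0x00000000-0x7FFFFFFF  translated when MMUCR.AT=1
 *   P1    0x80000000-0x9FFFFFFF  untranslated, cacheable
 *   P2    0xA0000000-0xBFFFFFFF  untranslated, uncached
 *   P3    0xC0000000-0xDFFFFFFF  translated
 *   P4    0xE0000000-0xFFFFFFFF  control space (store queues at
 *                                0xE0000000-0xE3FFFFFF)
 * The bit tests on addr below implement exactly this split. */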
/**
 * Perform the actual utlb lookup w/ asid matching.
 * Possible outcomes are:
 *   0..63 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant UTLB entry, or an error.
 */
static inline int mmu_utlb_lookup_vpn_asid( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    mmu_urc++;
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
        mmu_urc = 0;
    }

    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
            ((mmu_utlb[i].flags & TLB_SHARE) || mmu_asid == mmu_utlb[i].asid) &&
            ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }
    return result;
}
/**
 * Perform the actual utlb lookup matching on vpn only.
 * Possible outcomes are:
 *   0..63 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant UTLB entry, or an error.
 */
static inline int mmu_utlb_lookup_vpn( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    mmu_urc++;
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
        mmu_urc = 0;
    }

    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
            ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }
    return result;
}
/**
 * Update the ITLB by replacing the LRU entry with the specified UTLB entry.
 * @return the number (0-3) of the replaced entry.
 */
static inline int mmu_itlb_update_from_utlb( int entryNo )
{
    int replace;
    /* Determine entry to replace based on lrui */
    if( (mmu_lrui & 0x38) == 0x38 ) {
        replace = 0;
        mmu_lrui = mmu_lrui & 0x07;
    } else if( (mmu_lrui & 0x26) == 0x06 ) {
        replace = 1;
        mmu_lrui = (mmu_lrui & 0x19) | 0x20;
    } else if( (mmu_lrui & 0x15) == 0x01 ) {
        replace = 2;
        mmu_lrui = (mmu_lrui & 0x3E) | 0x14;
    } else { // Note - gets invalid entries too
        replace = 3;
        mmu_lrui = (mmu_lrui | 0x0B);
    }

    mmu_itlb[replace].vpn = mmu_utlb[entryNo].vpn;
    mmu_itlb[replace].mask = mmu_utlb[entryNo].mask;
    mmu_itlb[replace].ppn = mmu_utlb[entryNo].ppn;
    mmu_itlb[replace].asid = mmu_utlb[entryNo].asid;
    mmu_itlb[replace].flags = mmu_utlb[entryNo].flags & 0x01DA;
    return replace;
}
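/* The bit patterns above follow the SH7750 LRUI encoding, where each of the
 * six LRUI bits records the relative age of one pair of ITLB entries; e.g.
 * (mmu_lrui & 0x38) == 0x38 means entry 0 is older than entries 1-3 and is
 * therefore the victim. */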
/**
 * Perform the actual itlb lookup w/ asid matching.
 * Possible outcomes are:
 *   0..3 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant ITLB entry, or an error.
 */
static inline int mmu_itlb_lookup_vpn_asid( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
            ((mmu_itlb[i].flags & TLB_SHARE) || mmu_asid == mmu_itlb[i].asid) &&
            ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    if( result == -1 ) {
        /* Miss - fall back to the UTLB and pull the entry into the ITLB. */
        int utlbEntry = mmu_utlb_lookup_vpn_asid( vpn );
        if( utlbEntry < 0 ) {
            return utlbEntry;
        } else {
            return mmu_itlb_update_from_utlb( utlbEntry );
        }
    }

    switch( result ) {
    case 0: mmu_lrui = (mmu_lrui & 0x07); break;
    case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
    case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
    case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
    }

    return result;
}
/**
 * Perform the actual itlb lookup on vpn only.
 * Possible outcomes are:
 *   0..3 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant ITLB entry, or an error.
 */
static inline int mmu_itlb_lookup_vpn( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
            ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    if( result == -1 ) {
        /* Miss - fall back to the UTLB and pull the entry into the ITLB. */
        int utlbEntry = mmu_utlb_lookup_vpn( vpn );
        if( utlbEntry < 0 ) {
            return utlbEntry;
        } else {
            return mmu_itlb_update_from_utlb( utlbEntry );
        }
    }

    switch( result ) {
    case 0: mmu_lrui = (mmu_lrui & 0x07); break;
    case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
    case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
    case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
    }

    return result;
}
sh4addr_t mmu_vma_to_phys_read( sh4vma_t addr )
{
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( addr & 0x80000000 ) {
        if( IS_SH4_PRIVMODE() ) {
            if( addr >= 0xE0000000 ) {
                return addr; /* P4 - passthrough */
            } else if( addr < 0xC0000000 ) {
                /* P1, P2 regions are pass-through (no translation) */
                return VMA_TO_EXT_ADDR(addr);
            }
            /* P3 falls through to the TLB lookup below */
        } else {
            if( addr >= 0xE0000000 && addr < 0xE4000000 &&
                ((mmucr&MMUCR_SQMD) == 0) ) {
                /* Conditional user-mode access to the store-queue (no translation) */
                return addr;
            }
            MMU_READ_ADDR_ERROR();
            return MMU_VMA_ERROR;
        }
    }

    if( (mmucr & MMUCR_AT) == 0 ) {
        return VMA_TO_EXT_ADDR(addr);
    }

    /* If we get this far, translation is required */
    int entryNo;
    if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
        entryNo = mmu_utlb_lookup_vpn_asid( addr );
    } else {
        entryNo = mmu_utlb_lookup_vpn( addr );
    }

    switch( entryNo ) {
    case -1:
        MMU_TLB_READ_MISS_ERROR(addr);
        return MMU_VMA_ERROR;
    case -2:
        MMU_TLB_MULTI_HIT_ERROR(addr);
        return MMU_VMA_ERROR;
    default:
        if( (mmu_utlb[entryNo].flags & TLB_USERMODE) == 0 &&
            !IS_SH4_PRIVMODE() ) {
            /* protection violation */
            MMU_TLB_READ_PROT_ERROR(addr);
            return MMU_VMA_ERROR;
        }

        /* finally generate the target address */
        sh4addr_t pma = (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
            (addr & (~mmu_utlb[entryNo].mask));
        if( pma > 0x1C000000 ) // Remap 1Cxx .. 1Fxx region to P4
            pma |= 0xE0000000;
        return pma;
    }
}
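/* Worked example (hypothetical UTLB entry): vpn=0x10000000, ppn=0x0C100000,
 * mask=MASK_1M. A read of 0x100123F0 then yields
 * pma = 0x0C100000 | (0x100123F0 & 0x000FFFFF) = 0x0C1123F0. */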
sh4addr_t mmu_vma_to_phys_write( sh4vma_t addr )
{
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( addr & 0x80000000 ) {
        if( IS_SH4_PRIVMODE() ) {
            if( addr >= 0xE0000000 ) {
                return addr; /* P4 - passthrough */
            } else if( addr < 0xC0000000 ) {
                /* P1, P2 regions are pass-through (no translation) */
                return VMA_TO_EXT_ADDR(addr);
            }
            /* P3 falls through to the TLB lookup below */
        } else {
            if( addr >= 0xE0000000 && addr < 0xE4000000 &&
                ((mmucr&MMUCR_SQMD) == 0) ) {
                /* Conditional user-mode access to the store-queue (no translation) */
                return addr;
            }
            MMU_WRITE_ADDR_ERROR();
            return MMU_VMA_ERROR;
        }
    }

    if( (mmucr & MMUCR_AT) == 0 ) {
        return VMA_TO_EXT_ADDR(addr);
    }

    /* If we get this far, translation is required */
    int entryNo;
    if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
        entryNo = mmu_utlb_lookup_vpn_asid( addr );
    } else {
        entryNo = mmu_utlb_lookup_vpn( addr );
    }

    switch( entryNo ) {
    case -1:
        MMU_TLB_WRITE_MISS_ERROR(addr);
        return MMU_VMA_ERROR;
    case -2:
        MMU_TLB_MULTI_HIT_ERROR(addr);
        return MMU_VMA_ERROR;
    default:
        if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
            : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
            /* protection violation */
            MMU_TLB_WRITE_PROT_ERROR(addr);
            return MMU_VMA_ERROR;
        }

        if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
            MMU_TLB_INITIAL_WRITE_ERROR(addr);
            return MMU_VMA_ERROR;
        }

        /* finally generate the target address */
        sh4addr_t pma = (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
            (addr & (~mmu_utlb[entryNo].mask));
        if( pma > 0x1C000000 ) // Remap 1Cxx .. 1Fxx region to P4
            pma |= 0xE0000000;
        return pma;
    }
}
/**
 * Update the icache for an untranslated address.
 */
void mmu_update_icache_phys( sh4addr_t addr )
{
    if( (addr & 0x1C000000) == 0x0C000000 ) {
        /* Main ram */
        sh4_icache.page_vma = addr & 0xFF000000;
        sh4_icache.page_ppa = 0x0C000000;
        sh4_icache.mask = 0xFF000000;
        sh4_icache.page = sh4_main_ram;
    } else if( (addr & 0x1FE00000) == 0 ) {
        /* BIOS ROM */
        sh4_icache.page_vma = addr & 0xFFE00000;
        sh4_icache.page_ppa = 0;
        sh4_icache.mask = 0xFFE00000;
        sh4_icache.page = mem_get_region(0);
    } else {
        /* not supported */
        sh4_icache.page_vma = -1;
    }
}
/**
 * Update the sh4_icache structure to describe the page(s) containing the
 * given vma. If the address does not reference a RAM/ROM region, the icache
 * will be invalidated instead.
 * If AT is on, this method will raise TLB exceptions normally
 * (hence it should only be used immediately prior to execution of code), and
 * will otherwise set the icache according to the matching TLB entry.
 * If AT is off, this method will set the entire referenced RAM/ROM region in
 * the icache.
 * @return TRUE if the update completed (successfully or otherwise), FALSE
 * if an exception was raised.
 */
gboolean mmu_update_icache( sh4vma_t addr )
{
    int entryNo;
    if( IS_SH4_PRIVMODE() ) {
        if( addr & 0x80000000 ) {
            if( addr < 0xC0000000 ) {
                /* P1, P2 and P4 regions are pass-through (no translation) */
                mmu_update_icache_phys(addr);
                return TRUE;
            } else if( addr >= 0xE0000000 && addr < 0xFFFFFF00 ) {
                MMU_READ_ADDR_ERROR();
                return FALSE;
            }
        }

        uint32_t mmucr = MMIO_READ(MMU,MMUCR);
        if( (mmucr & MMUCR_AT) == 0 ) {
            mmu_update_icache_phys(addr);
            return TRUE;
        }

        if( (mmucr & MMUCR_SV) == 0 )
            entryNo = mmu_itlb_lookup_vpn_asid( addr );
        else
            entryNo = mmu_itlb_lookup_vpn( addr );
    } else {
        if( addr & 0x80000000 ) {
            MMU_READ_ADDR_ERROR();
            return FALSE;
        }

        uint32_t mmucr = MMIO_READ(MMU,MMUCR);
        if( (mmucr & MMUCR_AT) == 0 ) {
            mmu_update_icache_phys(addr);
            return TRUE;
        }

        entryNo = mmu_itlb_lookup_vpn_asid( addr );

        if( entryNo != -1 && (mmu_itlb[entryNo].flags & TLB_USERMODE) == 0 ) {
            MMU_TLB_READ_PROT_ERROR(addr);
            return FALSE;
        }
    }

    switch( entryNo ) {
    case -1:
        MMU_TLB_READ_MISS_ERROR(addr);
        return FALSE;
    case -2:
        MMU_TLB_MULTI_HIT_ERROR(addr);
        return FALSE;
    default:
        sh4_icache.page_ppa = mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask;
        sh4_icache.page = mem_get_region( sh4_icache.page_ppa );
        if( sh4_icache.page == NULL ) {
            sh4_icache.page_vma = -1;
        } else {
            sh4_icache.page_vma = mmu_itlb[entryNo].vpn & mmu_itlb[entryNo].mask;
            sh4_icache.mask = mmu_itlb[entryNo].mask;
        }
        return TRUE;
    }
}
/**
 * Translate an address for disassembly purposes (ie performs an instruction
 * lookup) - does not raise exceptions or modify any state, and ignores
 * protection bits. Returns the translated address, or MMU_VMA_ERROR
 * on translation failure.
 */
sh4addr_t mmu_vma_to_phys_disasm( sh4vma_t vma )
{
    if( vma & 0x80000000 ) {
        if( vma < 0xC0000000 ) {
            /* P1, P2 and P4 regions are pass-through (no translation) */
            return VMA_TO_EXT_ADDR(vma);
        } else if( vma >= 0xE0000000 && vma < 0xFFFFFF00 ) {
            /* Not translatable */
            return MMU_VMA_ERROR;
        }
    }

    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( (mmucr & MMUCR_AT) == 0 ) {
        return VMA_TO_EXT_ADDR(vma);
    }

    int entryNo = mmu_itlb_lookup_vpn( vma );
    if( entryNo == -2 ) {
        entryNo = mmu_itlb_lookup_vpn_asid( vma );
    }
    if( entryNo < 0 ) {
        return MMU_VMA_ERROR;
    } else {
        return (mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask) |
            (vma & (~mmu_itlb[entryNo].mask));
    }
}
gboolean sh4_flush_store_queue( sh4addr_t addr )
{
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    int queue = (addr&0x20)>>2;
    sh4ptr_t src = (sh4ptr_t)&sh4r.store_queue[queue];
    sh4addr_t target;
    /* Store queue operation */
    if( mmucr & MMUCR_AT ) {
        int entryNo;
        if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
            entryNo = mmu_utlb_lookup_vpn_asid( addr );
        } else {
            entryNo = mmu_utlb_lookup_vpn( addr );
        }

        switch( entryNo ) {
        case -1:
            MMU_TLB_WRITE_MISS_ERROR(addr);
            return FALSE;
        case -2:
            MMU_TLB_MULTI_HIT_ERROR(addr);
            return FALSE;
        default:
            if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
                : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
                /* protection violation */
                MMU_TLB_WRITE_PROT_ERROR(addr);
                return FALSE;
            }

            if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
                MMU_TLB_INITIAL_WRITE_ERROR(addr);
                return FALSE;
            }

            /* finally generate the target address */
            target = ((mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
                (addr & (~mmu_utlb[entryNo].mask))) & 0xFFFFFFE0;
        }
    } else {
        uint32_t hi = (MMIO_READ( MMU, (queue == 0 ? QACR0 : QACR1) ) & 0x1C) << 24;
        target = (addr&0x03FFFFE0) | hi;
    }
    mem_copy_to_sh4( target, src, 32 );
    return TRUE;
}
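/* Worked example (hypothetical values, AT=0): a flush of SQ1 via address
 * 0xE4001234 gives queue=8 (bit 5 set); with QACR1=0x0C,
 * hi = (0x0C & 0x1C)<<24 = 0x0C000000 and
 * target = (0xE4001234 & 0x03FFFFE0) | hi = 0x0C001220, so the 32-byte
 * burst lands in main RAM at 0x0C001220. */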