/**
 * Copyright (c) 2005 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#define MODULE sh4_module

#include <stdio.h>
#include <assert.h>
#include "sh4/sh4mmio.h"
#include "sh4/sh4core.h"
#include "sh4/sh4trans.h"

#ifdef HAVE_FRAME_ADDRESS
#define RETURN_VIA(exc) do{ *(((void **)__builtin_frame_address(0))+1) = exc; return; } while(0)
#else
#define RETURN_VIA(exc) return MMU_VMA_ERROR
#endif

#define VMA_TO_EXT_ADDR(vma) ((vma)&0x1FFFFFFF)
/* The MMU (practically unique in the system) is allowed to raise exceptions
 * directly, with a return code indicating that one was raised and the caller
 * had better behave appropriately.
 */
#define RAISE_TLB_ERROR(code, vpn) \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
    sh4_raise_tlb_exception(code);

#define RAISE_MEM_ERROR(code, vpn) \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
    sh4_raise_exception(code);

#define RAISE_OTHER_ERROR(code) \
    sh4_raise_exception(code);
/**
 * Abort with a non-MMU address error. Caused by user-mode code attempting
 * to access privileged regions, or alignment faults.
 */
#define MMU_READ_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_READ)
#define MMU_WRITE_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_WRITE)

#define MMU_TLB_READ_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_READ, vpn)
#define MMU_TLB_WRITE_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, vpn)
#define MMU_TLB_INITIAL_WRITE_ERROR(vpn) RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, vpn)
#define MMU_TLB_READ_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_READ, vpn)
#define MMU_TLB_WRITE_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, vpn)
#define MMU_TLB_MULTI_HIT_ERROR(vpn) sh4_raise_reset(EXC_TLB_MULTI_HIT); \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00)));
#define OCRAM_START (0x1C000000>>LXDREAM_PAGE_BITS)
#define OCRAM_END   (0x20000000>>LXDREAM_PAGE_BITS)

#define ITLB_ENTRY_COUNT 4
#define UTLB_ENTRY_COUNT 64

#define TLB_VALID     0x00000100
#define TLB_USERMODE  0x00000040
#define TLB_WRITABLE  0x00000020
#define TLB_USERWRITABLE (TLB_WRITABLE|TLB_USERMODE)
#define TLB_SIZE_MASK 0x00000090
#define TLB_SIZE_1K   0x00000000
#define TLB_SIZE_4K   0x00000010
#define TLB_SIZE_64K  0x00000080
#define TLB_SIZE_1M   0x00000090
#define TLB_CACHEABLE 0x00000008
#define TLB_DIRTY     0x00000004
#define TLB_SHARE     0x00000002
#define TLB_WRITETHRU 0x00000001

#define MASK_1K  0xFFFFFC00
#define MASK_4K  0xFFFFF000
#define MASK_64K 0xFFFF0000
#define MASK_1M  0xFFF00000
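
/* Worked example (illustrative values, not from the original source): a 64K
 * page whose UTLB entry holds ppn = 0x0C120000 and mask = MASK_64K maps the
 * virtual address 0x7F2A3456 to
 *   (0x0C120000 & MASK_64K) | (0x7F2A3456 & ~MASK_64K) = 0x0C123456
 * i.e. the mask selects the page-frame bits from the PPN and the page-offset
 * bits from the VMA, exactly as the translation functions below compose them.
 */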
struct itlb_entry {
    sh4addr_t vpn;   // Virtual Page Number
    uint32_t asid;   // Process ID
    uint32_t mask;   // Page size mask
    sh4addr_t ppn;   // Physical Page Number
    uint32_t flags;
};

struct utlb_entry {
    sh4addr_t vpn;   // Virtual Page Number
    uint32_t mask;   // Page size mask
    uint32_t asid;   // Process ID
    sh4addr_t ppn;   // Physical Page Number
    uint32_t flags;
    uint32_t pcmcia; // extra pcmcia data - not used
};

struct utlb_sort_entry {
    sh4addr_t key;   // Masked VPN + ASID
    uint32_t mask;   // Mask + 0x00FF
    int entryNo;
};
static struct itlb_entry mmu_itlb[ITLB_ENTRY_COUNT];
static struct utlb_entry mmu_utlb[UTLB_ENTRY_COUNT];
static uint32_t mmu_urc;
static uint32_t mmu_urb;
static uint32_t mmu_lrui;
static uint32_t mmu_asid; // current asid

static struct utlb_sort_entry mmu_utlb_sorted[UTLB_ENTRY_COUNT];
static uint32_t mmu_utlb_entries; // Number of entries in mmu_utlb_sorted.

static sh4ptr_t cache = NULL;

static void mmu_invalidate_tlb();
static void mmu_utlb_sorted_reset();
static void mmu_utlb_sorted_reload();
static uint32_t get_mask_for_flags( uint32_t flags )
{
    switch( flags & TLB_SIZE_MASK ) {
    case TLB_SIZE_1K: return MASK_1K;
    case TLB_SIZE_4K: return MASK_4K;
    case TLB_SIZE_64K: return MASK_64K;
    case TLB_SIZE_1M: return MASK_1M;
    default: return 0; /* Unreachable */
    }
}
int32_t mmio_region_MMU_read( uint32_t reg )
{
    switch( reg ) {
    case MMUCR:
        return MMIO_READ( MMU, MMUCR) | (mmu_urc<<10) | (mmu_urb<<18) | (mmu_lrui<<26);
    default:
        return MMIO_READ( MMU, reg );
    }
}
void mmio_region_MMU_write( uint32_t reg, uint32_t val )
        if( (val & 0xFF) != mmu_asid ) {
            sh4_icache.page_vma = -1; // invalidate icache as asid has changed

        if( val & MMUCR_TI ) {
            mmu_invalidate_tlb();
        mmu_urc = (val >> 10) & 0x3F;
        mmu_urb = (val >> 18) & 0x3F;
        mmu_lrui = (val >> 26) & 0x3F;

        tmp = MMIO_READ( MMU, MMUCR );
        if( (val ^ tmp) & (MMUCR_AT|MMUCR_SV) ) {
            // AT flag has changed state - flush the xlt cache as all bets
            // are off now. We also need to force an immediate exit from the
        MMIO_WRITE( MMU, MMUCR, val );

        mmu_set_cache_mode( val & (CCR_OIX|CCR_ORA|CCR_OCE) );

        /* Note that if the high bit is set, this appears to reset the machine.
         * Not emulating this behaviour yet until we know why...
         */
        PMM_write_control(0, val);

        PMM_write_control(1, val);

        MMIO_WRITE( MMU, reg, val );
    cache = mem_alloc_pages(2);

    mmio_region_MMU_write( CCR, 0 );
    mmio_region_MMU_write( MMUCR, 0 );
    mmu_utlb_sorted_reload();
void MMU_save_state( FILE *f )
{
    fwrite( cache, 4096, 2, f );
    fwrite( &mmu_itlb, sizeof(mmu_itlb), 1, f );
    fwrite( &mmu_utlb, sizeof(mmu_utlb), 1, f );
    fwrite( &mmu_urc, sizeof(mmu_urc), 1, f );
    fwrite( &mmu_urb, sizeof(mmu_urb), 1, f );
    fwrite( &mmu_lrui, sizeof(mmu_lrui), 1, f );
    fwrite( &mmu_asid, sizeof(mmu_asid), 1, f );
}
int MMU_load_state( FILE *f )
    /* Setup the cache mode according to the saved register value
     * (mem_load runs before this point to load all MMIO data)
     */
    mmio_region_MMU_write( CCR, MMIO_READ(MMU, CCR) );
    if( fread( cache, 4096, 2, f ) != 2 ) {
    if( fread( &mmu_itlb, sizeof(mmu_itlb), 1, f ) != 1 ) {
    if( fread( &mmu_utlb, sizeof(mmu_utlb), 1, f ) != 1 ) {
    if( fread( &mmu_urc, sizeof(mmu_urc), 1, f ) != 1 ) {
    if( fread( &mmu_urb, sizeof(mmu_urb), 1, f ) != 1 ) {
    if( fread( &mmu_lrui, sizeof(mmu_lrui), 1, f ) != 1 ) {
    if( fread( &mmu_asid, sizeof(mmu_asid), 1, f ) != 1 ) {
    mmu_utlb_sorted_reload();
void mmu_set_cache_mode( int mode )
    case MEM_OC_INDEX0: /* OIX=0 */
        for( i=OCRAM_START; i<OCRAM_END; i++ )
            page_map[i] = cache + ((i&0x02)<<(LXDREAM_PAGE_BITS-1));
        break;
    case MEM_OC_INDEX1: /* OIX=1 */
        for( i=OCRAM_START; i<OCRAM_END; i++ )
            page_map[i] = cache + ((i&0x02000000)>>(25-LXDREAM_PAGE_BITS));
        break;
    default: /* disabled */
        for( i=OCRAM_START; i<OCRAM_END; i++ )
/******************* Sorted TLB data structure ****************/
/*
 * mmu_utlb_sorted maintains a list of all active (valid) entries,
 * sorted by masked VPN and then ASID. Multi-hit entries are resolved
 * ahead of time, and have -1 recorded as the corresponding PPN.
 *
 * FIXME: Multi-hit detection doesn't pick up cases where two pages
 * overlap due to different sizes (and don't share the same base
 * address).
 */
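
/* Worked example (illustrative values, not from the original source): a valid
 * 4K entry with vpn = 0x7FF12000 and asid = 0x12 is stored as
 *   key  = (0x7FF12000 & MASK_4K) + 0x12 = 0x7FF12012
 *   mask = MASK_4K | 0x000000FF          = 0xFFFFF0FF
 * A lookup of vma = 0x7FF12ABC under the same ASID computes
 *   lookup = (0x7FF12ABC & 0xFFFFFC00) + 0x12 = 0x7FF12812
 *   lookup & mask = 0x7FF12012 == key  -> hit.
 */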
static void mmu_utlb_sorted_reset()
{
    mmu_utlb_entries = 0;
}
/**
 * Find an entry in the sorted table (VPN+ASID check).
 */
static inline int mmu_utlb_sorted_find( sh4addr_t vma )
    int high = mmu_utlb_entries;
    uint32_t lookup = (vma & 0xFFFFFC00) + mmu_asid;

    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {

    while( low != high ) {
        int posn = (high+low)>>1;
        int masked = lookup & mmu_utlb_sorted[posn].mask;
        if( mmu_utlb_sorted[posn].key < masked ) {
        } else if( mmu_utlb_sorted[posn].key > masked ) {
            return mmu_utlb_sorted[posn].entryNo;
static void mmu_utlb_insert_entry( int entry )
    int high = mmu_utlb_entries;
    uint32_t key = (mmu_utlb[entry].vpn & mmu_utlb[entry].mask) + mmu_utlb[entry].asid;

    assert( mmu_utlb_entries < UTLB_ENTRY_COUNT );
    /* Find the insertion point */
    while( low != high ) {
        int posn = (high+low)>>1;
        if( mmu_utlb_sorted[posn].key < key ) {
        } else if( mmu_utlb_sorted[posn].key > key ) {
            /* Exact match - multi-hit */
            mmu_utlb_sorted[posn].entryNo = -2;

    memmove( &mmu_utlb_sorted[low+1], &mmu_utlb_sorted[low],
             (mmu_utlb_entries - low) * sizeof(struct utlb_sort_entry) );
    mmu_utlb_sorted[low].key = key;
    mmu_utlb_sorted[low].mask = mmu_utlb[entry].mask | 0x000000FF;
    mmu_utlb_sorted[low].entryNo = entry;
static void mmu_utlb_remove_entry( int entry )
    int high = mmu_utlb_entries;
    uint32_t key = (mmu_utlb[entry].vpn & mmu_utlb[entry].mask) + mmu_utlb[entry].asid;
    while( low != high ) {
        int posn = (high+low)>>1;
        if( mmu_utlb_sorted[posn].key < key ) {
        } else if( mmu_utlb_sorted[posn].key > key ) {
            if( mmu_utlb_sorted[posn].entryNo == -2 ) {
                /* Multiple-entry recorded - rebuild the whole table minus entry */
                mmu_utlb_entries = 0;
                for( i=0; i< UTLB_ENTRY_COUNT; i++ ) {
                    if( i != entry && (mmu_utlb[i].flags & TLB_VALID) ) {
                        mmu_utlb_insert_entry(i);

            memmove( &mmu_utlb_sorted[posn], &mmu_utlb_sorted[posn+1],
                     (mmu_utlb_entries - posn)*sizeof(struct utlb_sort_entry) );

    assert( 0 && "UTLB key not found!" );
static void mmu_utlb_sorted_reload()
{
    int i;
    mmu_utlb_entries = 0;
    for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
        if( mmu_utlb[i].flags & TLB_VALID )
            mmu_utlb_insert_entry( i );
    }
}
/* TLB maintenance */

/**
 * LDTLB instruction implementation. Copies PTEH, PTEL and PTEA into the UTLB
 * entry identified by MMUCR.URC. Does not modify MMUCR or the ITLB.
 */
    if( mmu_utlb[mmu_urc].flags & TLB_VALID )
        mmu_utlb_remove_entry( mmu_urc );
    mmu_utlb[mmu_urc].vpn = MMIO_READ(MMU, PTEH) & 0xFFFFFC00;
    mmu_utlb[mmu_urc].asid = MMIO_READ(MMU, PTEH) & 0x000000FF;
    mmu_utlb[mmu_urc].ppn = MMIO_READ(MMU, PTEL) & 0x1FFFFC00;
    mmu_utlb[mmu_urc].flags = MMIO_READ(MMU, PTEL) & 0x000001FF;
    mmu_utlb[mmu_urc].pcmcia = MMIO_READ(MMU, PTEA);
    mmu_utlb[mmu_urc].mask = get_mask_for_flags(mmu_utlb[mmu_urc].flags);
    if( mmu_utlb[mmu_urc].ppn >= 0x1C000000 )
        mmu_utlb[mmu_urc].ppn |= 0xE0000000;
    if( mmu_utlb[mmu_urc].flags & TLB_VALID )
        mmu_utlb_insert_entry( mmu_urc );
static void mmu_invalidate_tlb()
{
    int i;
    for( i=0; i<ITLB_ENTRY_COUNT; i++ ) {
        mmu_itlb[i].flags &= (~TLB_VALID);
    }
    for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
        mmu_utlb[i].flags &= (~TLB_VALID);
    }
    mmu_utlb_entries = 0;
}

#define ITLB_ENTRY(addr) ((addr>>7)&0x03)
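
/* Background note (from the SH7750 memory map, not an original comment): the
 * ITLB address array is accessed through P4 at 0xF2000000 and its data array
 * at 0xF3000000; the 2-bit entry number comes from address bits [8:7], which
 * is what ITLB_ENTRY above extracts. */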
int32_t mmu_itlb_addr_read( sh4addr_t addr )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    return ent->vpn | ent->asid | (ent->flags & TLB_VALID);
}

int32_t mmu_itlb_data_read( sh4addr_t addr )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    return (ent->ppn & 0x1FFFFC00) | ent->flags;
}

void mmu_itlb_addr_write( sh4addr_t addr, uint32_t val )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    ent->vpn = val & 0xFFFFFC00;
    ent->asid = val & 0x000000FF;
    ent->flags = (ent->flags & ~(TLB_VALID)) | (val&TLB_VALID);
}

void mmu_itlb_data_write( sh4addr_t addr, uint32_t val )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    ent->ppn = val & 0x1FFFFC00;
    ent->flags = val & 0x00001DA;
    ent->mask = get_mask_for_flags(val);
    if( ent->ppn >= 0x1C000000 )
        ent->ppn |= 0xE0000000;
}

#define UTLB_ENTRY(addr) ((addr>>8)&0x3F)
#define UTLB_ASSOC(addr) (addr&0x80)
#define UTLB_DATA2(addr) (addr&0x00800000)
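
/* Background note (from the SH7750 memory map, not an original comment): the
 * UTLB address array is accessed through P4 at 0xF6000000 with the entry
 * number in address bits [13:8] and the association bit at bit 7; data array 1
 * sits at 0xF7000000 and data array 2 at 0xF7800000 (selected by bit 23).
 * These are the fields the UTLB_ENTRY/UTLB_ASSOC/UTLB_DATA2 macros decode. */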
int32_t mmu_utlb_addr_read( sh4addr_t addr )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    return ent->vpn | ent->asid | (ent->flags & TLB_VALID) |
        ((ent->flags & TLB_DIRTY)<<7);
}

int32_t mmu_utlb_data_read( sh4addr_t addr )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    if( UTLB_DATA2(addr) ) {
        return ent->pcmcia;
    } else {
        return (ent->ppn&0x1FFFFC00) | ent->flags;
    }
}
/**
 * Find a UTLB entry for the associative TLB write - same as the normal
 * lookup but ignores the valid bit.
 */
static inline int mmu_utlb_lookup_assoc( uint32_t vpn, uint32_t asid )
    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
            ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
            ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            fprintf( stderr, "TLB Multi hit: %d %d\n", result, i );
/**
 * Find an ITLB entry for the associative TLB write - same as the normal
 * lookup but ignores the valid bit.
 */
static inline int mmu_itlb_lookup_assoc( uint32_t vpn, uint32_t asid )
    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
            ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
            ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
void mmu_utlb_addr_write( sh4addr_t addr, uint32_t val )
    if( UTLB_ASSOC(addr) ) {
        int utlb = mmu_utlb_lookup_assoc( val, mmu_asid );
            struct utlb_entry *ent = &mmu_utlb[utlb];
            uint32_t old_flags = ent->flags;
            ent->flags = ent->flags & ~(TLB_DIRTY|TLB_VALID);
            ent->flags |= (val & TLB_VALID);
            ent->flags |= ((val & 0x200)>>7);
            if( (old_flags & TLB_VALID) && !(ent->flags&TLB_VALID) ) {
                mmu_utlb_remove_entry( utlb );
            } else if( !(old_flags & TLB_VALID) && (ent->flags&TLB_VALID) ) {
                mmu_utlb_insert_entry( utlb );
            }

        int itlb = mmu_itlb_lookup_assoc( val, mmu_asid );
            struct itlb_entry *ent = &mmu_itlb[itlb];
            ent->flags = (ent->flags & (~TLB_VALID)) | (val & TLB_VALID);

        if( itlb == -2 || utlb == -2 ) {
            MMU_TLB_MULTI_HIT_ERROR(addr);

        struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
        if( ent->flags & TLB_VALID )
            mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
        ent->vpn = (val & 0xFFFFFC00);
        ent->asid = (val & 0xFF);
        ent->flags = (ent->flags & ~(TLB_DIRTY|TLB_VALID));
        ent->flags |= (val & TLB_VALID);
        ent->flags |= ((val & 0x200)>>7);
        if( ent->flags & TLB_VALID )
            mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
void mmu_utlb_data_write( sh4addr_t addr, uint32_t val )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    if( UTLB_DATA2(addr) ) {
        ent->pcmcia = val & 0x0000000F;
    } else {
        if( ent->flags & TLB_VALID )
            mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
        ent->ppn = (val & 0x1FFFFC00);
        ent->flags = (val & 0x000001FF);
        ent->mask = get_mask_for_flags(val);
        if( ent->ppn >= 0x1C000000 )
            ent->ppn |= 0xE0000000;
        if( ent->flags & TLB_VALID )
            mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
    }
}
/* Cache access - not implemented */

int32_t mmu_icache_addr_read( sh4addr_t addr )
{
    return 0; // not implemented
}

int32_t mmu_icache_data_read( sh4addr_t addr )
{
    return 0; // not implemented
}

int32_t mmu_ocache_addr_read( sh4addr_t addr )
{
    return 0; // not implemented
}

int32_t mmu_ocache_data_read( sh4addr_t addr )
{
    return 0; // not implemented
}

void mmu_icache_addr_write( sh4addr_t addr, uint32_t val )
{
}

void mmu_icache_data_write( sh4addr_t addr, uint32_t val )
{
}

void mmu_ocache_addr_write( sh4addr_t addr, uint32_t val )
{
}

void mmu_ocache_data_write( sh4addr_t addr, uint32_t val )
{
}
/******************************************************************************/
/* MMU TLB address translation */
/******************************************************************************/

/**
 * The translations are excessively complicated, but unfortunately it's a
 * complicated system. TODO: make this not be painfully slow.
 */
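
/* Address-space summary (background from the SH7750 manual, paraphrased here
 * rather than taken from the original comments):
 *   P0/U0 0x00000000-0x7FFFFFFF  translated when MMUCR.AT=1
 *   P1    0x80000000-0x9FFFFFFF  pass-through, cacheable
 *   P2    0xA0000000-0xBFFFFFFF  pass-through, uncached
 *   P3    0xC0000000-0xDFFFFFFF  translated when MMUCR.AT=1
 *   P4    0xE0000000-0xFFFFFFFF  on-chip control space; the store queues sit
 *                                at 0xE0000000-0xE3FFFFFF and are the only P4
 *                                region user mode may touch (when MMUCR.SQMD=0).
 */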
/**
 * Perform the actual utlb lookup w/ asid matching.
 * Possible outcomes are:
 * 0..63 Single match - good, return entry found
 * -1 No match - raise a tlb data miss exception
 * -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant UTLB entry, or an error.
 */
static inline int mmu_utlb_lookup_vpn_asid( uint32_t vpn )

    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {

    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
            ((mmu_utlb[i].flags & TLB_SHARE) || mmu_asid == mmu_utlb[i].asid) &&
            ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
/**
 * Perform the actual utlb lookup matching on vpn only
 * Possible outcomes are:
 * 0..63 Single match - good, return entry found
 * -1 No match - raise a tlb data miss exception
 * -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant UTLB entry, or an error.
 */
static inline int mmu_utlb_lookup_vpn( uint32_t vpn )

    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {

    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
            ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
/**
 * Update the ITLB by replacing the LRU entry with the specified UTLB entry.
 * @return the number (0-3) of the replaced entry.
 */
static int inline mmu_itlb_update_from_utlb( int entryNo )
{
    int replace;
    /* Determine entry to replace based on lrui */
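    /* Background note (paraphrasing the SH7750 manual; not an original
     * comment): MMUCR.LRUI is a 6-bit matrix encoding the relative age of the
     * four ITLB entries. The bit patterns tested below are the manual's
     * replacement conditions, and each assignment re-marks the chosen entry
     * as most recently used - the same updates appear in the ITLB lookup
     * functions further down. */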
    if( (mmu_lrui & 0x38) == 0x38 ) {
        replace = 0;
        mmu_lrui = mmu_lrui & 0x07;
    } else if( (mmu_lrui & 0x26) == 0x06 ) {
        replace = 1;
        mmu_lrui = (mmu_lrui & 0x19) | 0x20;
    } else if( (mmu_lrui & 0x15) == 0x01 ) {
        replace = 2;
        mmu_lrui = (mmu_lrui & 0x3E) | 0x14;
    } else { // Note - gets invalid entries too
        replace = 3;
        mmu_lrui = (mmu_lrui | 0x0B);
    }

    mmu_itlb[replace].vpn = mmu_utlb[entryNo].vpn;
    mmu_itlb[replace].mask = mmu_utlb[entryNo].mask;
    mmu_itlb[replace].ppn = mmu_utlb[entryNo].ppn;
    mmu_itlb[replace].asid = mmu_utlb[entryNo].asid;
    mmu_itlb[replace].flags = mmu_utlb[entryNo].flags & 0x01DA;
    return replace;
}
/**
 * Perform the actual itlb lookup w/ asid protection
 * Possible outcomes are:
 * 0..3 Single match - good, return entry found
 * -1 No match - raise a tlb data miss exception
 * -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant ITLB entry, or an error.
 */
static inline int mmu_itlb_lookup_vpn_asid( uint32_t vpn )

    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
            ((mmu_itlb[i].flags & TLB_SHARE) || mmu_asid == mmu_itlb[i].asid) &&
            ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {

        int utlbEntry = mmu_utlb_sorted_find( vpn );
        if( utlbEntry < 0 ) {
            return mmu_itlb_update_from_utlb( utlbEntry );

    case 0: mmu_lrui = (mmu_lrui & 0x07); break;
    case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
    case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
    case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
/**
 * Perform the actual itlb lookup on vpn only
 * Possible outcomes are:
 * 0..3 Single match - good, return entry found
 * -1 No match - raise a tlb data miss exception
 * -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant ITLB entry, or an error.
 */
static inline int mmu_itlb_lookup_vpn( uint32_t vpn )

    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
            ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {

        int utlbEntry = mmu_utlb_lookup_vpn( vpn );
        if( utlbEntry < 0 ) {
            return mmu_itlb_update_from_utlb( utlbEntry );

    case 0: mmu_lrui = (mmu_lrui & 0x07); break;
    case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
    case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
    case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
#ifdef HAVE_FRAME_ADDRESS
sh4addr_t FASTCALL mmu_vma_to_phys_read( sh4vma_t addr, void *exc )
#else
sh4addr_t FASTCALL mmu_vma_to_phys_read( sh4vma_t addr )
#endif
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( addr & 0x80000000 ) {
        if( IS_SH4_PRIVMODE() ) {
            if( addr >= 0xE0000000 ) {
                return addr; /* P4 - passthrough */
            } else if( addr < 0xC0000000 ) {
                /* P1, P2 regions are pass-through (no translation) */
                return VMA_TO_EXT_ADDR(addr);

            if( addr >= 0xE0000000 && addr < 0xE4000000 &&
                ((mmucr&MMUCR_SQMD) == 0) ) {
                /* Conditional user-mode access to the store-queue (no translation) */

            MMU_READ_ADDR_ERROR();

    if( (mmucr & MMUCR_AT) == 0 ) {
        return VMA_TO_EXT_ADDR(addr);

    /* If we get this far, translation is required */
    if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
        entryNo = mmu_utlb_sorted_find( addr );
    } else {
        entryNo = mmu_utlb_lookup_vpn( addr );
    }

        MMU_TLB_READ_MISS_ERROR(addr);

        MMU_TLB_MULTI_HIT_ERROR(addr);

    if( (mmu_utlb[entryNo].flags & TLB_USERMODE) == 0 &&
        !IS_SH4_PRIVMODE() ) {
        /* protection violation */
        MMU_TLB_READ_PROT_ERROR(addr);

    /* finally generate the target address */
    return (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
        (addr & (~mmu_utlb[entryNo].mask));
#ifdef HAVE_FRAME_ADDRESS
sh4addr_t FASTCALL mmu_vma_to_phys_write( sh4vma_t addr, void *exc )
#else
sh4addr_t FASTCALL mmu_vma_to_phys_write( sh4vma_t addr )
#endif
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( addr & 0x80000000 ) {
        if( IS_SH4_PRIVMODE() ) {
            if( addr >= 0xE0000000 ) {
                return addr; /* P4 - passthrough */
            } else if( addr < 0xC0000000 ) {
                /* P1, P2 regions are pass-through (no translation) */
                return VMA_TO_EXT_ADDR(addr);

            if( addr >= 0xE0000000 && addr < 0xE4000000 &&
                ((mmucr&MMUCR_SQMD) == 0) ) {
                /* Conditional user-mode access to the store-queue (no translation) */

            MMU_WRITE_ADDR_ERROR();

    if( (mmucr & MMUCR_AT) == 0 ) {
        return VMA_TO_EXT_ADDR(addr);

    /* If we get this far, translation is required */
    if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
        entryNo = mmu_utlb_sorted_find( addr );
    } else {
        entryNo = mmu_utlb_lookup_vpn( addr );
    }

        MMU_TLB_WRITE_MISS_ERROR(addr);

        MMU_TLB_MULTI_HIT_ERROR(addr);

    if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
        : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
        /* protection violation */
        MMU_TLB_WRITE_PROT_ERROR(addr);

    if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
        MMU_TLB_INITIAL_WRITE_ERROR(addr);

    /* finally generate the target address */
    sh4addr_t pma = (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
        (addr & (~mmu_utlb[entryNo].mask));
/**
 * Update the icache for an untranslated address
 */
static inline void mmu_update_icache_phys( sh4addr_t addr )
{
    if( (addr & 0x1C000000) == 0x0C000000 ) {
        sh4_icache.page_vma = addr & 0xFF000000;
        sh4_icache.page_ppa = 0x0C000000;
        sh4_icache.mask = 0xFF000000;
        sh4_icache.page = sh4_main_ram;
    } else if( (addr & 0x1FE00000) == 0 ) {
        sh4_icache.page_vma = addr & 0xFFE00000;
        sh4_icache.page_ppa = 0;
        sh4_icache.mask = 0xFFE00000;
        sh4_icache.page = mem_get_region(0);
    } else {
        sh4_icache.page_vma = -1;
    }
}
/**
 * Update the sh4_icache structure to describe the page(s) containing the
 * given vma. If the address does not reference a RAM/ROM region, the icache
 * will be invalidated instead.
 * If AT is on, this method will raise TLB exceptions normally
 * (hence this method should only be used immediately prior to execution of
 * code), and otherwise will set the icache according to the matching TLB entry.
 * If AT is off, this method will set the entire referenced RAM/ROM region in
 * the icache.
 * @return TRUE if the update completed (successfully or otherwise), FALSE
 * if an exception was raised.
 */
gboolean FASTCALL mmu_update_icache( sh4vma_t addr )
    if( IS_SH4_PRIVMODE() ) {
        if( addr & 0x80000000 ) {
            if( addr < 0xC0000000 ) {
                /* P1, P2 and P4 regions are pass-through (no translation) */
                mmu_update_icache_phys(addr);
            } else if( addr >= 0xE0000000 && addr < 0xFFFFFF00 ) {
                MMU_READ_ADDR_ERROR();

        uint32_t mmucr = MMIO_READ(MMU,MMUCR);
        if( (mmucr & MMUCR_AT) == 0 ) {
            mmu_update_icache_phys(addr);

        if( (mmucr & MMUCR_SV) == 0 )
            entryNo = mmu_itlb_lookup_vpn_asid( addr );
        else
            entryNo = mmu_itlb_lookup_vpn( addr );
    } else {
        if( addr & 0x80000000 ) {
            MMU_READ_ADDR_ERROR();

        uint32_t mmucr = MMIO_READ(MMU,MMUCR);
        if( (mmucr & MMUCR_AT) == 0 ) {
            mmu_update_icache_phys(addr);

        entryNo = mmu_itlb_lookup_vpn_asid( addr );

    if( entryNo != -1 && (mmu_itlb[entryNo].flags & TLB_USERMODE) == 0 ) {
        MMU_TLB_READ_PROT_ERROR(addr);

        MMU_TLB_READ_MISS_ERROR(addr);

        MMU_TLB_MULTI_HIT_ERROR(addr);

    sh4_icache.page_ppa = mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask;
    sh4_icache.page = mem_get_region( sh4_icache.page_ppa );
    if( sh4_icache.page == NULL ) {
        sh4_icache.page_vma = -1;
    } else {
        sh4_icache.page_vma = mmu_itlb[entryNo].vpn & mmu_itlb[entryNo].mask;
        sh4_icache.mask = mmu_itlb[entryNo].mask;
    }
/**
 * Translate address for disassembly purposes (i.e. performs an instruction
 * lookup) - does not raise exceptions or modify any state, and ignores
 * protection bits. Returns the translated address, or MMU_VMA_ERROR
 * on translation failure.
 */
sh4addr_t FASTCALL mmu_vma_to_phys_disasm( sh4vma_t vma )
{
    if( vma & 0x80000000 ) {
        if( vma < 0xC0000000 ) {
            /* P1, P2 and P4 regions are pass-through (no translation) */
            return VMA_TO_EXT_ADDR(vma);
        } else if( vma >= 0xE0000000 && vma < 0xFFFFFF00 ) {
            /* Not translatable */
            return MMU_VMA_ERROR;
        }
    }

    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( (mmucr & MMUCR_AT) == 0 ) {
        return VMA_TO_EXT_ADDR(vma);
    }

    int entryNo = mmu_itlb_lookup_vpn( vma );
    if( entryNo == -2 ) {
        entryNo = mmu_itlb_lookup_vpn_asid( vma );
    }
    if( entryNo < 0 ) {
        return MMU_VMA_ERROR;
    } else {
        return (mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask) |
            (vma & (~mmu_itlb[entryNo].mask));
    }
}
void FASTCALL sh4_flush_store_queue( sh4addr_t addr )
{
    int queue = (addr&0x20)>>2;
    uint32_t hi = MMIO_READ( MMU, QACR0 + (queue>>1)) << 24;
    sh4ptr_t src = (sh4ptr_t)&sh4r.store_queue[queue];
    sh4addr_t target = (addr&0x03FFFFE0) | hi;
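    /* Worked example (illustrative values, not from the original source):
     * flushing addr 0xE0000020 selects queue 1 (queue = 8, i.e. SQ1 at
     * sh4r.store_queue[8..15]) and QACR1. With QACR1 = 0x10 (AREA = 4),
     * hi = 0x10000000 and target = (0xE0000020 & 0x03FFFFE0) | hi
     * = 0x10000020, so the 32 bytes land in external memory area 4. */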
    mem_copy_to_sh4( target, src, 32 );
}
gboolean FASTCALL sh4_flush_store_queue_mmu( sh4addr_t addr )
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    int queue = (addr&0x20)>>2;
    sh4ptr_t src = (sh4ptr_t)&sh4r.store_queue[queue];

    /* Store queue operation */

    if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
        entryNo = mmu_utlb_lookup_vpn_asid( addr );
    } else {
        entryNo = mmu_utlb_lookup_vpn( addr );
    }

        MMU_TLB_WRITE_MISS_ERROR(addr);

        MMU_TLB_MULTI_HIT_ERROR(addr);

    if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
        : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
        /* protection violation */
        MMU_TLB_WRITE_PROT_ERROR(addr);

    if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
        MMU_TLB_INITIAL_WRITE_ERROR(addr);

    /* finally generate the target address */
    target = ((mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
        (addr & (~mmu_utlb[entryNo].mask))) & 0xFFFFFFE0;

    mem_copy_to_sh4( target, src, 32 );