4 * SH4 MMU implementation based on address space page maps. This module
5 * is responsible for all address decoding functions.
7 * Copyright (c) 2005 Nathan Keynes.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 #define MODULE sh4_module
23 #include "sh4/sh4mmio.h"
24 #include "sh4/sh4core.h"
25 #include "sh4/sh4trans.h"
26 #include "dreamcast.h"
30 /* An entry is a 1K entry if it's one of the mmu_utlb_1k_pages entries */
31 #define IS_1K_PAGE_ENTRY(ent) ( ((uintptr_t)(((struct utlb_1k_entry *)ent) - &mmu_utlb_1k_pages[0])) < UTLB_ENTRY_COUNT )
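/* This relies on unsigned wrap-around: a pointer inside mmu_utlb_1k_pages
 * yields a small element index (< UTLB_ENTRY_COUNT), while any other pointer
 * produces a huge value once the difference is cast to uintptr_t, so the
 * single comparison doubles as a range check.
 */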
33 /* Primary address space (used directly by SH4 cores) */
34 mem_region_fn_t *sh4_address_space;
35 mem_region_fn_t *sh4_user_address_space;
37 /* External address space (usually the same as the global ext_address_space) */
38 static mem_region_fn_t *sh4_ext_address_space;
40 /* Accessed from the UTLB accessor methods */
43 static gboolean mmu_urc_overflow; /* If true, urc was set >= urb */
46 static struct itlb_entry mmu_itlb[ITLB_ENTRY_COUNT];
47 static struct utlb_entry mmu_utlb[UTLB_ENTRY_COUNT];
48 static struct utlb_page_entry mmu_utlb_pages[UTLB_ENTRY_COUNT];
49 static uint32_t mmu_lrui;
50 static uint32_t mmu_asid; // current asid
51 static struct utlb_default_regions *mmu_user_storequeue_regions;
53 /* Structures for 1K page handling */
54 static struct utlb_1k_entry mmu_utlb_1k_pages[UTLB_ENTRY_COUNT];
55 static int mmu_utlb_1k_free_list[UTLB_ENTRY_COUNT];
56 static int mmu_utlb_1k_free_index;
59 /* Function prototypes */
60 static void mmu_invalidate_tlb();
61 static void mmu_utlb_register_all();
62 static void mmu_utlb_remove_entry(int);
63 static void mmu_utlb_insert_entry(int);
64 static void mmu_register_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn );
65 static void mmu_register_user_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn );
66 static void mmu_set_tlb_enabled( int tlb_on );
67 static void mmu_set_tlb_asid( uint32_t asid );
68 static void mmu_set_storequeue_protected( int protected, int tlb_on );
69 static gboolean mmu_utlb_map_pages( mem_region_fn_t priv_page, mem_region_fn_t user_page, sh4addr_t start_addr, int npages );
70 static void mmu_utlb_remap_pages( gboolean remap_priv, gboolean remap_user, int entryNo );
71 static gboolean mmu_utlb_unmap_pages( gboolean unmap_priv, gboolean unmap_user, sh4addr_t start_addr, int npages );
72 static gboolean mmu_ext_page_remapped( sh4addr_t page, mem_region_fn_t fn, void *user_data );
73 static void mmu_utlb_1k_init();
74 static struct utlb_1k_entry *mmu_utlb_1k_alloc();
75 static void mmu_utlb_1k_free( struct utlb_1k_entry *entry );
76 static int mmu_read_urc();
78 static void FASTCALL tlb_miss_read( sh4addr_t addr, void *exc );
79 static int32_t FASTCALL tlb_protected_read( sh4addr_t addr, void *exc );
80 static void FASTCALL tlb_protected_write( sh4addr_t addr, uint32_t val, void *exc );
81 static int32_t FASTCALL tlb_protected_read_for_write( sh4addr_t addr, void *exc );
82 static void FASTCALL tlb_initial_write( sh4addr_t addr, uint32_t val, void *exc );
83 static int32_t FASTCALL tlb_initial_read_for_write( sh4addr_t addr, void *exc );
84 static uint32_t get_tlb_size_mask( uint32_t flags );
85 static uint32_t get_tlb_size_pages( uint32_t flags );
87 #define DEFAULT_REGIONS 0
88 #define DEFAULT_STOREQUEUE_REGIONS 1
89 #define DEFAULT_STOREQUEUE_SQMD_REGIONS 2
91 static struct utlb_default_regions mmu_default_regions[3] = {
92 { &mem_region_tlb_miss, &mem_region_tlb_protected, &mem_region_tlb_multihit },
93 { &p4_region_storequeue_miss, &p4_region_storequeue_protected, &p4_region_storequeue_multihit },
94 { &p4_region_storequeue_sqmd_miss, &p4_region_storequeue_sqmd_protected, &p4_region_storequeue_sqmd_multihit } };
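/* Each triple above supplies, in order, the fallback regions used on a TLB
 * miss, a protection violation and a multi-hit (the tlb_miss, tlb_prot and
 * tlb_multihit members), for ordinary pages and for the two store-queue
 * variants respectively.
 */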
96 #define IS_STOREQUEUE_PROTECTED() (mmu_user_storequeue_regions == &mmu_default_regions[DEFAULT_STOREQUEUE_SQMD_REGIONS])
98 #ifndef SH4_TRANSLATOR
99 /* Dummy MMU vtable functions */
100 void mmu_utlb_init_vtable( struct utlb_entry *ent, struct utlb_page_entry *page, gboolean writable )
103 void mmu_utlb_init_storequeue_vtable( struct utlb_entry *ent, struct utlb_page_entry *page )
106 void mmu_utlb_1k_init_vtable( struct utlb_1k_entry *entry )
111 /*********************** Module public functions ****************************/
114 * Allocate memory for the address space maps, and initialize them according
115 * to the default (reset) values. (TLB is disabled by default)
120 sh4_ext_address_space = ext_address_space;
121 sh4_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 256 );
122 sh4_user_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 256 );
123 mmu_user_storequeue_regions = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
125 mmu_set_tlb_enabled(0);
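/* User mode has no access above 0x80000000 apart from the store-queue window
 * registered below; an end address of 0 wraps, so this registration runs to
 * the top of the 32-bit address space. */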
126 mmu_register_user_mem_region( 0x80000000, 0x00000000, &mem_region_address_error );
127 mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
129 /* Setup P4 tlb/cache access regions */
130 mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
131 mmu_register_mem_region( 0xE4000000, 0xF0000000, &mem_region_unmapped );
132 mmu_register_mem_region( 0xF0000000, 0xF1000000, &p4_region_icache_addr );
133 mmu_register_mem_region( 0xF1000000, 0xF2000000, &p4_region_icache_data );
134 mmu_register_mem_region( 0xF2000000, 0xF3000000, &p4_region_itlb_addr );
135 mmu_register_mem_region( 0xF3000000, 0xF4000000, &p4_region_itlb_data );
136 mmu_register_mem_region( 0xF4000000, 0xF5000000, &p4_region_ocache_addr );
137 mmu_register_mem_region( 0xF5000000, 0xF6000000, &p4_region_ocache_data );
138 mmu_register_mem_region( 0xF6000000, 0xF7000000, &p4_region_utlb_addr );
139 mmu_register_mem_region( 0xF7000000, 0xF8000000, &p4_region_utlb_data );
140 mmu_register_mem_region( 0xF8000000, 0x00000000, &mem_region_unmapped );
142 /* Setup P4 control region */
143 mmu_register_mem_region( 0xFF000000, 0xFF001000, &mmio_region_MMU.fn );
144 mmu_register_mem_region( 0xFF100000, 0xFF101000, &mmio_region_PMM.fn );
145 mmu_register_mem_region( 0xFF200000, 0xFF201000, &mmio_region_UBC.fn );
146 mmu_register_mem_region( 0xFF800000, 0xFF801000, &mmio_region_BSC.fn );
147 mmu_register_mem_region( 0xFF900000, 0xFFA00000, &mem_region_unmapped ); // SDMR2 + SDMR3
148 mmu_register_mem_region( 0xFFA00000, 0xFFA01000, &mmio_region_DMAC.fn );
149 mmu_register_mem_region( 0xFFC00000, 0xFFC01000, &mmio_region_CPG.fn );
150 mmu_register_mem_region( 0xFFC80000, 0xFFC81000, &mmio_region_RTC.fn );
151 mmu_register_mem_region( 0xFFD00000, 0xFFD01000, &mmio_region_INTC.fn );
152 mmu_register_mem_region( 0xFFD80000, 0xFFD81000, &mmio_region_TMU.fn );
153 mmu_register_mem_region( 0xFFE00000, 0xFFE01000, &mmio_region_SCI.fn );
154 mmu_register_mem_region( 0xFFE80000, 0xFFE81000, &mmio_region_SCIF.fn );
155 mmu_register_mem_region( 0xFFF00000, 0xFFF01000, &mem_region_unmapped ); // H-UDI
157 register_mem_page_remapped_hook( mmu_ext_page_remapped, NULL );
160 /* Ensure the code regions are executable. Although it might
161 * be more portable to mmap these at runtime rather than using static decls
163 mem_unprotect( mmu_utlb_pages, sizeof(mmu_utlb_pages) );
164 mem_unprotect( mmu_utlb_1k_pages, sizeof(mmu_utlb_1k_pages) );
169 mmio_region_MMU_write( CCR, 0 );
170 mmio_region_MMU_write( MMUCR, 0 );
173 void MMU_save_state( FILE *f )
176 fwrite( &mmu_itlb, sizeof(mmu_itlb), 1, f );
177 fwrite( &mmu_utlb, sizeof(mmu_utlb), 1, f );
178 fwrite( &mmu_urc, sizeof(mmu_urc), 1, f );
179 fwrite( &mmu_urb, sizeof(mmu_urb), 1, f );
180 fwrite( &mmu_lrui, sizeof(mmu_lrui), 1, f );
181 fwrite( &mmu_asid, sizeof(mmu_asid), 1, f );
184 int MMU_load_state( FILE *f )
186 if( fread( &mmu_itlb, sizeof(mmu_itlb), 1, f ) != 1 ) {
189 if( fread( &mmu_utlb, sizeof(mmu_utlb), 1, f ) != 1 ) {
192 if( fread( &mmu_urc, sizeof(mmu_urc), 1, f ) != 1 ) {
195 if( fread( &mmu_urb, sizeof(mmu_urb), 1, f ) != 1 ) {
198 if( fread( &mmu_lrui, sizeof(mmu_lrui), 1, f ) != 1 ) {
201 if( fread( &mmu_asid, sizeof(mmu_asid), 1, f ) != 1 ) {
205 uint32_t mmucr = MMIO_READ(MMU,MMUCR);
206 mmu_urc_overflow = mmu_urc >= mmu_urb;
207 mmu_set_tlb_enabled(mmucr&MMUCR_AT);
208 mmu_set_storequeue_protected(mmucr&MMUCR_SQMD, mmucr&MMUCR_AT);
213 * LDTLB instruction implementation. Copies PTEH, PTEL and PTEA into the UTLB
214 * entry identified by MMUCR.URC. Does not modify MMUCR or the ITLB.
218 int urc = mmu_read_urc();
219 if( IS_TLB_ENABLED() && mmu_utlb[urc].flags & TLB_VALID )
220 mmu_utlb_remove_entry( urc );
221 mmu_utlb[urc].vpn = MMIO_READ(MMU, PTEH) & 0xFFFFFC00;
222 mmu_utlb[urc].asid = MMIO_READ(MMU, PTEH) & 0x000000FF;
223 mmu_utlb[urc].ppn = MMIO_READ(MMU, PTEL) & 0x1FFFFC00;
224 mmu_utlb[urc].flags = MMIO_READ(MMU, PTEL) & 0x000001FF;
225 mmu_utlb[urc].pcmcia = MMIO_READ(MMU, PTEA);
226 mmu_utlb[urc].mask = get_tlb_size_mask(mmu_utlb[urc].flags);
227 if( IS_TLB_ENABLED() && mmu_utlb[urc].flags & TLB_VALID )
228 mmu_utlb_insert_entry( urc );
232 MMIO_REGION_READ_FN( MMU, reg )
237 return MMIO_READ( MMU, MMUCR) | (mmu_read_urc()<<10) | ((mmu_urb&0x3F)<<18) | (mmu_lrui<<26);
239 return MMIO_READ( MMU, reg );
243 MMIO_REGION_READ_DEFSUBFNS(MMU)
245 MMIO_REGION_WRITE_FN( MMU, reg, val )
254 if( (val & 0xFF) != mmu_asid ) {
255 mmu_set_tlb_asid( val&0xFF );
272 if( val & MMUCR_TI ) {
273 mmu_invalidate_tlb();
275 mmu_urc = (val >> 10) & 0x3F;
276 mmu_urb = (val >> 18) & 0x3F;
279 } else if( mmu_urc >= mmu_urb ) {
280 mmu_urc_overflow = TRUE;
282 mmu_lrui = (val >> 26) & 0x3F;
284 tmp = MMIO_READ( MMU, MMUCR );
285 if( (val ^ tmp) & (MMUCR_SQMD) ) {
286 mmu_set_storequeue_protected( val & MMUCR_SQMD, val&MMUCR_AT );
288 if( (val ^ tmp) & (MMUCR_AT) ) {
289 // AT flag has changed state - flush the xlt cache as all bets
290 // are off now. We also need to force an immediate exit from the
292 mmu_set_tlb_enabled( val & MMUCR_AT );
293 MMIO_WRITE( MMU, MMUCR, val );
294 sh4_core_exit( CORE_EXIT_FLUSH_ICACHE );
295 xlat_flush_cache(); // If we're not running, flush the cache anyway
299 CCN_set_cache_control( val );
303 /* Note that if the high bit is set, this appears to reset the machine.
304 * Not emulating this behaviour yet until we know why...
313 PMM_write_control(0, val);
317 PMM_write_control(1, val);
323 MMIO_WRITE( MMU, reg, val );
326 /********************** 1K Page handling ***********************/
327 /* Since we use 4K pages as our native page size, 1K pages need a bit of extra
328 * effort to manage - we justify this on the basis that most programs won't
329 * actually use 1K pages, so we may as well optimize for the common case.
331 * Implementation uses an intermediate page entry (the utlb_1k_entry) that
332 * redirects requests to the 'real' page entry. These are allocated on an
333 * as-needed basis, and returned to the pool when all subpages are empty.
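 *
 * For example, mapping a single 1K page allocates a utlb_1k_entry for the
 * covering 4K slot, copies the slot's previous handlers into all four
 * subpages, then overwrites only the subpage selected by VA bits 11:10;
 * unmapping reverses this and merges the entry back onto the free list once
 * all four subpages agree again (see mmu_utlb_map_pages/mmu_utlb_unmap_pages).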
335 static void mmu_utlb_1k_init()
338 for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
339 mmu_utlb_1k_free_list[i] = i;
340 mmu_utlb_1k_init_vtable( &mmu_utlb_1k_pages[i] );
342 mmu_utlb_1k_free_index = 0;
345 static struct utlb_1k_entry *mmu_utlb_1k_alloc()
347 assert( mmu_utlb_1k_free_index < UTLB_ENTRY_COUNT );
348 struct utlb_1k_entry *entry = &mmu_utlb_1k_pages[mmu_utlb_1k_free_list[mmu_utlb_1k_free_index++]];
352 static void mmu_utlb_1k_free( struct utlb_1k_entry *ent )
354 unsigned int entryNo = ent - &mmu_utlb_1k_pages[0];
355 assert( entryNo < UTLB_ENTRY_COUNT );
356 assert( mmu_utlb_1k_free_index > 0 );
357 mmu_utlb_1k_free_list[--mmu_utlb_1k_free_index] = entryNo;
361 /********************** Address space maintenance *************************/
363 mem_region_fn_t *mmu_set_ext_address_space( mem_region_fn_t *ext )
365 mem_region_fn_t *old_ext = sh4_ext_address_space;
366 sh4_ext_address_space = ext;
367 mmu_set_tlb_enabled(IS_TLB_ENABLED());
372 * MMU accessor functions just increment URC - fixup here if necessary
374 static int mmu_read_urc()
376 if( mmu_urc_overflow ) {
377 if( mmu_urc >= 0x40 ) {
378 mmu_urc_overflow = FALSE;
388 static void mmu_register_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn )
390 int count = (end - start) >> 12;
391 mem_region_fn_t *ptr = &sh4_address_space[start>>12];
392 while( count-- > 0 ) {
396 static void mmu_register_user_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn )
398 int count = (end - start) >> 12;
399 mem_region_fn_t *ptr = &sh4_user_address_space[start>>12];
400 while( count-- > 0 ) {
405 static gboolean mmu_ext_page_remapped( sh4addr_t page, mem_region_fn_t fn, void *user_data )
408 if( (MMIO_READ(MMU,MMUCR)) & MMUCR_AT ) {
410 sh4_address_space[(page|0x80000000)>>12] = fn; /* Direct map to P1 and P2 */
411 sh4_address_space[(page|0xA0000000)>>12] = fn;
412 /* Scan UTLB and update any direct-referencing entries */
414 /* Direct map to U0, P0, P1, P2, P3 */
415 for( i=0; i<= 0xC0000000; i+= 0x20000000 ) {
416 sh4_address_space[(page|i)>>12] = fn;
418 for( i=0; i < 0x80000000; i+= 0x20000000 ) {
419 sh4_user_address_space[(page|i)>>12] = fn;
425 static void mmu_set_tlb_enabled( int tlb_on )
427 mem_region_fn_t *ptr;
430 /* Reset the storequeue area */
433 mmu_register_mem_region(0x00000000, 0x80000000, &mem_region_tlb_miss );
434 mmu_register_mem_region(0xC0000000, 0xE0000000, &mem_region_tlb_miss );
435 mmu_register_user_mem_region(0x00000000, 0x80000000, &mem_region_tlb_miss );
437 /* Default SQ prefetch goes to TLB miss (?) */
438 mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue_miss );
439 mmu_register_user_mem_region( 0xE0000000, 0xE4000000, mmu_user_storequeue_regions->tlb_miss );
440 mmu_utlb_register_all();
442 for( i=0, ptr = sh4_address_space; i<7; i++, ptr += LXDREAM_PAGE_TABLE_ENTRIES ) {
443 memcpy( ptr, sh4_ext_address_space, sizeof(mem_region_fn_t) * LXDREAM_PAGE_TABLE_ENTRIES );
445 for( i=0, ptr = sh4_user_address_space; i<4; i++, ptr += LXDREAM_PAGE_TABLE_ENTRIES ) {
446 memcpy( ptr, sh4_ext_address_space, sizeof(mem_region_fn_t) * LXDREAM_PAGE_TABLE_ENTRIES );
449 mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
450 if( IS_STOREQUEUE_PROTECTED() ) {
451 mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue_sqmd );
453 mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
460 * Flip the SQMD switch - this is rather expensive, so will need to be changed if
461 * anything expects to do this frequently.
463 static void mmu_set_storequeue_protected( int protected, int tlb_on )
465 mem_region_fn_t nontlb_region;
469 mmu_user_storequeue_regions = &mmu_default_regions[DEFAULT_STOREQUEUE_SQMD_REGIONS];
470 nontlb_region = &p4_region_storequeue_sqmd;
472 mmu_user_storequeue_regions = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
473 nontlb_region = &p4_region_storequeue;
477 mmu_register_user_mem_region( 0xE0000000, 0xE4000000, mmu_user_storequeue_regions->tlb_miss );
478 for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
479 if( (mmu_utlb[i].vpn & 0xFC000000) == 0xE0000000 ) {
480 mmu_utlb_insert_entry(i);
484 mmu_register_user_mem_region( 0xE0000000, 0xE4000000, nontlb_region );
489 static void mmu_set_tlb_asid( uint32_t asid )
491 if( IS_TLB_ENABLED() ) {
492 /* Scan for pages that need to be remapped */
494 if( IS_SV_ENABLED() ) {
495 for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
496 if( mmu_utlb[i].asid == mmu_asid &&
497 (mmu_utlb[i].flags & (TLB_VALID|TLB_SHARE)) == (TLB_VALID) ) {
498 // Matches old ASID - unmap out
499 if( !mmu_utlb_unmap_pages( FALSE, TRUE, mmu_utlb[i].vpn&mmu_utlb[i].mask,
500 get_tlb_size_pages(mmu_utlb[i].flags) ) )
501 mmu_utlb_remap_pages( FALSE, TRUE, i );
504 for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
505 if( mmu_utlb[i].asid == asid &&
506 (mmu_utlb[i].flags & (TLB_VALID|TLB_SHARE)) == (TLB_VALID) ) {
507 // Matches new ASID - map in
508 mmu_utlb_map_pages( NULL, mmu_utlb_pages[i].user_fn,
509 mmu_utlb[i].vpn&mmu_utlb[i].mask,
510 get_tlb_size_pages(mmu_utlb[i].flags) );
514 // Remap both Priv+user pages
515 for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
516 if( mmu_utlb[i].asid == mmu_asid &&
517 (mmu_utlb[i].flags & (TLB_VALID|TLB_SHARE)) == (TLB_VALID) ) {
518 if( !mmu_utlb_unmap_pages( TRUE, TRUE, mmu_utlb[i].vpn&mmu_utlb[i].mask,
519 get_tlb_size_pages(mmu_utlb[i].flags) ) )
520 mmu_utlb_remap_pages( TRUE, TRUE, i );
523 for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
524 if( mmu_utlb[i].asid == asid &&
525 (mmu_utlb[i].flags & (TLB_VALID|TLB_SHARE)) == (TLB_VALID) ) {
526 mmu_utlb_map_pages( &mmu_utlb_pages[i].fn, mmu_utlb_pages[i].user_fn,
527 mmu_utlb[i].vpn&mmu_utlb[i].mask,
528 get_tlb_size_pages(mmu_utlb[i].flags) );
532 sh4_icache.page_vma = -1; // invalidate icache as asid has changed
537 static uint32_t get_tlb_size_mask( uint32_t flags )
539 switch( flags & TLB_SIZE_MASK ) {
540 case TLB_SIZE_1K: return MASK_1K;
541 case TLB_SIZE_4K: return MASK_4K;
542 case TLB_SIZE_64K: return MASK_64K;
543 case TLB_SIZE_1M: return MASK_1M;
544 default: return 0; /* Unreachable */
547 static uint32_t get_tlb_size_pages( uint32_t flags )
549 switch( flags & TLB_SIZE_MASK ) {
550 case TLB_SIZE_1K: return 0;
551 case TLB_SIZE_4K: return 1;
552 case TLB_SIZE_64K: return 16;
553 case TLB_SIZE_1M: return 256;
554 default: return 0; /* Unreachable */
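/* In other words, a TLB entry of size N covers N/4K native page-table slots:
 * a 4K entry occupies one slot, a 64K entry sixteen and a 1M entry 256, while
 * 1K entries report 0 and are routed through the utlb_1k_entry machinery
 * instead.
 */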
559 * Add a new TLB entry mapping to the address space table. If any of the pages
560 * are already mapped, they are mapped to the TLB multi-hit page instead.
561 * @return FALSE if a TLB multihit situation was detected, otherwise TRUE.
563 static gboolean mmu_utlb_map_pages( mem_region_fn_t priv_page, mem_region_fn_t user_page, sh4addr_t start_addr, int npages )
565 mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
566 mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
567 struct utlb_default_regions *privdefs = &mmu_default_regions[DEFAULT_REGIONS];
568 struct utlb_default_regions *userdefs = privdefs;
570 gboolean mapping_ok = TRUE;
573 if( (start_addr & 0xFC000000) == 0xE0000000 ) {
574 /* Storequeue mapping */
575 privdefs = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
576 userdefs = mmu_user_storequeue_regions;
577 } else if( (start_addr & 0xE0000000) == 0xC0000000 ) {
578 user_page = NULL; /* No user access to P3 region */
579 } else if( start_addr >= 0x80000000 ) {
580 return TRUE; // No mapping - legal but meaningless
584 struct utlb_1k_entry *ent;
585 int i, idx = (start_addr >> 10) & 0x03;
586 if( IS_1K_PAGE_ENTRY(*ptr) ) {
587 ent = (struct utlb_1k_entry *)*ptr;
589 ent = mmu_utlb_1k_alloc();
590 /* New 1K struct - init to previous contents of region */
591 for( i=0; i<4; i++ ) {
592 ent->subpages[i] = *ptr;
593 ent->user_subpages[i] = *uptr;
596 *uptr = &ent->user_fn;
599 if( priv_page != NULL ) {
600 if( ent->subpages[idx] == privdefs->tlb_miss ) {
601 ent->subpages[idx] = priv_page;
604 ent->subpages[idx] = privdefs->tlb_multihit;
607 if( user_page != NULL ) {
608 if( ent->user_subpages[idx] == userdefs->tlb_miss ) {
609 ent->user_subpages[idx] = user_page;
612 ent->user_subpages[idx] = userdefs->tlb_multihit;
617 if( priv_page != NULL ) {
618 /* Privileged mapping only */
619 for( i=0; i<npages; i++ ) {
620 if( *ptr == privdefs->tlb_miss ) {
624 *ptr++ = privdefs->tlb_multihit;
628 if( user_page != NULL ) {
629 /* User mapping only (eg ASID change remap w/ SV=1) */
630 for( i=0; i<npages; i++ ) {
631 if( *uptr == userdefs->tlb_miss ) {
635 *uptr++ = userdefs->tlb_multihit;
645 * Remap any pages within the region covered by entryNo, but not including
646 * entryNo itself. This is used to reestablish pages that were previously
647 * covered by a multi-hit exception region when one of the pages is removed.
649 static void mmu_utlb_remap_pages( gboolean remap_priv, gboolean remap_user, int entryNo )
651 int mask = mmu_utlb[entryNo].mask;
652 uint32_t remap_addr = mmu_utlb[entryNo].vpn & mask;
655 for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
656 if( i != entryNo && (mmu_utlb[i].vpn & mask) == remap_addr && (mmu_utlb[i].flags & TLB_VALID) ) {
657 /* Overlapping region */
658 mem_region_fn_t priv_page = (remap_priv ? &mmu_utlb_pages[i].fn : NULL);
659 mem_region_fn_t user_page = (remap_user ? mmu_utlb_pages[i].user_fn : NULL);
663 if( mmu_utlb[i].mask >= mask ) {
664 /* entry is no larger than the area we're replacing - map completely */
665 start_addr = mmu_utlb[i].vpn & mmu_utlb[i].mask;
666 npages = get_tlb_size_pages( mmu_utlb[i].flags );
668 /* Otherwise map subset - region covered by removed page */
669 start_addr = remap_addr;
670 npages = get_tlb_size_pages( mmu_utlb[entryNo].flags );
673 if( (mmu_utlb[i].flags & TLB_SHARE) || mmu_utlb[i].asid == mmu_asid ) {
674 mmu_utlb_map_pages( priv_page, user_page, start_addr, npages );
675 } else if( IS_SV_ENABLED() ) {
676 mmu_utlb_map_pages( priv_page, NULL, start_addr, npages );
684 * Remove a previous TLB mapping (replacing them with the TLB miss region).
685 * @return FALSE if any pages were previously mapped to the TLB multihit page,
686 * otherwise TRUE. In either case, all pages in the region are cleared to TLB miss.
688 static gboolean mmu_utlb_unmap_pages( gboolean unmap_priv, gboolean unmap_user, sh4addr_t start_addr, int npages )
690 mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
691 mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
692 struct utlb_default_regions *privdefs = &mmu_default_regions[DEFAULT_REGIONS];
693 struct utlb_default_regions *userdefs = privdefs;
695 gboolean unmapping_ok = TRUE;
698 if( (start_addr & 0xFC000000) == 0xE0000000 ) {
699 /* Storequeue mapping */
700 privdefs = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
701 userdefs = mmu_user_storequeue_regions;
702 } else if( (start_addr & 0xE0000000) == 0xC0000000 ) {
704 } else if( start_addr >= 0x80000000 ) {
705 return TRUE; // No mapping - legal but meaningless
708 if( npages == 0 ) { // 1K page
709 assert( IS_1K_PAGE_ENTRY( *ptr ) );
710 struct utlb_1k_entry *ent = (struct utlb_1k_entry *)*ptr;
711 int i, idx = (start_addr >> 10) & 0x03, mergeable=1;
712 if( ent->subpages[idx] == privdefs->tlb_multihit ) {
713 unmapping_ok = FALSE;
716 ent->subpages[idx] = privdefs->tlb_miss;
718 ent->user_subpages[idx] = userdefs->tlb_miss;
720 /* If all 4 subpages have the same content, merge them together and
721 * release the 1K entry
723 mem_region_fn_t priv_page = ent->subpages[0];
724 mem_region_fn_t user_page = ent->user_subpages[0];
725 for( i=1; i<4; i++ ) {
726 if( priv_page != ent->subpages[i] || user_page != ent->user_subpages[i] ) {
732 mmu_utlb_1k_free(ent);
738 /* Privileged (un)mapping */
739 for( i=0; i<npages; i++ ) {
740 if( *ptr == privdefs->tlb_multihit ) {
741 unmapping_ok = FALSE;
743 *ptr++ = privdefs->tlb_miss;
747 /* User (un)mapping */
748 for( i=0; i<npages; i++ ) {
749 if( *uptr == userdefs->tlb_multihit ) {
750 unmapping_ok = FALSE;
752 *uptr++ = userdefs->tlb_miss;
760 static void mmu_utlb_insert_entry( int entry )
762 struct utlb_entry *ent = &mmu_utlb[entry];
763 mem_region_fn_t page = &mmu_utlb_pages[entry].fn;
764 mem_region_fn_t upage;
765 sh4addr_t start_addr = ent->vpn & ent->mask;
766 int npages = get_tlb_size_pages(ent->flags);
768 if( (start_addr & 0xFC000000) == 0xE0000000 ) {
769 /* Store queue mappings are a bit different - normal access is fixed to
770 * the store queue register block, and we only map prefetches through
773 mmu_utlb_init_storequeue_vtable( ent, &mmu_utlb_pages[entry] );
775 if( (ent->flags & TLB_USERMODE) == 0 ) {
776 upage = mmu_user_storequeue_regions->tlb_prot;
777 } else if( IS_STOREQUEUE_PROTECTED() ) {
778 upage = &p4_region_storequeue_sqmd;
785 if( (ent->flags & TLB_USERMODE) == 0 ) {
786 upage = &mem_region_tlb_protected;
791 if( (ent->flags & TLB_WRITABLE) == 0 ) {
792 page->write_long = (mem_write_fn_t)tlb_protected_write;
793 page->write_word = (mem_write_fn_t)tlb_protected_write;
794 page->write_byte = (mem_write_fn_t)tlb_protected_write;
795 page->write_burst = (mem_write_burst_fn_t)tlb_protected_write;
796 page->read_byte_for_write = (mem_read_fn_t)tlb_protected_read_for_write;
797 mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], FALSE );
798 } else if( (ent->flags & TLB_DIRTY) == 0 ) {
799 page->write_long = (mem_write_fn_t)tlb_initial_write;
800 page->write_word = (mem_write_fn_t)tlb_initial_write;
801 page->write_byte = (mem_write_fn_t)tlb_initial_write;
802 page->write_burst = (mem_write_burst_fn_t)tlb_initial_write;
803 page->read_byte_for_write = (mem_read_fn_t)tlb_initial_read_for_write;
804 mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], FALSE );
806 mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], TRUE );
810 mmu_utlb_pages[entry].user_fn = upage;
812 /* Is page visible? */
813 if( (ent->flags & TLB_SHARE) || ent->asid == mmu_asid ) {
814 mmu_utlb_map_pages( page, upage, start_addr, npages );
815 } else if( IS_SV_ENABLED() ) {
816 mmu_utlb_map_pages( page, NULL, start_addr, npages );
820 static void mmu_utlb_remove_entry( int entry )
822 struct utlb_entry *ent = &mmu_utlb[entry];
823 sh4addr_t start_addr = ent->vpn&ent->mask;
825 int npages = get_tlb_size_pages(ent->flags);
827 if( (ent->flags & TLB_SHARE) || ent->asid == mmu_asid ) {
829 } else if( IS_SV_ENABLED() ) {
832 return; // Not mapped
835 gboolean clean_unmap = mmu_utlb_unmap_pages( TRUE, unmap_user, start_addr, npages );
838 mmu_utlb_remap_pages( TRUE, unmap_user, entry );
842 static void mmu_utlb_register_all()
845 for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
846 if( mmu_utlb[i].flags & TLB_VALID )
847 mmu_utlb_insert_entry( i );
851 static void mmu_invalidate_tlb()
854 for( i=0; i<ITLB_ENTRY_COUNT; i++ ) {
855 mmu_itlb[i].flags &= (~TLB_VALID);
857 if( IS_TLB_ENABLED() ) {
858 for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
859 if( mmu_utlb[i].flags & TLB_VALID ) {
860 mmu_utlb_remove_entry( i );
864 for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
865 mmu_utlb[i].flags &= (~TLB_VALID);
869 /******************************************************************************/
870 /* MMU TLB address translation */
871 /******************************************************************************/
874 * Translate a 32-bit address into a UTLB entry number. Does not check for
875 * page protection etc.
876 * @return the entryNo if found, -1 if not found, and -2 for a multi-hit.
878 int mmu_utlb_entry_for_vpn( uint32_t vpn )
881 mem_region_fn_t fn = sh4_address_space[vpn>>12];
882 if( fn >= &mmu_utlb_pages[0].fn && fn < &mmu_utlb_pages[UTLB_ENTRY_COUNT].fn ) {
883 return ((struct utlb_page_entry *)fn) - &mmu_utlb_pages[0];
884 } else if( fn >= &mmu_utlb_1k_pages[0].fn && fn < &mmu_utlb_1k_pages[UTLB_ENTRY_COUNT].fn ) {
885 struct utlb_1k_entry *ent = (struct utlb_1k_entry *)fn;
886 fn = ent->subpages[(vpn>>10)&0x03];
887 if( fn >= &mmu_utlb_pages[0].fn && fn < &mmu_utlb_pages[UTLB_ENTRY_COUNT].fn ) {
888 return ((struct utlb_page_entry *)fn) - &mmu_utlb_pages[0];
891 if( fn == &mem_region_tlb_multihit ) {
900 * Perform the actual utlb lookup w/ asid matching.
901 * Possible outcomes are:
902 * 0..63 Single match - good, return entry found
903 * -1 No match - raise a tlb data miss exception
904 * -2 Multiple matches - raise a multi-hit exception (reset)
905 * @param vpn virtual address to resolve
906 * @return the resultant UTLB entry, or an error.
908 static inline int mmu_utlb_lookup_vpn_asid( uint32_t vpn )
914 if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
918 for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
919 if( (mmu_utlb[i].flags & TLB_VALID) &&
920 ((mmu_utlb[i].flags & TLB_SHARE) || mmu_asid == mmu_utlb[i].asid) &&
921 ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
932 * Perform the actual utlb lookup matching on vpn only
933 * Possible outcomes are:
934 * 0..63 Single match - good, return entry found
935 * -1 No match - raise a tlb data miss exception
936 * -2 Multiple matches - raise a multi-hit exception (reset)
937 * @param vpn virtual address to resolve
938 * @return the resultant UTLB entry, or an error.
940 static inline int mmu_utlb_lookup_vpn( uint32_t vpn )
946 if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
950 for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
951 if( (mmu_utlb[i].flags & TLB_VALID) &&
952 ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
964 * Update the ITLB by replacing the LRU entry with the specified UTLB entry.
965 * @return the number (0-3) of the replaced entry.
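 * The mmu_lrui bit tests below pick the least-recently-used slot: a match on
 * 0x38 selects entry 0, 0x26==0x06 entry 1, 0x15==0x01 entry 2 and anything
 * else entry 3, after which mmu_lrui is rewritten with the same patterns the
 * lookup functions apply when that entry scores a hit.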
967 static int inline mmu_itlb_update_from_utlb( int entryNo )
970 /* Determine entry to replace based on lrui */
971 if( (mmu_lrui & 0x38) == 0x38 ) {
973 mmu_lrui = mmu_lrui & 0x07;
974 } else if( (mmu_lrui & 0x26) == 0x06 ) {
976 mmu_lrui = (mmu_lrui & 0x19) | 0x20;
977 } else if( (mmu_lrui & 0x15) == 0x01 ) {
979 mmu_lrui = (mmu_lrui & 0x3E) | 0x14;
980 } else { // Note - gets invalid entries too
982 mmu_lrui = (mmu_lrui | 0x0B);
985 mmu_itlb[replace].vpn = mmu_utlb[entryNo].vpn;
986 mmu_itlb[replace].mask = mmu_utlb[entryNo].mask;
987 mmu_itlb[replace].ppn = mmu_utlb[entryNo].ppn;
988 mmu_itlb[replace].asid = mmu_utlb[entryNo].asid;
989 mmu_itlb[replace].flags = mmu_utlb[entryNo].flags & 0x01DA;
994 * Perform the actual itlb lookup w/ asid protection
995 * Possible outcomes are:
996 * 0..3 Single match - good, return entry found
997 * -1 No match - raise a tlb data miss exception
998 * -2 Multiple matches - raise a multi-hit exception (reset)
999 * @param vpn virtual address to resolve
1000 * @return the resultant ITLB entry, or an error.
1002 static inline int mmu_itlb_lookup_vpn_asid( uint32_t vpn )
1007 for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
1008 if( (mmu_itlb[i].flags & TLB_VALID) &&
1009 ((mmu_itlb[i].flags & TLB_SHARE) || mmu_asid == mmu_itlb[i].asid) &&
1010 ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
1011 if( result != -1 ) {
1018 if( result == -1 ) {
1019 int utlbEntry = mmu_utlb_entry_for_vpn( vpn );
1020 if( utlbEntry < 0 ) {
1023 return mmu_itlb_update_from_utlb( utlbEntry );
1028 case 0: mmu_lrui = (mmu_lrui & 0x07); break;
1029 case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
1030 case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
1031 case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
1038 * Perform the actual itlb lookup on vpn only
1039 * Possible outcomes are:
1040 * 0..3 Single match - good, return entry found
1041 * -1 No match - raise a tlb data miss exception
1042 * -2 Multiple matches - raise a multi-hit exception (reset)
1043 * @param vpn virtual address to resolve
1044 * @return the resultant ITLB entry, or an error.
1046 static inline int mmu_itlb_lookup_vpn( uint32_t vpn )
1051 for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
1052 if( (mmu_itlb[i].flags & TLB_VALID) &&
1053 ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
1054 if( result != -1 ) {
1061 if( result == -1 ) {
1062 int utlbEntry = mmu_utlb_lookup_vpn( vpn );
1063 if( utlbEntry < 0 ) {
1066 return mmu_itlb_update_from_utlb( utlbEntry );
1071 case 0: mmu_lrui = (mmu_lrui & 0x07); break;
1072 case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
1073 case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
1074 case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
1081 * Update the icache for an untranslated address
1083 static inline void mmu_update_icache_phys( sh4addr_t addr )
1085 if( (addr & 0x1C000000) == 0x0C000000 ) {
1087 sh4_icache.page_vma = addr & 0xFF000000;
1088 sh4_icache.page_ppa = 0x0C000000;
1089 sh4_icache.mask = 0xFF000000;
1090 sh4_icache.page = dc_main_ram;
1091 } else if( (addr & 0x1FE00000) == 0 ) {
1093 sh4_icache.page_vma = addr & 0xFFE00000;
1094 sh4_icache.page_ppa = 0;
1095 sh4_icache.mask = 0xFFE00000;
1096 sh4_icache.page = dc_boot_rom;
1099 sh4_icache.page_vma = -1;
1104 * Update the sh4_icache structure to describe the page(s) containing the
1105 * given vma. If the address does not reference a RAM/ROM region, the icache
1106 * will be invalidated instead.
1107 * If AT is on, this method will raise TLB exceptions normally
1108 * (hence this method should only be used immediately prior to execution of
1109 * code), and otherwise will set the icache according to the matching TLB entry.
1110 * If AT is off, this method will set the entire referenced RAM/ROM region in
1112 * @return TRUE if the update completed (successfully or otherwise), FALSE
1113 * if an exception was raised.
1115 gboolean FASTCALL mmu_update_icache( sh4vma_t addr )
1118 if( IS_SH4_PRIVMODE() ) {
1119 if( addr & 0x80000000 ) {
1120 if( addr < 0xC0000000 ) {
1121 /* P1, P2 and P4 regions are pass-through (no translation) */
1122 mmu_update_icache_phys(addr);
1124 } else if( addr >= 0xE0000000 && addr < 0xFFFFFF00 ) {
1125 RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
1130 uint32_t mmucr = MMIO_READ(MMU,MMUCR);
1131 if( (mmucr & MMUCR_AT) == 0 ) {
1132 mmu_update_icache_phys(addr);
1136 if( (mmucr & MMUCR_SV) == 0 )
1137 entryNo = mmu_itlb_lookup_vpn_asid( addr );
1139 entryNo = mmu_itlb_lookup_vpn( addr );
1141 if( addr & 0x80000000 ) {
1142 RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
1146 uint32_t mmucr = MMIO_READ(MMU,MMUCR);
1147 if( (mmucr & MMUCR_AT) == 0 ) {
1148 mmu_update_icache_phys(addr);
1152 entryNo = mmu_itlb_lookup_vpn_asid( addr );
1154 if( entryNo != -1 && (mmu_itlb[entryNo].flags & TLB_USERMODE) == 0 ) {
1155 RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
1162 RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
1165 RAISE_TLB_MULTIHIT_ERROR(addr);
1168 sh4_icache.page_ppa = mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask;
1169 sh4_icache.page = mem_get_region( sh4_icache.page_ppa );
1170 if( sh4_icache.page == NULL ) {
1171 sh4_icache.page_vma = -1;
1173 sh4_icache.page_vma = mmu_itlb[entryNo].vpn & mmu_itlb[entryNo].mask;
1174 sh4_icache.mask = mmu_itlb[entryNo].mask;
1181 * Translate address for disassembly purposes (ie performs an instruction
1182 * lookup) - does not raise exceptions or modify any state, and ignores
1183 * protection bits. Returns the translated address, or MMU_VMA_ERROR
1184 * on translation failure.
1186 sh4addr_t FASTCALL mmu_vma_to_phys_disasm( sh4vma_t vma )
1188 if( vma & 0x80000000 ) {
1189 if( vma < 0xC0000000 ) {
1190 /* P1, P2 and P4 regions are pass-through (no translation) */
1191 return VMA_TO_EXT_ADDR(vma);
1192 } else if( vma >= 0xE0000000 && vma < 0xFFFFFF00 ) {
1193 /* Not translatable */
1194 return MMU_VMA_ERROR;
1198 uint32_t mmucr = MMIO_READ(MMU,MMUCR);
1199 if( (mmucr & MMUCR_AT) == 0 ) {
1200 return VMA_TO_EXT_ADDR(vma);
1203 int entryNo = mmu_itlb_lookup_vpn( vma );
1204 if( entryNo == -2 ) {
1205 entryNo = mmu_itlb_lookup_vpn_asid( vma );
1208 return MMU_VMA_ERROR;
1210 return (mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask) |
1211 (vma & (~mmu_itlb[entryNo].mask));
1216 * Translate a virtual to physical address for reading, raising exceptions as
1218 * @param addr Pointer to the virtual memory address. On successful return,
1219 * will be updated to contain the physical address.
1221 mem_region_fn_t FASTCALL mmu_get_region_for_vma_read( sh4vma_t *paddr )
1223 sh4vma_t addr = *paddr;
1224 uint32_t mmucr = MMIO_READ(MMU,MMUCR);
1225 if( addr & 0x80000000 ) {
1226 if( IS_SH4_PRIVMODE() ) {
1227 if( addr >= 0xE0000000 ) {
1228 return sh4_address_space[((uint32_t)addr)>>12]; /* P4 - passthrough */
1229 } else if( addr < 0xC0000000 ) {
1230 /* P1, P2 regions are pass-through (no translation) */
1231 return sh4_ext_address_space[VMA_TO_EXT_ADDR(addr)>>12];
1234 if( addr >= 0xE0000000 && addr < 0xE4000000 &&
1235 ((mmucr&MMUCR_SQMD) == 0) ) {
1236 /* Conditional user-mode access to the store-queue (no translation) */
1237 return &p4_region_storequeue;
1239 sh4_raise_exception(EXC_DATA_ADDR_READ);
1244 if( (mmucr & MMUCR_AT) == 0 ) {
1245 return sh4_address_space[addr>>12];
1248 /* If we get this far, translation is required */
1250 if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
1251 entryNo = mmu_utlb_lookup_vpn_asid( addr );
1253 entryNo = mmu_utlb_lookup_vpn( addr );
1258 RAISE_TLB_ERROR(EXC_TLB_MISS_READ,addr);
1261 RAISE_TLB_MULTIHIT_ERROR(addr);
1264 if( (mmu_utlb[entryNo].flags & TLB_USERMODE) == 0 &&
1265 !IS_SH4_PRIVMODE() ) {
1266 /* protection violation */
1267 RAISE_MEM_ERROR(EXC_TLB_PROT_READ,addr);
1271 /* finally generate the target address */
1272 sh4addr_t pma = (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
1273 (addr & (~mmu_utlb[entryNo].mask));
1274 if( pma > 0x1C000000 ) { // Remap 1Cxx .. 1Fxx region to P4
1275 addr = pma | 0xE0000000;
1277 return sh4_address_space[addr>>12];
1280 return sh4_ext_address_space[pma>>12];
1286 * Translate a virtual to physical address for prefetch, which mostly
1287 * does not raise exceptions.
1288 * @param addr Pointer to the virtual memory address. On successful return,
1289 * will be updated to contain the physical address.
1291 mem_region_fn_t FASTCALL mmu_get_region_for_vma_prefetch( sh4vma_t *paddr )
1293 sh4vma_t addr = *paddr;
1294 uint32_t mmucr = MMIO_READ(MMU,MMUCR);
1295 if( addr & 0x80000000 ) {
1296 if( IS_SH4_PRIVMODE() ) {
1297 if( addr >= 0xE0000000 ) {
1298 return sh4_address_space[((uint32_t)addr)>>12]; /* P4 - passthrough */
1299 } else if( addr < 0xC0000000 ) {
1300 /* P1, P2 regions are pass-through (no translation) */
1301 return sh4_ext_address_space[VMA_TO_EXT_ADDR(addr)>>12];
1304 if( addr >= 0xE0000000 && addr < 0xE4000000 &&
1305 ((mmucr&MMUCR_SQMD) == 0) ) {
1306 /* Conditional user-mode access to the store-queue (no translation) */
1307 return &p4_region_storequeue;
1309 sh4_raise_exception(EXC_DATA_ADDR_READ);
1314 if( (mmucr & MMUCR_AT) == 0 ) {
1315 return sh4_address_space[addr>>12];
1318 /* If we get this far, translation is required */
1320 if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
1321 entryNo = mmu_utlb_lookup_vpn_asid( addr );
1323 entryNo = mmu_utlb_lookup_vpn( addr );
1328 return &mem_region_unmapped;
1330 RAISE_TLB_MULTIHIT_ERROR(addr);
1333 if( (mmu_utlb[entryNo].flags & TLB_USERMODE) == 0 &&
1334 !IS_SH4_PRIVMODE() ) {
1335 /* protection violation */
1336 return &mem_region_unmapped;
1339 /* finally generate the target address */
1340 sh4addr_t pma = (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
1341 (addr & (~mmu_utlb[entryNo].mask));
1342 if( pma > 0x1C000000 ) { // Remap 1Cxx .. 1Fxx region to P4
1343 addr = pma | 0xE0000000;
1345 return sh4_address_space[addr>>12];
1348 return sh4_ext_address_space[pma>>12];
1354 * Translate a virtual to physical address for writing, raising exceptions as
1357 mem_region_fn_t FASTCALL mmu_get_region_for_vma_write( sh4vma_t *paddr )
1359 sh4vma_t addr = *paddr;
1360 uint32_t mmucr = MMIO_READ(MMU,MMUCR);
1361 if( addr & 0x80000000 ) {
1362 if( IS_SH4_PRIVMODE() ) {
1363 if( addr >= 0xE0000000 ) {
1364 return sh4_address_space[((uint32_t)addr)>>12]; /* P4 - passthrough */
1365 } else if( addr < 0xC0000000 ) {
1366 /* P1, P2 regions are pass-through (no translation) */
1367 return sh4_ext_address_space[VMA_TO_EXT_ADDR(addr)>>12];
1370 if( addr >= 0xE0000000 && addr < 0xE4000000 &&
1371 ((mmucr&MMUCR_SQMD) == 0) ) {
1372 /* Conditional user-mode access to the store-queue (no translation) */
1373 return &p4_region_storequeue;
1375 sh4_raise_exception(EXC_DATA_ADDR_WRITE);
1380 if( (mmucr & MMUCR_AT) == 0 ) {
1381 return sh4_address_space[addr>>12];
1384 /* If we get this far, translation is required */
1386 if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
1387 entryNo = mmu_utlb_lookup_vpn_asid( addr );
1389 entryNo = mmu_utlb_lookup_vpn( addr );
1394 RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE,addr);
1397 RAISE_TLB_MULTIHIT_ERROR(addr);
1400 if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
1401 : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
1402 /* protection violation */
1403 RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE,addr);
1407 if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
1408 RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, addr);
1412 /* finally generate the target address */
1413 sh4addr_t pma = (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
1414 (addr & (~mmu_utlb[entryNo].mask));
1415 if( pma > 0x1C000000 ) { // Remap 1Cxx .. 1Fxx region to P4
1416 addr = pma | 0xE0000000;
1418 return sh4_address_space[addr>>12];
1421 return sh4_ext_address_space[pma>>12];
1428 /********************** TLB Direct-Access Regions ***************************/
1429 #define ITLB_ENTRY(addr) ((addr>>7)&0x03)
1431 int32_t FASTCALL mmu_itlb_addr_read( sh4addr_t addr )
1433 struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
1434 return ent->vpn | ent->asid | (ent->flags & TLB_VALID);
1437 void FASTCALL mmu_itlb_addr_write( sh4addr_t addr, uint32_t val )
1439 struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
1440 ent->vpn = val & 0xFFFFFC00;
1441 ent->asid = val & 0x000000FF;
1442 ent->flags = (ent->flags & ~(TLB_VALID)) | (val&TLB_VALID);
1445 int32_t FASTCALL mmu_itlb_data_read( sh4addr_t addr )
1447 struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
1448 return (ent->ppn & 0x1FFFFC00) | ent->flags;
1451 void FASTCALL mmu_itlb_data_write( sh4addr_t addr, uint32_t val )
1453 struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
1454 ent->ppn = val & 0x1FFFFC00;
1455 ent->flags = val & 0x000001DA;
1456 ent->mask = get_tlb_size_mask(val);
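/* Physical pages in 0x1C000000..0x1FFFFFFF are folded up into the P4 area,
 * matching the "Remap 1Cxx .. 1Fxx region to P4" handling in the
 * mmu_get_region_for_vma_* translation functions.
 */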
1457 if( ent->ppn >= 0x1C000000 )
1458 ent->ppn |= 0xE0000000;
1461 #define UTLB_ENTRY(addr) ((addr>>8)&0x3F)
1462 #define UTLB_ASSOC(addr) (addr&0x80)
1463 #define UTLB_DATA2(addr) (addr&0x00800000)
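/* For the memory-mapped UTLB arrays: bits 13:8 of the access address select
 * one of the 64 entries, bit 7 requests an associative (lookup-based) write,
 * and bit 23 selects data array 2 (the pcmcia field) rather than PPN+flags.
 */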
1465 int32_t FASTCALL mmu_utlb_addr_read( sh4addr_t addr )
1467 struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
1468 return ent->vpn | ent->asid | (ent->flags & TLB_VALID) |
1469 ((ent->flags & TLB_DIRTY)<<7);
1471 int32_t FASTCALL mmu_utlb_data_read( sh4addr_t addr )
1473 struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
1474 if( UTLB_DATA2(addr) ) {
1477 return (ent->ppn&0x1FFFFC00) | ent->flags;
1482 * Find a UTLB entry for the associative TLB write - same as the normal
1483 * lookup but ignores the valid bit.
1485 static inline int mmu_utlb_lookup_assoc( uint32_t vpn, uint32_t asid )
1489 for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
1490 if( (mmu_utlb[i].flags & TLB_VALID) &&
1491 ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
1492 ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
1493 if( result != -1 ) {
1494 fprintf( stderr, "TLB Multi hit: %d %d\n", result, i );
1504 * Find a ITLB entry for the associative TLB write - same as the normal
1505 * lookup but ignores the valid bit.
1507 static inline int mmu_itlb_lookup_assoc( uint32_t vpn, uint32_t asid )
1511 for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
1512 if( (mmu_itlb[i].flags & TLB_VALID) &&
1513 ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
1514 ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
1515 if( result != -1 ) {
1524 void FASTCALL mmu_utlb_addr_write( sh4addr_t addr, uint32_t val, void *exc )
1526 if( UTLB_ASSOC(addr) ) {
1527 int utlb = mmu_utlb_lookup_assoc( val, mmu_asid );
1529 struct utlb_entry *ent = &mmu_utlb[utlb];
1530 uint32_t old_flags = ent->flags;
1531 ent->flags = ent->flags & ~(TLB_DIRTY|TLB_VALID);
1532 ent->flags |= (val & TLB_VALID);
1533 ent->flags |= ((val & 0x200)>>7);
1534 if( IS_TLB_ENABLED() && ((old_flags^ent->flags) & (TLB_VALID|TLB_DIRTY)) != 0 ) {
1535 if( old_flags & TLB_VALID )
1536 mmu_utlb_remove_entry( utlb );
1537 if( ent->flags & TLB_VALID )
1538 mmu_utlb_insert_entry( utlb );
1542 int itlb = mmu_itlb_lookup_assoc( val, mmu_asid );
1544 struct itlb_entry *ent = &mmu_itlb[itlb];
1545 ent->flags = (ent->flags & (~TLB_VALID)) | (val & TLB_VALID);
1548 if( itlb == -2 || utlb == -2 ) {
1549 RAISE_TLB_MULTIHIT_ERROR(addr); /* FIXME: should this only be raised if TLB is enabled? */
1550 SH4_EXCEPTION_EXIT();
1554 struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
1555 if( IS_TLB_ENABLED() && ent->flags & TLB_VALID )
1556 mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
1557 ent->vpn = (val & 0xFFFFFC00);
1558 ent->asid = (val & 0xFF);
1559 ent->flags = (ent->flags & ~(TLB_DIRTY|TLB_VALID));
1560 ent->flags |= (val & TLB_VALID);
1561 ent->flags |= ((val & 0x200)>>7);
1562 if( IS_TLB_ENABLED() && ent->flags & TLB_VALID )
1563 mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
1567 void FASTCALL mmu_utlb_data_write( sh4addr_t addr, uint32_t val )
1569 struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
1570 if( UTLB_DATA2(addr) ) {
1571 ent->pcmcia = val & 0x0000000F;
1573 if( IS_TLB_ENABLED() && ent->flags & TLB_VALID )
1574 mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
1575 ent->ppn = (val & 0x1FFFFC00);
1576 ent->flags = (val & 0x000001FF);
1577 ent->mask = get_tlb_size_mask(val);
1578 if( IS_TLB_ENABLED() && ent->flags & TLB_VALID )
1579 mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
1583 struct mem_region_fn p4_region_itlb_addr = {
1584 mmu_itlb_addr_read, mmu_itlb_addr_write,
1585 mmu_itlb_addr_read, mmu_itlb_addr_write,
1586 mmu_itlb_addr_read, mmu_itlb_addr_write,
1587 unmapped_read_burst, unmapped_write_burst,
1588 unmapped_prefetch, mmu_itlb_addr_read };
1589 struct mem_region_fn p4_region_itlb_data = {
1590 mmu_itlb_data_read, mmu_itlb_data_write,
1591 mmu_itlb_data_read, mmu_itlb_data_write,
1592 mmu_itlb_data_read, mmu_itlb_data_write,
1593 unmapped_read_burst, unmapped_write_burst,
1594 unmapped_prefetch, mmu_itlb_data_read };
1595 struct mem_region_fn p4_region_utlb_addr = {
1596 mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
1597 mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
1598 mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
1599 unmapped_read_burst, unmapped_write_burst,
1600 unmapped_prefetch, mmu_utlb_addr_read };
1601 struct mem_region_fn p4_region_utlb_data = {
1602 mmu_utlb_data_read, mmu_utlb_data_write,
1603 mmu_utlb_data_read, mmu_utlb_data_write,
1604 mmu_utlb_data_read, mmu_utlb_data_write,
1605 unmapped_read_burst, unmapped_write_burst,
1606 unmapped_prefetch, mmu_utlb_data_read };
1608 /********************** Error regions **************************/
1610 static void FASTCALL address_error_read( sh4addr_t addr, void *exc )
1612 RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
1613 SH4_EXCEPTION_EXIT();
1616 static void FASTCALL address_error_read_for_write( sh4addr_t addr, void *exc )
1618 RAISE_MEM_ERROR(EXC_DATA_ADDR_WRITE, addr);
1619 SH4_EXCEPTION_EXIT();
1622 static void FASTCALL address_error_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
1624 RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
1625 SH4_EXCEPTION_EXIT();
1628 static void FASTCALL address_error_write( sh4addr_t addr, uint32_t val, void *exc )
1630 RAISE_MEM_ERROR(EXC_DATA_ADDR_WRITE, addr);
1631 SH4_EXCEPTION_EXIT();
1634 static void FASTCALL tlb_miss_read( sh4addr_t addr, void *exc )
1637 RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
1638 SH4_EXCEPTION_EXIT();
1641 static void FASTCALL tlb_miss_read_for_write( sh4addr_t addr, void *exc )
1644 RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, addr);
1645 SH4_EXCEPTION_EXIT();
1648 static void FASTCALL tlb_miss_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
1651 RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
1652 SH4_EXCEPTION_EXIT();
1655 static void FASTCALL tlb_miss_write( sh4addr_t addr, uint32_t val, void *exc )
1658 RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, addr);
1659 SH4_EXCEPTION_EXIT();
1662 static int32_t FASTCALL tlb_protected_read( sh4addr_t addr, void *exc )
1665 RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
1666 SH4_EXCEPTION_EXIT();
1670 static int32_t FASTCALL tlb_protected_read_for_write( sh4addr_t addr, void *exc )
1673 RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, addr);
1674 SH4_EXCEPTION_EXIT();
1678 static int32_t FASTCALL tlb_protected_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
1681 RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
1682 SH4_EXCEPTION_EXIT();
1686 static void FASTCALL tlb_protected_write( sh4addr_t addr, uint32_t val, void *exc )
1689 RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, addr);
1690 SH4_EXCEPTION_EXIT();
1693 static void FASTCALL tlb_initial_write( sh4addr_t addr, uint32_t val, void *exc )
1696 RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, addr);
1697 SH4_EXCEPTION_EXIT();
1700 static int32_t FASTCALL tlb_initial_read_for_write( sh4addr_t addr, void *exc )
1703 RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, addr);
1704 SH4_EXCEPTION_EXIT();
1708 static int32_t FASTCALL tlb_multi_hit_read( sh4addr_t addr, void *exc )
1710 sh4_raise_tlb_multihit(addr);
1711 SH4_EXCEPTION_EXIT();
1715 static int32_t FASTCALL tlb_multi_hit_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
1717 sh4_raise_tlb_multihit(addr);
1718 SH4_EXCEPTION_EXIT();
1721 static void FASTCALL tlb_multi_hit_write( sh4addr_t addr, uint32_t val, void *exc )
1723 sh4_raise_tlb_multihit(addr);
1724 SH4_EXCEPTION_EXIT();
1728 * Note: Per sec 4.6.4 of the SH7750 manual, SQ
1730 struct mem_region_fn mem_region_address_error = {
1731 (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
1732 (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
1733 (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
1734 (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
1735 unmapped_prefetch, (mem_read_fn_t)address_error_read_for_write };
1737 struct mem_region_fn mem_region_tlb_miss = {
1738 (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
1739 (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
1740 (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
1741 (mem_read_burst_fn_t)tlb_miss_read_burst, (mem_write_burst_fn_t)tlb_miss_write,
1742 unmapped_prefetch, (mem_read_fn_t)tlb_miss_read_for_write };
1744 struct mem_region_fn mem_region_tlb_protected = {
1745 (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
1746 (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
1747 (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
1748 (mem_read_burst_fn_t)tlb_protected_read_burst, (mem_write_burst_fn_t)tlb_protected_write,
1749 unmapped_prefetch, (mem_read_fn_t)tlb_protected_read_for_write };
1751 struct mem_region_fn mem_region_tlb_multihit = {
1752 (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
1753 (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
1754 (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
1755 (mem_read_burst_fn_t)tlb_multi_hit_read_burst, (mem_write_burst_fn_t)tlb_multi_hit_write,
1756 (mem_prefetch_fn_t)tlb_multi_hit_read, (mem_read_fn_t)tlb_multi_hit_read };
1759 /* Store-queue regions */
1760 /* These are a bit of a pain - the first 8 fields are controlled by SQMD, while
1761 * the final (prefetch) is controlled by the actual TLB settings (plus SQMD in
1762 * some cases), in contrast to the ordinary fields above.
1764 * There is probably a simpler way to do this.
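 *
 * The plain storequeue/miss/multihit/protected variants below differ only in
 * their prefetch handler (pass-through, TLB miss, multi-hit or protection
 * fault respectively), while the _sqmd variants additionally turn every
 * ordinary access into an address error for user mode when SQMD forbids it.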
1767 struct mem_region_fn p4_region_storequeue = {
1768 ccn_storequeue_read_long, ccn_storequeue_write_long,
1769 unmapped_read_long, unmapped_write_long, /* TESTME: Officially only long access is supported */
1770 unmapped_read_long, unmapped_write_long,
1771 unmapped_read_burst, unmapped_write_burst,
1772 ccn_storequeue_prefetch, unmapped_read_long };
1774 struct mem_region_fn p4_region_storequeue_miss = {
1775 ccn_storequeue_read_long, ccn_storequeue_write_long,
1776 unmapped_read_long, unmapped_write_long, /* TESTME: Officially only long access is supported */
1777 unmapped_read_long, unmapped_write_long,
1778 unmapped_read_burst, unmapped_write_burst,
1779 (mem_prefetch_fn_t)tlb_miss_read, unmapped_read_long };
1781 struct mem_region_fn p4_region_storequeue_multihit = {
1782 ccn_storequeue_read_long, ccn_storequeue_write_long,
1783 unmapped_read_long, unmapped_write_long, /* TESTME: Officially only long access is supported */
1784 unmapped_read_long, unmapped_write_long,
1785 unmapped_read_burst, unmapped_write_burst,
1786 (mem_prefetch_fn_t)tlb_multi_hit_read, unmapped_read_long };
1788 struct mem_region_fn p4_region_storequeue_protected = {
1789 ccn_storequeue_read_long, ccn_storequeue_write_long,
1790 unmapped_read_long, unmapped_write_long,
1791 unmapped_read_long, unmapped_write_long,
1792 unmapped_read_burst, unmapped_write_burst,
1793 (mem_prefetch_fn_t)tlb_protected_read, unmapped_read_long };
1795 struct mem_region_fn p4_region_storequeue_sqmd = {
1796 (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
1797 (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
1798 (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
1799 (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
1800 (mem_prefetch_fn_t)address_error_read, (mem_read_fn_t)address_error_read_for_write };
1802 struct mem_region_fn p4_region_storequeue_sqmd_miss = {
1803 (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
1804 (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
1805 (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
1806 (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
1807 (mem_prefetch_fn_t)tlb_miss_read, (mem_read_fn_t)address_error_read_for_write };
1809 struct mem_region_fn p4_region_storequeue_sqmd_multihit = {
1810 (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
1811 (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
1812 (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
1813 (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
1814 (mem_prefetch_fn_t)tlb_multi_hit_read, (mem_read_fn_t)address_error_read_for_write };
1816 struct mem_region_fn p4_region_storequeue_sqmd_protected = {
1817 (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
1818 (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
1819 (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
1820 (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
1821 (mem_prefetch_fn_t)tlb_protected_read, (mem_read_fn_t)address_error_read_for_write };
.