/**
 * SH4 MMU implementation based on address space page maps. This module
 * is responsible for all address decoding functions.
 *
 * Copyright (c) 2005 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#define MODULE sh4_module

#include <stdio.h>
#include <assert.h>

#include "sh4/sh4mmio.h"
#include "sh4/sh4core.h"
#include "sh4/sh4trans.h"
#include "dreamcast.h"
#define RAISE_TLB_ERROR(code, vpn) \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
    sh4_raise_tlb_exception(code);

#define RAISE_MEM_ERROR(code, vpn) \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
    sh4_raise_exception(code);

#define RAISE_TLB_MULTIHIT_ERROR(vpn) \
    sh4_raise_reset(EXC_TLB_MULTI_HIT); \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00)));
/* An entry is a 1K entry if it's one of the mmu_utlb_1k_pages entries */
#define IS_1K_PAGE_ENTRY(ent)  ( ((uintptr_t)(((struct utlb_1k_entry *)ent) - &mmu_utlb_1k_pages[0])) < UTLB_ENTRY_COUNT )
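
/* Illustrative sketch (not part of the original source): the macro above
 * identifies 1K intermediate entries purely by pointer arithmetic - because
 * all such entries live in the static mmu_utlb_1k_pages[] array, the unsigned
 * difference from the array base is < UTLB_ENTRY_COUNT exactly when the
 * pointer falls inside the array (anything below the base wraps to a huge
 * value). A caller resolving an address through a 1K entry might look like:
 *
 *     mem_region_fn_t fn = sh4_address_space[addr >> 12];
 *     if( IS_1K_PAGE_ENTRY(fn) ) {
 *         struct utlb_1k_entry *ent = (struct utlb_1k_entry *)fn;
 *         fn = ent->subpages[(addr >> 10) & 0x03]; // select the 1K subpage
 *     }
 */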
/* Primary address space (used directly by SH4 cores) */
mem_region_fn_t *sh4_address_space;
mem_region_fn_t *sh4_user_address_space;

/* MMU-mapped storequeue targets. Only used with TLB on */
mem_region_fn_t *storequeue_address_space;
mem_region_fn_t *storequeue_user_address_space;

/* Accessed from the UTLB accessor methods */
uint32_t mmu_urc;
uint32_t mmu_urb;

static struct itlb_entry mmu_itlb[ITLB_ENTRY_COUNT];
static struct utlb_entry mmu_utlb[UTLB_ENTRY_COUNT];
static struct utlb_page_entry mmu_utlb_pages[UTLB_ENTRY_COUNT];
static uint32_t mmu_lrui;
static uint32_t mmu_asid; // current asid

/* Structures for 1K page handling */
static struct utlb_1k_entry mmu_utlb_1k_pages[UTLB_ENTRY_COUNT];
static int mmu_utlb_1k_free_list[UTLB_ENTRY_COUNT];
static int mmu_utlb_1k_free_index;
/* Function prototypes */
static void mmu_invalidate_tlb();
static void mmu_utlb_register_all();
static void mmu_utlb_remove_entry(int);
static void mmu_utlb_insert_entry(int);
static void mmu_register_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn );
static void mmu_register_user_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn );
static void mmu_set_tlb_enabled( int tlb_on );
static void mmu_set_tlb_asid( uint32_t asid );
static void mmu_set_storequeue_protected( int protected );
static gboolean mmu_utlb_map_pages( mem_region_fn_t priv_page, mem_region_fn_t user_page, sh4addr_t start_addr, int npages );
static void mmu_utlb_remap_pages( gboolean remap_priv, gboolean remap_user, int entryNo );
static gboolean mmu_utlb_unmap_pages( gboolean unmap_priv, gboolean unmap_user, sh4addr_t start_addr, int npages );
static gboolean mmu_ext_page_remapped( sh4addr_t page, mem_region_fn_t fn, void *user_data );
static void mmu_utlb_1k_init();
static struct utlb_1k_entry *mmu_utlb_1k_alloc();
static void mmu_utlb_1k_free( struct utlb_1k_entry *entry );

static int32_t FASTCALL tlb_protected_read( sh4addr_t addr, void *exc );
static void FASTCALL tlb_protected_write( sh4addr_t addr, uint32_t val, void *exc );
static void FASTCALL tlb_initial_write( sh4addr_t addr, uint32_t val, void *exc );
static uint32_t get_tlb_size_mask( uint32_t flags );
static uint32_t get_tlb_size_pages( uint32_t flags );
/*********************** Module public functions ****************************/

/**
 * Allocate memory for the address space maps, and initialize them according
 * to the default (reset) values. (TLB is disabled by default)
 */
void MMU_init()
{
    sh4_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 256 );
    sh4_user_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 256 );
    storequeue_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 4 );
    storequeue_user_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 4 );

    mmu_set_tlb_enabled(0);
    mmu_register_user_mem_region( 0x80000000, 0x00000000, &mem_region_address_error );
    mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );

    /* Setup P4 tlb/cache access regions */
    mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
    mmu_register_mem_region( 0xE4000000, 0xF0000000, &mem_region_unmapped );
    mmu_register_mem_region( 0xF0000000, 0xF1000000, &p4_region_icache_addr );
    mmu_register_mem_region( 0xF1000000, 0xF2000000, &p4_region_icache_data );
    mmu_register_mem_region( 0xF2000000, 0xF3000000, &p4_region_itlb_addr );
    mmu_register_mem_region( 0xF3000000, 0xF4000000, &p4_region_itlb_data );
    mmu_register_mem_region( 0xF4000000, 0xF5000000, &p4_region_ocache_addr );
    mmu_register_mem_region( 0xF5000000, 0xF6000000, &p4_region_ocache_data );
    mmu_register_mem_region( 0xF6000000, 0xF7000000, &p4_region_utlb_addr );
    mmu_register_mem_region( 0xF7000000, 0xF8000000, &p4_region_utlb_data );
    mmu_register_mem_region( 0xF8000000, 0x00000000, &mem_region_unmapped );

    /* Setup P4 control region */
    mmu_register_mem_region( 0xFF000000, 0xFF001000, &mmio_region_MMU.fn );
    mmu_register_mem_region( 0xFF100000, 0xFF101000, &mmio_region_PMM.fn );
    mmu_register_mem_region( 0xFF200000, 0xFF201000, &mmio_region_UBC.fn );
    mmu_register_mem_region( 0xFF800000, 0xFF801000, &mmio_region_BSC.fn );
    mmu_register_mem_region( 0xFF900000, 0xFFA00000, &mem_region_unmapped ); // SDMR2 + SDMR3
    mmu_register_mem_region( 0xFFA00000, 0xFFA01000, &mmio_region_DMAC.fn );
    mmu_register_mem_region( 0xFFC00000, 0xFFC01000, &mmio_region_CPG.fn );
    mmu_register_mem_region( 0xFFC80000, 0xFFC81000, &mmio_region_RTC.fn );
    mmu_register_mem_region( 0xFFD00000, 0xFFD01000, &mmio_region_INTC.fn );
    mmu_register_mem_region( 0xFFD80000, 0xFFD81000, &mmio_region_TMU.fn );
    mmu_register_mem_region( 0xFFE00000, 0xFFE01000, &mmio_region_SCI.fn );
    mmu_register_mem_region( 0xFFE80000, 0xFFE81000, &mmio_region_SCIF.fn );
    mmu_register_mem_region( 0xFFF00000, 0xFFF01000, &mem_region_unmapped ); // H-UDI

    register_mem_page_remapped_hook( mmu_ext_page_remapped, NULL );

    /* Ensure the code regions are executable */
    mem_unprotect( mmu_utlb_pages, sizeof(mmu_utlb_pages) );
    mem_unprotect( mmu_utlb_1k_pages, sizeof(mmu_utlb_1k_pages) );
}

void MMU_reset()
{
    mmio_region_MMU_write( CCR, 0 );
    mmio_region_MMU_write( MMUCR, 0 );
}
void MMU_save_state( FILE *f )
{
    fwrite( &mmu_itlb, sizeof(mmu_itlb), 1, f );
    fwrite( &mmu_utlb, sizeof(mmu_utlb), 1, f );
    fwrite( &mmu_urc, sizeof(mmu_urc), 1, f );
    fwrite( &mmu_urb, sizeof(mmu_urb), 1, f );
    fwrite( &mmu_lrui, sizeof(mmu_lrui), 1, f );
    fwrite( &mmu_asid, sizeof(mmu_asid), 1, f );
}

int MMU_load_state( FILE *f )
{
    if( fread( &mmu_itlb, sizeof(mmu_itlb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_utlb, sizeof(mmu_utlb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_urc, sizeof(mmu_urc), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_urb, sizeof(mmu_urb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_lrui, sizeof(mmu_lrui), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_asid, sizeof(mmu_asid), 1, f ) != 1 ) {
        return 1;
    }

    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    mmu_set_tlb_enabled(mmucr&MMUCR_AT);
    mmu_set_storequeue_protected(mmucr&MMUCR_SQMD);
    return 0;
}
/**
 * LDTLB instruction implementation. Copies PTEH, PTEL and PTEA into the UTLB
 * entry identified by MMUCR.URC. Does not modify MMUCR or the ITLB.
 */
void MMU_ldtlb()
{
    if( mmu_utlb[mmu_urc].flags & TLB_VALID )
        mmu_utlb_remove_entry( mmu_urc );
    mmu_utlb[mmu_urc].vpn = MMIO_READ(MMU, PTEH) & 0xFFFFFC00;
    mmu_utlb[mmu_urc].asid = MMIO_READ(MMU, PTEH) & 0x000000FF;
    mmu_utlb[mmu_urc].ppn = MMIO_READ(MMU, PTEL) & 0x1FFFFC00;
    mmu_utlb[mmu_urc].flags = MMIO_READ(MMU, PTEL) & 0x000001FF;
    mmu_utlb[mmu_urc].pcmcia = MMIO_READ(MMU, PTEA);
    mmu_utlb[mmu_urc].mask = get_tlb_size_mask(mmu_utlb[mmu_urc].flags);
    if( mmu_utlb[mmu_urc].flags & TLB_VALID )
        mmu_utlb_insert_entry( mmu_urc );
}
MMIO_REGION_READ_FN( MMU, reg )
{
    switch( reg ) {
    case MMUCR:
        return MMIO_READ( MMU, MMUCR) | (mmu_urc<<10) | ((mmu_urb&0x3F)<<18) | (mmu_lrui<<26);
    default:
        return MMIO_READ( MMU, reg );
    }
}
MMIO_REGION_WRITE_FN( MMU, reg, val )
{
    uint32_t tmp;
    switch(reg) {
    case PTEH:
        if( (val & 0xFF) != mmu_asid ) {
            mmu_set_tlb_asid( val&0xFF );
            sh4_icache.page_vma = -1; // invalidate icache as asid has changed
        }
        break;
    case MMUCR:
        if( val & MMUCR_TI ) {
            mmu_invalidate_tlb();
        }
        mmu_urc = (val >> 10) & 0x3F;
        mmu_urb = (val >> 18) & 0x3F;
        mmu_lrui = (val >> 26) & 0x3F;
        val &= 0x00000301;
        tmp = MMIO_READ( MMU, MMUCR );
        if( (val ^ tmp) & (MMUCR_SQMD) ) {
            mmu_set_storequeue_protected( val & MMUCR_SQMD );
        }
        if( (val ^ tmp) & (MMUCR_AT) ) {
            // AT flag has changed state - flush the xlt cache as all bets
            // are off now. We also need to force an immediate exit from the
            // current block
            mmu_set_tlb_enabled( val & MMUCR_AT );
            MMIO_WRITE( MMU, MMUCR, val );
        }
        break;
    case CCR:
        CCN_set_cache_control( val );
        break;
    case PMCR1:
        /* Note that if the high bit is set, this appears to reset the machine.
         * Not emulating this behaviour yet until we know why...
         */
        PMM_write_control(0, val);
        break;
    case PMCR2:
        PMM_write_control(1, val);
        break;
    default:
        break;
    }
    MMIO_WRITE( MMU, reg, val );
}
/********************** 1K Page handling ***********************/
/* Since we use 4K pages as our native page size, 1K pages need a bit of extra
 * effort to manage - we justify this on the basis that most programs won't
 * actually use 1K pages, so we may as well optimize for the common case.
 *
 * Implementation uses an intermediate page entry (the utlb_1k_entry) that
 * redirects requests to the 'real' page entry. These are allocated on an
 * as-needed basis, and returned to the pool when all subpages are empty.
 */
static void mmu_utlb_1k_init()
{
    int i;
    for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
        mmu_utlb_1k_free_list[i] = i;
        mmu_utlb_1k_init_vtable( &mmu_utlb_1k_pages[i] );
    }
    mmu_utlb_1k_free_index = 0;
}

static struct utlb_1k_entry *mmu_utlb_1k_alloc()
{
    assert( mmu_utlb_1k_free_index < UTLB_ENTRY_COUNT );
    struct utlb_1k_entry *entry = &mmu_utlb_1k_pages[mmu_utlb_1k_free_list[mmu_utlb_1k_free_index++]];
    return entry;
}

static void mmu_utlb_1k_free( struct utlb_1k_entry *ent )
{
    unsigned int entryNo = ent - &mmu_utlb_1k_pages[0];
    assert( entryNo < UTLB_ENTRY_COUNT );
    assert( mmu_utlb_1k_free_index > 0 );
    mmu_utlb_1k_free_list[--mmu_utlb_1k_free_index] = entryNo;
}
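
/* Usage sketch (hypothetical; mirrors what mmu_utlb_map_pages/unmap_pages
 * below actually do): a 1K entry is allocated the first time a 1K mapping
 * lands in a 4K frame, seeded with the frame's previous handlers, and
 * released once all four subpages hold identical handlers again:
 *
 *     struct utlb_1k_entry *ent = mmu_utlb_1k_alloc();
 *     for( i=0; i<4; i++ ) {
 *         ent->subpages[i] = *ptr;        // inherit the old 4K handler
 *         ent->user_subpages[i] = *uptr;
 *     }
 *     *ptr = &ent->fn;                    // redirect the 4K slot
 *     *uptr = &ent->user_fn;
 *     ...
 *     mmu_utlb_1k_free( ent );            // when the subpages merge again
 */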
/********************** Address space maintenance *************************/

/**
 * MMU accessor functions just increment URC - fixup here if necessary
 */
static inline void mmu_urc_fixup()
{
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
        mmu_urc = 0;
    }
}
static void mmu_register_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn )
{
    int count = (end - start) >> 12;
    mem_region_fn_t *ptr = &sh4_address_space[start>>12];
    while( count-- > 0 ) {
        *ptr++ = fn;
    }
}

static void mmu_register_user_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn )
{
    int count = (end - start) >> 12;
    mem_region_fn_t *ptr = &sh4_user_address_space[start>>12];
    while( count-- > 0 ) {
        *ptr++ = fn;
    }
}
static gboolean mmu_ext_page_remapped( sh4addr_t page, mem_region_fn_t fn, void *user_data )
{
    uint32_t i;
    if( (MMIO_READ(MMU,MMUCR)) & MMUCR_AT ) {
        /* Direct map to P1 and P2 */
        sh4_address_space[(page|0x80000000)>>12] = fn;
        sh4_address_space[(page|0xA0000000)>>12] = fn;
        /* Scan UTLB and update any direct-referencing entries */
    } else {
        /* Direct map to U0, P0, P1, P2, P3 */
        for( i=0; i<= 0xC0000000; i+= 0x20000000 ) {
            sh4_address_space[(page|i)>>12] = fn;
        }
        for( i=0; i < 0x80000000; i+= 0x20000000 ) {
            sh4_user_address_space[(page|i)>>12] = fn;
        }
    }
    return TRUE;
}
static void mmu_set_tlb_enabled( int tlb_on )
{
    mem_region_fn_t *ptr, *uptr;
    int i;

    if( tlb_on ) {
        mmu_register_mem_region(0x00000000, 0x80000000, &mem_region_tlb_miss );
        mmu_register_mem_region(0xC0000000, 0xE0000000, &mem_region_tlb_miss );
        mmu_register_user_mem_region(0x00000000, 0x80000000, &mem_region_tlb_miss );
        for( i=0, ptr = storequeue_address_space, uptr = storequeue_user_address_space;
             i<0x04000000; i+= LXDREAM_PAGE_SIZE ) {
            *ptr++ = &mem_region_tlb_miss;
            *uptr++ = &mem_region_tlb_miss;
        }
        mmu_utlb_register_all();
    } else {
        for( i=0, ptr = sh4_address_space; i<7; i++, ptr += LXDREAM_PAGE_TABLE_ENTRIES ) {
            memcpy( ptr, ext_address_space, sizeof(mem_region_fn_t) * LXDREAM_PAGE_TABLE_ENTRIES );
        }
        for( i=0, ptr = sh4_user_address_space; i<4; i++, ptr += LXDREAM_PAGE_TABLE_ENTRIES ) {
            memcpy( ptr, ext_address_space, sizeof(mem_region_fn_t) * LXDREAM_PAGE_TABLE_ENTRIES );
        }
    }
}
static void mmu_set_storequeue_protected( int protected )
{
    if( protected ) {
        mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &mem_region_address_error );
    } else {
        mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
    }
}
static void mmu_set_tlb_asid( uint32_t asid )
{
    /* Scan for pages that need to be remapped */
    int i;
    if( IS_SV_ENABLED() ) {
        for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
            if( mmu_utlb[i].flags & TLB_VALID ) {
                if( (mmu_utlb[i].flags & TLB_SHARE) == 0 ) {
                    if( mmu_utlb[i].asid == mmu_asid ) { // Matches old ASID - unmap out
                        if( !mmu_utlb_unmap_pages( FALSE, TRUE, mmu_utlb[i].vpn&mmu_utlb[i].mask,
                                get_tlb_size_pages(mmu_utlb[i].flags) ) )
                            mmu_utlb_remap_pages( FALSE, TRUE, i );
                    } else if( mmu_utlb[i].asid == asid ) { // Matches new ASID - map in
                        mmu_utlb_map_pages( NULL, mmu_utlb_pages[i].user_fn,
                                mmu_utlb[i].vpn&mmu_utlb[i].mask,
                                get_tlb_size_pages(mmu_utlb[i].flags) );
                    }
                }
            }
        }
    } else {
        // Remap both Priv+user pages
        for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
            if( mmu_utlb[i].flags & TLB_VALID ) {
                if( (mmu_utlb[i].flags & TLB_SHARE) == 0 ) {
                    if( mmu_utlb[i].asid == mmu_asid ) { // Matches old ASID - unmap out
                        if( !mmu_utlb_unmap_pages( TRUE, TRUE, mmu_utlb[i].vpn&mmu_utlb[i].mask,
                                get_tlb_size_pages(mmu_utlb[i].flags) ) )
                            mmu_utlb_remap_pages( TRUE, TRUE, i );
                    } else if( mmu_utlb[i].asid == asid ) { // Matches new ASID - map in
                        mmu_utlb_map_pages( &mmu_utlb_pages[i].fn, mmu_utlb_pages[i].user_fn,
                                mmu_utlb[i].vpn&mmu_utlb[i].mask,
                                get_tlb_size_pages(mmu_utlb[i].flags) );
                    }
                }
            }
        }
    }

    mmu_asid = asid;
}
static uint32_t get_tlb_size_mask( uint32_t flags )
{
    switch( flags & TLB_SIZE_MASK ) {
    case TLB_SIZE_1K: return MASK_1K;
    case TLB_SIZE_4K: return MASK_4K;
    case TLB_SIZE_64K: return MASK_64K;
    case TLB_SIZE_1M: return MASK_1M;
    default: return 0; /* Unreachable */
    }
}

static uint32_t get_tlb_size_pages( uint32_t flags )
{
    switch( flags & TLB_SIZE_MASK ) {
    case TLB_SIZE_1K: return 0;
    case TLB_SIZE_4K: return 1;
    case TLB_SIZE_64K: return 16;
    case TLB_SIZE_1M: return 256;
    default: return 0; /* Unreachable */
    }
}
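
/* Worked example: a 64K entry covers 0x10000 bytes, so get_tlb_size_mask()
 * returns 0xFFFF0000 (assuming the conventional MASK_* definitions) and
 * get_tlb_size_pages() returns 16 - the number of 4K address-space slots the
 * entry occupies. In general npages == ((~mask)+1) >> 12 for 4K and larger;
 * the 1K size returns 0 pages, which mmu_utlb_map_pages() below treats as the
 * special sub-4K case.
 */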
/**
 * Add a new TLB entry mapping to the address space table. If any of the pages
 * are already mapped, they are mapped to the TLB multi-hit page instead.
 * @return FALSE if a TLB multihit situation was detected, otherwise TRUE.
 */
static gboolean mmu_utlb_map_pages( mem_region_fn_t priv_page, mem_region_fn_t user_page, sh4addr_t start_addr, int npages )
{
    mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
    mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
    gboolean mapping_ok = TRUE;
    int i;

    if( (start_addr & 0xFC000000) == 0xE0000000 ) {
        /* Storequeue mapping */
        ptr = &storequeue_address_space[(start_addr-0xE0000000) >> 12];
        uptr = &storequeue_user_address_space[(start_addr-0xE0000000) >> 12];
    } else if( (start_addr & 0xE0000000) == 0xC0000000 ) {
        user_page = NULL; /* No user access to P3 region */
    } else if( start_addr >= 0x80000000 ) {
        return TRUE; // No mapping - legal but meaningless
    }

    if( npages == 0 ) { /* 1K page */
        struct utlb_1k_entry *ent;
        int i, idx = (start_addr >> 10) & 0x03;
        if( IS_1K_PAGE_ENTRY(*ptr) ) {
            ent = (struct utlb_1k_entry *)*ptr;
        } else {
            ent = mmu_utlb_1k_alloc();
            /* New 1K struct - init to previous contents of region */
            for( i=0; i<4; i++ ) {
                ent->subpages[i] = *ptr;
                ent->user_subpages[i] = *uptr;
            }
            *ptr = &ent->fn;
            *uptr = &ent->user_fn;
        }

        if( priv_page != NULL ) {
            if( ent->subpages[idx] == &mem_region_tlb_miss ) {
                ent->subpages[idx] = priv_page;
            } else {
                mapping_ok = FALSE;
                ent->subpages[idx] = &mem_region_tlb_multihit;
            }
        }
        if( user_page != NULL ) {
            if( ent->user_subpages[idx] == &mem_region_tlb_miss ) {
                ent->user_subpages[idx] = user_page;
            } else {
                mapping_ok = FALSE;
                ent->user_subpages[idx] = &mem_region_tlb_multihit;
            }
        }
    } else {
        if( priv_page != NULL ) {
            if( user_page != NULL ) {
                for( i=0; i<npages; i++ ) {
                    if( *ptr == &mem_region_tlb_miss ) {
                        *ptr++ = priv_page;
                        *uptr++ = user_page;
                    } else {
                        mapping_ok = FALSE;
                        *ptr++ = &mem_region_tlb_multihit;
                        *uptr++ = &mem_region_tlb_multihit;
                    }
                }
            } else {
                /* Privileged mapping only */
                for( i=0; i<npages; i++ ) {
                    if( *ptr == &mem_region_tlb_miss ) {
                        *ptr++ = priv_page;
                    } else {
                        mapping_ok = FALSE;
                        *ptr++ = &mem_region_tlb_multihit;
                    }
                }
            }
        } else if( user_page != NULL ) {
            /* User mapping only (eg ASID change remap w/ SV=1) */
            for( i=0; i<npages; i++ ) {
                if( *uptr == &mem_region_tlb_miss ) {
                    *uptr++ = user_page;
                } else {
                    mapping_ok = FALSE;
                    *uptr++ = &mem_region_tlb_multihit;
                }
            }
        }
    }

    return mapping_ok;
}
/**
 * Remap any pages within the region covered by entryNo, but not including
 * entryNo itself. This is used to reestablish pages that were previously
 * covered by a multi-hit exception region when one of the pages is removed.
 */
static void mmu_utlb_remap_pages( gboolean remap_priv, gboolean remap_user, int entryNo )
{
    int mask = mmu_utlb[entryNo].mask;
    uint32_t remap_addr = mmu_utlb[entryNo].vpn & mask;
    int i;

    for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
        if( i != entryNo && (mmu_utlb[i].vpn & mask) == remap_addr && (mmu_utlb[i].flags & TLB_VALID) ) {
            /* Overlapping region */
            mem_region_fn_t priv_page = (remap_priv ? &mmu_utlb_pages[i].fn : NULL);
            mem_region_fn_t user_page = (remap_user ? mmu_utlb_pages[i].user_fn : NULL);
            sh4addr_t start_addr;
            int npages;

            if( mmu_utlb[i].mask >= mask ) {
                /* entry is no larger than the area we're replacing - map completely */
                start_addr = mmu_utlb[i].vpn & mmu_utlb[i].mask;
                npages = get_tlb_size_pages( mmu_utlb[i].flags );
            } else {
                /* Otherwise map subset - region covered by removed page */
                start_addr = remap_addr;
                npages = get_tlb_size_pages( mmu_utlb[entryNo].flags );
            }

            if( (mmu_utlb[i].flags & TLB_SHARE) || mmu_utlb[i].asid == mmu_asid ) {
                mmu_utlb_map_pages( priv_page, user_page, start_addr, npages );
            } else if( IS_SV_ENABLED() ) {
                mmu_utlb_map_pages( priv_page, NULL, start_addr, npages );
            }
        }
    }
}
/**
 * Remove a previous TLB mapping (replacing them with the TLB miss region).
 * @return FALSE if any pages were previously mapped to the TLB multihit page,
 * otherwise TRUE. In either case, all pages in the region are cleared to TLB miss.
 */
static gboolean mmu_utlb_unmap_pages( gboolean unmap_priv, gboolean unmap_user, sh4addr_t start_addr, int npages )
{
    mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
    mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
    gboolean unmapping_ok = TRUE;
    int i;

    if( (start_addr & 0xFC000000) == 0xE0000000 ) {
        /* Storequeue mapping */
        ptr = &storequeue_address_space[(start_addr-0xE0000000) >> 12];
        uptr = &storequeue_user_address_space[(start_addr-0xE0000000) >> 12];
    } else if( (start_addr & 0xE0000000) == 0xC0000000 ) {
        unmap_user = FALSE; /* No user access to P3 region */
    } else if( start_addr >= 0x80000000 ) {
        return TRUE; // No mapping - legal but meaningless
    }

    if( npages == 0 ) { // 1K page
        assert( IS_1K_PAGE_ENTRY( *ptr ) );
        struct utlb_1k_entry *ent = (struct utlb_1k_entry *)*ptr;
        int i, idx = (start_addr >> 10) & 0x03, mergeable=1;
        if( ent->subpages[idx] == &mem_region_tlb_multihit ) {
            unmapping_ok = FALSE;
        }
        if( unmap_priv )
            ent->subpages[idx] = &mem_region_tlb_miss;
        if( unmap_user )
            ent->user_subpages[idx] = &mem_region_tlb_miss;

        /* If all 4 subpages have the same content, merge them together and
         * release the 1K entry
         */
        mem_region_fn_t priv_page = ent->subpages[0];
        mem_region_fn_t user_page = ent->user_subpages[0];
        for( i=1; i<4; i++ ) {
            if( priv_page != ent->subpages[i] || user_page != ent->user_subpages[i] ) {
                mergeable = 0;
                break;
            }
        }
        if( mergeable ) {
            mmu_utlb_1k_free(ent);
            *ptr = priv_page;
            *uptr = user_page;
        }
    } else if( unmap_priv ) {
        if( unmap_user ) {
            for( i=0; i<npages; i++ ) {
                if( *ptr == &mem_region_tlb_multihit ) {
                    unmapping_ok = FALSE;
                }
                *ptr++ = &mem_region_tlb_miss;
                *uptr++ = &mem_region_tlb_miss;
            }
        } else {
            /* Privileged (un)mapping only */
            for( i=0; i<npages; i++ ) {
                if( *ptr == &mem_region_tlb_multihit ) {
                    unmapping_ok = FALSE;
                }
                *ptr++ = &mem_region_tlb_miss;
            }
        }
    } else if( unmap_user ) {
        /* User (un)mapping only */
        for( i=0; i<npages; i++ ) {
            if( *uptr == &mem_region_tlb_multihit ) {
                unmapping_ok = FALSE;
            }
            *uptr++ = &mem_region_tlb_miss;
        }
    }

    return unmapping_ok;
}
static void mmu_utlb_insert_entry( int entry )
{
    struct utlb_entry *ent = &mmu_utlb[entry];
    mem_region_fn_t page = &mmu_utlb_pages[entry].fn;
    mem_region_fn_t upage;
    sh4addr_t start_addr = ent->vpn & ent->mask;
    int npages = get_tlb_size_pages(ent->flags);

    if( (ent->flags & TLB_USERMODE) == 0 ) {
        upage = &mem_region_user_protected;
    } else {
        upage = page;
    }
    mmu_utlb_pages[entry].user_fn = upage;

    if( (ent->flags & TLB_WRITABLE) == 0 ) {
        page->write_long = (mem_write_fn_t)tlb_protected_write;
        page->write_word = (mem_write_fn_t)tlb_protected_write;
        page->write_byte = (mem_write_fn_t)tlb_protected_write;
        page->write_burst = (mem_write_burst_fn_t)tlb_protected_write;
        mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], FALSE );
    } else if( (ent->flags & TLB_DIRTY) == 0 ) {
        page->write_long = (mem_write_fn_t)tlb_initial_write;
        page->write_word = (mem_write_fn_t)tlb_initial_write;
        page->write_byte = (mem_write_fn_t)tlb_initial_write;
        page->write_burst = (mem_write_burst_fn_t)tlb_initial_write;
        mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], FALSE );
    } else {
        mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], TRUE );
    }

    /* Is page visible? */
    if( (ent->flags & TLB_SHARE) || ent->asid == mmu_asid ) {
        mmu_utlb_map_pages( page, upage, start_addr, npages );
    } else if( IS_SV_ENABLED() ) {
        mmu_utlb_map_pages( page, NULL, start_addr, npages );
    }
}
static void mmu_utlb_remove_entry( int entry )
{
    struct utlb_entry *ent = &mmu_utlb[entry];
    sh4addr_t start_addr = ent->vpn&ent->mask;
    mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
    mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
    gboolean unmap_user;
    int npages = get_tlb_size_pages(ent->flags);

    if( (ent->flags & TLB_SHARE) || ent->asid == mmu_asid ) {
        unmap_user = TRUE;
    } else if( IS_SV_ENABLED() ) {
        unmap_user = FALSE;
    } else {
        return; // Not mapped
    }

    gboolean clean_unmap = mmu_utlb_unmap_pages( TRUE, unmap_user, start_addr, npages );

    if( !clean_unmap ) {
        mmu_utlb_remap_pages( TRUE, unmap_user, entry );
    }
}
static void mmu_utlb_register_all()
{
    int i;
    for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
        if( mmu_utlb[i].flags & TLB_VALID )
            mmu_utlb_insert_entry( i );
    }
}

static void mmu_invalidate_tlb()
{
    int i;
    for( i=0; i<ITLB_ENTRY_COUNT; i++ ) {
        mmu_itlb[i].flags &= (~TLB_VALID);
    }
    if( IS_TLB_ENABLED() ) {
        for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
            if( mmu_utlb[i].flags & TLB_VALID ) {
                mmu_utlb_remove_entry( i );
            }
        }
    }
    for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
        mmu_utlb[i].flags &= (~TLB_VALID);
    }
}
/******************************************************************************/
/* MMU TLB address translation                                                */
/******************************************************************************/

/**
 * Translate a 32-bit address into a UTLB entry number. Does not check for
 * page protection etc.
 * @return the entryNo if found, -1 if not found, and -2 for a multi-hit.
 */
int mmu_utlb_entry_for_vpn( uint32_t vpn )
{
    mem_region_fn_t fn = sh4_address_space[vpn>>12];
    if( fn >= &mmu_utlb_pages[0].fn && fn < &mmu_utlb_pages[UTLB_ENTRY_COUNT].fn ) {
        return ((struct utlb_page_entry *)fn) - &mmu_utlb_pages[0];
    } else if( fn == &mem_region_tlb_multihit ) {
        return -2;
    } else {
        return -1;
    }
}
/**
 * Perform the actual utlb lookup w/ asid matching.
 * Possible outcomes are:
 *   0..63 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant UTLB entry, or an error.
 */
static inline int mmu_utlb_lookup_vpn_asid( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    mmu_urc++;
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
        mmu_urc = 0;
    }

    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
                ((mmu_utlb[i].flags & TLB_SHARE) || mmu_asid == mmu_utlb[i].asid) &&
                ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }
    return result;
}
/**
 * Perform the actual utlb lookup matching on vpn only
 * Possible outcomes are:
 *   0..63 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant UTLB entry, or an error.
 */
static inline int mmu_utlb_lookup_vpn( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    mmu_urc++;
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
        mmu_urc = 0;
    }

    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
                ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }
    return result;
}
/**
 * Update the ITLB by replacing the LRU entry with the specified UTLB entry.
 * @return the number (0-3) of the replaced entry.
 */
static int inline mmu_itlb_update_from_utlb( int entryNo )
{
    int replace;
    /* Determine entry to replace based on lrui */
    if( (mmu_lrui & 0x38) == 0x38 ) {
        replace = 0;
        mmu_lrui = mmu_lrui & 0x07;
    } else if( (mmu_lrui & 0x26) == 0x06 ) {
        replace = 1;
        mmu_lrui = (mmu_lrui & 0x19) | 0x20;
    } else if( (mmu_lrui & 0x15) == 0x01 ) {
        replace = 2;
        mmu_lrui = (mmu_lrui & 0x3E) | 0x14;
    } else { // Note - gets invalid entries too
        replace = 3;
        mmu_lrui = (mmu_lrui | 0x0B);
    }

    mmu_itlb[replace].vpn = mmu_utlb[entryNo].vpn;
    mmu_itlb[replace].mask = mmu_utlb[entryNo].mask;
    mmu_itlb[replace].ppn = mmu_utlb[entryNo].ppn;
    mmu_itlb[replace].asid = mmu_utlb[entryNo].asid;
    mmu_itlb[replace].flags = mmu_utlb[entryNo].flags & 0x01DA;
    return replace;
}
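
/* The LRUI logic above implements the SH7750's 6-bit pairwise-age matrix:
 * each bit records which of one pair of ITLB entries was used more recently,
 * and an entry is replaced when all three bits involving it mark it oldest.
 * Worked example, starting from mmu_lrui == 0 and touching entries 3,2,1,0
 * in that order (per the case tables in the lookup functions below): lrui
 * becomes 0x0B, 0x1E, 0x38, then 0x00, after which successive replacements
 * select entries 3, 2, 1, 0 in turn.
 */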
/**
 * Perform the actual itlb lookup w/ asid protection
 * Possible outcomes are:
 *   0..3 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant ITLB entry, or an error.
 */
static inline int mmu_itlb_lookup_vpn_asid( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
                ((mmu_itlb[i].flags & TLB_SHARE) || mmu_asid == mmu_itlb[i].asid) &&
                ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    if( result == -1 ) {
        int utlbEntry = mmu_utlb_entry_for_vpn( vpn );
        if( utlbEntry < 0 ) {
            return utlbEntry;
        } else {
            return mmu_itlb_update_from_utlb( utlbEntry );
        }
    }

    switch( result ) {
    case 0: mmu_lrui = (mmu_lrui & 0x07); break;
    case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
    case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
    case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
    }

    return result;
}
/**
 * Perform the actual itlb lookup on vpn only
 * Possible outcomes are:
 *   0..3 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant ITLB entry, or an error.
 */
static inline int mmu_itlb_lookup_vpn( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
                ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    if( result == -1 ) {
        int utlbEntry = mmu_utlb_lookup_vpn( vpn );
        if( utlbEntry < 0 ) {
            return utlbEntry;
        } else {
            return mmu_itlb_update_from_utlb( utlbEntry );
        }
    }

    switch( result ) {
    case 0: mmu_lrui = (mmu_lrui & 0x07); break;
    case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
    case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
    case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
    }

    return result;
}
/**
 * Update the icache for an untranslated address
 */
static inline void mmu_update_icache_phys( sh4addr_t addr )
{
    if( (addr & 0x1C000000) == 0x0C000000 ) {
        /* Main ram */
        sh4_icache.page_vma = addr & 0xFF000000;
        sh4_icache.page_ppa = 0x0C000000;
        sh4_icache.mask = 0xFF000000;
        sh4_icache.page = dc_main_ram;
    } else if( (addr & 0x1FE00000) == 0 ) {
        /* Boot ROM */
        sh4_icache.page_vma = addr & 0xFFE00000;
        sh4_icache.page_ppa = 0;
        sh4_icache.mask = 0xFFE00000;
        sh4_icache.page = dc_boot_rom;
    } else {
        /* Not a RAM/ROM region - invalidate the icache */
        sh4_icache.page_vma = -1;
    }
}
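
/* Worked example: addr 0x8C0010A4 (a P1 view of main RAM) satisfies
 * (addr & 0x1C000000) == 0x0C000000, so the icache window becomes the whole
 * 16MB mirror: page_vma = 0x8C000000, page_ppa = 0x0C000000, and a later
 * fetch at vma hits iff (vma & 0xFF000000) == sh4_icache.page_vma. */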
/**
 * Update the sh4_icache structure to describe the page(s) containing the
 * given vma. If the address does not reference a RAM/ROM region, the icache
 * will be invalidated instead.
 * If AT is on, this method will raise TLB exceptions normally
 * (hence this method should only be used immediately prior to execution of
 * code), and otherwise will set the icache according to the matching TLB entry.
 * If AT is off, this method will set the entire referenced RAM/ROM region in
 * the icache.
 * @return TRUE if the update completed (successfully or otherwise), FALSE
 * if an exception was raised.
 */
gboolean FASTCALL mmu_update_icache( sh4vma_t addr )
{
    int entryNo;
    if( IS_SH4_PRIVMODE() ) {
        if( addr & 0x80000000 ) {
            if( addr < 0xC0000000 ) {
                /* P1, P2 and P4 regions are pass-through (no translation) */
                mmu_update_icache_phys(addr);
                return TRUE;
            } else if( addr >= 0xE0000000 && addr < 0xFFFFFF00 ) {
                RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
                return FALSE;
            }
        }

        uint32_t mmucr = MMIO_READ(MMU,MMUCR);
        if( (mmucr & MMUCR_AT) == 0 ) {
            mmu_update_icache_phys(addr);
            return TRUE;
        }

        if( (mmucr & MMUCR_SV) == 0 )
            entryNo = mmu_itlb_lookup_vpn_asid( addr );
        else
            entryNo = mmu_itlb_lookup_vpn( addr );
    } else {
        if( addr & 0x80000000 ) {
            RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
            return FALSE;
        }

        uint32_t mmucr = MMIO_READ(MMU,MMUCR);
        if( (mmucr & MMUCR_AT) == 0 ) {
            mmu_update_icache_phys(addr);
            return TRUE;
        }

        entryNo = mmu_itlb_lookup_vpn_asid( addr );

        if( entryNo != -1 && (mmu_itlb[entryNo].flags & TLB_USERMODE) == 0 ) {
            RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
            return FALSE;
        }
    }

    switch( entryNo ) {
    case -1:
        RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
        return FALSE;
    case -2:
        RAISE_TLB_MULTIHIT_ERROR(addr);
        return FALSE;
    default:
        sh4_icache.page_ppa = mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask;
        sh4_icache.page = mem_get_region( sh4_icache.page_ppa );
        if( sh4_icache.page == NULL ) {
            sh4_icache.page_vma = -1;
        } else {
            sh4_icache.page_vma = mmu_itlb[entryNo].vpn & mmu_itlb[entryNo].mask;
            sh4_icache.mask = mmu_itlb[entryNo].mask;
        }
        return TRUE;
    }
}
/**
 * Translate address for disassembly purposes (ie performs an instruction
 * lookup) - does not raise exceptions or modify any state, and ignores
 * protection bits. Returns the translated address, or MMU_VMA_ERROR
 * on translation failure.
 */
sh4addr_t FASTCALL mmu_vma_to_phys_disasm( sh4vma_t vma )
{
    if( vma & 0x80000000 ) {
        if( vma < 0xC0000000 ) {
            /* P1, P2 and P4 regions are pass-through (no translation) */
            return VMA_TO_EXT_ADDR(vma);
        } else if( vma >= 0xE0000000 && vma < 0xFFFFFF00 ) {
            /* Not translatable */
            return MMU_VMA_ERROR;
        }
    }

    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( (mmucr & MMUCR_AT) == 0 ) {
        return VMA_TO_EXT_ADDR(vma);
    }

    int entryNo = mmu_itlb_lookup_vpn( vma );
    if( entryNo == -2 ) {
        entryNo = mmu_itlb_lookup_vpn_asid( vma );
    }
    if( entryNo < 0 ) {
        return MMU_VMA_ERROR;
    } else {
        return (mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask) |
            (vma & (~mmu_itlb[entryNo].mask));
    }
}
void FASTCALL sh4_flush_store_queue( sh4addr_t addr )
{
    int queue = (addr&0x20)>>2;
    uint32_t hi = MMIO_READ( MMU, QACR0 + (queue>>1)) << 24;
    sh4ptr_t src = (sh4ptr_t)&sh4r.store_queue[queue];
    sh4addr_t target = (addr&0x03FFFFE0) | hi;
    ext_address_space[target>>12]->write_burst( target, src );
}
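
/* Worked example: a PREF on 0xE1000020 selects SQ1 (address bit 5 set), so
 * queue == 8 (SQ1 occupies words 8..15 of sh4r.store_queue) and QACR1
 * supplies external address bits 28:26. With QACR1 == 0x10 (a value chosen
 * purely for illustration), hi == 0x10000000 and the 32-byte burst is
 * written to target == (0xE1000020 & 0x03FFFFE0) | 0x10000000 == 0x11000020.
 */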
void FASTCALL sh4_flush_store_queue_mmu( sh4addr_t addr, void *exc )
{
    int queue = (addr&0x20)>>2;
    sh4ptr_t src = (sh4ptr_t)&sh4r.store_queue[queue];

    /* Store queue operation */
    storequeue_address_space[(addr&0x03FFFFE0)>>12]->write_burst( addr, src );
}
/********************** TLB Direct-Access Regions ***************************/
#ifdef HAVE_FRAME_ADDRESS
#define EXCEPTION_EXIT() do{ *(((void **)__builtin_frame_address(0))+1) = exc; return; } while(0)
#else
#define EXCEPTION_EXIT() sh4_core_exit(CORE_EXIT_EXCEPTION)
#endif
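
/* The HAVE_FRAME_ADDRESS variant is a longjmp-free unwind trick: the caller
 * passes its exception continuation in 'exc', and the macro overwrites the
 * handler's saved return address on the stack (one word above the frame
 * pointer) so that the plain 'return' resumes at the exception epilogue
 * rather than after the faulting memory access. This assumes a conventional
 * frame layout, hence the configure-time guard; the fallback simply forces
 * the core loop to exit with an exception status.
 */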
#define ITLB_ENTRY(addr) ((addr>>7)&0x03)
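
/* Example: P4 addresses 0xF2000000, 0xF2000080, 0xF2000100 and 0xF2000180
 * decode (via address bits 8:7) to ITLB entries 0..3 respectively. */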
int32_t FASTCALL mmu_itlb_addr_read( sh4addr_t addr )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    return ent->vpn | ent->asid | (ent->flags & TLB_VALID);
}

void FASTCALL mmu_itlb_addr_write( sh4addr_t addr, uint32_t val )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    ent->vpn = val & 0xFFFFFC00;
    ent->asid = val & 0x000000FF;
    ent->flags = (ent->flags & ~(TLB_VALID)) | (val&TLB_VALID);
}

int32_t FASTCALL mmu_itlb_data_read( sh4addr_t addr )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    return (ent->ppn & 0x1FFFFC00) | ent->flags;
}

void FASTCALL mmu_itlb_data_write( sh4addr_t addr, uint32_t val )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    ent->ppn = val & 0x1FFFFC00;
    ent->flags = val & 0x000001DA;
    ent->mask = get_tlb_size_mask(val);
    if( ent->ppn >= 0x1C000000 )
        ent->ppn |= 0xE0000000;
}
#define UTLB_ENTRY(addr) ((addr>>8)&0x3F)
#define UTLB_ASSOC(addr) (addr&0x80)
#define UTLB_DATA2(addr) (addr&0x00800000)
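
/* Example: a write to 0xF6003F80 decodes to UTLB entry 63 with the
 * associative bit (0x80) set, so it takes the associative-compare path in
 * mmu_utlb_addr_write() below; 0xF7800000 selects the data array 2 view
 * (bit 23) of UTLB entry 0, i.e. the PCMCIA assistance bits. */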
int32_t FASTCALL mmu_utlb_addr_read( sh4addr_t addr )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    return ent->vpn | ent->asid | (ent->flags & TLB_VALID) |
        ((ent->flags & TLB_DIRTY)<<7);
}

int32_t FASTCALL mmu_utlb_data_read( sh4addr_t addr )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    if( UTLB_DATA2(addr) ) {
        return ent->pcmcia;
    } else {
        return (ent->ppn&0x1FFFFC00) | ent->flags;
    }
}
/**
 * Find a UTLB entry for the associative TLB write - same as the normal
 * lookup but ignores the valid bit.
 */
static inline int mmu_utlb_lookup_assoc( uint32_t vpn, uint32_t asid )
{
    int result = -1;
    unsigned int i;
    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
                ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
                ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                fprintf( stderr, "TLB Multi hit: %d %d\n", result, i );
                return -2;
            }
            result = i;
        }
    }
    return result;
}

/**
 * Find a ITLB entry for the associative TLB write - same as the normal
 * lookup but ignores the valid bit.
 */
static inline int mmu_itlb_lookup_assoc( uint32_t vpn, uint32_t asid )
{
    int result = -1;
    unsigned int i;
    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
                ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
                ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }
    return result;
}
void FASTCALL mmu_utlb_addr_write( sh4addr_t addr, uint32_t val, void *exc )
{
    if( UTLB_ASSOC(addr) ) {
        int utlb = mmu_utlb_lookup_assoc( val, mmu_asid );
        if( utlb >= 0 ) {
            struct utlb_entry *ent = &mmu_utlb[utlb];
            uint32_t old_flags = ent->flags;
            ent->flags = ent->flags & ~(TLB_DIRTY|TLB_VALID);
            ent->flags |= (val & TLB_VALID);
            ent->flags |= ((val & 0x200)>>7);
            if( ((old_flags^ent->flags) & (TLB_VALID|TLB_DIRTY)) != 0 ) {
                if( old_flags & TLB_VALID )
                    mmu_utlb_remove_entry( utlb );
                if( ent->flags & TLB_VALID )
                    mmu_utlb_insert_entry( utlb );
            }
        }

        int itlb = mmu_itlb_lookup_assoc( val, mmu_asid );
        if( itlb >= 0 ) {
            struct itlb_entry *ent = &mmu_itlb[itlb];
            ent->flags = (ent->flags & (~TLB_VALID)) | (val & TLB_VALID);
        }

        if( itlb == -2 || utlb == -2 ) {
            RAISE_TLB_MULTIHIT_ERROR(addr);
            EXCEPTION_EXIT();
        }
    } else {
        struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
        if( ent->flags & TLB_VALID )
            mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
        ent->vpn = (val & 0xFFFFFC00);
        ent->asid = (val & 0xFF);
        ent->flags = (ent->flags & ~(TLB_DIRTY|TLB_VALID));
        ent->flags |= (val & TLB_VALID);
        ent->flags |= ((val & 0x200)>>7);
        if( ent->flags & TLB_VALID )
            mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
    }
}
void FASTCALL mmu_utlb_data_write( sh4addr_t addr, uint32_t val )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    if( UTLB_DATA2(addr) ) {
        ent->pcmcia = val & 0x0000000F;
    } else {
        if( ent->flags & TLB_VALID )
            mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
        ent->ppn = (val & 0x1FFFFC00);
        ent->flags = (val & 0x000001FF);
        ent->mask = get_tlb_size_mask(val);
        if( ent->flags & TLB_VALID )
            mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
    }
}
struct mem_region_fn p4_region_itlb_addr = {
    mmu_itlb_addr_read, mmu_itlb_addr_write,
    mmu_itlb_addr_read, mmu_itlb_addr_write,
    mmu_itlb_addr_read, mmu_itlb_addr_write,
    unmapped_read_burst, unmapped_write_burst };
struct mem_region_fn p4_region_itlb_data = {
    mmu_itlb_data_read, mmu_itlb_data_write,
    mmu_itlb_data_read, mmu_itlb_data_write,
    mmu_itlb_data_read, mmu_itlb_data_write,
    unmapped_read_burst, unmapped_write_burst };
struct mem_region_fn p4_region_utlb_addr = {
    mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
    mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
    mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
    unmapped_read_burst, unmapped_write_burst };
struct mem_region_fn p4_region_utlb_data = {
    mmu_utlb_data_read, mmu_utlb_data_write,
    mmu_utlb_data_read, mmu_utlb_data_write,
    mmu_utlb_data_read, mmu_utlb_data_write,
    unmapped_read_burst, unmapped_write_burst };
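
/* These initializers follow the mem_region_fn vtable layout used throughout
 * the module: read/write pairs for long, word and byte accesses followed by
 * the burst pair, so one handler serves every access width here. Dispatch
 * from the core is a single indexed indirect call, e.g. (sketch):
 *
 *     sh4_address_space[addr >> 12]->read_long( addr );
 */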
/********************** Error regions **************************/

static void FASTCALL address_error_read( sh4addr_t addr, void *exc )
{
    RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
    EXCEPTION_EXIT();
}

static void FASTCALL address_error_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
{
    RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
    EXCEPTION_EXIT();
}

static void FASTCALL address_error_write( sh4addr_t addr, uint32_t val, void *exc )
{
    RAISE_MEM_ERROR(EXC_DATA_ADDR_WRITE, addr);
    EXCEPTION_EXIT();
}

static void FASTCALL tlb_miss_read( sh4addr_t addr, void *exc )
{
    RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
    EXCEPTION_EXIT();
}

static void FASTCALL tlb_miss_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
{
    RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
    EXCEPTION_EXIT();
}

static void FASTCALL tlb_miss_write( sh4addr_t addr, uint32_t val, void *exc )
{
    RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, addr);
    EXCEPTION_EXIT();
}

static int32_t FASTCALL tlb_protected_read( sh4addr_t addr, void *exc )
{
    RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
    EXCEPTION_EXIT();
}

static int32_t FASTCALL tlb_protected_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
{
    RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
    EXCEPTION_EXIT();
}

static void FASTCALL tlb_protected_write( sh4addr_t addr, uint32_t val, void *exc )
{
    RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, addr);
    EXCEPTION_EXIT();
}

static void FASTCALL tlb_initial_write( sh4addr_t addr, uint32_t val, void *exc )
{
    RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, addr);
    EXCEPTION_EXIT();
}

static int32_t FASTCALL tlb_multi_hit_read( sh4addr_t addr, void *exc )
{
    MMIO_WRITE(MMU, TEA, addr);
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (addr&0xFFFFFC00)));
    sh4_raise_reset(EXC_TLB_MULTI_HIT);
    EXCEPTION_EXIT();
}

static int32_t FASTCALL tlb_multi_hit_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
{
    MMIO_WRITE(MMU, TEA, addr);
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (addr&0xFFFFFC00)));
    sh4_raise_reset(EXC_TLB_MULTI_HIT);
    EXCEPTION_EXIT();
}

static void FASTCALL tlb_multi_hit_write( sh4addr_t addr, uint32_t val, void *exc )
{
    MMIO_WRITE(MMU, TEA, addr);
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (addr&0xFFFFFC00)));
    sh4_raise_reset(EXC_TLB_MULTI_HIT);
    EXCEPTION_EXIT();
}
/**
 * Note: Per sec 4.6.4 of the SH7750 manual, SQ
 */
struct mem_region_fn mem_region_address_error = {
    (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
    (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
    (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
    (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write };

struct mem_region_fn mem_region_tlb_miss = {
    (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
    (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
    (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
    (mem_read_burst_fn_t)tlb_miss_read_burst, (mem_write_burst_fn_t)tlb_miss_write };

struct mem_region_fn mem_region_user_protected = {
    (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
    (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
    (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
    (mem_read_burst_fn_t)tlb_protected_read_burst, (mem_write_burst_fn_t)tlb_protected_write };

struct mem_region_fn mem_region_tlb_multihit = {
    (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
    (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
    (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
    (mem_read_burst_fn_t)tlb_multi_hit_read_burst, (mem_write_burst_fn_t)tlb_multi_hit_write };