/**
 * SH4 MMU implementation based on address space page maps. This module
 * is responsible for all address decoding functions.
 *
 * Copyright (c) 2005 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#define MODULE sh4_module

#include <stdio.h>
#include <string.h>
#include <assert.h>

#include "sh4/sh4mmio.h"
#include "sh4/sh4core.h"
#include "sh4/sh4trans.h"
#include "dreamcast.h"
#define RAISE_TLB_ERROR(code, vpn) sh4_raise_tlb_exception(code, vpn)
#define RAISE_MEM_ERROR(code, vpn) \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
    sh4_raise_exception(code);
#define RAISE_TLB_MULTIHIT_ERROR(vpn) sh4_raise_tlb_multihit(vpn)

/* An entry is a 1K entry if it's one of the mmu_utlb_1k_pages entries */
#define IS_1K_PAGE_ENTRY(ent)  ( ((uintptr_t)(((struct utlb_1k_entry *)ent) - &mmu_utlb_1k_pages[0])) < UTLB_ENTRY_COUNT )
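/* Note: the membership test relies on unsigned pointer arithmetic - if ent
 * points below mmu_utlb_1k_pages the subtraction wraps to a huge value, so
 * the single < bound covers both ends of the array. */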
/* Primary address space (used directly by SH4 cores) */
mem_region_fn_t *sh4_address_space;
mem_region_fn_t *sh4_user_address_space;

/* Accessed from the UTLB accessor methods */
uint32_t mmu_urc;
uint32_t mmu_urb;
static gboolean mmu_urc_overflow; /* If true, urc was set >= urb */
static struct itlb_entry mmu_itlb[ITLB_ENTRY_COUNT];
static struct utlb_entry mmu_utlb[UTLB_ENTRY_COUNT];
static struct utlb_page_entry mmu_utlb_pages[UTLB_ENTRY_COUNT];
static uint32_t mmu_lrui;
static uint32_t mmu_asid; // current asid
static struct utlb_default_regions *mmu_user_storequeue_regions;

/* Structures for 1K page handling */
static struct utlb_1k_entry mmu_utlb_1k_pages[UTLB_ENTRY_COUNT];
static int mmu_utlb_1k_free_list[UTLB_ENTRY_COUNT];
static int mmu_utlb_1k_free_index;
/* Function prototypes */
static void mmu_invalidate_tlb();
static void mmu_utlb_register_all();
static void mmu_utlb_remove_entry(int);
static void mmu_utlb_insert_entry(int);
static void mmu_register_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn );
static void mmu_register_user_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn );
static void mmu_set_tlb_enabled( int tlb_on );
static void mmu_set_tlb_asid( uint32_t asid );
static void mmu_set_storequeue_protected( int protected, int tlb_on );
static gboolean mmu_utlb_map_pages( mem_region_fn_t priv_page, mem_region_fn_t user_page, sh4addr_t start_addr, int npages );
static void mmu_utlb_remap_pages( gboolean remap_priv, gboolean remap_user, int entryNo );
static gboolean mmu_utlb_unmap_pages( gboolean unmap_priv, gboolean unmap_user, sh4addr_t start_addr, int npages );
static gboolean mmu_ext_page_remapped( sh4addr_t page, mem_region_fn_t fn, void *user_data );
static void mmu_utlb_1k_init();
static struct utlb_1k_entry *mmu_utlb_1k_alloc();
static void mmu_utlb_1k_free( struct utlb_1k_entry *entry );
static int mmu_read_urc();

static void FASTCALL tlb_miss_read( sh4addr_t addr, void *exc );
static int32_t FASTCALL tlb_protected_read( sh4addr_t addr, void *exc );
static void FASTCALL tlb_protected_write( sh4addr_t addr, uint32_t val, void *exc );
static int32_t FASTCALL tlb_protected_read_for_write( sh4addr_t addr, void *exc );
static void FASTCALL tlb_initial_write( sh4addr_t addr, uint32_t val, void *exc );
static int32_t FASTCALL tlb_initial_read_for_write( sh4addr_t addr, void *exc );
static uint32_t get_tlb_size_mask( uint32_t flags );
static uint32_t get_tlb_size_pages( uint32_t flags );
#define DEFAULT_REGIONS 0
#define DEFAULT_STOREQUEUE_REGIONS 1
#define DEFAULT_STOREQUEUE_SQMD_REGIONS 2

static struct utlb_default_regions mmu_default_regions[3] = {
    { &mem_region_tlb_miss, &mem_region_tlb_protected, &mem_region_tlb_multihit },
    { &p4_region_storequeue_miss, &p4_region_storequeue_protected, &p4_region_storequeue_multihit },
    { &p4_region_storequeue_sqmd_miss, &p4_region_storequeue_sqmd_protected, &p4_region_storequeue_sqmd_multihit } };

#define IS_STOREQUEUE_PROTECTED() (mmu_user_storequeue_regions == &mmu_default_regions[DEFAULT_STOREQUEUE_SQMD_REGIONS])
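/* The current SQMD mode is recovered by identity rather than kept in a
 * separate flag: mmu_user_storequeue_regions always points at one of the two
 * storequeue rows of mmu_default_regions, so a pointer compare suffices. */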
/*********************** Module public functions ****************************/

/**
 * Allocate memory for the address space maps, and initialize them according
 * to the default (reset) values. (TLB is disabled by default)
 */
    sh4_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 256 );
    sh4_user_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 256 );
    mmu_user_storequeue_regions = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];

    mmu_set_tlb_enabled(0);
    mmu_register_user_mem_region( 0x80000000, 0x00000000, &mem_region_address_error );
    mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
    /* Setup P4 tlb/cache access regions */
    mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
    mmu_register_mem_region( 0xE4000000, 0xF0000000, &mem_region_unmapped );
    mmu_register_mem_region( 0xF0000000, 0xF1000000, &p4_region_icache_addr );
    mmu_register_mem_region( 0xF1000000, 0xF2000000, &p4_region_icache_data );
    mmu_register_mem_region( 0xF2000000, 0xF3000000, &p4_region_itlb_addr );
    mmu_register_mem_region( 0xF3000000, 0xF4000000, &p4_region_itlb_data );
    mmu_register_mem_region( 0xF4000000, 0xF5000000, &p4_region_ocache_addr );
    mmu_register_mem_region( 0xF5000000, 0xF6000000, &p4_region_ocache_data );
    mmu_register_mem_region( 0xF6000000, 0xF7000000, &p4_region_utlb_addr );
    mmu_register_mem_region( 0xF7000000, 0xF8000000, &p4_region_utlb_data );
    mmu_register_mem_region( 0xF8000000, 0x00000000, &mem_region_unmapped );
    /* Setup P4 control region */
    mmu_register_mem_region( 0xFF000000, 0xFF001000, &mmio_region_MMU.fn );
    mmu_register_mem_region( 0xFF100000, 0xFF101000, &mmio_region_PMM.fn );
    mmu_register_mem_region( 0xFF200000, 0xFF201000, &mmio_region_UBC.fn );
    mmu_register_mem_region( 0xFF800000, 0xFF801000, &mmio_region_BSC.fn );
    mmu_register_mem_region( 0xFF900000, 0xFFA00000, &mem_region_unmapped ); // SDMR2 + SDMR3
    mmu_register_mem_region( 0xFFA00000, 0xFFA01000, &mmio_region_DMAC.fn );
    mmu_register_mem_region( 0xFFC00000, 0xFFC01000, &mmio_region_CPG.fn );
    mmu_register_mem_region( 0xFFC80000, 0xFFC81000, &mmio_region_RTC.fn );
    mmu_register_mem_region( 0xFFD00000, 0xFFD01000, &mmio_region_INTC.fn );
    mmu_register_mem_region( 0xFFD80000, 0xFFD81000, &mmio_region_TMU.fn );
    mmu_register_mem_region( 0xFFE00000, 0xFFE01000, &mmio_region_SCI.fn );
    mmu_register_mem_region( 0xFFE80000, 0xFFE81000, &mmio_region_SCIF.fn );
    mmu_register_mem_region( 0xFFF00000, 0xFFF01000, &mem_region_unmapped ); // H-UDI

    register_mem_page_remapped_hook( mmu_ext_page_remapped, NULL );
    /* Ensure the code regions are executable. Although it might
     * be more portable to mmap these at runtime rather than using static decls. */
    mem_unprotect( mmu_utlb_pages, sizeof(mmu_utlb_pages) );
    mem_unprotect( mmu_utlb_1k_pages, sizeof(mmu_utlb_1k_pages) );

    mmio_region_MMU_write( CCR, 0 );
    mmio_region_MMU_write( MMUCR, 0 );
void MMU_save_state( FILE *f )
    fwrite( &mmu_itlb, sizeof(mmu_itlb), 1, f );
    fwrite( &mmu_utlb, sizeof(mmu_utlb), 1, f );
    fwrite( &mmu_urc, sizeof(mmu_urc), 1, f );
    fwrite( &mmu_urb, sizeof(mmu_urb), 1, f );
    fwrite( &mmu_lrui, sizeof(mmu_lrui), 1, f );
    fwrite( &mmu_asid, sizeof(mmu_asid), 1, f );
int MMU_load_state( FILE *f )
    if( fread( &mmu_itlb, sizeof(mmu_itlb), 1, f ) != 1 ) {
    if( fread( &mmu_utlb, sizeof(mmu_utlb), 1, f ) != 1 ) {
    if( fread( &mmu_urc, sizeof(mmu_urc), 1, f ) != 1 ) {
    if( fread( &mmu_urb, sizeof(mmu_urb), 1, f ) != 1 ) {
    if( fread( &mmu_lrui, sizeof(mmu_lrui), 1, f ) != 1 ) {
    if( fread( &mmu_asid, sizeof(mmu_asid), 1, f ) != 1 ) {

    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    mmu_urc_overflow = mmu_urc >= mmu_urb;
    mmu_set_tlb_enabled(mmucr&MMUCR_AT);
    mmu_set_storequeue_protected(mmucr&MMUCR_SQMD, mmucr&MMUCR_AT);
/**
 * LDTLB instruction implementation. Copies PTEH, PTEL and PTEA into the UTLB
 * entry identified by MMUCR.URC. Does not modify MMUCR or the ITLB.
 */
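/* Field layout implied by the masks below: PTEH carries VPN in bits 31:10
 * and ASID in bits 7:0; PTEL carries PPN in bits 28:10 plus the nine
 * protection/size/validity flag bits in bits 8:0; PTEA supplies the PCMCIA
 * space/timing bits. */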
    int urc = mmu_read_urc();
    if( IS_TLB_ENABLED() && mmu_utlb[urc].flags & TLB_VALID )
        mmu_utlb_remove_entry( urc );
    mmu_utlb[urc].vpn = MMIO_READ(MMU, PTEH) & 0xFFFFFC00;
    mmu_utlb[urc].asid = MMIO_READ(MMU, PTEH) & 0x000000FF;
    mmu_utlb[urc].ppn = MMIO_READ(MMU, PTEL) & 0x1FFFFC00;
    mmu_utlb[urc].flags = MMIO_READ(MMU, PTEL) & 0x000001FF;
    mmu_utlb[urc].pcmcia = MMIO_READ(MMU, PTEA);
    mmu_utlb[urc].mask = get_tlb_size_mask(mmu_utlb[urc].flags);
    if( IS_TLB_ENABLED() && mmu_utlb[urc].flags & TLB_VALID )
        mmu_utlb_insert_entry( urc );
MMIO_REGION_READ_FN( MMU, reg )
        return MMIO_READ( MMU, MMUCR) | (mmu_read_urc()<<10) | ((mmu_urb&0x3F)<<18) | (mmu_lrui<<26);
        return MMIO_READ( MMU, reg );

MMIO_REGION_READ_DEFSUBFNS(MMU)
MMIO_REGION_WRITE_FN( MMU, reg, val )
        if( (val & 0xFF) != mmu_asid ) {
            mmu_set_tlb_asid( val&0xFF );

        if( val & MMUCR_TI ) {
            mmu_invalidate_tlb();
        mmu_urc = (val >> 10) & 0x3F;
        mmu_urb = (val >> 18) & 0x3F;
        } else if( mmu_urc >= mmu_urb ) {
            mmu_urc_overflow = TRUE;
        mmu_lrui = (val >> 26) & 0x3F;

        tmp = MMIO_READ( MMU, MMUCR );
        if( (val ^ tmp) & (MMUCR_SQMD) ) {
            mmu_set_storequeue_protected( val & MMUCR_SQMD, val&MMUCR_AT );
        if( (val ^ tmp) & (MMUCR_AT) ) {
            // AT flag has changed state - flush the xlt cache as all bets
            // are off now. We also need to force an immediate exit from the
            // current block.
            mmu_set_tlb_enabled( val & MMUCR_AT );
            MMIO_WRITE( MMU, MMUCR, val );
            sh4_core_exit( CORE_EXIT_FLUSH_ICACHE );
            xlat_flush_cache(); // If we're not running, flush the cache anyway

        CCN_set_cache_control( val );

        /* Note that if the high bit is set, this appears to reset the machine.
         * Not emulating this behaviour yet until we know why... */
        PMM_write_control(0, val);
        PMM_write_control(1, val);

    MMIO_WRITE( MMU, reg, val );
/********************** 1K Page handling ***********************/
/* Since we use 4K pages as our native page size, 1K pages need a bit of extra
 * effort to manage - we justify this on the basis that most programs won't
 * actually use 1K pages, so we may as well optimize for the common case.
 *
 * Implementation uses an intermediate page entry (the utlb_1k_entry) that
 * redirects requests to the 'real' page entry. These are allocated on an
 * as-needed basis, and returned to the pool when all subpages are empty.
 */
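/* A 1K mapping therefore costs one extra indirection: the outer page table
 * resolves to the utlb_1k_entry, whose vtable dispatches on address bits
 * 11:10 to one of the four subpages. */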
static void mmu_utlb_1k_init()
    for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
        mmu_utlb_1k_free_list[i] = i;
        mmu_utlb_1k_init_vtable( &mmu_utlb_1k_pages[i] );
    mmu_utlb_1k_free_index = 0;

static struct utlb_1k_entry *mmu_utlb_1k_alloc()
    assert( mmu_utlb_1k_free_index < UTLB_ENTRY_COUNT );
    struct utlb_1k_entry *entry = &mmu_utlb_1k_pages[mmu_utlb_1k_free_list[mmu_utlb_1k_free_index++]];

static void mmu_utlb_1k_free( struct utlb_1k_entry *ent )
    unsigned int entryNo = ent - &mmu_utlb_1k_pages[0];
    assert( entryNo < UTLB_ENTRY_COUNT );
    assert( mmu_utlb_1k_free_index > 0 );
    mmu_utlb_1k_free_list[--mmu_utlb_1k_free_index] = entryNo;
/********************** Address space maintenance *************************/

/**
 * MMU accessor functions just increment URC - fixup here if necessary
 */
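/* Specifically: the TLB accessor fast paths bump mmu_urc without any range
 * checking, and this slow path folds the counter back into the architected
 * 6-bit range before MMUCR is read. The mmu_urc_overflow flag (set when URC
 * was written >= URB) defers that fixup until the counter has clearly
 * wrapped past 0x40. */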
static int mmu_read_urc()
    if( mmu_urc_overflow ) {
        if( mmu_urc >= 0x40 ) {
            mmu_urc_overflow = FALSE;

static void mmu_register_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn )
    int count = (end - start) >> 12;
    mem_region_fn_t *ptr = &sh4_address_space[start>>12];
    while( count-- > 0 ) {

static void mmu_register_user_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn )
    int count = (end - start) >> 12;
    mem_region_fn_t *ptr = &sh4_user_address_space[start>>12];
    while( count-- > 0 ) {
static gboolean mmu_ext_page_remapped( sh4addr_t page, mem_region_fn_t fn, void *user_data )
    if( (MMIO_READ(MMU,MMUCR)) & MMUCR_AT ) {
        sh4_address_space[(page|0x80000000)>>12] = fn; /* Direct map to P1 and P2 */
        sh4_address_space[(page|0xA0000000)>>12] = fn;
        /* Scan UTLB and update any direct-referencing entries */
        /* Direct map to U0, P0, P1, P2, P3 */
        for( i=0; i<= 0xC0000000; i+= 0x20000000 ) {
            sh4_address_space[(page|i)>>12] = fn;
        for( i=0; i < 0x80000000; i+= 0x20000000 ) {
            sh4_user_address_space[(page|i)>>12] = fn;
static void mmu_set_tlb_enabled( int tlb_on )
    mem_region_fn_t *ptr, *uptr;

    /* Reset the storequeue area */

        mmu_register_mem_region(0x00000000, 0x80000000, &mem_region_tlb_miss );
        mmu_register_mem_region(0xC0000000, 0xE0000000, &mem_region_tlb_miss );
        mmu_register_user_mem_region(0x00000000, 0x80000000, &mem_region_tlb_miss );

        /* Default SQ prefetch goes to TLB miss (?) */
        mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue_miss );
        mmu_register_user_mem_region( 0xE0000000, 0xE4000000, mmu_user_storequeue_regions->tlb_miss );
        mmu_utlb_register_all();

        for( i=0, ptr = sh4_address_space; i<7; i++, ptr += LXDREAM_PAGE_TABLE_ENTRIES ) {
            memcpy( ptr, ext_address_space, sizeof(mem_region_fn_t) * LXDREAM_PAGE_TABLE_ENTRIES );
        for( i=0, ptr = sh4_user_address_space; i<4; i++, ptr += LXDREAM_PAGE_TABLE_ENTRIES ) {
            memcpy( ptr, ext_address_space, sizeof(mem_region_fn_t) * LXDREAM_PAGE_TABLE_ENTRIES );

        mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
        if( IS_STOREQUEUE_PROTECTED() ) {
            mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue_sqmd );
            mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
/**
 * Flip the SQMD switch - this is rather expensive, so will need to be changed if
 * anything expects to do this frequently.
 */
static void mmu_set_storequeue_protected( int protected, int tlb_on )
    mem_region_fn_t nontlb_region;

        mmu_user_storequeue_regions = &mmu_default_regions[DEFAULT_STOREQUEUE_SQMD_REGIONS];
        nontlb_region = &p4_region_storequeue_sqmd;
        mmu_user_storequeue_regions = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
        nontlb_region = &p4_region_storequeue;

        mmu_register_user_mem_region( 0xE0000000, 0xE4000000, mmu_user_storequeue_regions->tlb_miss );
        for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
            if( (mmu_utlb[i].vpn & 0xFC000000) == 0xE0000000 ) {
                mmu_utlb_insert_entry(i);
        mmu_register_user_mem_region( 0xE0000000, 0xE4000000, nontlb_region );
static void mmu_set_tlb_asid( uint32_t asid )
    if( IS_TLB_ENABLED() ) {
        /* Scan for pages that need to be remapped */
        if( IS_SV_ENABLED() ) {
            for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
                if( mmu_utlb[i].asid == mmu_asid &&
                    (mmu_utlb[i].flags & (TLB_VALID|TLB_SHARE)) == (TLB_VALID) ) {
                    // Matches old ASID - unmap out
                    if( !mmu_utlb_unmap_pages( FALSE, TRUE, mmu_utlb[i].vpn&mmu_utlb[i].mask,
                            get_tlb_size_pages(mmu_utlb[i].flags) ) )
                        mmu_utlb_remap_pages( FALSE, TRUE, i );
            for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
                if( mmu_utlb[i].asid == asid &&
                    (mmu_utlb[i].flags & (TLB_VALID|TLB_SHARE)) == (TLB_VALID) ) {
                    // Matches new ASID - map in
                    mmu_utlb_map_pages( NULL, mmu_utlb_pages[i].user_fn,
                            mmu_utlb[i].vpn&mmu_utlb[i].mask,
                            get_tlb_size_pages(mmu_utlb[i].flags) );
            // Remap both Priv+user pages
            for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
                if( mmu_utlb[i].asid == mmu_asid &&
                    (mmu_utlb[i].flags & (TLB_VALID|TLB_SHARE)) == (TLB_VALID) ) {
                    if( !mmu_utlb_unmap_pages( TRUE, TRUE, mmu_utlb[i].vpn&mmu_utlb[i].mask,
                            get_tlb_size_pages(mmu_utlb[i].flags) ) )
                        mmu_utlb_remap_pages( TRUE, TRUE, i );
            for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
                if( mmu_utlb[i].asid == asid &&
                    (mmu_utlb[i].flags & (TLB_VALID|TLB_SHARE)) == (TLB_VALID) ) {
                    mmu_utlb_map_pages( &mmu_utlb_pages[i].fn, mmu_utlb_pages[i].user_fn,
                            mmu_utlb[i].vpn&mmu_utlb[i].mask,
                            get_tlb_size_pages(mmu_utlb[i].flags) );

    sh4_icache.page_vma = -1; // invalidate icache as asid has changed
static uint32_t get_tlb_size_mask( uint32_t flags )
    switch( flags & TLB_SIZE_MASK ) {
    case TLB_SIZE_1K: return MASK_1K;
    case TLB_SIZE_4K: return MASK_4K;
    case TLB_SIZE_64K: return MASK_64K;
    case TLB_SIZE_1M: return MASK_1M;
    default: return 0; /* Unreachable */

static uint32_t get_tlb_size_pages( uint32_t flags )
    switch( flags & TLB_SIZE_MASK ) {
    case TLB_SIZE_1K: return 0;
    case TLB_SIZE_4K: return 1;
    case TLB_SIZE_64K: return 16;
    case TLB_SIZE_1M: return 256;
    default: return 0; /* Unreachable */
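/* Worked example: a TLB_SIZE_64K entry covers 16 native 4K pages under
 * MASK_64K, while TLB_SIZE_1K reports 0 pages - the cue for callers (see
 * mmu_utlb_map_pages/mmu_utlb_unmap_pages) to take the utlb_1k_entry
 * subpage path instead of the normal page loop. */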
/**
 * Add a new TLB entry mapping to the address space table. If any of the pages
 * are already mapped, they are mapped to the TLB multi-hit page instead.
 * @return FALSE if a TLB multihit situation was detected, otherwise TRUE.
 */
static gboolean mmu_utlb_map_pages( mem_region_fn_t priv_page, mem_region_fn_t user_page, sh4addr_t start_addr, int npages )
    mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
    mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
    struct utlb_default_regions *privdefs = &mmu_default_regions[DEFAULT_REGIONS];
    struct utlb_default_regions *userdefs = privdefs;

    gboolean mapping_ok = TRUE;

    if( (start_addr & 0xFC000000) == 0xE0000000 ) {
        /* Storequeue mapping */
        privdefs = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
        userdefs = mmu_user_storequeue_regions;
    } else if( (start_addr & 0xE0000000) == 0xC0000000 ) {
        user_page = NULL; /* No user access to P3 region */
    } else if( start_addr >= 0x80000000 ) {
        return TRUE; // No mapping - legal but meaningless
        struct utlb_1k_entry *ent;
        int i, idx = (start_addr >> 10) & 0x03;
        if( IS_1K_PAGE_ENTRY(*ptr) ) {
            ent = (struct utlb_1k_entry *)*ptr;
            ent = mmu_utlb_1k_alloc();
            /* New 1K struct - init to previous contents of region */
            for( i=0; i<4; i++ ) {
                ent->subpages[i] = *ptr;
                ent->user_subpages[i] = *uptr;
            *uptr = &ent->user_fn;

        if( priv_page != NULL ) {
            if( ent->subpages[idx] == privdefs->tlb_miss ) {
                ent->subpages[idx] = priv_page;
                ent->subpages[idx] = privdefs->tlb_multihit;
        if( user_page != NULL ) {
            if( ent->user_subpages[idx] == userdefs->tlb_miss ) {
                ent->user_subpages[idx] = user_page;
                ent->user_subpages[idx] = userdefs->tlb_multihit;

        if( priv_page != NULL ) {
            /* Privileged mapping only */
            for( i=0; i<npages; i++ ) {
                if( *ptr == privdefs->tlb_miss ) {
                    *ptr++ = privdefs->tlb_multihit;
        if( user_page != NULL ) {
            /* User mapping only (eg ASID change remap w/ SV=1) */
            for( i=0; i<npages; i++ ) {
                if( *uptr == userdefs->tlb_miss ) {
                    *uptr++ = userdefs->tlb_multihit;
/**
 * Remap any pages within the region covered by entryNo, but not including
 * entryNo itself. This is used to reestablish pages that were previously
 * covered by a multi-hit exception region when one of the pages is removed.
 */
static void mmu_utlb_remap_pages( gboolean remap_priv, gboolean remap_user, int entryNo )
    int mask = mmu_utlb[entryNo].mask;
    uint32_t remap_addr = mmu_utlb[entryNo].vpn & mask;

    for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
        if( i != entryNo && (mmu_utlb[i].vpn & mask) == remap_addr && (mmu_utlb[i].flags & TLB_VALID) ) {
            /* Overlapping region */
            mem_region_fn_t priv_page = (remap_priv ? &mmu_utlb_pages[i].fn : NULL);
            mem_region_fn_t user_page = (remap_user ? mmu_utlb_pages[i].user_fn : NULL);

            if( mmu_utlb[i].mask >= mask ) {
                /* entry is no larger than the area we're replacing - map completely */
                start_addr = mmu_utlb[i].vpn & mmu_utlb[i].mask;
                npages = get_tlb_size_pages( mmu_utlb[i].flags );
                /* Otherwise map subset - region covered by removed page */
                start_addr = remap_addr;
                npages = get_tlb_size_pages( mmu_utlb[entryNo].flags );

            if( (mmu_utlb[i].flags & TLB_SHARE) || mmu_utlb[i].asid == mmu_asid ) {
                mmu_utlb_map_pages( priv_page, user_page, start_addr, npages );
            } else if( IS_SV_ENABLED() ) {
                mmu_utlb_map_pages( priv_page, NULL, start_addr, npages );
/**
 * Remove a previous TLB mapping (replacing it with the TLB miss region).
 * @return FALSE if any pages were previously mapped to the TLB multihit page,
 * otherwise TRUE. In either case, all pages in the region are cleared to TLB miss.
 */
static gboolean mmu_utlb_unmap_pages( gboolean unmap_priv, gboolean unmap_user, sh4addr_t start_addr, int npages )
    mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
    mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
    struct utlb_default_regions *privdefs = &mmu_default_regions[DEFAULT_REGIONS];
    struct utlb_default_regions *userdefs = privdefs;

    gboolean unmapping_ok = TRUE;

    if( (start_addr & 0xFC000000) == 0xE0000000 ) {
        /* Storequeue mapping */
        privdefs = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
        userdefs = mmu_user_storequeue_regions;
    } else if( (start_addr & 0xE0000000) == 0xC0000000 ) {
    } else if( start_addr >= 0x80000000 ) {
        return TRUE; // No mapping - legal but meaningless

    if( npages == 0 ) { // 1K page
        assert( IS_1K_PAGE_ENTRY( *ptr ) );
        struct utlb_1k_entry *ent = (struct utlb_1k_entry *)*ptr;
        int i, idx = (start_addr >> 10) & 0x03, mergeable=1;
        if( ent->subpages[idx] == privdefs->tlb_multihit ) {
            unmapping_ok = FALSE;
        ent->subpages[idx] = privdefs->tlb_miss;
        ent->user_subpages[idx] = userdefs->tlb_miss;

        /* If all 4 subpages have the same content, merge them together and
         * release the 1K entry */
        mem_region_fn_t priv_page = ent->subpages[0];
        mem_region_fn_t user_page = ent->user_subpages[0];
        for( i=1; i<4; i++ ) {
            if( priv_page != ent->subpages[i] || user_page != ent->user_subpages[i] ) {
        mmu_utlb_1k_free(ent);

        /* Privileged (un)mapping */
        for( i=0; i<npages; i++ ) {
            if( *ptr == privdefs->tlb_multihit ) {
                unmapping_ok = FALSE;
            *ptr++ = privdefs->tlb_miss;
        /* User (un)mapping */
        for( i=0; i<npages; i++ ) {
            if( *uptr == userdefs->tlb_multihit ) {
                unmapping_ok = FALSE;
            *uptr++ = userdefs->tlb_miss;
static void mmu_utlb_insert_entry( int entry )
    struct utlb_entry *ent = &mmu_utlb[entry];
    mem_region_fn_t page = &mmu_utlb_pages[entry].fn;
    mem_region_fn_t upage;
    sh4addr_t start_addr = ent->vpn & ent->mask;
    int npages = get_tlb_size_pages(ent->flags);

    if( (start_addr & 0xFC000000) == 0xE0000000 ) {
        /* Store queue mappings are a bit different - normal access is fixed to
         * the store queue register block, and we only map prefetches through
         * the TLB */
        mmu_utlb_init_storequeue_vtable( ent, &mmu_utlb_pages[entry] );

        if( (ent->flags & TLB_USERMODE) == 0 ) {
            upage = mmu_user_storequeue_regions->tlb_prot;
        } else if( IS_STOREQUEUE_PROTECTED() ) {
            upage = &p4_region_storequeue_sqmd;

        if( (ent->flags & TLB_USERMODE) == 0 ) {
            upage = &mem_region_tlb_protected;

        if( (ent->flags & TLB_WRITABLE) == 0 ) {
            page->write_long = (mem_write_fn_t)tlb_protected_write;
            page->write_word = (mem_write_fn_t)tlb_protected_write;
            page->write_byte = (mem_write_fn_t)tlb_protected_write;
            page->write_burst = (mem_write_burst_fn_t)tlb_protected_write;
            page->read_byte_for_write = (mem_read_fn_t)tlb_protected_read_for_write;
            mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], FALSE );
        } else if( (ent->flags & TLB_DIRTY) == 0 ) {
            page->write_long = (mem_write_fn_t)tlb_initial_write;
            page->write_word = (mem_write_fn_t)tlb_initial_write;
            page->write_byte = (mem_write_fn_t)tlb_initial_write;
            page->write_burst = (mem_write_burst_fn_t)tlb_initial_write;
            page->read_byte_for_write = (mem_read_fn_t)tlb_initial_read_for_write;
            mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], FALSE );
            mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], TRUE );

    mmu_utlb_pages[entry].user_fn = upage;

    /* Is page visible? */
    if( (ent->flags & TLB_SHARE) || ent->asid == mmu_asid ) {
        mmu_utlb_map_pages( page, upage, start_addr, npages );
    } else if( IS_SV_ENABLED() ) {
        mmu_utlb_map_pages( page, NULL, start_addr, npages );
static void mmu_utlb_remove_entry( int entry )
    struct utlb_entry *ent = &mmu_utlb[entry];
    sh4addr_t start_addr = ent->vpn&ent->mask;
    mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
    mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
    int npages = get_tlb_size_pages(ent->flags);

    if( (ent->flags & TLB_SHARE) || ent->asid == mmu_asid ) {
    } else if( IS_SV_ENABLED() ) {
        return; // Not mapped

    gboolean clean_unmap = mmu_utlb_unmap_pages( TRUE, unmap_user, start_addr, npages );

        mmu_utlb_remap_pages( TRUE, unmap_user, entry );
static void mmu_utlb_register_all()
    for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
        if( mmu_utlb[i].flags & TLB_VALID )
            mmu_utlb_insert_entry( i );

static void mmu_invalidate_tlb()
    for( i=0; i<ITLB_ENTRY_COUNT; i++ ) {
        mmu_itlb[i].flags &= (~TLB_VALID);
    if( IS_TLB_ENABLED() ) {
        for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
            if( mmu_utlb[i].flags & TLB_VALID ) {
                mmu_utlb_remove_entry( i );
    for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
        mmu_utlb[i].flags &= (~TLB_VALID);
/******************************************************************************/
/* MMU TLB address translation                                                */
/******************************************************************************/

/**
 * Translate a 32-bit address into a UTLB entry number. Does not check for
 * page protection etc.
 * @return the entryNo if found, -1 if not found, and -2 for a multi-hit.
 */
int mmu_utlb_entry_for_vpn( uint32_t vpn )
    mem_region_fn_t fn = sh4_address_space[vpn>>12];
    if( fn >= &mmu_utlb_pages[0].fn && fn < &mmu_utlb_pages[UTLB_ENTRY_COUNT].fn ) {
        return ((struct utlb_page_entry *)fn) - &mmu_utlb_pages[0];
    } else if( fn >= &mmu_utlb_1k_pages[0].fn && fn < &mmu_utlb_1k_pages[UTLB_ENTRY_COUNT].fn ) {
        struct utlb_1k_entry *ent = (struct utlb_1k_entry *)fn;
        fn = ent->subpages[(vpn>>10)&0x03];
        if( fn >= &mmu_utlb_pages[0].fn && fn < &mmu_utlb_pages[UTLB_ENTRY_COUNT].fn ) {
            return ((struct utlb_page_entry *)fn) - &mmu_utlb_pages[0];
    if( fn == &mem_region_tlb_multihit ) {
/**
 * Perform the actual utlb lookup w/ asid matching.
 * Possible outcomes are:
 *   0..63 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant UTLB entry, or an error.
 */
static inline int mmu_utlb_lookup_vpn_asid( uint32_t vpn )
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {

    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
            ((mmu_utlb[i].flags & TLB_SHARE) || mmu_asid == mmu_utlb[i].asid) &&
            ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
/**
 * Perform the actual utlb lookup matching on vpn only.
 * Possible outcomes are:
 *   0..63 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant UTLB entry, or an error.
 */
static inline int mmu_utlb_lookup_vpn( uint32_t vpn )
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {

    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
            ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
/**
 * Update the ITLB by replacing the LRU entry with the specified UTLB entry.
 * @return the number (0-3) of the replaced entry.
 */
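/* Replacement selection, as encoded by the LRUI tests below:
 *   (lrui & 0x38) == 0x38  -> replace entry 0, then clear bits 5-3
 *   (lrui & 0x26) == 0x06  -> replace entry 1, then set bit 5, clear bits 2,1
 *   (lrui & 0x15) == 0x01  -> replace entry 2, then set bits 4,2, clear bit 0
 *   otherwise              -> replace entry 3, then set bits 3,1,0
 * This mirrors the MMUCR.LRUI encoding in the SH7750 documentation. */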
static inline int mmu_itlb_update_from_utlb( int entryNo )
    /* Determine entry to replace based on lrui */
    if( (mmu_lrui & 0x38) == 0x38 ) {
        mmu_lrui = mmu_lrui & 0x07;
    } else if( (mmu_lrui & 0x26) == 0x06 ) {
        mmu_lrui = (mmu_lrui & 0x19) | 0x20;
    } else if( (mmu_lrui & 0x15) == 0x01 ) {
        mmu_lrui = (mmu_lrui & 0x3E) | 0x14;
    } else { // Note - gets invalid entries too
        mmu_lrui = (mmu_lrui | 0x0B);

    mmu_itlb[replace].vpn = mmu_utlb[entryNo].vpn;
    mmu_itlb[replace].mask = mmu_utlb[entryNo].mask;
    mmu_itlb[replace].ppn = mmu_utlb[entryNo].ppn;
    mmu_itlb[replace].asid = mmu_utlb[entryNo].asid;
    mmu_itlb[replace].flags = mmu_utlb[entryNo].flags & 0x01DA;
/**
 * Perform the actual itlb lookup w/ asid protection.
 * Possible outcomes are:
 *   0..63 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant ITLB entry, or an error.
 */
static inline int mmu_itlb_lookup_vpn_asid( uint32_t vpn )
    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
            ((mmu_itlb[i].flags & TLB_SHARE) || mmu_asid == mmu_itlb[i].asid) &&
            ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {

    if( result == -1 ) {
        int utlbEntry = mmu_utlb_entry_for_vpn( vpn );
        if( utlbEntry < 0 ) {
            return mmu_itlb_update_from_utlb( utlbEntry );

    case 0: mmu_lrui = (mmu_lrui & 0x07); break;
    case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
    case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
    case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
/**
 * Perform the actual itlb lookup on vpn only.
 * Possible outcomes are:
 *   0..63 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant ITLB entry, or an error.
 */
static inline int mmu_itlb_lookup_vpn( uint32_t vpn )
    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
            ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {

    if( result == -1 ) {
        int utlbEntry = mmu_utlb_lookup_vpn( vpn );
        if( utlbEntry < 0 ) {
            return mmu_itlb_update_from_utlb( utlbEntry );

    case 0: mmu_lrui = (mmu_lrui & 0x07); break;
    case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
    case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
    case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
/**
 * Update the icache for an untranslated address
 */
static inline void mmu_update_icache_phys( sh4addr_t addr )
    if( (addr & 0x1C000000) == 0x0C000000 ) {
        sh4_icache.page_vma = addr & 0xFF000000;
        sh4_icache.page_ppa = 0x0C000000;
        sh4_icache.mask = 0xFF000000;
        sh4_icache.page = dc_main_ram;
    } else if( (addr & 0x1FE00000) == 0 ) {
        sh4_icache.page_vma = addr & 0xFFE00000;
        sh4_icache.page_ppa = 0;
        sh4_icache.mask = 0xFFE00000;
        sh4_icache.page = dc_boot_rom;
        sh4_icache.page_vma = -1;
/**
 * Update the sh4_icache structure to describe the page(s) containing the
 * given vma. If the address does not reference a RAM/ROM region, the icache
 * will be invalidated instead.
 * If AT is on, this method will raise TLB exceptions normally
 * (hence this method should only be used immediately prior to execution of
 * code), and otherwise will set the icache according to the matching TLB entry.
 * If AT is off, this method will set the entire referenced RAM/ROM region in
 * the icache.
 * @return TRUE if the update completed (successfully or otherwise), FALSE
 * if an exception was raised.
 */
gboolean FASTCALL mmu_update_icache( sh4vma_t addr )
    if( IS_SH4_PRIVMODE() ) {
        if( addr & 0x80000000 ) {
            if( addr < 0xC0000000 ) {
                /* P1, P2 and P4 regions are pass-through (no translation) */
                mmu_update_icache_phys(addr);
            } else if( addr >= 0xE0000000 && addr < 0xFFFFFF00 ) {
                RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);

            uint32_t mmucr = MMIO_READ(MMU,MMUCR);
            if( (mmucr & MMUCR_AT) == 0 ) {
                mmu_update_icache_phys(addr);

            if( (mmucr & MMUCR_SV) == 0 )
                entryNo = mmu_itlb_lookup_vpn_asid( addr );
                entryNo = mmu_itlb_lookup_vpn( addr );
        if( addr & 0x80000000 ) {
            RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);

            uint32_t mmucr = MMIO_READ(MMU,MMUCR);
            if( (mmucr & MMUCR_AT) == 0 ) {
                mmu_update_icache_phys(addr);

            entryNo = mmu_itlb_lookup_vpn_asid( addr );

            if( entryNo != -1 && (mmu_itlb[entryNo].flags & TLB_USERMODE) == 0 ) {
                RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);

        RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
        RAISE_TLB_MULTIHIT_ERROR(addr);

    sh4_icache.page_ppa = mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask;
    sh4_icache.page = mem_get_region( sh4_icache.page_ppa );
    if( sh4_icache.page == NULL ) {
        sh4_icache.page_vma = -1;
        sh4_icache.page_vma = mmu_itlb[entryNo].vpn & mmu_itlb[entryNo].mask;
        sh4_icache.mask = mmu_itlb[entryNo].mask;
/**
 * Translate address for disassembly purposes (ie performs an instruction
 * lookup) - does not raise exceptions or modify any state, and ignores
 * protection bits. Returns the translated address, or MMU_VMA_ERROR
 * on translation failure.
 */
sh4addr_t FASTCALL mmu_vma_to_phys_disasm( sh4vma_t vma )
    if( vma & 0x80000000 ) {
        if( vma < 0xC0000000 ) {
            /* P1, P2 and P4 regions are pass-through (no translation) */
            return VMA_TO_EXT_ADDR(vma);
        } else if( vma >= 0xE0000000 && vma < 0xFFFFFF00 ) {
            /* Not translatable */
            return MMU_VMA_ERROR;

    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( (mmucr & MMUCR_AT) == 0 ) {
        return VMA_TO_EXT_ADDR(vma);

    int entryNo = mmu_itlb_lookup_vpn( vma );
    if( entryNo == -2 ) {
        entryNo = mmu_itlb_lookup_vpn_asid( vma );
        return MMU_VMA_ERROR;
    return (mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask) |
           (vma & (~mmu_itlb[entryNo].mask));
/********************** TLB Direct-Access Regions ***************************/
#ifdef HAVE_FRAME_ADDRESS
#define EXCEPTION_EXIT() do{ *(((void * volatile *)__builtin_frame_address(0))+1) = exc; } while(0)
#else
#define EXCEPTION_EXIT() sh4_core_exit(CORE_EXIT_EXCEPTION)
#endif
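/* The HAVE_FRAME_ADDRESS variant works by overwriting the saved return
 * address (one word above the frame pointer) with 'exc', so the accessor
 * returns straight into the exception unwind path instead of to the
 * translated caller. This obviously assumes a standard frame layout, hence
 * the fallback to a plain sh4_core_exit() when frame addresses are
 * unavailable. */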
#define ITLB_ENTRY(addr) ((addr>>7)&0x03)

int32_t FASTCALL mmu_itlb_addr_read( sh4addr_t addr )
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    return ent->vpn | ent->asid | (ent->flags & TLB_VALID);

void FASTCALL mmu_itlb_addr_write( sh4addr_t addr, uint32_t val )
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    ent->vpn = val & 0xFFFFFC00;
    ent->asid = val & 0x000000FF;
    ent->flags = (ent->flags & ~(TLB_VALID)) | (val&TLB_VALID);

int32_t FASTCALL mmu_itlb_data_read( sh4addr_t addr )
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    return (ent->ppn & 0x1FFFFC00) | ent->flags;

void FASTCALL mmu_itlb_data_write( sh4addr_t addr, uint32_t val )
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    ent->ppn = val & 0x1FFFFC00;
    ent->flags = val & 0x000001DA;
    ent->mask = get_tlb_size_mask(val);
    if( ent->ppn >= 0x1C000000 )
        ent->ppn |= 0xE0000000;
#define UTLB_ENTRY(addr) ((addr>>8)&0x3F)
#define UTLB_ASSOC(addr) (addr&0x80)
#define UTLB_DATA2(addr) (addr&0x00800000)
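/* P4 UTLB address decoding: bits 13:8 select the entry, bit 7 flags an
 * associative access, and bit 23 selects the second data array (PCMCIA
 * assistance bits) over the first. */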
int32_t FASTCALL mmu_utlb_addr_read( sh4addr_t addr )
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    return ent->vpn | ent->asid | (ent->flags & TLB_VALID) |
           ((ent->flags & TLB_DIRTY)<<7);
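/* Address-array read format: VPN in bits 31:10, D in bit 9 (TLB_DIRTY is
 * flag bit 2, hence the <<7), V in bit 8 and ASID in bits 7:0 - the same
 * layout mmu_utlb_addr_write decodes on the way back in. */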
int32_t FASTCALL mmu_utlb_data_read( sh4addr_t addr )
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    if( UTLB_DATA2(addr) ) {
        return (ent->ppn&0x1FFFFC00) | ent->flags;

/**
 * Find a UTLB entry for the associative TLB write - same as the normal
 * lookup but ignores the valid bit.
 */
static inline int mmu_utlb_lookup_assoc( uint32_t vpn, uint32_t asid )
    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
            ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
            ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                fprintf( stderr, "TLB Multi hit: %d %d\n", result, i );

/**
 * Find an ITLB entry for the associative TLB write - same as the normal
 * lookup but ignores the valid bit.
 */
static inline int mmu_itlb_lookup_assoc( uint32_t vpn, uint32_t asid )
    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
            ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
            ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
void FASTCALL mmu_utlb_addr_write( sh4addr_t addr, uint32_t val, void *exc )
    if( UTLB_ASSOC(addr) ) {
        int utlb = mmu_utlb_lookup_assoc( val, mmu_asid );
            struct utlb_entry *ent = &mmu_utlb[utlb];
            uint32_t old_flags = ent->flags;
            ent->flags = ent->flags & ~(TLB_DIRTY|TLB_VALID);
            ent->flags |= (val & TLB_VALID);
            ent->flags |= ((val & 0x200)>>7);
            if( IS_TLB_ENABLED() && ((old_flags^ent->flags) & (TLB_VALID|TLB_DIRTY)) != 0 ) {
                if( old_flags & TLB_VALID )
                    mmu_utlb_remove_entry( utlb );
                if( ent->flags & TLB_VALID )
                    mmu_utlb_insert_entry( utlb );

        int itlb = mmu_itlb_lookup_assoc( val, mmu_asid );
            struct itlb_entry *ent = &mmu_itlb[itlb];
            ent->flags = (ent->flags & (~TLB_VALID)) | (val & TLB_VALID);

        if( itlb == -2 || utlb == -2 ) {
            RAISE_TLB_MULTIHIT_ERROR(addr); /* FIXME: should this only be raised if TLB is enabled? */

        struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
        if( IS_TLB_ENABLED() && ent->flags & TLB_VALID )
            mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
        ent->vpn = (val & 0xFFFFFC00);
        ent->asid = (val & 0xFF);
        ent->flags = (ent->flags & ~(TLB_DIRTY|TLB_VALID));
        ent->flags |= (val & TLB_VALID);
        ent->flags |= ((val & 0x200)>>7);
        if( IS_TLB_ENABLED() && ent->flags & TLB_VALID )
            mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
void FASTCALL mmu_utlb_data_write( sh4addr_t addr, uint32_t val )
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    if( UTLB_DATA2(addr) ) {
        ent->pcmcia = val & 0x0000000F;
        if( IS_TLB_ENABLED() && ent->flags & TLB_VALID )
            mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
        ent->ppn = (val & 0x1FFFFC00);
        ent->flags = (val & 0x000001FF);
        ent->mask = get_tlb_size_mask(val);
        if( IS_TLB_ENABLED() && ent->flags & TLB_VALID )
            mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
struct mem_region_fn p4_region_itlb_addr = {
        mmu_itlb_addr_read, mmu_itlb_addr_write,
        mmu_itlb_addr_read, mmu_itlb_addr_write,
        mmu_itlb_addr_read, mmu_itlb_addr_write,
        unmapped_read_burst, unmapped_write_burst,
        unmapped_prefetch, mmu_itlb_addr_read };
struct mem_region_fn p4_region_itlb_data = {
        mmu_itlb_data_read, mmu_itlb_data_write,
        mmu_itlb_data_read, mmu_itlb_data_write,
        mmu_itlb_data_read, mmu_itlb_data_write,
        unmapped_read_burst, unmapped_write_burst,
        unmapped_prefetch, mmu_itlb_data_read };
struct mem_region_fn p4_region_utlb_addr = {
        mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
        mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
        mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
        unmapped_read_burst, unmapped_write_burst,
        unmapped_prefetch, mmu_utlb_addr_read };
struct mem_region_fn p4_region_utlb_data = {
        mmu_utlb_data_read, mmu_utlb_data_write,
        mmu_utlb_data_read, mmu_utlb_data_write,
        mmu_utlb_data_read, mmu_utlb_data_write,
        unmapped_read_burst, unmapped_write_burst,
        unmapped_prefetch, mmu_utlb_data_read };
/********************** Error regions **************************/

static void FASTCALL address_error_read( sh4addr_t addr, void *exc )
    RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);

static void FASTCALL address_error_read_for_write( sh4addr_t addr, void *exc )
    RAISE_MEM_ERROR(EXC_DATA_ADDR_WRITE, addr);

static void FASTCALL address_error_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
    RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);

static void FASTCALL address_error_write( sh4addr_t addr, uint32_t val, void *exc )
    RAISE_MEM_ERROR(EXC_DATA_ADDR_WRITE, addr);

static void FASTCALL tlb_miss_read( sh4addr_t addr, void *exc )
    RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);

static void FASTCALL tlb_miss_read_for_write( sh4addr_t addr, void *exc )
    RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, addr);

static void FASTCALL tlb_miss_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
    RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);

static void FASTCALL tlb_miss_write( sh4addr_t addr, uint32_t val, void *exc )
    RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, addr);

static int32_t FASTCALL tlb_protected_read( sh4addr_t addr, void *exc )
    RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);

static int32_t FASTCALL tlb_protected_read_for_write( sh4addr_t addr, void *exc )
    RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, addr);

static int32_t FASTCALL tlb_protected_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
    RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);

static void FASTCALL tlb_protected_write( sh4addr_t addr, uint32_t val, void *exc )
    RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, addr);

static void FASTCALL tlb_initial_write( sh4addr_t addr, uint32_t val, void *exc )
    RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, addr);

static int32_t FASTCALL tlb_initial_read_for_write( sh4addr_t addr, void *exc )
    RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, addr);

static int32_t FASTCALL tlb_multi_hit_read( sh4addr_t addr, void *exc )
    sh4_raise_tlb_multihit(addr);

static int32_t FASTCALL tlb_multi_hit_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
    sh4_raise_tlb_multihit(addr);

static void FASTCALL tlb_multi_hit_write( sh4addr_t addr, uint32_t val, void *exc )
    sh4_raise_tlb_multihit(addr);
/* Note: Per sec 4.6.4 of the SH7750 manual, SQ
 */
struct mem_region_fn mem_region_address_error = {
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
        unmapped_prefetch, (mem_read_fn_t)address_error_read_for_write };

struct mem_region_fn mem_region_tlb_miss = {
        (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
        (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
        (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
        (mem_read_burst_fn_t)tlb_miss_read_burst, (mem_write_burst_fn_t)tlb_miss_write,
        unmapped_prefetch, (mem_read_fn_t)tlb_miss_read_for_write };

struct mem_region_fn mem_region_tlb_protected = {
        (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
        (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
        (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
        (mem_read_burst_fn_t)tlb_protected_read_burst, (mem_write_burst_fn_t)tlb_protected_write,
        unmapped_prefetch, (mem_read_fn_t)tlb_protected_read_for_write };

struct mem_region_fn mem_region_tlb_multihit = {
        (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
        (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
        (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
        (mem_read_burst_fn_t)tlb_multi_hit_read_burst, (mem_write_burst_fn_t)tlb_multi_hit_write,
        (mem_prefetch_fn_t)tlb_multi_hit_read, (mem_read_fn_t)tlb_multi_hit_read };
/* Store-queue regions */
/* These are a bit of a pain - the first 8 fields are controlled by SQMD, while
 * the final (prefetch) is controlled by the actual TLB settings (plus SQMD in
 * some cases), in contrast to the ordinary fields above.
 *
 * There is probably a simpler way to do this.
 */
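/* Concretely: the privileged map always sees p4_region_storequeue, while
 * mmu_set_storequeue_protected() switches the user map between
 * p4_region_storequeue and the sqmd (address-error) variants, and the
 * *_miss/_protected/_multihit rows stand in for the prefetch outcome when
 * the TLB is enabled. */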
struct mem_region_fn p4_region_storequeue = {
        ccn_storequeue_read_long, ccn_storequeue_write_long,
        unmapped_read_long, unmapped_write_long, /* TESTME: Officially only long access is supported */
        unmapped_read_long, unmapped_write_long,
        unmapped_read_burst, unmapped_write_burst,
        ccn_storequeue_prefetch, unmapped_read_long };

struct mem_region_fn p4_region_storequeue_miss = {
        ccn_storequeue_read_long, ccn_storequeue_write_long,
        unmapped_read_long, unmapped_write_long, /* TESTME: Officially only long access is supported */
        unmapped_read_long, unmapped_write_long,
        unmapped_read_burst, unmapped_write_burst,
        (mem_prefetch_fn_t)tlb_miss_read, unmapped_read_long };

struct mem_region_fn p4_region_storequeue_multihit = {
        ccn_storequeue_read_long, ccn_storequeue_write_long,
        unmapped_read_long, unmapped_write_long, /* TESTME: Officially only long access is supported */
        unmapped_read_long, unmapped_write_long,
        unmapped_read_burst, unmapped_write_burst,
        (mem_prefetch_fn_t)tlb_multi_hit_read, unmapped_read_long };

struct mem_region_fn p4_region_storequeue_protected = {
        ccn_storequeue_read_long, ccn_storequeue_write_long,
        unmapped_read_long, unmapped_write_long,
        unmapped_read_long, unmapped_write_long,
        unmapped_read_burst, unmapped_write_burst,
        (mem_prefetch_fn_t)tlb_protected_read, unmapped_read_long };

struct mem_region_fn p4_region_storequeue_sqmd = {
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
        (mem_prefetch_fn_t)address_error_read, (mem_read_fn_t)address_error_read_for_write };

struct mem_region_fn p4_region_storequeue_sqmd_miss = {
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
        (mem_prefetch_fn_t)tlb_miss_read, (mem_read_fn_t)address_error_read_for_write };

struct mem_region_fn p4_region_storequeue_sqmd_multihit = {
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
        (mem_prefetch_fn_t)tlb_multi_hit_read, (mem_read_fn_t)address_error_read_for_write };

struct mem_region_fn p4_region_storequeue_sqmd_protected = {
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
        (mem_prefetch_fn_t)tlb_protected_read, (mem_read_fn_t)address_error_read_for_write };
.