/**
 * SH4 MMU implementation based on address space page maps. This module
 * is responsible for all address decoding functions.
 *
 * Copyright (c) 2005 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#define MODULE sh4_module

#include <stdio.h>
#include <assert.h>

#include "sh4/sh4mmio.h"
#include "sh4/sh4core.h"
#include "sh4/sh4trans.h"
#include "dreamcast.h"
#define RAISE_TLB_ERROR(code, vpn) sh4_raise_tlb_exception(code, vpn)
#define RAISE_MEM_ERROR(code, vpn) \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
    sh4_raise_exception(code);
#define RAISE_TLB_MULTIHIT_ERROR(vpn) sh4_raise_tlb_multihit(vpn)

/* An entry is a 1K entry if it's one of the mmu_utlb_1k_pages entries */
#define IS_1K_PAGE_ENTRY(ent)  ( ((uintptr_t)(((struct utlb_1k_entry *)ent) - &mmu_utlb_1k_pages[0])) < UTLB_ENTRY_COUNT )
/* Primary address space (used directly by SH4 cores) */
mem_region_fn_t *sh4_address_space;
mem_region_fn_t *sh4_user_address_space;

/* Accessed from the UTLB accessor methods */
uint32_t mmu_urc;
uint32_t mmu_urb;
static gboolean mmu_urc_overflow; /* If true, urc was set >= urb */

static struct itlb_entry mmu_itlb[ITLB_ENTRY_COUNT];
static struct utlb_entry mmu_utlb[UTLB_ENTRY_COUNT];
static struct utlb_page_entry mmu_utlb_pages[UTLB_ENTRY_COUNT];
static uint32_t mmu_lrui;
static uint32_t mmu_asid; // current asid
static struct utlb_default_regions *mmu_user_storequeue_regions;

/* Structures for 1K page handling */
static struct utlb_1k_entry mmu_utlb_1k_pages[UTLB_ENTRY_COUNT];
static int mmu_utlb_1k_free_list[UTLB_ENTRY_COUNT];
static int mmu_utlb_1k_free_index;
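
/* Illustrative only (not used by the emulator): an explicit equivalent of the
 * IS_1K_PAGE_ENTRY() macro above. An entry is a 1K entry precisely when its
 * address lies inside the mmu_utlb_1k_pages[] array; the macro performs the
 * same bounds test with a single unsigned comparison on the element offset.
 */
static inline gboolean mmu_is_1k_page_entry( mem_region_fn_t ent )
{
    struct utlb_1k_entry *p = (struct utlb_1k_entry *)ent;
    return p >= &mmu_utlb_1k_pages[0] && p < &mmu_utlb_1k_pages[UTLB_ENTRY_COUNT];
}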
/* Function prototypes */
static void mmu_invalidate_tlb();
static void mmu_utlb_register_all();
static void mmu_utlb_remove_entry(int);
static void mmu_utlb_insert_entry(int);
static void mmu_register_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn );
static void mmu_register_user_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn );
static void mmu_set_tlb_enabled( int tlb_on );
static void mmu_set_tlb_asid( uint32_t asid );
static void mmu_set_storequeue_protected( int protected, int tlb_on );
static gboolean mmu_utlb_map_pages( mem_region_fn_t priv_page, mem_region_fn_t user_page, sh4addr_t start_addr, int npages );
static void mmu_utlb_remap_pages( gboolean remap_priv, gboolean remap_user, int entryNo );
static gboolean mmu_utlb_unmap_pages( gboolean unmap_priv, gboolean unmap_user, sh4addr_t start_addr, int npages );
static gboolean mmu_ext_page_remapped( sh4addr_t page, mem_region_fn_t fn, void *user_data );
static void mmu_utlb_1k_init();
static struct utlb_1k_entry *mmu_utlb_1k_alloc();
static void mmu_utlb_1k_free( struct utlb_1k_entry *entry );
static void mmu_fix_urc();

static void FASTCALL tlb_miss_read( sh4addr_t addr, void *exc );
static int32_t FASTCALL tlb_protected_read( sh4addr_t addr, void *exc );
static void FASTCALL tlb_protected_write( sh4addr_t addr, uint32_t val, void *exc );
static void FASTCALL tlb_initial_write( sh4addr_t addr, uint32_t val, void *exc );
static uint32_t get_tlb_size_mask( uint32_t flags );
static uint32_t get_tlb_size_pages( uint32_t flags );
#define DEFAULT_REGIONS 0
#define DEFAULT_STOREQUEUE_REGIONS 1
#define DEFAULT_STOREQUEUE_SQMD_REGIONS 2

static struct utlb_default_regions mmu_default_regions[3] = {
        { &mem_region_tlb_miss, &mem_region_tlb_protected, &mem_region_tlb_multihit },
        { &p4_region_storequeue_miss, &p4_region_storequeue_protected, &p4_region_storequeue_multihit },
        { &p4_region_storequeue_sqmd_miss, &p4_region_storequeue_sqmd_protected, &p4_region_storequeue_sqmd_multihit } };

#define IS_STOREQUEUE_PROTECTED() (mmu_user_storequeue_regions == &mmu_default_regions[DEFAULT_STOREQUEUE_SQMD_REGIONS])
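
/* The rows above supply, for each class of mapping, the {tlb_miss, tlb_prot,
 * tlb_multihit} fallback handlers installed when no valid TLB entry covers a
 * page: one set for ordinary memory, and one each for store-queue space with
 * SQMD clear (user access allowed) and SQMD set (user access raises an
 * address error). */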
/*********************** Module public functions ****************************/

/**
 * Allocate memory for the address space maps, and initialize them according
 * to the default (reset) values. (TLB is disabled by default)
 */
void MMU_init()
{
    sh4_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 256 );
    sh4_user_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 256 );
    mmu_user_storequeue_regions = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];

    mmu_set_tlb_enabled(0);
    mmu_register_user_mem_region( 0x80000000, 0x00000000, &mem_region_address_error );
    mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );

    /* Setup P4 tlb/cache access regions */
    mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
    mmu_register_mem_region( 0xE4000000, 0xF0000000, &mem_region_unmapped );
    mmu_register_mem_region( 0xF0000000, 0xF1000000, &p4_region_icache_addr );
    mmu_register_mem_region( 0xF1000000, 0xF2000000, &p4_region_icache_data );
    mmu_register_mem_region( 0xF2000000, 0xF3000000, &p4_region_itlb_addr );
    mmu_register_mem_region( 0xF3000000, 0xF4000000, &p4_region_itlb_data );
    mmu_register_mem_region( 0xF4000000, 0xF5000000, &p4_region_ocache_addr );
    mmu_register_mem_region( 0xF5000000, 0xF6000000, &p4_region_ocache_data );
    mmu_register_mem_region( 0xF6000000, 0xF7000000, &p4_region_utlb_addr );
    mmu_register_mem_region( 0xF7000000, 0xF8000000, &p4_region_utlb_data );
    mmu_register_mem_region( 0xF8000000, 0x00000000, &mem_region_unmapped );

    /* Setup P4 control region */
    mmu_register_mem_region( 0xFF000000, 0xFF001000, &mmio_region_MMU.fn );
    mmu_register_mem_region( 0xFF100000, 0xFF101000, &mmio_region_PMM.fn );
    mmu_register_mem_region( 0xFF200000, 0xFF201000, &mmio_region_UBC.fn );
    mmu_register_mem_region( 0xFF800000, 0xFF801000, &mmio_region_BSC.fn );
    mmu_register_mem_region( 0xFF900000, 0xFFA00000, &mem_region_unmapped ); // SDMR2 + SDMR3
    mmu_register_mem_region( 0xFFA00000, 0xFFA01000, &mmio_region_DMAC.fn );
    mmu_register_mem_region( 0xFFC00000, 0xFFC01000, &mmio_region_CPG.fn );
    mmu_register_mem_region( 0xFFC80000, 0xFFC81000, &mmio_region_RTC.fn );
    mmu_register_mem_region( 0xFFD00000, 0xFFD01000, &mmio_region_INTC.fn );
    mmu_register_mem_region( 0xFFD80000, 0xFFD81000, &mmio_region_TMU.fn );
    mmu_register_mem_region( 0xFFE00000, 0xFFE01000, &mmio_region_SCI.fn );
    mmu_register_mem_region( 0xFFE80000, 0xFFE81000, &mmio_region_SCIF.fn );
    mmu_register_mem_region( 0xFFF00000, 0xFFF01000, &mem_region_unmapped ); // H-UDI

    register_mem_page_remapped_hook( mmu_ext_page_remapped, NULL );
    mmu_utlb_1k_init();

    /* Ensure the code regions are executable */
    mem_unprotect( mmu_utlb_pages, sizeof(mmu_utlb_pages) );
    mem_unprotect( mmu_utlb_1k_pages, sizeof(mmu_utlb_1k_pages) );
}

void MMU_reset()
{
    mmio_region_MMU_write( CCR, 0 );
    mmio_region_MMU_write( MMUCR, 0 );
}
void MMU_save_state( FILE *f )
{
    fwrite( &mmu_itlb, sizeof(mmu_itlb), 1, f );
    fwrite( &mmu_utlb, sizeof(mmu_utlb), 1, f );
    fwrite( &mmu_urc, sizeof(mmu_urc), 1, f );
    fwrite( &mmu_urb, sizeof(mmu_urb), 1, f );
    fwrite( &mmu_lrui, sizeof(mmu_lrui), 1, f );
    fwrite( &mmu_asid, sizeof(mmu_asid), 1, f );
}
int MMU_load_state( FILE *f )
{
    if( fread( &mmu_itlb, sizeof(mmu_itlb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_utlb, sizeof(mmu_utlb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_urc, sizeof(mmu_urc), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_urb, sizeof(mmu_urb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_lrui, sizeof(mmu_lrui), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_asid, sizeof(mmu_asid), 1, f ) != 1 ) {
        return 1;
    }

    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    mmu_urc_overflow = mmu_urc >= mmu_urb;
    mmu_set_tlb_enabled(mmucr&MMUCR_AT);
    mmu_set_storequeue_protected(mmucr&MMUCR_SQMD, mmucr&MMUCR_AT);
    return 0;
}
/**
 * LDTLB instruction implementation. Copies PTEH, PTEL and PTEA into the UTLB
 * entry identified by MMUCR.URC. Does not modify MMUCR or the ITLB.
 */
void MMU_ldtlb()
{
    mmu_fix_urc(); /* Fold URC back into range before using it as an index */
    if( mmu_utlb[mmu_urc].flags & TLB_VALID )
        mmu_utlb_remove_entry( mmu_urc );
    mmu_utlb[mmu_urc].vpn = MMIO_READ(MMU, PTEH) & 0xFFFFFC00;
    mmu_utlb[mmu_urc].asid = MMIO_READ(MMU, PTEH) & 0x000000FF;
    mmu_utlb[mmu_urc].ppn = MMIO_READ(MMU, PTEL) & 0x1FFFFC00;
    mmu_utlb[mmu_urc].flags = MMIO_READ(MMU, PTEL) & 0x000001FF;
    mmu_utlb[mmu_urc].pcmcia = MMIO_READ(MMU, PTEA);
    mmu_utlb[mmu_urc].mask = get_tlb_size_mask(mmu_utlb[mmu_urc].flags);
    if( mmu_utlb[mmu_urc].flags & TLB_VALID )
        mmu_utlb_insert_entry( mmu_urc );
}
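
/* For reference, the SH7750 register fields unpacked by MMU_ldtlb() above:
 *   PTEH: bits [31:10] VPN, bits [7:0] ASID
 *   PTEL: bits [28:10] PPN, bits [8:0] V/SZ/PR/C/D/SH/WT protection flags
 * hence the masks 0xFFFFFC00, 0x000000FF, 0x1FFFFC00 and 0x000001FF. */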
MMIO_REGION_READ_FN( MMU, reg )
{
    reg &= 0xFFF;
    switch( reg ) {
    case MMUCR:
        mmu_fix_urc();
        return MMIO_READ( MMU, MMUCR) | (mmu_urc<<10) | ((mmu_urb&0x3F)<<18) | (mmu_lrui<<26);
    default:
        return MMIO_READ( MMU, reg );
    }
}
MMIO_REGION_WRITE_FN( MMU, reg, val )
{
    uint32_t tmp;
    reg &= 0xFFF;
    switch(reg) {
    case PTEH:
        if( (val & 0xFF) != mmu_asid ) {
            mmu_set_tlb_asid( val&0xFF );
            sh4_icache.page_vma = -1; // invalidate icache as asid has changed
        }
        break;
    case MMUCR:
        if( val & MMUCR_TI ) {
            mmu_invalidate_tlb();
        }
        mmu_urc = (val >> 10) & 0x3F;
        mmu_urb = (val >> 18) & 0x3F;
        if( mmu_urb == 0 ) {
            mmu_urb = 0x40;
        } else if( mmu_urc >= mmu_urb ) {
            mmu_urc_overflow = TRUE;
        }
        mmu_lrui = (val >> 26) & 0x3F;
        tmp = MMIO_READ( MMU, MMUCR );
        if( (val ^ tmp) & (MMUCR_SQMD) ) {
            mmu_set_storequeue_protected( val & MMUCR_SQMD, val&MMUCR_AT );
        }
        if( (val ^ tmp) & (MMUCR_AT) ) {
            // AT flag has changed state - flush the xlt cache as all bets
            // are off now. We also need to force an immediate exit from the
            // current translation block.
            mmu_set_tlb_enabled( val & MMUCR_AT );
            MMIO_WRITE( MMU, MMUCR, val );
            sh4_core_exit( CORE_EXIT_FLUSH_ICACHE );
            xlat_flush_cache(); // If we're not running, flush the cache anyway
        }
        break;
    case CCR:
        CCN_set_cache_control( val );
        break;
    case PMCR1:
        /* Note that if the high bit is set, this appears to reset the machine.
         * Not emulating this behaviour yet until we know why... */
        PMM_write_control(0, val);
        break;
    case PMCR2:
        PMM_write_control(1, val);
        break;
    default:
        break;
    }
    MMIO_WRITE( MMU, reg, val );
}
/********************** 1K Page handling ***********************/
/* Since we use 4K pages as our native page size, 1K pages need a bit of extra
 * effort to manage - we justify this on the basis that most programs won't
 * actually use 1K pages, so we may as well optimize for the common case.
 *
 * Implementation uses an intermediate page entry (the utlb_1k_entry) that
 * redirects requests to the 'real' page entry. These are allocated on an
 * as-needed basis, and returned to the pool when all subpages are empty.
 */
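
/* A minimal sketch of the redirection, assuming the utlb_1k_entry layout used
 * here (fn/user_fn vtables plus four subpage pointers): a long read to a 4K
 * page that has been split into 1K subpages dispatches roughly as
 *
 *   struct utlb_1k_entry *ent = (struct utlb_1k_entry *)sh4_address_space[addr>>12];
 *   ent->subpages[(addr>>10)&0x03]->read_long(addr);
 *
 * i.e. one extra indirection on address bits [11:10], paid only for split
 * pages. */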
static void mmu_utlb_1k_init()
{
    int i;
    for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
        mmu_utlb_1k_free_list[i] = i;
        mmu_utlb_1k_init_vtable( &mmu_utlb_1k_pages[i] );
    }
    mmu_utlb_1k_free_index = 0;
}

static struct utlb_1k_entry *mmu_utlb_1k_alloc()
{
    assert( mmu_utlb_1k_free_index < UTLB_ENTRY_COUNT );
    /* Allocate through the free list so entries released out of order are
     * reused correctly */
    struct utlb_1k_entry *entry = &mmu_utlb_1k_pages[mmu_utlb_1k_free_list[mmu_utlb_1k_free_index++]];
    return entry;
}

static void mmu_utlb_1k_free( struct utlb_1k_entry *ent )
{
    unsigned int entryNo = ent - &mmu_utlb_1k_pages[0];
    assert( entryNo < UTLB_ENTRY_COUNT );
    assert( mmu_utlb_1k_free_index > 0 );
    mmu_utlb_1k_free_list[--mmu_utlb_1k_free_index] = entryNo;
}
/********************** Address space maintenance *************************/

/**
 * MMU accessor functions just increment URC - fixup here if necessary
 */
static inline void mmu_fix_urc()
{
    if( mmu_urc_overflow ) {
        if( mmu_urc >= 0x40 ) {
            mmu_urc_overflow = FALSE;
            mmu_urc = 0;
        }
    } else {
        mmu_urc %= mmu_urb;
    }
}
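
/* Example: with URB set to 0x10 the hardware replacement counter is expected
 * to wrap within 0..0x0F. The fast-path accessors blindly increment mmu_urc,
 * so this fixup folds it back into range before anything reads it or uses it
 * as a UTLB index. */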
static void mmu_register_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn )
{
    /* Note: end <= start relies on unsigned wraparound to cover the
     * remainder of the address space */
    int count = (end - start) >> 12;
    mem_region_fn_t *ptr = &sh4_address_space[start>>12];
    while( count-- > 0 ) {
        *ptr++ = fn;
    }
}

static void mmu_register_user_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn )
{
    int count = (end - start) >> 12;
    mem_region_fn_t *ptr = &sh4_user_address_space[start>>12];
    while( count-- > 0 ) {
        *ptr++ = fn;
    }
}

static gboolean mmu_ext_page_remapped( sh4addr_t page, mem_region_fn_t fn, void *user_data )
{
    uint32_t i;
    if( (MMIO_READ(MMU,MMUCR)) & MMUCR_AT ) {
        /* TLB on: only the untranslated mirrors change */
        sh4_address_space[(page|0x80000000)>>12] = fn; /* Direct map to P1 and P2 */
        sh4_address_space[(page|0xA0000000)>>12] = fn;
        /* Scan UTLB and update any direct-referencing entries */
    } else {
        /* Direct map to U0, P0, P1, P2, P3 */
        for( i=0; i<= 0xC0000000; i+= 0x20000000 ) {
            sh4_address_space[(page|i)>>12] = fn;
        }
        for( i=0; i < 0x80000000; i+= 0x20000000 ) {
            sh4_user_address_space[(page|i)>>12] = fn;
        }
    }
    return TRUE;
}
static void mmu_set_tlb_enabled( int tlb_on )
{
    mem_region_fn_t *ptr, *uptr;
    int i;

    /* Reset the storequeue area */

    if( tlb_on ) {
        mmu_register_mem_region(0x00000000, 0x80000000, &mem_region_tlb_miss );
        mmu_register_mem_region(0xC0000000, 0xE0000000, &mem_region_tlb_miss );
        mmu_register_user_mem_region(0x00000000, 0x80000000, &mem_region_tlb_miss );

        /* Default SQ prefetch goes to TLB miss (?) */
        mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue_miss );
        mmu_register_user_mem_region( 0xE0000000, 0xE4000000, mmu_user_storequeue_regions->tlb_miss );
        mmu_utlb_register_all();
    } else {
        for( i=0, ptr = sh4_address_space; i<7; i++, ptr += LXDREAM_PAGE_TABLE_ENTRIES ) {
            memcpy( ptr, ext_address_space, sizeof(mem_region_fn_t) * LXDREAM_PAGE_TABLE_ENTRIES );
        }
        for( i=0, ptr = sh4_user_address_space; i<4; i++, ptr += LXDREAM_PAGE_TABLE_ENTRIES ) {
            memcpy( ptr, ext_address_space, sizeof(mem_region_fn_t) * LXDREAM_PAGE_TABLE_ENTRIES );
        }

        mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
        if( IS_STOREQUEUE_PROTECTED() ) {
            mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue_sqmd );
        } else {
            mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
        }
    }
}

/**
 * Flip the SQMD switch - this is rather expensive, so will need to be changed if
 * anything expects to do this frequently.
 */
static void mmu_set_storequeue_protected( int protected, int tlb_on )
{
    mem_region_fn_t nontlb_region;
    int i;

    if( protected ) {
        mmu_user_storequeue_regions = &mmu_default_regions[DEFAULT_STOREQUEUE_SQMD_REGIONS];
        nontlb_region = &p4_region_storequeue_sqmd;
    } else {
        mmu_user_storequeue_regions = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
        nontlb_region = &p4_region_storequeue;
    }

    if( tlb_on ) {
        mmu_register_user_mem_region( 0xE0000000, 0xE4000000, mmu_user_storequeue_regions->tlb_miss );
        for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
            if( (mmu_utlb[i].vpn & 0xFC000000) == 0xE0000000 ) {
                mmu_utlb_insert_entry(i);
            }
        }
    } else {
        mmu_register_user_mem_region( 0xE0000000, 0xE4000000, nontlb_region );
    }
}
static void mmu_set_tlb_asid( uint32_t asid )
{
    /* Scan for pages that need to be remapped */
    int i;
    if( IS_SV_ENABLED() ) {
        /* SV=1: only user pages are ASID-dependent */
        for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
            if( mmu_utlb[i].flags & TLB_VALID ) {
                if( (mmu_utlb[i].flags & TLB_SHARE) == 0 ) {
                    if( mmu_utlb[i].asid == mmu_asid ) { // Matches old ASID - unmap out
                        if( !mmu_utlb_unmap_pages( FALSE, TRUE, mmu_utlb[i].vpn&mmu_utlb[i].mask,
                                get_tlb_size_pages(mmu_utlb[i].flags) ) )
                            mmu_utlb_remap_pages( FALSE, TRUE, i );
                    } else if( mmu_utlb[i].asid == asid ) { // Matches new ASID - map in
                        mmu_utlb_map_pages( NULL, mmu_utlb_pages[i].user_fn,
                                mmu_utlb[i].vpn&mmu_utlb[i].mask,
                                get_tlb_size_pages(mmu_utlb[i].flags) );
                    }
                }
            }
        }
    } else {
        // Remap both privileged and user pages
        for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
            if( mmu_utlb[i].flags & TLB_VALID ) {
                if( (mmu_utlb[i].flags & TLB_SHARE) == 0 ) {
                    if( mmu_utlb[i].asid == mmu_asid ) { // Matches old ASID - unmap out
                        if( !mmu_utlb_unmap_pages( TRUE, TRUE, mmu_utlb[i].vpn&mmu_utlb[i].mask,
                                get_tlb_size_pages(mmu_utlb[i].flags) ) )
                            mmu_utlb_remap_pages( TRUE, TRUE, i );
                    } else if( mmu_utlb[i].asid == asid ) { // Matches new ASID - map in
                        mmu_utlb_map_pages( &mmu_utlb_pages[i].fn, mmu_utlb_pages[i].user_fn,
                                mmu_utlb[i].vpn&mmu_utlb[i].mask,
                                get_tlb_size_pages(mmu_utlb[i].flags) );
                    }
                }
            }
        }
    }

    mmu_asid = asid;
}
static uint32_t get_tlb_size_mask( uint32_t flags )
{
    switch( flags & TLB_SIZE_MASK ) {
    case TLB_SIZE_1K: return MASK_1K;
    case TLB_SIZE_4K: return MASK_4K;
    case TLB_SIZE_64K: return MASK_64K;
    case TLB_SIZE_1M: return MASK_1M;
    default: return 0; /* Unreachable */
    }
}
static uint32_t get_tlb_size_pages( uint32_t flags )
{
    switch( flags & TLB_SIZE_MASK ) {
    case TLB_SIZE_1K: return 0;
    case TLB_SIZE_4K: return 1;
    case TLB_SIZE_64K: return 16;
    case TLB_SIZE_1M: return 256;
    default: return 0; /* Unreachable */
    }
}
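
/* Worked example: a 64K entry covers 16 native 4K pages with mask 0xFFFF0000
 * (assuming the usual MASK_* definitions), so inserting it writes 16
 * consecutive slots of sh4_address_space[]. 1K entries report 0 pages and
 * are instead routed through the subpage machinery in mmu_utlb_map_pages()
 * below. */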
/**
 * Add a new TLB entry mapping to the address space table. If any of the pages
 * are already mapped, they are mapped to the TLB multi-hit page instead.
 * @return FALSE if a TLB multihit situation was detected, otherwise TRUE.
 */
static gboolean mmu_utlb_map_pages( mem_region_fn_t priv_page, mem_region_fn_t user_page, sh4addr_t start_addr, int npages )
{
    mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
    mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
    struct utlb_default_regions *privdefs = &mmu_default_regions[DEFAULT_REGIONS];
    struct utlb_default_regions *userdefs = privdefs;

    gboolean mapping_ok = TRUE;
    int i;

    if( (start_addr & 0xFC000000) == 0xE0000000 ) {
        /* Storequeue mapping */
        privdefs = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
        userdefs = mmu_user_storequeue_regions;
    } else if( (start_addr & 0xE0000000) == 0xC0000000 ) {
        user_page = NULL; /* No user access to P3 region */
    } else if( start_addr >= 0x80000000 ) {
        return TRUE; // No mapping - legal but meaningless
    }

    if( npages == 0 ) { /* 1K page */
        struct utlb_1k_entry *ent;
        int i, idx = (start_addr >> 10) & 0x03;
        if( IS_1K_PAGE_ENTRY(*ptr) ) {
            ent = (struct utlb_1k_entry *)*ptr;
        } else {
            ent = mmu_utlb_1k_alloc();
            /* New 1K struct - init to previous contents of region */
            for( i=0; i<4; i++ ) {
                ent->subpages[i] = *ptr;
                ent->user_subpages[i] = *uptr;
            }
            *ptr = &ent->fn;
            *uptr = &ent->user_fn;
        }

        if( priv_page != NULL ) {
            if( ent->subpages[idx] == privdefs->tlb_miss ) {
                ent->subpages[idx] = priv_page;
            } else {
                mapping_ok = FALSE;
                ent->subpages[idx] = privdefs->tlb_multihit;
            }
        }
        if( user_page != NULL ) {
            if( ent->user_subpages[idx] == userdefs->tlb_miss ) {
                ent->user_subpages[idx] = user_page;
            } else {
                mapping_ok = FALSE;
                ent->user_subpages[idx] = userdefs->tlb_multihit;
            }
        }
    } else {
        if( priv_page != NULL ) {
            /* Privileged mapping only */
            for( i=0; i<npages; i++ ) {
                if( *ptr == privdefs->tlb_miss ) {
                    *ptr++ = priv_page;
                } else {
                    mapping_ok = FALSE;
                    *ptr++ = privdefs->tlb_multihit;
                }
            }
        }
        if( user_page != NULL ) {
            /* User mapping only (eg ASID change remap w/ SV=1) */
            for( i=0; i<npages; i++ ) {
                if( *uptr == userdefs->tlb_miss ) {
                    *uptr++ = user_page;
                } else {
                    mapping_ok = FALSE;
                    *uptr++ = userdefs->tlb_multihit;
                }
            }
        }
    }

    return mapping_ok;
}
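
/* Design note: each 4K slot in the address-space tables holds exactly one
 * mem_region_fn, so two overlapping TLB entries cannot both be represented.
 * Any page claimed twice is pointed at the tlb_multihit region instead,
 * which raises the multi-hit exception on access, matching the behaviour of
 * the real hardware. */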
/**
 * Remap any pages within the region covered by entryNo, but not including
 * entryNo itself. This is used to reestablish pages that were previously
 * covered by a multi-hit exception region when one of the pages is removed.
 */
static void mmu_utlb_remap_pages( gboolean remap_priv, gboolean remap_user, int entryNo )
{
    int mask = mmu_utlb[entryNo].mask;
    uint32_t remap_addr = mmu_utlb[entryNo].vpn & mask;
    int i;

    for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
        if( i != entryNo && (mmu_utlb[i].vpn & mask) == remap_addr && (mmu_utlb[i].flags & TLB_VALID) ) {
            /* Overlapping region */
            mem_region_fn_t priv_page = (remap_priv ? &mmu_utlb_pages[i].fn : NULL);
            mem_region_fn_t user_page = (remap_user ? mmu_utlb_pages[i].user_fn : NULL);
            uint32_t start_addr;
            int npages;

            if( mmu_utlb[i].mask >= mask ) {
                /* entry is no larger than the area we're replacing - map completely */
                start_addr = mmu_utlb[i].vpn & mmu_utlb[i].mask;
                npages = get_tlb_size_pages( mmu_utlb[i].flags );
            } else {
                /* Otherwise map subset - region covered by removed page */
                start_addr = remap_addr;
                npages = get_tlb_size_pages( mmu_utlb[entryNo].flags );
            }

            if( (mmu_utlb[i].flags & TLB_SHARE) || mmu_utlb[i].asid == mmu_asid ) {
                mmu_utlb_map_pages( priv_page, user_page, start_addr, npages );
            } else if( IS_SV_ENABLED() ) {
                mmu_utlb_map_pages( priv_page, NULL, start_addr, npages );
            }
        }
    }
}
/**
 * Remove a previous TLB mapping (replacing them with the TLB miss region).
 * @return FALSE if any pages were previously mapped to the TLB multihit page,
 * otherwise TRUE. In either case, all pages in the region are cleared to TLB miss.
 */
static gboolean mmu_utlb_unmap_pages( gboolean unmap_priv, gboolean unmap_user, sh4addr_t start_addr, int npages )
{
    mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
    mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
    struct utlb_default_regions *privdefs = &mmu_default_regions[DEFAULT_REGIONS];
    struct utlb_default_regions *userdefs = privdefs;

    gboolean unmapping_ok = TRUE;
    int i;

    if( (start_addr & 0xFC000000) == 0xE0000000 ) {
        /* Storequeue mapping */
        privdefs = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
        userdefs = mmu_user_storequeue_regions;
    } else if( (start_addr & 0xE0000000) == 0xC0000000 ) {
        unmap_user = FALSE; /* No user access to P3 region */
    } else if( start_addr >= 0x80000000 ) {
        return TRUE; // No mapping - legal but meaningless
    }

    if( npages == 0 ) { // 1K page
        assert( IS_1K_PAGE_ENTRY( *ptr ) );
        struct utlb_1k_entry *ent = (struct utlb_1k_entry *)*ptr;
        int i, idx = (start_addr >> 10) & 0x03, mergeable=1;
        if( ent->subpages[idx] == privdefs->tlb_multihit ) {
            unmapping_ok = FALSE;
        }
        if( unmap_priv )
            ent->subpages[idx] = privdefs->tlb_miss;
        if( unmap_user )
            ent->user_subpages[idx] = userdefs->tlb_miss;

        /* If all 4 subpages have the same content, merge them together and
         * release the 1K entry
         */
        mem_region_fn_t priv_page = ent->subpages[0];
        mem_region_fn_t user_page = ent->user_subpages[0];
        for( i=1; i<4; i++ ) {
            if( priv_page != ent->subpages[i] || user_page != ent->user_subpages[i] ) {
                mergeable = 0;
                break;
            }
        }
        if( mergeable ) {
            mmu_utlb_1k_free(ent);
            *ptr = priv_page;
            *uptr = user_page;
        }
    } else {
        if( unmap_priv ) {
            /* Privileged (un)mapping */
            for( i=0; i<npages; i++ ) {
                if( *ptr == privdefs->tlb_multihit ) {
                    unmapping_ok = FALSE;
                }
                *ptr++ = privdefs->tlb_miss;
            }
        }
        if( unmap_user ) {
            /* User (un)mapping */
            for( i=0; i<npages; i++ ) {
                if( *uptr == userdefs->tlb_multihit ) {
                    unmapping_ok = FALSE;
                }
                *uptr++ = userdefs->tlb_miss;
            }
        }
    }

    return unmapping_ok;
}
static void mmu_utlb_insert_entry( int entry )
{
    struct utlb_entry *ent = &mmu_utlb[entry];
    mem_region_fn_t page = &mmu_utlb_pages[entry].fn;
    mem_region_fn_t upage;
    sh4addr_t start_addr = ent->vpn & ent->mask;
    int npages = get_tlb_size_pages(ent->flags);

    if( (start_addr & 0xFC000000) == 0xE0000000 ) {
        /* Store queue mappings are a bit different - normal access is fixed to
         * the store queue register block, and we only map prefetches through
         * the TLB
         */
        mmu_utlb_init_storequeue_vtable( ent, &mmu_utlb_pages[entry] );

        if( (ent->flags & TLB_USERMODE) == 0 ) {
            upage = mmu_user_storequeue_regions->tlb_prot;
        } else if( IS_STOREQUEUE_PROTECTED() ) {
            upage = &p4_region_storequeue_sqmd;
        } else {
            upage = page;
        }
    } else {
        if( (ent->flags & TLB_USERMODE) == 0 ) {
            upage = &mem_region_tlb_protected;
        } else {
            upage = page;
        }

        if( (ent->flags & TLB_WRITABLE) == 0 ) {
            page->write_long = (mem_write_fn_t)tlb_protected_write;
            page->write_word = (mem_write_fn_t)tlb_protected_write;
            page->write_byte = (mem_write_fn_t)tlb_protected_write;
            page->write_burst = (mem_write_burst_fn_t)tlb_protected_write;
            mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], FALSE );
        } else if( (ent->flags & TLB_DIRTY) == 0 ) {
            page->write_long = (mem_write_fn_t)tlb_initial_write;
            page->write_word = (mem_write_fn_t)tlb_initial_write;
            page->write_byte = (mem_write_fn_t)tlb_initial_write;
            page->write_burst = (mem_write_burst_fn_t)tlb_initial_write;
            mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], FALSE );
        } else {
            mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], TRUE );
        }
    }

    mmu_utlb_pages[entry].user_fn = upage;

    /* Is page visible? */
    if( (ent->flags & TLB_SHARE) || ent->asid == mmu_asid ) {
        mmu_utlb_map_pages( page, upage, start_addr, npages );
    } else if( IS_SV_ENABLED() ) {
        mmu_utlb_map_pages( page, NULL, start_addr, npages );
    }
}
static void mmu_utlb_remove_entry( int entry )
{
    gboolean unmap_user;
    struct utlb_entry *ent = &mmu_utlb[entry];
    sh4addr_t start_addr = ent->vpn&ent->mask;
    mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
    mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
    int npages = get_tlb_size_pages(ent->flags);

    if( (ent->flags & TLB_SHARE) || ent->asid == mmu_asid ) {
        unmap_user = TRUE;
    } else if( IS_SV_ENABLED() ) {
        unmap_user = FALSE;
    } else {
        return; // Not mapped
    }

    gboolean clean_unmap = mmu_utlb_unmap_pages( TRUE, unmap_user, start_addr, npages );

    if( !clean_unmap ) {
        mmu_utlb_remap_pages( TRUE, unmap_user, entry );
    }
}
static void mmu_utlb_register_all()
{
    int i;
    for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
        if( mmu_utlb[i].flags & TLB_VALID )
            mmu_utlb_insert_entry( i );
    }
}

static void mmu_invalidate_tlb()
{
    int i;
    for( i=0; i<ITLB_ENTRY_COUNT; i++ ) {
        mmu_itlb[i].flags &= (~TLB_VALID);
    }
    if( IS_TLB_ENABLED() ) {
        for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
            if( mmu_utlb[i].flags & TLB_VALID ) {
                mmu_utlb_remove_entry( i );
            }
        }
    }
    for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
        mmu_utlb[i].flags &= (~TLB_VALID);
    }
}
/******************************************************************************/
/* MMU TLB address translation                                                */
/******************************************************************************/

/**
 * Translate a 32-bit address into a UTLB entry number. Does not check for
 * page protection etc.
 * @return the entryNo if found, -1 if not found, and -2 for a multi-hit.
 */
int mmu_utlb_entry_for_vpn( uint32_t vpn )
{
    mem_region_fn_t fn = sh4_address_space[vpn>>12];
    if( fn >= &mmu_utlb_pages[0].fn && fn < &mmu_utlb_pages[UTLB_ENTRY_COUNT].fn ) {
        return ((struct utlb_page_entry *)fn) - &mmu_utlb_pages[0];
    } else if( fn == &mem_region_tlb_multihit ) {
        return -2;
    } else {
        return -1;
    }
}
/**
 * Perform the actual utlb lookup w/ asid matching.
 * Possible outcomes are:
 *   0..63 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant UTLB entry, or an error.
 */
static inline int mmu_utlb_lookup_vpn_asid( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    mmu_urc++;
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
        mmu_urc = 0;
    }

    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
                ((mmu_utlb[i].flags & TLB_SHARE) || mmu_asid == mmu_utlb[i].asid) &&
                ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }
    return result;
}
/**
 * Perform the actual utlb lookup matching on vpn only.
 * Possible outcomes are:
 *   0..63 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant UTLB entry, or an error.
 */
static inline int mmu_utlb_lookup_vpn( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    mmu_urc++;
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
        mmu_urc = 0;
    }

    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
                ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }
    return result;
}
/**
 * Update the ITLB by replacing the LRU entry with the specified UTLB entry.
 * @return the number (0-3) of the replaced entry.
 */
static int inline mmu_itlb_update_from_utlb( int entryNo )
{
    int replace;
    /* Determine entry to replace based on lrui */
    if( (mmu_lrui & 0x38) == 0x38 ) {
        replace = 0;
        mmu_lrui = mmu_lrui & 0x07;
    } else if( (mmu_lrui & 0x26) == 0x06 ) {
        replace = 1;
        mmu_lrui = (mmu_lrui & 0x19) | 0x20;
    } else if( (mmu_lrui & 0x15) == 0x01 ) {
        replace = 2;
        mmu_lrui = (mmu_lrui & 0x3E) | 0x14;
    } else { // Note - gets invalid entries too
        replace = 3;
        mmu_lrui = (mmu_lrui | 0x0B);
    }

    mmu_itlb[replace].vpn = mmu_utlb[entryNo].vpn;
    mmu_itlb[replace].mask = mmu_utlb[entryNo].mask;
    mmu_itlb[replace].ppn = mmu_utlb[entryNo].ppn;
    mmu_itlb[replace].asid = mmu_utlb[entryNo].asid;
    mmu_itlb[replace].flags = mmu_utlb[entryNo].flags & 0x01DA;
    return replace;
}
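
/* The 6-bit LRUI field records the pairwise access order of the 4 ITLB
 * entries (one bit per pair, per the SH7750 manual); each test above matches
 * the bit pattern that proves its entry is least recently used, and the
 * update re-marks the replaced entry as most recent. */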
/**
 * Perform the actual itlb lookup w/ asid protection
 * Possible outcomes are:
 *   0..3 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant ITLB entry, or an error.
 */
static inline int mmu_itlb_lookup_vpn_asid( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
                ((mmu_itlb[i].flags & TLB_SHARE) || mmu_asid == mmu_itlb[i].asid) &&
                ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    if( result == -1 ) {
        int utlbEntry = mmu_utlb_entry_for_vpn( vpn );
        if( utlbEntry < 0 ) {
            return utlbEntry;
        } else {
            return mmu_itlb_update_from_utlb( utlbEntry );
        }
    }

    switch( result ) {
    case 0: mmu_lrui = (mmu_lrui & 0x07); break;
    case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
    case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
    case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
    }

    return result;
}
/**
 * Perform the actual itlb lookup on vpn only
 * Possible outcomes are:
 *   0..3 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant ITLB entry, or an error.
 */
static inline int mmu_itlb_lookup_vpn( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
                ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    if( result == -1 ) {
        int utlbEntry = mmu_utlb_lookup_vpn( vpn );
        if( utlbEntry < 0 ) {
            return utlbEntry;
        } else {
            return mmu_itlb_update_from_utlb( utlbEntry );
        }
    }

    switch( result ) {
    case 0: mmu_lrui = (mmu_lrui & 0x07); break;
    case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
    case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
    case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
    }

    return result;
}
/**
 * Update the icache for an untranslated address
 */
static inline void mmu_update_icache_phys( sh4addr_t addr )
{
    if( (addr & 0x1C000000) == 0x0C000000 ) {
        /* Main ram */
        sh4_icache.page_vma = addr & 0xFF000000;
        sh4_icache.page_ppa = 0x0C000000;
        sh4_icache.mask = 0xFF000000;
        sh4_icache.page = dc_main_ram;
    } else if( (addr & 0x1FE00000) == 0 ) {
        /* BIOS ROM */
        sh4_icache.page_vma = addr & 0xFFE00000;
        sh4_icache.page_ppa = 0;
        sh4_icache.mask = 0xFFE00000;
        sh4_icache.page = dc_boot_rom;
    } else {
        /* Not a supported region - invalidate the icache */
        sh4_icache.page_vma = -1;
    }
}
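
/* e.g. 0x8C001000 (a P1 mirror of main RAM) satisfies
 * (addr & 0x1C000000) == 0x0C000000 and yields a 16MB window over
 * dc_main_ram, while 0xA0000000 (the P2 mirror of the boot ROM) takes the
 * second branch. */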
/**
 * Update the sh4_icache structure to describe the page(s) containing the
 * given vma. If the address does not reference a RAM/ROM region, the icache
 * will be invalidated instead.
 * If AT is on, this method will raise TLB exceptions normally
 * (hence this method should only be used immediately prior to execution of
 * code), and otherwise will set the icache according to the matching TLB entry.
 * If AT is off, this method will set the entire referenced RAM/ROM region in
 * the icache.
 * @return TRUE if the update completed (successfully or otherwise), FALSE
 * if an exception was raised.
 */
gboolean FASTCALL mmu_update_icache( sh4vma_t addr )
{
    int entryNo;
    if( IS_SH4_PRIVMODE() ) {
        if( addr & 0x80000000 ) {
            if( addr < 0xC0000000 ) {
                /* P1, P2 and P4 regions are pass-through (no translation) */
                mmu_update_icache_phys(addr);
                return TRUE;
            } else if( addr >= 0xE0000000 && addr < 0xFFFFFF00 ) {
                RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
                return FALSE;
            }
        }

        uint32_t mmucr = MMIO_READ(MMU,MMUCR);
        if( (mmucr & MMUCR_AT) == 0 ) {
            mmu_update_icache_phys(addr);
            return TRUE;
        }

        if( (mmucr & MMUCR_SV) == 0 )
            entryNo = mmu_itlb_lookup_vpn_asid( addr );
        else
            entryNo = mmu_itlb_lookup_vpn( addr );
    } else {
        if( addr & 0x80000000 ) {
            RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
            return FALSE;
        }

        uint32_t mmucr = MMIO_READ(MMU,MMUCR);
        if( (mmucr & MMUCR_AT) == 0 ) {
            mmu_update_icache_phys(addr);
            return TRUE;
        }

        entryNo = mmu_itlb_lookup_vpn_asid( addr );

        if( entryNo != -1 && (mmu_itlb[entryNo].flags & TLB_USERMODE) == 0 ) {
            RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
            return FALSE;
        }
    }

    switch( entryNo ) {
    case -1:
        RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
        return FALSE;
    case -2:
        RAISE_TLB_MULTIHIT_ERROR(addr);
        return FALSE;
    default:
        sh4_icache.page_ppa = mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask;
        sh4_icache.page = mem_get_region( sh4_icache.page_ppa );
        if( sh4_icache.page == NULL ) {
            sh4_icache.page_vma = -1;
        } else {
            sh4_icache.page_vma = mmu_itlb[entryNo].vpn & mmu_itlb[entryNo].mask;
            sh4_icache.mask = mmu_itlb[entryNo].mask;
        }
        return TRUE;
    }
}
/**
 * Translate address for disassembly purposes (ie performs an instruction
 * lookup) - does not raise exceptions or modify any state, and ignores
 * protection bits. Returns the translated address, or MMU_VMA_ERROR
 * on translation failure.
 */
sh4addr_t FASTCALL mmu_vma_to_phys_disasm( sh4vma_t vma )
{
    if( vma & 0x80000000 ) {
        if( vma < 0xC0000000 ) {
            /* P1, P2 and P4 regions are pass-through (no translation) */
            return VMA_TO_EXT_ADDR(vma);
        } else if( vma >= 0xE0000000 && vma < 0xFFFFFF00 ) {
            /* Not translatable */
            return MMU_VMA_ERROR;
        }
    }

    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( (mmucr & MMUCR_AT) == 0 ) {
        return VMA_TO_EXT_ADDR(vma);
    }

    int entryNo = mmu_itlb_lookup_vpn( vma );
    if( entryNo == -2 ) {
        entryNo = mmu_itlb_lookup_vpn_asid( vma );
    }
    if( entryNo < 0 ) {
        return MMU_VMA_ERROR;
    } else {
        return (mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask) |
                (vma & (~mmu_itlb[entryNo].mask));
    }
}
/********************** TLB Direct-Access Regions ***************************/
#ifdef HAVE_FRAME_ADDRESS
#define EXCEPTION_EXIT() do{ *(((void **)__builtin_frame_address(0))+1) = exc; return; } while(0)
#else
#define EXCEPTION_EXIT() sh4_core_exit(CORE_EXIT_EXCEPTION)
#endif

#define ITLB_ENTRY(addr) ((addr>>7)&0x03)
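
/* Decode example: an access to 0xF2000100 in the ITLB address array selects
 * ITLB_ENTRY(0xF2000100) == (0x100>>7)&0x03 == 2, i.e. the third ITLB entry. */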
int32_t FASTCALL mmu_itlb_addr_read( sh4addr_t addr )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    return ent->vpn | ent->asid | (ent->flags & TLB_VALID);
}

void FASTCALL mmu_itlb_addr_write( sh4addr_t addr, uint32_t val )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    ent->vpn = val & 0xFFFFFC00;
    ent->asid = val & 0x000000FF;
    ent->flags = (ent->flags & ~(TLB_VALID)) | (val&TLB_VALID);
}

int32_t FASTCALL mmu_itlb_data_read( sh4addr_t addr )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    return (ent->ppn & 0x1FFFFC00) | ent->flags;
}

void FASTCALL mmu_itlb_data_write( sh4addr_t addr, uint32_t val )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    ent->ppn = val & 0x1FFFFC00;
    ent->flags = val & 0x000001DA;
    ent->mask = get_tlb_size_mask(val);
    if( ent->ppn >= 0x1C000000 )
        ent->ppn |= 0xE0000000;
}
#define UTLB_ENTRY(addr) ((addr>>8)&0x3F)
#define UTLB_ASSOC(addr) (addr&0x80)
#define UTLB_DATA2(addr) (addr&0x00800000)
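
/* e.g. for addr 0xF6003F80: UTLB_ENTRY(addr) == 0x3F (entry 63) and
 * UTLB_ASSOC(addr) is non-zero (bit 7 set), so a write to this address takes
 * the associative path in mmu_utlb_addr_write() below. */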
int32_t FASTCALL mmu_utlb_addr_read( sh4addr_t addr )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    return ent->vpn | ent->asid | (ent->flags & TLB_VALID) |
            ((ent->flags & TLB_DIRTY)<<7);
}
int32_t FASTCALL mmu_utlb_data_read( sh4addr_t addr )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    if( UTLB_DATA2(addr) ) {
        return ent->pcmcia;
    } else {
        return (ent->ppn&0x1FFFFC00) | ent->flags;
    }
}
/**
 * Find a UTLB entry for the associative TLB write - same as the normal
 * lookup but ignores the valid bit.
 */
static inline int mmu_utlb_lookup_assoc( uint32_t vpn, uint32_t asid )
{
    int result = -1;
    unsigned int i;
    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
                ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
                ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                fprintf( stderr, "TLB Multi hit: %d %d\n", result, i );
                return -2;
            }
            result = i;
        }
    }
    return result;
}
/**
 * Find an ITLB entry for the associative TLB write - same as the normal
 * lookup but ignores the valid bit.
 */
static inline int mmu_itlb_lookup_assoc( uint32_t vpn, uint32_t asid )
{
    int result = -1;
    unsigned int i;
    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
                ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
                ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }
    return result;
}
void FASTCALL mmu_utlb_addr_write( sh4addr_t addr, uint32_t val, void *exc )
{
    if( UTLB_ASSOC(addr) ) {
        int utlb = mmu_utlb_lookup_assoc( val, mmu_asid );
        if( utlb >= 0 ) {
            struct utlb_entry *ent = &mmu_utlb[utlb];
            uint32_t old_flags = ent->flags;
            ent->flags = ent->flags & ~(TLB_DIRTY|TLB_VALID);
            ent->flags |= (val & TLB_VALID);
            ent->flags |= ((val & 0x200)>>7);
            if( ((old_flags^ent->flags) & (TLB_VALID|TLB_DIRTY)) != 0 ) {
                if( old_flags & TLB_VALID )
                    mmu_utlb_remove_entry( utlb );
                if( ent->flags & TLB_VALID )
                    mmu_utlb_insert_entry( utlb );
            }
        }

        int itlb = mmu_itlb_lookup_assoc( val, mmu_asid );
        if( itlb >= 0 ) {
            struct itlb_entry *ent = &mmu_itlb[itlb];
            ent->flags = (ent->flags & (~TLB_VALID)) | (val & TLB_VALID);
        }

        if( itlb == -2 || utlb == -2 ) {
            RAISE_TLB_MULTIHIT_ERROR(addr);
            EXCEPTION_EXIT();
        }
    } else {
        struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
        if( ent->flags & TLB_VALID )
            mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
        ent->vpn = (val & 0xFFFFFC00);
        ent->asid = (val & 0xFF);
        ent->flags = (ent->flags & ~(TLB_DIRTY|TLB_VALID));
        ent->flags |= (val & TLB_VALID);
        ent->flags |= ((val & 0x200)>>7);
        if( ent->flags & TLB_VALID )
            mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
    }
}
void FASTCALL mmu_utlb_data_write( sh4addr_t addr, uint32_t val )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    if( UTLB_DATA2(addr) ) {
        ent->pcmcia = val & 0x0000000F;
    } else {
        if( ent->flags & TLB_VALID )
            mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
        ent->ppn = (val & 0x1FFFFC00);
        ent->flags = (val & 0x000001FF);
        ent->mask = get_tlb_size_mask(val);
        if( ent->flags & TLB_VALID )
            mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
    }
}
struct mem_region_fn p4_region_itlb_addr = {
        mmu_itlb_addr_read, mmu_itlb_addr_write,
        mmu_itlb_addr_read, mmu_itlb_addr_write,
        mmu_itlb_addr_read, mmu_itlb_addr_write,
        unmapped_read_burst, unmapped_write_burst,
        unmapped_prefetch };
struct mem_region_fn p4_region_itlb_data = {
        mmu_itlb_data_read, mmu_itlb_data_write,
        mmu_itlb_data_read, mmu_itlb_data_write,
        mmu_itlb_data_read, mmu_itlb_data_write,
        unmapped_read_burst, unmapped_write_burst,
        unmapped_prefetch };
struct mem_region_fn p4_region_utlb_addr = {
        mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
        mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
        mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
        unmapped_read_burst, unmapped_write_burst,
        unmapped_prefetch };
struct mem_region_fn p4_region_utlb_data = {
        mmu_utlb_data_read, mmu_utlb_data_write,
        mmu_utlb_data_read, mmu_utlb_data_write,
        mmu_utlb_data_read, mmu_utlb_data_write,
        unmapped_read_burst, unmapped_write_burst,
        unmapped_prefetch };
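
/* The handler slots in each mem_region_fn above follow the fixed vtable
 * order used throughout this module - long, word and byte read/write pairs,
 * then the burst pair, then prefetch - so the ITLB/UTLB arrays service all
 * three access widths with the same 32-bit handlers. */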
/********************** Error regions **************************/

static void FASTCALL address_error_read( sh4addr_t addr, void *exc )
{
    RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
    EXCEPTION_EXIT();
}

static void FASTCALL address_error_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
{
    RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
    EXCEPTION_EXIT();
}

static void FASTCALL address_error_write( sh4addr_t addr, uint32_t val, void *exc )
{
    RAISE_MEM_ERROR(EXC_DATA_ADDR_WRITE, addr);
    EXCEPTION_EXIT();
}

static void FASTCALL tlb_miss_read( sh4addr_t addr, void *exc )
{
    RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
    EXCEPTION_EXIT();
}

static void FASTCALL tlb_miss_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
{
    RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
    EXCEPTION_EXIT();
}

static void FASTCALL tlb_miss_write( sh4addr_t addr, uint32_t val, void *exc )
{
    RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, addr);
    EXCEPTION_EXIT();
}

static int32_t FASTCALL tlb_protected_read( sh4addr_t addr, void *exc )
{
    RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
    EXCEPTION_EXIT();
    return 0;
}

static int32_t FASTCALL tlb_protected_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
{
    RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
    EXCEPTION_EXIT();
    return 0;
}

static void FASTCALL tlb_protected_write( sh4addr_t addr, uint32_t val, void *exc )
{
    RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, addr);
    EXCEPTION_EXIT();
}

static void FASTCALL tlb_initial_write( sh4addr_t addr, uint32_t val, void *exc )
{
    RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, addr);
    EXCEPTION_EXIT();
}

static int32_t FASTCALL tlb_multi_hit_read( sh4addr_t addr, void *exc )
{
    sh4_raise_tlb_multihit(addr);
    EXCEPTION_EXIT();
    return 0;
}

static int32_t FASTCALL tlb_multi_hit_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
{
    sh4_raise_tlb_multihit(addr);
    EXCEPTION_EXIT();
    return 0;
}

static void FASTCALL tlb_multi_hit_write( sh4addr_t addr, uint32_t val, void *exc )
{
    sh4_raise_tlb_multihit(addr);
    EXCEPTION_EXIT();
}

/**
 * Note: Per sec 4.6.4 of the SH7750 manual, SQ ...
 */
struct mem_region_fn mem_region_address_error = {
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
        unmapped_prefetch };

struct mem_region_fn mem_region_tlb_miss = {
        (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
        (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
        (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
        (mem_read_burst_fn_t)tlb_miss_read_burst, (mem_write_burst_fn_t)tlb_miss_write,
        unmapped_prefetch };

struct mem_region_fn mem_region_tlb_protected = {
        (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
        (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
        (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
        (mem_read_burst_fn_t)tlb_protected_read_burst, (mem_write_burst_fn_t)tlb_protected_write,
        unmapped_prefetch };

struct mem_region_fn mem_region_tlb_multihit = {
        (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
        (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
        (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
        (mem_read_burst_fn_t)tlb_multi_hit_read_burst, (mem_write_burst_fn_t)tlb_multi_hit_write,
        (mem_prefetch_fn_t)tlb_multi_hit_read };
/* Store-queue regions */
/* These are a bit of a pain - the first 8 fields are controlled by SQMD, while
 * the final (prefetch) is controlled by the actual TLB settings (plus SQMD in
 * some cases), in contrast to the ordinary fields above.
 *
 * There is probably a simpler way to do this.
 */
struct mem_region_fn p4_region_storequeue = {
        ccn_storequeue_read_long, ccn_storequeue_write_long,
        unmapped_read_long, unmapped_write_long, /* TESTME: Officially only long access is supported */
        unmapped_read_long, unmapped_write_long,
        unmapped_read_burst, unmapped_write_burst,
        ccn_storequeue_prefetch };

struct mem_region_fn p4_region_storequeue_miss = {
        ccn_storequeue_read_long, ccn_storequeue_write_long,
        unmapped_read_long, unmapped_write_long, /* TESTME: Officially only long access is supported */
        unmapped_read_long, unmapped_write_long,
        unmapped_read_burst, unmapped_write_burst,
        (mem_prefetch_fn_t)tlb_miss_read };

struct mem_region_fn p4_region_storequeue_multihit = {
        ccn_storequeue_read_long, ccn_storequeue_write_long,
        unmapped_read_long, unmapped_write_long, /* TESTME: Officially only long access is supported */
        unmapped_read_long, unmapped_write_long,
        unmapped_read_burst, unmapped_write_burst,
        (mem_prefetch_fn_t)tlb_multi_hit_read };

struct mem_region_fn p4_region_storequeue_protected = {
        ccn_storequeue_read_long, ccn_storequeue_write_long,
        unmapped_read_long, unmapped_write_long,
        unmapped_read_long, unmapped_write_long,
        unmapped_read_burst, unmapped_write_burst,
        (mem_prefetch_fn_t)tlb_protected_read };

struct mem_region_fn p4_region_storequeue_sqmd = {
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
        (mem_prefetch_fn_t)address_error_read };

struct mem_region_fn p4_region_storequeue_sqmd_miss = {
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
        (mem_prefetch_fn_t)tlb_miss_read };

struct mem_region_fn p4_region_storequeue_sqmd_multihit = {
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
        (mem_prefetch_fn_t)tlb_multi_hit_read };

struct mem_region_fn p4_region_storequeue_sqmd_protected = {
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
        (mem_prefetch_fn_t)tlb_protected_read };