filename    | src/sh4/mmu.c
changeset   | 1202:01ae5cbad4c8
prev        | 1198:407659e01ef0
next        | 1217:677b1d85f1b4
author      | nkeynes
date        | Sun Feb 12 16:30:26 2012 +1000
permissions | -rw-r--r--
last change | Add -Werror for mregparm check, so it actually fails if mregparm isn't accepted
/**
 * $Id$
 *
 * SH4 MMU implementation based on address space page maps. This module
 * is responsible for all address decoding functions.
 *
 * Copyright (c) 2005 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#define MODULE sh4_module

#include <stdio.h>
#include <string.h>
#include <assert.h>
#include "sh4/sh4mmio.h"
#include "sh4/sh4core.h"
#include "sh4/sh4trans.h"
#include "dreamcast.h"
#include "mem.h"
#include "mmu.h"

/* An entry is a 1K entry if it's one of the mmu_utlb_1k_pages entries */
#define IS_1K_PAGE_ENTRY(ent)  ( ((uintptr_t)(((struct utlb_1k_entry *)ent) - &mmu_utlb_1k_pages[0])) < UTLB_ENTRY_COUNT )

/* Primary address space (used directly by SH4 cores) */
mem_region_fn_t *sh4_address_space;
mem_region_fn_t *sh4_user_address_space;
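
/* Illustrative sketch (not part of the build): every SH4 access resolves
 * through one of these maps at 4K granularity - the top 20 address bits
 * index a mem_region_fn_t whose function pointers perform the access. The
 * function name below is hypothetical; the read_long field name matches the
 * struct initializers later in this file.
 */
#if 0
static int32_t example_dispatch_read_long( sh4addr_t addr )
{
    mem_region_fn_t fn = sh4_address_space[addr >> 12]; /* 4K page index */
    return fn->read_long( addr );
}
#endif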

/* Accessed from the UTLB accessor methods */
uint32_t mmu_urc;
uint32_t mmu_urb;
static gboolean mmu_urc_overflow; /* If true, urc was set >= urb */

/* Module globals */
static struct itlb_entry mmu_itlb[ITLB_ENTRY_COUNT];
static struct utlb_entry mmu_utlb[UTLB_ENTRY_COUNT];
static struct utlb_page_entry mmu_utlb_pages[UTLB_ENTRY_COUNT];
static uint32_t mmu_lrui;
static uint32_t mmu_asid; // current asid
static struct utlb_default_regions *mmu_user_storequeue_regions;

/* Structures for 1K page handling */
static struct utlb_1k_entry mmu_utlb_1k_pages[UTLB_ENTRY_COUNT];
static int mmu_utlb_1k_free_list[UTLB_ENTRY_COUNT];
static int mmu_utlb_1k_free_index;

/* Function prototypes */
static void mmu_invalidate_tlb();
static void mmu_utlb_register_all();
static void mmu_utlb_remove_entry(int);
static void mmu_utlb_insert_entry(int);
static void mmu_register_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn );
static void mmu_register_user_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn );
static void mmu_set_tlb_enabled( int tlb_on );
static void mmu_set_tlb_asid( uint32_t asid );
static void mmu_set_storequeue_protected( int protected, int tlb_on );
static gboolean mmu_utlb_map_pages( mem_region_fn_t priv_page, mem_region_fn_t user_page, sh4addr_t start_addr, int npages );
static void mmu_utlb_remap_pages( gboolean remap_priv, gboolean remap_user, int entryNo );
static gboolean mmu_utlb_unmap_pages( gboolean unmap_priv, gboolean unmap_user, sh4addr_t start_addr, int npages );
static gboolean mmu_ext_page_remapped( sh4addr_t page, mem_region_fn_t fn, void *user_data );
static void mmu_utlb_1k_init();
static struct utlb_1k_entry *mmu_utlb_1k_alloc();
static void mmu_utlb_1k_free( struct utlb_1k_entry *entry );
static int mmu_read_urc();

static void FASTCALL tlb_miss_read( sh4addr_t addr, void *exc );
static int32_t FASTCALL tlb_protected_read( sh4addr_t addr, void *exc );
static void FASTCALL tlb_protected_write( sh4addr_t addr, uint32_t val, void *exc );
static int32_t FASTCALL tlb_protected_read_for_write( sh4addr_t addr, void *exc );
static void FASTCALL tlb_initial_write( sh4addr_t addr, uint32_t val, void *exc );
static int32_t FASTCALL tlb_initial_read_for_write( sh4addr_t addr, void *exc );
static uint32_t get_tlb_size_mask( uint32_t flags );
static uint32_t get_tlb_size_pages( uint32_t flags );

#define DEFAULT_REGIONS 0
#define DEFAULT_STOREQUEUE_REGIONS 1
#define DEFAULT_STOREQUEUE_SQMD_REGIONS 2

static struct utlb_default_regions mmu_default_regions[3] = {
        { &mem_region_tlb_miss, &mem_region_tlb_protected, &mem_region_tlb_multihit },
        { &p4_region_storequeue_miss, &p4_region_storequeue_protected, &p4_region_storequeue_multihit },
        { &p4_region_storequeue_sqmd_miss, &p4_region_storequeue_sqmd_protected, &p4_region_storequeue_sqmd_multihit } };

#define IS_STOREQUEUE_PROTECTED() (mmu_user_storequeue_regions == &mmu_default_regions[DEFAULT_STOREQUEUE_SQMD_REGIONS])

/*********************** Module public functions ****************************/

/**
 * Allocate memory for the address space maps, and initialize them according
 * to the default (reset) values. (TLB is disabled by default)
 */
void MMU_init()
{
    sh4_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 256 );
    sh4_user_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 256 );
    mmu_user_storequeue_regions = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];

    mmu_set_tlb_enabled(0);
    mmu_register_user_mem_region( 0x80000000, 0x00000000, &mem_region_address_error );
    mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );

    /* Setup P4 tlb/cache access regions */
    mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
    mmu_register_mem_region( 0xE4000000, 0xF0000000, &mem_region_unmapped );
    mmu_register_mem_region( 0xF0000000, 0xF1000000, &p4_region_icache_addr );
    mmu_register_mem_region( 0xF1000000, 0xF2000000, &p4_region_icache_data );
    mmu_register_mem_region( 0xF2000000, 0xF3000000, &p4_region_itlb_addr );
    mmu_register_mem_region( 0xF3000000, 0xF4000000, &p4_region_itlb_data );
    mmu_register_mem_region( 0xF4000000, 0xF5000000, &p4_region_ocache_addr );
    mmu_register_mem_region( 0xF5000000, 0xF6000000, &p4_region_ocache_data );
    mmu_register_mem_region( 0xF6000000, 0xF7000000, &p4_region_utlb_addr );
    mmu_register_mem_region( 0xF7000000, 0xF8000000, &p4_region_utlb_data );
    mmu_register_mem_region( 0xF8000000, 0x00000000, &mem_region_unmapped );

    /* Setup P4 control region */
    mmu_register_mem_region( 0xFF000000, 0xFF001000, &mmio_region_MMU.fn );
    mmu_register_mem_region( 0xFF100000, 0xFF101000, &mmio_region_PMM.fn );
    mmu_register_mem_region( 0xFF200000, 0xFF201000, &mmio_region_UBC.fn );
    mmu_register_mem_region( 0xFF800000, 0xFF801000, &mmio_region_BSC.fn );
    mmu_register_mem_region( 0xFF900000, 0xFFA00000, &mem_region_unmapped ); // SDMR2 + SDMR3
    mmu_register_mem_region( 0xFFA00000, 0xFFA01000, &mmio_region_DMAC.fn );
    mmu_register_mem_region( 0xFFC00000, 0xFFC01000, &mmio_region_CPG.fn );
    mmu_register_mem_region( 0xFFC80000, 0xFFC81000, &mmio_region_RTC.fn );
    mmu_register_mem_region( 0xFFD00000, 0xFFD01000, &mmio_region_INTC.fn );
    mmu_register_mem_region( 0xFFD80000, 0xFFD81000, &mmio_region_TMU.fn );
    mmu_register_mem_region( 0xFFE00000, 0xFFE01000, &mmio_region_SCI.fn );
    mmu_register_mem_region( 0xFFE80000, 0xFFE81000, &mmio_region_SCIF.fn );
    mmu_register_mem_region( 0xFFF00000, 0xFFF01000, &mem_region_unmapped ); // H-UDI

    register_mem_page_remapped_hook( mmu_ext_page_remapped, NULL );
    mmu_utlb_1k_init();

    /* Ensure the code regions are executable. It might be more portable to
     * mmap these at runtime rather than using static declarations.
     */
    mem_unprotect( mmu_utlb_pages, sizeof(mmu_utlb_pages) );
    mem_unprotect( mmu_utlb_1k_pages, sizeof(mmu_utlb_1k_pages) );
}

void MMU_reset()
{
    mmio_region_MMU_write( CCR, 0 );
    mmio_region_MMU_write( MMUCR, 0 );
}

void MMU_save_state( FILE *f )
{
    mmu_read_urc();
    fwrite( &mmu_itlb, sizeof(mmu_itlb), 1, f );
    fwrite( &mmu_utlb, sizeof(mmu_utlb), 1, f );
    fwrite( &mmu_urc, sizeof(mmu_urc), 1, f );
    fwrite( &mmu_urb, sizeof(mmu_urb), 1, f );
    fwrite( &mmu_lrui, sizeof(mmu_lrui), 1, f );
    fwrite( &mmu_asid, sizeof(mmu_asid), 1, f );
}

int MMU_load_state( FILE *f )
{
    if( fread( &mmu_itlb, sizeof(mmu_itlb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_utlb, sizeof(mmu_utlb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_urc, sizeof(mmu_urc), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_urb, sizeof(mmu_urb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_lrui, sizeof(mmu_lrui), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_asid, sizeof(mmu_asid), 1, f ) != 1 ) {
        return 1;
    }

    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    mmu_urc_overflow = mmu_urc >= mmu_urb;
    mmu_set_tlb_enabled(mmucr&MMUCR_AT);
    mmu_set_storequeue_protected(mmucr&MMUCR_SQMD, mmucr&MMUCR_AT);
    return 0;
}

/**
 * LDTLB instruction implementation. Copies PTEH, PTEL and PTEA into the UTLB
 * entry identified by MMUCR.URC. Does not modify MMUCR or the ITLB.
 */
void MMU_ldtlb()
{
    int urc = mmu_read_urc();
    if( IS_TLB_ENABLED() && mmu_utlb[urc].flags & TLB_VALID )
        mmu_utlb_remove_entry( urc );
    mmu_utlb[urc].vpn = MMIO_READ(MMU, PTEH) & 0xFFFFFC00;
    mmu_utlb[urc].asid = MMIO_READ(MMU, PTEH) & 0x000000FF;
    mmu_utlb[urc].ppn = MMIO_READ(MMU, PTEL) & 0x1FFFFC00;
    mmu_utlb[urc].flags = MMIO_READ(MMU, PTEL) & 0x000001FF;
    mmu_utlb[urc].pcmcia = MMIO_READ(MMU, PTEA);
    mmu_utlb[urc].mask = get_tlb_size_mask(mmu_utlb[urc].flags);
    if( IS_TLB_ENABLED() && mmu_utlb[urc].flags & TLB_VALID )
        mmu_utlb_insert_entry( urc );
}

MMIO_REGION_READ_FN( MMU, reg )
{
    reg &= 0xFFF;
    switch( reg ) {
    case MMUCR:
        return MMIO_READ( MMU, MMUCR) | (mmu_read_urc()<<10) | ((mmu_urb&0x3F)<<18) | (mmu_lrui<<26);
    default:
        return MMIO_READ( MMU, reg );
    }
}
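
/* For reference: the MMUCR layout assumed above, per the SH7750 programming
 * manual - LRUI in bits 31:26, URB in 23:18, URC in 15:10, SQMD bit 9,
 * SV bit 8, TI bit 2, AT bit 0. Only the 0x301 bits (SQMD/SV/AT) are kept in
 * the backing MMIO store; the counters live in module globals, so a read is
 * equivalent to this (illustrative) expression:
 */
#if 0
uint32_t mmucr = (MMIO_READ(MMU, MMUCR) & 0x301)
               | (mmu_read_urc() << 10)
               | ((mmu_urb & 0x3F) << 18)
               | (mmu_lrui << 26);
#endif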

MMIO_REGION_READ_DEFSUBFNS(MMU)

MMIO_REGION_WRITE_FN( MMU, reg, val )
{
    uint32_t tmp;
    reg &= 0xFFF;
    switch(reg) {
    case SH4VER:
        return;
    case PTEH:
        val &= 0xFFFFFCFF;
        if( (val & 0xFF) != mmu_asid ) {
            mmu_set_tlb_asid( val&0xFF );
        }
        break;
    case PTEL:
        val &= 0x1FFFFDFF;
        break;
    case PTEA:
        val &= 0x0000000F;
        break;
    case TRA:
        val &= 0x000003FC;
        break;
    case EXPEVT:
    case INTEVT:
        val &= 0x00000FFF;
        break;
    case MMUCR:
        if( val & MMUCR_TI ) {
            mmu_invalidate_tlb();
        }
        mmu_urc = (val >> 10) & 0x3F;
        mmu_urb = (val >> 18) & 0x3F;
        if( mmu_urb == 0 ) {
            mmu_urb = 0x40;
        } else if( mmu_urc >= mmu_urb ) {
            mmu_urc_overflow = TRUE;
        }
        mmu_lrui = (val >> 26) & 0x3F;
        val &= 0x00000301;
        tmp = MMIO_READ( MMU, MMUCR );
        if( (val ^ tmp) & (MMUCR_SQMD) ) {
            mmu_set_storequeue_protected( val & MMUCR_SQMD, val&MMUCR_AT );
        }
        if( (val ^ tmp) & (MMUCR_AT) ) {
            // AT flag has changed state - flush the xlt cache as all bets
            // are off now. We also need to force an immediate exit from the
            // current block
            mmu_set_tlb_enabled( val & MMUCR_AT );
            MMIO_WRITE( MMU, MMUCR, val );
            sh4_core_exit( CORE_EXIT_FLUSH_ICACHE );
            xlat_flush_cache(); // If we're not running, flush the cache anyway
        }
        break;
    case CCR:
        CCN_set_cache_control( val );
        val &= 0x81A7;
        break;
    case MMUUNK1:
        /* Note that if the high bit is set, this appears to reset the machine.
         * Not emulating this behaviour yet until we know why...
         */
        val &= 0x00010007;
        break;
    case QACR0:
    case QACR1:
        val &= 0x0000001C;
        break;
    case PMCR1:
        PMM_write_control(0, val);
        val &= 0x0000C13F;
        break;
    case PMCR2:
        PMM_write_control(1, val);
        val &= 0x0000C13F;
        break;
    default:
        break;
    }
    MMIO_WRITE( MMU, reg, val );
}

/********************** 1K Page handling ***********************/
/* Since we use 4K pages as our native page size, 1K pages need a bit of extra
 * effort to manage - we justify this on the basis that most programs won't
 * actually use 1K pages, so we may as well optimize for the common case.
 *
 * Implementation uses an intermediate page entry (the utlb_1k_entry) that
 * redirects requests to the 'real' page entry. These are allocated on an
 * as-needed basis, and returned to the pool when all subpages are empty.
 */
static void mmu_utlb_1k_init()
{
    int i;
    for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
        mmu_utlb_1k_free_list[i] = i;
        mmu_utlb_1k_init_vtable( &mmu_utlb_1k_pages[i] );
    }
    mmu_utlb_1k_free_index = 0;
}

static struct utlb_1k_entry *mmu_utlb_1k_alloc()
{
    assert( mmu_utlb_1k_free_index < UTLB_ENTRY_COUNT );
    struct utlb_1k_entry *entry = &mmu_utlb_1k_pages[mmu_utlb_1k_free_list[mmu_utlb_1k_free_index++]];
    return entry;
}

static void mmu_utlb_1k_free( struct utlb_1k_entry *ent )
{
    unsigned int entryNo = ent - &mmu_utlb_1k_pages[0];
    assert( entryNo < UTLB_ENTRY_COUNT );
    assert( mmu_utlb_1k_free_index > 0 );
    mmu_utlb_1k_free_list[--mmu_utlb_1k_free_index] = entryNo;
}

/********************** Address space maintenance *************************/

/**
 * MMU accessor functions just increment URC - fixup here if necessary
 */
static int mmu_read_urc()
{
    if( mmu_urc_overflow ) {
        if( mmu_urc >= 0x40 ) {
            mmu_urc_overflow = FALSE;
            mmu_urc -= 0x40;
            mmu_urc %= mmu_urb;
        }
    } else {
        mmu_urc %= mmu_urb;
    }
    return mmu_urc;
}
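
/* Worked example (illustrative): suppose URB = 0x30 and software writes
 * URC = 0x35. Since 0x35 >= URB, mmu_urc_overflow is set and reads return the
 * raw counter (0x35, 0x36, ... 0x3F) as it increments. Once the counter
 * reaches 0x40 the fixup above subtracts 0x40, clears the overflow flag, and
 * resumes normal modulo-URB wrapping. */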

static void mmu_register_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn )
{
    int count = (end - start) >> 12;
    mem_region_fn_t *ptr = &sh4_address_space[start>>12];
    while( count-- > 0 ) {
        *ptr++ = fn;
    }
}

static void mmu_register_user_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn )
{
    int count = (end - start) >> 12;
    mem_region_fn_t *ptr = &sh4_user_address_space[start>>12];
    while( count-- > 0 ) {
        *ptr++ = fn;
    }
}

static gboolean mmu_ext_page_remapped( sh4addr_t page, mem_region_fn_t fn, void *user_data )
{
    unsigned int i;
    if( (MMIO_READ(MMU,MMUCR)) & MMUCR_AT ) {
        /* TLB on */
        sh4_address_space[(page|0x80000000)>>12] = fn; /* Direct map to P1 and P2 */
        sh4_address_space[(page|0xA0000000)>>12] = fn;
        /* Scan UTLB and update any direct-referencing entries */
    } else {
        /* Direct map to U0, P0, P1, P2, P3 */
        for( i=0; i<= 0xC0000000; i+= 0x20000000 ) {
            sh4_address_space[(page|i)>>12] = fn;
        }
        for( i=0; i < 0x80000000; i+= 0x20000000 ) {
            sh4_user_address_space[(page|i)>>12] = fn;
        }
    }
    return TRUE;
}

static void mmu_set_tlb_enabled( int tlb_on )
{
    mem_region_fn_t *ptr;
    int i;

    /* Reset the storequeue area */
    if( tlb_on ) {
        mmu_register_mem_region(0x00000000, 0x80000000, &mem_region_tlb_miss );
        mmu_register_mem_region(0xC0000000, 0xE0000000, &mem_region_tlb_miss );
        mmu_register_user_mem_region(0x00000000, 0x80000000, &mem_region_tlb_miss );

        /* Default SQ prefetch goes to TLB miss (?) */
        mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue_miss );
        mmu_register_user_mem_region( 0xE0000000, 0xE4000000, mmu_user_storequeue_regions->tlb_miss );
        mmu_utlb_register_all();
    } else {
        for( i=0, ptr = sh4_address_space; i<7; i++, ptr += LXDREAM_PAGE_TABLE_ENTRIES ) {
            memcpy( ptr, ext_address_space, sizeof(mem_region_fn_t) * LXDREAM_PAGE_TABLE_ENTRIES );
        }
        for( i=0, ptr = sh4_user_address_space; i<4; i++, ptr += LXDREAM_PAGE_TABLE_ENTRIES ) {
            memcpy( ptr, ext_address_space, sizeof(mem_region_fn_t) * LXDREAM_PAGE_TABLE_ENTRIES );
        }

        mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
        if( IS_STOREQUEUE_PROTECTED() ) {
            mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue_sqmd );
        } else {
            mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
        }
    }
}

/**
 * Flip the SQMD switch - this is rather expensive, so will need to be changed if
 * anything expects to do this frequently.
 */
static void mmu_set_storequeue_protected( int protected, int tlb_on )
{
    mem_region_fn_t nontlb_region;
    int i;

    if( protected ) {
        mmu_user_storequeue_regions = &mmu_default_regions[DEFAULT_STOREQUEUE_SQMD_REGIONS];
        nontlb_region = &p4_region_storequeue_sqmd;
    } else {
        mmu_user_storequeue_regions = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
        nontlb_region = &p4_region_storequeue;
    }

    if( tlb_on ) {
        mmu_register_user_mem_region( 0xE0000000, 0xE4000000, mmu_user_storequeue_regions->tlb_miss );
        for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
            if( (mmu_utlb[i].vpn & 0xFC000000) == 0xE0000000 ) {
                mmu_utlb_insert_entry(i);
            }
        }
    } else {
        mmu_register_user_mem_region( 0xE0000000, 0xE4000000, nontlb_region );
    }
}

static void mmu_set_tlb_asid( uint32_t asid )
{
    if( IS_TLB_ENABLED() ) {
        /* Scan for pages that need to be remapped */
        int i;
        if( IS_SV_ENABLED() ) {
            for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
                if( mmu_utlb[i].asid == mmu_asid &&
                    (mmu_utlb[i].flags & (TLB_VALID|TLB_SHARE)) == (TLB_VALID) ) {
                    // Matches old ASID - unmap out
                    if( !mmu_utlb_unmap_pages( FALSE, TRUE, mmu_utlb[i].vpn&mmu_utlb[i].mask,
                            get_tlb_size_pages(mmu_utlb[i].flags) ) )
                        mmu_utlb_remap_pages( FALSE, TRUE, i );
                }
            }
            for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
                if( mmu_utlb[i].asid == asid &&
                    (mmu_utlb[i].flags & (TLB_VALID|TLB_SHARE)) == (TLB_VALID) ) {
                    // Matches new ASID - map in
                    mmu_utlb_map_pages( NULL, mmu_utlb_pages[i].user_fn,
                            mmu_utlb[i].vpn&mmu_utlb[i].mask,
                            get_tlb_size_pages(mmu_utlb[i].flags) );
                }
            }
        } else {
            // Remap both Priv+user pages
            for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
                if( mmu_utlb[i].asid == mmu_asid &&
                    (mmu_utlb[i].flags & (TLB_VALID|TLB_SHARE)) == (TLB_VALID) ) {
                    if( !mmu_utlb_unmap_pages( TRUE, TRUE, mmu_utlb[i].vpn&mmu_utlb[i].mask,
                            get_tlb_size_pages(mmu_utlb[i].flags) ) )
                        mmu_utlb_remap_pages( TRUE, TRUE, i );
                }
            }
            for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
                if( mmu_utlb[i].asid == asid &&
                    (mmu_utlb[i].flags & (TLB_VALID|TLB_SHARE)) == (TLB_VALID) ) {
                    mmu_utlb_map_pages( &mmu_utlb_pages[i].fn, mmu_utlb_pages[i].user_fn,
                            mmu_utlb[i].vpn&mmu_utlb[i].mask,
                            get_tlb_size_pages(mmu_utlb[i].flags) );
                }
            }
        }
        sh4_icache.page_vma = -1; // invalidate icache as asid has changed
    }
    mmu_asid = asid;
}

static uint32_t get_tlb_size_mask( uint32_t flags )
{
    switch( flags & TLB_SIZE_MASK ) {
    case TLB_SIZE_1K: return MASK_1K;
    case TLB_SIZE_4K: return MASK_4K;
    case TLB_SIZE_64K: return MASK_64K;
    case TLB_SIZE_1M: return MASK_1M;
    default: return 0; /* Unreachable */
    }
}

static uint32_t get_tlb_size_pages( uint32_t flags )
{
    switch( flags & TLB_SIZE_MASK ) {
    case TLB_SIZE_1K: return 0;
    case TLB_SIZE_4K: return 1;
    case TLB_SIZE_64K: return 16;
    case TLB_SIZE_1M: return 256;
    default: return 0; /* Unreachable */
    }
}
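
/* Example (illustrative): a TLB_SIZE_64K entry spans sixteen 4K slots in the
 * page map, so inserting it writes the same mem_region_fn_t into 16
 * consecutive entries starting at (vpn & MASK_64K). A 1K entry returns
 * npages == 0, which the map/unmap functions below treat as a request to go
 * through the utlb_1k_entry subpage machinery instead. */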

/**
 * Add a new TLB entry mapping to the address space table. If any of the pages
 * are already mapped, they are mapped to the TLB multi-hit page instead.
 * @return FALSE if a TLB multihit situation was detected, otherwise TRUE.
 */
static gboolean mmu_utlb_map_pages( mem_region_fn_t priv_page, mem_region_fn_t user_page, sh4addr_t start_addr, int npages )
{
    mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
    mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
    struct utlb_default_regions *privdefs = &mmu_default_regions[DEFAULT_REGIONS];
    struct utlb_default_regions *userdefs = privdefs;

    gboolean mapping_ok = TRUE;
    int i;

    if( (start_addr & 0xFC000000) == 0xE0000000 ) {
        /* Storequeue mapping */
        privdefs = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
        userdefs = mmu_user_storequeue_regions;
    } else if( (start_addr & 0xE0000000) == 0xC0000000 ) {
        user_page = NULL; /* No user access to P3 region */
    } else if( start_addr >= 0x80000000 ) {
        return TRUE; // No mapping - legal but meaningless
    }

    if( npages == 0 ) {
        struct utlb_1k_entry *ent;
        int i, idx = (start_addr >> 10) & 0x03;
        if( IS_1K_PAGE_ENTRY(*ptr) ) {
            ent = (struct utlb_1k_entry *)*ptr;
        } else {
            ent = mmu_utlb_1k_alloc();
            /* New 1K struct - init to previous contents of region */
            for( i=0; i<4; i++ ) {
                ent->subpages[i] = *ptr;
                ent->user_subpages[i] = *uptr;
            }
            *ptr = &ent->fn;
            *uptr = &ent->user_fn;
        }

        if( priv_page != NULL ) {
            if( ent->subpages[idx] == privdefs->tlb_miss ) {
                ent->subpages[idx] = priv_page;
            } else {
                mapping_ok = FALSE;
                ent->subpages[idx] = privdefs->tlb_multihit;
            }
        }
        if( user_page != NULL ) {
            if( ent->user_subpages[idx] == userdefs->tlb_miss ) {
                ent->user_subpages[idx] = user_page;
            } else {
                mapping_ok = FALSE;
                ent->user_subpages[idx] = userdefs->tlb_multihit;
            }
        }

    } else {
        if( priv_page != NULL ) {
            /* Privileged mapping only */
            for( i=0; i<npages; i++ ) {
                if( *ptr == privdefs->tlb_miss ) {
                    *ptr++ = priv_page;
                } else {
                    mapping_ok = FALSE;
                    *ptr++ = privdefs->tlb_multihit;
                }
            }
        }
        if( user_page != NULL ) {
            /* User mapping only (eg ASID change remap w/ SV=1) */
            for( i=0; i<npages; i++ ) {
                if( *uptr == userdefs->tlb_miss ) {
                    *uptr++ = user_page;
                } else {
                    mapping_ok = FALSE;
                    *uptr++ = userdefs->tlb_multihit;
                }
            }
        }
    }

    return mapping_ok;
}

/**
 * Remap any pages within the region covered by entryNo, but not including
 * entryNo itself. This is used to reestablish pages that were previously
 * covered by a multi-hit exception region when one of the pages is removed.
 */
static void mmu_utlb_remap_pages( gboolean remap_priv, gboolean remap_user, int entryNo )
{
    int mask = mmu_utlb[entryNo].mask;
    uint32_t remap_addr = mmu_utlb[entryNo].vpn & mask;
    int i;

    for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
        if( i != entryNo && (mmu_utlb[i].vpn & mask) == remap_addr && (mmu_utlb[i].flags & TLB_VALID) ) {
            /* Overlapping region */
            mem_region_fn_t priv_page = (remap_priv ? &mmu_utlb_pages[i].fn : NULL);
            mem_region_fn_t user_page = (remap_user ? mmu_utlb_pages[i].user_fn : NULL);
            uint32_t start_addr;
            int npages;

            if( mmu_utlb[i].mask >= mask ) {
                /* entry is no larger than the area we're replacing - map completely */
                start_addr = mmu_utlb[i].vpn & mmu_utlb[i].mask;
                npages = get_tlb_size_pages( mmu_utlb[i].flags );
            } else {
                /* Otherwise map subset - region covered by removed page */
                start_addr = remap_addr;
                npages = get_tlb_size_pages( mmu_utlb[entryNo].flags );
            }

            if( (mmu_utlb[i].flags & TLB_SHARE) || mmu_utlb[i].asid == mmu_asid ) {
                mmu_utlb_map_pages( priv_page, user_page, start_addr, npages );
            } else if( IS_SV_ENABLED() ) {
                mmu_utlb_map_pages( priv_page, NULL, start_addr, npages );
            }
        }
    }
}

/**
 * Remove a previous TLB mapping (replacing them with the TLB miss region).
 * @return FALSE if any pages were previously mapped to the TLB multihit page,
 * otherwise TRUE. In either case, all pages in the region are cleared to TLB miss.
 */
static gboolean mmu_utlb_unmap_pages( gboolean unmap_priv, gboolean unmap_user, sh4addr_t start_addr, int npages )
{
    mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
    mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
    struct utlb_default_regions *privdefs = &mmu_default_regions[DEFAULT_REGIONS];
    struct utlb_default_regions *userdefs = privdefs;

    gboolean unmapping_ok = TRUE;
    int i;

    if( (start_addr & 0xFC000000) == 0xE0000000 ) {
        /* Storequeue mapping */
        privdefs = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
        userdefs = mmu_user_storequeue_regions;
    } else if( (start_addr & 0xE0000000) == 0xC0000000 ) {
        unmap_user = FALSE;
    } else if( start_addr >= 0x80000000 ) {
        return TRUE; // No mapping - legal but meaningless
    }

    if( npages == 0 ) { // 1K page
        assert( IS_1K_PAGE_ENTRY( *ptr ) );
        struct utlb_1k_entry *ent = (struct utlb_1k_entry *)*ptr;
        int i, idx = (start_addr >> 10) & 0x03, mergeable=1;
        if( ent->subpages[idx] == privdefs->tlb_multihit ) {
            unmapping_ok = FALSE;
        }
        if( unmap_priv )
            ent->subpages[idx] = privdefs->tlb_miss;
        if( unmap_user )
            ent->user_subpages[idx] = userdefs->tlb_miss;

        /* If all 4 subpages have the same content, merge them together and
         * release the 1K entry
         */
        mem_region_fn_t priv_page = ent->subpages[0];
        mem_region_fn_t user_page = ent->user_subpages[0];
        for( i=1; i<4; i++ ) {
            if( priv_page != ent->subpages[i] || user_page != ent->user_subpages[i] ) {
                mergeable = 0;
                break;
            }
        }
        if( mergeable ) {
            mmu_utlb_1k_free(ent);
            *ptr = priv_page;
            *uptr = user_page;
        }
    } else {
        if( unmap_priv ) {
            /* Privileged (un)mapping */
            for( i=0; i<npages; i++ ) {
                if( *ptr == privdefs->tlb_multihit ) {
                    unmapping_ok = FALSE;
                }
                *ptr++ = privdefs->tlb_miss;
            }
        }
        if( unmap_user ) {
            /* User (un)mapping */
            for( i=0; i<npages; i++ ) {
                if( *uptr == userdefs->tlb_multihit ) {
                    unmapping_ok = FALSE;
                }
                *uptr++ = userdefs->tlb_miss;
            }
        }
    }

    return unmapping_ok;
}

static void mmu_utlb_insert_entry( int entry )
{
    struct utlb_entry *ent = &mmu_utlb[entry];
    mem_region_fn_t page = &mmu_utlb_pages[entry].fn;
    mem_region_fn_t upage;
    sh4addr_t start_addr = ent->vpn & ent->mask;
    int npages = get_tlb_size_pages(ent->flags);

    if( (start_addr & 0xFC000000) == 0xE0000000 ) {
        /* Store queue mappings are a bit different - normal access is fixed to
         * the store queue register block, and we only map prefetches through
         * the TLB
         */
        mmu_utlb_init_storequeue_vtable( ent, &mmu_utlb_pages[entry] );

        if( (ent->flags & TLB_USERMODE) == 0 ) {
            upage = mmu_user_storequeue_regions->tlb_prot;
        } else if( IS_STOREQUEUE_PROTECTED() ) {
            upage = &p4_region_storequeue_sqmd;
        } else {
            upage = page;
        }

    } else {

        if( (ent->flags & TLB_USERMODE) == 0 ) {
            upage = &mem_region_tlb_protected;
        } else {
            upage = page;
        }

        if( (ent->flags & TLB_WRITABLE) == 0 ) {
            page->write_long = (mem_write_fn_t)tlb_protected_write;
            page->write_word = (mem_write_fn_t)tlb_protected_write;
            page->write_byte = (mem_write_fn_t)tlb_protected_write;
            page->write_burst = (mem_write_burst_fn_t)tlb_protected_write;
            page->read_byte_for_write = (mem_read_fn_t)tlb_protected_read_for_write;
            mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], FALSE );
        } else if( (ent->flags & TLB_DIRTY) == 0 ) {
            page->write_long = (mem_write_fn_t)tlb_initial_write;
            page->write_word = (mem_write_fn_t)tlb_initial_write;
            page->write_byte = (mem_write_fn_t)tlb_initial_write;
            page->write_burst = (mem_write_burst_fn_t)tlb_initial_write;
            page->read_byte_for_write = (mem_read_fn_t)tlb_initial_read_for_write;
            mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], FALSE );
        } else {
            mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], TRUE );
        }
    }

    mmu_utlb_pages[entry].user_fn = upage;

    /* Is page visible? */
    if( (ent->flags & TLB_SHARE) || ent->asid == mmu_asid ) {
        mmu_utlb_map_pages( page, upage, start_addr, npages );
    } else if( IS_SV_ENABLED() ) {
        mmu_utlb_map_pages( page, NULL, start_addr, npages );
    }
}

static void mmu_utlb_remove_entry( int entry )
{
    struct utlb_entry *ent = &mmu_utlb[entry];
    sh4addr_t start_addr = ent->vpn&ent->mask;
    gboolean unmap_user;
    int npages = get_tlb_size_pages(ent->flags);

    if( (ent->flags & TLB_SHARE) || ent->asid == mmu_asid ) {
        unmap_user = TRUE;
    } else if( IS_SV_ENABLED() ) {
        unmap_user = FALSE;
    } else {
        return; // Not mapped
    }

    gboolean clean_unmap = mmu_utlb_unmap_pages( TRUE, unmap_user, start_addr, npages );

    if( !clean_unmap ) {
        mmu_utlb_remap_pages( TRUE, unmap_user, entry );
    }
}

static void mmu_utlb_register_all()
{
    int i;
    for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
        if( mmu_utlb[i].flags & TLB_VALID )
            mmu_utlb_insert_entry( i );
    }
}

static void mmu_invalidate_tlb()
{
    int i;
    for( i=0; i<ITLB_ENTRY_COUNT; i++ ) {
        mmu_itlb[i].flags &= (~TLB_VALID);
    }
    if( IS_TLB_ENABLED() ) {
        for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
            if( mmu_utlb[i].flags & TLB_VALID ) {
                mmu_utlb_remove_entry( i );
            }
        }
    }
    for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
        mmu_utlb[i].flags &= (~TLB_VALID);
    }
}

/******************************************************************************/
/* MMU TLB address translation                                                */
/******************************************************************************/

/**
 * Translate a 32-bit address into a UTLB entry number. Does not check for
 * page protection etc.
 * @return the entryNo if found, -1 if not found, and -2 for a multi-hit.
 */
int mmu_utlb_entry_for_vpn( uint32_t vpn )
{
    mmu_urc++;
    mem_region_fn_t fn = sh4_address_space[vpn>>12];
    if( fn >= &mmu_utlb_pages[0].fn && fn < &mmu_utlb_pages[UTLB_ENTRY_COUNT].fn ) {
        return ((struct utlb_page_entry *)fn) - &mmu_utlb_pages[0];
    } else if( fn >= &mmu_utlb_1k_pages[0].fn && fn < &mmu_utlb_1k_pages[UTLB_ENTRY_COUNT].fn ) {
        struct utlb_1k_entry *ent = (struct utlb_1k_entry *)fn;
        fn = ent->subpages[(vpn>>10)&0x03];
        if( fn >= &mmu_utlb_pages[0].fn && fn < &mmu_utlb_pages[UTLB_ENTRY_COUNT].fn ) {
            return ((struct utlb_page_entry *)fn) - &mmu_utlb_pages[0];
        }
    }
    if( fn == &mem_region_tlb_multihit ) {
        return -2;
    } else {
        return -1;
    }
}

/**
 * Perform the actual utlb lookup w/ asid matching.
 * Possible outcomes are:
 *   0..63 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant UTLB entry, or an error.
 */
static inline int mmu_utlb_lookup_vpn_asid( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    mmu_urc++;
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
        mmu_urc = 0;
    }

    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
            ((mmu_utlb[i].flags & TLB_SHARE) || mmu_asid == mmu_utlb[i].asid) &&
            ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }
    return result;
}

/**
 * Perform the actual utlb lookup matching on vpn only.
 * Possible outcomes are:
 *   0..63 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant UTLB entry, or an error.
 */
static inline int mmu_utlb_lookup_vpn( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    mmu_urc++;
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
        mmu_urc = 0;
    }

    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
            ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    return result;
}

/**
 * Update the ITLB by replacing the LRU entry with the specified UTLB entry.
 * @return the number (0-3) of the replaced entry.
 */
static inline int mmu_itlb_update_from_utlb( int entryNo )
{
    int replace;
    /* Determine entry to replace based on lrui */
    if( (mmu_lrui & 0x38) == 0x38 ) {
        replace = 0;
        mmu_lrui = mmu_lrui & 0x07;
    } else if( (mmu_lrui & 0x26) == 0x06 ) {
        replace = 1;
        mmu_lrui = (mmu_lrui & 0x19) | 0x20;
    } else if( (mmu_lrui & 0x15) == 0x01 ) {
        replace = 2;
        mmu_lrui = (mmu_lrui & 0x3E) | 0x14;
    } else { // Note - gets invalid entries too
        replace = 3;
        mmu_lrui = (mmu_lrui | 0x0B);
    }

    mmu_itlb[replace].vpn = mmu_utlb[entryNo].vpn;
    mmu_itlb[replace].mask = mmu_utlb[entryNo].mask;
    mmu_itlb[replace].ppn = mmu_utlb[entryNo].ppn;
    mmu_itlb[replace].asid = mmu_utlb[entryNo].asid;
    mmu_itlb[replace].flags = mmu_utlb[entryNo].flags & 0x01DA;
    return replace;
}
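
/* The mask/value tests above follow the SH7750-documented LRUI replacement
 * conditions: LRUI is a 6-bit matrix of pairwise use-orderings between the
 * four ITLB entries, and each test identifies the least-recently-used entry
 * (the final else also absorbs invalid encodings, as noted). */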

/**
 * Perform the actual itlb lookup w/ asid protection.
 * Possible outcomes are:
 *   0..3 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant ITLB entry, or an error.
 */
static inline int mmu_itlb_lookup_vpn_asid( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
            ((mmu_itlb[i].flags & TLB_SHARE) || mmu_asid == mmu_itlb[i].asid) &&
            ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    if( result == -1 ) {
        int utlbEntry = mmu_utlb_entry_for_vpn( vpn );
        if( utlbEntry < 0 ) {
            return utlbEntry;
        } else {
            return mmu_itlb_update_from_utlb( utlbEntry );
        }
    }

    switch( result ) {
    case 0: mmu_lrui = (mmu_lrui & 0x07); break;
    case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
    case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
    case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
    }

    return result;
}

/**
 * Perform the actual itlb lookup on vpn only.
 * Possible outcomes are:
 *   0..3 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant ITLB entry, or an error.
 */
static inline int mmu_itlb_lookup_vpn( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
            ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    if( result == -1 ) {
        int utlbEntry = mmu_utlb_lookup_vpn( vpn );
        if( utlbEntry < 0 ) {
            return utlbEntry;
        } else {
            return mmu_itlb_update_from_utlb( utlbEntry );
        }
    }

    switch( result ) {
    case 0: mmu_lrui = (mmu_lrui & 0x07); break;
    case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
    case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
    case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
    }

    return result;
}

/**
 * Update the icache for an untranslated address
 */
static inline void mmu_update_icache_phys( sh4addr_t addr )
{
    if( (addr & 0x1C000000) == 0x0C000000 ) {
        /* Main ram */
        sh4_icache.page_vma = addr & 0xFF000000;
        sh4_icache.page_ppa = 0x0C000000;
        sh4_icache.mask = 0xFF000000;
        sh4_icache.page = dc_main_ram;
    } else if( (addr & 0x1FE00000) == 0 ) {
        /* BIOS ROM */
        sh4_icache.page_vma = addr & 0xFFE00000;
        sh4_icache.page_ppa = 0;
        sh4_icache.mask = 0xFFE00000;
        sh4_icache.page = dc_boot_rom;
    } else {
        /* not supported */
        sh4_icache.page_vma = -1;
    }
}

/**
 * Update the sh4_icache structure to describe the page(s) containing the
 * given vma. If the address does not reference a RAM/ROM region, the icache
 * will be invalidated instead.
 * If AT is on, this method will raise TLB exceptions normally (hence it
 * should only be used immediately prior to execution of code); on a
 * successful lookup it sets the icache according to the matching TLB entry.
 * If AT is off, this method will set the entire referenced RAM/ROM region in
 * the icache.
 * @return TRUE if the update completed (successfully or otherwise), FALSE
 * if an exception was raised.
 */
gboolean FASTCALL mmu_update_icache( sh4vma_t addr )
{
    int entryNo;
    if( IS_SH4_PRIVMODE() ) {
        if( addr & 0x80000000 ) {
            if( addr < 0xC0000000 ) {
                /* P1, P2 and P4 regions are pass-through (no translation) */
                mmu_update_icache_phys(addr);
                return TRUE;
            } else if( addr >= 0xE0000000 && addr < 0xFFFFFF00 ) {
                RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
                return FALSE;
            }
        }

        uint32_t mmucr = MMIO_READ(MMU,MMUCR);
        if( (mmucr & MMUCR_AT) == 0 ) {
            mmu_update_icache_phys(addr);
            return TRUE;
        }

        if( (mmucr & MMUCR_SV) == 0 )
            entryNo = mmu_itlb_lookup_vpn_asid( addr );
        else
            entryNo = mmu_itlb_lookup_vpn( addr );
    } else {
        if( addr & 0x80000000 ) {
            RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
            return FALSE;
        }

        uint32_t mmucr = MMIO_READ(MMU,MMUCR);
        if( (mmucr & MMUCR_AT) == 0 ) {
            mmu_update_icache_phys(addr);
            return TRUE;
        }

        entryNo = mmu_itlb_lookup_vpn_asid( addr );

        if( entryNo >= 0 && (mmu_itlb[entryNo].flags & TLB_USERMODE) == 0 ) {
            RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
            return FALSE;
        }
    }

    switch(entryNo) {
    case -1:
        RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
        return FALSE;
    case -2:
        RAISE_TLB_MULTIHIT_ERROR(addr);
        return FALSE;
    default:
        sh4_icache.page_ppa = mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask;
        sh4_icache.page = mem_get_region( sh4_icache.page_ppa );
        if( sh4_icache.page == NULL ) {
            sh4_icache.page_vma = -1;
        } else {
            sh4_icache.page_vma = mmu_itlb[entryNo].vpn & mmu_itlb[entryNo].mask;
            sh4_icache.mask = mmu_itlb[entryNo].mask;
        }
        return TRUE;
    }
}

/**
 * Translate address for disassembly purposes (ie performs an instruction
 * lookup) - does not raise exceptions or modify any state, and ignores
 * protection bits. Returns the translated address, or MMU_VMA_ERROR
 * on translation failure.
 */
sh4addr_t FASTCALL mmu_vma_to_phys_disasm( sh4vma_t vma )
{
    if( vma & 0x80000000 ) {
        if( vma < 0xC0000000 ) {
            /* P1, P2 and P4 regions are pass-through (no translation) */
            return VMA_TO_EXT_ADDR(vma);
        } else if( vma >= 0xE0000000 && vma < 0xFFFFFF00 ) {
            /* Not translatable */
            return MMU_VMA_ERROR;
        }
    }

    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( (mmucr & MMUCR_AT) == 0 ) {
        return VMA_TO_EXT_ADDR(vma);
    }

    int entryNo = mmu_itlb_lookup_vpn( vma );
    if( entryNo == -2 ) {
        entryNo = mmu_itlb_lookup_vpn_asid( vma );
    }
    if( entryNo < 0 ) {
        return MMU_VMA_ERROR;
    } else {
        return (mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask) |
               (vma & (~mmu_itlb[entryNo].mask));
    }
}

/********************** TLB Direct-Access Regions ***************************/
#define ITLB_ENTRY(addr) ((addr>>7)&0x03)

int32_t FASTCALL mmu_itlb_addr_read( sh4addr_t addr )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    return ent->vpn | ent->asid | (ent->flags & TLB_VALID);
}

void FASTCALL mmu_itlb_addr_write( sh4addr_t addr, uint32_t val )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    ent->vpn = val & 0xFFFFFC00;
    ent->asid = val & 0x000000FF;
    ent->flags = (ent->flags & ~(TLB_VALID)) | (val&TLB_VALID);
}

int32_t FASTCALL mmu_itlb_data_read( sh4addr_t addr )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    return (ent->ppn & 0x1FFFFC00) | ent->flags;
}

void FASTCALL mmu_itlb_data_write( sh4addr_t addr, uint32_t val )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    ent->ppn = val & 0x1FFFFC00;
    ent->flags = val & 0x000001DA;
    ent->mask = get_tlb_size_mask(val);
    if( ent->ppn >= 0x1C000000 )
        ent->ppn |= 0xE0000000;
}

#define UTLB_ENTRY(addr) ((addr>>8)&0x3F)
#define UTLB_ASSOC(addr) (addr&0x80)
#define UTLB_DATA2(addr) (addr&0x00800000)
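
/* Illustrative decode, following the macros above: in an access to the UTLB
 * address array (0xF6000000 region), bits 13:8 select the entry, bit 7
 * requests an associative write, and bit 23 (in the data array) selects the
 * PTEA/data-2 word. For example (hypothetical values):
 */
#if 0
int entry = 5;
sh4addr_t direct = 0xF6000000 | (entry << 8); /* UTLB_ENTRY(direct) == 5 */
sh4addr_t assoc  = 0xF6000000 | 0x80;         /* UTLB_ASSOC(assoc) != 0 */
#endif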

int32_t FASTCALL mmu_utlb_addr_read( sh4addr_t addr )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    return ent->vpn | ent->asid | (ent->flags & TLB_VALID) |
           ((ent->flags & TLB_DIRTY)<<7);
}

int32_t FASTCALL mmu_utlb_data_read( sh4addr_t addr )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    if( UTLB_DATA2(addr) ) {
        return ent->pcmcia;
    } else {
        return (ent->ppn&0x1FFFFC00) | ent->flags;
    }
}

/**
 * Find a UTLB entry for the associative TLB write - same as the normal
 * lookup but ignores the valid bit.
 */
static inline int mmu_utlb_lookup_assoc( uint32_t vpn, uint32_t asid )
{
    int result = -1;
    unsigned int i;
    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
            ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
            ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                fprintf( stderr, "TLB Multi hit: %d %d\n", result, i );
                return -2;
            }
            result = i;
        }
    }
    return result;
}

/**
 * Find an ITLB entry for the associative TLB write - same as the normal
 * lookup but ignores the valid bit.
 */
static inline int mmu_itlb_lookup_assoc( uint32_t vpn, uint32_t asid )
{
    int result = -1;
    unsigned int i;
    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
            ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
            ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }
    return result;
}

void FASTCALL mmu_utlb_addr_write( sh4addr_t addr, uint32_t val, void *exc )
{
    if( UTLB_ASSOC(addr) ) {
        int utlb = mmu_utlb_lookup_assoc( val, mmu_asid );
        if( utlb >= 0 ) {
            struct utlb_entry *ent = &mmu_utlb[utlb];
            uint32_t old_flags = ent->flags;
            ent->flags = ent->flags & ~(TLB_DIRTY|TLB_VALID);
            ent->flags |= (val & TLB_VALID);
            ent->flags |= ((val & 0x200)>>7);
            if( IS_TLB_ENABLED() && ((old_flags^ent->flags) & (TLB_VALID|TLB_DIRTY)) != 0 ) {
                if( old_flags & TLB_VALID )
                    mmu_utlb_remove_entry( utlb );
                if( ent->flags & TLB_VALID )
                    mmu_utlb_insert_entry( utlb );
            }
        }

        int itlb = mmu_itlb_lookup_assoc( val, mmu_asid );
        if( itlb >= 0 ) {
            struct itlb_entry *ent = &mmu_itlb[itlb];
            ent->flags = (ent->flags & (~TLB_VALID)) | (val & TLB_VALID);
        }

        if( itlb == -2 || utlb == -2 ) {
            RAISE_TLB_MULTIHIT_ERROR(addr); /* FIXME: should this only be raised if TLB is enabled? */
            SH4_EXCEPTION_EXIT();
            return;
        }
    } else {
        struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
        if( IS_TLB_ENABLED() && ent->flags & TLB_VALID )
            mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
        ent->vpn = (val & 0xFFFFFC00);
        ent->asid = (val & 0xFF);
        ent->flags = (ent->flags & ~(TLB_DIRTY|TLB_VALID));
        ent->flags |= (val & TLB_VALID);
        ent->flags |= ((val & 0x200)>>7);
        if( IS_TLB_ENABLED() && ent->flags & TLB_VALID )
            mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
    }
}

void FASTCALL mmu_utlb_data_write( sh4addr_t addr, uint32_t val )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    if( UTLB_DATA2(addr) ) {
        ent->pcmcia = val & 0x0000000F;
    } else {
        if( IS_TLB_ENABLED() && ent->flags & TLB_VALID )
            mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
        ent->ppn = (val & 0x1FFFFC00);
        ent->flags = (val & 0x000001FF);
        ent->mask = get_tlb_size_mask(val);
        if( IS_TLB_ENABLED() && ent->flags & TLB_VALID )
            mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
    }
}

struct mem_region_fn p4_region_itlb_addr = {
        mmu_itlb_addr_read, mmu_itlb_addr_write,
        mmu_itlb_addr_read, mmu_itlb_addr_write,
        mmu_itlb_addr_read, mmu_itlb_addr_write,
        unmapped_read_burst, unmapped_write_burst,
        unmapped_prefetch, mmu_itlb_addr_read };
struct mem_region_fn p4_region_itlb_data = {
        mmu_itlb_data_read, mmu_itlb_data_write,
        mmu_itlb_data_read, mmu_itlb_data_write,
        mmu_itlb_data_read, mmu_itlb_data_write,
        unmapped_read_burst, unmapped_write_burst,
        unmapped_prefetch, mmu_itlb_data_read };
struct mem_region_fn p4_region_utlb_addr = {
        mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
        mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
        mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
        unmapped_read_burst, unmapped_write_burst,
        unmapped_prefetch, mmu_utlb_addr_read };
struct mem_region_fn p4_region_utlb_data = {
        mmu_utlb_data_read, mmu_utlb_data_write,
        mmu_utlb_data_read, mmu_utlb_data_write,
        mmu_utlb_data_read, mmu_utlb_data_write,
        unmapped_read_burst, unmapped_write_burst,
        unmapped_prefetch, mmu_utlb_data_read };

/********************** Error regions **************************/

static void FASTCALL address_error_read( sh4addr_t addr, void *exc )
{
    RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
    SH4_EXCEPTION_EXIT();
}

static void FASTCALL address_error_read_for_write( sh4addr_t addr, void *exc )
{
    RAISE_MEM_ERROR(EXC_DATA_ADDR_WRITE, addr);
    SH4_EXCEPTION_EXIT();
}

static void FASTCALL address_error_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
{
    RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
    SH4_EXCEPTION_EXIT();
}

static void FASTCALL address_error_write( sh4addr_t addr, uint32_t val, void *exc )
{
    RAISE_MEM_ERROR(EXC_DATA_ADDR_WRITE, addr);
    SH4_EXCEPTION_EXIT();
}

static void FASTCALL tlb_miss_read( sh4addr_t addr, void *exc )
{
    mmu_urc++;
    RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
    SH4_EXCEPTION_EXIT();
}

static void FASTCALL tlb_miss_read_for_write( sh4addr_t addr, void *exc )
{
    mmu_urc++;
    RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, addr);
    SH4_EXCEPTION_EXIT();
}

static void FASTCALL tlb_miss_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
{
    mmu_urc++;
    RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
    SH4_EXCEPTION_EXIT();
}

static void FASTCALL tlb_miss_write( sh4addr_t addr, uint32_t val, void *exc )
{
    mmu_urc++;
    RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, addr);
    SH4_EXCEPTION_EXIT();
}

static int32_t FASTCALL tlb_protected_read( sh4addr_t addr, void *exc )
{
    mmu_urc++;
    RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
    SH4_EXCEPTION_EXIT();
    return 0;
}

static int32_t FASTCALL tlb_protected_read_for_write( sh4addr_t addr, void *exc )
{
    mmu_urc++;
    RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, addr);
    SH4_EXCEPTION_EXIT();
    return 0;
}

static int32_t FASTCALL tlb_protected_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
{
    mmu_urc++;
    RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
    SH4_EXCEPTION_EXIT();
    return 0;
}

static void FASTCALL tlb_protected_write( sh4addr_t addr, uint32_t val, void *exc )
{
    mmu_urc++;
    RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, addr);
    SH4_EXCEPTION_EXIT();
}

static void FASTCALL tlb_initial_write( sh4addr_t addr, uint32_t val, void *exc )
{
    mmu_urc++;
    RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, addr);
    SH4_EXCEPTION_EXIT();
}

static int32_t FASTCALL tlb_initial_read_for_write( sh4addr_t addr, void *exc )
{
    mmu_urc++;
    RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, addr);
    SH4_EXCEPTION_EXIT();
    return 0;
}

static int32_t FASTCALL tlb_multi_hit_read( sh4addr_t addr, void *exc )
{
    sh4_raise_tlb_multihit(addr);
    SH4_EXCEPTION_EXIT();
    return 0;
}

static int32_t FASTCALL tlb_multi_hit_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
{
    sh4_raise_tlb_multihit(addr);
    SH4_EXCEPTION_EXIT();
    return 0;
}

static void FASTCALL tlb_multi_hit_write( sh4addr_t addr, uint32_t val, void *exc )
{
    sh4_raise_tlb_multihit(addr);
    SH4_EXCEPTION_EXIT();
}

/**
 * Note: Per sec 4.6.4 of the SH7750 manual, SQ
 */
struct mem_region_fn mem_region_address_error = {
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
        unmapped_prefetch, (mem_read_fn_t)address_error_read_for_write };

struct mem_region_fn mem_region_tlb_miss = {
        (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
        (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
        (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
        (mem_read_burst_fn_t)tlb_miss_read_burst, (mem_write_burst_fn_t)tlb_miss_write,
        unmapped_prefetch, (mem_read_fn_t)tlb_miss_read_for_write };

struct mem_region_fn mem_region_tlb_protected = {
        (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
        (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
        (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
        (mem_read_burst_fn_t)tlb_protected_read_burst, (mem_write_burst_fn_t)tlb_protected_write,
        unmapped_prefetch, (mem_read_fn_t)tlb_protected_read_for_write };

struct mem_region_fn mem_region_tlb_multihit = {
        (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
        (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
        (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
        (mem_read_burst_fn_t)tlb_multi_hit_read_burst, (mem_write_burst_fn_t)tlb_multi_hit_write,
        (mem_prefetch_fn_t)tlb_multi_hit_read, (mem_read_fn_t)tlb_multi_hit_read };


/* Store-queue regions */
/* These are a bit of a pain - the first 8 fields are controlled by SQMD, while
 * the final (prefetch) is controlled by the actual TLB settings (plus SQMD in
 * some cases), in contrast to the ordinary fields above.
 *
 * There is probably a simpler way to do this.
 */

struct mem_region_fn p4_region_storequeue = {
        ccn_storequeue_read_long, ccn_storequeue_write_long,
        unmapped_read_long, unmapped_write_long, /* TESTME: Officially only long access is supported */
        unmapped_read_long, unmapped_write_long,
        unmapped_read_burst, unmapped_write_burst,
        ccn_storequeue_prefetch, unmapped_read_long };

struct mem_region_fn p4_region_storequeue_miss = {
        ccn_storequeue_read_long, ccn_storequeue_write_long,
        unmapped_read_long, unmapped_write_long, /* TESTME: Officially only long access is supported */
        unmapped_read_long, unmapped_write_long,
        unmapped_read_burst, unmapped_write_burst,
        (mem_prefetch_fn_t)tlb_miss_read, unmapped_read_long };

struct mem_region_fn p4_region_storequeue_multihit = {
        ccn_storequeue_read_long, ccn_storequeue_write_long,
        unmapped_read_long, unmapped_write_long, /* TESTME: Officially only long access is supported */
        unmapped_read_long, unmapped_write_long,
        unmapped_read_burst, unmapped_write_burst,
        (mem_prefetch_fn_t)tlb_multi_hit_read, unmapped_read_long };

struct mem_region_fn p4_region_storequeue_protected = {
        ccn_storequeue_read_long, ccn_storequeue_write_long,
        unmapped_read_long, unmapped_write_long,
        unmapped_read_long, unmapped_write_long,
        unmapped_read_burst, unmapped_write_burst,
        (mem_prefetch_fn_t)tlb_protected_read, unmapped_read_long };

struct mem_region_fn p4_region_storequeue_sqmd = {
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
        (mem_prefetch_fn_t)address_error_read, (mem_read_fn_t)address_error_read_for_write };

struct mem_region_fn p4_region_storequeue_sqmd_miss = {
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
        (mem_prefetch_fn_t)tlb_miss_read, (mem_read_fn_t)address_error_read_for_write };

struct mem_region_fn p4_region_storequeue_sqmd_multihit = {
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
        (mem_prefetch_fn_t)tlb_multi_hit_read, (mem_read_fn_t)address_error_read_for_write };

struct mem_region_fn p4_region_storequeue_sqmd_protected = {
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
        (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
        (mem_prefetch_fn_t)tlb_protected_read, (mem_read_fn_t)address_error_read_for_write };