filename | src/sh4/mmu.c |
changeset | 931:430048ea8b71 |
prev | 929:fd8cb0c82f5f |
next | 933:880c37bb1909 |
author | nkeynes |
date | Tue Dec 23 05:48:05 2008 +0000 (13 years ago) |
branch | lxdream-mem |
permissions | -rw-r--r-- |
last change | More refactoring and general cleanup. Most things should be working again now. Split off cache and start real implementation, breaking save states in the process |
/**
 * $Id$
 *
 * MMU implementation
 *
 * Copyright (c) 2005 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#define MODULE sh4_module

#include <stdio.h>
#include <assert.h>
#include "sh4/sh4mmio.h"
#include "sh4/sh4core.h"
#include "sh4/sh4trans.h"
#include "mem.h"
#include "mmu.h"

#ifdef HAVE_FRAME_ADDRESS
#define RETURN_VIA(exc) do{ *(((void **)__builtin_frame_address(0))+1) = exc; return; } while(0)
#else
#define RETURN_VIA(exc) return MMU_VMA_ERROR
#endif

/* The MMU (practically unique in the system) is allowed to raise exceptions
 * directly, with a return code indicating that one was raised and the caller
 * had better behave appropriately.
 */
#define RAISE_TLB_ERROR(code, vpn) \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
    sh4_raise_tlb_exception(code);

#define RAISE_MEM_ERROR(code, vpn) \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
    sh4_raise_exception(code);

#define RAISE_OTHER_ERROR(code) \
    sh4_raise_exception(code);
/**
 * Abort with a non-MMU address error. Caused by user-mode code attempting
 * to access privileged regions, or by alignment faults.
 */
#define MMU_READ_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_READ)
#define MMU_WRITE_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_WRITE)

#define MMU_TLB_READ_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_READ, vpn)
#define MMU_TLB_WRITE_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, vpn)
#define MMU_TLB_INITIAL_WRITE_ERROR(vpn) RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, vpn)
#define MMU_TLB_READ_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_READ, vpn)
#define MMU_TLB_WRITE_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, vpn)
#define MMU_TLB_MULTI_HIT_ERROR(vpn) sh4_raise_reset(EXC_TLB_MULTI_HIT); \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00)));
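
/* Usage sketch for the convention above: with HAVE_FRAME_ADDRESS, a helper
 * that raises an exception rewrites its own return address (via
 * __builtin_frame_address) so that control lands on the caller's exception
 * epilogue instead of the instruction after the call. Without it, a caller
 * (hypothetical here, just to show the shape) must test for MMU_VMA_ERROR:
 *
 *     sh4addr_t pa = mmu_vma_to_phys_read( vma );
 *     if( pa == MMU_VMA_ERROR )
 *         return;        // exception state was already set by the translator
 *     do_read( pa );     // stand-in for the actual access on the phys address
 */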

#define OCRAM_START (0x1C000000>>LXDREAM_PAGE_BITS)
#define OCRAM_END   (0x20000000>>LXDREAM_PAGE_BITS)

static struct itlb_entry mmu_itlb[ITLB_ENTRY_COUNT];
static struct utlb_entry mmu_utlb[UTLB_ENTRY_COUNT];
static uint32_t mmu_urc;
static uint32_t mmu_urb;
static uint32_t mmu_lrui;
static uint32_t mmu_asid; // current asid

static struct utlb_sort_entry mmu_utlb_sorted[UTLB_ENTRY_COUNT];
static uint32_t mmu_utlb_entries; // Number of entries in mmu_utlb_sorted.

static sh4ptr_t cache = NULL;

static void mmu_invalidate_tlb();
static void mmu_utlb_sorted_reset();
static void mmu_utlb_sorted_reload();

static uint32_t get_mask_for_flags( uint32_t flags )
{
    switch( flags & TLB_SIZE_MASK ) {
    case TLB_SIZE_1K: return MASK_1K;
    case TLB_SIZE_4K: return MASK_4K;
    case TLB_SIZE_64K: return MASK_64K;
    case TLB_SIZE_1M: return MASK_1M;
    default: return 0; /* Unreachable */
    }
}
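
/* For reference: the four SH4 page sizes correspond to VPN masks of roughly
 * 1K -> 0xFFFFFC00, 4K -> 0xFFFFF000, 64K -> 0xFFFF0000 and 1M -> 0xFFF00000
 * (the exact MASK_* constants live in mmu.h). An address then hits entry i
 * when ((vpn ^ mmu_utlb[i].vpn) & mmu_utlb[i].mask) == 0; e.g. with a 64K
 * page at vpn 0x7F120000, address 0x7F12B456 matches because only bits below
 * the mask differ.
 */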

MMIO_REGION_READ_FN( MMU, reg )
{
    reg &= 0xFFF;
    switch( reg ) {
    case MMUCR:
        return MMIO_READ( MMU, MMUCR) | (mmu_urc<<10) | (mmu_urb<<18) | (mmu_lrui<<26);
    default:
        return MMIO_READ( MMU, reg );
    }
}
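
/* URC/URB/LRUI live in the mmu_* variables rather than the backing register,
 * so a MMUCR read has to reassemble them. Per the SH7750 register layout,
 * AT is bit 0, TI bit 2, SV bit 8, SQMD bit 9, URC bits 10-15, URB bits
 * 18-23 and LRUI bits 26-31 - matching the shifts used above and in the
 * write handler below.
 */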

MMIO_REGION_WRITE_FN( MMU, reg, val )
{
    uint32_t tmp;
    reg &= 0xFFF;
    switch(reg) {
    case SH4VER:
        return;
    case PTEH:
        val &= 0xFFFFFCFF;
        if( (val & 0xFF) != mmu_asid ) {
            mmu_asid = val&0xFF;
            sh4_icache.page_vma = -1; // invalidate icache as asid has changed
        }
        break;
    case PTEL:
        val &= 0x1FFFFDFF;
        break;
    case PTEA:
        val &= 0x0000000F;
        break;
    case TRA:
        val &= 0x000003FC;
        break;
    case EXPEVT:
    case INTEVT:
        val &= 0x00000FFF;
        break;
    case MMUCR:
        if( val & MMUCR_TI ) {
            mmu_invalidate_tlb();
        }
        mmu_urc = (val >> 10) & 0x3F;
        mmu_urb = (val >> 18) & 0x3F;
        mmu_lrui = (val >> 26) & 0x3F;
        val &= 0x00000301;
        tmp = MMIO_READ( MMU, MMUCR );
        if( (val ^ tmp) & (MMUCR_AT|MMUCR_SV) ) {
            // AT or SV has changed state - flush the translation (xlt) cache,
            // as all bets are off now. We also need to force an immediate exit
            // from the current block.
            MMIO_WRITE( MMU, MMUCR, val );
            sh4_flush_icache();
        }
        break;
    case CCR:
        CCN_set_cache_control( val );
        val &= 0x81A7;
        break;
    case MMUUNK1:
        /* Note that if the high bit is set, this appears to reset the machine.
         * Not emulating this behaviour yet until we know why...
         */
        val &= 0x00010007;
        break;
    case QACR0:
    case QACR1:
        val &= 0x0000001C;
        break;
    case PMCR1:
        PMM_write_control(0, val);
        val &= 0x0000C13F;
        break;
    case PMCR2:
        PMM_write_control(1, val);
        val &= 0x0000C13F;
        break;
    default:
        break;
    }
    MMIO_WRITE( MMU, reg, val );
}

void MMU_init()
{
}

void MMU_reset()
{
    mmio_region_MMU_write( CCR, 0 );
    mmio_region_MMU_write( MMUCR, 0 );
    mmu_utlb_sorted_reload();
}

void MMU_save_state( FILE *f )
{
    fwrite( &mmu_itlb, sizeof(mmu_itlb), 1, f );
    fwrite( &mmu_utlb, sizeof(mmu_utlb), 1, f );
    fwrite( &mmu_urc, sizeof(mmu_urc), 1, f );
    fwrite( &mmu_urb, sizeof(mmu_urb), 1, f );
    fwrite( &mmu_lrui, sizeof(mmu_lrui), 1, f );
    fwrite( &mmu_asid, sizeof(mmu_asid), 1, f );
}

int MMU_load_state( FILE *f )
{
    if( fread( &mmu_itlb, sizeof(mmu_itlb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_utlb, sizeof(mmu_utlb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_urc, sizeof(mmu_urc), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_urb, sizeof(mmu_urb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_lrui, sizeof(mmu_lrui), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_asid, sizeof(mmu_asid), 1, f ) != 1 ) {
        return 1;
    }
    mmu_utlb_sorted_reload();
    return 0;
}

/******************* Sorted TLB data structure ****************/
/*
 * mmu_utlb_sorted maintains a list of all active (valid) entries,
 * sorted by masked VPN and then ASID. Multi-hit entries are resolved
 * ahead of time, and have -2 recorded as the corresponding entryNo.
 *
 * FIXME: Multi-hit detection doesn't pick up cases where two pages
 * overlap due to different sizes (and don't share the same base
 * address).
 */
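
/* Key layout example: a key packs the masked VPN together with the ASID in
 * the low byte (the smallest page size is 1K, so VPN bits never reach below
 * bit 10 and the two fields cannot collide). A 4K page at vpn 0x7F123000
 * with asid 5 is stored with key 0x7F123005 and mask 0xFFFFF0FF; a lookup
 * for address 0x7F123ABC under asid 5 masks down to the same key and hits.
 */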

static void mmu_utlb_sorted_reset()
{
    mmu_utlb_entries = 0;
}

/**
 * Find an entry in the sorted table (VPN+ASID check).
 */
static inline int mmu_utlb_sorted_find( sh4addr_t vma )
{
    int low = 0;
    int high = mmu_utlb_entries;
    uint32_t lookup = (vma & 0xFFFFFC00) + mmu_asid;

    mmu_urc++;
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
        mmu_urc = 0;
    }

    while( low != high ) {
        int posn = (high+low)>>1;
        int masked = lookup & mmu_utlb_sorted[posn].mask;
        if( mmu_utlb_sorted[posn].key < masked ) {
            low = posn+1;
        } else if( mmu_utlb_sorted[posn].key > masked ) {
            high = posn;
        } else {
            return mmu_utlb_sorted[posn].entryNo;
        }
    }
    return -1;
}
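
/* Note the probe masks the lookup key with each visited entry's own mask
 * before comparing, so one binary search copes with mixed page sizes: an
 * entry's key has its in-page bits zeroed, and masking the query the same
 * way reduces both sides to the same granularity. This stays consistent
 * only while overlapping mappings share a base address - hence the FIXME
 * above about differently-sized overlaps.
 */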

static void mmu_utlb_insert_entry( int entry )
{
    int low = 0;
    int high = mmu_utlb_entries;
    uint32_t key = (mmu_utlb[entry].vpn & mmu_utlb[entry].mask) + mmu_utlb[entry].asid;

    assert( mmu_utlb_entries < UTLB_ENTRY_COUNT );
    /* Find the insertion point */
    while( low != high ) {
        int posn = (high+low)>>1;
        if( mmu_utlb_sorted[posn].key < key ) {
            low = posn+1;
        } else if( mmu_utlb_sorted[posn].key > key ) {
            high = posn;
        } else {
            /* Exact match - multi-hit */
            mmu_utlb_sorted[posn].entryNo = -2;
            return;
        }
    }
    memmove( &mmu_utlb_sorted[low+1], &mmu_utlb_sorted[low],
             (mmu_utlb_entries - low) * sizeof(struct utlb_sort_entry) );
    mmu_utlb_sorted[low].key = key;
    mmu_utlb_sorted[low].mask = mmu_utlb[entry].mask | 0x000000FF;
    mmu_utlb_sorted[low].entryNo = entry;
    mmu_utlb_entries++;
}

static void mmu_utlb_remove_entry( int entry )
{
    int low = 0;
    int high = mmu_utlb_entries;
    uint32_t key = (mmu_utlb[entry].vpn & mmu_utlb[entry].mask) + mmu_utlb[entry].asid;
    while( low != high ) {
        int posn = (high+low)>>1;
        if( mmu_utlb_sorted[posn].key < key ) {
            low = posn+1;
        } else if( mmu_utlb_sorted[posn].key > key ) {
            high = posn;
        } else {
            if( mmu_utlb_sorted[posn].entryNo == -2 ) {
                /* Multiple-entry recorded - rebuild the whole table minus entry */
                int i;
                mmu_utlb_entries = 0;
                for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
                    if( i != entry && (mmu_utlb[i].flags & TLB_VALID) ) {
                        mmu_utlb_insert_entry(i);
                    }
                }
            } else {
                mmu_utlb_entries--;
                memmove( &mmu_utlb_sorted[posn], &mmu_utlb_sorted[posn+1],
                         (mmu_utlb_entries - posn)*sizeof(struct utlb_sort_entry) );
            }
            return;
        }
    }
    assert( 0 && "UTLB key not found!" );
}

static void mmu_utlb_sorted_reload()
{
    int i;
    mmu_utlb_entries = 0;
    for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
        if( mmu_utlb[i].flags & TLB_VALID )
            mmu_utlb_insert_entry( i );
    }
}

/* TLB maintenance */

/**
 * LDTLB instruction implementation. Copies PTEH, PTEL and PTEA into the UTLB
 * entry identified by MMUCR.URC. Does not modify MMUCR or the ITLB.
 */
void MMU_ldtlb()
{
    if( mmu_utlb[mmu_urc].flags & TLB_VALID )
        mmu_utlb_remove_entry( mmu_urc );
    mmu_utlb[mmu_urc].vpn = MMIO_READ(MMU, PTEH) & 0xFFFFFC00;
    mmu_utlb[mmu_urc].asid = MMIO_READ(MMU, PTEH) & 0x000000FF;
    mmu_utlb[mmu_urc].ppn = MMIO_READ(MMU, PTEL) & 0x1FFFFC00;
    mmu_utlb[mmu_urc].flags = MMIO_READ(MMU, PTEL) & 0x000001FF;
    mmu_utlb[mmu_urc].pcmcia = MMIO_READ(MMU, PTEA);
    mmu_utlb[mmu_urc].mask = get_mask_for_flags(mmu_utlb[mmu_urc].flags);
    if( mmu_utlb[mmu_urc].ppn >= 0x1C000000 )
        mmu_utlb[mmu_urc].ppn |= 0xE0000000;
    if( mmu_utlb[mmu_urc].flags & TLB_VALID )
        mmu_utlb_insert_entry( mmu_urc );
}
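
/* Guest usage sketch: an SH4 TLB miss handler conventionally loads the new
 * translation into PTEL (PPN|flags) - PTEH already holds the faulting
 * VPN|ASID - and then issues LDTLB followed by RTE. This function models
 * the LDTLB step, with the extra bookkeeping of keeping the sorted shadow
 * table in sync with the raw UTLB array.
 */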

static void mmu_invalidate_tlb()
{
    int i;
    for( i=0; i<ITLB_ENTRY_COUNT; i++ ) {
        mmu_itlb[i].flags &= (~TLB_VALID);
    }
    for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
        mmu_utlb[i].flags &= (~TLB_VALID);
    }
    mmu_utlb_entries = 0;
}

#define ITLB_ENTRY(addr) ((addr>>7)&0x03)

int32_t FASTCALL mmu_itlb_addr_read( sh4addr_t addr )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    return ent->vpn | ent->asid | (ent->flags & TLB_VALID);
}
int32_t FASTCALL mmu_itlb_data_read( sh4addr_t addr )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    return (ent->ppn & 0x1FFFFC00) | ent->flags;
}

void FASTCALL mmu_itlb_addr_write( sh4addr_t addr, uint32_t val )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    ent->vpn = val & 0xFFFFFC00;
    ent->asid = val & 0x000000FF;
    ent->flags = (ent->flags & ~(TLB_VALID)) | (val&TLB_VALID);
}

void FASTCALL mmu_itlb_data_write( sh4addr_t addr, uint32_t val )
{
    struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
    ent->ppn = val & 0x1FFFFC00;
    ent->flags = val & 0x000001DA;
    ent->mask = get_mask_for_flags(val);
    if( ent->ppn >= 0x1C000000 )
        ent->ppn |= 0xE0000000;
}

#define UTLB_ENTRY(addr) ((addr>>8)&0x3F)
#define UTLB_ASSOC(addr) (addr&0x80)
#define UTLB_DATA2(addr) (addr&0x00800000)
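
/* These decode the P4 memory-mapped UTLB arrays: the address array keeps
 * the entry number in address bits 13:8 with bit 7 as the "association"
 * flag on writes, and bit 23 selects the secondary (PCMCIA) data array.
 * The ITLB macro above likewise keeps its four entries in bits 8:7.
 */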

int32_t FASTCALL mmu_utlb_addr_read( sh4addr_t addr )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    return ent->vpn | ent->asid | (ent->flags & TLB_VALID) |
           ((ent->flags & TLB_DIRTY)<<7);
}
int32_t FASTCALL mmu_utlb_data_read( sh4addr_t addr )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    if( UTLB_DATA2(addr) ) {
        return ent->pcmcia;
    } else {
        return (ent->ppn&0x1FFFFC00) | ent->flags;
    }
}

/**
 * Find a UTLB entry for an associative TLB write - same matching rules as
 * the normal lookup, but performed without touching URC or raising
 * exceptions from here.
 */
static inline int mmu_utlb_lookup_assoc( uint32_t vpn, uint32_t asid )
{
    int result = -1;
    unsigned int i;
    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
            ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
            ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                fprintf( stderr, "TLB Multi hit: %d %d\n", result, i );
                return -2;
            }
            result = i;
        }
    }
    return result;
}

/**
 * Find an ITLB entry for an associative TLB write - same matching rules as
 * the normal lookup, but performed without updating LRUI or raising
 * exceptions from here.
 */
static inline int mmu_itlb_lookup_assoc( uint32_t vpn, uint32_t asid )
{
    int result = -1;
    unsigned int i;
    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
            ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
            ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }
    return result;
}

void FASTCALL mmu_utlb_addr_write( sh4addr_t addr, uint32_t val )
{
    if( UTLB_ASSOC(addr) ) {
        int utlb = mmu_utlb_lookup_assoc( val, mmu_asid );
        if( utlb >= 0 ) {
            struct utlb_entry *ent = &mmu_utlb[utlb];
            uint32_t old_flags = ent->flags;
            ent->flags = ent->flags & ~(TLB_DIRTY|TLB_VALID);
            ent->flags |= (val & TLB_VALID);
            ent->flags |= ((val & 0x200)>>7); // move D bit (bit 9) to its PTEL position
            if( (old_flags & TLB_VALID) && !(ent->flags&TLB_VALID) ) {
                mmu_utlb_remove_entry( utlb );
            } else if( !(old_flags & TLB_VALID) && (ent->flags&TLB_VALID) ) {
                mmu_utlb_insert_entry( utlb );
            }
        }

        int itlb = mmu_itlb_lookup_assoc( val, mmu_asid );
        if( itlb >= 0 ) {
            struct itlb_entry *ent = &mmu_itlb[itlb];
            ent->flags = (ent->flags & (~TLB_VALID)) | (val & TLB_VALID);
        }

        if( itlb == -2 || utlb == -2 ) {
            MMU_TLB_MULTI_HIT_ERROR(addr);
            return;
        }
    } else {
        struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
        if( ent->flags & TLB_VALID )
            mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
        ent->vpn = (val & 0xFFFFFC00);
        ent->asid = (val & 0xFF);
        ent->flags = (ent->flags & ~(TLB_DIRTY|TLB_VALID));
        ent->flags |= (val & TLB_VALID);
        ent->flags |= ((val & 0x200)>>7); // move D bit (bit 9) to its PTEL position
        if( ent->flags & TLB_VALID )
            mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
    }
}

void FASTCALL mmu_utlb_data_write( sh4addr_t addr, uint32_t val )
{
    struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
    if( UTLB_DATA2(addr) ) {
        ent->pcmcia = val & 0x0000000F;
    } else {
        if( ent->flags & TLB_VALID )
            mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
        ent->ppn = (val & 0x1FFFFC00);
        ent->flags = (val & 0x000001FF);
        ent->mask = get_mask_for_flags(val);
        if( ent->ppn >= 0x1C000000 )
            ent->ppn |= 0xE0000000;
        if( ent->flags & TLB_VALID )
            mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
    }
}

/* Cache access - not implemented */

int32_t FASTCALL mmu_icache_addr_read( sh4addr_t addr )
{
    return 0; // not implemented
}
int32_t FASTCALL mmu_icache_data_read( sh4addr_t addr )
{
    return 0; // not implemented
}
int32_t FASTCALL mmu_ocache_addr_read( sh4addr_t addr )
{
    return 0; // not implemented
}
int32_t FASTCALL mmu_ocache_data_read( sh4addr_t addr )
{
    return 0; // not implemented
}

void FASTCALL mmu_icache_addr_write( sh4addr_t addr, uint32_t val )
{
}

void FASTCALL mmu_icache_data_write( sh4addr_t addr, uint32_t val )
{
}

void FASTCALL mmu_ocache_addr_write( sh4addr_t addr, uint32_t val )
{
}

void FASTCALL mmu_ocache_data_write( sh4addr_t addr, uint32_t val )
{
}

/******************************************************************************/
/* MMU TLB address translation                                                */
/******************************************************************************/

/**
 * The translations are excessively complicated, but unfortunately it's a
 * complicated system. TODO: make this not be painfully slow.
 */

/**
 * Perform the actual utlb lookup w/ asid matching.
 * Possible outcomes are:
 *   0..63    Single match - good, return entry found
 *   -1       No match - raise a tlb data miss exception
 *   -2       Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant UTLB entry, or an error.
 */
static inline int mmu_utlb_lookup_vpn_asid( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    mmu_urc++;
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
        mmu_urc = 0;
    }

    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
            ((mmu_utlb[i].flags & TLB_SHARE) || mmu_asid == mmu_utlb[i].asid) &&
            ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }
    return result;
}

/**
 * Perform the actual utlb lookup matching on vpn only.
 * Possible outcomes are:
 *   0..63    Single match - good, return entry found
 *   -1       No match - raise a tlb data miss exception
 *   -2       Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant UTLB entry, or an error.
 */
static inline int mmu_utlb_lookup_vpn( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    mmu_urc++;
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
        mmu_urc = 0;
    }

    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
            ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    return result;
}

/**
 * Update the ITLB by replacing the LRU entry with the specified UTLB entry.
 * @return the number (0-3) of the replaced entry.
 */
static inline int mmu_itlb_update_from_utlb( int entryNo )
{
    int replace;
    /* Determine entry to replace based on lrui */
    if( (mmu_lrui & 0x38) == 0x38 ) {
        replace = 0;
        mmu_lrui = mmu_lrui & 0x07;
    } else if( (mmu_lrui & 0x26) == 0x06 ) {
        replace = 1;
        mmu_lrui = (mmu_lrui & 0x19) | 0x20;
    } else if( (mmu_lrui & 0x15) == 0x01 ) {
        replace = 2;
        mmu_lrui = (mmu_lrui & 0x3E) | 0x14;
    } else { // Note - gets invalid entries too
        replace = 3;
        mmu_lrui = (mmu_lrui | 0x0B);
    }

    mmu_itlb[replace].vpn = mmu_utlb[entryNo].vpn;
    mmu_itlb[replace].mask = mmu_utlb[entryNo].mask;
    mmu_itlb[replace].ppn = mmu_utlb[entryNo].ppn;
    mmu_itlb[replace].asid = mmu_utlb[entryNo].asid;
    mmu_itlb[replace].flags = mmu_utlb[entryNo].flags & 0x01DA;
    return replace;
}
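
/* LRUI is the ITLB's pairwise pseudo-LRU matrix: each of its six bits
 * records the relative age of one pair of the four entries, so the tests
 * above check "older than all the others" (e.g. 0x38 covers the three pairs
 * involving entry 0) and the updates mark the replacement as most recently
 * used. The same constants reappear in the hit paths of the lookups below,
 * following the SH7750 replacement conditions.
 */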

/**
 * Perform the actual itlb lookup w/ asid matching.
 * Possible outcomes are:
 *   0..3     Single match - good, return entry found
 *   -1       No match - raise a tlb data miss exception
 *   -2       Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant ITLB entry, or an error.
 */
static inline int mmu_itlb_lookup_vpn_asid( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
            ((mmu_itlb[i].flags & TLB_SHARE) || mmu_asid == mmu_itlb[i].asid) &&
            ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    if( result == -1 ) {
        int utlbEntry = mmu_utlb_sorted_find( vpn );
        if( utlbEntry < 0 ) {
            return utlbEntry;
        } else {
            return mmu_itlb_update_from_utlb( utlbEntry );
        }
    }

    switch( result ) {
    case 0: mmu_lrui = (mmu_lrui & 0x07); break;
    case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
    case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
    case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
    }

    return result;
}

/**
 * Perform the actual itlb lookup on vpn only.
 * Possible outcomes are:
 *   0..3     Single match - good, return entry found
 *   -1       No match - raise a tlb data miss exception
 *   -2       Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant ITLB entry, or an error.
 */
static inline int mmu_itlb_lookup_vpn( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
            ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    if( result == -1 ) {
        int utlbEntry = mmu_utlb_lookup_vpn( vpn );
        if( utlbEntry < 0 ) {
            return utlbEntry;
        } else {
            return mmu_itlb_update_from_utlb( utlbEntry );
        }
    }

    switch( result ) {
    case 0: mmu_lrui = (mmu_lrui & 0x07); break;
    case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
    case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
    case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
    }

    return result;
}

#ifdef HAVE_FRAME_ADDRESS
sh4addr_t FASTCALL mmu_vma_to_phys_read( sh4vma_t addr, void *exc )
#else
sh4addr_t FASTCALL mmu_vma_to_phys_read( sh4vma_t addr )
#endif
{
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( addr & 0x80000000 ) {
        if( IS_SH4_PRIVMODE() ) {
            if( addr >= 0xE0000000 ) {
                return addr; /* P4 - passthrough */
            } else if( addr < 0xC0000000 ) {
                /* P1, P2 regions are pass-through (no translation) */
                return VMA_TO_EXT_ADDR(addr);
            }
        } else {
            if( addr >= 0xE0000000 && addr < 0xE4000000 &&
                ((mmucr&MMUCR_SQMD) == 0) ) {
                /* Conditional user-mode access to the store-queue (no translation) */
                return addr;
            }
            MMU_READ_ADDR_ERROR();
            RETURN_VIA(exc);
        }
    }

    if( (mmucr & MMUCR_AT) == 0 ) {
        return VMA_TO_EXT_ADDR(addr);
    }

    /* If we get this far, translation is required */
    int entryNo;
    if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
        entryNo = mmu_utlb_sorted_find( addr );
    } else {
        entryNo = mmu_utlb_lookup_vpn( addr );
    }

    switch(entryNo) {
    case -1:
        MMU_TLB_READ_MISS_ERROR(addr);
        RETURN_VIA(exc);
    case -2:
        MMU_TLB_MULTI_HIT_ERROR(addr);
        RETURN_VIA(exc);
    default:
        if( (mmu_utlb[entryNo].flags & TLB_USERMODE) == 0 &&
            !IS_SH4_PRIVMODE() ) {
            /* protection violation */
            MMU_TLB_READ_PROT_ERROR(addr);
            RETURN_VIA(exc);
        }

        /* finally generate the target address */
        return (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
               (addr & (~mmu_utlb[entryNo].mask));
    }
}
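
/* Worked example of the final address generation: for a 64K-page entry with
 * ppn 0x0C340000 and mask 0xFFFF0000, translating 0x7F12B456 produces
 * (0x0C340000 & 0xFFFF0000) | (0x7F12B456 & 0x0000FFFF) = 0x0C34B456 -
 * the PPN supplies the page frame and the untranslated low bits supply the
 * offset within the page.
 */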

#ifdef HAVE_FRAME_ADDRESS
sh4addr_t FASTCALL mmu_vma_to_phys_write( sh4vma_t addr, void *exc )
#else
sh4addr_t FASTCALL mmu_vma_to_phys_write( sh4vma_t addr )
#endif
{
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( addr & 0x80000000 ) {
        if( IS_SH4_PRIVMODE() ) {
            if( addr >= 0xE0000000 ) {
                return addr; /* P4 - passthrough */
            } else if( addr < 0xC0000000 ) {
                /* P1, P2 regions are pass-through (no translation) */
                return VMA_TO_EXT_ADDR(addr);
            }
        } else {
            if( addr >= 0xE0000000 && addr < 0xE4000000 &&
                ((mmucr&MMUCR_SQMD) == 0) ) {
                /* Conditional user-mode access to the store-queue (no translation) */
                return addr;
            }
            MMU_WRITE_ADDR_ERROR();
            RETURN_VIA(exc);
        }
    }

    if( (mmucr & MMUCR_AT) == 0 ) {
        return VMA_TO_EXT_ADDR(addr);
    }

    /* If we get this far, translation is required */
    int entryNo;
    if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
        entryNo = mmu_utlb_sorted_find( addr );
    } else {
        entryNo = mmu_utlb_lookup_vpn( addr );
    }

    switch(entryNo) {
    case -1:
        MMU_TLB_WRITE_MISS_ERROR(addr);
        RETURN_VIA(exc);
    case -2:
        MMU_TLB_MULTI_HIT_ERROR(addr);
        RETURN_VIA(exc);
    default:
        if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
            : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
            /* protection violation */
            MMU_TLB_WRITE_PROT_ERROR(addr);
            RETURN_VIA(exc);
        }

        if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
            MMU_TLB_INITIAL_WRITE_ERROR(addr);
            RETURN_VIA(exc);
        }

        /* finally generate the target address */
        sh4addr_t pma = (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
                        (addr & (~mmu_utlb[entryNo].mask));
        return pma;
    }
}

/**
 * Update the icache for an untranslated address
 */
static inline void mmu_update_icache_phys( sh4addr_t addr )
{
    if( (addr & 0x1C000000) == 0x0C000000 ) {
        /* Main ram */
        sh4_icache.page_vma = addr & 0xFF000000;
        sh4_icache.page_ppa = 0x0C000000;
        sh4_icache.mask = 0xFF000000;
        sh4_icache.page = sh4_main_ram;
    } else if( (addr & 0x1FE00000) == 0 ) {
        /* BIOS ROM */
        sh4_icache.page_vma = addr & 0xFFE00000;
        sh4_icache.page_ppa = 0;
        sh4_icache.mask = 0xFFE00000;
        sh4_icache.page = mem_get_region(0);
    } else {
        /* not supported */
        sh4_icache.page_vma = -1;
    }
}
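
/* The two fast paths here reflect the Dreamcast memory map: physical
 * 0x0C000000 is the 16MB main RAM window (hence the 0xFF000000 mask) and
 * physical 0x00000000 is the 2MB boot ROM (0xFFE00000 mask). Anything else
 * invalidates page_vma and falls back to the slow path.
 */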

/**
 * Update the sh4_icache structure to describe the page(s) containing the
 * given vma. If the address does not reference a RAM/ROM region, the icache
 * will be invalidated instead.
 * If AT is on, this method will raise TLB exceptions normally
 * (hence it should only be used immediately prior to executing code), and
 * on success sets the icache according to the matching TLB entry.
 * If AT is off, this method will set the entire referenced RAM/ROM region in
 * the icache.
 * @return TRUE if the update completed (successfully or otherwise), FALSE
 * if an exception was raised.
 */
gboolean FASTCALL mmu_update_icache( sh4vma_t addr )
{
    int entryNo;
    if( IS_SH4_PRIVMODE() ) {
        if( addr & 0x80000000 ) {
            if( addr < 0xC0000000 ) {
                /* P1, P2 and P4 regions are pass-through (no translation) */
                mmu_update_icache_phys(addr);
                return TRUE;
            } else if( addr >= 0xE0000000 && addr < 0xFFFFFF00 ) {
                MMU_READ_ADDR_ERROR();
                return FALSE;
            }
        }

        uint32_t mmucr = MMIO_READ(MMU,MMUCR);
        if( (mmucr & MMUCR_AT) == 0 ) {
            mmu_update_icache_phys(addr);
            return TRUE;
        }

        if( (mmucr & MMUCR_SV) == 0 )
            entryNo = mmu_itlb_lookup_vpn_asid( addr );
        else
            entryNo = mmu_itlb_lookup_vpn( addr );
    } else {
        if( addr & 0x80000000 ) {
            MMU_READ_ADDR_ERROR();
            return FALSE;
        }

        uint32_t mmucr = MMIO_READ(MMU,MMUCR);
        if( (mmucr & MMUCR_AT) == 0 ) {
            mmu_update_icache_phys(addr);
            return TRUE;
        }

        entryNo = mmu_itlb_lookup_vpn_asid( addr );

        if( entryNo >= 0 && (mmu_itlb[entryNo].flags & TLB_USERMODE) == 0 ) {
            MMU_TLB_READ_PROT_ERROR(addr);
            return FALSE;
        }
    }

    switch(entryNo) {
    case -1:
        MMU_TLB_READ_MISS_ERROR(addr);
        return FALSE;
    case -2:
        MMU_TLB_MULTI_HIT_ERROR(addr);
        return FALSE;
    default:
        sh4_icache.page_ppa = mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask;
        sh4_icache.page = mem_get_region( sh4_icache.page_ppa );
        if( sh4_icache.page == NULL ) {
            sh4_icache.page_vma = -1;
        } else {
            sh4_icache.page_vma = mmu_itlb[entryNo].vpn & mmu_itlb[entryNo].mask;
            sh4_icache.mask = mmu_itlb[entryNo].mask;
        }
        return TRUE;
    }
}

/**
 * Translate address for disassembly purposes (ie performs an instruction
 * lookup) - does not raise exceptions or modify any state, and ignores
 * protection bits. Returns the translated address, or MMU_VMA_ERROR
 * on translation failure.
 */
sh4addr_t FASTCALL mmu_vma_to_phys_disasm( sh4vma_t vma )
{
    if( vma & 0x80000000 ) {
        if( vma < 0xC0000000 ) {
            /* P1, P2 and P4 regions are pass-through (no translation) */
            return VMA_TO_EXT_ADDR(vma);
        } else if( vma >= 0xE0000000 && vma < 0xFFFFFF00 ) {
            /* Not translatable */
            return MMU_VMA_ERROR;
        }
    }

    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    if( (mmucr & MMUCR_AT) == 0 ) {
        return VMA_TO_EXT_ADDR(vma);
    }

    int entryNo = mmu_itlb_lookup_vpn( vma );
    if( entryNo == -2 ) {
        entryNo = mmu_itlb_lookup_vpn_asid( vma );
    }
    if( entryNo < 0 ) {
        return MMU_VMA_ERROR;
    } else {
        return (mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask) |
               (vma & (~mmu_itlb[entryNo].mask));
    }
}

void FASTCALL sh4_flush_store_queue( sh4addr_t addr )
{
    int queue = (addr&0x20)>>2;  // 0 or 8: index of SQ0/SQ1 in the store_queue array
    uint32_t hi = MMIO_READ( MMU, QACR0 + (queue>>1)) << 24;  // QACR1 sits 4 bytes after QACR0
    sh4ptr_t src = (sh4ptr_t)&sh4r.store_queue[queue];
    sh4addr_t target = (addr&0x03FFFFE0) | hi;  // QACR area bits become address bits 28:26
    ext_address_space[target>>12]->write_burst( target, src );
//    mem_copy_to_sh4( target, src, 32 );
}
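
/* Address formation example (non-MMU case): a PREF to 0xE0001234 selects
 * SQ1 (bit 5 set), and with QACR1 = 0x10 the burst target becomes
 * (0xE0001234 & 0x03FFFFE0) | (0x10 << 24) = 0x10001220, i.e. the queue
 * contents are written to the 32-byte line at that physical address.
 */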

gboolean FASTCALL sh4_flush_store_queue_mmu( sh4addr_t addr )
{
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    int queue = (addr&0x20)>>2;
    sh4ptr_t src = (sh4ptr_t)&sh4r.store_queue[queue];
    sh4addr_t target;
    /* Store queue operation */

    int entryNo;
    if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
        entryNo = mmu_utlb_lookup_vpn_asid( addr );
    } else {
        entryNo = mmu_utlb_lookup_vpn( addr );
    }
    switch(entryNo) {
    case -1:
        MMU_TLB_WRITE_MISS_ERROR(addr);
        return FALSE;
    case -2:
        MMU_TLB_MULTI_HIT_ERROR(addr);
        return FALSE;
    default:
        if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
            : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
            /* protection violation */
            MMU_TLB_WRITE_PROT_ERROR(addr);
            return FALSE;
        }

        if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
            MMU_TLB_INITIAL_WRITE_ERROR(addr);
            return FALSE;
        }

        /* finally generate the target address */
        target = ((mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
                  (addr & (~mmu_utlb[entryNo].mask))) & 0xFFFFFFE0;
    }

    ext_address_space[target>>12]->write_burst( target, src );
//    mem_copy_to_sh4( target, src, 32 );
    return TRUE;
}