filename     src/sh4/mmu.c
changeset    939:6f2302afeb89
prev         934:3acd3b3ee6d1
next         943:9a277733eafa
author       nkeynes
date         Sat Jan 03 03:30:26 2009 +0000
branch       lxdream-mem
permissions  -rw-r--r--
last change  MMU work-in-progress
* Move SDRAM out into separate sdram.c
* Move all page-table management into mmu.c
* Convert UTLB management to use the new page-tables
* Rip out all calls to mmu_vma_to_phys_* and replace with direct access
1.1 --- a/src/sh4/mmu.c Fri Dec 26 14:25:23 2008 +0000
1.2 +++ b/src/sh4/mmu.c Sat Jan 03 03:30:26 2009 +0000
1.3 @@ -1,7 +1,8 @@
1.4 /**
1.5 * $Id$
1.6 *
1.7 - * MMU implementation
1.8 + * SH4 MMU implementation based on address space page maps. This module
1.9 + * is responsible for all address decoding functions.
1.10 *
1.11 * Copyright (c) 2005 Nathan Keynes.
1.12 *
1.13 @@ -26,169 +27,129 @@
1.14 #include "mem.h"
1.15 #include "mmu.h"
1.16
1.17 -#ifdef HAVE_FRAME_ADDRESS
1.18 -#define RETURN_VIA(exc) do{ *(((void **)__builtin_frame_address(0))+1) = exc; return; } while(0)
1.19 -#else
1.20 -#define RETURN_VIA(exc) return MMU_VMA_ERROR
1.21 -#endif
1.22 -
1.23 -/* The MMU (practically unique in the system) is allowed to raise exceptions
1.24 - * directly, with a return code indicating that one was raised and the caller
1.25 - * had better behave appropriately.
1.26 - */
1.27 #define RAISE_TLB_ERROR(code, vpn) \
1.28 MMIO_WRITE(MMU, TEA, vpn); \
1.29 MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
1.30 sh4_raise_tlb_exception(code);
1.31 -
1.32 #define RAISE_MEM_ERROR(code, vpn) \
1.33 MMIO_WRITE(MMU, TEA, vpn); \
1.34 MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
1.35 sh4_raise_exception(code);
1.36 -
1.37 -#define RAISE_OTHER_ERROR(code) \
1.38 - sh4_raise_exception(code);
1.39 -/**
1.40 - * Abort with a non-MMU address error. Caused by user-mode code attempting
1.41 - * to access privileged regions, or alignment faults.
1.42 - */
1.43 -#define MMU_READ_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_READ)
1.44 -#define MMU_WRITE_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_WRITE)
1.45 -
1.46 -#define MMU_TLB_READ_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_READ, vpn)
1.47 -#define MMU_TLB_WRITE_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, vpn)
1.48 -#define MMU_TLB_INITIAL_WRITE_ERROR(vpn) RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, vpn)
1.49 -#define MMU_TLB_READ_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_READ, vpn)
1.50 -#define MMU_TLB_WRITE_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, vpn)
1.51 -#define MMU_TLB_MULTI_HIT_ERROR(vpn) sh4_raise_reset(EXC_TLB_MULTI_HIT); \
1.52 +#define RAISE_TLB_MULTIHIT_ERROR(vpn) \
1.53 + sh4_raise_reset(EXC_TLB_MULTI_HIT); \
1.54 MMIO_WRITE(MMU, TEA, vpn); \
1.55 MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00)));
1.56
1.57 +/* An entry is a 1K entry if it's one of the mmu_utlb_1k_pages entries */
1.58 +#define IS_1K_PAGE_ENTRY(ent) ( ((uintptr_t)(((struct utlb_1k_entry *)ent) - &mmu_utlb_1k_pages[0])) < UTLB_ENTRY_COUNT )
1.59
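
Editor's note: the IS_1K_PAGE_ENTRY macro above classifies a page-function pointer by checking whether it lies inside the mmu_utlb_1k_pages array - the pointer subtraction yields an index that is in range only for pool members. A minimal stand-alone sketch of the same membership test, with illustrative names (struct entry, POOL_SIZE) in place of the real ones:

    #include <stdint.h>

    struct entry { int payload; };     /* stand-in for struct utlb_1k_entry */
    #define POOL_SIZE 64               /* stand-in for UTLB_ENTRY_COUNT */
    static struct entry pool[POOL_SIZE];

    /* Non-members yield an out-of-range index: a pointer below the array
     * gives a negative difference, which wraps huge under the unsigned cast. */
    static int is_pool_entry( void *p )
    {
        return (uintptr_t)((struct entry *)p - &pool[0]) < POOL_SIZE;
    }
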
1.60 -#define OCRAM_START (0x1C000000>>LXDREAM_PAGE_BITS)
1.61 -#define OCRAM_END (0x20000000>>LXDREAM_PAGE_BITS)
1.62 +/* Primary address space (used directly by SH4 cores) */
1.63 +mem_region_fn_t *sh4_address_space;
1.64 +mem_region_fn_t *sh4_user_address_space;
1.65
1.66 +/* MMU-mapped storequeue targets. Only used with TLB on */
1.67 +mem_region_fn_t *storequeue_address_space;
1.68 +mem_region_fn_t *storequeue_user_address_space;
1.69
1.70 +/* Accessed from the UTLB accessor methods */
1.71 +uint32_t mmu_urc;
1.72 +uint32_t mmu_urb;
1.73 +
1.74 +/* Module globals */
1.75 static struct itlb_entry mmu_itlb[ITLB_ENTRY_COUNT];
1.76 static struct utlb_entry mmu_utlb[UTLB_ENTRY_COUNT];
1.77 -static uint32_t mmu_urc;
1.78 -static uint32_t mmu_urb;
1.79 +static struct utlb_page_entry mmu_utlb_pages[UTLB_ENTRY_COUNT];
1.80 static uint32_t mmu_lrui;
1.81 static uint32_t mmu_asid; // current asid
1.82
1.83 -static struct utlb_sort_entry mmu_utlb_sorted[UTLB_ENTRY_COUNT];
1.84 -static uint32_t mmu_utlb_entries; // Number of entries in mmu_utlb_sorted.
1.85 +/* Structures for 1K page handling */
1.86 +static struct utlb_1k_entry mmu_utlb_1k_pages[UTLB_ENTRY_COUNT];
1.87 +static int mmu_utlb_1k_free_list[UTLB_ENTRY_COUNT];
1.88 +static int mmu_utlb_1k_free_index;
1.89
1.90 -static sh4ptr_t cache = NULL;
1.91
1.92 +/* Function prototypes */
1.93 static void mmu_invalidate_tlb();
1.94 -static void mmu_utlb_sorted_reset();
1.95 -static void mmu_utlb_sorted_reload();
1.96 +static void mmu_utlb_register_all();
1.97 +static void mmu_utlb_remove_entry(int);
1.98 +static void mmu_utlb_insert_entry(int);
1.99 +static void mmu_register_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn );
1.100 +static void mmu_register_user_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn );
1.101 +static void mmu_set_tlb_enabled( int tlb_on );
1.102 +static void mmu_set_tlb_asid( uint32_t asid );
1.103 +static void mmu_set_storequeue_protected( int protected );
1.104 +static gboolean mmu_utlb_map_pages( mem_region_fn_t priv_page, mem_region_fn_t user_page, sh4addr_t start_addr, int npages );
1.105 +static gboolean mmu_utlb_unmap_pages( gboolean unmap_user, sh4addr_t start_addr, int npages );
1.106 +static gboolean mmu_ext_page_remapped( sh4addr_t page, mem_region_fn_t fn, void *user_data );
1.107 +static void mmu_utlb_1k_init();
1.108 +static struct utlb_1k_entry *mmu_utlb_1k_alloc();
1.109 +static void mmu_utlb_1k_free( struct utlb_1k_entry *entry );
1.110
1.111 -static uint32_t get_mask_for_flags( uint32_t flags )
1.112 -{
1.113 - switch( flags & TLB_SIZE_MASK ) {
1.114 - case TLB_SIZE_1K: return MASK_1K;
1.115 - case TLB_SIZE_4K: return MASK_4K;
1.116 - case TLB_SIZE_64K: return MASK_64K;
1.117 - case TLB_SIZE_1M: return MASK_1M;
1.118 - default: return 0; /* Unreachable */
1.119 - }
1.120 -}
1.121 +static int32_t FASTCALL tlb_protected_read( sh4addr_t addr, void *exc );
1.122 +static void FASTCALL tlb_protected_write( sh4addr_t addr, uint32_t val, void *exc );
1.123 +static void FASTCALL tlb_initial_write( sh4addr_t addr, uint32_t val, void *exc );
1.124 +static uint32_t get_tlb_size_mask( uint32_t flags );
1.125 +static uint32_t get_tlb_size_pages( uint32_t flags );
1.126
1.127 -MMIO_REGION_READ_FN( MMU, reg )
1.128 -{
1.129 - reg &= 0xFFF;
1.130 - switch( reg ) {
1.131 - case MMUCR:
1.132 - return MMIO_READ( MMU, MMUCR) | (mmu_urc<<10) | (mmu_urb<<18) | (mmu_lrui<<26);
1.133 - default:
1.134 - return MMIO_READ( MMU, reg );
1.135 - }
1.136 -}
1.137
1.138 -MMIO_REGION_WRITE_FN( MMU, reg, val )
1.139 -{
1.140 - uint32_t tmp;
1.141 - reg &= 0xFFF;
1.142 - switch(reg) {
1.143 - case SH4VER:
1.144 - return;
1.145 - case PTEH:
1.146 - val &= 0xFFFFFCFF;
1.147 - if( (val & 0xFF) != mmu_asid ) {
1.148 - mmu_asid = val&0xFF;
1.149 - sh4_icache.page_vma = -1; // invalidate icache as asid has changed
1.150 - }
1.151 - break;
1.152 - case PTEL:
1.153 - val &= 0x1FFFFDFF;
1.154 - break;
1.155 - case PTEA:
1.156 - val &= 0x0000000F;
1.157 - break;
1.158 - case TRA:
1.159 - val &= 0x000003FC;
1.160 - break;
1.161 - case EXPEVT:
1.162 - case INTEVT:
1.163 - val &= 0x00000FFF;
1.164 - break;
1.165 - case MMUCR:
1.166 - if( val & MMUCR_TI ) {
1.167 - mmu_invalidate_tlb();
1.168 - }
1.169 - mmu_urc = (val >> 10) & 0x3F;
1.170 - mmu_urb = (val >> 18) & 0x3F;
1.171 - mmu_lrui = (val >> 26) & 0x3F;
1.172 - val &= 0x00000301;
1.173 - tmp = MMIO_READ( MMU, MMUCR );
1.174 - if( (val ^ tmp) & (MMUCR_AT|MMUCR_SV) ) {
1.175 - // AT flag has changed state - flush the xlt cache as all bets
1.176 - // are off now. We also need to force an immediate exit from the
1.177 - // current block
1.178 - MMIO_WRITE( MMU, MMUCR, val );
1.179 - sh4_flush_icache();
1.180 - }
1.181 - break;
1.182 - case CCR:
1.183 - CCN_set_cache_control( val );
1.184 - val &= 0x81A7;
1.185 - break;
1.186 - case MMUUNK1:
1.187 - /* Note that if the high bit is set, this appears to reset the machine.
1.188 - * Not emulating this behaviour yet until we know why...
1.189 - */
1.190 - val &= 0x00010007;
1.191 - break;
1.192 - case QACR0:
1.193 - case QACR1:
1.194 - val &= 0x0000001C;
1.195 - break;
1.196 - case PMCR1:
1.197 - PMM_write_control(0, val);
1.198 - val &= 0x0000C13F;
1.199 - break;
1.200 - case PMCR2:
1.201 - PMM_write_control(1, val);
1.202 - val &= 0x0000C13F;
1.203 - break;
1.204 - default:
1.205 - break;
1.206 - }
1.207 - MMIO_WRITE( MMU, reg, val );
1.208 -}
1.209 +/*********************** Module public functions ****************************/
1.210
1.211 -
1.212 +/**
1.213 + * Allocate memory for the address space maps, and initialize them according
1.214 + * to the default (reset) values. (TLB is disabled by default)
1.215 + */
1.216 +
1.217 void MMU_init()
1.218 {
1.219 + sh4_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 256 );
1.220 + sh4_user_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 256 );
1.221 + storequeue_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 4 );
1.222 + storequeue_user_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 4 );
1.223 +
1.224 + mmu_set_tlb_enabled(0);
1.225 + mmu_register_user_mem_region( 0x80000000, 0x00000000, &mem_region_address_error );
1.226 + mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue);
1.227 +
1.228 + /* Setup P4 tlb/cache access regions */
1.229 + mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
1.230 + mmu_register_mem_region( 0xE4000000, 0xF0000000, &mem_region_unmapped );
1.231 + mmu_register_mem_region( 0xF0000000, 0xF1000000, &p4_region_icache_addr );
1.232 + mmu_register_mem_region( 0xF1000000, 0xF2000000, &p4_region_icache_data );
1.233 + mmu_register_mem_region( 0xF2000000, 0xF3000000, &p4_region_itlb_addr );
1.234 + mmu_register_mem_region( 0xF3000000, 0xF4000000, &p4_region_itlb_data );
1.235 + mmu_register_mem_region( 0xF4000000, 0xF5000000, &p4_region_ocache_addr );
1.236 + mmu_register_mem_region( 0xF5000000, 0xF6000000, &p4_region_ocache_data );
1.237 + mmu_register_mem_region( 0xF6000000, 0xF7000000, &p4_region_utlb_addr );
1.238 + mmu_register_mem_region( 0xF7000000, 0xF8000000, &p4_region_utlb_data );
1.239 + mmu_register_mem_region( 0xF8000000, 0x00000000, &mem_region_unmapped );
1.240 +
1.241 + /* Setup P4 control region */
1.242 + mmu_register_mem_region( 0xFF000000, 0xFF001000, &mmio_region_MMU.fn );
1.243 + mmu_register_mem_region( 0xFF100000, 0xFF101000, &mmio_region_PMM.fn );
1.244 + mmu_register_mem_region( 0xFF200000, 0xFF201000, &mmio_region_UBC.fn );
1.245 + mmu_register_mem_region( 0xFF800000, 0xFF801000, &mmio_region_BSC.fn );
1.246 + mmu_register_mem_region( 0xFF900000, 0xFFA00000, &mem_region_unmapped ); // SDMR2 + SDMR3
1.247 + mmu_register_mem_region( 0xFFA00000, 0xFFA01000, &mmio_region_DMAC.fn );
1.248 + mmu_register_mem_region( 0xFFC00000, 0xFFC01000, &mmio_region_CPG.fn );
1.249 + mmu_register_mem_region( 0xFFC80000, 0xFFC81000, &mmio_region_RTC.fn );
1.250 + mmu_register_mem_region( 0xFFD00000, 0xFFD01000, &mmio_region_INTC.fn );
1.251 + mmu_register_mem_region( 0xFFD80000, 0xFFD81000, &mmio_region_TMU.fn );
1.252 + mmu_register_mem_region( 0xFFE00000, 0xFFE01000, &mmio_region_SCI.fn );
1.253 + mmu_register_mem_region( 0xFFE80000, 0xFFE81000, &mmio_region_SCIF.fn );
1.254 + mmu_register_mem_region( 0xFFF00000, 0xFFF01000, &mem_region_unmapped ); // H-UDI
1.255 +
1.256 + register_mem_page_remapped_hook( mmu_ext_page_remapped, NULL );
1.257 + mmu_utlb_1k_init();
1.258 +
1.259 + /* Ensure the code regions are executable */
1.260 + mem_unprotect( mmu_utlb_pages, sizeof(mmu_utlb_pages) );
1.261 + mem_unprotect( mmu_utlb_1k_pages, sizeof(mmu_utlb_1k_pages) );
1.262 }
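
Editor's note on the tables allocated in MMU_init above: each address space is a flat array of mem_region_fn_t with one slot per 4KB page, so every memory access decodes with a single shift and an indirect call through the region's function table. A hedged sketch of the dispatch pattern (the read_long member name is an assumption, inferred from the write_long/write_burst usage elsewhere in this file):

    /* Sketch: decoding a 32-bit SH4 address through the page map.
     * sh4_address_space holds 2^20 entries (4GB of 4KB pages). */
    static inline int32_t sh4_read_long_example( sh4addr_t addr )
    {
        mem_region_fn_t region = sh4_address_space[addr >> 12];
        return region->read_long( addr );
    }
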
1.263
1.264 void MMU_reset()
1.265 {
1.266 mmio_region_MMU_write( CCR, 0 );
1.267 mmio_region_MMU_write( MMUCR, 0 );
1.268 - mmu_utlb_sorted_reload();
1.269 }
1.270
1.271 void MMU_save_state( FILE *f )
1.272 @@ -221,133 +182,20 @@
1.273 if( fread( &mmu_asid, sizeof(mmu_asid), 1, f ) != 1 ) {
1.274 return 1;
1.275 }
1.276 - mmu_utlb_sorted_reload();
1.277 +
1.278 + uint32_t mmucr = MMIO_READ(MMU,MMUCR);
1.279 + mmu_set_tlb_enabled(mmucr&MMUCR_AT);
1.280 + mmu_set_storequeue_protected(mmucr&MMUCR_SQMD);
1.281 return 0;
1.282 }
1.283
1.284 -
1.285 -/******************* Sorted TLB data structure ****************/
1.286 -/*
1.287 - * mmu_utlb_sorted maintains a list of all active (valid) entries,
1.288 - * sorted by masked VPN and then ASID. Multi-hit entries are resolved
1.289 - * ahead of time, and have -1 recorded as the corresponding PPN.
1.290 - *
1.291 - * FIXME: Multi-hit detection doesn't pick up cases where two pages
1.292 - * overlap due to different sizes (and don't share the same base
1.293 - * address).
1.294 - */
1.295 -static void mmu_utlb_sorted_reset()
1.296 -{
1.297 - mmu_utlb_entries = 0;
1.298 -}
1.299 -
1.300 -/**
1.301 - * Find an entry in the sorted table (VPN+ASID check).
1.302 - */
1.303 -static inline int mmu_utlb_sorted_find( sh4addr_t vma )
1.304 -{
1.305 - int low = 0;
1.306 - int high = mmu_utlb_entries;
1.307 - uint32_t lookup = (vma & 0xFFFFFC00) + mmu_asid;
1.308 -
1.309 - mmu_urc++;
1.310 - if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
1.311 - mmu_urc = 0;
1.312 - }
1.313 -
1.314 - while( low != high ) {
1.315 - int posn = (high+low)>>1;
1.316 - int masked = lookup & mmu_utlb_sorted[posn].mask;
1.317 - if( mmu_utlb_sorted[posn].key < masked ) {
1.318 - low = posn+1;
1.319 - } else if( mmu_utlb_sorted[posn].key > masked ) {
1.320 - high = posn;
1.321 - } else {
1.322 - return mmu_utlb_sorted[posn].entryNo;
1.323 - }
1.324 - }
1.325 - return -1;
1.326 -
1.327 -}
1.328 -
1.329 -static void mmu_utlb_insert_entry( int entry )
1.330 -{
1.331 - int low = 0;
1.332 - int high = mmu_utlb_entries;
1.333 - uint32_t key = (mmu_utlb[entry].vpn & mmu_utlb[entry].mask) + mmu_utlb[entry].asid;
1.334 -
1.335 - assert( mmu_utlb_entries < UTLB_ENTRY_COUNT );
1.336 - /* Find the insertion point */
1.337 - while( low != high ) {
1.338 - int posn = (high+low)>>1;
1.339 - if( mmu_utlb_sorted[posn].key < key ) {
1.340 - low = posn+1;
1.341 - } else if( mmu_utlb_sorted[posn].key > key ) {
1.342 - high = posn;
1.343 - } else {
1.344 - /* Exact match - multi-hit */
1.345 - mmu_utlb_sorted[posn].entryNo = -2;
1.346 - return;
1.347 - }
1.348 - } /* 0 2 4 6 */
1.349 - memmove( &mmu_utlb_sorted[low+1], &mmu_utlb_sorted[low],
1.350 - (mmu_utlb_entries - low) * sizeof(struct utlb_sort_entry) );
1.351 - mmu_utlb_sorted[low].key = key;
1.352 - mmu_utlb_sorted[low].mask = mmu_utlb[entry].mask | 0x000000FF;
1.353 - mmu_utlb_sorted[low].entryNo = entry;
1.354 - mmu_utlb_entries++;
1.355 -}
1.356 -
1.357 -static void mmu_utlb_remove_entry( int entry )
1.358 -{
1.359 - int low = 0;
1.360 - int high = mmu_utlb_entries;
1.361 - uint32_t key = (mmu_utlb[entry].vpn & mmu_utlb[entry].mask) + mmu_utlb[entry].asid;
1.362 - while( low != high ) {
1.363 - int posn = (high+low)>>1;
1.364 - if( mmu_utlb_sorted[posn].key < key ) {
1.365 - low = posn+1;
1.366 - } else if( mmu_utlb_sorted[posn].key > key ) {
1.367 - high = posn;
1.368 - } else {
1.369 - if( mmu_utlb_sorted[posn].entryNo == -2 ) {
1.370 - /* Multiple-entry recorded - rebuild the whole table minus entry */
1.371 - int i;
1.372 - mmu_utlb_entries = 0;
1.373 - for( i=0; i< UTLB_ENTRY_COUNT; i++ ) {
1.374 - if( i != entry && (mmu_utlb[i].flags & TLB_VALID) ) {
1.375 - mmu_utlb_insert_entry(i);
1.376 - }
1.377 - }
1.378 - } else {
1.379 - mmu_utlb_entries--;
1.380 - memmove( &mmu_utlb_sorted[posn], &mmu_utlb_sorted[posn+1],
1.381 - (mmu_utlb_entries - posn)*sizeof(struct utlb_sort_entry) );
1.382 - }
1.383 - return;
1.384 - }
1.385 - }
1.386 - assert( 0 && "UTLB key not found!" );
1.387 -}
1.388 -
1.389 -static void mmu_utlb_sorted_reload()
1.390 -{
1.391 - int i;
1.392 - mmu_utlb_entries = 0;
1.393 - for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
1.394 - if( mmu_utlb[i].flags & TLB_VALID )
1.395 - mmu_utlb_insert_entry( i );
1.396 - }
1.397 -}
1.398 -
1.399 -/* TLB maintanence */
1.400 -
1.401 /**
1.402 * LDTLB instruction implementation. Copies PTEH, PTEL and PTEA into the UTLB
1.403 * entry identified by MMUCR.URC. Does not modify MMUCR or the ITLB.
1.404 */
1.405 void MMU_ldtlb()
1.406 {
1.407 + mmu_urc %= mmu_urb;
1.408 if( mmu_utlb[mmu_urc].flags & TLB_VALID )
1.409 mmu_utlb_remove_entry( mmu_urc );
1.410 mmu_utlb[mmu_urc].vpn = MMIO_READ(MMU, PTEH) & 0xFFFFFC00;
1.411 @@ -355,176 +203,539 @@
1.412 mmu_utlb[mmu_urc].ppn = MMIO_READ(MMU, PTEL) & 0x1FFFFC00;
1.413 mmu_utlb[mmu_urc].flags = MMIO_READ(MMU, PTEL) & 0x00001FF;
1.414 mmu_utlb[mmu_urc].pcmcia = MMIO_READ(MMU, PTEA);
1.415 - mmu_utlb[mmu_urc].mask = get_mask_for_flags(mmu_utlb[mmu_urc].flags);
1.416 - if( mmu_utlb[mmu_urc].ppn >= 0x1C000000 )
1.417 - mmu_utlb[mmu_urc].ppn |= 0xE0000000;
1.418 + mmu_utlb[mmu_urc].mask = get_tlb_size_mask(mmu_utlb[mmu_urc].flags);
1.419 if( mmu_utlb[mmu_urc].flags & TLB_VALID )
1.420 mmu_utlb_insert_entry( mmu_urc );
1.421 }
1.422
1.423 +
1.424 +MMIO_REGION_READ_FN( MMU, reg )
1.425 +{
1.426 + reg &= 0xFFF;
1.427 + switch( reg ) {
1.428 + case MMUCR:
1.429 + mmu_urc %= mmu_urb;
1.430 + return MMIO_READ( MMU, MMUCR) | (mmu_urc<<10) | ((mmu_urb&0x3F)<<18) | (mmu_lrui<<26);
1.431 + default:
1.432 + return MMIO_READ( MMU, reg );
1.433 + }
1.434 +}
1.435 +
1.436 +MMIO_REGION_WRITE_FN( MMU, reg, val )
1.437 +{
1.438 + uint32_t tmp;
1.439 + reg &= 0xFFF;
1.440 + switch(reg) {
1.441 + case SH4VER:
1.442 + return;
1.443 + case PTEH:
1.444 + val &= 0xFFFFFCFF;
1.445 + if( (val & 0xFF) != mmu_asid ) {
1.446 + mmu_set_tlb_asid( val&0xFF );
1.447 + sh4_icache.page_vma = -1; // invalidate icache as asid has changed
1.448 + }
1.449 + break;
1.450 + case PTEL:
1.451 + val &= 0x1FFFFDFF;
1.452 + break;
1.453 + case PTEA:
1.454 + val &= 0x0000000F;
1.455 + break;
1.456 + case TRA:
1.457 + val &= 0x000003FC;
1.458 + break;
1.459 + case EXPEVT:
1.460 + case INTEVT:
1.461 + val &= 0x00000FFF;
1.462 + break;
1.463 + case MMUCR:
1.464 + if( val & MMUCR_TI ) {
1.465 + mmu_invalidate_tlb();
1.466 + }
1.467 + mmu_urc = (val >> 10) & 0x3F;
1.468 + mmu_urb = (val >> 18) & 0x3F;
1.469 + if( mmu_urb == 0 ) {
1.470 + mmu_urb = 0x40;
1.471 + }
1.472 + mmu_lrui = (val >> 26) & 0x3F;
1.473 + val &= 0x00000301;
1.474 + tmp = MMIO_READ( MMU, MMUCR );
1.475 + if( (val ^ tmp) & (MMUCR_SQMD) ) {
1.476 + mmu_set_storequeue_protected( val & MMUCR_SQMD );
1.477 + }
1.478 + if( (val ^ tmp) & (MMUCR_AT) ) {
1.479 + // AT flag has changed state - flush the xlt cache as all bets
1.480 + // are off now. We also need to force an immediate exit from the
1.481 + // current block
1.482 + mmu_set_tlb_enabled( val & MMUCR_AT );
1.483 + MMIO_WRITE( MMU, MMUCR, val );
1.484 + sh4_flush_icache();
1.485 + }
1.486 + break;
1.487 + case CCR:
1.488 + CCN_set_cache_control( val );
1.489 + val &= 0x81A7;
1.490 + break;
1.491 + case MMUUNK1:
1.492 + /* Note that if the high bit is set, this appears to reset the machine.
1.493 + * Not emulating this behaviour yet until we know why...
1.494 + */
1.495 + val &= 0x00010007;
1.496 + break;
1.497 + case QACR0:
1.498 + case QACR1:
1.499 + val &= 0x0000001C;
1.500 + break;
1.501 + case PMCR1:
1.502 + PMM_write_control(0, val);
1.503 + val &= 0x0000C13F;
1.504 + break;
1.505 + case PMCR2:
1.506 + PMM_write_control(1, val);
1.507 + val &= 0x0000C13F;
1.508 + break;
1.509 + default:
1.510 + break;
1.511 + }
1.512 + MMIO_WRITE( MMU, reg, val );
1.513 +}
1.514 +
1.515 +/********************** 1K Page handling ***********************/
1.516 +/* Since we use 4K pages as our native page size, 1K pages need a bit of extra
1.517 + * effort to manage - the extra cost is acceptable since most programs never
1.518 + * touch 1K pages, so we optimize for the common case.
1.519 + *
1.520 + * Implementation uses an intermediate page entry (the utlb_1k_entry) that
1.521 + * redirects requests to the 'real' page entry. These are allocated on an
1.522 + * as-needed basis, and returned to the pool when all subpages are empty.
1.523 + */
1.524 +static void mmu_utlb_1k_init()
1.525 +{
1.526 + int i;
1.527 + for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
1.528 + mmu_utlb_1k_free_list[i] = i;
1.529 + mmu_utlb_1k_init_vtable( &mmu_utlb_1k_pages[i] );
1.530 + }
1.531 + mmu_utlb_1k_free_index = 0;
1.532 +}
1.533 +
1.534 +static struct utlb_1k_entry *mmu_utlb_1k_alloc()
1.535 +{
1.536 + assert( mmu_utlb_1k_free_index < UTLB_ENTRY_COUNT );
1.537 +    struct utlb_1k_entry *entry = &mmu_utlb_1k_pages[mmu_utlb_1k_free_list[mmu_utlb_1k_free_index++]];
1.538 + return entry;
1.539 +}
1.540 +
1.541 +static void mmu_utlb_1k_free( struct utlb_1k_entry *ent )
1.542 +{
1.543 + unsigned int entryNo = ent - &mmu_utlb_1k_pages[0];
1.544 + assert( entryNo < UTLB_ENTRY_COUNT );
1.545 + assert( mmu_utlb_1k_free_index > 0 );
1.546 + mmu_utlb_1k_free_list[--mmu_utlb_1k_free_index] = entryNo;
1.547 +}
1.548 +
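
Editor's note: the allocator above draws from a fixed pool using an index free list - mmu_utlb_1k_free_index counts live allocations, and freeing pushes an entry's index back below that boundary so it is handed out next (LIFO). A generic sketch of the scheme with illustrative names:

    #define POOL_COUNT 64
    static int free_list[POOL_COUNT];  /* free_list[free_index..] hold free slot numbers */
    static int free_index;             /* number of entries currently allocated */

    static void pool_init(void)  { for( int i=0; i<POOL_COUNT; i++ ) free_list[i] = i; free_index = 0; }
    static int  pool_alloc(void) { return free_list[free_index++]; }  /* caller asserts capacity */
    static void pool_free(int n) { free_list[--free_index] = n; }     /* reused first on next alloc */
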
1.549 +
1.550 +/********************** Address space maintenance *************************/
1.551 +
1.552 +/**
1.553 + * The MMU accessor fast paths just increment URC on each access; fold it back into [0,URB) here when the architectural value is needed
1.554 + */
1.555 +static inline void mmu_urc_fixup()
1.556 +{
1.557 + mmu_urc %= mmu_urb;
1.558 +}
1.559 +
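
Worked example for the fixup above: the fast paths bump mmu_urc without bounds checks, so with mmu_urb == 0x40 a raw counter of 0x93 folds to 0x93 % 0x40 == 0x13 when the architectural value is observed:

    mmu_urc = 0x93; mmu_urb = 0x40;
    mmu_urc_fixup();                /* mmu_urc == 0x13 */
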
1.560 +static void mmu_register_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn )
1.561 +{
1.562 + int count = (end - start) >> 12;
1.563 + mem_region_fn_t *ptr = &sh4_address_space[start>>12];
1.564 + while( count-- > 0 ) {
1.565 + *ptr++ = fn;
1.566 + }
1.567 +}
1.568 +static void mmu_register_user_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn )
1.569 +{
1.570 + int count = (end - start) >> 12;
1.571 + mem_region_fn_t *ptr = &sh4_user_address_space[start>>12];
1.572 + while( count-- > 0 ) {
1.573 + *ptr++ = fn;
1.574 + }
1.575 +}
1.576 +
1.577 +static gboolean mmu_ext_page_remapped( sh4addr_t page, mem_region_fn_t fn, void *user_data )
1.578 +{
1.579 +    unsigned int i;    /* unsigned: loop bounds below exceed INT_MAX */
1.580 + if( (MMIO_READ(MMU,MMUCR)) & MMUCR_AT ) {
1.581 + /* TLB on */
1.582 + sh4_address_space[(page|0x80000000)>>12] = fn; /* Direct map to P1 and P2 */
1.583 + sh4_address_space[(page|0xA0000000)>>12] = fn;
1.584 + /* Scan UTLB and update any direct-referencing entries */
1.585 + } else {
1.586 + /* Direct map to U0, P0, P1, P2, P3 */
1.587 + for( i=0; i<= 0xC0000000; i+= 0x20000000 ) {
1.588 + sh4_address_space[(page|i)>>12] = fn;
1.589 + }
1.590 + for( i=0; i < 0x80000000; i+= 0x20000000 ) {
1.591 + sh4_user_address_space[(page|i)>>12] = fn;
1.592 + }
1.593 +    }
      +    return TRUE;   /* assumed: the remap hook's return value is not checked */
1.594 +}
1.595 +
1.596 +static void mmu_set_tlb_enabled( int tlb_on )
1.597 +{
1.598 + mem_region_fn_t *ptr, *uptr;
1.599 + int i;
1.600 +
1.601 + if( tlb_on ) {
1.602 + mmu_register_mem_region(0x00000000, 0x80000000, &mem_region_tlb_miss );
1.603 + mmu_register_mem_region(0xC0000000, 0xE0000000, &mem_region_tlb_miss );
1.604 + mmu_register_user_mem_region(0x00000000, 0x80000000, &mem_region_tlb_miss );
1.605 + for( i=0, ptr = storequeue_address_space, uptr = storequeue_user_address_space;
1.606 + i<0x04000000; i+= LXDREAM_PAGE_SIZE ) {
1.607 + *ptr++ = &mem_region_tlb_miss;
1.608 + *uptr++ = &mem_region_tlb_miss;
1.609 + }
1.610 + mmu_utlb_register_all();
1.611 + } else {
1.612 + for( i=0, ptr = sh4_address_space; i<7; i++, ptr += LXDREAM_PAGE_TABLE_ENTRIES ) {
1.613 + memcpy( ptr, ext_address_space, sizeof(mem_region_fn_t) * LXDREAM_PAGE_TABLE_ENTRIES );
1.614 + }
1.615 + for( i=0, ptr = sh4_user_address_space; i<4; i++, ptr += LXDREAM_PAGE_TABLE_ENTRIES ) {
1.616 + memcpy( ptr, ext_address_space, sizeof(mem_region_fn_t) * LXDREAM_PAGE_TABLE_ENTRIES );
1.617 + }
1.618 + }
1.619 +}
1.620 +
1.621 +static void mmu_set_storequeue_protected( int protected )
1.622 +{
1.623 + if( protected ) {
1.624 + mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &mem_region_address_error );
1.625 + } else {
1.626 + mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
1.627 + }
1.628 +}
1.629 +
1.630 +static void mmu_set_tlb_asid( uint32_t asid )
1.631 +{
1.632 + /* Scan for pages that need to be remapped */
1.633 + int i;
1.634 + if( IS_SV_ENABLED() ) {
1.635 + // FIXME: Priv pages don't change - only user pages are mapped in/out
1.636 + for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
1.637 + if( mmu_utlb[i].flags & TLB_VALID ) {
1.638 + if( (mmu_utlb[i].flags & TLB_SHARE) == 0 ) {
1.639 + if( mmu_utlb[i].asid == mmu_asid ) { // Matches old ASID - unmap out
1.640 + mmu_utlb_unmap_pages( TRUE, mmu_utlb[i].vpn&mmu_utlb[i].mask,
1.641 + get_tlb_size_pages(mmu_utlb[i].flags) );
1.642 + } else if( mmu_utlb[i].asid == asid ) { // Matches new ASID - map in
1.643 + mmu_utlb_map_pages( NULL, mmu_utlb_pages[i].user_fn,
1.644 + mmu_utlb[i].vpn&mmu_utlb[i].mask,
1.645 + get_tlb_size_pages(mmu_utlb[i].flags) );
1.646 + }
1.647 + }
1.648 + }
1.649 + }
1.650 + } else {
1.651 + // Remap both Priv+user pages
1.652 + for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
1.653 + if( mmu_utlb[i].flags & TLB_VALID ) {
1.654 + if( (mmu_utlb[i].flags & TLB_SHARE) == 0 ) {
1.655 + if( mmu_utlb[i].asid == mmu_asid ) { // Matches old ASID - unmap out
1.656 + mmu_utlb_unmap_pages( TRUE, mmu_utlb[i].vpn&mmu_utlb[i].mask,
1.657 + get_tlb_size_pages(mmu_utlb[i].flags) );
1.658 + } else if( mmu_utlb[i].asid == asid ) { // Matches new ASID - map in
1.659 + mmu_utlb_map_pages( &mmu_utlb_pages[i].fn, mmu_utlb_pages[i].user_fn,
1.660 + mmu_utlb[i].vpn&mmu_utlb[i].mask,
1.661 + get_tlb_size_pages(mmu_utlb[i].flags) );
1.662 + }
1.663 + }
1.664 + }
1.665 + }
1.666 + }
1.667 +
1.668 + mmu_asid = asid;
1.669 +}
1.670 +
1.671 +static uint32_t get_tlb_size_mask( uint32_t flags )
1.672 +{
1.673 + switch( flags & TLB_SIZE_MASK ) {
1.674 + case TLB_SIZE_1K: return MASK_1K;
1.675 + case TLB_SIZE_4K: return MASK_4K;
1.676 + case TLB_SIZE_64K: return MASK_64K;
1.677 + case TLB_SIZE_1M: return MASK_1M;
1.678 + default: return 0; /* Unreachable */
1.679 + }
1.680 +}
1.681 +static uint32_t get_tlb_size_pages( uint32_t flags )
1.682 +{
1.683 + switch( flags & TLB_SIZE_MASK ) {
1.684 + case TLB_SIZE_1K: return 0;
1.685 + case TLB_SIZE_4K: return 1;
1.686 + case TLB_SIZE_64K: return 16;
1.687 + case TLB_SIZE_1M: return 256;
1.688 + default: return 0; /* Unreachable */
1.689 + }
1.690 +}
1.691 +
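
Worked example for the two helpers above: each SH4 page size is expressed as a count of 4KB host pages, with 1K returning 0 as a sentinel that routes callers into the subpage path:

    get_tlb_size_pages( TLB_SIZE_4K );    /* == 1   host page       */
    get_tlb_size_pages( TLB_SIZE_64K );   /* == 16  host pages      */
    get_tlb_size_pages( TLB_SIZE_1M );    /* == 256 host pages      */
    get_tlb_size_pages( TLB_SIZE_1K );    /* == 0 - 1K subpage path */
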
1.692 +/**
1.693 + * Add a new TLB entry mapping to the address space table. If any of the pages
1.694 + * are already mapped, they are mapped to the TLB multi-hit page instead.
1.695 + * @return FALSE if a TLB multihit situation was detected, otherwise TRUE.
1.696 + */
1.697 +static gboolean mmu_utlb_map_pages( mem_region_fn_t priv_page, mem_region_fn_t user_page, sh4addr_t start_addr, int npages )
1.698 +{
1.699 + mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
1.700 + mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
1.701 + gboolean mapping_ok = TRUE;
1.702 + int i;
1.703 +
1.704 + if( (start_addr & 0xFC000000) == 0xE0000000 ) {
1.705 + /* Storequeue mapping */
1.706 + ptr = &storequeue_address_space[(start_addr-0xE0000000) >> 12];
1.707 + uptr = &storequeue_user_address_space[(start_addr-0xE0000000) >> 12];
1.708 + } else if( (start_addr & 0xE0000000) == 0xC0000000 ) {
1.709 + user_page = NULL; /* No user access to P3 region */
1.710 + } else if( start_addr >= 0x80000000 ) {
1.711 + return TRUE; // No mapping - legal but meaningless
1.712 + }
1.713 +
1.714 + if( npages == 0 ) {
1.715 + struct utlb_1k_entry *ent;
1.716 + int i, idx = (start_addr >> 10) & 0x03;
1.717 + if( IS_1K_PAGE_ENTRY(*ptr) ) {
1.718 + ent = (struct utlb_1k_entry *)*ptr;
1.719 + } else {
1.720 + ent = mmu_utlb_1k_alloc();
1.721 + /* New 1K struct - init to previous contents of region */
1.722 + for( i=0; i<4; i++ ) {
1.723 + ent->subpages[i] = *ptr;
1.724 + ent->user_subpages[i] = *uptr;
1.725 + }
1.726 + *ptr = &ent->fn;
1.727 + *uptr = &ent->user_fn;
1.728 + }
1.729 +
1.730 + if( priv_page != NULL ) {
1.731 + if( ent->subpages[idx] == &mem_region_tlb_miss ) {
1.732 + ent->subpages[idx] = priv_page;
1.733 + } else {
1.734 + mapping_ok = FALSE;
1.735 + ent->subpages[idx] = &mem_region_tlb_multihit;
1.736 + }
1.737 + }
1.738 + if( user_page != NULL ) {
1.739 + if( ent->user_subpages[idx] == &mem_region_tlb_miss ) {
1.740 + ent->user_subpages[idx] = user_page;
1.741 + } else {
1.742 + mapping_ok = FALSE;
1.743 + ent->user_subpages[idx] = &mem_region_tlb_multihit;
1.744 + }
1.745 + }
1.746 +
1.747 + } else {
1.748 +
1.749 + if( user_page == NULL ) {
1.750 + /* Privileged mapping only */
1.751 + for( i=0; i<npages; i++ ) {
1.752 + if( *ptr == &mem_region_tlb_miss ) {
1.753 + *ptr++ = priv_page;
1.754 + } else {
1.755 + mapping_ok = FALSE;
1.756 + *ptr++ = &mem_region_tlb_multihit;
1.757 + }
1.758 + }
1.759 + } else if( priv_page == NULL ) {
1.760 + /* User mapping only (eg ASID change remap) */
1.761 + for( i=0; i<npages; i++ ) {
1.762 + if( *uptr == &mem_region_tlb_miss ) {
1.763 + *uptr++ = user_page;
1.764 + } else {
1.765 + mapping_ok = FALSE;
1.766 + *uptr++ = &mem_region_tlb_multihit;
1.767 + }
1.768 + }
1.769 + } else {
1.770 + for( i=0; i<npages; i++ ) {
1.771 + if( *ptr == &mem_region_tlb_miss ) {
1.772 + *ptr++ = priv_page;
1.773 + *uptr++ = user_page;
1.774 + } else {
1.775 + mapping_ok = FALSE;
1.776 + *ptr++ = &mem_region_tlb_multihit;
1.777 + *uptr++ = &mem_region_tlb_multihit;
1.778 + }
1.779 + }
1.780 + }
1.781 + }
1.782 + return mapping_ok;
1.783 +}
1.784 +
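
Usage sketch for mmu_utlb_map_pages above, mirroring how mmu_utlb_insert_entry calls it later in this file (entry 0 and the address are purely illustrative): a FALSE return means some slot already held a mapping and has been redirected to the multi-hit region, which raises the reset exception if the guest actually touches the page.

    if( !mmu_utlb_map_pages( &mmu_utlb_pages[0].fn, mmu_utlb_pages[0].user_fn,
                             0x0C000000, 1 ) ) {
        /* Overlap detected: clashing slots now point at mem_region_tlb_multihit */
    }
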
1.785 +/**
1.786 + * Remove a previous TLB mapping, replacing the affected pages with the TLB miss region.
1.787 + * @return FALSE if any pages were previously mapped to the TLB multihit page,
1.788 + * otherwise TRUE. In either case, all pages in the region are cleared to TLB miss.
1.789 + */
1.790 +static gboolean mmu_utlb_unmap_pages( gboolean unmap_user, sh4addr_t start_addr, int npages )
1.791 +{
1.792 + mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
1.793 + mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
1.794 + gboolean unmapping_ok = TRUE;
1.795 + int i;
1.796 +
1.797 + if( (start_addr & 0xFC000000) == 0xE0000000 ) {
1.798 + /* Storequeue mapping */
1.799 + ptr = &storequeue_address_space[(start_addr-0xE0000000) >> 12];
1.800 + uptr = &storequeue_user_address_space[(start_addr-0xE0000000) >> 12];
1.801 + } else if( (start_addr & 0xE0000000) == 0xC0000000 ) {
1.802 + unmap_user = FALSE;
1.803 + } else if( start_addr >= 0x80000000 ) {
1.804 + return TRUE; // No mapping - legal but meaningless
1.805 + }
1.806 +
1.807 + if( npages == 0 ) { // 1K page
1.808 + assert( IS_1K_PAGE_ENTRY( *ptr ) );
1.809 + struct utlb_1k_entry *ent = (struct utlb_1k_entry *)*ptr;
1.810 + int i, idx = (start_addr >> 10) & 0x03, mergeable=1;
1.811 + if( ent->subpages[idx] == &mem_region_tlb_multihit ) {
1.812 + unmapping_ok = FALSE;
1.813 + }
1.814 + ent->subpages[idx] = &mem_region_tlb_miss;
1.815 + ent->user_subpages[idx] = &mem_region_tlb_miss;
1.816 +
1.817 + /* If all 4 subpages have the same content, merge them together and
1.818 + * release the 1K entry
1.819 + */
1.820 + mem_region_fn_t priv_page = ent->subpages[0];
1.821 + mem_region_fn_t user_page = ent->user_subpages[0];
1.822 + for( i=1; i<4; i++ ) {
1.823 + if( priv_page != ent->subpages[i] || user_page != ent->user_subpages[i] ) {
1.824 + mergeable = 0;
1.825 + break;
1.826 + }
1.827 + }
1.828 + if( mergeable ) {
1.829 + mmu_utlb_1k_free(ent);
1.830 + *ptr = priv_page;
1.831 + *uptr = user_page;
1.832 + }
1.833 + } else {
1.834 + if( !unmap_user ) {
1.835 + /* Privileged (un)mapping only */
1.836 + for( i=0; i<npages; i++ ) {
1.837 + if( *ptr == &mem_region_tlb_multihit ) {
1.838 + unmapping_ok = FALSE;
1.839 + }
1.840 + *ptr++ = &mem_region_tlb_miss;
1.841 + }
1.842 + } else {
1.843 + for( i=0; i<npages; i++ ) {
1.844 + if( *ptr == &mem_region_tlb_multihit ) {
1.845 + unmapping_ok = FALSE;
1.846 + }
1.847 + *ptr++ = &mem_region_tlb_miss;
1.848 + *uptr++ = &mem_region_tlb_miss;
1.849 + }
1.850 + }
1.851 + }
1.852 + return unmapping_ok;
1.853 +}
1.854 +
1.855 +static void mmu_utlb_insert_entry( int entry )
1.856 +{
1.857 + struct utlb_entry *ent = &mmu_utlb[entry];
1.858 + mem_region_fn_t page = &mmu_utlb_pages[entry].fn;
1.859 + mem_region_fn_t upage;
1.860 + sh4addr_t start_addr = ent->vpn & ent->mask;
1.861 + int npages = get_tlb_size_pages(ent->flags);
1.862 +
1.863 + if( (ent->flags & TLB_USERMODE) == 0 ) {
1.864 + upage = &mem_region_user_protected;
1.865 + } else {
1.866 + upage = page;
1.867 + }
1.868 + mmu_utlb_pages[entry].user_fn = upage;
1.869 +
1.870 + if( (ent->flags & TLB_WRITABLE) == 0 ) {
1.871 + page->write_long = (mem_write_fn_t)tlb_protected_write;
1.872 + page->write_word = (mem_write_fn_t)tlb_protected_write;
1.873 + page->write_byte = (mem_write_fn_t)tlb_protected_write;
1.874 + page->write_burst = (mem_write_burst_fn_t)tlb_protected_write;
1.875 + mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], FALSE );
1.876 + } else if( (ent->flags & TLB_DIRTY) == 0 ) {
1.877 + page->write_long = (mem_write_fn_t)tlb_initial_write;
1.878 + page->write_word = (mem_write_fn_t)tlb_initial_write;
1.879 + page->write_byte = (mem_write_fn_t)tlb_initial_write;
1.880 + page->write_burst = (mem_write_burst_fn_t)tlb_initial_write;
1.881 + mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], FALSE );
1.882 + } else {
1.883 + mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], TRUE );
1.884 + }
1.885 +
1.886 + /* Is page visible? */
1.887 + if( (ent->flags & TLB_SHARE) || ent->asid == mmu_asid ) {
1.888 + mmu_utlb_map_pages( page, upage, start_addr, npages );
1.889 + } else if( IS_SV_ENABLED() ) {
1.890 + mmu_utlb_map_pages( page, NULL, start_addr, npages );
1.891 + }
1.892 +}
1.893 +
1.894 +static void mmu_utlb_remove_entry( int entry )
1.895 +{
1.896 +    int j;
1.897 +    struct utlb_entry *ent = &mmu_utlb[entry];
1.898 +    sh4addr_t start_addr = ent->vpn&ent->mask;
1.901 +    gboolean unmap_user;
1.902 +    int npages = get_tlb_size_pages(ent->flags);
1.903 +
1.904 + if( (ent->flags & TLB_SHARE) || ent->asid == mmu_asid ) {
1.905 + unmap_user = TRUE;
1.906 + } else if( IS_SV_ENABLED() ) {
1.907 + unmap_user = FALSE;
1.908 + } else {
1.909 + return; // Not mapped
1.910 + }
1.911 +
1.912 + gboolean clean_unmap = mmu_utlb_unmap_pages( unmap_user, start_addr, npages );
1.913 +
1.914 + if( !clean_unmap ) {
1.915 + /* If we ran into a multi-hit, we now need to rescan the UTLB for the other entries
1.916 + * and remap them */
1.917 + for( j=0; j<UTLB_ENTRY_COUNT; j++ ) {
1.918 + uint32_t mask = MIN(mmu_utlb[j].mask, ent->mask);
1.919 + if( j != entry && (start_addr & mask) == (mmu_utlb[j].vpn & mask) ) {
1.920 +            /* FIXME: remapping of the overlapping entry is not yet implemented */
1.921 + }
1.922 + }
1.923 + }
1.924 +}
1.925 +
1.926 +static void mmu_utlb_register_all()
1.927 +{
1.928 + int i;
1.929 + for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
1.930 + if( mmu_utlb[i].flags & TLB_VALID )
1.931 + mmu_utlb_insert_entry( i );
1.932 + }
1.933 +}
1.934 +
1.935 static void mmu_invalidate_tlb()
1.936 {
1.937 int i;
1.938 for( i=0; i<ITLB_ENTRY_COUNT; i++ ) {
1.939 mmu_itlb[i].flags &= (~TLB_VALID);
1.940 }
1.941 + if( IS_TLB_ENABLED() ) {
1.942 + for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
1.943 + if( mmu_utlb[i].flags & TLB_VALID ) {
1.944 + mmu_utlb_remove_entry( i );
1.945 + }
1.946 + }
1.947 + }
1.948 for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
1.949 mmu_utlb[i].flags &= (~TLB_VALID);
1.950 }
1.951 - mmu_utlb_entries = 0;
1.952 -}
1.953 -
1.954 -#define ITLB_ENTRY(addr) ((addr>>7)&0x03)
1.955 -
1.956 -int32_t FASTCALL mmu_itlb_addr_read( sh4addr_t addr )
1.957 -{
1.958 - struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
1.959 - return ent->vpn | ent->asid | (ent->flags & TLB_VALID);
1.960 -}
1.961 -int32_t FASTCALL mmu_itlb_data_read( sh4addr_t addr )
1.962 -{
1.963 - struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
1.964 - return (ent->ppn & 0x1FFFFC00) | ent->flags;
1.965 -}
1.966 -
1.967 -void FASTCALL mmu_itlb_addr_write( sh4addr_t addr, uint32_t val )
1.968 -{
1.969 - struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
1.970 - ent->vpn = val & 0xFFFFFC00;
1.971 - ent->asid = val & 0x000000FF;
1.972 - ent->flags = (ent->flags & ~(TLB_VALID)) | (val&TLB_VALID);
1.973 -}
1.974 -
1.975 -void FASTCALL mmu_itlb_data_write( sh4addr_t addr, uint32_t val )
1.976 -{
1.977 - struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
1.978 - ent->ppn = val & 0x1FFFFC00;
1.979 - ent->flags = val & 0x00001DA;
1.980 - ent->mask = get_mask_for_flags(val);
1.981 - if( ent->ppn >= 0x1C000000 )
1.982 - ent->ppn |= 0xE0000000;
1.983 -}
1.984 -
1.985 -#define UTLB_ENTRY(addr) ((addr>>8)&0x3F)
1.986 -#define UTLB_ASSOC(addr) (addr&0x80)
1.987 -#define UTLB_DATA2(addr) (addr&0x00800000)
1.988 -
1.989 -int32_t FASTCALL mmu_utlb_addr_read( sh4addr_t addr )
1.990 -{
1.991 - struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
1.992 - return ent->vpn | ent->asid | (ent->flags & TLB_VALID) |
1.993 - ((ent->flags & TLB_DIRTY)<<7);
1.994 -}
1.995 -int32_t FASTCALL mmu_utlb_data_read( sh4addr_t addr )
1.996 -{
1.997 - struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
1.998 - if( UTLB_DATA2(addr) ) {
1.999 - return ent->pcmcia;
1.1000 - } else {
1.1001 - return (ent->ppn&0x1FFFFC00) | ent->flags;
1.1002 - }
1.1003 -}
1.1004 -
1.1005 -/**
1.1006 - * Find a UTLB entry for the associative TLB write - same as the normal
1.1007 - * lookup but ignores the valid bit.
1.1008 - */
1.1009 -static inline int mmu_utlb_lookup_assoc( uint32_t vpn, uint32_t asid )
1.1010 -{
1.1011 - int result = -1;
1.1012 - unsigned int i;
1.1013 - for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
1.1014 - if( (mmu_utlb[i].flags & TLB_VALID) &&
1.1015 - ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
1.1016 - ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
1.1017 - if( result != -1 ) {
1.1018 - fprintf( stderr, "TLB Multi hit: %d %d\n", result, i );
1.1019 - return -2;
1.1020 - }
1.1021 - result = i;
1.1022 - }
1.1023 - }
1.1024 - return result;
1.1025 -}
1.1026 -
1.1027 -/**
1.1028 - * Find a ITLB entry for the associative TLB write - same as the normal
1.1029 - * lookup but ignores the valid bit.
1.1030 - */
1.1031 -static inline int mmu_itlb_lookup_assoc( uint32_t vpn, uint32_t asid )
1.1032 -{
1.1033 - int result = -1;
1.1034 - unsigned int i;
1.1035 - for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
1.1036 - if( (mmu_itlb[i].flags & TLB_VALID) &&
1.1037 - ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
1.1038 - ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
1.1039 - if( result != -1 ) {
1.1040 - return -2;
1.1041 - }
1.1042 - result = i;
1.1043 - }
1.1044 - }
1.1045 - return result;
1.1046 -}
1.1047 -
1.1048 -void FASTCALL mmu_utlb_addr_write( sh4addr_t addr, uint32_t val )
1.1049 -{
1.1050 - if( UTLB_ASSOC(addr) ) {
1.1051 - int utlb = mmu_utlb_lookup_assoc( val, mmu_asid );
1.1052 - if( utlb >= 0 ) {
1.1053 - struct utlb_entry *ent = &mmu_utlb[utlb];
1.1054 - uint32_t old_flags = ent->flags;
1.1055 - ent->flags = ent->flags & ~(TLB_DIRTY|TLB_VALID);
1.1056 - ent->flags |= (val & TLB_VALID);
1.1057 - ent->flags |= ((val & 0x200)>>7);
1.1058 - if( (old_flags & TLB_VALID) && !(ent->flags&TLB_VALID) ) {
1.1059 - mmu_utlb_remove_entry( utlb );
1.1060 - } else if( !(old_flags & TLB_VALID) && (ent->flags&TLB_VALID) ) {
1.1061 - mmu_utlb_insert_entry( utlb );
1.1062 - }
1.1063 - }
1.1064 -
1.1065 - int itlb = mmu_itlb_lookup_assoc( val, mmu_asid );
1.1066 - if( itlb >= 0 ) {
1.1067 - struct itlb_entry *ent = &mmu_itlb[itlb];
1.1068 - ent->flags = (ent->flags & (~TLB_VALID)) | (val & TLB_VALID);
1.1069 - }
1.1070 -
1.1071 - if( itlb == -2 || utlb == -2 ) {
1.1072 - MMU_TLB_MULTI_HIT_ERROR(addr);
1.1073 - return;
1.1074 - }
1.1075 - } else {
1.1076 - struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
1.1077 - if( ent->flags & TLB_VALID )
1.1078 - mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
1.1079 - ent->vpn = (val & 0xFFFFFC00);
1.1080 - ent->asid = (val & 0xFF);
1.1081 - ent->flags = (ent->flags & ~(TLB_DIRTY|TLB_VALID));
1.1082 - ent->flags |= (val & TLB_VALID);
1.1083 - ent->flags |= ((val & 0x200)>>7);
1.1084 - if( ent->flags & TLB_VALID )
1.1085 - mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
1.1086 - }
1.1087 -}
1.1088 -
1.1089 -void FASTCALL mmu_utlb_data_write( sh4addr_t addr, uint32_t val )
1.1090 -{
1.1091 - struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
1.1092 - if( UTLB_DATA2(addr) ) {
1.1093 - ent->pcmcia = val & 0x0000000F;
1.1094 - } else {
1.1095 - if( ent->flags & TLB_VALID )
1.1096 - mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
1.1097 - ent->ppn = (val & 0x1FFFFC00);
1.1098 - ent->flags = (val & 0x000001FF);
1.1099 - ent->mask = get_mask_for_flags(val);
1.1100 - if( mmu_utlb[mmu_urc].ppn >= 0x1C000000 )
1.1101 - mmu_utlb[mmu_urc].ppn |= 0xE0000000;
1.1102 - if( ent->flags & TLB_VALID )
1.1103 - mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
1.1104 - }
1.1105 }
1.1106
1.1107 /******************************************************************************/
1.1108 @@ -532,9 +743,22 @@
1.1109 /******************************************************************************/
1.1110
1.1111 /**
1.1112 - * The translations are excessively complicated, but unfortunately it's a
1.1113 - * complicated system. TODO: make this not be painfully slow.
1.1114 + * Translate a 32-bit address into a UTLB entry number. Does not check for
1.1115 + * page protection etc.
1.1116 + * @return the entryNo if found, -1 if not found, and -2 for a multi-hit.
1.1117 */
1.1118 +int mmu_utlb_entry_for_vpn( uint32_t vpn )
1.1119 +{
1.1120 + mem_region_fn_t fn = sh4_address_space[vpn>>12];
1.1121 + if( fn >= &mmu_utlb_pages[0].fn && fn < &mmu_utlb_pages[UTLB_ENTRY_COUNT].fn ) {
1.1122 + return ((struct utlb_page_entry *)fn) - &mmu_utlb_pages[0];
1.1123 + } else if( fn == &mem_region_tlb_multihit ) {
1.1124 + return -2;
1.1125 + } else {
1.1126 + return -1;
1.1127 + }
1.1128 +}
1.1129 +
1.1130
1.1131 /**
1.1132 * Perform the actual utlb lookup w/ asid matching.
1.1133 @@ -656,7 +880,7 @@
1.1134 }
1.1135
1.1136 if( result == -1 ) {
1.1137 - int utlbEntry = mmu_utlb_sorted_find( vpn );
1.1138 + int utlbEntry = mmu_utlb_entry_for_vpn( vpn );
1.1139 if( utlbEntry < 0 ) {
1.1140 return utlbEntry;
1.1141 } else {
1.1142 @@ -717,130 +941,6 @@
1.1143 return result;
1.1144 }
1.1145
1.1146 -#ifdef HAVE_FRAME_ADDRESS
1.1147 -sh4addr_t FASTCALL mmu_vma_to_phys_read( sh4vma_t addr, void *exc )
1.1148 -#else
1.1149 -sh4addr_t FASTCALL mmu_vma_to_phys_read( sh4vma_t addr )
1.1150 -#endif
1.1151 -{
1.1152 - uint32_t mmucr = MMIO_READ(MMU,MMUCR);
1.1153 - if( addr & 0x80000000 ) {
1.1154 - if( IS_SH4_PRIVMODE() ) {
1.1155 - if( addr >= 0xE0000000 ) {
1.1156 - return addr; /* P4 - passthrough */
1.1157 - } else if( addr < 0xC0000000 ) {
1.1158 - /* P1, P2 regions are pass-through (no translation) */
1.1159 - return VMA_TO_EXT_ADDR(addr);
1.1160 - }
1.1161 - } else {
1.1162 - if( addr >= 0xE0000000 && addr < 0xE4000000 &&
1.1163 - ((mmucr&MMUCR_SQMD) == 0) ) {
1.1164 - /* Conditional user-mode access to the store-queue (no translation) */
1.1165 - return addr;
1.1166 - }
1.1167 - MMU_READ_ADDR_ERROR();
1.1168 - RETURN_VIA(exc);
1.1169 - }
1.1170 - }
1.1171 -
1.1172 - if( (mmucr & MMUCR_AT) == 0 ) {
1.1173 - return VMA_TO_EXT_ADDR(addr);
1.1174 - }
1.1175 -
1.1176 - /* If we get this far, translation is required */
1.1177 - int entryNo;
1.1178 - if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
1.1179 - entryNo = mmu_utlb_sorted_find( addr );
1.1180 - } else {
1.1181 - entryNo = mmu_utlb_lookup_vpn( addr );
1.1182 - }
1.1183 -
1.1184 - switch(entryNo) {
1.1185 - case -1:
1.1186 - MMU_TLB_READ_MISS_ERROR(addr);
1.1187 - RETURN_VIA(exc);
1.1188 - case -2:
1.1189 - MMU_TLB_MULTI_HIT_ERROR(addr);
1.1190 - RETURN_VIA(exc);
1.1191 - default:
1.1192 - if( (mmu_utlb[entryNo].flags & TLB_USERMODE) == 0 &&
1.1193 - !IS_SH4_PRIVMODE() ) {
1.1194 - /* protection violation */
1.1195 - MMU_TLB_READ_PROT_ERROR(addr);
1.1196 - RETURN_VIA(exc);
1.1197 - }
1.1198 -
1.1199 - /* finally generate the target address */
1.1200 - return (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
1.1201 - (addr & (~mmu_utlb[entryNo].mask));
1.1202 - }
1.1203 -}
1.1204 -
1.1205 -#ifdef HAVE_FRAME_ADDRESS
1.1206 -sh4addr_t FASTCALL mmu_vma_to_phys_write( sh4vma_t addr, void *exc )
1.1207 -#else
1.1208 -sh4addr_t FASTCALL mmu_vma_to_phys_write( sh4vma_t addr )
1.1209 -#endif
1.1210 -{
1.1211 - uint32_t mmucr = MMIO_READ(MMU,MMUCR);
1.1212 - if( addr & 0x80000000 ) {
1.1213 - if( IS_SH4_PRIVMODE() ) {
1.1214 - if( addr >= 0xE0000000 ) {
1.1215 - return addr; /* P4 - passthrough */
1.1216 - } else if( addr < 0xC0000000 ) {
1.1217 - /* P1, P2 regions are pass-through (no translation) */
1.1218 - return VMA_TO_EXT_ADDR(addr);
1.1219 - }
1.1220 - } else {
1.1221 - if( addr >= 0xE0000000 && addr < 0xE4000000 &&
1.1222 - ((mmucr&MMUCR_SQMD) == 0) ) {
1.1223 - /* Conditional user-mode access to the store-queue (no translation) */
1.1224 - return addr;
1.1225 - }
1.1226 - MMU_WRITE_ADDR_ERROR();
1.1227 - RETURN_VIA(exc);
1.1228 - }
1.1229 - }
1.1230 -
1.1231 - if( (mmucr & MMUCR_AT) == 0 ) {
1.1232 - return VMA_TO_EXT_ADDR(addr);
1.1233 - }
1.1234 -
1.1235 - /* If we get this far, translation is required */
1.1236 - int entryNo;
1.1237 - if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
1.1238 - entryNo = mmu_utlb_sorted_find( addr );
1.1239 - } else {
1.1240 - entryNo = mmu_utlb_lookup_vpn( addr );
1.1241 - }
1.1242 -
1.1243 - switch(entryNo) {
1.1244 - case -1:
1.1245 - MMU_TLB_WRITE_MISS_ERROR(addr);
1.1246 - RETURN_VIA(exc);
1.1247 - case -2:
1.1248 - MMU_TLB_MULTI_HIT_ERROR(addr);
1.1249 - RETURN_VIA(exc);
1.1250 - default:
1.1251 - if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
1.1252 - : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
1.1253 - /* protection violation */
1.1254 - MMU_TLB_WRITE_PROT_ERROR(addr);
1.1255 - RETURN_VIA(exc);
1.1256 - }
1.1257 -
1.1258 - if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
1.1259 - MMU_TLB_INITIAL_WRITE_ERROR(addr);
1.1260 - RETURN_VIA(exc);
1.1261 - }
1.1262 -
1.1263 - /* finally generate the target address */
1.1264 - sh4addr_t pma = (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
1.1265 - (addr & (~mmu_utlb[entryNo].mask));
1.1266 - return pma;
1.1267 - }
1.1268 -}
1.1269 -
1.1270 /**
1.1271 * Update the icache for an untranslated address
1.1272 */
1.1273 @@ -886,7 +986,7 @@
1.1274 mmu_update_icache_phys(addr);
1.1275 return TRUE;
1.1276 } else if( addr >= 0xE0000000 && addr < 0xFFFFFF00 ) {
1.1277 - MMU_READ_ADDR_ERROR();
1.1278 + RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
1.1279 return FALSE;
1.1280 }
1.1281 }
1.1282 @@ -903,7 +1003,7 @@
1.1283 entryNo = mmu_itlb_lookup_vpn( addr );
1.1284 } else {
1.1285 if( addr & 0x80000000 ) {
1.1286 - MMU_READ_ADDR_ERROR();
1.1287 + RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
1.1288 return FALSE;
1.1289 }
1.1290
1.1291 @@ -916,17 +1016,17 @@
1.1292 entryNo = mmu_itlb_lookup_vpn_asid( addr );
1.1293
1.1294 if( entryNo != -1 && (mmu_itlb[entryNo].flags & TLB_USERMODE) == 0 ) {
1.1295 - MMU_TLB_READ_PROT_ERROR(addr);
1.1296 + RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
1.1297 return FALSE;
1.1298 }
1.1299 }
1.1300
1.1301 switch(entryNo) {
1.1302 case -1:
1.1303 - MMU_TLB_READ_MISS_ERROR(addr);
1.1304 + RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
1.1305 return FALSE;
1.1306 case -2:
1.1307 - MMU_TLB_MULTI_HIT_ERROR(addr);
1.1308 + RAISE_TLB_MULTIHIT_ERROR(addr);
1.1309 return FALSE;
1.1310 default:
1.1311 sh4_icache.page_ppa = mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask;
1.1312 @@ -985,46 +1085,309 @@
1.1313 ext_address_space[target>>12]->write_burst( target, src );
1.1314 }
1.1315
1.1316 -gboolean FASTCALL sh4_flush_store_queue_mmu( sh4addr_t addr )
1.1317 +void FASTCALL sh4_flush_store_queue_mmu( sh4addr_t addr, void *exc )
1.1318 {
1.1319 - uint32_t mmucr = MMIO_READ(MMU,MMUCR);
1.1320 int queue = (addr&0x20)>>2;
1.1321 sh4ptr_t src = (sh4ptr_t)&sh4r.store_queue[queue];
1.1322 sh4addr_t target;
1.1323 /* Store queue operation */
1.1324 -
1.1325 - int entryNo;
1.1326 - if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
1.1327 - entryNo = mmu_utlb_lookup_vpn_asid( addr );
1.1328 - } else {
1.1329 - entryNo = mmu_utlb_lookup_vpn( addr );
1.1330 - }
1.1331 - switch(entryNo) {
1.1332 - case -1:
1.1333 - MMU_TLB_WRITE_MISS_ERROR(addr);
1.1334 - return FALSE;
1.1335 - case -2:
1.1336 - MMU_TLB_MULTI_HIT_ERROR(addr);
1.1337 - return FALSE;
1.1338 - default:
1.1339 - if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
1.1340 - : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
1.1341 - /* protection violation */
1.1342 - MMU_TLB_WRITE_PROT_ERROR(addr);
1.1343 - return FALSE;
1.1344 - }
1.1345 -
1.1346 - if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
1.1347 - MMU_TLB_INITIAL_WRITE_ERROR(addr);
1.1348 - return FALSE;
1.1349 - }
1.1350 -
1.1351 - /* finally generate the target address */
1.1352 - target = ((mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
1.1353 - (addr & (~mmu_utlb[entryNo].mask))) & 0xFFFFFFE0;
1.1354 - }
1.1355 -
1.1356 - ext_address_space[target>>12]->write_burst( target, src );
1.1357 - return TRUE;
1.1358 +    storequeue_address_space[(addr&0x03FFFFE0)>>12]->write_burst( addr, src );
1.1359 }
1.1360
1.1361 +/********************** TLB Direct-Access Regions ***************************/
1.1362 +#ifdef HAVE_FRAME_ADDRESS
1.1363 +#define EXCEPTION_EXIT() do{ *(((void **)__builtin_frame_address(0))+1) = exc; return; } while(0)
1.1364 +#else
1.1365 +#define EXCEPTION_EXIT() sh4_core_exit(CORE_EXIT_EXCEPTION)
1.1366 +#endif
1.1367 +
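
A note on EXCEPTION_EXIT above: the HAVE_FRAME_ADDRESS variant overwrites the saved return address one word above the current frame pointer, so returning from the accessor lands in the exception-unwind code whose address the translator passed in as exc. This is deeply ABI-specific - it assumes an x86-style frame where the return address sits at frame_address + 1. A hedged stand-alone illustration of the mechanism:

    /* Illustrative only: make this function "return" to landing instead of
     * its real caller by patching the saved return address on the stack. */
    static void patched_return( void *landing )
    {
    #ifdef HAVE_FRAME_ADDRESS
        *(((void **)__builtin_frame_address(0)) + 1) = landing;
    #endif
    }
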
1.1368 +
1.1369 +#define ITLB_ENTRY(addr) ((addr>>7)&0x03)
1.1370 +
1.1371 +int32_t FASTCALL mmu_itlb_addr_read( sh4addr_t addr )
1.1372 +{
1.1373 + struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
1.1374 + return ent->vpn | ent->asid | (ent->flags & TLB_VALID);
1.1375 +}
1.1376 +
1.1377 +void FASTCALL mmu_itlb_addr_write( sh4addr_t addr, uint32_t val )
1.1378 +{
1.1379 + struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
1.1380 + ent->vpn = val & 0xFFFFFC00;
1.1381 + ent->asid = val & 0x000000FF;
1.1382 + ent->flags = (ent->flags & ~(TLB_VALID)) | (val&TLB_VALID);
1.1383 +}
1.1384 +
1.1385 +int32_t FASTCALL mmu_itlb_data_read( sh4addr_t addr )
1.1386 +{
1.1387 + struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
1.1388 + return (ent->ppn & 0x1FFFFC00) | ent->flags;
1.1389 +}
1.1390 +
1.1391 +void FASTCALL mmu_itlb_data_write( sh4addr_t addr, uint32_t val )
1.1392 +{
1.1393 + struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
1.1394 + ent->ppn = val & 0x1FFFFC00;
1.1395 + ent->flags = val & 0x00001DA;
1.1396 + ent->mask = get_tlb_size_mask(val);
1.1397 + if( ent->ppn >= 0x1C000000 )
1.1398 + ent->ppn |= 0xE0000000;
1.1399 +}
1.1400 +
1.1401 +#define UTLB_ENTRY(addr) ((addr>>8)&0x3F)
1.1402 +#define UTLB_ASSOC(addr) (addr&0x80)
1.1403 +#define UTLB_DATA2(addr) (addr&0x00800000)
1.1404 +
1.1405 +int32_t FASTCALL mmu_utlb_addr_read( sh4addr_t addr )
1.1406 +{
1.1407 + struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
1.1408 + return ent->vpn | ent->asid | (ent->flags & TLB_VALID) |
1.1409 + ((ent->flags & TLB_DIRTY)<<7);
1.1410 +}
1.1411 +int32_t FASTCALL mmu_utlb_data_read( sh4addr_t addr )
1.1412 +{
1.1413 + struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
1.1414 + if( UTLB_DATA2(addr) ) {
1.1415 + return ent->pcmcia;
1.1416 + } else {
1.1417 + return (ent->ppn&0x1FFFFC00) | ent->flags;
1.1418 + }
1.1419 +}
1.1420 +
1.1421 +/**
1.1422 + * Find a UTLB entry for the associative TLB write - same as the normal
1.1423 + * lookup but ignores the valid bit.
1.1424 + */
1.1425 +static inline int mmu_utlb_lookup_assoc( uint32_t vpn, uint32_t asid )
1.1426 +{
1.1427 + int result = -1;
1.1428 + unsigned int i;
1.1429 + for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
1.1430 + if( (mmu_utlb[i].flags & TLB_VALID) &&
1.1431 + ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
1.1432 + ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
1.1433 + if( result != -1 ) {
1.1434 + fprintf( stderr, "TLB Multi hit: %d %d\n", result, i );
1.1435 + return -2;
1.1436 + }
1.1437 + result = i;
1.1438 + }
1.1439 + }
1.1440 + return result;
1.1441 +}
1.1442 +
1.1443 +/**
1.1444 + * Find a ITLB entry for the associative TLB write - same as the normal
1.1445 + * lookup but ignores the valid bit.
1.1446 + */
1.1447 +static inline int mmu_itlb_lookup_assoc( uint32_t vpn, uint32_t asid )
1.1448 +{
1.1449 + int result = -1;
1.1450 + unsigned int i;
1.1451 + for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
1.1452 + if( (mmu_itlb[i].flags & TLB_VALID) &&
1.1453 + ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
1.1454 + ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
1.1455 + if( result != -1 ) {
1.1456 + return -2;
1.1457 + }
1.1458 + result = i;
1.1459 + }
1.1460 + }
1.1461 + return result;
1.1462 +}
1.1463 +
1.1464 +void FASTCALL mmu_utlb_addr_write( sh4addr_t addr, uint32_t val, void *exc )
1.1465 +{
1.1466 + if( UTLB_ASSOC(addr) ) {
1.1467 + int utlb = mmu_utlb_lookup_assoc( val, mmu_asid );
1.1468 + if( utlb >= 0 ) {
1.1469 + struct utlb_entry *ent = &mmu_utlb[utlb];
1.1470 + uint32_t old_flags = ent->flags;
1.1471 + ent->flags = ent->flags & ~(TLB_DIRTY|TLB_VALID);
1.1472 + ent->flags |= (val & TLB_VALID);
1.1473 + ent->flags |= ((val & 0x200)>>7);
1.1474 + if( ((old_flags^ent->flags) & (TLB_VALID|TLB_DIRTY)) != 0 ) {
1.1475 + if( old_flags & TLB_VALID )
1.1476 + mmu_utlb_remove_entry( utlb );
1.1477 + if( ent->flags & TLB_VALID )
1.1478 + mmu_utlb_insert_entry( utlb );
1.1479 + }
1.1480 + }
1.1481 +
1.1482 + int itlb = mmu_itlb_lookup_assoc( val, mmu_asid );
1.1483 + if( itlb >= 0 ) {
1.1484 + struct itlb_entry *ent = &mmu_itlb[itlb];
1.1485 + ent->flags = (ent->flags & (~TLB_VALID)) | (val & TLB_VALID);
1.1486 + }
1.1487 +
1.1488 + if( itlb == -2 || utlb == -2 ) {
1.1489 + RAISE_TLB_MULTIHIT_ERROR(addr);
1.1490 + EXCEPTION_EXIT();
1.1491 + return;
1.1492 + }
1.1493 + } else {
1.1494 + struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
1.1495 + if( ent->flags & TLB_VALID )
1.1496 + mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
1.1497 + ent->vpn = (val & 0xFFFFFC00);
1.1498 + ent->asid = (val & 0xFF);
1.1499 + ent->flags = (ent->flags & ~(TLB_DIRTY|TLB_VALID));
1.1500 + ent->flags |= (val & TLB_VALID);
1.1501 + ent->flags |= ((val & 0x200)>>7);
1.1502 + if( ent->flags & TLB_VALID )
1.1503 + mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
1.1504 + }
1.1505 +}
1.1506 +
1.1507 +void FASTCALL mmu_utlb_data_write( sh4addr_t addr, uint32_t val )
1.1508 +{
1.1509 + struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
1.1510 + if( UTLB_DATA2(addr) ) {
1.1511 + ent->pcmcia = val & 0x0000000F;
1.1512 + } else {
1.1513 + if( ent->flags & TLB_VALID )
1.1514 + mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
1.1515 + ent->ppn = (val & 0x1FFFFC00);
1.1516 + ent->flags = (val & 0x000001FF);
1.1517 + ent->mask = get_tlb_size_mask(val);
1.1518 + if( ent->flags & TLB_VALID )
1.1519 + mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
1.1520 + }
1.1521 +}
1.1522 +
1.1523 +struct mem_region_fn p4_region_itlb_addr = {
1.1524 + mmu_itlb_addr_read, mmu_itlb_addr_write,
1.1525 + mmu_itlb_addr_read, mmu_itlb_addr_write,
1.1526 + mmu_itlb_addr_read, mmu_itlb_addr_write,
1.1527 + unmapped_read_burst, unmapped_write_burst };
1.1528 +struct mem_region_fn p4_region_itlb_data = {
1.1529 + mmu_itlb_data_read, mmu_itlb_data_write,
1.1530 + mmu_itlb_data_read, mmu_itlb_data_write,
1.1531 + mmu_itlb_data_read, mmu_itlb_data_write,
1.1532 + unmapped_read_burst, unmapped_write_burst };
1.1533 +struct mem_region_fn p4_region_utlb_addr = {
1.1534 + mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
1.1535 + mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
1.1536 + mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
1.1537 + unmapped_read_burst, unmapped_write_burst };
1.1538 +struct mem_region_fn p4_region_utlb_data = {
1.1539 + mmu_utlb_data_read, mmu_utlb_data_write,
1.1540 + mmu_utlb_data_read, mmu_utlb_data_write,
1.1541 + mmu_utlb_data_read, mmu_utlb_data_write,
1.1542 + unmapped_read_burst, unmapped_write_burst };
1.1543 +
1.1544 +/********************** Error regions **************************/
1.1545 +
1.1546 +static void FASTCALL address_error_read( sh4addr_t addr, void *exc )
1.1547 +{
1.1548 + RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
1.1549 + EXCEPTION_EXIT();
1.1550 +}
1.1551 +
1.1552 +static void FASTCALL address_error_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
1.1553 +{
1.1554 + RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
1.1555 + EXCEPTION_EXIT();
1.1556 +}
1.1557 +
1.1558 +static void FASTCALL address_error_write( sh4addr_t addr, uint32_t val, void *exc )
1.1559 +{
1.1560 + RAISE_MEM_ERROR(EXC_DATA_ADDR_WRITE, addr);
1.1561 + EXCEPTION_EXIT();
1.1562 +}
1.1563 +
1.1564 +static void FASTCALL tlb_miss_read( sh4addr_t addr, void *exc )
1.1565 +{
1.1566 + RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
1.1567 + EXCEPTION_EXIT();
1.1568 +}
1.1569 +
1.1570 +static void FASTCALL tlb_miss_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
1.1571 +{
1.1572 + RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
1.1573 + EXCEPTION_EXIT();
1.1574 +}
1.1575 +
1.1576 +static void FASTCALL tlb_miss_write( sh4addr_t addr, uint32_t val, void *exc )
1.1577 +{
1.1578 + RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, addr);
1.1579 + EXCEPTION_EXIT();
1.1580 +}
1.1581 +
1.1582 +static int32_t FASTCALL tlb_protected_read( sh4addr_t addr, void *exc )
1.1583 +{
1.1584 + RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
1.1585 + EXCEPTION_EXIT();
1.1586 +}
1.1587 +
1.1588 +static int32_t FASTCALL tlb_protected_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
1.1589 +{
1.1590 + RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
1.1591 + EXCEPTION_EXIT();
1.1592 +}
1.1593 +
1.1594 +static void FASTCALL tlb_protected_write( sh4addr_t addr, uint32_t val, void *exc )
1.1595 +{
1.1596 + RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, addr);
1.1597 + EXCEPTION_EXIT();
1.1598 +}
1.1599 +
1.1600 +static void FASTCALL tlb_initial_write( sh4addr_t addr, uint32_t val, void *exc )
1.1601 +{
1.1602 + RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, addr);
1.1603 + EXCEPTION_EXIT();
1.1604 +}
1.1605 +
1.1606 +static int32_t FASTCALL tlb_multi_hit_read( sh4addr_t addr, void *exc )
1.1607 +{
1.1608 + MMIO_WRITE(MMU, TEA, addr);
1.1609 + MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (addr&0xFFFFFC00)));
1.1610 + sh4_raise_reset(EXC_TLB_MULTI_HIT);
1.1611 + EXCEPTION_EXIT();
1.1612 +}
1.1613 +
1.1614 +static int32_t FASTCALL tlb_multi_hit_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
1.1615 +{
1.1616 + MMIO_WRITE(MMU, TEA, addr);
1.1617 + MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (addr&0xFFFFFC00)));
1.1618 + sh4_raise_reset(EXC_TLB_MULTI_HIT);
1.1619 + EXCEPTION_EXIT();
1.1620 +}
1.1621 +static void FASTCALL tlb_multi_hit_write( sh4addr_t addr, uint32_t val, void *exc )
1.1622 +{
1.1623 + MMIO_WRITE(MMU, TEA, addr);
1.1624 + MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (addr&0xFFFFFC00)));
1.1625 + sh4_raise_reset(EXC_TLB_MULTI_HIT);
1.1626 + EXCEPTION_EXIT();
1.1627 +}
1.1628 +
1.1629 +/**
1.1630 + * Note: Per sec 4.6.4 of the SH7750 manual, SQ
1.1631 + */
1.1632 +struct mem_region_fn mem_region_address_error = {
1.1633 + (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
1.1634 + (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
1.1635 + (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
1.1636 + (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write };
1.1637 +
1.1638 +struct mem_region_fn mem_region_tlb_miss = {
1.1639 + (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
1.1640 + (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
1.1641 + (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
1.1642 + (mem_read_burst_fn_t)tlb_miss_read_burst, (mem_write_burst_fn_t)tlb_miss_write };
1.1643 +
1.1644 +struct mem_region_fn mem_region_user_protected = {
1.1645 + (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
1.1646 + (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
1.1647 + (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
1.1648 + (mem_read_burst_fn_t)tlb_protected_read_burst, (mem_write_burst_fn_t)tlb_protected_write };
1.1649 +
1.1650 +struct mem_region_fn mem_region_tlb_multihit = {
1.1651 + (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
1.1652 + (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
1.1653 + (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
1.1654 + (mem_read_burst_fn_t)tlb_multi_hit_read_burst, (mem_write_burst_fn_t)tlb_multi_hit_write };
1.1655 +
1.1656 +
1.1657 +
1.1658 \ No newline at end of file