lxdream.org :: lxdream/src/sh4/mmu.c :: diff
filename src/sh4/mmu.c
changeset 953:f4a156508ad1
prev 927:17b6b9e245d8
next 955:e289b49c28f1
author nkeynes
date Tue Jan 13 11:56:28 2009 +0000
permissions -rw-r--r--
last change Merge lxdream-mem branch back to trunk
1.1 --- a/src/sh4/mmu.c Mon Dec 15 10:44:56 2008 +0000
1.2 +++ b/src/sh4/mmu.c Tue Jan 13 11:56:28 2009 +0000
1.3 @@ -1,7 +1,8 @@
1.4 /**
1.5 * $Id$
1.6 *
1.7 - * MMU implementation
1.8 + * SH4 MMU implementation based on address space page maps. This module
1.9 + * is responsible for all address decoding functions.
1.10 *
1.11 * Copyright (c) 2005 Nathan Keynes.
1.12 *
1.13 @@ -22,224 +23,142 @@
1.14 #include "sh4/sh4mmio.h"
1.15 #include "sh4/sh4core.h"
1.16 #include "sh4/sh4trans.h"
1.17 +#include "dreamcast.h"
1.18 #include "mem.h"
1.19 +#include "mmu.h"
1.20
1.21 -#ifdef HAVE_FRAME_ADDRESS
1.22 -#define RETURN_VIA(exc) do{ *(((void **)__builtin_frame_address(0))+1) = exc; return; } while(0)
1.23 -#else
1.24 -#define RETURN_VIA(exc) return MMU_VMA_ERROR
1.25 -#endif
1.26 -
1.27 -#define VMA_TO_EXT_ADDR(vma) ((vma)&0x1FFFFFFF)
1.28 -
1.29 -/* The MMU (practically unique in the system) is allowed to raise exceptions
1.30 - * directly, with a return code indicating that one was raised and the caller
1.31 - * had better behave appropriately.
1.32 - */
1.33 -#define RAISE_TLB_ERROR(code, vpn) \
1.34 - MMIO_WRITE(MMU, TEA, vpn); \
1.35 - MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
1.36 - sh4_raise_tlb_exception(code);
1.37 -
1.38 +#define RAISE_TLB_ERROR(code, vpn) sh4_raise_tlb_exception(code, vpn)
1.39 #define RAISE_MEM_ERROR(code, vpn) \
1.40 MMIO_WRITE(MMU, TEA, vpn); \
1.41 MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
1.42 sh4_raise_exception(code);
1.43 +#define RAISE_TLB_MULTIHIT_ERROR(vpn) sh4_raise_tlb_multihit(vpn)
1.44
1.45 -#define RAISE_OTHER_ERROR(code) \
1.46 - sh4_raise_exception(code);
1.47 -/**
1.48 - * Abort with a non-MMU address error. Caused by user-mode code attempting
1.49 - * to access privileged regions, or alignment faults.
1.50 - */
1.51 -#define MMU_READ_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_READ)
1.52 -#define MMU_WRITE_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_WRITE)
1.53 +/* An entry is a 1K entry if it's one of the mmu_utlb_1k_pages entries */
1.54 +#define IS_1K_PAGE_ENTRY(ent) ( ((uintptr_t)(((struct utlb_1k_entry *)ent) - &mmu_utlb_1k_pages[0])) < UTLB_ENTRY_COUNT )
1.55
1.56 -#define MMU_TLB_READ_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_READ, vpn)
1.57 -#define MMU_TLB_WRITE_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, vpn)
1.58 -#define MMU_TLB_INITIAL_WRITE_ERROR(vpn) RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, vpn)
1.59 -#define MMU_TLB_READ_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_READ, vpn)
1.60 -#define MMU_TLB_WRITE_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, vpn)
1.61 -#define MMU_TLB_MULTI_HIT_ERROR(vpn) sh4_raise_reset(EXC_TLB_MULTI_HIT); \
1.62 - MMIO_WRITE(MMU, TEA, vpn); \
1.63 - MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00)));
1.64 +/* Primary address space (used directly by SH4 cores) */
1.65 +mem_region_fn_t *sh4_address_space;
1.66 +mem_region_fn_t *sh4_user_address_space;
1.67
1.68 +/* Accessed from the UTLB accessor methods */
1.69 +uint32_t mmu_urc;
1.70 +uint32_t mmu_urb;
1.71 +static gboolean mmu_urc_overflow; /* If true, urc was set >= urb */
1.72
1.73 -#define OCRAM_START (0x1C000000>>LXDREAM_PAGE_BITS)
1.74 -#define OCRAM_END (0x20000000>>LXDREAM_PAGE_BITS)
1.75 -
1.76 -#define ITLB_ENTRY_COUNT 4
1.77 -#define UTLB_ENTRY_COUNT 64
1.78 -
1.79 -/* Entry address */
1.80 -#define TLB_VALID 0x00000100
1.81 -#define TLB_USERMODE 0x00000040
1.82 -#define TLB_WRITABLE 0x00000020
1.83 -#define TLB_USERWRITABLE (TLB_WRITABLE|TLB_USERMODE)
1.84 -#define TLB_SIZE_MASK 0x00000090
1.85 -#define TLB_SIZE_1K 0x00000000
1.86 -#define TLB_SIZE_4K 0x00000010
1.87 -#define TLB_SIZE_64K 0x00000080
1.88 -#define TLB_SIZE_1M 0x00000090
1.89 -#define TLB_CACHEABLE 0x00000008
1.90 -#define TLB_DIRTY 0x00000004
1.91 -#define TLB_SHARE 0x00000002
1.92 -#define TLB_WRITETHRU 0x00000001
1.93 -
1.94 -#define MASK_1K 0xFFFFFC00
1.95 -#define MASK_4K 0xFFFFF000
1.96 -#define MASK_64K 0xFFFF0000
1.97 -#define MASK_1M 0xFFF00000
1.98 -
1.99 -struct itlb_entry {
1.100 - sh4addr_t vpn; // Virtual Page Number
1.101 - uint32_t asid; // Process ID
1.102 - uint32_t mask;
1.103 - sh4addr_t ppn; // Physical Page Number
1.104 - uint32_t flags;
1.105 -};
1.106 -
1.107 -struct utlb_entry {
1.108 - sh4addr_t vpn; // Virtual Page Number
1.109 - uint32_t mask; // Page size mask
1.110 - uint32_t asid; // Process ID
1.111 - sh4addr_t ppn; // Physical Page Number
1.112 - uint32_t flags;
1.113 - uint32_t pcmcia; // extra pcmcia data - not used
1.114 -};
1.115 -
1.116 -struct utlb_sort_entry {
1.117 - sh4addr_t key; // Masked VPN + ASID
1.118 - uint32_t mask; // Mask + 0x00FF
1.119 - int entryNo;
1.120 -};
1.121 -
1.122 -
1.123 +/* Module globals */
1.124 static struct itlb_entry mmu_itlb[ITLB_ENTRY_COUNT];
1.125 static struct utlb_entry mmu_utlb[UTLB_ENTRY_COUNT];
1.126 -static uint32_t mmu_urc;
1.127 -static uint32_t mmu_urb;
1.128 +static struct utlb_page_entry mmu_utlb_pages[UTLB_ENTRY_COUNT];
1.129 static uint32_t mmu_lrui;
1.130 static uint32_t mmu_asid; // current asid
1.131 +static struct utlb_default_regions *mmu_user_storequeue_regions;
1.132
1.133 -static struct utlb_sort_entry mmu_utlb_sorted[UTLB_ENTRY_COUNT];
1.134 -static uint32_t mmu_utlb_entries; // Number of entries in mmu_utlb_sorted.
1.135 +/* Structures for 1K page handling */
1.136 +static struct utlb_1k_entry mmu_utlb_1k_pages[UTLB_ENTRY_COUNT];
1.137 +static int mmu_utlb_1k_free_list[UTLB_ENTRY_COUNT];
1.138 +static int mmu_utlb_1k_free_index;
1.139
1.140 -static sh4ptr_t cache = NULL;
1.141
1.142 +/* Function prototypes */
1.143 static void mmu_invalidate_tlb();
1.144 -static void mmu_utlb_sorted_reset();
1.145 -static void mmu_utlb_sorted_reload();
1.146 +static void mmu_utlb_register_all();
1.147 +static void mmu_utlb_remove_entry(int);
1.148 +static void mmu_utlb_insert_entry(int);
1.149 +static void mmu_register_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn );
1.150 +static void mmu_register_user_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn );
1.151 +static void mmu_set_tlb_enabled( int tlb_on );
1.152 +static void mmu_set_tlb_asid( uint32_t asid );
1.153 +static void mmu_set_storequeue_protected( int protected, int tlb_on );
1.154 +static gboolean mmu_utlb_map_pages( mem_region_fn_t priv_page, mem_region_fn_t user_page, sh4addr_t start_addr, int npages );
1.155 +static void mmu_utlb_remap_pages( gboolean remap_priv, gboolean remap_user, int entryNo );
1.156 +static gboolean mmu_utlb_unmap_pages( gboolean unmap_priv, gboolean unmap_user, sh4addr_t start_addr, int npages );
1.157 +static gboolean mmu_ext_page_remapped( sh4addr_t page, mem_region_fn_t fn, void *user_data );
1.158 +static void mmu_utlb_1k_init();
1.159 +static struct utlb_1k_entry *mmu_utlb_1k_alloc();
1.160 +static void mmu_utlb_1k_free( struct utlb_1k_entry *entry );
1.161 +static void mmu_fix_urc();
1.162
1.163 +static void FASTCALL tlb_miss_read( sh4addr_t addr, void *exc );
1.164 +static int32_t FASTCALL tlb_protected_read( sh4addr_t addr, void *exc );
1.165 +static void FASTCALL tlb_protected_write( sh4addr_t addr, uint32_t val, void *exc );
1.166 +static void FASTCALL tlb_initial_write( sh4addr_t addr, uint32_t val, void *exc );
1.167 +static uint32_t get_tlb_size_mask( uint32_t flags );
1.168 +static uint32_t get_tlb_size_pages( uint32_t flags );
1.169
1.170 -static uint32_t get_mask_for_flags( uint32_t flags )
1.171 -{
1.172 - switch( flags & TLB_SIZE_MASK ) {
1.173 - case TLB_SIZE_1K: return MASK_1K;
1.174 - case TLB_SIZE_4K: return MASK_4K;
1.175 - case TLB_SIZE_64K: return MASK_64K;
1.176 - case TLB_SIZE_1M: return MASK_1M;
1.177 - default: return 0; /* Unreachable */
1.178 - }
1.179 -}
1.180 +#define DEFAULT_REGIONS 0
1.181 +#define DEFAULT_STOREQUEUE_REGIONS 1
1.182 +#define DEFAULT_STOREQUEUE_SQMD_REGIONS 2
1.183
1.184 -int32_t mmio_region_MMU_read( uint32_t reg )
1.185 -{
1.186 - switch( reg ) {
1.187 - case MMUCR:
1.188 - return MMIO_READ( MMU, MMUCR) | (mmu_urc<<10) | (mmu_urb<<18) | (mmu_lrui<<26);
1.189 - default:
1.190 - return MMIO_READ( MMU, reg );
1.191 - }
1.192 -}
1.193 +static struct utlb_default_regions mmu_default_regions[3] = {
1.194 + { &mem_region_tlb_miss, &mem_region_tlb_protected, &mem_region_tlb_multihit },
1.195 + { &p4_region_storequeue_miss, &p4_region_storequeue_protected, &p4_region_storequeue_multihit },
1.196 + { &p4_region_storequeue_sqmd_miss, &p4_region_storequeue_sqmd_protected, &p4_region_storequeue_sqmd_multihit } };
1.197
1.198 -void mmio_region_MMU_write( uint32_t reg, uint32_t val )
1.199 -{
1.200 - uint32_t tmp;
1.201 - switch(reg) {
1.202 - case SH4VER:
1.203 - return;
1.204 - case PTEH:
1.205 - val &= 0xFFFFFCFF;
1.206 - if( (val & 0xFF) != mmu_asid ) {
1.207 - mmu_asid = val&0xFF;
1.208 - sh4_icache.page_vma = -1; // invalidate icache as asid has changed
1.209 - }
1.210 - break;
1.211 - case PTEL:
1.212 - val &= 0x1FFFFDFF;
1.213 - break;
1.214 - case PTEA:
1.215 - val &= 0x0000000F;
1.216 - break;
1.217 - case TRA:
1.218 - val &= 0x000003FC;
1.219 - break;
1.220 - case EXPEVT:
1.221 - case INTEVT:
1.222 - val &= 0x00000FFF;
1.223 - break;
1.224 - case MMUCR:
1.225 - if( val & MMUCR_TI ) {
1.226 - mmu_invalidate_tlb();
1.227 - }
1.228 - mmu_urc = (val >> 10) & 0x3F;
1.229 - mmu_urb = (val >> 18) & 0x3F;
1.230 - mmu_lrui = (val >> 26) & 0x3F;
1.231 - val &= 0x00000301;
1.232 - tmp = MMIO_READ( MMU, MMUCR );
1.233 - if( (val ^ tmp) & (MMUCR_AT|MMUCR_SV) ) {
1.234 - // AT flag has changed state - flush the xlt cache as all bets
1.235 - // are off now. We also need to force an immediate exit from the
1.236 - // current block
1.237 - MMIO_WRITE( MMU, MMUCR, val );
1.238 - sh4_flush_icache();
1.239 - }
1.240 - break;
1.241 - case CCR:
1.242 - mmu_set_cache_mode( val & (CCR_OIX|CCR_ORA|CCR_OCE) );
1.243 - val &= 0x81A7;
1.244 - break;
1.245 - case MMUUNK1:
1.246 - /* Note that if the high bit is set, this appears to reset the machine.
1.247 - * Not emulating this behaviour yet until we know why...
1.248 - */
1.249 - val &= 0x00010007;
1.250 - break;
1.251 - case QACR0:
1.252 - case QACR1:
1.253 - val &= 0x0000001C;
1.254 - break;
1.255 - case PMCR1:
1.256 - PMM_write_control(0, val);
1.257 - val &= 0x0000C13F;
1.258 - break;
1.259 - case PMCR2:
1.260 - PMM_write_control(1, val);
1.261 - val &= 0x0000C13F;
1.262 - break;
1.263 - default:
1.264 - break;
1.265 - }
1.266 - MMIO_WRITE( MMU, reg, val );
1.267 -}
1.268 +#define IS_STOREQUEUE_PROTECTED() (mmu_user_storequeue_regions == &mmu_default_regions[DEFAULT_STOREQUEUE_SQMD_REGIONS])
1.269
1.270 +/*********************** Module public functions ****************************/
1.271
1.272 +/**
1.273 + * Allocate memory for the address space maps, and initialize them according
1.274 + * to the default (reset) values. (TLB is disabled by default)
1.275 + */
1.276 +
1.277 void MMU_init()
1.278 {
1.279 - cache = mem_alloc_pages(2);
1.280 + sh4_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 256 );
1.281 + sh4_user_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 256 );
1.282 + mmu_user_storequeue_regions = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
1.283 +
1.284 + mmu_set_tlb_enabled(0);
1.285 + mmu_register_user_mem_region( 0x80000000, 0x00000000, &mem_region_address_error );
1.286 + mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
1.287 +
1.288 + /* Setup P4 tlb/cache access regions */
1.289 + mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
1.290 + mmu_register_mem_region( 0xE4000000, 0xF0000000, &mem_region_unmapped );
1.291 + mmu_register_mem_region( 0xF0000000, 0xF1000000, &p4_region_icache_addr );
1.292 + mmu_register_mem_region( 0xF1000000, 0xF2000000, &p4_region_icache_data );
1.293 + mmu_register_mem_region( 0xF2000000, 0xF3000000, &p4_region_itlb_addr );
1.294 + mmu_register_mem_region( 0xF3000000, 0xF4000000, &p4_region_itlb_data );
1.295 + mmu_register_mem_region( 0xF4000000, 0xF5000000, &p4_region_ocache_addr );
1.296 + mmu_register_mem_region( 0xF5000000, 0xF6000000, &p4_region_ocache_data );
1.297 + mmu_register_mem_region( 0xF6000000, 0xF7000000, &p4_region_utlb_addr );
1.298 + mmu_register_mem_region( 0xF7000000, 0xF8000000, &p4_region_utlb_data );
1.299 + mmu_register_mem_region( 0xF8000000, 0x00000000, &mem_region_unmapped );
1.300 +
1.301 + /* Setup P4 control region */
1.302 + mmu_register_mem_region( 0xFF000000, 0xFF001000, &mmio_region_MMU.fn );
1.303 + mmu_register_mem_region( 0xFF100000, 0xFF101000, &mmio_region_PMM.fn );
1.304 + mmu_register_mem_region( 0xFF200000, 0xFF201000, &mmio_region_UBC.fn );
1.305 + mmu_register_mem_region( 0xFF800000, 0xFF801000, &mmio_region_BSC.fn );
1.306 + mmu_register_mem_region( 0xFF900000, 0xFFA00000, &mem_region_unmapped ); // SDMR2 + SDMR3
1.307 + mmu_register_mem_region( 0xFFA00000, 0xFFA01000, &mmio_region_DMAC.fn );
1.308 + mmu_register_mem_region( 0xFFC00000, 0xFFC01000, &mmio_region_CPG.fn );
1.309 + mmu_register_mem_region( 0xFFC80000, 0xFFC81000, &mmio_region_RTC.fn );
1.310 + mmu_register_mem_region( 0xFFD00000, 0xFFD01000, &mmio_region_INTC.fn );
1.311 + mmu_register_mem_region( 0xFFD80000, 0xFFD81000, &mmio_region_TMU.fn );
1.312 + mmu_register_mem_region( 0xFFE00000, 0xFFE01000, &mmio_region_SCI.fn );
1.313 + mmu_register_mem_region( 0xFFE80000, 0xFFE81000, &mmio_region_SCIF.fn );
1.314 + mmu_register_mem_region( 0xFFF00000, 0xFFF01000, &mem_region_unmapped ); // H-UDI
1.315 +
1.316 + register_mem_page_remapped_hook( mmu_ext_page_remapped, NULL );
1.317 + mmu_utlb_1k_init();
1.318 +
1.319 + /* Ensure the code regions are executable */
1.320 + mem_unprotect( mmu_utlb_pages, sizeof(mmu_utlb_pages) );
1.321 + mem_unprotect( mmu_utlb_1k_pages, sizeof(mmu_utlb_1k_pages) );
1.322 }
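
The address space maps set up above are flat tables with one entry per 4K page. A minimal sketch of how such a table is consulted, assuming mem_region_fn_t points to a table of access functions with a read_long member (the actual dispatch lives in the SH4 core and translator, not in this file):

    static inline int32_t example_read_long( sh4vma_t vma )
    {
        mem_region_fn_t fn = sh4_address_space[vma >> 12]; /* one entry per 4K page */
        return fn->read_long( vma );
    }
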
1.323
1.324 void MMU_reset()
1.325 {
1.326 mmio_region_MMU_write( CCR, 0 );
1.327 mmio_region_MMU_write( MMUCR, 0 );
1.328 - mmu_utlb_sorted_reload();
1.329 }
1.330
1.331 void MMU_save_state( FILE *f )
1.332 {
1.333 - fwrite( cache, 4096, 2, f );
1.334 + mmu_fix_urc();
1.335 fwrite( &mmu_itlb, sizeof(mmu_itlb), 1, f );
1.336 fwrite( &mmu_utlb, sizeof(mmu_utlb), 1, f );
1.337 fwrite( &mmu_urc, sizeof(mmu_urc), 1, f );
1.338 @@ -250,13 +169,6 @@
1.339
1.340 int MMU_load_state( FILE *f )
1.341 {
1.342 - /* Setup the cache mode according to the saved register value
1.343 - * (mem_load runs before this point to load all MMIO data)
1.344 - */
1.345 - mmio_region_MMU_write( CCR, MMIO_READ(MMU, CCR) );
1.346 - if( fread( cache, 4096, 2, f ) != 2 ) {
1.347 - return 1;
1.348 - }
1.349 if( fread( &mmu_itlb, sizeof(mmu_itlb), 1, f ) != 1 ) {
1.350 return 1;
1.351 }
1.352 @@ -275,151 +187,21 @@
1.353 if( fread( &mmu_asid, sizeof(mmu_asid), 1, f ) != 1 ) {
1.354 return 1;
1.355 }
1.356 - mmu_utlb_sorted_reload();
1.357 +
1.358 + uint32_t mmucr = MMIO_READ(MMU,MMUCR);
1.359 + mmu_urc_overflow = mmu_urc >= mmu_urb;
1.360 + mmu_set_tlb_enabled(mmucr&MMUCR_AT);
1.361 + mmu_set_storequeue_protected(mmucr&MMUCR_SQMD, mmucr&MMUCR_AT);
1.362 return 0;
1.363 }
1.364
1.365 -void mmu_set_cache_mode( int mode )
1.366 -{
1.367 - uint32_t i;
1.368 - switch( mode ) {
1.369 - case MEM_OC_INDEX0: /* OIX=0 */
1.370 - for( i=OCRAM_START; i<OCRAM_END; i++ )
1.371 - page_map[i] = cache + ((i&0x02)<<(LXDREAM_PAGE_BITS-1));
1.372 - break;
1.373 - case MEM_OC_INDEX1: /* OIX=1 */
1.374 - for( i=OCRAM_START; i<OCRAM_END; i++ )
1.375 - page_map[i] = cache + ((i&0x02000000)>>(25-LXDREAM_PAGE_BITS));
1.376 - break;
1.377 - default: /* disabled */
1.378 - for( i=OCRAM_START; i<OCRAM_END; i++ )
1.379 - page_map[i] = NULL;
1.380 - break;
1.381 - }
1.382 -}
1.383 -
1.384 -/******************* Sorted TLB data structure ****************/
1.385 -/*
1.386 - * mmu_utlb_sorted maintains a list of all active (valid) entries,
1.387 - * sorted by masked VPN and then ASID. Multi-hit entries are resolved
1.388 - * ahead of time, and have -1 recorded as the corresponding PPN.
1.389 - *
1.390 - * FIXME: Multi-hit detection doesn't pick up cases where two pages
1.391 - * overlap due to different sizes (and don't share the same base
1.392 - * address).
1.393 - */
1.394 -static void mmu_utlb_sorted_reset()
1.395 -{
1.396 - mmu_utlb_entries = 0;
1.397 -}
1.398 -
1.399 -/**
1.400 - * Find an entry in the sorted table (VPN+ASID check).
1.401 - */
1.402 -static inline int mmu_utlb_sorted_find( sh4addr_t vma )
1.403 -{
1.404 - int low = 0;
1.405 - int high = mmu_utlb_entries;
1.406 - uint32_t lookup = (vma & 0xFFFFFC00) + mmu_asid;
1.407 -
1.408 - mmu_urc++;
1.409 - if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
1.410 - mmu_urc = 0;
1.411 - }
1.412 -
1.413 - while( low != high ) {
1.414 - int posn = (high+low)>>1;
1.415 - int masked = lookup & mmu_utlb_sorted[posn].mask;
1.416 - if( mmu_utlb_sorted[posn].key < masked ) {
1.417 - low = posn+1;
1.418 - } else if( mmu_utlb_sorted[posn].key > masked ) {
1.419 - high = posn;
1.420 - } else {
1.421 - return mmu_utlb_sorted[posn].entryNo;
1.422 - }
1.423 - }
1.424 - return -1;
1.425 -
1.426 -}
1.427 -
1.428 -static void mmu_utlb_insert_entry( int entry )
1.429 -{
1.430 - int low = 0;
1.431 - int high = mmu_utlb_entries;
1.432 - uint32_t key = (mmu_utlb[entry].vpn & mmu_utlb[entry].mask) + mmu_utlb[entry].asid;
1.433 -
1.434 - assert( mmu_utlb_entries < UTLB_ENTRY_COUNT );
1.435 - /* Find the insertion point */
1.436 - while( low != high ) {
1.437 - int posn = (high+low)>>1;
1.438 - if( mmu_utlb_sorted[posn].key < key ) {
1.439 - low = posn+1;
1.440 - } else if( mmu_utlb_sorted[posn].key > key ) {
1.441 - high = posn;
1.442 - } else {
1.443 - /* Exact match - multi-hit */
1.444 - mmu_utlb_sorted[posn].entryNo = -2;
1.445 - return;
1.446 - }
1.447 - } /* 0 2 4 6 */
1.448 - memmove( &mmu_utlb_sorted[low+1], &mmu_utlb_sorted[low],
1.449 - (mmu_utlb_entries - low) * sizeof(struct utlb_sort_entry) );
1.450 - mmu_utlb_sorted[low].key = key;
1.451 - mmu_utlb_sorted[low].mask = mmu_utlb[entry].mask | 0x000000FF;
1.452 - mmu_utlb_sorted[low].entryNo = entry;
1.453 - mmu_utlb_entries++;
1.454 -}
1.455 -
1.456 -static void mmu_utlb_remove_entry( int entry )
1.457 -{
1.458 - int low = 0;
1.459 - int high = mmu_utlb_entries;
1.460 - uint32_t key = (mmu_utlb[entry].vpn & mmu_utlb[entry].mask) + mmu_utlb[entry].asid;
1.461 - while( low != high ) {
1.462 - int posn = (high+low)>>1;
1.463 - if( mmu_utlb_sorted[posn].key < key ) {
1.464 - low = posn+1;
1.465 - } else if( mmu_utlb_sorted[posn].key > key ) {
1.466 - high = posn;
1.467 - } else {
1.468 - if( mmu_utlb_sorted[posn].entryNo == -2 ) {
1.469 - /* Multiple-entry recorded - rebuild the whole table minus entry */
1.470 - int i;
1.471 - mmu_utlb_entries = 0;
1.472 - for( i=0; i< UTLB_ENTRY_COUNT; i++ ) {
1.473 - if( i != entry && (mmu_utlb[i].flags & TLB_VALID) ) {
1.474 - mmu_utlb_insert_entry(i);
1.475 - }
1.476 - }
1.477 - } else {
1.478 - mmu_utlb_entries--;
1.479 - memmove( &mmu_utlb_sorted[posn], &mmu_utlb_sorted[posn+1],
1.480 - (mmu_utlb_entries - posn)*sizeof(struct utlb_sort_entry) );
1.481 - }
1.482 - return;
1.483 - }
1.484 - }
1.485 - assert( 0 && "UTLB key not found!" );
1.486 -}
1.487 -
1.488 -static void mmu_utlb_sorted_reload()
1.489 -{
1.490 - int i;
1.491 - mmu_utlb_entries = 0;
1.492 - for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
1.493 - if( mmu_utlb[i].flags & TLB_VALID )
1.494 - mmu_utlb_insert_entry( i );
1.495 - }
1.496 -}
1.497 -
1.498 -/* TLB maintanence */
1.499 -
1.500 /**
1.501 * LDTLB instruction implementation. Copies PTEH, PTEL and PTEA into the UTLB
1.502 * entry identified by MMUCR.URC. Does not modify MMUCR or the ITLB.
1.503 */
1.504 void MMU_ldtlb()
1.505 {
1.506 + mmu_fix_urc();
1.507 if( mmu_utlb[mmu_urc].flags & TLB_VALID )
1.508 mmu_utlb_remove_entry( mmu_urc );
1.509 mmu_utlb[mmu_urc].vpn = MMIO_READ(MMU, PTEH) & 0xFFFFFC00;
1.510 @@ -427,211 +209,632 @@
1.511 mmu_utlb[mmu_urc].ppn = MMIO_READ(MMU, PTEL) & 0x1FFFFC00;
1.512 mmu_utlb[mmu_urc].flags = MMIO_READ(MMU, PTEL) & 0x00001FF;
1.513 mmu_utlb[mmu_urc].pcmcia = MMIO_READ(MMU, PTEA);
1.514 - mmu_utlb[mmu_urc].mask = get_mask_for_flags(mmu_utlb[mmu_urc].flags);
1.515 - if( mmu_utlb[mmu_urc].ppn >= 0x1C000000 )
1.516 - mmu_utlb[mmu_urc].ppn |= 0xE0000000;
1.517 + mmu_utlb[mmu_urc].mask = get_tlb_size_mask(mmu_utlb[mmu_urc].flags);
1.518 if( mmu_utlb[mmu_urc].flags & TLB_VALID )
1.519 mmu_utlb_insert_entry( mmu_urc );
1.520 }
1.521
1.522 +
1.523 +MMIO_REGION_READ_FN( MMU, reg )
1.524 +{
1.525 + reg &= 0xFFF;
1.526 + switch( reg ) {
1.527 + case MMUCR:
1.528 + mmu_fix_urc();
1.529 + return MMIO_READ( MMU, MMUCR) | (mmu_urc<<10) | ((mmu_urb&0x3F)<<18) | (mmu_lrui<<26);
1.530 + default:
1.531 + return MMIO_READ( MMU, reg );
1.532 + }
1.533 +}
1.534 +
1.535 +MMIO_REGION_WRITE_FN( MMU, reg, val )
1.536 +{
1.537 + uint32_t tmp;
1.538 + reg &= 0xFFF;
1.539 + switch(reg) {
1.540 + case SH4VER:
1.541 + return;
1.542 + case PTEH:
1.543 + val &= 0xFFFFFCFF;
1.544 + if( (val & 0xFF) != mmu_asid ) {
1.545 + mmu_set_tlb_asid( val&0xFF );
1.546 + sh4_icache.page_vma = -1; // invalidate icache as asid has changed
1.547 + }
1.548 + break;
1.549 + case PTEL:
1.550 + val &= 0x1FFFFDFF;
1.551 + break;
1.552 + case PTEA:
1.553 + val &= 0x0000000F;
1.554 + break;
1.555 + case TRA:
1.556 + val &= 0x000003FC;
1.557 + break;
1.558 + case EXPEVT:
1.559 + case INTEVT:
1.560 + val &= 0x00000FFF;
1.561 + break;
1.562 + case MMUCR:
1.563 + if( val & MMUCR_TI ) {
1.564 + mmu_invalidate_tlb();
1.565 + }
1.566 + mmu_urc = (val >> 10) & 0x3F;
1.567 + mmu_urb = (val >> 18) & 0x3F;
1.568 + if( mmu_urb == 0 ) {
1.569 + mmu_urb = 0x40;
1.570 + } else if( mmu_urc >= mmu_urb ) {
1.571 + mmu_urc_overflow = TRUE;
1.572 + }
1.573 + mmu_lrui = (val >> 26) & 0x3F;
1.574 + val &= 0x00000301;
1.575 + tmp = MMIO_READ( MMU, MMUCR );
1.576 + if( (val ^ tmp) & (MMUCR_SQMD) ) {
1.577 + mmu_set_storequeue_protected( val & MMUCR_SQMD, val&MMUCR_AT );
1.578 + }
1.579 + if( (val ^ tmp) & (MMUCR_AT) ) {
1.580 + // AT flag has changed state - flush the xlt cache as all bets
1.581 + // are off now. We also need to force an immediate exit from the
1.582 + // current block
1.583 + mmu_set_tlb_enabled( val & MMUCR_AT );
1.584 + MMIO_WRITE( MMU, MMUCR, val );
1.585 + sh4_core_exit( CORE_EXIT_FLUSH_ICACHE );
1.586 + xlat_flush_cache(); // If we're not running, flush the cache anyway
1.587 + }
1.588 + break;
1.589 + case CCR:
1.590 + CCN_set_cache_control( val );
1.591 + val &= 0x81A7;
1.592 + break;
1.593 + case MMUUNK1:
1.594 + /* Note that if the high bit is set, this appears to reset the machine.
1.595 + * Not emulating this behaviour yet until we know why...
1.596 + */
1.597 + val &= 0x00010007;
1.598 + break;
1.599 + case QACR0:
1.600 + case QACR1:
1.601 + val &= 0x0000001C;
1.602 + break;
1.603 + case PMCR1:
1.604 + PMM_write_control(0, val);
1.605 + val &= 0x0000C13F;
1.606 + break;
1.607 + case PMCR2:
1.608 + PMM_write_control(1, val);
1.609 + val &= 0x0000C13F;
1.610 + break;
1.611 + default:
1.612 + break;
1.613 + }
1.614 + MMIO_WRITE( MMU, reg, val );
1.615 +}
1.616 +
1.617 +/********************** 1K Page handling ***********************/
1.618 +/* Since we use 4K pages as our native page size, 1K pages need a bit of extra
1.619 + * effort to manage - we justify this on the basis that most programs won't
1.620 + * actually use 1K pages, so we may as well optimize for the common case.
1.621 + *
1.622 + * Implementation uses an intermediate page entry (the utlb_1k_entry) that
1.623 + * redirects requests to the 'real' page entry. These are allocated on an
1.624 + * as-needed basis, and returned to the pool when all subpages are empty.
1.625 + */
1.626 +static void mmu_utlb_1k_init()
1.627 +{
1.628 + int i;
1.629 + for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
1.630 + mmu_utlb_1k_free_list[i] = i;
1.631 + mmu_utlb_1k_init_vtable( &mmu_utlb_1k_pages[i] );
1.632 + }
1.633 + mmu_utlb_1k_free_index = 0;
1.634 +}
1.635 +
1.636 +static struct utlb_1k_entry *mmu_utlb_1k_alloc()
1.637 +{
1.638 + assert( mmu_utlb_1k_free_index < UTLB_ENTRY_COUNT );
1.639 + struct utlb_1k_entry *entry = &mmu_utlb_1k_pages[mmu_utlb_1k_free_index++];
1.640 + return entry;
1.641 +}
1.642 +
1.643 +static void mmu_utlb_1k_free( struct utlb_1k_entry *ent )
1.644 +{
1.645 + unsigned int entryNo = ent - &mmu_utlb_1k_pages[0];
1.646 + assert( entryNo < UTLB_ENTRY_COUNT );
1.647 + assert( mmu_utlb_1k_free_index > 0 );
1.648 + mmu_utlb_1k_free_list[--mmu_utlb_1k_free_index] = entryNo;
1.649 +}
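
An illustrative equivalent of what the generated per-entry 1K vtable does, assuming the subpages array and read_long member used elsewhere in this file: select a subpage on address bits 10-11 and forward the access. (The real forwarding functions are built by mmu_utlb_1k_init_vtable and carry no explicit entry argument.)

    static int32_t example_1k_read_long( struct utlb_1k_entry *ent, sh4vma_t vma )
    {
        mem_region_fn_t sub = ent->subpages[(vma >> 10) & 0x03]; /* 4 x 1K subpages per 4K page */
        return sub->read_long( vma );
    }
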
1.650 +
1.651 +
1.652 +/********************** Address space maintenance *************************/
1.653 +
1.654 +/**
1.655 + * MMU accessor functions just increment URC - fixup here if necessary
1.656 + */
1.657 +static inline void mmu_fix_urc()
1.658 +{
1.659 + if( mmu_urc_overflow ) {
1.660 + if( mmu_urc >= 0x40 ) {
1.661 + mmu_urc_overflow = FALSE;
1.662 + mmu_urc -= 0x40;
1.663 + mmu_urc %= mmu_urb;
1.664 + }
1.665 + } else {
1.666 + mmu_urc %= mmu_urb;
1.667 + }
1.668 +}
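
A brief worked trace of the fixup above, with illustrative values: suppose MMUCR is written with URB = 0x20 and URC = 0x30, so mmu_urc_overflow is set, and the accessors then increment URC another 0x15 times:

    mmu_urb = 0x20; mmu_urc = 0x45; mmu_urc_overflow = TRUE;
    mmu_fix_urc();   /* urc >= 0x40: clear overflow, 0x45 - 0x40 = 0x05, 0x05 % 0x20 = 0x05 */
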
1.669 +
1.670 +static void mmu_register_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn )
1.671 +{
1.672 + int count = (end - start) >> 12;
1.673 + mem_region_fn_t *ptr = &sh4_address_space[start>>12];
1.674 + while( count-- > 0 ) {
1.675 + *ptr++ = fn;
1.676 + }
1.677 +}
1.678 +static void mmu_register_user_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn )
1.679 +{
1.680 + int count = (end - start) >> 12;
1.681 + mem_region_fn_t *ptr = &sh4_user_address_space[start>>12];
1.682 + while( count-- > 0 ) {
1.683 + *ptr++ = fn;
1.684 + }
1.685 +}
1.686 +
1.687 +static gboolean mmu_ext_page_remapped( sh4addr_t page, mem_region_fn_t fn, void *user_data )
1.688 +{
1.689 + int i;
1.690 + if( (MMIO_READ(MMU,MMUCR)) & MMUCR_AT ) {
1.691 + /* TLB on */
1.692 + sh4_address_space[(page|0x80000000)>>12] = fn; /* Direct map to P1 and P2 */
1.693 + sh4_address_space[(page|0xA0000000)>>12] = fn;
1.694 + /* Scan UTLB and update any direct-referencing entries */
1.695 + } else {
1.696 + /* Direct map to U0, P0, P1, P2, P3 */
1.697 + for( i=0; i<= 0xC0000000; i+= 0x20000000 ) {
1.698 + sh4_address_space[(page|i)>>12] = fn;
1.699 + }
1.700 + for( i=0; i < 0x80000000; i+= 0x20000000 ) {
1.701 + sh4_user_address_space[(page|i)>>12] = fn;
1.702 + }
1.703 + }
1.704 +}
1.705 +
1.706 +static void mmu_set_tlb_enabled( int tlb_on )
1.707 +{
1.708 + mem_region_fn_t *ptr, *uptr;
1.709 + int i;
1.710 +
1.711 + /* Reset the storequeue area */
1.712 +
1.713 + if( tlb_on ) {
1.714 + mmu_register_mem_region(0x00000000, 0x80000000, &mem_region_tlb_miss );
1.715 + mmu_register_mem_region(0xC0000000, 0xE0000000, &mem_region_tlb_miss );
1.716 + mmu_register_user_mem_region(0x00000000, 0x80000000, &mem_region_tlb_miss );
1.717 +
1.718 + /* Default SQ prefetch goes to TLB miss (?) */
1.719 + mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue_miss );
1.720 + mmu_register_user_mem_region( 0xE0000000, 0xE4000000, mmu_user_storequeue_regions->tlb_miss );
1.721 + mmu_utlb_register_all();
1.722 + } else {
1.723 + for( i=0, ptr = sh4_address_space; i<7; i++, ptr += LXDREAM_PAGE_TABLE_ENTRIES ) {
1.724 + memcpy( ptr, ext_address_space, sizeof(mem_region_fn_t) * LXDREAM_PAGE_TABLE_ENTRIES );
1.725 + }
1.726 + for( i=0, ptr = sh4_user_address_space; i<4; i++, ptr += LXDREAM_PAGE_TABLE_ENTRIES ) {
1.727 + memcpy( ptr, ext_address_space, sizeof(mem_region_fn_t) * LXDREAM_PAGE_TABLE_ENTRIES );
1.728 + }
1.729 +
1.730 + mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
1.731 + if( IS_STOREQUEUE_PROTECTED() ) {
1.732 + mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue_sqmd );
1.733 + } else {
1.734 + mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
1.735 + }
1.736 + }
1.737 +
1.738 +}
1.739 +
1.740 +/**
1.741 + * Flip the SQMD switch - this is rather expensive, so will need to be changed if
1.742 + * anything expects to do this frequently.
1.743 + */
1.744 +static void mmu_set_storequeue_protected( int protected, int tlb_on )
1.745 +{
1.746 + mem_region_fn_t nontlb_region;
1.747 + int i;
1.748 +
1.749 + if( protected ) {
1.750 + mmu_user_storequeue_regions = &mmu_default_regions[DEFAULT_STOREQUEUE_SQMD_REGIONS];
1.751 + nontlb_region = &p4_region_storequeue_sqmd;
1.752 + } else {
1.753 + mmu_user_storequeue_regions = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
1.754 + nontlb_region = &p4_region_storequeue;
1.755 + }
1.756 +
1.757 + if( tlb_on ) {
1.758 + mmu_register_user_mem_region( 0xE0000000, 0xE4000000, mmu_user_storequeue_regions->tlb_miss );
1.759 + for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
1.760 + if( (mmu_utlb[i].vpn & 0xFC000000) == 0xE0000000 ) {
1.761 + mmu_utlb_insert_entry(i);
1.762 + }
1.763 + }
1.764 + } else {
1.765 + mmu_register_user_mem_region( 0xE0000000, 0xE4000000, nontlb_region );
1.766 + }
1.767 +
1.768 +}
1.769 +
1.770 +static void mmu_set_tlb_asid( uint32_t asid )
1.771 +{
1.772 + /* Scan for pages that need to be remapped */
1.773 + int i;
1.774 + if( IS_SV_ENABLED() ) {
1.775 + for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
1.776 + if( mmu_utlb[i].flags & TLB_VALID ) {
1.777 + if( (mmu_utlb[i].flags & TLB_SHARE) == 0 ) {
1.778 + if( mmu_utlb[i].asid == mmu_asid ) { // Matches old ASID - unmap out
1.779 + if( !mmu_utlb_unmap_pages( FALSE, TRUE, mmu_utlb[i].vpn&mmu_utlb[i].mask,
1.780 + get_tlb_size_pages(mmu_utlb[i].flags) ) )
1.781 + mmu_utlb_remap_pages( FALSE, TRUE, i );
1.782 + } else if( mmu_utlb[i].asid == asid ) { // Matches new ASID - map in
1.783 + mmu_utlb_map_pages( NULL, mmu_utlb_pages[i].user_fn,
1.784 + mmu_utlb[i].vpn&mmu_utlb[i].mask,
1.785 + get_tlb_size_pages(mmu_utlb[i].flags) );
1.786 + }
1.787 + }
1.788 + }
1.789 + }
1.790 + } else {
1.791 + // Remap both Priv+user pages
1.792 + for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
1.793 + if( mmu_utlb[i].flags & TLB_VALID ) {
1.794 + if( (mmu_utlb[i].flags & TLB_SHARE) == 0 ) {
1.795 + if( mmu_utlb[i].asid == mmu_asid ) { // Matches old ASID - unmap out
1.796 + if( !mmu_utlb_unmap_pages( TRUE, TRUE, mmu_utlb[i].vpn&mmu_utlb[i].mask,
1.797 + get_tlb_size_pages(mmu_utlb[i].flags) ) )
1.798 + mmu_utlb_remap_pages( TRUE, TRUE, i );
1.799 + } else if( mmu_utlb[i].asid == asid ) { // Matches new ASID - map in
1.800 + mmu_utlb_map_pages( &mmu_utlb_pages[i].fn, mmu_utlb_pages[i].user_fn,
1.801 + mmu_utlb[i].vpn&mmu_utlb[i].mask,
1.802 + get_tlb_size_pages(mmu_utlb[i].flags) );
1.803 + }
1.804 + }
1.805 + }
1.806 + }
1.807 + }
1.808 +
1.809 + mmu_asid = asid;
1.810 +}
1.811 +
1.812 +static uint32_t get_tlb_size_mask( uint32_t flags )
1.813 +{
1.814 + switch( flags & TLB_SIZE_MASK ) {
1.815 + case TLB_SIZE_1K: return MASK_1K;
1.816 + case TLB_SIZE_4K: return MASK_4K;
1.817 + case TLB_SIZE_64K: return MASK_64K;
1.818 + case TLB_SIZE_1M: return MASK_1M;
1.819 + default: return 0; /* Unreachable */
1.820 + }
1.821 +}
1.822 +static uint32_t get_tlb_size_pages( uint32_t flags )
1.823 +{
1.824 + switch( flags & TLB_SIZE_MASK ) {
1.825 + case TLB_SIZE_1K: return 0;
1.826 + case TLB_SIZE_4K: return 1;
1.827 + case TLB_SIZE_64K: return 16;
1.828 + case TLB_SIZE_1M: return 256;
1.829 + default: return 0; /* Unreachable */
1.830 + }
1.831 +}
1.832 +
1.833 +/**
1.834 + * Add a new TLB entry mapping to the address space table. If any of the pages
1.835 + * are already mapped, they are mapped to the TLB multi-hit page instead.
1.836 + * @return FALSE if a TLB multihit situation was detected, otherwise TRUE.
1.837 + */
1.838 +static gboolean mmu_utlb_map_pages( mem_region_fn_t priv_page, mem_region_fn_t user_page, sh4addr_t start_addr, int npages )
1.839 +{
1.840 + mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
1.841 + mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
1.842 + struct utlb_default_regions *privdefs = &mmu_default_regions[DEFAULT_REGIONS];
1.843 + struct utlb_default_regions *userdefs = privdefs;
1.844 +
1.845 + gboolean mapping_ok = TRUE;
1.846 + int i;
1.847 +
1.848 + if( (start_addr & 0xFC000000) == 0xE0000000 ) {
1.849 + /* Storequeue mapping */
1.850 + privdefs = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
1.851 + userdefs = mmu_user_storequeue_regions;
1.852 + } else if( (start_addr & 0xE0000000) == 0xC0000000 ) {
1.853 + user_page = NULL; /* No user access to P3 region */
1.854 + } else if( start_addr >= 0x80000000 ) {
1.855 + return TRUE; // No mapping - legal but meaningless
1.856 + }
1.857 +
1.858 + if( npages == 0 ) {
1.859 + struct utlb_1k_entry *ent;
1.860 + int i, idx = (start_addr >> 10) & 0x03;
1.861 + if( IS_1K_PAGE_ENTRY(*ptr) ) {
1.862 + ent = (struct utlb_1k_entry *)*ptr;
1.863 + } else {
1.864 + ent = mmu_utlb_1k_alloc();
1.865 + /* New 1K struct - init to previous contents of region */
1.866 + for( i=0; i<4; i++ ) {
1.867 + ent->subpages[i] = *ptr;
1.868 + ent->user_subpages[i] = *uptr;
1.869 + }
1.870 + *ptr = &ent->fn;
1.871 + *uptr = &ent->user_fn;
1.872 + }
1.873 +
1.874 + if( priv_page != NULL ) {
1.875 + if( ent->subpages[idx] == privdefs->tlb_miss ) {
1.876 + ent->subpages[idx] = priv_page;
1.877 + } else {
1.878 + mapping_ok = FALSE;
1.879 + ent->subpages[idx] = privdefs->tlb_multihit;
1.880 + }
1.881 + }
1.882 + if( user_page != NULL ) {
1.883 + if( ent->user_subpages[idx] == userdefs->tlb_miss ) {
1.884 + ent->user_subpages[idx] = user_page;
1.885 + } else {
1.886 + mapping_ok = FALSE;
1.887 + ent->user_subpages[idx] = userdefs->tlb_multihit;
1.888 + }
1.889 + }
1.890 +
1.891 + } else {
1.892 + if( priv_page != NULL ) {
1.893 + /* Privileged mapping only */
1.894 + for( i=0; i<npages; i++ ) {
1.895 + if( *ptr == privdefs->tlb_miss ) {
1.896 + *ptr++ = priv_page;
1.897 + } else {
1.898 + mapping_ok = FALSE;
1.899 + *ptr++ = privdefs->tlb_multihit;
1.900 + }
1.901 + }
1.902 + }
1.903 + if( user_page != NULL ) {
1.904 + /* User mapping only (eg ASID change remap w/ SV=1) */
1.905 + for( i=0; i<npages; i++ ) {
1.906 + if( *uptr == userdefs->tlb_miss ) {
1.907 + *uptr++ = user_page;
1.908 + } else {
1.909 + mapping_ok = FALSE;
1.910 + *uptr++ = userdefs->tlb_multihit;
1.911 + }
1.912 + }
1.913 + }
1.914 + }
1.915 +
1.916 + return mapping_ok;
1.917 +}
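
A usage sketch of the multi-hit handling, assuming two valid 4K entries cover the same VPN: the second mapping demotes the page to the multi-hit region, and when one entry is later removed the FALSE result from mmu_utlb_unmap_pages triggers the remap pass in mmu_utlb_remove_entry.

    static void example_multihit( void )
    {
        gboolean ok;
        ok = mmu_utlb_map_pages( &mmu_utlb_pages[0].fn, mmu_utlb_pages[0].user_fn, 0x0C000000, 1 ); /* TRUE */
        ok = mmu_utlb_map_pages( &mmu_utlb_pages[1].fn, mmu_utlb_pages[1].user_fn, 0x0C000000, 1 ); /* FALSE - page now points at tlb_multihit */
    }
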
1.918 +
1.919 +/**
1.920 + * Remap any pages within the region covered by entryNo, but not including
1.921 + * entryNo itself. This is used to reestablish pages that were previously
1.922 + * covered by a multi-hit exception region when one of the pages is removed.
1.923 + */
1.924 +static void mmu_utlb_remap_pages( gboolean remap_priv, gboolean remap_user, int entryNo )
1.925 +{
1.926 + int mask = mmu_utlb[entryNo].mask;
1.927 + uint32_t remap_addr = mmu_utlb[entryNo].vpn & mask;
1.928 + int i;
1.929 +
1.930 + for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
1.931 + if( i != entryNo && (mmu_utlb[i].vpn & mask) == remap_addr && (mmu_utlb[i].flags & TLB_VALID) ) {
1.932 + /* Overlapping region */
1.933 + mem_region_fn_t priv_page = (remap_priv ? &mmu_utlb_pages[i].fn : NULL);
1.934 + mem_region_fn_t user_page = (remap_priv ? mmu_utlb_pages[i].user_fn : NULL);
1.935 + uint32_t start_addr;
1.936 + int npages;
1.937 +
1.938 + if( mmu_utlb[i].mask >= mask ) {
1.939 + /* entry is no larger than the area we're replacing - map completely */
1.940 + start_addr = mmu_utlb[i].vpn & mmu_utlb[i].mask;
1.941 + npages = get_tlb_size_pages( mmu_utlb[i].flags );
1.942 + } else {
1.943 + /* Otherwise map subset - region covered by removed page */
1.944 + start_addr = remap_addr;
1.945 + npages = get_tlb_size_pages( mmu_utlb[entryNo].flags );
1.946 + }
1.947 +
1.948 + if( (mmu_utlb[i].flags & TLB_SHARE) || mmu_utlb[i].asid == mmu_asid ) {
1.949 + mmu_utlb_map_pages( priv_page, user_page, start_addr, npages );
1.950 + } else if( IS_SV_ENABLED() ) {
1.951 + mmu_utlb_map_pages( priv_page, NULL, start_addr, npages );
1.952 + }
1.953 +
1.954 + }
1.955 + }
1.956 +}
1.957 +
1.958 +/**
1.959 + * Remove a previous TLB mapping (replacing them with the TLB miss region).
1.960 + * @return FALSE if any pages were previously mapped to the TLB multihit page,
1.961 + * otherwise TRUE. In either case, all pages in the region are cleared to TLB miss.
1.962 + */
1.963 +static gboolean mmu_utlb_unmap_pages( gboolean unmap_priv, gboolean unmap_user, sh4addr_t start_addr, int npages )
1.964 +{
1.965 + mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
1.966 + mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
1.967 + struct utlb_default_regions *privdefs = &mmu_default_regions[DEFAULT_REGIONS];
1.968 + struct utlb_default_regions *userdefs = privdefs;
1.969 +
1.970 + gboolean unmapping_ok = TRUE;
1.971 + int i;
1.972 +
1.973 + if( (start_addr & 0xFC000000) == 0xE0000000 ) {
1.974 + /* Storequeue mapping */
1.975 + privdefs = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
1.976 + userdefs = mmu_user_storequeue_regions;
1.977 + } else if( (start_addr & 0xE0000000) == 0xC0000000 ) {
1.978 + unmap_user = FALSE;
1.979 + } else if( start_addr >= 0x80000000 ) {
1.980 + return TRUE; // No mapping - legal but meaningless
1.981 + }
1.982 +
1.983 + if( npages == 0 ) { // 1K page
1.984 + assert( IS_1K_PAGE_ENTRY( *ptr ) );
1.985 + struct utlb_1k_entry *ent = (struct utlb_1k_entry *)*ptr;
1.986 + int i, idx = (start_addr >> 10) & 0x03, mergeable=1;
1.987 + if( ent->subpages[idx] == privdefs->tlb_multihit ) {
1.988 + unmapping_ok = FALSE;
1.989 + }
1.990 + if( unmap_priv )
1.991 + ent->subpages[idx] = privdefs->tlb_miss;
1.992 + if( unmap_user )
1.993 + ent->user_subpages[idx] = userdefs->tlb_miss;
1.994 +
1.995 + /* If all 4 subpages have the same content, merge them together and
1.996 + * release the 1K entry
1.997 + */
1.998 + mem_region_fn_t priv_page = ent->subpages[0];
1.999 + mem_region_fn_t user_page = ent->user_subpages[0];
1.1000 + for( i=1; i<4; i++ ) {
1.1001 + if( priv_page != ent->subpages[i] || user_page != ent->user_subpages[i] ) {
1.1002 + mergeable = 0;
1.1003 + break;
1.1004 + }
1.1005 + }
1.1006 + if( mergeable ) {
1.1007 + mmu_utlb_1k_free(ent);
1.1008 + *ptr = priv_page;
1.1009 + *uptr = user_page;
1.1010 + }
1.1011 + } else {
1.1012 + if( unmap_priv ) {
1.1013 + /* Privileged (un)mapping */
1.1014 + for( i=0; i<npages; i++ ) {
1.1015 + if( *ptr == privdefs->tlb_multihit ) {
1.1016 + unmapping_ok = FALSE;
1.1017 + }
1.1018 + *ptr++ = privdefs->tlb_miss;
1.1019 + }
1.1020 + }
1.1021 + if( unmap_user ) {
1.1022 + /* User (un)mapping */
1.1023 + for( i=0; i<npages; i++ ) {
1.1024 + if( *uptr == userdefs->tlb_multihit ) {
1.1025 + unmapping_ok = FALSE;
1.1026 + }
1.1027 + *uptr++ = userdefs->tlb_miss;
1.1028 + }
1.1029 + }
1.1030 + }
1.1031 +
1.1032 + return unmapping_ok;
1.1033 +}
1.1034 +
1.1035 +static void mmu_utlb_insert_entry( int entry )
1.1036 +{
1.1037 + struct utlb_entry *ent = &mmu_utlb[entry];
1.1038 + mem_region_fn_t page = &mmu_utlb_pages[entry].fn;
1.1039 + mem_region_fn_t upage;
1.1040 + sh4addr_t start_addr = ent->vpn & ent->mask;
1.1041 + int npages = get_tlb_size_pages(ent->flags);
1.1042 +
1.1043 + if( (start_addr & 0xFC000000) == 0xE0000000 ) {
1.1044 + /* Store queue mappings are a bit different - normal access is fixed to
1.1045 + * the store queue register block, and we only map prefetches through
1.1046 + * the TLB
1.1047 + */
1.1048 + mmu_utlb_init_storequeue_vtable( ent, &mmu_utlb_pages[entry] );
1.1049 +
1.1050 + if( (ent->flags & TLB_USERMODE) == 0 ) {
1.1051 + upage = mmu_user_storequeue_regions->tlb_prot;
1.1052 + } else if( IS_STOREQUEUE_PROTECTED() ) {
1.1053 + upage = &p4_region_storequeue_sqmd;
1.1054 + } else {
1.1055 + upage = page;
1.1056 + }
1.1057 +
1.1058 + } else {
1.1059 +
1.1060 + if( (ent->flags & TLB_USERMODE) == 0 ) {
1.1061 + upage = &mem_region_tlb_protected;
1.1062 + } else {
1.1063 + upage = page;
1.1064 + }
1.1065 +
1.1066 + if( (ent->flags & TLB_WRITABLE) == 0 ) {
1.1067 + page->write_long = (mem_write_fn_t)tlb_protected_write;
1.1068 + page->write_word = (mem_write_fn_t)tlb_protected_write;
1.1069 + page->write_byte = (mem_write_fn_t)tlb_protected_write;
1.1070 + page->write_burst = (mem_write_burst_fn_t)tlb_protected_write;
1.1071 + mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], FALSE );
1.1072 + } else if( (ent->flags & TLB_DIRTY) == 0 ) {
1.1073 + page->write_long = (mem_write_fn_t)tlb_initial_write;
1.1074 + page->write_word = (mem_write_fn_t)tlb_initial_write;
1.1075 + page->write_byte = (mem_write_fn_t)tlb_initial_write;
1.1076 + page->write_burst = (mem_write_burst_fn_t)tlb_initial_write;
1.1077 + mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], FALSE );
1.1078 + } else {
1.1079 + mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], TRUE );
1.1080 + }
1.1081 + }
1.1082 +
1.1083 + mmu_utlb_pages[entry].user_fn = upage;
1.1084 +
1.1085 + /* Is page visible? */
1.1086 + if( (ent->flags & TLB_SHARE) || ent->asid == mmu_asid ) {
1.1087 + mmu_utlb_map_pages( page, upage, start_addr, npages );
1.1088 + } else if( IS_SV_ENABLED() ) {
1.1089 + mmu_utlb_map_pages( page, NULL, start_addr, npages );
1.1090 + }
1.1091 +}
1.1092 +
1.1093 +static void mmu_utlb_remove_entry( int entry )
1.1094 +{
1.1095 + int i, j;
1.1096 + struct utlb_entry *ent = &mmu_utlb[entry];
1.1097 + sh4addr_t start_addr = ent->vpn&ent->mask;
1.1098 + mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
1.1099 + mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
1.1100 + gboolean unmap_user;
1.1101 + int npages = get_tlb_size_pages(ent->flags);
1.1102 +
1.1103 + if( (ent->flags & TLB_SHARE) || ent->asid == mmu_asid ) {
1.1104 + unmap_user = TRUE;
1.1105 + } else if( IS_SV_ENABLED() ) {
1.1106 + unmap_user = FALSE;
1.1107 + } else {
1.1108 + return; // Not mapped
1.1109 + }
1.1110 +
1.1111 + gboolean clean_unmap = mmu_utlb_unmap_pages( TRUE, unmap_user, start_addr, npages );
1.1112 +
1.1113 + if( !clean_unmap ) {
1.1114 + mmu_utlb_remap_pages( TRUE, unmap_user, entry );
1.1115 + }
1.1116 +}
1.1117 +
1.1118 +static void mmu_utlb_register_all()
1.1119 +{
1.1120 + int i;
1.1121 + for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
1.1122 + if( mmu_utlb[i].flags & TLB_VALID )
1.1123 + mmu_utlb_insert_entry( i );
1.1124 + }
1.1125 +}
1.1126 +
1.1127 static void mmu_invalidate_tlb()
1.1128 {
1.1129 int i;
1.1130 for( i=0; i<ITLB_ENTRY_COUNT; i++ ) {
1.1131 mmu_itlb[i].flags &= (~TLB_VALID);
1.1132 }
1.1133 + if( IS_TLB_ENABLED() ) {
1.1134 + for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
1.1135 + if( mmu_utlb[i].flags & TLB_VALID ) {
1.1136 + mmu_utlb_remove_entry( i );
1.1137 + }
1.1138 + }
1.1139 + }
1.1140 for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
1.1141 mmu_utlb[i].flags &= (~TLB_VALID);
1.1142 }
1.1143 - mmu_utlb_entries = 0;
1.1144 -}
1.1145 -
1.1146 -#define ITLB_ENTRY(addr) ((addr>>7)&0x03)
1.1147 -
1.1148 -int32_t mmu_itlb_addr_read( sh4addr_t addr )
1.1149 -{
1.1150 - struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
1.1151 - return ent->vpn | ent->asid | (ent->flags & TLB_VALID);
1.1152 -}
1.1153 -int32_t mmu_itlb_data_read( sh4addr_t addr )
1.1154 -{
1.1155 - struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
1.1156 - return (ent->ppn & 0x1FFFFC00) | ent->flags;
1.1157 -}
1.1158 -
1.1159 -void mmu_itlb_addr_write( sh4addr_t addr, uint32_t val )
1.1160 -{
1.1161 - struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
1.1162 - ent->vpn = val & 0xFFFFFC00;
1.1163 - ent->asid = val & 0x000000FF;
1.1164 - ent->flags = (ent->flags & ~(TLB_VALID)) | (val&TLB_VALID);
1.1165 -}
1.1166 -
1.1167 -void mmu_itlb_data_write( sh4addr_t addr, uint32_t val )
1.1168 -{
1.1169 - struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
1.1170 - ent->ppn = val & 0x1FFFFC00;
1.1171 - ent->flags = val & 0x00001DA;
1.1172 - ent->mask = get_mask_for_flags(val);
1.1173 - if( ent->ppn >= 0x1C000000 )
1.1174 - ent->ppn |= 0xE0000000;
1.1175 -}
1.1176 -
1.1177 -#define UTLB_ENTRY(addr) ((addr>>8)&0x3F)
1.1178 -#define UTLB_ASSOC(addr) (addr&0x80)
1.1179 -#define UTLB_DATA2(addr) (addr&0x00800000)
1.1180 -
1.1181 -int32_t mmu_utlb_addr_read( sh4addr_t addr )
1.1182 -{
1.1183 - struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
1.1184 - return ent->vpn | ent->asid | (ent->flags & TLB_VALID) |
1.1185 - ((ent->flags & TLB_DIRTY)<<7);
1.1186 -}
1.1187 -int32_t mmu_utlb_data_read( sh4addr_t addr )
1.1188 -{
1.1189 - struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
1.1190 - if( UTLB_DATA2(addr) ) {
1.1191 - return ent->pcmcia;
1.1192 - } else {
1.1193 - return (ent->ppn&0x1FFFFC00) | ent->flags;
1.1194 - }
1.1195 -}
1.1196 -
1.1197 -/**
1.1198 - * Find a UTLB entry for the associative TLB write - same as the normal
1.1199 - * lookup but ignores the valid bit.
1.1200 - */
1.1201 -static inline int mmu_utlb_lookup_assoc( uint32_t vpn, uint32_t asid )
1.1202 -{
1.1203 - int result = -1;
1.1204 - unsigned int i;
1.1205 - for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
1.1206 - if( (mmu_utlb[i].flags & TLB_VALID) &&
1.1207 - ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
1.1208 - ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
1.1209 - if( result != -1 ) {
1.1210 - fprintf( stderr, "TLB Multi hit: %d %d\n", result, i );
1.1211 - return -2;
1.1212 - }
1.1213 - result = i;
1.1214 - }
1.1215 - }
1.1216 - return result;
1.1217 -}
1.1218 -
1.1219 -/**
1.1220 - * Find a ITLB entry for the associative TLB write - same as the normal
1.1221 - * lookup but ignores the valid bit.
1.1222 - */
1.1223 -static inline int mmu_itlb_lookup_assoc( uint32_t vpn, uint32_t asid )
1.1224 -{
1.1225 - int result = -1;
1.1226 - unsigned int i;
1.1227 - for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
1.1228 - if( (mmu_itlb[i].flags & TLB_VALID) &&
1.1229 - ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
1.1230 - ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
1.1231 - if( result != -1 ) {
1.1232 - return -2;
1.1233 - }
1.1234 - result = i;
1.1235 - }
1.1236 - }
1.1237 - return result;
1.1238 -}
1.1239 -
1.1240 -void mmu_utlb_addr_write( sh4addr_t addr, uint32_t val )
1.1241 -{
1.1242 - if( UTLB_ASSOC(addr) ) {
1.1243 - int utlb = mmu_utlb_lookup_assoc( val, mmu_asid );
1.1244 - if( utlb >= 0 ) {
1.1245 - struct utlb_entry *ent = &mmu_utlb[utlb];
1.1246 - uint32_t old_flags = ent->flags;
1.1247 - ent->flags = ent->flags & ~(TLB_DIRTY|TLB_VALID);
1.1248 - ent->flags |= (val & TLB_VALID);
1.1249 - ent->flags |= ((val & 0x200)>>7);
1.1250 - if( (old_flags & TLB_VALID) && !(ent->flags&TLB_VALID) ) {
1.1251 - mmu_utlb_remove_entry( utlb );
1.1252 - } else if( !(old_flags & TLB_VALID) && (ent->flags&TLB_VALID) ) {
1.1253 - mmu_utlb_insert_entry( utlb );
1.1254 - }
1.1255 - }
1.1256 -
1.1257 - int itlb = mmu_itlb_lookup_assoc( val, mmu_asid );
1.1258 - if( itlb >= 0 ) {
1.1259 - struct itlb_entry *ent = &mmu_itlb[itlb];
1.1260 - ent->flags = (ent->flags & (~TLB_VALID)) | (val & TLB_VALID);
1.1261 - }
1.1262 -
1.1263 - if( itlb == -2 || utlb == -2 ) {
1.1264 - MMU_TLB_MULTI_HIT_ERROR(addr);
1.1265 - return;
1.1266 - }
1.1267 - } else {
1.1268 - struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
1.1269 - if( ent->flags & TLB_VALID )
1.1270 - mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
1.1271 - ent->vpn = (val & 0xFFFFFC00);
1.1272 - ent->asid = (val & 0xFF);
1.1273 - ent->flags = (ent->flags & ~(TLB_DIRTY|TLB_VALID));
1.1274 - ent->flags |= (val & TLB_VALID);
1.1275 - ent->flags |= ((val & 0x200)>>7);
1.1276 - if( ent->flags & TLB_VALID )
1.1277 - mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
1.1278 - }
1.1279 -}
1.1280 -
1.1281 -void mmu_utlb_data_write( sh4addr_t addr, uint32_t val )
1.1282 -{
1.1283 - struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
1.1284 - if( UTLB_DATA2(addr) ) {
1.1285 - ent->pcmcia = val & 0x0000000F;
1.1286 - } else {
1.1287 - if( ent->flags & TLB_VALID )
1.1288 - mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
1.1289 - ent->ppn = (val & 0x1FFFFC00);
1.1290 - ent->flags = (val & 0x000001FF);
1.1291 - ent->mask = get_mask_for_flags(val);
1.1292 - if( mmu_utlb[mmu_urc].ppn >= 0x1C000000 )
1.1293 - mmu_utlb[mmu_urc].ppn |= 0xE0000000;
1.1294 - if( ent->flags & TLB_VALID )
1.1295 - mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
1.1296 - }
1.1297 -}
1.1298 -
1.1299 -/* Cache access - not implemented */
1.1300 -
1.1301 -int32_t mmu_icache_addr_read( sh4addr_t addr )
1.1302 -{
1.1303 - return 0; // not implemented
1.1304 -}
1.1305 -int32_t mmu_icache_data_read( sh4addr_t addr )
1.1306 -{
1.1307 - return 0; // not implemented
1.1308 -}
1.1309 -int32_t mmu_ocache_addr_read( sh4addr_t addr )
1.1310 -{
1.1311 - return 0; // not implemented
1.1312 -}
1.1313 -int32_t mmu_ocache_data_read( sh4addr_t addr )
1.1314 -{
1.1315 - return 0; // not implemented
1.1316 -}
1.1317 -
1.1318 -void mmu_icache_addr_write( sh4addr_t addr, uint32_t val )
1.1319 -{
1.1320 -}
1.1321 -
1.1322 -void mmu_icache_data_write( sh4addr_t addr, uint32_t val )
1.1323 -{
1.1324 -}
1.1325 -
1.1326 -void mmu_ocache_addr_write( sh4addr_t addr, uint32_t val )
1.1327 -{
1.1328 -}
1.1329 -
1.1330 -void mmu_ocache_data_write( sh4addr_t addr, uint32_t val )
1.1331 -{
1.1332 }
1.1333
1.1334 /******************************************************************************/
1.1335 @@ -639,9 +842,22 @@
1.1336 /******************************************************************************/
1.1337
1.1338 /**
1.1339 - * The translations are excessively complicated, but unfortunately it's a
1.1340 - * complicated system. TODO: make this not be painfully slow.
1.1341 + * Translate a 32-bit address into a UTLB entry number. Does not check for
1.1342 + * page protection etc.
1.1343 + * @return the entryNo if found, -1 if not found, and -2 for a multi-hit.
1.1344 */
1.1345 +int mmu_utlb_entry_for_vpn( uint32_t vpn )
1.1346 +{
1.1347 + mem_region_fn_t fn = sh4_address_space[vpn>>12];
1.1348 + if( fn >= &mmu_utlb_pages[0].fn && fn < &mmu_utlb_pages[UTLB_ENTRY_COUNT].fn ) {
1.1349 + return ((struct utlb_page_entry *)fn) - &mmu_utlb_pages[0];
1.1350 + } else if( fn == &mem_region_tlb_multihit ) {
1.1351 + return -2;
1.1352 + } else {
1.1353 + return -1;
1.1354 + }
1.1355 +}
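
A sketch of using the recovered entry number to form a physical address, following the (ppn & mask) | (addr & ~mask) composition used by the lookup code elsewhere in this file; protection and dirty-bit checks are omitted:

    static int example_translate( sh4vma_t vpn, sh4addr_t *ppa )
    {
        int entryNo = mmu_utlb_entry_for_vpn( vpn );
        if( entryNo < 0 )
            return entryNo;   /* -1 = TLB miss, -2 = multi-hit */
        *ppa = (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
               (vpn & ~mmu_utlb[entryNo].mask);
        return entryNo;
    }
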
1.1356 +
1.1357
1.1358 /**
1.1359 * Perform the actual utlb lookup w/ asid matching.
1.1360 @@ -763,7 +979,7 @@
1.1361 }
1.1362
1.1363 if( result == -1 ) {
1.1364 - int utlbEntry = mmu_utlb_sorted_find( vpn );
1.1365 + int utlbEntry = mmu_utlb_entry_for_vpn( vpn );
1.1366 if( utlbEntry < 0 ) {
1.1367 return utlbEntry;
1.1368 } else {
1.1369 @@ -824,130 +1040,6 @@
1.1370 return result;
1.1371 }
1.1372
1.1373 -#ifdef HAVE_FRAME_ADDRESS
1.1374 -sh4addr_t FASTCALL mmu_vma_to_phys_read( sh4vma_t addr, void *exc )
1.1375 -#else
1.1376 -sh4addr_t FASTCALL mmu_vma_to_phys_read( sh4vma_t addr )
1.1377 -#endif
1.1378 -{
1.1379 - uint32_t mmucr = MMIO_READ(MMU,MMUCR);
1.1380 - if( addr & 0x80000000 ) {
1.1381 - if( IS_SH4_PRIVMODE() ) {
1.1382 - if( addr >= 0xE0000000 ) {
1.1383 - return addr; /* P4 - passthrough */
1.1384 - } else if( addr < 0xC0000000 ) {
1.1385 - /* P1, P2 regions are pass-through (no translation) */
1.1386 - return VMA_TO_EXT_ADDR(addr);
1.1387 - }
1.1388 - } else {
1.1389 - if( addr >= 0xE0000000 && addr < 0xE4000000 &&
1.1390 - ((mmucr&MMUCR_SQMD) == 0) ) {
1.1391 - /* Conditional user-mode access to the store-queue (no translation) */
1.1392 - return addr;
1.1393 - }
1.1394 - MMU_READ_ADDR_ERROR();
1.1395 - RETURN_VIA(exc);
1.1396 - }
1.1397 - }
1.1398 -
1.1399 - if( (mmucr & MMUCR_AT) == 0 ) {
1.1400 - return VMA_TO_EXT_ADDR(addr);
1.1401 - }
1.1402 -
1.1403 - /* If we get this far, translation is required */
1.1404 - int entryNo;
1.1405 - if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
1.1406 - entryNo = mmu_utlb_sorted_find( addr );
1.1407 - } else {
1.1408 - entryNo = mmu_utlb_lookup_vpn( addr );
1.1409 - }
1.1410 -
1.1411 - switch(entryNo) {
1.1412 - case -1:
1.1413 - MMU_TLB_READ_MISS_ERROR(addr);
1.1414 - RETURN_VIA(exc);
1.1415 - case -2:
1.1416 - MMU_TLB_MULTI_HIT_ERROR(addr);
1.1417 - RETURN_VIA(exc);
1.1418 - default:
1.1419 - if( (mmu_utlb[entryNo].flags & TLB_USERMODE) == 0 &&
1.1420 - !IS_SH4_PRIVMODE() ) {
1.1421 - /* protection violation */
1.1422 - MMU_TLB_READ_PROT_ERROR(addr);
1.1423 - RETURN_VIA(exc);
1.1424 - }
1.1425 -
1.1426 - /* finally generate the target address */
1.1427 - return (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
1.1428 - (addr & (~mmu_utlb[entryNo].mask));
1.1429 - }
1.1430 -}
1.1431 -
1.1432 -#ifdef HAVE_FRAME_ADDRESS
1.1433 -sh4addr_t FASTCALL mmu_vma_to_phys_write( sh4vma_t addr, void *exc )
1.1434 -#else
1.1435 -sh4addr_t FASTCALL mmu_vma_to_phys_write( sh4vma_t addr )
1.1436 -#endif
1.1437 -{
1.1438 - uint32_t mmucr = MMIO_READ(MMU,MMUCR);
1.1439 - if( addr & 0x80000000 ) {
1.1440 - if( IS_SH4_PRIVMODE() ) {
1.1441 - if( addr >= 0xE0000000 ) {
1.1442 - return addr; /* P4 - passthrough */
1.1443 - } else if( addr < 0xC0000000 ) {
1.1444 - /* P1, P2 regions are pass-through (no translation) */
1.1445 - return VMA_TO_EXT_ADDR(addr);
1.1446 - }
1.1447 - } else {
1.1448 - if( addr >= 0xE0000000 && addr < 0xE4000000 &&
1.1449 - ((mmucr&MMUCR_SQMD) == 0) ) {
1.1450 - /* Conditional user-mode access to the store-queue (no translation) */
1.1451 - return addr;
1.1452 - }
1.1453 - MMU_WRITE_ADDR_ERROR();
1.1454 - RETURN_VIA(exc);
1.1455 - }
1.1456 - }
1.1457 -
1.1458 - if( (mmucr & MMUCR_AT) == 0 ) {
1.1459 - return VMA_TO_EXT_ADDR(addr);
1.1460 - }
1.1461 -
1.1462 - /* If we get this far, translation is required */
1.1463 - int entryNo;
1.1464 - if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
1.1465 - entryNo = mmu_utlb_sorted_find( addr );
1.1466 - } else {
1.1467 - entryNo = mmu_utlb_lookup_vpn( addr );
1.1468 - }
1.1469 -
1.1470 - switch(entryNo) {
1.1471 - case -1:
1.1472 - MMU_TLB_WRITE_MISS_ERROR(addr);
1.1473 - RETURN_VIA(exc);
1.1474 - case -2:
1.1475 - MMU_TLB_MULTI_HIT_ERROR(addr);
1.1476 - RETURN_VIA(exc);
1.1477 - default:
1.1478 - if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
1.1479 - : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
1.1480 - /* protection violation */
1.1481 - MMU_TLB_WRITE_PROT_ERROR(addr);
1.1482 - RETURN_VIA(exc);
1.1483 - }
1.1484 -
1.1485 - if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
1.1486 - MMU_TLB_INITIAL_WRITE_ERROR(addr);
1.1487 - RETURN_VIA(exc);
1.1488 - }
1.1489 -
1.1490 - /* finally generate the target address */
1.1491 - sh4addr_t pma = (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
1.1492 - (addr & (~mmu_utlb[entryNo].mask));
1.1493 - return pma;
1.1494 - }
1.1495 -}
1.1496 -
1.1497 /**
1.1498 * Update the icache for an untranslated address
1.1499 */
1.1500 @@ -958,13 +1050,13 @@
1.1501 sh4_icache.page_vma = addr & 0xFF000000;
1.1502 sh4_icache.page_ppa = 0x0C000000;
1.1503 sh4_icache.mask = 0xFF000000;
1.1504 - sh4_icache.page = sh4_main_ram;
1.1505 + sh4_icache.page = dc_main_ram;
1.1506 } else if( (addr & 0x1FE00000) == 0 ) {
1.1507 /* BIOS ROM */
1.1508 sh4_icache.page_vma = addr & 0xFFE00000;
1.1509 sh4_icache.page_ppa = 0;
1.1510 sh4_icache.mask = 0xFFE00000;
1.1511 - sh4_icache.page = mem_get_region(0);
1.1512 + sh4_icache.page = dc_boot_rom;
1.1513 } else {
1.1514 /* not supported */
1.1515 sh4_icache.page_vma = -1;
1.1516 @@ -993,7 +1085,7 @@
1.1517 mmu_update_icache_phys(addr);
1.1518 return TRUE;
1.1519 } else if( addr >= 0xE0000000 && addr < 0xFFFFFF00 ) {
1.1520 - MMU_READ_ADDR_ERROR();
1.1521 + RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
1.1522 return FALSE;
1.1523 }
1.1524 }
1.1525 @@ -1010,7 +1102,7 @@
1.1526 entryNo = mmu_itlb_lookup_vpn( addr );
1.1527 } else {
1.1528 if( addr & 0x80000000 ) {
1.1529 - MMU_READ_ADDR_ERROR();
1.1530 + RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
1.1531 return FALSE;
1.1532 }
1.1533
1.1534 @@ -1023,17 +1115,17 @@
1.1535 entryNo = mmu_itlb_lookup_vpn_asid( addr );
1.1536
1.1537 if( entryNo != -1 && (mmu_itlb[entryNo].flags & TLB_USERMODE) == 0 ) {
1.1538 - MMU_TLB_READ_PROT_ERROR(addr);
1.1539 + RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
1.1540 return FALSE;
1.1541 }
1.1542 }
1.1543
1.1544 switch(entryNo) {
1.1545 case -1:
1.1546 - MMU_TLB_READ_MISS_ERROR(addr);
1.1547 + RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
1.1548 return FALSE;
1.1549 case -2:
1.1550 - MMU_TLB_MULTI_HIT_ERROR(addr);
1.1551 + RAISE_TLB_MULTIHIT_ERROR(addr);
1.1552 return FALSE;
1.1553 default:
1.1554 sh4_icache.page_ppa = mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask;
1.1555 @@ -1083,55 +1175,365 @@
1.1556 }
1.1557 }
1.1558
1.1559 -void FASTCALL sh4_flush_store_queue( sh4addr_t addr )
1.1560 +/********************** TLB Direct-Access Regions ***************************/
1.1561 +#ifdef HAVE_FRAME_ADDRESS
1.1562 +#define EXCEPTION_EXIT() do{ *(((void **)__builtin_frame_address(0))+1) = exc; return; } while(0)
1.1563 +#else
1.1564 +#define EXCEPTION_EXIT() sh4_core_exit(CORE_EXIT_EXCEPTION)
1.1565 +#endif
1.1566 +
1.1567 +
1.1568 +#define ITLB_ENTRY(addr) ((addr>>7)&0x03)
1.1569 +
1.1570 +int32_t FASTCALL mmu_itlb_addr_read( sh4addr_t addr )
1.1571 {
1.1572 - int queue = (addr&0x20)>>2;
1.1573 - uint32_t hi = MMIO_READ( MMU, QACR0 + (queue>>1)) << 24;
1.1574 - sh4ptr_t src = (sh4ptr_t)&sh4r.store_queue[queue];
1.1575 - sh4addr_t target = (addr&0x03FFFFE0) | hi;
1.1576 - mem_copy_to_sh4( target, src, 32 );
1.1577 -}
1.1578 -
1.1579 -gboolean FASTCALL sh4_flush_store_queue_mmu( sh4addr_t addr )
1.1580 -{
1.1581 - uint32_t mmucr = MMIO_READ(MMU,MMUCR);
1.1582 - int queue = (addr&0x20)>>2;
1.1583 - sh4ptr_t src = (sh4ptr_t)&sh4r.store_queue[queue];
1.1584 - sh4addr_t target;
1.1585 - /* Store queue operation */
1.1586 -
1.1587 - int entryNo;
1.1588 - if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
1.1589 - entryNo = mmu_utlb_lookup_vpn_asid( addr );
1.1590 - } else {
1.1591 - entryNo = mmu_utlb_lookup_vpn( addr );
1.1592 - }
1.1593 - switch(entryNo) {
1.1594 - case -1:
1.1595 - MMU_TLB_WRITE_MISS_ERROR(addr);
1.1596 - return FALSE;
1.1597 - case -2:
1.1598 - MMU_TLB_MULTI_HIT_ERROR(addr);
1.1599 - return FALSE;
1.1600 - default:
1.1601 - if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
1.1602 - : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
1.1603 - /* protection violation */
1.1604 - MMU_TLB_WRITE_PROT_ERROR(addr);
1.1605 - return FALSE;
1.1606 - }
1.1607 -
1.1608 - if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
1.1609 - MMU_TLB_INITIAL_WRITE_ERROR(addr);
1.1610 - return FALSE;
1.1611 - }
1.1612 -
1.1613 - /* finally generate the target address */
1.1614 - target = ((mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
1.1615 - (addr & (~mmu_utlb[entryNo].mask))) & 0xFFFFFFE0;
1.1616 - }
1.1617 -
1.1618 - mem_copy_to_sh4( target, src, 32 );
1.1619 - return TRUE;
1.1620 + struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
1.1621 + return ent->vpn | ent->asid | (ent->flags & TLB_VALID);
1.1622 }
1.1623
1.1624 +void FASTCALL mmu_itlb_addr_write( sh4addr_t addr, uint32_t val )
1.1625 +{
1.1626 + struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
1.1627 + ent->vpn = val & 0xFFFFFC00;
1.1628 + ent->asid = val & 0x000000FF;
1.1629 + ent->flags = (ent->flags & ~(TLB_VALID)) | (val&TLB_VALID);
1.1630 +}
1.1631 +
1.1632 +int32_t FASTCALL mmu_itlb_data_read( sh4addr_t addr )
1.1633 +{
1.1634 + struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
1.1635 + return (ent->ppn & 0x1FFFFC00) | ent->flags;
1.1636 +}
1.1637 +
1.1638 +void FASTCALL mmu_itlb_data_write( sh4addr_t addr, uint32_t val )
1.1639 +{
1.1640 + struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
1.1641 + ent->ppn = val & 0x1FFFFC00;
1.1642 +    ent->flags = val & 0x000001DA;
1.1643 + ent->mask = get_tlb_size_mask(val);
1.1644 + if( ent->ppn >= 0x1C000000 )
1.1645 + ent->ppn |= 0xE0000000;
1.1646 +}
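
Both TLB data writes derive the VPN comparison mask from the SZ bits of the written value via get_tlb_size_mask(), which is not shown in this hunk. As a hedged illustration only (not necessarily the helper's actual body), an SH4-style mask generator maps SZ1 (bit 7) and SZ0 (bit 4) onto the four architectural page sizes:

#include <stdint.h>

/* Illustrative stand-in for get_tlb_size_mask(): SZ1:SZ0 values of
 * 00/01/10/11 select 1KB/4KB/64KB/1MB pages respectively on the SH4. */
static uint32_t example_tlb_size_mask( uint32_t pte )
{
    switch( ((pte & 0x80) >> 6) | ((pte & 0x10) >> 4) ) {
    case 0:  return 0xFFFFFC00;   /* 1KB  */
    case 1:  return 0xFFFFF000;   /* 4KB  */
    case 2:  return 0xFFFF0000;   /* 64KB */
    default: return 0xFFF00000;   /* 1MB  */
    }
}
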
1.1647 +
1.1648 +#define UTLB_ENTRY(addr) ((addr>>8)&0x3F)
1.1649 +#define UTLB_ASSOC(addr) (addr&0x80)
1.1650 +#define UTLB_DATA2(addr) (addr&0x00800000)
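
As a worked example of these macros: a write at offset 0x3880 into the UTLB address array selects entry (0x3880 >> 8) & 0x3F = 0x38 (entry 56), and bit 7 being set makes it an associative write. In the data arrays, bit 23 (UTLB_DATA2) distinguishes data array 2 — the PCMCIA assistance bits the code keeps in ent->pcmcia — from data array 1.
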
1.1651 +
1.1652 +int32_t FASTCALL mmu_utlb_addr_read( sh4addr_t addr )
1.1653 +{
1.1654 + struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
1.1655 + return ent->vpn | ent->asid | (ent->flags & TLB_VALID) |
1.1656 + ((ent->flags & TLB_DIRTY)<<7);
1.1657 +}
1.1658 +int32_t FASTCALL mmu_utlb_data_read( sh4addr_t addr )
1.1659 +{
1.1660 + struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
1.1661 + if( UTLB_DATA2(addr) ) {
1.1662 + return ent->pcmcia;
1.1663 + } else {
1.1664 + return (ent->ppn&0x1FFFFC00) | ent->flags;
1.1665 + }
1.1666 +}
1.1667 +
1.1668 +/**
1.1669 + * Find a UTLB entry for the associative TLB write - a plain VPN/ASID match
1.1670 + * over the valid entries, with no side effects; a multiple match returns -2.
1.1671 + */
1.1672 +static inline int mmu_utlb_lookup_assoc( uint32_t vpn, uint32_t asid )
1.1673 +{
1.1674 + int result = -1;
1.1675 + unsigned int i;
1.1676 + for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
1.1677 + if( (mmu_utlb[i].flags & TLB_VALID) &&
1.1678 + ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
1.1679 + ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
1.1680 + if( result != -1 ) {
1.1681 + fprintf( stderr, "TLB Multi hit: %d %d\n", result, i );
1.1682 + return -2;
1.1683 + }
1.1684 + result = i;
1.1685 + }
1.1686 + }
1.1687 + return result;
1.1688 +}
1.1689 +
1.1690 +/**
1.1691 + * Find an ITLB entry for the associative TLB write - a plain VPN/ASID match
1.1692 + * over the valid entries, with no side effects; a multiple match returns -2.
1.1693 + */
1.1694 +static inline int mmu_itlb_lookup_assoc( uint32_t vpn, uint32_t asid )
1.1695 +{
1.1696 + int result = -1;
1.1697 + unsigned int i;
1.1698 + for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
1.1699 + if( (mmu_itlb[i].flags & TLB_VALID) &&
1.1700 + ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
1.1701 + ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
1.1702 + if( result != -1 ) {
1.1703 + return -2;
1.1704 + }
1.1705 + result = i;
1.1706 + }
1.1707 + }
1.1708 + return result;
1.1709 +}
1.1710 +
1.1711 +void FASTCALL mmu_utlb_addr_write( sh4addr_t addr, uint32_t val, void *exc )
1.1712 +{
1.1713 + if( UTLB_ASSOC(addr) ) {
1.1714 + int utlb = mmu_utlb_lookup_assoc( val, mmu_asid );
1.1715 + if( utlb >= 0 ) {
1.1716 + struct utlb_entry *ent = &mmu_utlb[utlb];
1.1717 + uint32_t old_flags = ent->flags;
1.1718 + ent->flags = ent->flags & ~(TLB_DIRTY|TLB_VALID);
1.1719 + ent->flags |= (val & TLB_VALID);
1.1720 + ent->flags |= ((val & 0x200)>>7);
1.1721 + if( ((old_flags^ent->flags) & (TLB_VALID|TLB_DIRTY)) != 0 ) {
1.1722 + if( old_flags & TLB_VALID )
1.1723 + mmu_utlb_remove_entry( utlb );
1.1724 + if( ent->flags & TLB_VALID )
1.1725 + mmu_utlb_insert_entry( utlb );
1.1726 + }
1.1727 + }
1.1728 +
1.1729 + int itlb = mmu_itlb_lookup_assoc( val, mmu_asid );
1.1730 + if( itlb >= 0 ) {
1.1731 + struct itlb_entry *ent = &mmu_itlb[itlb];
1.1732 + ent->flags = (ent->flags & (~TLB_VALID)) | (val & TLB_VALID);
1.1733 + }
1.1734 +
1.1735 + if( itlb == -2 || utlb == -2 ) {
1.1736 + RAISE_TLB_MULTIHIT_ERROR(addr);
1.1737 + EXCEPTION_EXIT();
1.1738 + return;
1.1739 + }
1.1740 + } else {
1.1741 + struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
1.1742 + if( ent->flags & TLB_VALID )
1.1743 + mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
1.1744 + ent->vpn = (val & 0xFFFFFC00);
1.1745 + ent->asid = (val & 0xFF);
1.1746 + ent->flags = (ent->flags & ~(TLB_DIRTY|TLB_VALID));
1.1747 + ent->flags |= (val & TLB_VALID);
1.1748 + ent->flags |= ((val & 0x200)>>7);
1.1749 + if( ent->flags & TLB_VALID )
1.1750 + mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
1.1751 + }
1.1752 +}
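
To make the direct (non-associative) path concrete: writing 0x8C001347 to an entry's address-array slot installs VPN 0x8C001000 and ASID 0x47; bits 8 (V) and 9 (D) are both set, so the dirty bit is shifted down into the internal TLB_DIRTY flag and the entry is (re)inserted into the address-space maps via mmu_utlb_insert_entry.
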
1.1753 +
1.1754 +void FASTCALL mmu_utlb_data_write( sh4addr_t addr, uint32_t val )
1.1755 +{
1.1756 + struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
1.1757 + if( UTLB_DATA2(addr) ) {
1.1758 + ent->pcmcia = val & 0x0000000F;
1.1759 + } else {
1.1760 + if( ent->flags & TLB_VALID )
1.1761 + mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
1.1762 + ent->ppn = (val & 0x1FFFFC00);
1.1763 + ent->flags = (val & 0x000001FF);
1.1764 + ent->mask = get_tlb_size_mask(val);
1.1765 + if( ent->flags & TLB_VALID )
1.1766 + mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
1.1767 + }
1.1768 +}
1.1769 +
1.1770 +struct mem_region_fn p4_region_itlb_addr = {
1.1771 + mmu_itlb_addr_read, mmu_itlb_addr_write,
1.1772 + mmu_itlb_addr_read, mmu_itlb_addr_write,
1.1773 + mmu_itlb_addr_read, mmu_itlb_addr_write,
1.1774 + unmapped_read_burst, unmapped_write_burst,
1.1775 + unmapped_prefetch };
1.1776 +struct mem_region_fn p4_region_itlb_data = {
1.1777 + mmu_itlb_data_read, mmu_itlb_data_write,
1.1778 + mmu_itlb_data_read, mmu_itlb_data_write,
1.1779 + mmu_itlb_data_read, mmu_itlb_data_write,
1.1780 + unmapped_read_burst, unmapped_write_burst,
1.1781 + unmapped_prefetch };
1.1782 +struct mem_region_fn p4_region_utlb_addr = {
1.1783 + mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
1.1784 + mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
1.1785 + mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
1.1786 + unmapped_read_burst, unmapped_write_burst,
1.1787 + unmapped_prefetch };
1.1788 +struct mem_region_fn p4_region_utlb_data = {
1.1789 + mmu_utlb_data_read, mmu_utlb_data_write,
1.1790 + mmu_utlb_data_read, mmu_utlb_data_write,
1.1791 + mmu_utlb_data_read, mmu_utlb_data_write,
1.1792 + unmapped_read_burst, unmapped_write_burst,
1.1793 + unmapped_prefetch };
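
These per-region function tables are what the rest of the core calls through: assuming the address space is represented as an array of such tables, one per page, a memory access simply indexes the array and calls the matching slot, and a faulting page points at one of the error regions defined below so the exception is raised inside the handler itself. A hedged, simplified sketch of that dispatch pattern follows; the struct layout, field names and the 4KB page size are stand-ins for the real definitions in mem.h.

#include <stdint.h>

/* Simplified stand-in for mem_region_fn: only the long-access slots are
 * shown; the real struct also carries word, byte, burst and prefetch slots. */
typedef struct example_region_fn {
    int32_t (*read_long)( uint32_t addr );
    void    (*write_long)( uint32_t addr, uint32_t val );
} *example_region_fn_t;

static int32_t example_read_long( example_region_fn_t *address_space, uint32_t vma )
{
    /* One table entry per 4KB page: pick the handler for the page and call it. */
    return address_space[ vma >> 12 ]->read_long( vma );
}
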
1.1794 +
1.1795 +/********************** Error regions **************************/
1.1796 +
1.1797 +static void FASTCALL address_error_read( sh4addr_t addr, void *exc )
1.1798 +{
1.1799 + RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
1.1800 + EXCEPTION_EXIT();
1.1801 +}
1.1802 +
1.1803 +static void FASTCALL address_error_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
1.1804 +{
1.1805 + RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
1.1806 + EXCEPTION_EXIT();
1.1807 +}
1.1808 +
1.1809 +static void FASTCALL address_error_write( sh4addr_t addr, uint32_t val, void *exc )
1.1810 +{
1.1811 + RAISE_MEM_ERROR(EXC_DATA_ADDR_WRITE, addr);
1.1812 + EXCEPTION_EXIT();
1.1813 +}
1.1814 +
1.1815 +static void FASTCALL tlb_miss_read( sh4addr_t addr, void *exc )
1.1816 +{
1.1817 + RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
1.1818 + EXCEPTION_EXIT();
1.1819 +}
1.1820 +
1.1821 +static void FASTCALL tlb_miss_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
1.1822 +{
1.1823 + RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
1.1824 + EXCEPTION_EXIT();
1.1825 +}
1.1826 +
1.1827 +static void FASTCALL tlb_miss_write( sh4addr_t addr, uint32_t val, void *exc )
1.1828 +{
1.1829 + RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, addr);
1.1830 + EXCEPTION_EXIT();
1.1831 +}
1.1832 +
1.1833 +static int32_t FASTCALL tlb_protected_read( sh4addr_t addr, void *exc )
1.1834 +{
1.1835 + RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
1.1836 + EXCEPTION_EXIT();
1.1837 +}
1.1838 +
1.1839 +static int32_t FASTCALL tlb_protected_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
1.1840 +{
1.1841 + RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
1.1842 + EXCEPTION_EXIT();
1.1843 +}
1.1844 +
1.1845 +static void FASTCALL tlb_protected_write( sh4addr_t addr, uint32_t val, void *exc )
1.1846 +{
1.1847 + RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, addr);
1.1848 + EXCEPTION_EXIT();
1.1849 +}
1.1850 +
1.1851 +static void FASTCALL tlb_initial_write( sh4addr_t addr, uint32_t val, void *exc )
1.1852 +{
1.1853 + RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, addr);
1.1854 + EXCEPTION_EXIT();
1.1855 +}
1.1856 +
1.1857 +static int32_t FASTCALL tlb_multi_hit_read( sh4addr_t addr, void *exc )
1.1858 +{
1.1859 + sh4_raise_tlb_multihit(addr);
1.1860 + EXCEPTION_EXIT();
1.1861 +}
1.1862 +
1.1863 +static int32_t FASTCALL tlb_multi_hit_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
1.1864 +{
1.1865 + sh4_raise_tlb_multihit(addr);
1.1866 + EXCEPTION_EXIT();
1.1867 +}
1.1868 +static void FASTCALL tlb_multi_hit_write( sh4addr_t addr, uint32_t val, void *exc )
1.1869 +{
1.1870 + sh4_raise_tlb_multihit(addr);
1.1871 + EXCEPTION_EXIT();
1.1872 +}
1.1873 +
1.1874 +/**
1.1875 + * Note: Per sec 4.6.4 of the SH7750 manual, SQ
1.1876 + */
1.1877 +struct mem_region_fn mem_region_address_error = {
1.1878 + (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
1.1879 + (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
1.1880 + (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
1.1881 + (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
1.1882 + unmapped_prefetch };
1.1883 +
1.1884 +struct mem_region_fn mem_region_tlb_miss = {
1.1885 + (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
1.1886 + (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
1.1887 + (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
1.1888 + (mem_read_burst_fn_t)tlb_miss_read_burst, (mem_write_burst_fn_t)tlb_miss_write,
1.1889 + unmapped_prefetch };
1.1890 +
1.1891 +struct mem_region_fn mem_region_tlb_protected = {
1.1892 + (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
1.1893 + (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
1.1894 + (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
1.1895 + (mem_read_burst_fn_t)tlb_protected_read_burst, (mem_write_burst_fn_t)tlb_protected_write,
1.1896 + unmapped_prefetch };
1.1897 +
1.1898 +struct mem_region_fn mem_region_tlb_multihit = {
1.1899 + (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
1.1900 + (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
1.1901 + (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
1.1902 + (mem_read_burst_fn_t)tlb_multi_hit_read_burst, (mem_write_burst_fn_t)tlb_multi_hit_write,
1.1903 + (mem_prefetch_fn_t)tlb_multi_hit_read };
1.1904 +
1.1905 +
1.1906 +/* Store-queue regions */
1.1907 +/* These are a bit of a pain - the first 8 fields are controlled by SQMD alone,
1.1908 + * while the final (prefetch) field is controlled by the actual TLB settings
1.1909 + * (plus SQMD in some cases), in contrast to the ordinary regions above.
1.1910 + * A naming sketch of how the variants combine follows their definitions below.
1.1911 + * There is probably a simpler way to do this.
1.1912 + */
1.1913 +
1.1914 +struct mem_region_fn p4_region_storequeue = {
1.1915 + ccn_storequeue_read_long, ccn_storequeue_write_long,
1.1916 + unmapped_read_long, unmapped_write_long, /* TESTME: Officially only long access is supported */
1.1917 + unmapped_read_long, unmapped_write_long,
1.1918 + unmapped_read_burst, unmapped_write_burst,
1.1919 + ccn_storequeue_prefetch };
1.1920 +
1.1921 +struct mem_region_fn p4_region_storequeue_miss = {
1.1922 + ccn_storequeue_read_long, ccn_storequeue_write_long,
1.1923 + unmapped_read_long, unmapped_write_long, /* TESTME: Officially only long access is supported */
1.1924 + unmapped_read_long, unmapped_write_long,
1.1925 + unmapped_read_burst, unmapped_write_burst,
1.1926 + (mem_prefetch_fn_t)tlb_miss_read };
1.1927 +
1.1928 +struct mem_region_fn p4_region_storequeue_multihit = {
1.1929 + ccn_storequeue_read_long, ccn_storequeue_write_long,
1.1930 + unmapped_read_long, unmapped_write_long, /* TESTME: Officially only long access is supported */
1.1931 + unmapped_read_long, unmapped_write_long,
1.1932 + unmapped_read_burst, unmapped_write_burst,
1.1933 + (mem_prefetch_fn_t)tlb_multi_hit_read };
1.1934 +
1.1935 +struct mem_region_fn p4_region_storequeue_protected = {
1.1936 + ccn_storequeue_read_long, ccn_storequeue_write_long,
1.1937 + unmapped_read_long, unmapped_write_long,
1.1938 + unmapped_read_long, unmapped_write_long,
1.1939 + unmapped_read_burst, unmapped_write_burst,
1.1940 + (mem_prefetch_fn_t)tlb_protected_read };
1.1941 +
1.1942 +struct mem_region_fn p4_region_storequeue_sqmd = {
1.1943 + (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
1.1944 + (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
1.1945 + (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
1.1946 + (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
1.1947 + (mem_prefetch_fn_t)address_error_read };
1.1948 +
1.1949 +struct mem_region_fn p4_region_storequeue_sqmd_miss = {
1.1950 + (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
1.1951 + (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
1.1952 + (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
1.1953 + (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
1.1954 + (mem_prefetch_fn_t)tlb_miss_read };
1.1955 +
1.1956 +struct mem_region_fn p4_region_storequeue_sqmd_multihit = {
1.1957 + (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
1.1958 + (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
1.1959 + (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
1.1960 + (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
1.1961 + (mem_prefetch_fn_t)tlb_multi_hit_read };
1.1962 +
1.1963 +struct mem_region_fn p4_region_storequeue_sqmd_protected = {
1.1964 + (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
1.1965 + (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
1.1966 + (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
1.1967 + (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
1.1968 + (mem_prefetch_fn_t)tlb_protected_read };
1.1969 +
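
Putting the eight store-queue variants above together: SQMD decides whether the ordinary read/write slots behave as a store queue or raise address errors for user code, and the TLB outcome for the mapped page decides the prefetch behaviour. The sketch below is purely illustrative — the real selection logic lives elsewhere in mmu.c, and example_sq_region_name and example_tlb_outcome are invented names — it only maps the two dimensions onto the region names defined above.

#include <stdbool.h>

typedef enum { EX_TLB_HIT, EX_TLB_MISS, EX_TLB_MULTIHIT, EX_TLB_PROTECTED } example_tlb_outcome;

/* Map (does SQMD block user access?, TLB outcome) to the name of the
 * store-queue region above that encodes that combination. */
static const char *example_sq_region_name( bool sqmd_blocks_user, example_tlb_outcome tlb )
{
    static const char * const plain[] = {
        "p4_region_storequeue",              "p4_region_storequeue_miss",
        "p4_region_storequeue_multihit",     "p4_region_storequeue_protected" };
    static const char * const sqmd[]  = {
        "p4_region_storequeue_sqmd",         "p4_region_storequeue_sqmd_miss",
        "p4_region_storequeue_sqmd_multihit","p4_region_storequeue_sqmd_protected" };
    return ( sqmd_blocks_user ? sqmd : plain )[ tlb ];
}
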