/**
 * $Id$
 *
 * SH4 MMU implementation based on address space page maps. This module
 * is responsible for all address decoding functions.
 *
 * Copyright (c) 2005 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#define MODULE sh4_module

#include <stdio.h>
#include <assert.h>
#include "sh4/sh4mmio.h"
#include "sh4/sh4core.h"
#include "sh4/sh4trans.h"
#include "dreamcast.h"
#include "mem.h"
#include "mmu.h"

#define RAISE_TLB_ERROR(code, vpn) \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
    sh4_raise_tlb_exception(code);
#define RAISE_MEM_ERROR(code, vpn) \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
    sh4_raise_exception(code);
#define RAISE_TLB_MULTIHIT_ERROR(vpn) \
    sh4_raise_reset(EXC_TLB_MULTI_HIT); \
    MMIO_WRITE(MMU, TEA, vpn); \
    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00)));

/* An entry is a 1K entry if it's one of the mmu_utlb_1k_pages entries */
#define IS_1K_PAGE_ENTRY(ent)  ( ((uintptr_t)(((struct utlb_1k_entry *)ent) - &mmu_utlb_1k_pages[0])) < UTLB_ENTRY_COUNT )
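/* The check above relies on pointer arithmetic: if ent points into the
 * mmu_utlb_1k_pages[] array, the element-wise difference from the start of
 * the array is simply its index, i.e. a value in [0, UTLB_ENTRY_COUNT).
 * In practice any handler outside the array yields an out-of-range value
 * after the unsigned cast, so e.g. &mmu_utlb_1k_pages[3] is recognised as a
 * 1K entry while an ordinary mmu_utlb_pages entry is not. */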

/* Primary address space (used directly by SH4 cores) */
mem_region_fn_t *sh4_address_space;
mem_region_fn_t *sh4_user_address_space;
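/* Each map is a flat table holding one mem_region_fn_t per 4KB page of the
 * 32-bit address space, indexed by vaddr >> 12, so address decoding is a
 * single table load; all region- and TLB-specific behaviour lives in the
 * function pointers installed in the table. */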

/* MMU-mapped storequeue targets. Only used with TLB on */
mem_region_fn_t *storequeue_address_space;
mem_region_fn_t *storequeue_user_address_space;

/* Accessed from the UTLB accessor methods */
uint32_t mmu_urc;
uint32_t mmu_urb;

/* Module globals */
static struct itlb_entry mmu_itlb[ITLB_ENTRY_COUNT];
static struct utlb_entry mmu_utlb[UTLB_ENTRY_COUNT];
static struct utlb_page_entry mmu_utlb_pages[UTLB_ENTRY_COUNT];
static uint32_t mmu_lrui;
static uint32_t mmu_asid; // current asid

/* Structures for 1K page handling */
static struct utlb_1k_entry mmu_utlb_1k_pages[UTLB_ENTRY_COUNT];
static int mmu_utlb_1k_free_list[UTLB_ENTRY_COUNT];
static int mmu_utlb_1k_free_index;


/* Function prototypes */
static void mmu_invalidate_tlb();
static void mmu_utlb_register_all();
static void mmu_utlb_remove_entry(int);
static void mmu_utlb_insert_entry(int);
static void mmu_register_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn );
static void mmu_register_user_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn );
static void mmu_set_tlb_enabled( int tlb_on );
static void mmu_set_tlb_asid( uint32_t asid );
static void mmu_set_storequeue_protected( int protected );
static gboolean mmu_utlb_map_pages( mem_region_fn_t priv_page, mem_region_fn_t user_page, sh4addr_t start_addr, int npages );
static void mmu_utlb_remap_pages( gboolean remap_priv, gboolean remap_user, int entryNo );
static gboolean mmu_utlb_unmap_pages( gboolean unmap_priv, gboolean unmap_user, sh4addr_t start_addr, int npages );
static gboolean mmu_ext_page_remapped( sh4addr_t page, mem_region_fn_t fn, void *user_data );
static void mmu_utlb_1k_init();
static struct utlb_1k_entry *mmu_utlb_1k_alloc();
static void mmu_utlb_1k_free( struct utlb_1k_entry *entry );

static int32_t FASTCALL tlb_protected_read( sh4addr_t addr, void *exc );
static void FASTCALL tlb_protected_write( sh4addr_t addr, uint32_t val, void *exc );
static void FASTCALL tlb_initial_write( sh4addr_t addr, uint32_t val, void *exc );
static uint32_t get_tlb_size_mask( uint32_t flags );
static uint32_t get_tlb_size_pages( uint32_t flags );

/*********************** Module public functions ****************************/

/**
 * Allocate memory for the address space maps, and initialize them according
 * to the default (reset) values. (TLB is disabled by default)
 */

void MMU_init()
{
    sh4_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 256 );
    sh4_user_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 256 );
    storequeue_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 4 );
    storequeue_user_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 4 );

    mmu_set_tlb_enabled(0);
    mmu_register_user_mem_region( 0x80000000, 0x00000000, &mem_region_address_error );
    mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );

    /* Setup P4 tlb/cache access regions */
    mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
    mmu_register_mem_region( 0xE4000000, 0xF0000000, &mem_region_unmapped );
    mmu_register_mem_region( 0xF0000000, 0xF1000000, &p4_region_icache_addr );
    mmu_register_mem_region( 0xF1000000, 0xF2000000, &p4_region_icache_data );
    mmu_register_mem_region( 0xF2000000, 0xF3000000, &p4_region_itlb_addr );
    mmu_register_mem_region( 0xF3000000, 0xF4000000, &p4_region_itlb_data );
    mmu_register_mem_region( 0xF4000000, 0xF5000000, &p4_region_ocache_addr );
    mmu_register_mem_region( 0xF5000000, 0xF6000000, &p4_region_ocache_data );
    mmu_register_mem_region( 0xF6000000, 0xF7000000, &p4_region_utlb_addr );
    mmu_register_mem_region( 0xF7000000, 0xF8000000, &p4_region_utlb_data );
    mmu_register_mem_region( 0xF8000000, 0x00000000, &mem_region_unmapped );

    /* Setup P4 control region */
    mmu_register_mem_region( 0xFF000000, 0xFF001000, &mmio_region_MMU.fn );
    mmu_register_mem_region( 0xFF100000, 0xFF101000, &mmio_region_PMM.fn );
    mmu_register_mem_region( 0xFF200000, 0xFF201000, &mmio_region_UBC.fn );
    mmu_register_mem_region( 0xFF800000, 0xFF801000, &mmio_region_BSC.fn );
    mmu_register_mem_region( 0xFF900000, 0xFFA00000, &mem_region_unmapped ); // SDMR2 + SDMR3
    mmu_register_mem_region( 0xFFA00000, 0xFFA01000, &mmio_region_DMAC.fn );
    mmu_register_mem_region( 0xFFC00000, 0xFFC01000, &mmio_region_CPG.fn );
    mmu_register_mem_region( 0xFFC80000, 0xFFC81000, &mmio_region_RTC.fn );
    mmu_register_mem_region( 0xFFD00000, 0xFFD01000, &mmio_region_INTC.fn );
    mmu_register_mem_region( 0xFFD80000, 0xFFD81000, &mmio_region_TMU.fn );
    mmu_register_mem_region( 0xFFE00000, 0xFFE01000, &mmio_region_SCI.fn );
    mmu_register_mem_region( 0xFFE80000, 0xFFE81000, &mmio_region_SCIF.fn );
    mmu_register_mem_region( 0xFFF00000, 0xFFF01000, &mem_region_unmapped ); // H-UDI

    register_mem_page_remapped_hook( mmu_ext_page_remapped, NULL );
    mmu_utlb_1k_init();

    /* Ensure the code regions are executable */
    mem_unprotect( mmu_utlb_pages, sizeof(mmu_utlb_pages) );
    mem_unprotect( mmu_utlb_1k_pages, sizeof(mmu_utlb_1k_pages) );
}

void MMU_reset()
{
    mmio_region_MMU_write( CCR, 0 );
    mmio_region_MMU_write( MMUCR, 0 );
}

void MMU_save_state( FILE *f )
{
    fwrite( &mmu_itlb, sizeof(mmu_itlb), 1, f );
    fwrite( &mmu_utlb, sizeof(mmu_utlb), 1, f );
    fwrite( &mmu_urc, sizeof(mmu_urc), 1, f );
    fwrite( &mmu_urb, sizeof(mmu_urb), 1, f );
    fwrite( &mmu_lrui, sizeof(mmu_lrui), 1, f );
    fwrite( &mmu_asid, sizeof(mmu_asid), 1, f );
}

int MMU_load_state( FILE *f )
{
    if( fread( &mmu_itlb, sizeof(mmu_itlb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_utlb, sizeof(mmu_utlb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_urc, sizeof(mmu_urc), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_urb, sizeof(mmu_urb), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_lrui, sizeof(mmu_lrui), 1, f ) != 1 ) {
        return 1;
    }
    if( fread( &mmu_asid, sizeof(mmu_asid), 1, f ) != 1 ) {
        return 1;
    }

    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    mmu_set_tlb_enabled(mmucr&MMUCR_AT);
    mmu_set_storequeue_protected(mmucr&MMUCR_SQMD);
    return 0;
}

/**
 * LDTLB instruction implementation. Copies PTEH, PTEL and PTEA into the UTLB
 * entry identified by MMUCR.URC. Does not modify MMUCR or the ITLB.
 */
void MMU_ldtlb()
{
    mmu_urc %= mmu_urb;
    if( mmu_utlb[mmu_urc].flags & TLB_VALID )
        mmu_utlb_remove_entry( mmu_urc );
    mmu_utlb[mmu_urc].vpn = MMIO_READ(MMU, PTEH) & 0xFFFFFC00;
    mmu_utlb[mmu_urc].asid = MMIO_READ(MMU, PTEH) & 0x000000FF;
    mmu_utlb[mmu_urc].ppn = MMIO_READ(MMU, PTEL) & 0x1FFFFC00;
    mmu_utlb[mmu_urc].flags = MMIO_READ(MMU, PTEL) & 0x000001FF;
    mmu_utlb[mmu_urc].pcmcia = MMIO_READ(MMU, PTEA);
    mmu_utlb[mmu_urc].mask = get_tlb_size_mask(mmu_utlb[mmu_urc].flags);
    if( mmu_utlb[mmu_urc].flags & TLB_VALID )
        mmu_utlb_insert_entry( mmu_urc );
}
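/* Field layout used above: PTEH carries the VPN in bits 31:10 and the ASID in
 * bits 7:0, while PTEL carries the PPN in bits 28:10 and the
 * valid/protection/size flags in bits 8:0. For example, PTEH = 0x8C001012
 * loads vpn = 0x8C001000 and asid = 0x12. */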

MMIO_REGION_READ_FN( MMU, reg )
{
    reg &= 0xFFF;
    switch( reg ) {
    case MMUCR:
        mmu_urc %= mmu_urb;
        return MMIO_READ( MMU, MMUCR) | (mmu_urc<<10) | ((mmu_urb&0x3F)<<18) | (mmu_lrui<<26);
    default:
        return MMIO_READ( MMU, reg );
    }
}

MMIO_REGION_WRITE_FN( MMU, reg, val )
{
    uint32_t tmp;
    reg &= 0xFFF;
    switch(reg) {
    case SH4VER:
        return;
    case PTEH:
        val &= 0xFFFFFCFF;
        if( (val & 0xFF) != mmu_asid ) {
            mmu_set_tlb_asid( val&0xFF );
            sh4_icache.page_vma = -1; // invalidate icache as asid has changed
        }
        break;
    case PTEL:
        val &= 0x1FFFFDFF;
        break;
    case PTEA:
        val &= 0x0000000F;
        break;
    case TRA:
        val &= 0x000003FC;
        break;
    case EXPEVT:
    case INTEVT:
        val &= 0x00000FFF;
        break;
    case MMUCR:
        if( val & MMUCR_TI ) {
            mmu_invalidate_tlb();
        }
        mmu_urc = (val >> 10) & 0x3F;
        mmu_urb = (val >> 18) & 0x3F;
        if( mmu_urb == 0 ) {
            mmu_urb = 0x40;
        }
        mmu_lrui = (val >> 26) & 0x3F;
        val &= 0x00000301;
        tmp = MMIO_READ( MMU, MMUCR );
        if( (val ^ tmp) & (MMUCR_SQMD) ) {
            mmu_set_storequeue_protected( val & MMUCR_SQMD );
        }
        if( (val ^ tmp) & (MMUCR_AT) ) {
            // AT flag has changed state - flush the xlt cache as all bets
            // are off now. We also need to force an immediate exit from the
            // current block
            mmu_set_tlb_enabled( val & MMUCR_AT );
            MMIO_WRITE( MMU, MMUCR, val );
            sh4_flush_icache();
        }
        break;
    case CCR:
        CCN_set_cache_control( val );
        val &= 0x81A7;
        break;
    case MMUUNK1:
        /* Note that if the high bit is set, this appears to reset the machine.
         * Not emulating this behaviour yet until we know why...
         */
        val &= 0x00010007;
        break;
    case QACR0:
    case QACR1:
        val &= 0x0000001C;
        break;
    case PMCR1:
        PMM_write_control(0, val);
        val &= 0x0000C13F;
        break;
    case PMCR2:
        PMM_write_control(1, val);
        val &= 0x0000C13F;
        break;
    default:
        break;
    }
    MMIO_WRITE( MMU, reg, val );
}

/********************** 1K Page handling ***********************/
/* Since we use 4K pages as our native page size, 1K pages need a bit of extra
 * effort to manage - we justify this on the basis that most programs won't
 * actually use 1K pages, so we may as well optimize for the common case.
 *
 * Implementation uses an intermediate page entry (the utlb_1k_entry) that
 * redirects requests to the 'real' page entry. These are allocated on an
 * as-needed basis, and returned to the pool when all subpages are empty.
 */
static void mmu_utlb_1k_init()
{
    int i;
    for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
        mmu_utlb_1k_free_list[i] = i;
        mmu_utlb_1k_init_vtable( &mmu_utlb_1k_pages[i] );
    }
    mmu_utlb_1k_free_index = 0;
}

static struct utlb_1k_entry *mmu_utlb_1k_alloc()
{
    assert( mmu_utlb_1k_free_index < UTLB_ENTRY_COUNT );
    /* Take the next index from the free list (not the raw allocation count),
     * so that entries released by mmu_utlb_1k_free() are reused correctly. */
    struct utlb_1k_entry *entry = &mmu_utlb_1k_pages[mmu_utlb_1k_free_list[mmu_utlb_1k_free_index++]];
    return entry;
}

static void mmu_utlb_1k_free( struct utlb_1k_entry *ent )
{
    unsigned int entryNo = ent - &mmu_utlb_1k_pages[0];
    assert( entryNo < UTLB_ENTRY_COUNT );
    assert( mmu_utlb_1k_free_index > 0 );
    mmu_utlb_1k_free_list[--mmu_utlb_1k_free_index] = entryNo;
}


/********************** Address space maintenance *************************/

/**
 * MMU accessor functions just increment URC - fixup here if necessary
 */
static inline void mmu_urc_fixup()
{
    mmu_urc %= mmu_urb;
}
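/* The fast-path accessors bump mmu_urc without wrapping it; the value only
 * has to be architecturally correct when it becomes visible (MMUCR reads,
 * LDTLB), at which point it is folded back into the 0..mmu_urb-1 range. */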

static void mmu_register_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn )
{
    int count = (end - start) >> 12;
    mem_region_fn_t *ptr = &sh4_address_space[start>>12];
    while( count-- > 0 ) {
        *ptr++ = fn;
    }
}

static void mmu_register_user_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn )
{
    int count = (end - start) >> 12;
    mem_region_fn_t *ptr = &sh4_user_address_space[start>>12];
    while( count-- > 0 ) {
        *ptr++ = fn;
    }
}
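/* Note: 'end' is exclusive, and the subtraction is done in uint32_t, so an
 * end of 0x00000000 wraps and means "up to the top of the 4GB space" - e.g.
 * the (0xF8000000, 0x00000000) registration in MMU_init() covers
 * 0xF8000000..0xFFFFFFFF. */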

static gboolean mmu_ext_page_remapped( sh4addr_t page, mem_region_fn_t fn, void *user_data )
{
    int i;
    if( (MMIO_READ(MMU,MMUCR)) & MMUCR_AT ) {
        /* TLB on */
        sh4_address_space[(page|0x80000000)>>12] = fn; /* Direct map to P1 and P2 */
        sh4_address_space[(page|0xA0000000)>>12] = fn;
        /* Scan UTLB and update any direct-referencing entries */
    } else {
        /* Direct map to U0, P0, P1, P2, P3 */
        for( i=0; i<= 0xC0000000; i+= 0x20000000 ) {
            sh4_address_space[(page|i)>>12] = fn;
        }
        for( i=0; i < 0x80000000; i+= 0x20000000 ) {
            sh4_user_address_space[(page|i)>>12] = fn;
        }
    }
}

static void mmu_set_tlb_enabled( int tlb_on )
{
    mem_region_fn_t *ptr, *uptr;
    int i;

    if( tlb_on ) {
        mmu_register_mem_region(0x00000000, 0x80000000, &mem_region_tlb_miss );
        mmu_register_mem_region(0xC0000000, 0xE0000000, &mem_region_tlb_miss );
        mmu_register_user_mem_region(0x00000000, 0x80000000, &mem_region_tlb_miss );
        for( i=0, ptr = storequeue_address_space, uptr = storequeue_user_address_space;
             i<0x04000000; i+= LXDREAM_PAGE_SIZE ) {
            *ptr++ = &mem_region_tlb_miss;
            *uptr++ = &mem_region_tlb_miss;
        }
        mmu_utlb_register_all();
    } else {
        for( i=0, ptr = sh4_address_space; i<7; i++, ptr += LXDREAM_PAGE_TABLE_ENTRIES ) {
            memcpy( ptr, ext_address_space, sizeof(mem_region_fn_t) * LXDREAM_PAGE_TABLE_ENTRIES );
        }
        for( i=0, ptr = sh4_user_address_space; i<4; i++, ptr += LXDREAM_PAGE_TABLE_ENTRIES ) {
            memcpy( ptr, ext_address_space, sizeof(mem_region_fn_t) * LXDREAM_PAGE_TABLE_ENTRIES );
        }
    }
}
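/* With the TLB disabled, decoding becomes flat: the privileged map receives
 * seven copies of ext_address_space (presumably one LXDREAM_PAGE_TABLE_ENTRIES
 * block per 512MB segment, covering U0/P0 through P3) while the user map
 * receives four copies for 0x00000000-0x7FFFFFFF; the P4 handlers registered
 * in MMU_init() are left untouched. */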

static void mmu_set_storequeue_protected( int protected )
{
    if( protected ) {
        mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &mem_region_address_error );
    } else {
        mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
    }
}

static void mmu_set_tlb_asid( uint32_t asid )
{
    /* Scan for pages that need to be remapped */
    int i;
    if( IS_SV_ENABLED() ) {
        for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
            if( mmu_utlb[i].flags & TLB_VALID ) {
                if( (mmu_utlb[i].flags & TLB_SHARE) == 0 ) {
                    if( mmu_utlb[i].asid == mmu_asid ) { // Matches old ASID - unmap out
                        if( !mmu_utlb_unmap_pages( FALSE, TRUE, mmu_utlb[i].vpn&mmu_utlb[i].mask,
                                get_tlb_size_pages(mmu_utlb[i].flags) ) )
                            mmu_utlb_remap_pages( FALSE, TRUE, i );
                    } else if( mmu_utlb[i].asid == asid ) { // Matches new ASID - map in
                        mmu_utlb_map_pages( NULL, mmu_utlb_pages[i].user_fn,
                                mmu_utlb[i].vpn&mmu_utlb[i].mask,
                                get_tlb_size_pages(mmu_utlb[i].flags) );
                    }
                }
            }
        }
    } else {
        // Remap both Priv+user pages
        for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
            if( mmu_utlb[i].flags & TLB_VALID ) {
                if( (mmu_utlb[i].flags & TLB_SHARE) == 0 ) {
                    if( mmu_utlb[i].asid == mmu_asid ) { // Matches old ASID - unmap out
                        if( !mmu_utlb_unmap_pages( TRUE, TRUE, mmu_utlb[i].vpn&mmu_utlb[i].mask,
                                get_tlb_size_pages(mmu_utlb[i].flags) ) )
                            mmu_utlb_remap_pages( TRUE, TRUE, i );
                    } else if( mmu_utlb[i].asid == asid ) { // Matches new ASID - map in
                        mmu_utlb_map_pages( &mmu_utlb_pages[i].fn, mmu_utlb_pages[i].user_fn,
                                mmu_utlb[i].vpn&mmu_utlb[i].mask,
                                get_tlb_size_pages(mmu_utlb[i].flags) );
                    }
                }
            }
        }
    }

    mmu_asid = asid;
}

static uint32_t get_tlb_size_mask( uint32_t flags )
{
    switch( flags & TLB_SIZE_MASK ) {
    case TLB_SIZE_1K: return MASK_1K;
    case TLB_SIZE_4K: return MASK_4K;
    case TLB_SIZE_64K: return MASK_64K;
    case TLB_SIZE_1M: return MASK_1M;
    default: return 0; /* Unreachable */
    }
}

static uint32_t get_tlb_size_pages( uint32_t flags )
{
    switch( flags & TLB_SIZE_MASK ) {
    case TLB_SIZE_1K: return 0;
    case TLB_SIZE_4K: return 1;
    case TLB_SIZE_64K: return 16;
    case TLB_SIZE_1M: return 256;
    default: return 0; /* Unreachable */
    }
}
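/* npages counts native 4K pages: a 64K TLB entry spans 16 slots of the
 * address-space table and a 1M entry spans 256. The 1K return value of 0 is
 * a sentinel - callers such as mmu_utlb_map_pages() treat npages == 0 as a
 * sub-4K entry and route it through the utlb_1k_entry machinery instead. */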

/**
 * Add a new TLB entry mapping to the address space table. If any of the pages
 * are already mapped, they are mapped to the TLB multi-hit page instead.
 * @return FALSE if a TLB multihit situation was detected, otherwise TRUE.
 */
static gboolean mmu_utlb_map_pages( mem_region_fn_t priv_page, mem_region_fn_t user_page, sh4addr_t start_addr, int npages )
{
    mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
    mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
    gboolean mapping_ok = TRUE;
    int i;

    if( (start_addr & 0xFC000000) == 0xE0000000 ) {
        /* Storequeue mapping */
        ptr = &storequeue_address_space[(start_addr-0xE0000000) >> 12];
        uptr = &storequeue_user_address_space[(start_addr-0xE0000000) >> 12];
    } else if( (start_addr & 0xE0000000) == 0xC0000000 ) {
        user_page = NULL; /* No user access to P3 region */
    } else if( start_addr >= 0x80000000 ) {
        return TRUE; // No mapping - legal but meaningless
    }

    if( npages == 0 ) {
        struct utlb_1k_entry *ent;
        int i, idx = (start_addr >> 10) & 0x03;
        if( IS_1K_PAGE_ENTRY(*ptr) ) {
            ent = (struct utlb_1k_entry *)*ptr;
        } else {
            ent = mmu_utlb_1k_alloc();
            /* New 1K struct - init to previous contents of region */
            for( i=0; i<4; i++ ) {
                ent->subpages[i] = *ptr;
                ent->user_subpages[i] = *uptr;
            }
            *ptr = &ent->fn;
            *uptr = &ent->user_fn;
        }

        if( priv_page != NULL ) {
            if( ent->subpages[idx] == &mem_region_tlb_miss ) {
                ent->subpages[idx] = priv_page;
            } else {
                mapping_ok = FALSE;
                ent->subpages[idx] = &mem_region_tlb_multihit;
            }
        }
        if( user_page != NULL ) {
            if( ent->user_subpages[idx] == &mem_region_tlb_miss ) {
                ent->user_subpages[idx] = user_page;
            } else {
                mapping_ok = FALSE;
                ent->user_subpages[idx] = &mem_region_tlb_multihit;
            }
        }

    } else {
        if( priv_page != NULL ) {
            if( user_page != NULL ) {
                for( i=0; i<npages; i++ ) {
                    if( *ptr == &mem_region_tlb_miss ) {
                        *ptr++ = priv_page;
                        *uptr++ = user_page;
                    } else {
                        mapping_ok = FALSE;
                        *ptr++ = &mem_region_tlb_multihit;
                        *uptr++ = &mem_region_tlb_multihit;
                    }
                }
            } else {
                /* Privileged mapping only */
                for( i=0; i<npages; i++ ) {
                    if( *ptr == &mem_region_tlb_miss ) {
                        *ptr++ = priv_page;
                    } else {
                        mapping_ok = FALSE;
                        *ptr++ = &mem_region_tlb_multihit;
                    }
                }
            }
        } else if( user_page != NULL ) {
            /* User mapping only (eg ASID change remap w/ SV=1) */
            for( i=0; i<npages; i++ ) {
                if( *uptr == &mem_region_tlb_miss ) {
                    *uptr++ = user_page;
                } else {
                    mapping_ok = FALSE;
                    *uptr++ = &mem_region_tlb_multihit;
                }
            }
        }
    }
    return mapping_ok;
}

/**
 * Remap any pages within the region covered by entryNo, but not including
 * entryNo itself. This is used to reestablish pages that were previously
 * covered by a multi-hit exception region when one of the pages is removed.
 */
static void mmu_utlb_remap_pages( gboolean remap_priv, gboolean remap_user, int entryNo )
{
    int mask = mmu_utlb[entryNo].mask;
    uint32_t remap_addr = mmu_utlb[entryNo].vpn & mask;
    int i;

    for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
        if( i != entryNo && (mmu_utlb[i].vpn & mask) == remap_addr && (mmu_utlb[i].flags & TLB_VALID) ) {
            /* Overlapping region */
            mem_region_fn_t priv_page = (remap_priv ? &mmu_utlb_pages[i].fn : NULL);
            mem_region_fn_t user_page = (remap_user ? mmu_utlb_pages[i].user_fn : NULL);
            uint32_t start_addr;
            int npages;

            if( mmu_utlb[i].mask >= mask ) {
                /* entry is no larger than the area we're replacing - map completely */
                start_addr = mmu_utlb[i].vpn & mmu_utlb[i].mask;
                npages = get_tlb_size_pages( mmu_utlb[i].flags );
            } else {
                /* Otherwise map subset - region covered by removed page */
                start_addr = remap_addr;
                npages = get_tlb_size_pages( mmu_utlb[entryNo].flags );
            }

            if( (mmu_utlb[i].flags & TLB_SHARE) || mmu_utlb[i].asid == mmu_asid ) {
                mmu_utlb_map_pages( priv_page, user_page, start_addr, npages );
            } else if( IS_SV_ENABLED() ) {
                mmu_utlb_map_pages( priv_page, NULL, start_addr, npages );
            }

        }
    }
}

/**
 * Remove a previous TLB mapping (replacing them with the TLB miss region).
 * @return FALSE if any pages were previously mapped to the TLB multihit page,
 * otherwise TRUE. In either case, all pages in the region are cleared to TLB miss.
 */
static gboolean mmu_utlb_unmap_pages( gboolean unmap_priv, gboolean unmap_user, sh4addr_t start_addr, int npages )
{
    mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
    mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
    gboolean unmapping_ok = TRUE;
    int i;

    if( (start_addr & 0xFC000000) == 0xE0000000 ) {
        /* Storequeue mapping */
        ptr = &storequeue_address_space[(start_addr-0xE0000000) >> 12];
        uptr = &storequeue_user_address_space[(start_addr-0xE0000000) >> 12];
    } else if( (start_addr & 0xE0000000) == 0xC0000000 ) {
        unmap_user = FALSE;
    } else if( start_addr >= 0x80000000 ) {
        return TRUE; // No mapping - legal but meaningless
    }

    if( npages == 0 ) { // 1K page
        assert( IS_1K_PAGE_ENTRY( *ptr ) );
        struct utlb_1k_entry *ent = (struct utlb_1k_entry *)*ptr;
        int i, idx = (start_addr >> 10) & 0x03, mergeable=1;
        if( ent->subpages[idx] == &mem_region_tlb_multihit ) {
            unmapping_ok = FALSE;
        }
        if( unmap_priv )
            ent->subpages[idx] = &mem_region_tlb_miss;
        if( unmap_user )
            ent->user_subpages[idx] = &mem_region_tlb_miss;

        /* If all 4 subpages have the same content, merge them together and
         * release the 1K entry
         */
        mem_region_fn_t priv_page = ent->subpages[0];
        mem_region_fn_t user_page = ent->user_subpages[0];
        for( i=1; i<4; i++ ) {
            if( priv_page != ent->subpages[i] || user_page != ent->user_subpages[i] ) {
                mergeable = 0;
                break;
            }
        }
        if( mergeable ) {
            mmu_utlb_1k_free(ent);
            *ptr = priv_page;
            *uptr = user_page;
        }
    } else {
        if( unmap_priv ) {
            if( unmap_user ) {
                for( i=0; i<npages; i++ ) {
                    if( *ptr == &mem_region_tlb_multihit ) {
                        unmapping_ok = FALSE;
                    }
                    *ptr++ = &mem_region_tlb_miss;
                    *uptr++ = &mem_region_tlb_miss;
                }
            } else {
                /* Privileged (un)mapping only */
                for( i=0; i<npages; i++ ) {
                    if( *ptr == &mem_region_tlb_multihit ) {
                        unmapping_ok = FALSE;
                    }
                    *ptr++ = &mem_region_tlb_miss;
                }
            }
        } else if( unmap_user ) {
            /* User (un)mapping only */
            for( i=0; i<npages; i++ ) {
                if( *uptr == &mem_region_tlb_multihit ) {
                    unmapping_ok = FALSE;
                }
                *uptr++ = &mem_region_tlb_miss;
            }
        }
    }

    return unmapping_ok;
}

static void mmu_utlb_insert_entry( int entry )
{
    struct utlb_entry *ent = &mmu_utlb[entry];
    mem_region_fn_t page = &mmu_utlb_pages[entry].fn;
    mem_region_fn_t upage;
    sh4addr_t start_addr = ent->vpn & ent->mask;
    int npages = get_tlb_size_pages(ent->flags);

    if( (ent->flags & TLB_USERMODE) == 0 ) {
        upage = &mem_region_user_protected;
    } else {
        upage = page;
    }
    mmu_utlb_pages[entry].user_fn = upage;

    if( (ent->flags & TLB_WRITABLE) == 0 ) {
        page->write_long = (mem_write_fn_t)tlb_protected_write;
        page->write_word = (mem_write_fn_t)tlb_protected_write;
        page->write_byte = (mem_write_fn_t)tlb_protected_write;
        page->write_burst = (mem_write_burst_fn_t)tlb_protected_write;
        mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], FALSE );
    } else if( (ent->flags & TLB_DIRTY) == 0 ) {
        page->write_long = (mem_write_fn_t)tlb_initial_write;
        page->write_word = (mem_write_fn_t)tlb_initial_write;
        page->write_byte = (mem_write_fn_t)tlb_initial_write;
        page->write_burst = (mem_write_burst_fn_t)tlb_initial_write;
        mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], FALSE );
    } else {
        mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], TRUE );
    }

    /* Is page visible? */
    if( (ent->flags & TLB_SHARE) || ent->asid == mmu_asid ) {
        mmu_utlb_map_pages( page, upage, start_addr, npages );
    } else if( IS_SV_ENABLED() ) {
        mmu_utlb_map_pages( page, NULL, start_addr, npages );
    }
}
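/* Write protection and the "initial page write" case (first write to a clean
 * page) are handled entirely through the per-entry vtable above: the write
 * handlers are swapped for exception-raising stubs, so the common read/write
 * fast path never has to test the TLB flags. */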

static void mmu_utlb_remove_entry( int entry )
{
    int i, j;
    struct utlb_entry *ent = &mmu_utlb[entry];
    sh4addr_t start_addr = ent->vpn&ent->mask;
    mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
    mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
    gboolean unmap_user;
    int npages = get_tlb_size_pages(ent->flags);

    if( (ent->flags & TLB_SHARE) || ent->asid == mmu_asid ) {
        unmap_user = TRUE;
    } else if( IS_SV_ENABLED() ) {
        unmap_user = FALSE;
    } else {
        return; // Not mapped
    }

    gboolean clean_unmap = mmu_utlb_unmap_pages( TRUE, unmap_user, start_addr, npages );

    if( !clean_unmap ) {
        mmu_utlb_remap_pages( TRUE, unmap_user, entry );
    }
}

static void mmu_utlb_register_all()
{
    int i;
    for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
        if( mmu_utlb[i].flags & TLB_VALID )
            mmu_utlb_insert_entry( i );
    }
}

static void mmu_invalidate_tlb()
{
    int i;
    for( i=0; i<ITLB_ENTRY_COUNT; i++ ) {
        mmu_itlb[i].flags &= (~TLB_VALID);
    }
    if( IS_TLB_ENABLED() ) {
        for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
            if( mmu_utlb[i].flags & TLB_VALID ) {
                mmu_utlb_remove_entry( i );
            }
        }
    }
    for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
        mmu_utlb[i].flags &= (~TLB_VALID);
    }
}

/******************************************************************************/
/*                        MMU TLB address translation                         */
/******************************************************************************/

/**
 * Translate a 32-bit address into a UTLB entry number. Does not check for
 * page protection etc.
 * @return the entryNo if found, -1 if not found, and -2 for a multi-hit.
 */
int mmu_utlb_entry_for_vpn( uint32_t vpn )
{
    mem_region_fn_t fn = sh4_address_space[vpn>>12];
    if( fn >= &mmu_utlb_pages[0].fn && fn < &mmu_utlb_pages[UTLB_ENTRY_COUNT].fn ) {
        return ((struct utlb_page_entry *)fn) - &mmu_utlb_pages[0];
    } else if( fn == &mem_region_tlb_multihit ) {
        return -2;
    } else {
        return -1;
    }
}
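/* Reverse lookup trick: rather than searching the 64 UTLB entries, the
 * address-space table itself is consulted. If the installed handler lies
 * inside mmu_utlb_pages[], the pointer difference is the entry number; the
 * dedicated multi-hit handler maps to -2 and anything else (miss, unmapped,
 * MMIO, etc.) to -1. */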

/**
 * Perform the actual utlb lookup w/ asid matching.
 * Possible outcomes are:
 *   0..63 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant UTLB entry, or an error.
 */
static inline int mmu_utlb_lookup_vpn_asid( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    mmu_urc++;
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
        mmu_urc = 0;
    }

    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
                ((mmu_utlb[i].flags & TLB_SHARE) || mmu_asid == mmu_utlb[i].asid) &&
                ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }
    return result;
}

/**
 * Perform the actual utlb lookup matching on vpn only
 * Possible outcomes are:
 *   0..63 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant UTLB entry, or an error.
 */
static inline int mmu_utlb_lookup_vpn( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    mmu_urc++;
    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
        mmu_urc = 0;
    }

    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
        if( (mmu_utlb[i].flags & TLB_VALID) &&
                ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    return result;
}

/**
 * Update the ITLB by replacing the LRU entry with the specified UTLB entry.
 * @return the number (0-3) of the replaced entry.
 */
static inline int mmu_itlb_update_from_utlb( int entryNo )
{
    int replace;
    /* Determine entry to replace based on lrui */
    if( (mmu_lrui & 0x38) == 0x38 ) {
        replace = 0;
        mmu_lrui = mmu_lrui & 0x07;
    } else if( (mmu_lrui & 0x26) == 0x06 ) {
        replace = 1;
        mmu_lrui = (mmu_lrui & 0x19) | 0x20;
    } else if( (mmu_lrui & 0x15) == 0x01 ) {
        replace = 2;
        mmu_lrui = (mmu_lrui & 0x3E) | 0x14;
    } else { // Note - gets invalid entries too
        replace = 3;
        mmu_lrui = (mmu_lrui | 0x0B);
    }

    mmu_itlb[replace].vpn = mmu_utlb[entryNo].vpn;
    mmu_itlb[replace].mask = mmu_utlb[entryNo].mask;
    mmu_itlb[replace].ppn = mmu_utlb[entryNo].ppn;
    mmu_itlb[replace].asid = mmu_utlb[entryNo].asid;
    mmu_itlb[replace].flags = mmu_utlb[entryNo].flags & 0x01DA;
    return replace;
}
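/* mmu_lrui mirrors the 6-bit LRUI field of MMUCR, which tracks the relative
 * age of the four ITLB entries as pairwise-order bits; the mask/compare
 * constants above are intended to follow the replacement conditions given in
 * the SH4 documentation, with the final else also absorbing LRUI states that
 * match none of the patterns. */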

/**
 * Perform the actual itlb lookup w/ asid protection
 * Possible outcomes are:
 *   0..63 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant ITLB entry, or an error.
 */
static inline int mmu_itlb_lookup_vpn_asid( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
                ((mmu_itlb[i].flags & TLB_SHARE) || mmu_asid == mmu_itlb[i].asid) &&
                ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    if( result == -1 ) {
        int utlbEntry = mmu_utlb_entry_for_vpn( vpn );
        if( utlbEntry < 0 ) {
            return utlbEntry;
        } else {
            return mmu_itlb_update_from_utlb( utlbEntry );
        }
    }

    switch( result ) {
    case 0: mmu_lrui = (mmu_lrui & 0x07); break;
    case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
    case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
    case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
    }

    return result;
}

/**
 * Perform the actual itlb lookup on vpn only
 * Possible outcomes are:
 *   0..63 Single match - good, return entry found
 *   -1 No match - raise a tlb data miss exception
 *   -2 Multiple matches - raise a multi-hit exception (reset)
 * @param vpn virtual address to resolve
 * @return the resultant ITLB entry, or an error.
 */
static inline int mmu_itlb_lookup_vpn( uint32_t vpn )
{
    int result = -1;
    unsigned int i;

    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
        if( (mmu_itlb[i].flags & TLB_VALID) &&
                ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
            if( result != -1 ) {
                return -2;
            }
            result = i;
        }
    }

    if( result == -1 ) {
        int utlbEntry = mmu_utlb_lookup_vpn( vpn );
        if( utlbEntry < 0 ) {
            return utlbEntry;
        } else {
            return mmu_itlb_update_from_utlb( utlbEntry );
        }
    }

    switch( result ) {
    case 0: mmu_lrui = (mmu_lrui & 0x07); break;
    case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
    case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
    case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
    }

    return result;
}

/**
 * Update the icache for an untranslated address
 */
static inline void mmu_update_icache_phys( sh4addr_t addr )
{
    if( (addr & 0x1C000000) == 0x0C000000 ) {
        /* Main ram */
        sh4_icache.page_vma = addr & 0xFF000000;
        sh4_icache.page_ppa = 0x0C000000;
        sh4_icache.mask = 0xFF000000;
        sh4_icache.page = dc_main_ram;
    } else if( (addr & 0x1FE00000) == 0 ) {
        /* BIOS ROM */
        sh4_icache.page_vma = addr & 0xFFE00000;
        sh4_icache.page_ppa = 0;
        sh4_icache.mask = 0xFFE00000;
        sh4_icache.page = dc_boot_rom;
    } else {
        /* not supported */
        sh4_icache.page_vma = -1;
    }
}

/**
 * Update the sh4_icache structure to describe the page(s) containing the
 * given vma. If the address does not reference a RAM/ROM region, the icache
 * will be invalidated instead.
 * If AT is on, this method will raise TLB exceptions normally
 * (hence this method should only be used immediately prior to execution of
 * code), and otherwise will set the icache according to the matching TLB entry.
 * If AT is off, this method will set the entire referenced RAM/ROM region in
 * the icache.
 * @return TRUE if the update completed (successfully or otherwise), FALSE
 * if an exception was raised.
 */
gboolean FASTCALL mmu_update_icache( sh4vma_t addr )
{
    int entryNo;
    if( IS_SH4_PRIVMODE() ) {
        if( addr & 0x80000000 ) {
            if( addr < 0xC0000000 ) {
                /* P1, P2 and P4 regions are pass-through (no translation) */
                mmu_update_icache_phys(addr);
                return TRUE;
            } else if( addr >= 0xE0000000 && addr < 0xFFFFFF00 ) {
                RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
                return FALSE;
            }
        }

        uint32_t mmucr = MMIO_READ(MMU,MMUCR);
        if( (mmucr & MMUCR_AT) == 0 ) {
            mmu_update_icache_phys(addr);
            return TRUE;
        }

        if( (mmucr & MMUCR_SV) == 0 )
            entryNo = mmu_itlb_lookup_vpn_asid( addr );
        else
            entryNo = mmu_itlb_lookup_vpn( addr );
    } else {
        if( addr & 0x80000000 ) {
            RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
            return FALSE;
        }

        uint32_t mmucr = MMIO_READ(MMU,MMUCR);
        if( (mmucr & MMUCR_AT) == 0 ) {
            mmu_update_icache_phys(addr);
            return TRUE;
        }

        entryNo = mmu_itlb_lookup_vpn_asid( addr );

        if( entryNo != -1 && (mmu_itlb[entryNo].flags & TLB_USERMODE) == 0 ) {
            RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
            return FALSE;
        }
    }

    switch(entryNo) {
    case -1:
        RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
        return FALSE;
    case -2:
        RAISE_TLB_MULTIHIT_ERROR(addr);
        return FALSE;
    default:
        sh4_icache.page_ppa = mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask;
        sh4_icache.page = mem_get_region( sh4_icache.page_ppa );
        if( sh4_icache.page == NULL ) {
            sh4_icache.page_vma = -1;
        } else {
            sh4_icache.page_vma = mmu_itlb[entryNo].vpn & mmu_itlb[entryNo].mask;
            sh4_icache.mask = mmu_itlb[entryNo].mask;
        }
        return TRUE;
    }
}
nkeynes@586 | 1091 |
|
nkeynes@597 | 1092 | /**
|
nkeynes@826 | 1093 | * Translate address for disassembly purposes (ie performs an instruction
|
nkeynes@597 | 1094 | * lookup) - does not raise exceptions or modify any state, and ignores
|
nkeynes@597 | 1095 | * protection bits. Returns the translated address, or MMU_VMA_ERROR
|
nkeynes@826 | 1096 | * on translation failure.
|
nkeynes@597 | 1097 | */
|
nkeynes@905 | 1098 | sh4addr_t FASTCALL mmu_vma_to_phys_disasm( sh4vma_t vma )
|
nkeynes@597 | 1099 | {
|
nkeynes@597 | 1100 | if( vma & 0x80000000 ) {
|
nkeynes@736 | 1101 | if( vma < 0xC0000000 ) {
|
nkeynes@736 | 1102 | /* P1, P2 and P4 regions are pass-through (no translation) */
|
nkeynes@736 | 1103 | return VMA_TO_EXT_ADDR(vma);
|
nkeynes@736 | 1104 | } else if( vma >= 0xE0000000 && vma < 0xFFFFFF00 ) {
|
nkeynes@736 | 1105 | /* Not translatable */
|
nkeynes@736 | 1106 | return MMU_VMA_ERROR;
|
nkeynes@736 | 1107 | }
|
nkeynes@597 | 1108 | }
|
nkeynes@597 | 1109 |
|
nkeynes@597 | 1110 | uint32_t mmucr = MMIO_READ(MMU,MMUCR);
|
nkeynes@597 | 1111 | if( (mmucr & MMUCR_AT) == 0 ) {
|
nkeynes@736 | 1112 | return VMA_TO_EXT_ADDR(vma);
|
nkeynes@597 | 1113 | }
|
nkeynes@736 | 1114 |
|
nkeynes@597 | 1115 | int entryNo = mmu_itlb_lookup_vpn( vma );
|
nkeynes@597 | 1116 | if( entryNo == -2 ) {
|
nkeynes@736 | 1117 | entryNo = mmu_itlb_lookup_vpn_asid( vma );
|
nkeynes@597 | 1118 | }
|
nkeynes@597 | 1119 | if( entryNo < 0 ) {
|
nkeynes@736 | 1120 | return MMU_VMA_ERROR;
|
nkeynes@597 | 1121 | } else {
|
nkeynes@826 | 1122 | return (mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask) |
|
nkeynes@826 | 1123 | (vma & (~mmu_itlb[entryNo].mask));
|
nkeynes@597 | 1124 | }
|
nkeynes@597 | 1125 | }
|
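/* Usage sketch (illustrative only, not part of this module): a disassembler
 * front-end might translate a VMA and then fetch the opcode through the
 * external address space. The print logic is hypothetical, and the read_word
 * accessor name is assumed from struct mem_region_fn.
 *
 *     sh4addr_t pa = mmu_vma_to_phys_disasm( vma );
 *     if( pa == MMU_VMA_ERROR ) {
 *         printf( "%08X: <no translation>\n", vma );
 *     } else {
 *         uint16_t opcode = (uint16_t)ext_address_space[pa>>12]->read_word( pa );
 *         printf( "%08X: %04X\n", vma, opcode );
 *     }
 */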
nkeynes@597 | 1126 |
|
nkeynes@911 | 1127 | void FASTCALL sh4_flush_store_queue( sh4addr_t addr )
|
nkeynes@911 | 1128 | {
|
nkeynes@911 | 1129 | int queue = (addr&0x20)>>2;
|
nkeynes@911 | 1130 | uint32_t hi = MMIO_READ( MMU, QACR0 + (queue>>1)) << 24;
|
nkeynes@911 | 1131 | sh4ptr_t src = (sh4ptr_t)&sh4r.store_queue[queue];
|
nkeynes@911 | 1132 | sh4addr_t target = (addr&0x03FFFFE0) | hi;
|
nkeynes@931 | 1133 | ext_address_space[target>>12]->write_burst( target, src );
|
nkeynes@911 | 1134 | }
|
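/* Worked example (register values are hypothetical): flushing SQ address
 * 0xE1000020 selects SQ1 (bit 5 set, so queue == 8). With QACR1 == 0x0C,
 * hi becomes 0x0C000000 and the external target is
 *     (0xE1000020 & 0x03FFFFE0) | 0x0C000000 == 0x0D000020,
 * i.e. QACR supplies address bits 28:26 and the SQ address supplies the rest. */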
nkeynes@911 | 1135 |
|
nkeynes@939 | 1136 | void FASTCALL sh4_flush_store_queue_mmu( sh4addr_t addr, void *exc )
|
nkeynes@586 | 1137 | {
|
nkeynes@586 | 1138 | int queue = (addr&0x20)>>2;
|
nkeynes@586 | 1139 | sh4ptr_t src = (sh4ptr_t)&sh4r.store_queue[queue];
|
nkeynes@586 | 1141 | /* Store queue operation */
|
nkeynes@939 | 1142 | storequeue_address_space[(addr&0x03FFFFE0)>>12]->write_burst( addr, src );
|
nkeynes@586 | 1143 | }
|
nkeynes@586 | 1144 |
|
nkeynes@939 | 1145 | /********************** TLB Direct-Access Regions ***************************/
|
nkeynes@939 | 1146 | #ifdef HAVE_FRAME_ADDRESS
|
nkeynes@939 | 1147 | #define EXCEPTION_EXIT() do{ *(((void **)__builtin_frame_address(0))+1) = exc; return; } while(0)
|
nkeynes@939 | 1148 | #else
|
nkeynes@939 | 1149 | #define EXCEPTION_EXIT() sh4_core_exit(CORE_EXIT_EXCEPTION)
|
nkeynes@939 | 1150 | #endif
|
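/* EXCEPTION_EXIT: with HAVE_FRAME_ADDRESS, the macro overwrites the word just
 * above the current frame pointer - which on the supported targets is assumed
 * to hold this function's return address - with 'exc', so that returning from
 * the handler lands on the exception stub supplied by the translated caller
 * instead of at the normal return point. Without frame-address support it
 * falls back to sh4_core_exit(CORE_EXIT_EXCEPTION). */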
nkeynes@939 | 1151 |
|
nkeynes@939 | 1152 |
|
nkeynes@939 | 1153 | #define ITLB_ENTRY(addr) ((addr>>7)&0x03)
|
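/* e.g. an access to the ITLB address array at 0xF2000180 selects entry
 * ((0x180>>7)&0x03) == 3; only address bits 8:7 are significant here. */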
nkeynes@939 | 1154 |
|
nkeynes@939 | 1155 | int32_t FASTCALL mmu_itlb_addr_read( sh4addr_t addr )
|
nkeynes@939 | 1156 | {
|
nkeynes@939 | 1157 | struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
|
nkeynes@939 | 1158 | return ent->vpn | ent->asid | (ent->flags & TLB_VALID);
|
nkeynes@939 | 1159 | }
|
nkeynes@939 | 1160 |
|
nkeynes@939 | 1161 | void FASTCALL mmu_itlb_addr_write( sh4addr_t addr, uint32_t val )
|
nkeynes@939 | 1162 | {
|
nkeynes@939 | 1163 | struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
|
nkeynes@939 | 1164 | ent->vpn = val & 0xFFFFFC00;
|
nkeynes@939 | 1165 | ent->asid = val & 0x000000FF;
|
nkeynes@939 | 1166 | ent->flags = (ent->flags & ~(TLB_VALID)) | (val&TLB_VALID);
|
nkeynes@939 | 1167 | }
|
nkeynes@939 | 1168 |
|
nkeynes@939 | 1169 | int32_t FASTCALL mmu_itlb_data_read( sh4addr_t addr )
|
nkeynes@939 | 1170 | {
|
nkeynes@939 | 1171 | struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
|
nkeynes@939 | 1172 | return (ent->ppn & 0x1FFFFC00) | ent->flags;
|
nkeynes@939 | 1173 | }
|
nkeynes@939 | 1174 |
|
nkeynes@939 | 1175 | void FASTCALL mmu_itlb_data_write( sh4addr_t addr, uint32_t val )
|
nkeynes@939 | 1176 | {
|
nkeynes@939 | 1177 | struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
|
nkeynes@939 | 1178 | ent->ppn = val & 0x1FFFFC00;
|
nkeynes@939 | 1179 | ent->flags = val & 0x000001DA;
|
nkeynes@939 | 1180 | ent->mask = get_tlb_size_mask(val);
|
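/* PPNs in 0x1C000000..0x1FFFFFFF (physical area 7) are folded up into the
 * 0xFC000000+ range below, presumably so that region lookups resolve to the
 * on-chip I/O pages rather than to external memory. */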
nkeynes@939 | 1181 | if( ent->ppn >= 0x1C000000 )
|
nkeynes@939 | 1182 | ent->ppn |= 0xE0000000;
|
nkeynes@939 | 1183 | }
|
nkeynes@939 | 1184 |
|
nkeynes@939 | 1185 | #define UTLB_ENTRY(addr) ((addr>>8)&0x3F)
|
nkeynes@939 | 1186 | #define UTLB_ASSOC(addr) (addr&0x80)
|
nkeynes@939 | 1187 | #define UTLB_DATA2(addr) (addr&0x00800000)
|
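/* e.g. a write to the UTLB address array at 0xF6003F80 addresses entry
 * ((0x3F80>>8)&0x3F) == 0x3F, and bit 7 (UTLB_ASSOC) being set sends
 * mmu_utlb_addr_write down the associative path below; bit 23 (UTLB_DATA2)
 * selects the second data word (the PCMCIA bits) in the data array accessors. */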
nkeynes@939 | 1188 |
|
nkeynes@939 | 1189 | int32_t FASTCALL mmu_utlb_addr_read( sh4addr_t addr )
|
nkeynes@939 | 1190 | {
|
nkeynes@939 | 1191 | struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
|
nkeynes@939 | 1192 | return ent->vpn | ent->asid | (ent->flags & TLB_VALID) |
|
nkeynes@939 | 1193 | ((ent->flags & TLB_DIRTY)<<7);
|
nkeynes@939 | 1194 | }
|
nkeynes@939 | 1195 | int32_t FASTCALL mmu_utlb_data_read( sh4addr_t addr )
|
nkeynes@939 | 1196 | {
|
nkeynes@939 | 1197 | struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
|
nkeynes@939 | 1198 | if( UTLB_DATA2(addr) ) {
|
nkeynes@939 | 1199 | return ent->pcmcia;
|
nkeynes@939 | 1200 | } else {
|
nkeynes@939 | 1201 | return (ent->ppn&0x1FFFFC00) | ent->flags;
|
nkeynes@939 | 1202 | }
|
nkeynes@939 | 1203 | }
|
nkeynes@939 | 1204 |
|
nkeynes@939 | 1205 | /**
|
nkeynes@939 | 1206 | * Find a UTLB entry for the associative TLB write - matches valid entries
|
nkeynes@939 | 1207 | * on VPN and ASID (honouring the shared flag), returning -2 on a multiple hit.
|
nkeynes@939 | 1208 | */
|
nkeynes@939 | 1209 | static inline int mmu_utlb_lookup_assoc( uint32_t vpn, uint32_t asid )
|
nkeynes@939 | 1210 | {
|
nkeynes@939 | 1211 | int result = -1;
|
nkeynes@939 | 1212 | unsigned int i;
|
nkeynes@939 | 1213 | for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
|
nkeynes@939 | 1214 | if( (mmu_utlb[i].flags & TLB_VALID) &&
|
nkeynes@939 | 1215 | ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
|
nkeynes@939 | 1216 | ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
|
nkeynes@939 | 1217 | if( result != -1 ) {
|
nkeynes@939 | 1218 | fprintf( stderr, "TLB Multi hit: %d %d\n", result, i );
|
nkeynes@939 | 1219 | return -2;
|
nkeynes@939 | 1220 | }
|
nkeynes@939 | 1221 | result = i;
|
nkeynes@939 | 1222 | }
|
nkeynes@939 | 1223 | }
|
nkeynes@939 | 1224 | return result;
|
nkeynes@939 | 1225 | }
|
nkeynes@939 | 1226 |
|
nkeynes@939 | 1227 | /**
|
nkeynes@939 | 1228 | * Find an ITLB entry for the associative TLB write - matches valid entries
|
nkeynes@939 | 1229 | * on VPN and ASID (honouring the shared flag), returning -2 on a multiple hit.
|
nkeynes@939 | 1230 | */
|
nkeynes@939 | 1231 | static inline int mmu_itlb_lookup_assoc( uint32_t vpn, uint32_t asid )
|
nkeynes@939 | 1232 | {
|
nkeynes@939 | 1233 | int result = -1;
|
nkeynes@939 | 1234 | unsigned int i;
|
nkeynes@939 | 1235 | for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
|
nkeynes@939 | 1236 | if( (mmu_itlb[i].flags & TLB_VALID) &&
|
nkeynes@939 | 1237 | ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
|
nkeynes@939 | 1238 | ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
|
nkeynes@939 | 1239 | if( result != -1 ) {
|
nkeynes@939 | 1240 | return -2;
|
nkeynes@939 | 1241 | }
|
nkeynes@939 | 1242 | result = i;
|
nkeynes@939 | 1243 | }
|
nkeynes@939 | 1244 | }
|
nkeynes@939 | 1245 | return result;
|
nkeynes@939 | 1246 | }
|
nkeynes@939 | 1247 |
|
nkeynes@939 | 1248 | void FASTCALL mmu_utlb_addr_write( sh4addr_t addr, uint32_t val, void *exc )
|
nkeynes@939 | 1249 | {
|
nkeynes@939 | 1250 | if( UTLB_ASSOC(addr) ) {
|
nkeynes@939 | 1251 | int utlb = mmu_utlb_lookup_assoc( val, mmu_asid );
|
nkeynes@939 | 1252 | if( utlb >= 0 ) {
|
nkeynes@939 | 1253 | struct utlb_entry *ent = &mmu_utlb[utlb];
|
nkeynes@939 | 1254 | uint32_t old_flags = ent->flags;
|
nkeynes@939 | 1255 | ent->flags = ent->flags & ~(TLB_DIRTY|TLB_VALID);
|
nkeynes@939 | 1256 | ent->flags |= (val & TLB_VALID);
|
nkeynes@939 | 1257 | ent->flags |= ((val & 0x200)>>7);
|
nkeynes@939 | 1258 | if( ((old_flags^ent->flags) & (TLB_VALID|TLB_DIRTY)) != 0 ) {
|
nkeynes@939 | 1259 | if( old_flags & TLB_VALID )
|
nkeynes@939 | 1260 | mmu_utlb_remove_entry( utlb );
|
nkeynes@939 | 1261 | if( ent->flags & TLB_VALID )
|
nkeynes@939 | 1262 | mmu_utlb_insert_entry( utlb );
|
nkeynes@939 | 1263 | }
|
nkeynes@939 | 1264 | }
|
nkeynes@939 | 1265 |
|
nkeynes@939 | 1266 | int itlb = mmu_itlb_lookup_assoc( val, mmu_asid );
|
nkeynes@939 | 1267 | if( itlb >= 0 ) {
|
nkeynes@939 | 1268 | struct itlb_entry *ent = &mmu_itlb[itlb];
|
nkeynes@939 | 1269 | ent->flags = (ent->flags & (~TLB_VALID)) | (val & TLB_VALID);
|
nkeynes@939 | 1270 | }
|
nkeynes@939 | 1271 |
|
nkeynes@939 | 1272 | if( itlb == -2 || utlb == -2 ) {
|
nkeynes@939 | 1273 | RAISE_TLB_MULTIHIT_ERROR(addr);
|
nkeynes@939 | 1274 | EXCEPTION_EXIT();
|
nkeynes@939 | 1275 | return;
|
nkeynes@939 | 1276 | }
|
nkeynes@939 | 1277 | } else {
|
nkeynes@939 | 1278 | struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
|
nkeynes@939 | 1279 | if( ent->flags & TLB_VALID )
|
nkeynes@939 | 1280 | mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
|
nkeynes@939 | 1281 | ent->vpn = (val & 0xFFFFFC00);
|
nkeynes@939 | 1282 | ent->asid = (val & 0xFF);
|
nkeynes@939 | 1283 | ent->flags = (ent->flags & ~(TLB_DIRTY|TLB_VALID));
|
nkeynes@939 | 1284 | ent->flags |= (val & TLB_VALID);
|
nkeynes@939 | 1285 | ent->flags |= ((val & 0x200)>>7);
|
nkeynes@939 | 1286 | if( ent->flags & TLB_VALID )
|
nkeynes@939 | 1287 | mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
|
nkeynes@939 | 1288 | }
|
nkeynes@939 | 1289 | }
|
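/* Example (illustrative): writing 0x12345000 to 0xF6000080 (association bit
 * set) searches both TLBs for VPN 0x12345xxx under the current ASID. Since
 * bits 8 (V) and 9 (D) of the value are clear, a matching UTLB entry is
 * invalidated and unmapped, a matching ITLB entry just has its V bit cleared,
 * and a multiple hit in either array raises EXC_TLB_MULTI_HIT. */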
nkeynes@939 | 1290 |
|
nkeynes@939 | 1291 | void FASTCALL mmu_utlb_data_write( sh4addr_t addr, uint32_t val )
|
nkeynes@939 | 1292 | {
|
nkeynes@939 | 1293 | struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
|
nkeynes@939 | 1294 | if( UTLB_DATA2(addr) ) {
|
nkeynes@939 | 1295 | ent->pcmcia = val & 0x0000000F;
|
nkeynes@939 | 1296 | } else {
|
nkeynes@939 | 1297 | if( ent->flags & TLB_VALID )
|
nkeynes@939 | 1298 | mmu_utlb_remove_entry( UTLB_ENTRY(addr) );
|
nkeynes@939 | 1299 | ent->ppn = (val & 0x1FFFFC00);
|
nkeynes@939 | 1300 | ent->flags = (val & 0x000001FF);
|
nkeynes@939 | 1301 | ent->mask = get_tlb_size_mask(val);
|
nkeynes@939 | 1302 | if( ent->flags & TLB_VALID )
|
nkeynes@939 | 1303 | mmu_utlb_insert_entry( UTLB_ENTRY(addr) );
|
nkeynes@939 | 1304 | }
|
nkeynes@939 | 1305 | }
|
nkeynes@939 | 1306 |
|
nkeynes@939 | 1307 | struct mem_region_fn p4_region_itlb_addr = {
|
nkeynes@939 | 1308 | mmu_itlb_addr_read, mmu_itlb_addr_write,
|
nkeynes@939 | 1309 | mmu_itlb_addr_read, mmu_itlb_addr_write,
|
nkeynes@939 | 1310 | mmu_itlb_addr_read, mmu_itlb_addr_write,
|
nkeynes@939 | 1311 | unmapped_read_burst, unmapped_write_burst };
|
nkeynes@939 | 1312 | struct mem_region_fn p4_region_itlb_data = {
|
nkeynes@939 | 1313 | mmu_itlb_data_read, mmu_itlb_data_write,
|
nkeynes@939 | 1314 | mmu_itlb_data_read, mmu_itlb_data_write,
|
nkeynes@939 | 1315 | mmu_itlb_data_read, mmu_itlb_data_write,
|
nkeynes@939 | 1316 | unmapped_read_burst, unmapped_write_burst };
|
nkeynes@939 | 1317 | struct mem_region_fn p4_region_utlb_addr = {
|
nkeynes@939 | 1318 | mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
|
nkeynes@939 | 1319 | mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
|
nkeynes@939 | 1320 | mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
|
nkeynes@939 | 1321 | unmapped_read_burst, unmapped_write_burst };
|
nkeynes@939 | 1322 | struct mem_region_fn p4_region_utlb_data = {
|
nkeynes@939 | 1323 | mmu_utlb_data_read, mmu_utlb_data_write,
|
nkeynes@939 | 1324 | mmu_utlb_data_read, mmu_utlb_data_write,
|
nkeynes@939 | 1325 | mmu_utlb_data_read, mmu_utlb_data_write,
|
nkeynes@939 | 1326 | unmapped_read_burst, unmapped_write_burst };
|
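/* Each of the register-array regions above reuses a single accessor for the
 * long/word/byte slots and leaves the burst slots on the unmapped handlers
 * (slot order assumed to follow struct mem_region_fn: long, word, byte,
 * then the burst read/write pair). */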
nkeynes@939 | 1327 |
|
nkeynes@939 | 1328 | /********************** Error regions **************************/
|
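/* The handlers in this section never complete the access: each one records
 * the faulting address in TEA/PTEH, raises the corresponding exception, and
 * leaves via EXCEPTION_EXIT. The mem_region_fn tables below are presumably
 * what gets installed into address-space pages whose translation must fault
 * (TLB miss, protection violation, initial-page write, multiple hit, or
 * address error). */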
nkeynes@939 | 1329 |
|
nkeynes@939 | 1330 | static void FASTCALL address_error_read( sh4addr_t addr, void *exc )
|
nkeynes@939 | 1331 | {
|
nkeynes@939 | 1332 | RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
|
nkeynes@939 | 1333 | EXCEPTION_EXIT();
|
nkeynes@939 | 1334 | }
|
nkeynes@939 | 1335 |
|
nkeynes@939 | 1336 | static void FASTCALL address_error_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
|
nkeynes@939 | 1337 | {
|
nkeynes@939 | 1338 | RAISE_MEM_ERROR(EXC_DATA_ADDR_READ, addr);
|
nkeynes@939 | 1339 | EXCEPTION_EXIT();
|
nkeynes@939 | 1340 | }
|
nkeynes@939 | 1341 |
|
nkeynes@939 | 1342 | static void FASTCALL address_error_write( sh4addr_t addr, uint32_t val, void *exc )
|
nkeynes@939 | 1343 | {
|
nkeynes@939 | 1344 | RAISE_MEM_ERROR(EXC_DATA_ADDR_WRITE, addr);
|
nkeynes@939 | 1345 | EXCEPTION_EXIT();
|
nkeynes@939 | 1346 | }
|
nkeynes@939 | 1347 |
|
nkeynes@939 | 1348 | static void FASTCALL tlb_miss_read( sh4addr_t addr, void *exc )
|
nkeynes@939 | 1349 | {
|
nkeynes@939 | 1350 | RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
|
nkeynes@939 | 1351 | EXCEPTION_EXIT();
|
nkeynes@939 | 1352 | }
|
nkeynes@939 | 1353 |
|
nkeynes@939 | 1354 | static void FASTCALL tlb_miss_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
|
nkeynes@939 | 1355 | {
|
nkeynes@939 | 1356 | RAISE_TLB_ERROR(EXC_TLB_MISS_READ, addr);
|
nkeynes@939 | 1357 | EXCEPTION_EXIT();
|
nkeynes@939 | 1358 | }
|
nkeynes@939 | 1359 |
|
nkeynes@939 | 1360 | static void FASTCALL tlb_miss_write( sh4addr_t addr, uint32_t val, void *exc )
|
nkeynes@939 | 1361 | {
|
nkeynes@939 | 1362 | RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, addr);
|
nkeynes@939 | 1363 | EXCEPTION_EXIT();
|
nkeynes@939 | 1364 | }
|
nkeynes@939 | 1365 |
|
nkeynes@939 | 1366 | static int32_t FASTCALL tlb_protected_read( sh4addr_t addr, void *exc )
|
nkeynes@939 | 1367 | {
|
nkeynes@939 | 1368 | RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
|
nkeynes@939 | 1369 | EXCEPTION_EXIT();
|
nkeynes@939 | 1370 | }
|
nkeynes@939 | 1371 |
|
nkeynes@939 | 1372 | static int32_t FASTCALL tlb_protected_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
|
nkeynes@939 | 1373 | {
|
nkeynes@939 | 1374 | RAISE_MEM_ERROR(EXC_TLB_PROT_READ, addr);
|
nkeynes@939 | 1375 | EXCEPTION_EXIT();
|
nkeynes@939 | 1376 | }
|
nkeynes@939 | 1377 |
|
nkeynes@939 | 1378 | static void FASTCALL tlb_protected_write( sh4addr_t addr, uint32_t val, void *exc )
|
nkeynes@939 | 1379 | {
|
nkeynes@939 | 1380 | RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, addr);
|
nkeynes@939 | 1381 | EXCEPTION_EXIT();
|
nkeynes@939 | 1382 | }
|
nkeynes@939 | 1383 |
|
nkeynes@939 | 1384 | static void FASTCALL tlb_initial_write( sh4addr_t addr, uint32_t val, void *exc )
|
nkeynes@939 | 1385 | {
|
nkeynes@939 | 1386 | RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, addr);
|
nkeynes@939 | 1387 | EXCEPTION_EXIT();
|
nkeynes@939 | 1388 | }
|
nkeynes@939 | 1389 |
|
nkeynes@939 | 1390 | static int32_t FASTCALL tlb_multi_hit_read( sh4addr_t addr, void *exc )
|
nkeynes@939 | 1391 | {
|
nkeynes@939 | 1392 | MMIO_WRITE(MMU, TEA, addr);
|
nkeynes@939 | 1393 | MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (addr&0xFFFFFC00)));
|
nkeynes@939 | 1394 | sh4_raise_reset(EXC_TLB_MULTI_HIT);
|
nkeynes@939 | 1395 | EXCEPTION_EXIT();
|
nkeynes@939 | 1396 | }
|
nkeynes@939 | 1397 |
|
nkeynes@939 | 1398 | static int32_t FASTCALL tlb_multi_hit_read_burst( unsigned char *dest, sh4addr_t addr, void *exc )
|
nkeynes@939 | 1399 | {
|
nkeynes@939 | 1400 | MMIO_WRITE(MMU, TEA, addr);
|
nkeynes@939 | 1401 | MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (addr&0xFFFFFC00)));
|
nkeynes@939 | 1402 | sh4_raise_reset(EXC_TLB_MULTI_HIT);
|
nkeynes@939 | 1403 | EXCEPTION_EXIT();
|
nkeynes@939 | 1404 | }
|
nkeynes@939 | 1405 | static void FASTCALL tlb_multi_hit_write( sh4addr_t addr, uint32_t val, void *exc )
|
nkeynes@939 | 1406 | {
|
nkeynes@939 | 1407 | MMIO_WRITE(MMU, TEA, addr);
|
nkeynes@939 | 1408 | MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (addr&0xFFFFFC00)));
|
nkeynes@939 | 1409 | sh4_raise_reset(EXC_TLB_MULTI_HIT);
|
nkeynes@939 | 1410 | EXCEPTION_EXIT();
|
nkeynes@939 | 1411 | }
|
nkeynes@939 | 1412 |
|
nkeynes@939 | 1413 | /**
|
nkeynes@939 | 1414 | * Note: Per sec 4.6.4 of the SH7750 manual, SQ
|
nkeynes@939 | 1415 | */
|
nkeynes@939 | 1416 | struct mem_region_fn mem_region_address_error = {
|
nkeynes@939 | 1417 | (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
|
nkeynes@939 | 1418 | (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
|
nkeynes@939 | 1419 | (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
|
nkeynes@939 | 1420 | (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write };
|
nkeynes@939 | 1421 |
|
nkeynes@939 | 1422 | struct mem_region_fn mem_region_tlb_miss = {
|
nkeynes@939 | 1423 | (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
|
nkeynes@939 | 1424 | (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
|
nkeynes@939 | 1425 | (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
|
nkeynes@939 | 1426 | (mem_read_burst_fn_t)tlb_miss_read_burst, (mem_write_burst_fn_t)tlb_miss_write };
|
nkeynes@939 | 1427 |
|
nkeynes@939 | 1428 | struct mem_region_fn mem_region_user_protected = {
|
nkeynes@939 | 1429 | (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
|
nkeynes@939 | 1430 | (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
|
nkeynes@939 | 1431 | (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
|
nkeynes@939 | 1432 | (mem_read_burst_fn_t)tlb_protected_read_burst, (mem_write_burst_fn_t)tlb_protected_write };
|
nkeynes@939 | 1433 |
|
nkeynes@939 | 1434 | struct mem_region_fn mem_region_tlb_multihit = {
|
nkeynes@939 | 1435 | (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
|
nkeynes@939 | 1436 | (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
|
nkeynes@939 | 1437 | (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
|
nkeynes@939 | 1438 | (mem_read_burst_fn_t)tlb_multi_hit_read_burst, (mem_write_burst_fn_t)tlb_multi_hit_write };
|
nkeynes@939 | 1439 |
|
nkeynes@939 | 1440 |
|
nkeynes@939 | 1441 |
|