Search
lxdream.org :: lxdream/src/sh4/mmu.c :: diff
lxdream 0.9.1
released Jun 29
Download Now
filename src/sh4/mmu.c
changeset 1217:677b1d85f1b4
prev 1202:01ae5cbad4c8
next 1295:9067aff5522d
author nkeynes
date Mon Feb 13 20:00:27 2012 +1000 (8 years ago)
permissions -rw-r--r--
last change Fix MMU on non-translated platforms
- reintroduce old VMA translation functions (slightly modified)
- modify shadow processing to work on post-translated memory ops
file annotate diff log raw
1.1 --- a/src/sh4/mmu.c Fri Dec 23 08:20:17 2011 +1000
1.2 +++ b/src/sh4/mmu.c Mon Feb 13 20:00:27 2012 +1000
1.3 @@ -34,6 +34,9 @@
1.4 mem_region_fn_t *sh4_address_space;
1.5 mem_region_fn_t *sh4_user_address_space;
1.6
1.7 +/* External address space (usually the same as the global ext_address_space) */
1.8 +static mem_region_fn_t *sh4_ext_address_space;
1.9 +
1.10 /* Accessed from the UTLB accessor methods */
1.11 uint32_t mmu_urc;
1.12 uint32_t mmu_urb;
1.13 @@ -92,6 +95,19 @@
1.14
1.15 #define IS_STOREQUEUE_PROTECTED() (mmu_user_storequeue_regions == &mmu_default_regions[DEFAULT_STOREQUEUE_SQMD_REGIONS])
1.16
#ifndef SH4_TRANSLATOR
/* Dummy MMU vtable functions.
 * On non-translated (interpreter-only) builds there is no generated code
 * whose dispatch tables need patching, so these hooks are deliberate
 * no-ops; they exist only so the core MMU code can call them
 * unconditionally. */
void mmu_utlb_init_vtable( struct utlb_entry *ent, struct utlb_page_entry *page, gboolean writable )
{
    /* No-op: no translator page vtable to initialise */
}
void mmu_utlb_init_storequeue_vtable( struct utlb_entry *ent, struct utlb_page_entry *page )
{
    /* No-op: no translator store-queue vtable to initialise */
}
void mmu_utlb_1k_init_vtable( struct utlb_1k_entry *entry )
{
    /* No-op: no translator 1K-page vtable to initialise */
}
#endif
1.29 +
1.30 /*********************** Module public functions ****************************/
1.31
1.32 /**
1.33 @@ -101,6 +117,7 @@
1.34
1.35 void MMU_init()
1.36 {
1.37 + sh4_ext_address_space = ext_address_space;
1.38 sh4_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 256 );
1.39 sh4_user_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 256 );
1.40 mmu_user_storequeue_regions = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
1.41 @@ -343,6 +360,14 @@
1.42
1.43 /********************** Address space maintenance *************************/
1.44
1.45 +mem_region_fn_t *mmu_set_ext_address_space( mem_region_fn_t *ext )
1.46 +{
1.47 + mem_region_fn_t *old_ext = sh4_ext_address_space;
1.48 + sh4_ext_address_space = ext;
1.49 + mmu_set_tlb_enabled(IS_TLB_ENABLED());
1.50 + return old_ext;
1.51 +}
1.52 +
1.53 /**
1.54 * MMU accessor functions just increment URC - fixup here if necessary
1.55 */
1.56 @@ -415,10 +440,10 @@
1.57 mmu_utlb_register_all();
1.58 } else {
1.59 for( i=0, ptr = sh4_address_space; i<7; i++, ptr += LXDREAM_PAGE_TABLE_ENTRIES ) {
1.60 - memcpy( ptr, ext_address_space, sizeof(mem_region_fn_t) * LXDREAM_PAGE_TABLE_ENTRIES );
1.61 + memcpy( ptr, sh4_ext_address_space, sizeof(mem_region_fn_t) * LXDREAM_PAGE_TABLE_ENTRIES );
1.62 }
1.63 for( i=0, ptr = sh4_user_address_space; i<4; i++, ptr += LXDREAM_PAGE_TABLE_ENTRIES ) {
1.64 - memcpy( ptr, ext_address_space, sizeof(mem_region_fn_t) * LXDREAM_PAGE_TABLE_ENTRIES );
1.65 + memcpy( ptr, sh4_ext_address_space, sizeof(mem_region_fn_t) * LXDREAM_PAGE_TABLE_ENTRIES );
1.66 }
1.67
1.68 mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
1.69 @@ -1190,6 +1215,219 @@
1.70 }
1.71 }
1.72
1.73 +/**
1.74 + * Translate a virtual to physical address for reading, raising exceptions as
1.75 + * observed.
1.76 + * @param addr Pointer to the virtual memory address. On successful return,
1.77 + * will be updated to contain the physical address.
1.78 + */
/**
 * Translate a virtual to physical address for reading, raising exceptions as
 * observed.
 * @param paddr Pointer to the virtual memory address. On successful return,
 * will be updated to contain the physical address.
 * @return the memory region for the translated address, or NULL if an
 * exception was raised (address error, TLB miss/multi-hit, or protection).
 */
mem_region_fn_t FASTCALL mmu_get_region_for_vma_read( sh4vma_t *paddr )
{
    sh4vma_t addr = *paddr;
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    /* Top-half addresses (P1..P4) get region-specific handling first */
    if( addr & 0x80000000 ) {
        if( IS_SH4_PRIVMODE() ) {
            if( addr >= 0xE0000000 ) {
                return sh4_address_space[((uint32_t)addr)>>12]; /* P4 - passthrough */
            } else if( addr < 0xC0000000 ) {
                /* P1, P2 regions are pass-through (no translation) */
                return sh4_ext_address_space[VMA_TO_EXT_ADDR(addr)>>12];
            }
            /* P3 (0xC0000000..0xDFFFFFFF) falls through to translation below */
        } else {
            if( addr >= 0xE0000000 && addr < 0xE4000000 &&
                ((mmucr&MMUCR_SQMD) == 0) ) {
                /* Conditional user-mode access to the store-queue (no translation) */
                return &p4_region_storequeue;
            }
            /* All other top-half addresses are illegal from user mode */
            sh4_raise_exception(EXC_DATA_ADDR_READ);
            return NULL;
        }
    }

    /* Address translation disabled: virtual == physical */
    if( (mmucr & MMUCR_AT) == 0 ) {
        return sh4_ext_address_space[VMA_TO_EXT_ADDR(addr)>>12];
    }

    /* If we get this far, translation is required */
    int entryNo;
    /* SV=0 or user mode: lookups must also match the current ASID */
    if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
        entryNo = mmu_utlb_lookup_vpn_asid( addr );
    } else {
        entryNo = mmu_utlb_lookup_vpn( addr );
    }

    switch(entryNo) {
    case -1: /* no matching UTLB entry */
        RAISE_TLB_ERROR(EXC_TLB_MISS_READ,addr);
        return NULL;
    case -2: /* multiple matching entries */
        RAISE_TLB_MULTIHIT_ERROR(addr);
        return NULL;
    default:
        if( (mmu_utlb[entryNo].flags & TLB_USERMODE) == 0 &&
            !IS_SH4_PRIVMODE() ) {
            /* protection violation */
            RAISE_MEM_ERROR(EXC_TLB_PROT_READ,addr);
            return NULL;
        }

        /* finally generate the target address */
        sh4addr_t pma = (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
                (addr & (~mmu_utlb[entryNo].mask));
        if( pma > 0x1C000000 ) { // Remap 1Cxx .. 1Fxx region to P4
            addr = pma | 0xE0000000;
            *paddr = addr;
            return sh4_address_space[addr>>12];
        } else {
            *paddr = pma;
            return sh4_ext_address_space[pma>>12];
        }
    }
}
1.142 +
1.143 +/**
1.144 + * Translate a virtual to physical address for prefetch, which mostly
1.145 + * does not raise exceptions.
1.146 + * @param addr Pointer to the virtual memory address. On successful return,
1.147 + * will be updated to contain the physical address.
1.148 + */
1.149 +mem_region_fn_t FASTCALL mmu_get_region_for_vma_prefetch( sh4vma_t *paddr )
1.150 +{
1.151 + sh4vma_t addr = *paddr;
1.152 + uint32_t mmucr = MMIO_READ(MMU,MMUCR);
1.153 + if( addr & 0x80000000 ) {
1.154 + if( IS_SH4_PRIVMODE() ) {
1.155 + if( addr >= 0xE0000000 ) {
1.156 + return sh4_address_space[((uint32_t)addr)>>12]; /* P4 - passthrough */
1.157 + } else if( addr < 0xC0000000 ) {
1.158 + /* P1, P2 regions are pass-through (no translation) */
1.159 + return sh4_ext_address_space[VMA_TO_EXT_ADDR(addr)>>12];
1.160 + }
1.161 + } else {
1.162 + if( addr >= 0xE0000000 && addr < 0xE4000000 &&
1.163 + ((mmucr&MMUCR_SQMD) == 0) ) {
1.164 + /* Conditional user-mode access to the store-queue (no translation) */
1.165 + return &p4_region_storequeue;
1.166 + }
1.167 + sh4_raise_exception(EXC_DATA_ADDR_READ);
1.168 + return NULL;
1.169 + }
1.170 + }
1.171 +
1.172 + if( (mmucr & MMUCR_AT) == 0 ) {
1.173 + return sh4_ext_address_space[VMA_TO_EXT_ADDR(addr)>>12];
1.174 + }
1.175 +
1.176 + /* If we get this far, translation is required */
1.177 + int entryNo;
1.178 + if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
1.179 + entryNo = mmu_utlb_lookup_vpn_asid( addr );
1.180 + } else {
1.181 + entryNo = mmu_utlb_lookup_vpn( addr );
1.182 + }
1.183 +
1.184 + switch(entryNo) {
1.185 + case -1:
1.186 + return &mem_region_unmapped;
1.187 + case -2:
1.188 + RAISE_TLB_MULTIHIT_ERROR(addr);
1.189 + return NULL;
1.190 + default:
1.191 + if( (mmu_utlb[entryNo].flags & TLB_USERMODE) == 0 &&
1.192 + !IS_SH4_PRIVMODE() ) {
1.193 + /* protection violation */
1.194 + return &mem_region_unmapped;
1.195 + }
1.196 +
1.197 + /* finally generate the target address */
1.198 + sh4addr_t pma = (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
1.199 + (addr & (~mmu_utlb[entryNo].mask));
1.200 + if( pma > 0x1C000000 ) { // Remap 1Cxx .. 1Fxx region to P4
1.201 + addr = pma | 0xE0000000;
1.202 + *paddr = addr;
1.203 + return sh4_address_space[addr>>12];
1.204 + } else {
1.205 + *paddr = pma;
1.206 + return sh4_ext_address_space[pma>>12];
1.207 + }
1.208 + }
1.209 +}
1.210 +
1.211 +/**
1.212 + * Translate a virtual to physical address for writing, raising exceptions as
1.213 + * observed.
1.214 + */
/**
 * Translate a virtual to physical address for writing, raising exceptions as
 * observed.
 * @param paddr Pointer to the virtual memory address. On successful return,
 * will be updated to contain the physical address.
 * @return the memory region for the translated address, or NULL if an
 * exception was raised (address error, TLB miss/multi-hit, protection, or
 * initial page write).
 */
mem_region_fn_t FASTCALL mmu_get_region_for_vma_write( sh4vma_t *paddr )
{
    sh4vma_t addr = *paddr;
    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
    /* Top-half addresses (P1..P4) get region-specific handling first */
    if( addr & 0x80000000 ) {
        if( IS_SH4_PRIVMODE() ) {
            if( addr >= 0xE0000000 ) {
                return sh4_address_space[((uint32_t)addr)>>12]; /* P4 - passthrough */
            } else if( addr < 0xC0000000 ) {
                /* P1, P2 regions are pass-through (no translation) */
                return sh4_ext_address_space[VMA_TO_EXT_ADDR(addr)>>12];
            }
            /* P3 (0xC0000000..0xDFFFFFFF) falls through to translation below */
        } else {
            if( addr >= 0xE0000000 && addr < 0xE4000000 &&
                ((mmucr&MMUCR_SQMD) == 0) ) {
                /* Conditional user-mode access to the store-queue (no translation) */
                return &p4_region_storequeue;
            }
            /* All other top-half addresses are illegal from user mode */
            sh4_raise_exception(EXC_DATA_ADDR_WRITE);
            return NULL;
        }
    }

    /* Address translation disabled: virtual == physical */
    if( (mmucr & MMUCR_AT) == 0 ) {
        return sh4_ext_address_space[VMA_TO_EXT_ADDR(addr)>>12];
    }

    /* If we get this far, translation is required */
    int entryNo;
    /* SV=0 or user mode: lookups must also match the current ASID */
    if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
        entryNo = mmu_utlb_lookup_vpn_asid( addr );
    } else {
        entryNo = mmu_utlb_lookup_vpn( addr );
    }

    switch(entryNo) {
    case -1: /* no matching UTLB entry */
        RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE,addr);
        return NULL;
    case -2: /* multiple matching entries */
        RAISE_TLB_MULTIHIT_ERROR(addr);
        return NULL;
    default:
        /* Privileged mode needs the page writable; user mode needs it both
         * user-accessible and writable */
        if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
                : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
            /* protection violation */
            RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE,addr);
            return NULL;
        }

        /* First write to a clean page: raise the initial-page-write
         * exception so the OS can set the dirty bit */
        if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
            RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, addr);
            return NULL;
        }

        /* finally generate the target address */
        sh4addr_t pma = (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
                (addr & (~mmu_utlb[entryNo].mask));
        if( pma > 0x1C000000 ) { // Remap 1Cxx .. 1Fxx region to P4
            addr = pma | 0xE0000000;
            *paddr = addr;
            return sh4_address_space[addr>>12];
        } else {
            *paddr = pma;
            return sh4_ext_address_space[pma>>12];
        }
    }
}
1.283 +
1.284 +
1.285 +
1.286 /********************** TLB Direct-Access Regions ***************************/
1.287 #define ITLB_ENTRY(addr) ((addr>>7)&0x03)
1.288
.