changeset 1217:677b1d85f1b4
parent    1216:defbd44429d8
child     1218:be02e87f9f87
author    nkeynes
date      Mon Feb 13 20:00:27 2012 +1000
Fix MMU on non-translated platforms
- reintroduce old VMA translation functions (slightly modified)
- modify shadow processing to work on post-translated memory ops
src/sh4/mmu.c
src/sh4/mmu.h
src/sh4/sh4core.in
src/sh4/shadow.c
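
For orientation before the diffs: on platforms without the SH4 translator, the interpreter's memory macros in sh4core.in now resolve a region handler first and only then call through it. The helper below is a hand-written sketch of that pattern, not code from this changeset; do_interpreted_read_long() is an invented name and the header locations are assumed to match the lxdream tree.

/* Illustrative sketch of the flow the new MEM_READ_LONG() macro inlines.
 * Not part of the changeset; do_interpreted_read_long() is a made-up name. */
#include "sh4/sh4core.h"   /* sh4r, sh4vma_t (assumed location) */
#include "sh4/mmu.h"       /* mmu_get_region_for_vma_read() */

static gboolean do_interpreted_read_long( sh4vma_t vma, uint32_t *out )
{
    sh4vma_t addrtmp = vma;
    mem_region_fn_t fn = mmu_get_region_for_vma_read( &addrtmp );
    if( fn == NULL ) {
        /* The MMU has already raised the SH4 exception; the real macro
         * returns TRUE straight out of sh4_execute_instruction() here. */
        sh4r.in_delay_slot = 0;
        return FALSE;                  /* sketch convention: value not read */
    }
    *out = fn->read_long( addrtmp );   /* addrtmp now holds the translated address */
    return TRUE;
}
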
1.1 --- a/src/sh4/mmu.c Mon Feb 13 19:59:19 2012 +1000
1.2 +++ b/src/sh4/mmu.c Mon Feb 13 20:00:27 2012 +1000
1.3 @@ -34,6 +34,9 @@
1.4 mem_region_fn_t *sh4_address_space;
1.5 mem_region_fn_t *sh4_user_address_space;
1.6
1.7 +/* External address space (usually the same as the global ext_address_space) */
1.8 +static mem_region_fn_t *sh4_ext_address_space;
1.9 +
1.10 /* Accessed from the UTLB accessor methods */
1.11 uint32_t mmu_urc;
1.12 uint32_t mmu_urb;
1.13 @@ -92,6 +95,19 @@
1.14
1.15 #define IS_STOREQUEUE_PROTECTED() (mmu_user_storequeue_regions == &mmu_default_regions[DEFAULT_STOREQUEUE_SQMD_REGIONS])
1.16
1.17 +#ifndef SH4_TRANSLATOR
1.18 +/* Dummy MMU vtable functions */
1.19 +void mmu_utlb_init_vtable( struct utlb_entry *ent, struct utlb_page_entry *page, gboolean writable )
1.20 +{
1.21 +}
1.22 +void mmu_utlb_init_storequeue_vtable( struct utlb_entry *ent, struct utlb_page_entry *page )
1.23 +{
1.24 +}
1.25 +void mmu_utlb_1k_init_vtable( struct utlb_1k_entry *entry )
1.26 +{
1.27 +}
1.28 +#endif
1.29 +
1.30 /*********************** Module public functions ****************************/
1.31
1.32 /**
1.33 @@ -101,6 +117,7 @@
1.34
1.35 void MMU_init()
1.36 {
1.37 + sh4_ext_address_space = ext_address_space;
1.38 sh4_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 256 );
1.39 sh4_user_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 256 );
1.40 mmu_user_storequeue_regions = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
1.41 @@ -343,6 +360,14 @@
1.42
1.43 /********************** Address space maintenance *************************/
1.44
1.45 +mem_region_fn_t *mmu_set_ext_address_space( mem_region_fn_t *ext )
1.46 +{
1.47 + mem_region_fn_t *old_ext = sh4_ext_address_space;
1.48 + sh4_ext_address_space = ext;
1.49 + mmu_set_tlb_enabled(IS_TLB_ENABLED());
1.50 + return old_ext;
1.51 +}
1.52 +
1.53 /**
1.54 * MMU accessor functions just increment URC - fixup here if necessary
1.55 */
1.56 @@ -415,10 +440,10 @@
1.57 mmu_utlb_register_all();
1.58 } else {
1.59 for( i=0, ptr = sh4_address_space; i<7; i++, ptr += LXDREAM_PAGE_TABLE_ENTRIES ) {
1.60 - memcpy( ptr, ext_address_space, sizeof(mem_region_fn_t) * LXDREAM_PAGE_TABLE_ENTRIES );
1.61 + memcpy( ptr, sh4_ext_address_space, sizeof(mem_region_fn_t) * LXDREAM_PAGE_TABLE_ENTRIES );
1.62 }
1.63 for( i=0, ptr = sh4_user_address_space; i<4; i++, ptr += LXDREAM_PAGE_TABLE_ENTRIES ) {
1.64 - memcpy( ptr, ext_address_space, sizeof(mem_region_fn_t) * LXDREAM_PAGE_TABLE_ENTRIES );
1.65 + memcpy( ptr, sh4_ext_address_space, sizeof(mem_region_fn_t) * LXDREAM_PAGE_TABLE_ENTRIES );
1.66 }
1.67
1.68 mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
1.69 @@ -1190,6 +1215,219 @@
1.70 }
1.71 }
1.72
1.73 +/**
1.74 + * Translate a virtual to physical address for reading, raising exceptions as
1.75 + * observed.
1.76 + * @param addr Pointer to the virtual memory address. On successful return,
1.77 + * will be updated to contain the physical address.
1.78 + */
1.79 +mem_region_fn_t FASTCALL mmu_get_region_for_vma_read( sh4vma_t *paddr )
1.80 +{
1.81 + sh4vma_t addr = *paddr;
1.82 + uint32_t mmucr = MMIO_READ(MMU,MMUCR);
1.83 + if( addr & 0x80000000 ) {
1.84 + if( IS_SH4_PRIVMODE() ) {
1.85 + if( addr >= 0xE0000000 ) {
1.86 + return sh4_address_space[((uint32_t)addr)>>12]; /* P4 - passthrough */
1.87 + } else if( addr < 0xC0000000 ) {
1.88 + /* P1, P2 regions are pass-through (no translation) */
1.89 + return sh4_ext_address_space[VMA_TO_EXT_ADDR(addr)>>12];
1.90 + }
1.91 + } else {
1.92 + if( addr >= 0xE0000000 && addr < 0xE4000000 &&
1.93 + ((mmucr&MMUCR_SQMD) == 0) ) {
1.94 + /* Conditional user-mode access to the store-queue (no translation) */
1.95 + return &p4_region_storequeue;
1.96 + }
1.97 + sh4_raise_exception(EXC_DATA_ADDR_READ);
1.98 + return NULL;
1.99 + }
1.100 + }
1.101 +
1.102 + if( (mmucr & MMUCR_AT) == 0 ) {
1.103 + return sh4_ext_address_space[VMA_TO_EXT_ADDR(addr)>>12];
1.104 + }
1.105 +
1.106 + /* If we get this far, translation is required */
1.107 + int entryNo;
1.108 + if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
1.109 + entryNo = mmu_utlb_lookup_vpn_asid( addr );
1.110 + } else {
1.111 + entryNo = mmu_utlb_lookup_vpn( addr );
1.112 + }
1.113 +
1.114 + switch(entryNo) {
1.115 + case -1:
1.116 + RAISE_TLB_ERROR(EXC_TLB_MISS_READ,addr);
1.117 + return NULL;
1.118 + case -2:
1.119 + RAISE_TLB_MULTIHIT_ERROR(addr);
1.120 + return NULL;
1.121 + default:
1.122 + if( (mmu_utlb[entryNo].flags & TLB_USERMODE) == 0 &&
1.123 + !IS_SH4_PRIVMODE() ) {
1.124 + /* protection violation */
1.125 + RAISE_MEM_ERROR(EXC_TLB_PROT_READ,addr);
1.126 + return NULL;
1.127 + }
1.128 +
1.129 + /* finally generate the target address */
1.130 + sh4addr_t pma = (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
1.131 + (addr & (~mmu_utlb[entryNo].mask));
1.132 + if( pma > 0x1C000000 ) { // Remap 1Cxx .. 1Fxx region to P4
1.133 + addr = pma | 0xE0000000;
1.134 + *paddr = addr;
1.135 + return sh4_address_space[addr>>12];
1.136 + } else {
1.137 + *paddr = pma;
1.138 + return sh4_ext_address_space[pma>>12];
1.139 + }
1.140 + }
1.141 +}
1.142 +
1.143 +/**
1.144 + * Translate a virtual to physical address for prefetch, which mostly
1.145 + * does not raise exceptions.
1.146 + * @param addr Pointer to the virtual memory address. On successful return,
1.147 + * will be updated to contain the physical address.
1.148 + */
1.149 +mem_region_fn_t FASTCALL mmu_get_region_for_vma_prefetch( sh4vma_t *paddr )
1.150 +{
1.151 + sh4vma_t addr = *paddr;
1.152 + uint32_t mmucr = MMIO_READ(MMU,MMUCR);
1.153 + if( addr & 0x80000000 ) {
1.154 + if( IS_SH4_PRIVMODE() ) {
1.155 + if( addr >= 0xE0000000 ) {
1.156 + return sh4_address_space[((uint32_t)addr)>>12]; /* P4 - passthrough */
1.157 + } else if( addr < 0xC0000000 ) {
1.158 + /* P1, P2 regions are pass-through (no translation) */
1.159 + return sh4_ext_address_space[VMA_TO_EXT_ADDR(addr)>>12];
1.160 + }
1.161 + } else {
1.162 + if( addr >= 0xE0000000 && addr < 0xE4000000 &&
1.163 + ((mmucr&MMUCR_SQMD) == 0) ) {
1.164 + /* Conditional user-mode access to the store-queue (no translation) */
1.165 + return &p4_region_storequeue;
1.166 + }
1.167 + sh4_raise_exception(EXC_DATA_ADDR_READ);
1.168 + return NULL;
1.169 + }
1.170 + }
1.171 +
1.172 + if( (mmucr & MMUCR_AT) == 0 ) {
1.173 + return sh4_ext_address_space[VMA_TO_EXT_ADDR(addr)>>12];
1.174 + }
1.175 +
1.176 + /* If we get this far, translation is required */
1.177 + int entryNo;
1.178 + if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
1.179 + entryNo = mmu_utlb_lookup_vpn_asid( addr );
1.180 + } else {
1.181 + entryNo = mmu_utlb_lookup_vpn( addr );
1.182 + }
1.183 +
1.184 + switch(entryNo) {
1.185 + case -1:
1.186 + return &mem_region_unmapped;
1.187 + case -2:
1.188 + RAISE_TLB_MULTIHIT_ERROR(addr);
1.189 + return NULL;
1.190 + default:
1.191 + if( (mmu_utlb[entryNo].flags & TLB_USERMODE) == 0 &&
1.192 + !IS_SH4_PRIVMODE() ) {
1.193 + /* protection violation */
1.194 + return &mem_region_unmapped;
1.195 + }
1.196 +
1.197 + /* finally generate the target address */
1.198 + sh4addr_t pma = (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
1.199 + (addr & (~mmu_utlb[entryNo].mask));
1.200 + if( pma > 0x1C000000 ) { // Remap 1Cxx .. 1Fxx region to P4
1.201 + addr = pma | 0xE0000000;
1.202 + *paddr = addr;
1.203 + return sh4_address_space[addr>>12];
1.204 + } else {
1.205 + *paddr = pma;
1.206 + return sh4_ext_address_space[pma>>12];
1.207 + }
1.208 + }
1.209 +}
1.210 +
1.211 +/**
1.212 + * Translate a virtual to physical address for writing, raising exceptions as
1.213 + * observed.
1.214 + */
1.215 +mem_region_fn_t FASTCALL mmu_get_region_for_vma_write( sh4vma_t *paddr )
1.216 +{
1.217 + sh4vma_t addr = *paddr;
1.218 + uint32_t mmucr = MMIO_READ(MMU,MMUCR);
1.219 + if( addr & 0x80000000 ) {
1.220 + if( IS_SH4_PRIVMODE() ) {
1.221 + if( addr >= 0xE0000000 ) {
1.222 + return sh4_address_space[((uint32_t)addr)>>12]; /* P4 - passthrough */
1.223 + } else if( addr < 0xC0000000 ) {
1.224 + /* P1, P2 regions are pass-through (no translation) */
1.225 + return sh4_ext_address_space[VMA_TO_EXT_ADDR(addr)>>12];
1.226 + }
1.227 + } else {
1.228 + if( addr >= 0xE0000000 && addr < 0xE4000000 &&
1.229 + ((mmucr&MMUCR_SQMD) == 0) ) {
1.230 + /* Conditional user-mode access to the store-queue (no translation) */
1.231 + return &p4_region_storequeue;
1.232 + }
1.233 + sh4_raise_exception(EXC_DATA_ADDR_WRITE);
1.234 + return NULL;
1.235 + }
1.236 + }
1.237 +
1.238 + if( (mmucr & MMUCR_AT) == 0 ) {
1.239 + return sh4_ext_address_space[VMA_TO_EXT_ADDR(addr)>>12];
1.240 + }
1.241 +
1.242 + /* If we get this far, translation is required */
1.243 + int entryNo;
1.244 + if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
1.245 + entryNo = mmu_utlb_lookup_vpn_asid( addr );
1.246 + } else {
1.247 + entryNo = mmu_utlb_lookup_vpn( addr );
1.248 + }
1.249 +
1.250 + switch(entryNo) {
1.251 + case -1:
1.252 + RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE,addr);
1.253 + return NULL;
1.254 + case -2:
1.255 + RAISE_TLB_MULTIHIT_ERROR(addr);
1.256 + return NULL;
1.257 + default:
1.258 + if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
1.259 + : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
1.260 + /* protection violation */
1.261 + RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE,addr);
1.262 + return NULL;
1.263 + }
1.264 +
1.265 + if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
1.266 + RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, addr);
1.267 + return NULL;
1.268 + }
1.269 +
1.270 + /* finally generate the target address */
1.271 + sh4addr_t pma = (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
1.272 + (addr & (~mmu_utlb[entryNo].mask));
1.273 + if( pma > 0x1C000000 ) { // Remap 1Cxx .. 1Fxx region to P4
1.274 + addr = pma | 0xE0000000;
1.275 + *paddr = addr;
1.276 + return sh4_address_space[addr>>12];
1.277 + } else {
1.278 + *paddr = pma;
1.279 + return sh4_ext_address_space[pma>>12];
1.280 + }
1.281 + }
1.282 +}
1.283 +
1.284 +
1.285 +
1.286 /********************** TLB Direct-Access Regions ***************************/
1.287 #define ITLB_ENTRY(addr) ((addr>>7)&0x03)
1.288
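
A concrete pass-through example may help when reading the three functions above. Assuming VMA_TO_EXT_ADDR() masks the VMA down to the 29-bit external address (as the SH-4 memory map implies), a privileged P1 read reduces to a direct table lookup; the numbers below are illustrative only. Note that sh4_ext_address_space normally aliases the global ext_address_space, unless a client such as shadow.c has swapped in its own table via mmu_set_ext_address_space().

/* Illustrative only: privileged read from the P1 region, MMU bypassed. */
sh4vma_t  vma = 0x8C001234;                        /* P1: 0x80000000..0x9FFFFFFF */
sh4addr_t ext = vma & 0x1FFFFFFF;                  /* VMA_TO_EXT_ADDR -> 0x0C001234 (assumed mask) */
mem_region_fn_t fn = sh4_ext_address_space[ ext >> 12 ];   /* page index 0x0C001 */
int32_t value = fn->read_long( ext );
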
2.1 --- a/src/sh4/mmu.h Mon Feb 13 19:59:19 2012 +1000
2.2 +++ b/src/sh4/mmu.h Mon Feb 13 20:00:27 2012 +1000
2.3 @@ -106,8 +106,18 @@
2.4 mem_region_fn_t tlb_multihit;
2.5 };
2.6
2.7 +/** Set the MMU's target external address space
2.8 + * @return the previous address space.
2.9 + */
2.10 +mem_region_fn_t *mmu_set_ext_address_space( mem_region_fn_t *space );
2.11 +
2.12 +/* Address translation functions */
2.13 sh4addr_t FASTCALL mmu_vma_to_phys_disasm( sh4vma_t vma );
2.14 +mem_region_fn_t FASTCALL mmu_get_region_for_vma_read( sh4vma_t *addr );
2.15 +mem_region_fn_t FASTCALL mmu_get_region_for_vma_write( sh4vma_t *addr );
2.16 +mem_region_fn_t FASTCALL mmu_get_region_for_vma_prefetch( sh4vma_t *addr );
2.17
2.18 +/* Translator provided helpers */
2.19 void mmu_utlb_init_vtable( struct utlb_entry *ent, struct utlb_page_entry *page, gboolean writable );
2.20 void mmu_utlb_1k_init_vtable( struct utlb_1k_entry *ent );
2.21 void mmu_utlb_init_storequeue_vtable( struct utlb_entry *ent, struct utlb_page_entry *page );
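
Since the setter returns the table it replaces, callers can interpose an external address space temporarily and restore it afterwards. A minimal usage sketch: my_region_table is hypothetical, with the same shape as the global ext_address_space (one mem_region_fn_t per 4K page of the 29-bit external space).

/* Minimal sketch: interpose a custom external address space, then restore.
 * my_region_table is hypothetical and not part of this changeset. */
mem_region_fn_t *saved = mmu_set_ext_address_space( my_region_table );
/* ... run with the interposed table; the setter rebuilds the TLB-disabled
 *     mappings internally via mmu_set_tlb_enabled() ... */
mmu_set_ext_address_space( saved );                /* put the original back */
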
3.1 --- a/src/sh4/sh4core.in Mon Feb 13 19:59:19 2012 +1000
3.2 +++ b/src/sh4/sh4core.in Mon Feb 13 20:00:27 2012 +1000
3.3 @@ -162,28 +162,14 @@
3.4 #define ADDRSPACE (IS_SH4_PRIVMODE() ? sh4_address_space : sh4_user_address_space)
3.5 #define SQADDRSPACE (IS_SH4_PRIVMODE() ? storequeue_address_space : storequeue_user_address_space)
3.6
3.7 -#ifdef HAVE_FRAME_ADDRESS
3.8 -static FASTCALL __attribute__((noinline)) void *__first_arg(void *a, void *b) { return a; }
3.9 -#define INIT_EXCEPTIONS(label) goto *__first_arg(&&fnstart,&&label); fnstart:
3.10 -#define MEM_READ_BYTE( addr, val ) val = ((mem_read_exc_fn_t)ADDRSPACE[(addr)>>12]->read_byte)((addr), &&except)
3.11 -#define MEM_READ_BYTE_FOR_WRITE( addr, val ) val = ((mem_read_exc_fn_t)ADDRSPACE[(addr)>>12]->read_byte_for_write)((addr), &&except)
3.12 -#define MEM_READ_WORD( addr, val ) val = ((mem_read_exc_fn_t)ADDRSPACE[(addr)>>12]->read_word)((addr), &&except)
3.13 -#define MEM_READ_LONG( addr, val ) val = ((mem_read_exc_fn_t)ADDRSPACE[(addr)>>12]->read_long)((addr), &&except)
3.14 -#define MEM_WRITE_BYTE( addr, val ) ((mem_write_exc_fn_t)ADDRSPACE[(addr)>>12]->write_byte)((addr), (val), &&except)
3.15 -#define MEM_WRITE_WORD( addr, val ) ((mem_write_exc_fn_t)ADDRSPACE[(addr)>>12]->write_word)((addr), (val), &&except)
3.16 -#define MEM_WRITE_LONG( addr, val ) ((mem_write_exc_fn_t)ADDRSPACE[(addr)>>12]->write_long)((addr), (val), &&except)
3.17 -#define MEM_PREFETCH( addr ) ((mem_prefetch_exc_fn_t)ADDRSPACE[(addr)>>12]->prefetch)((addr), &&except)
3.18 -#else
3.19 -#define INIT_EXCEPTIONS(label)
3.20 -#define MEM_READ_BYTE( addr, val ) val = ADDRSPACE[(addr)>>12]->read_byte(addr)
3.21 -#define MEM_READ_BYTE_FOR_WRITE( addr, val ) val = ADDRSPACE[(addr)>>12]->read_byte_for_write(addr)
3.22 -#define MEM_READ_WORD( addr, val ) val = ADDRSPACE[(addr)>>12]->read_word(addr)
3.23 -#define MEM_READ_LONG( addr, val ) val = ADDRSPACE[(addr)>>12]->read_long(addr)
3.24 -#define MEM_WRITE_BYTE( addr, val ) ADDRSPACE[(addr)>>12]->write_byte(addr, val)
3.25 -#define MEM_WRITE_WORD( addr, val ) ADDRSPACE[(addr)>>12]->write_word(addr, val)
3.26 -#define MEM_WRITE_LONG( addr, val ) ADDRSPACE[(addr)>>12]->write_long(addr, val)
3.27 -#define MEM_PREFETCH( addr ) ADDRSPACE[(addr)>>12]->prefetch(addr)
3.28 -#endif
3.29 +#define MEM_READ_BYTE( addr, val ) addrtmp = addr; if( (fntmp = mmu_get_region_for_vma_read(&addrtmp)) == NULL ) { sh4r.in_delay_slot = 0; return TRUE; } else { val = fntmp->read_byte(addrtmp); }
3.30 +#define MEM_READ_BYTE_FOR_WRITE( addr, val ) addrtmp = addr; if( (fntmp = mmu_get_region_for_vma_write(&addrtmp)) == NULL ) { sh4r.in_delay_slot = 0; return TRUE; } else { val = fntmp->read_byte_for_write(addrtmp); }
3.31 +#define MEM_READ_WORD( addr, val ) addrtmp = addr; if( (fntmp = mmu_get_region_for_vma_read(&addrtmp)) == NULL ) { sh4r.in_delay_slot = 0; return TRUE; } else { val = fntmp->read_word(addrtmp); }
3.32 +#define MEM_READ_LONG( addr, val ) addrtmp = addr; if( (fntmp = mmu_get_region_for_vma_read(&addrtmp)) == NULL ) { sh4r.in_delay_slot = 0; return TRUE; } else { val = fntmp->read_long(addrtmp); }
3.33 +#define MEM_WRITE_BYTE( addr, val ) addrtmp = addr; if( (fntmp = mmu_get_region_for_vma_write(&addrtmp)) == NULL ) { sh4r.in_delay_slot = 0; return TRUE; } else { fntmp->write_byte(addrtmp,val); }
3.34 +#define MEM_WRITE_WORD( addr, val ) addrtmp = addr; if( (fntmp = mmu_get_region_for_vma_write(&addrtmp)) == NULL ) { sh4r.in_delay_slot = 0; return TRUE; } else { fntmp->write_word(addrtmp,val); }
3.35 +#define MEM_WRITE_LONG( addr, val ) addrtmp = addr; if( (fntmp = mmu_get_region_for_vma_write(&addrtmp)) == NULL ) { sh4r.in_delay_slot = 0; return TRUE; } else { fntmp->write_long(addrtmp,val); }
3.36 +#define MEM_PREFETCH( addr ) addrtmp = addr; if( (fntmp = mmu_get_region_for_vma_prefetch(&addrtmp)) == NULL ) { sh4r.in_delay_slot = 0; return TRUE; } else { fntmp->prefetch(addrtmp); }
3.37
3.38 #define FP_WIDTH (IS_FPU_DOUBLESIZE() ? 8 : 4)
3.39
3.40 @@ -346,10 +332,10 @@
3.41 uint32_t tmp;
3.42 float ftmp;
3.43 double dtmp;
3.44 - int64_t memtmp; // temporary holder for memory reads
3.45 + sh4addr_t addrtmp; // temporary holder for memory addresses
3.46 + mem_region_fn_t fntmp;
3.47 +
3.48
3.49 - INIT_EXCEPTIONS(except)
3.50 -
3.51 #define R0 sh4r.r[0]
3.52 pc = sh4r.pc;
3.53 if( pc > 0xFFFFFF00 ) {
3.54 @@ -1306,7 +1292,6 @@
3.55 sh4r.pc = sh4r.new_pc;
3.56 sh4r.new_pc += 2;
3.57
3.58 -except:
3.59 sh4r.in_delay_slot = 0;
3.60 return TRUE;
3.61 }
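
With the macros reduced to the form above, an instruction handler body changes very little: only the two scratch locals (addrtmp, fntmp) and the NULL check matter. The fragment below is an illustrative expansion, not taken from sh4core.in; handle_mov_l() and the register numbers are invented for the example.

/* Illustrative expansion of a load handler using the reworked macro.
 * handle_mov_l() is not an lxdream function. */
static gboolean handle_mov_l( int Rm, int Rn )
{
    sh4addr_t addrtmp;              /* scratch locals, as declared in sh4_execute_instruction() */
    mem_region_fn_t fntmp;
    MEM_READ_LONG( sh4r.r[Rm], sh4r.r[Rn] );  /* early-returns TRUE if the MMU raised an exception */
    return TRUE;
}
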
4.1 --- a/src/sh4/shadow.c Mon Feb 13 19:59:19 2012 +1000
4.2 +++ b/src/sh4/shadow.c Mon Feb 13 20:00:27 2012 +1000
4.3 @@ -23,18 +23,12 @@
4.4
4.5 #include "clock.h"
4.6 #include "mem.h"
4.7 +#include "mmio.h"
4.8 #include "sh4/sh4.h"
4.9 #include "sh4/sh4core.h"
4.10 #include "sh4/sh4trans.h"
4.11 #include "sh4/mmu.h"
4.12
4.13 -#ifdef HAVE_FRAME_ADDRESS
4.14 -static FASTCALL __attribute__((noinline)) void *__first_arg(void *a, void *b) { return a; }
4.15 -#define INIT_EXCEPTIONS(label) goto *__first_arg(&&fnstart,&&label); fnstart:
4.16 -#else
4.17 -#define INIT_EXCEPTIONS(label)
4.18 -#endif
4.19 -
4.20 typedef enum {
4.21 READ_LONG,
4.22 WRITE_LONG,
4.23 @@ -53,13 +47,17 @@
4.24 MemOp op;
4.25 sh4addr_t addr;
4.26 uint32_t value;
4.27 - sh4addr_t exception_pc;
4.28 };
4.29
4.30 static struct sh4_registers shadow_sh4r;
4.31 -static struct mem_region_fn **log_address_space;
4.32 -static struct mem_region_fn **check_address_space;
4.33 -static struct mem_region_fn **real_address_space;
4.34 +static struct mem_region_fn **shadow_address_space;
4.35 +static struct mem_region_fn **p4_address_space;
4.36 +
4.37 +typedef enum {
4.38 + SHADOW_LOG,
4.39 + SHADOW_CHECK
4.40 +} shadow_mode_t;
4.41 +static shadow_mode_t shadow_address_mode = SHADOW_LOG;
4.42
4.43 #define MEM_LOG_SIZE 4096
4.44 static struct mem_log_entry *mem_log;
4.45 @@ -68,7 +66,7 @@
4.46
4.47 #define IS_STORE_QUEUE(X) (((X)&0xFC000000) == 0xE0000000)
4.48
4.49 -static void log_mem_op( MemOp op, sh4addr_t addr, uint32_t value, int exception )
4.50 +static void log_mem_op( MemOp op, sh4addr_t addr, uint32_t value )
4.51 {
4.52 if( mem_log_posn == mem_log_size ) {
4.53 struct mem_log_entry *tmp = realloc(mem_log, mem_log_size * sizeof(struct mem_log_entry) * 2);
4.54 @@ -79,11 +77,6 @@
4.55 mem_log[mem_log_posn].op = op;
4.56 mem_log[mem_log_posn].addr = addr;
4.57 mem_log[mem_log_posn].value = value;
4.58 - if( exception ) {
4.59 - mem_log[mem_log_posn].exception_pc = sh4r.pc;
4.60 - } else {
4.61 - mem_log[mem_log_posn].exception_pc = -1;
4.62 - }
4.63 mem_log_posn++;
4.64 }
4.65
4.66 @@ -103,7 +96,7 @@
4.67 }
4.68 }
4.69
4.70 -static int32_t check_mem_op( MemOp op, sh4addr_t addr, uint32_t value, int *exception )
4.71 +static int32_t check_mem_op( MemOp op, sh4addr_t addr, uint32_t value )
4.72 {
4.73 if( mem_check_posn >= mem_log_posn ) {
4.74 fprintf( stderr, "Unexpected interpreter memory operation: " );
4.75 @@ -121,14 +114,6 @@
4.76 print_mem_op(stderr, op, addr, value );
4.77 abort();
4.78 }
4.79 -
4.80 - if( mem_log[mem_check_posn].exception_pc != -1 ) {
4.81 - sh4_reraise_exception(mem_log[mem_check_posn].exception_pc);
4.82 - *exception = 1;
4.83 - } else {
4.84 - *exception = 0;
4.85 - }
4.86 -
4.87 return mem_log[mem_check_posn++].value;
4.88 }
4.89
4.90 @@ -201,202 +186,117 @@
4.91 return isgood;
4.92 }
4.93
4.94 -static FASTCALL int32_t log_read_long( sh4addr_t addr, void *exc )
4.95 +static mem_region_fn_t real_region( sh4addr_t addr )
4.96 {
4.97 - INIT_EXCEPTIONS(except);
4.98 - int32_t rv = ((mem_read_exc_fn_t)real_address_space[addr>>12]->read_long)(addr, &&except);
4.99 - log_mem_op( READ_LONG, addr, rv, 0 );
4.100 - return rv;
4.101 -except:
4.102 - log_mem_op( READ_LONG, addr, rv, 1 );
4.103 - SH4_EXCEPTION_EXIT();
4.104 + if( addr >= 0xE0000000 )
4.105 + return p4_address_space[VMA_TO_EXT_ADDR(addr)>>12];
4.106 + else
4.107 + return ext_address_space[VMA_TO_EXT_ADDR(addr)>>12];
4.108 }
4.109
4.110 -static FASTCALL int32_t log_read_word( sh4addr_t addr, void *exc )
4.111 +static FASTCALL int32_t shadow_read_long( sh4addr_t addr )
4.112 {
4.113 - INIT_EXCEPTIONS(except);
4.114 - int32_t rv = ((mem_read_exc_fn_t)real_address_space[addr>>12]->read_word)(addr, &&except);
4.115 - log_mem_op( READ_WORD, addr, rv, 0 );
4.116 - return rv;
4.117 -except:
4.118 - log_mem_op( READ_WORD, addr, rv, 1 );
4.119 - SH4_EXCEPTION_EXIT();
4.120 -}
4.121 -
4.122 -static FASTCALL int32_t log_read_byte( sh4addr_t addr, void *exc )
4.123 -{
4.124 - INIT_EXCEPTIONS(except);
4.125 - int32_t rv = ((mem_read_exc_fn_t)real_address_space[addr>>12]->read_byte)(addr, &&except);
4.126 - log_mem_op( READ_BYTE, addr, rv, 0 );
4.127 - return rv;
4.128 -except:
4.129 - log_mem_op( READ_BYTE, addr, rv, 1 );
4.130 - SH4_EXCEPTION_EXIT();
4.131 -}
4.132 -
4.133 -static FASTCALL int32_t log_read_byte_for_write( sh4addr_t addr, void *exc )
4.134 -{
4.135 - INIT_EXCEPTIONS(except);
4.136 - int32_t rv = ((mem_read_exc_fn_t)real_address_space[addr>>12]->read_byte_for_write)(addr, &&except);
4.137 - log_mem_op( READ_BYTE_FOR_WRITE, addr, rv, 0 );
4.138 - return rv;
4.139 -except:
4.140 - log_mem_op( READ_BYTE_FOR_WRITE, addr, rv, 1 );
4.141 - SH4_EXCEPTION_EXIT();
4.142 -}
4.143 -
4.144 -static FASTCALL void log_write_long( sh4addr_t addr, uint32_t val, void *exc )
4.145 -{
4.146 - INIT_EXCEPTIONS(except);
4.147 - ((mem_write_exc_fn_t)real_address_space[addr>>12]->write_long)(addr, val, &&except);
4.148 - if( !IS_STORE_QUEUE(addr) )
4.149 - log_mem_op( WRITE_LONG, addr, val, 0 );
4.150 - return;
4.151 -except:
4.152 - if( !IS_STORE_QUEUE(addr) )
4.153 - log_mem_op( WRITE_LONG, addr, val, 1 );
4.154 - SH4_EXCEPTION_EXIT();
4.155 -}
4.156 -
4.157 -static FASTCALL void log_write_word( sh4addr_t addr, uint32_t val, void *exc )
4.158 -{
4.159 - INIT_EXCEPTIONS(except);
4.160 - ((mem_write_exc_fn_t)real_address_space[addr>>12]->write_word)(addr, val, &&except);
4.161 - if( !IS_STORE_QUEUE(addr) )
4.162 - log_mem_op( WRITE_WORD, addr, val, 0 );
4.163 - return;
4.164 -except:
4.165 - if( !IS_STORE_QUEUE(addr) )
4.166 - log_mem_op( WRITE_WORD, addr, val, 1 );
4.167 - SH4_EXCEPTION_EXIT();
4.168 -}
4.169 -
4.170 -static FASTCALL void log_write_byte( sh4addr_t addr, uint32_t val, void *exc )
4.171 -{
4.172 - INIT_EXCEPTIONS(except);
4.173 - ((mem_write_exc_fn_t)real_address_space[addr>>12]->write_byte)(addr, val, &&except);
4.174 - if( !IS_STORE_QUEUE(addr) )
4.175 - log_mem_op( WRITE_BYTE, addr, val, 0 );
4.176 - return;
4.177 -except:
4.178 - if( !IS_STORE_QUEUE(addr) )
4.179 - log_mem_op( WRITE_BYTE, addr, val, 1 );
4.180 - SH4_EXCEPTION_EXIT();
4.181 -}
4.182 -
4.183 -static FASTCALL void log_prefetch( sh4addr_t addr, void *exc )
4.184 -{
4.185 - INIT_EXCEPTIONS(except);
4.186 - ((mem_prefetch_exc_fn_t)real_address_space[addr>>12]->prefetch)(addr, &&except);
4.187 - log_mem_op( PREFETCH, addr, 0, 0 );
4.188 - return;
4.189 -except:
4.190 - log_mem_op( PREFETCH, addr, 0, 1 );
4.191 - SH4_EXCEPTION_EXIT();
4.192 -}
4.193 -
4.194 -static FASTCALL int32_t check_read_long( sh4addr_t addr, void *exc )
4.195 -{
4.196 - int except;
4.197 - int32_t value = check_mem_op( READ_LONG, addr, 0, &except );
4.198 - if( except ) {
4.199 - SH4_EXCEPTION_EXIT();
4.200 - }
4.201 - return value;
4.202 -}
4.203 -
4.204 -static FASTCALL int32_t check_read_word( sh4addr_t addr, void *exc )
4.205 -{
4.206 - int except;
4.207 - int32_t value = check_mem_op( READ_WORD, addr, 0, &except );
4.208 - if( except ) {
4.209 - SH4_EXCEPTION_EXIT();
4.210 - }
4.211 - return value;
4.212 -}
4.213 -
4.214 -static FASTCALL int32_t check_read_byte( sh4addr_t addr, void *exc )
4.215 -{
4.216 - int except;
4.217 - int32_t value = check_mem_op( READ_BYTE, addr, 0, &except );
4.218 - if( except ) {
4.219 - SH4_EXCEPTION_EXIT();
4.220 - }
4.221 - return value;
4.222 -}
4.223 -
4.224 -static FASTCALL int32_t check_read_byte_for_write( sh4addr_t addr, void *exc )
4.225 -{
4.226 - int except;
4.227 - int32_t value = check_mem_op( READ_BYTE_FOR_WRITE, addr, 0, &except );
4.228 - if( except ) {
4.229 - SH4_EXCEPTION_EXIT();
4.230 - }
4.231 - return value;
4.232 -}
4.233 -
4.234 -static FASTCALL void check_write_long( sh4addr_t addr, uint32_t value, void *exc )
4.235 -{
4.236 - if( !IS_STORE_QUEUE(addr) ) {
4.237 - int except;
4.238 - check_mem_op( WRITE_LONG, addr, value, &except );
4.239 - if( except ) {
4.240 - SH4_EXCEPTION_EXIT();
4.241 - }
4.242 + if( shadow_address_mode == SHADOW_LOG ) {
4.243 + int32_t rv = real_region(addr)->read_long(addr);
4.244 + log_mem_op( READ_LONG, addr, rv );
4.245 + return rv;
4.246 } else {
4.247 - real_address_space[addr>>12]->write_long(addr, value);
4.248 + return check_mem_op( READ_LONG, addr, 0 );
4.249 }
4.250 }
4.251
4.252 -static FASTCALL void check_write_word( sh4addr_t addr, uint32_t value, void *exc )
4.253 +static FASTCALL int32_t shadow_read_word( sh4addr_t addr )
4.254 {
4.255 - if( !IS_STORE_QUEUE(addr) ) {
4.256 - int except;
4.257 - check_mem_op( WRITE_WORD, addr, value, &except );
4.258 - if( except ) {
4.259 - SH4_EXCEPTION_EXIT();
4.260 - }
4.261 + if( shadow_address_mode == SHADOW_LOG ) {
4.262 + int32_t rv = real_region(addr)->read_word(addr);
4.263 + log_mem_op( READ_WORD, addr, rv );
4.264 + return rv;
4.265 } else {
4.266 - real_address_space[addr>>12]->write_word(addr, value);
4.267 + return check_mem_op( READ_WORD, addr, 0 );
4.268 }
4.269 }
4.270
4.271 -static FASTCALL void check_write_byte( sh4addr_t addr, uint32_t value, void *exc )
4.272 +static FASTCALL int32_t shadow_read_byte( sh4addr_t addr )
4.273 {
4.274 - if( !IS_STORE_QUEUE(addr) ){
4.275 - int except;
4.276 - check_mem_op( WRITE_BYTE, addr, value, &except );
4.277 - if( except ) {
4.278 - SH4_EXCEPTION_EXIT();
4.279 - }
4.280 + if( shadow_address_mode == SHADOW_LOG ) {
4.281 + int32_t rv = real_region(addr)->read_byte(addr);
4.282 + log_mem_op( READ_BYTE, addr, rv );
4.283 + return rv;
4.284 } else {
4.285 - real_address_space[addr>>12]->write_byte(addr, value);
4.286 + return check_mem_op( READ_BYTE, addr, 0 );
4.287 }
4.288 }
4.289
4.290 -static FASTCALL void check_prefetch( sh4addr_t addr, void *exc )
4.291 +static FASTCALL int32_t shadow_read_byte_for_write( sh4addr_t addr )
4.292 {
4.293 - int except;
4.294 - check_mem_op( PREFETCH, addr, 0, &except );
4.295 - if( except ) {
4.296 - SH4_EXCEPTION_EXIT();
4.297 + if( shadow_address_mode == SHADOW_LOG ) {
4.298 + int32_t rv = real_region(addr)->read_byte_for_write(addr);
4.299 + log_mem_op( READ_BYTE_FOR_WRITE, addr, rv );
4.300 + return rv;
4.301 + } else {
4.302 + return check_mem_op( READ_BYTE_FOR_WRITE, addr, 0 );
4.303 }
4.304 }
4.305
4.306 -struct mem_region_fn log_fns = {
4.307 - (mem_read_fn_t)log_read_long, (mem_write_fn_t)log_write_long,
4.308 - (mem_read_fn_t)log_read_word, (mem_write_fn_t)log_write_word,
4.309 - (mem_read_fn_t)log_read_byte, (mem_write_fn_t)log_write_byte,
4.310 - NULL, NULL, (mem_prefetch_fn_t)log_prefetch, (mem_read_fn_t)log_read_byte_for_write };
4.311 +static FASTCALL void shadow_write_long( sh4addr_t addr, uint32_t val )
4.312 +{
4.313 + if( shadow_address_mode == SHADOW_LOG ) {
4.314 + real_region(addr)->write_long(addr, val);
4.315 + if( !IS_STORE_QUEUE(addr) )
4.316 + log_mem_op( WRITE_LONG, addr, val );
4.317 + } else {
4.318 + if( !IS_STORE_QUEUE(addr) ) {
4.319 + check_mem_op( WRITE_LONG, addr, val );
4.320 + } else {
4.321 + real_region(addr)->write_long(addr, val);
4.322 + }
4.323 + }
4.324 +}
4.325
4.326 -struct mem_region_fn check_fns = {
4.327 - (mem_read_fn_t)check_read_long, (mem_write_fn_t)check_write_long,
4.328 - (mem_read_fn_t)check_read_word, (mem_write_fn_t)check_write_word,
4.329 - (mem_read_fn_t)check_read_byte, (mem_write_fn_t)check_write_byte,
4.330 - NULL, NULL, (mem_prefetch_fn_t)check_prefetch, (mem_read_fn_t)check_read_byte_for_write };
4.331 +static FASTCALL void shadow_write_word( sh4addr_t addr, uint32_t val )
4.332 +{
4.333 + if( shadow_address_mode == SHADOW_LOG ) {
4.334 + real_region(addr)->write_word(addr, val);
4.335 + if( !IS_STORE_QUEUE(addr) )
4.336 + log_mem_op( WRITE_WORD, addr, val );
4.337 + } else {
4.338 + if( !IS_STORE_QUEUE(addr) ) {
4.339 + check_mem_op( WRITE_WORD, addr, val );
4.340 + } else {
4.341 + real_region(addr)->write_word(addr, val);
4.342 + }
4.343 + }
4.344 +}
4.345
4.346 +static FASTCALL void shadow_write_byte( sh4addr_t addr, uint32_t val )
4.347 +{
4.348 + if( shadow_address_mode == SHADOW_LOG ) {
4.349 + real_region(addr)->write_byte(addr, val);
4.350 + if( !IS_STORE_QUEUE(addr) )
4.351 + log_mem_op( WRITE_BYTE, addr, val );
4.352 + } else {
4.353 + if( !IS_STORE_QUEUE(addr) ) {
4.354 + check_mem_op( WRITE_BYTE, addr, val );
4.355 + } else {
4.356 + real_region(addr)->write_byte(addr, val);
4.357 + }
4.358 + }
4.359 +}
4.360
4.361 -
4.362 +static FASTCALL void shadow_prefetch( sh4addr_t addr )
4.363 +{
4.364 + if( shadow_address_mode == SHADOW_LOG ) {
4.365 + real_region(addr)->prefetch(addr);
4.366 + log_mem_op( PREFETCH, addr, 0 );
4.367 + } else {
4.368 + check_mem_op( PREFETCH, addr, 0 );
4.369 + }
4.370 +}
4.371 +struct mem_region_fn shadow_fns = {
4.372 + shadow_read_long, shadow_write_long,
4.373 + shadow_read_word, shadow_write_word,
4.374 + shadow_read_byte, shadow_write_byte,
4.375 + NULL, NULL, shadow_prefetch, shadow_read_byte_for_write };
4.376
4.377 void sh4_shadow_block_begin()
4.378 {
4.379 @@ -412,7 +312,7 @@
4.380 memcpy( &temp_sh4r, &sh4r, sizeof(struct sh4_registers) );
4.381 memcpy( &sh4r, &shadow_sh4r, sizeof(struct sh4_registers) );
4.382
4.383 - sh4_address_space = check_address_space;
4.384 + shadow_address_mode = SHADOW_CHECK;
4.385 mem_check_posn = 0;
4.386 sh4r.new_pc = sh4r.pc + 2;
4.387 while( sh4r.slice_cycle < temp_sh4r.slice_cycle ) {
4.388 @@ -434,18 +334,17 @@
4.389 }
4.390 abort();
4.391 }
4.392 - sh4_address_space = real_address_space;
4.393 + shadow_address_mode = SHADOW_LOG;
4.394 }
4.395
4.396
4.397 void sh4_shadow_init()
4.398 {
4.399 - real_address_space = sh4_address_space;
4.400 - log_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 256 );
4.401 - check_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 256 );
4.402 - for( unsigned i=0; i < (256 * 4096); i++ ) {
4.403 - log_address_space[i] = &log_fns;
4.404 - check_address_space[i] = &check_fns;
4.405 + shadow_address_mode = SHADOW_LOG;
4.406 + p4_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 32 );
4.407 + shadow_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 32 );
4.408 + for( unsigned i=0; i < (32 * 4096); i++ ) {
4.409 + shadow_address_space[i] = &shadow_fns;
4.410 }
4.411
4.412 mem_log_size = MEM_LOG_SIZE;
4.413 @@ -454,6 +353,10 @@
4.414
4.415 sh4_translate_set_callbacks( sh4_shadow_block_begin, sh4_shadow_block_end );
4.416 sh4_translate_set_fastmem( FALSE );
4.417 - sh4_translate_set_address_space( log_address_space, log_address_space );
4.418 + memcpy( p4_address_space, sh4_address_space + (0xE0000000>>LXDREAM_PAGE_BITS),
4.419 + sizeof(mem_region_fn_t) * (0x20000000>>LXDREAM_PAGE_BITS) );
4.420 + memcpy( sh4_address_space + (0xE0000000>>LXDREAM_PAGE_BITS), shadow_address_space,
4.421 + sizeof(mem_region_fn_t) * (0x20000000>>LXDREAM_PAGE_BITS) );
4.422 + mmu_set_ext_address_space(shadow_address_space);
4.423 }
4.424
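
Taken together, the shadow rework replaces the separate log/check address spaces with a single shadow_fns table plus a mode flag. The outline below is a rough, simplified sketch of the per-block cycle, not code from this changeset: run_translated_block() is a placeholder, and the initial memcpy mirrors what sh4_shadow_block_begin() is expected to do.

/* Rough outline only; see sh4_shadow_block_begin()/sh4_shadow_block_end()
 * for the real logic.  run_translated_block() is a placeholder. */
static void shadow_block_cycle(void)
{
    /* 1. Translated run in SHADOW_LOG mode: shadow_fns (installed both as the
     *    MMU's external address space and over the P4 region) forwards each
     *    post-MMU memory op to the real region and appends it to mem_log. */
    memcpy( &shadow_sh4r, &sh4r, sizeof(struct sh4_registers) );
    run_translated_block();

    /* 2. Interpreter replay in SHADOW_CHECK mode: restore the saved register
     *    state, re-execute the block, and compare each memory op against the
     *    log instead of touching hardware (store-queue writes excepted). */
    shadow_address_mode = SHADOW_CHECK;
    mem_check_posn = 0;
    /* ... interpreter loop and register comparison, as in sh4_shadow_block_end() ... */
    shadow_address_mode = SHADOW_LOG;
}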