changeset 946:d41ee7994db7 lxdream-mem
parent 945:787729653236
child 947:aa80962d6439
author nkeynes
date Tue Jan 06 01:58:08 2009 +0000
branch lxdream-mem
Fully integrate the SQ with the new address space code by adding an additional
'prefetch' memory accessor. The TLB path is utterly untested, but non-TLB at least still works.
src/mem.c
src/mem.h
src/mmio.h
src/sh4/cache.c
src/sh4/mmu.c
src/sh4/mmu.h
src/sh4/mmux86.c
src/sh4/sh4core.h
src/sh4/sh4core.in
src/sh4/sh4x86.in
1.1 --- a/src/mem.c Mon Jan 05 04:19:46 2009 +0000
1.2 +++ b/src/mem.c Tue Jan 06 01:58:08 2009 +0000
1.3 @@ -78,11 +78,17 @@
1.4 {
1.5 }
1.6
1.7 +void FASTCALL unmapped_prefetch( sh4addr_t addr )
1.8 +{
1.9 + /* No effect */
1.10 +}
1.11 +
1.12 struct mem_region_fn mem_region_unmapped = {
1.13 unmapped_read_long, unmapped_write_long,
1.14 unmapped_read_long, unmapped_write_long,
1.15 unmapped_read_long, unmapped_write_long,
1.16 - unmapped_read_burst, unmapped_write_burst };
1.17 + unmapped_read_burst, unmapped_write_burst,
1.18 + unmapped_prefetch };
1.19
1.20 void *mem_alloc_pages( int n )
1.21 {
1.22 @@ -319,6 +325,7 @@
1.23 mem_rgn[num_mem_rgns].name = name;
1.24 mem_rgn[num_mem_rgns].mem = mem;
1.25 mem_rgn[num_mem_rgns].fn = fn;
1.26 + fn->prefetch = unmapped_prefetch;
1.27 num_mem_rgns++;
1.28
1.29 do {
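Note on the registration hook above: mem.c now forces the prefetch slot of every
externally-registered region to the no-op handler, matching the comment added to
mem.h, so individual device modules never need to know the vtable grew. A minimal
sketch (device names hypothetical) of a region that relies on this:

struct mem_region_fn my_device_fn = {
    my_device_read_long, my_device_write_long,
    my_device_read_word, my_device_write_word,
    my_device_read_byte, my_device_write_byte,
    my_device_read_burst, my_device_write_burst
    /* .prefetch deliberately left unset - it is overwritten with
     * unmapped_prefetch when the region is registered */
};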
2.1 --- a/src/mem.h Mon Jan 05 04:19:46 2009 +0000
2.2 +++ b/src/mem.h Tue Jan 06 01:58:08 2009 +0000
2.3 @@ -33,31 +33,38 @@
2.4 typedef FASTCALL void (*mem_write_fn_t)(sh4addr_t, uint32_t);
2.5 typedef FASTCALL void (*mem_read_burst_fn_t)(unsigned char *,sh4addr_t);
2.6 typedef FASTCALL void (*mem_write_burst_fn_t)(sh4addr_t,unsigned char *);
2.7 +typedef FASTCALL void (*mem_prefetch_fn_t)(sh4addr_t);
2.8
2.9 typedef FASTCALL int32_t (*mem_read_exc_fn_t)(sh4addr_t, void *);
2.10 typedef FASTCALL void (*mem_write_exc_fn_t)(sh4addr_t, uint32_t, void *);
2.11 typedef FASTCALL void (*mem_read_burst_exc_fn_t)(unsigned char *,sh4addr_t, void *);
2.12 typedef FASTCALL void (*mem_write_burst_exc_fn_t)(sh4addr_t,unsigned char *, void *);
2.13 +typedef FASTCALL void (*mem_prefetch_exc_fn_t)(sh4addr_t, void *);
2.14
2.15 /**
2.16 * Basic memory region vtable - read/write at byte, word, long, and burst
2.17 * (32-byte) sizes.
2.18 */
2.19 typedef struct mem_region_fn {
2.20 - FASTCALL int32_t (*read_long)(sh4addr_t addr);
2.21 - FASTCALL void (*write_long)(sh4addr_t addr, uint32_t val);
2.22 - FASTCALL int32_t (*read_word)(sh4addr_t addr);
2.23 - FASTCALL void (*write_word)(sh4addr_t addr, uint32_t val);
2.24 - FASTCALL int32_t (*read_byte)(sh4addr_t addr);
2.25 - FASTCALL void (*write_byte)(sh4addr_t addr, uint32_t val);
2.26 - FASTCALL void (*read_burst)(unsigned char *dest, sh4addr_t addr);
2.27 - FASTCALL void (*write_burst)(sh4addr_t addr, unsigned char *src);
2.28 + mem_read_fn_t read_long;
2.29 + mem_write_fn_t write_long;
2.30 + mem_read_fn_t read_word;
2.31 + mem_write_fn_t write_word;
2.32 + mem_read_fn_t read_byte;
2.33 + mem_write_fn_t write_byte;
2.34 + mem_read_burst_fn_t read_burst;
2.35 + mem_write_burst_fn_t write_burst;
2.36 + /* Prefetch is provided as a convenience for the SH4 - external memory
2.37 + * spaces are automatically forced to unmapped_prefetch by mem.c
2.38 + */
2.39 + mem_prefetch_fn_t prefetch;
2.40 } *mem_region_fn_t;
2.41
2.42 int32_t FASTCALL unmapped_read_long( sh4addr_t addr );
2.43 void FASTCALL unmapped_write_long( sh4addr_t addr, uint32_t val );
2.44 void FASTCALL unmapped_read_burst( unsigned char *dest, sh4addr_t addr );
2.45 void FASTCALL unmapped_write_burst( sh4addr_t addr, unsigned char *src );
2.46 +void FASTCALL unmapped_prefetch( sh4addr_t addr );
2.47 extern struct mem_region_fn mem_region_unmapped;
2.48
2.49 typedef struct mem_region {
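With the prefetch member in place, every access to a page - including PREF - goes
through one vtable lookup. A minimal dispatch sketch (do_prefetch is a hypothetical
name; ext_address_space is the 4KB-granularity page table used elsewhere in this
changeset):

void FASTCALL do_prefetch( sh4addr_t addr )
{
    /* One indirect call: external pages land in unmapped_prefetch,
     * the storequeue area lands in its own handler. */
    ext_address_space[addr >> 12]->prefetch( addr );
}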
3.1 --- a/src/mmio.h Mon Jan 05 04:19:46 2009 +0000
3.2 +++ b/src/mmio.h Tue Jan 06 01:58:08 2009 +0000
3.3 @@ -112,7 +112,7 @@
3.4 #undef MMIO_REGION_LIST_BEGIN
3.5 #undef MMIO_REGION
3.6 #undef MMIO_REGION_LIST_END
3.7 -#define MMIO_REGION_BEGIN(b,id,d) struct mmio_region mmio_region_##id = { #id, d, b, {mmio_region_##id##_read, mmio_region_##id##_write,mmio_region_##id##_read, mmio_region_##id##_write,mmio_region_##id##_read, mmio_region_##id##_write,NULL, NULL}, 0, 0, {
3.8 +#define MMIO_REGION_BEGIN(b,id,d) struct mmio_region mmio_region_##id = { #id, d, b, {mmio_region_##id##_read, mmio_region_##id##_write,mmio_region_##id##_read, mmio_region_##id##_write,mmio_region_##id##_read, mmio_region_##id##_write,NULL, NULL, unmapped_prefetch}, 0, 0, {
3.9 #define LONG_PORT( o,id,f,def,d ) { #id, d, 32, o, def, f },
3.10 #define WORD_PORT( o,id,f,def,d ) { #id, d, 16, o, def, f },
3.11 #define BYTE_PORT( o,id,f,def,d ) { #id, d, 8, o, def, f },
4.1 --- a/src/sh4/cache.c Mon Jan 05 04:19:46 2009 +0000
4.2 +++ b/src/sh4/cache.c Tue Jan 06 01:58:08 2009 +0000
4.3 @@ -24,6 +24,7 @@
4.4 #include "sh4/sh4core.h"
4.5 #include "sh4/sh4mmio.h"
4.6 #include "sh4/xltcache.h"
4.7 +#include "sh4/mmu.h"
4.8
4.9 #define OCRAM_START (0x7C000000>>LXDREAM_PAGE_BITS)
4.10 #define OCRAM_MID (0x7E000000>>LXDREAM_PAGE_BITS)
4.11 @@ -79,7 +80,6 @@
4.12 return 0;
4.13 }
4.14
4.15 -
4.16 /************************* OCRAM memory address space ************************/
4.17
4.18 #define OCRAMPAGE0 (&ccn_ocache_data[4096]) /* Lines 128-255 */
4.19 @@ -122,7 +122,8 @@
4.20 ocram_page0_read_long, ocram_page0_write_long,
4.21 ocram_page0_read_word, ocram_page0_write_word,
4.22 ocram_page0_read_byte, ocram_page0_write_byte,
4.23 - ocram_page0_read_burst, ocram_page0_write_burst };
4.24 + ocram_page0_read_burst, ocram_page0_write_burst,
4.25 + unmapped_prefetch };
4.26
4.27 static int32_t FASTCALL ocram_page1_read_long( sh4addr_t addr )
4.28 {
4.29 @@ -161,7 +162,8 @@
4.30 ocram_page1_read_long, ocram_page1_write_long,
4.31 ocram_page1_read_word, ocram_page1_write_word,
4.32 ocram_page1_read_byte, ocram_page1_write_byte,
4.33 - ocram_page1_read_burst, ocram_page1_write_burst };
4.34 + ocram_page1_read_burst, ocram_page1_write_burst,
4.35 + unmapped_prefetch };
4.36
4.37 /************************** Cache direct access ******************************/
4.38
4.39 @@ -187,7 +189,8 @@
4.40 ccn_icache_addr_read, ccn_icache_addr_write,
4.41 unmapped_read_long, unmapped_write_long,
4.42 unmapped_read_long, unmapped_write_long,
4.43 - unmapped_read_burst, unmapped_write_burst };
4.44 + unmapped_read_burst, unmapped_write_burst,
4.45 + unmapped_prefetch };
4.46
4.47
4.48 static int32_t ccn_icache_data_read( sh4addr_t addr )
4.49 @@ -206,7 +209,8 @@
4.50 ccn_icache_data_read, ccn_icache_data_write,
4.51 unmapped_read_long, unmapped_write_long,
4.52 unmapped_read_long, unmapped_write_long,
4.53 - unmapped_read_burst, unmapped_write_burst };
4.54 + unmapped_read_burst, unmapped_write_burst,
4.55 + unmapped_prefetch };
4.56
4.57
4.58 static int32_t ccn_ocache_addr_read( sh4addr_t addr )
4.59 @@ -235,7 +239,8 @@
4.60 ccn_ocache_addr_read, ccn_ocache_addr_write,
4.61 unmapped_read_long, unmapped_write_long,
4.62 unmapped_read_long, unmapped_write_long,
4.63 - unmapped_read_burst, unmapped_write_burst };
4.64 + unmapped_read_burst, unmapped_write_burst,
4.65 + unmapped_prefetch };
4.66
4.67
4.68 static int32_t ccn_ocache_data_read( sh4addr_t addr )
4.69 @@ -254,7 +259,8 @@
4.70 ccn_ocache_data_read, ccn_ocache_data_write,
4.71 unmapped_read_long, unmapped_write_long,
4.72 unmapped_read_long, unmapped_write_long,
4.73 - unmapped_read_burst, unmapped_write_burst };
4.74 + unmapped_read_burst, unmapped_write_burst,
4.75 + unmapped_prefetch };
4.76
4.77
4.78 /****************** Cache control *********************/
4.79 @@ -297,19 +303,58 @@
4.80 }
4.81 }
4.82
4.83 +/**
4.84 + * Prefetch for non-storequeue regions
4.85 + */
4.86 +void FASTCALL ccn_prefetch( sh4addr_t addr )
4.87 +{
4.88 +
4.89 +}
4.90
4.91 -/***** Store-queue (considered part of the cache by the SH7750 manual) ******/
4.92 -static void FASTCALL p4_storequeue_write_long( sh4addr_t addr, uint32_t val )
4.93 +/**
4.94 + * Prefetch for non-cached regions. Oddly enough, this does nothing whatsoever.
4.95 + */
4.96 +void FASTCALL ccn_uncached_prefetch( sh4addr_t addr )
4.97 +{
4.98 +
4.99 +}
4.100 +/********************************* Store-queue *******************************/
4.101 +/*
4.102 + * The storequeue is strictly speaking part of the cache, but most of
4.103 + * the complexity is actually around its addressing (ie in the MMU). The
4.104 + * methods here can assume we've already passed SQMD protection and the TLB
4.105 + * lookups (where appropriate).
4.106 + */
4.107 +void FASTCALL ccn_storequeue_write_long( sh4addr_t addr, uint32_t val )
4.108 {
4.109 sh4r.store_queue[(addr>>2)&0xF] = val;
4.110 }
4.111 -static int32_t FASTCALL p4_storequeue_read_long( sh4addr_t addr )
4.112 +int32_t FASTCALL ccn_storequeue_read_long( sh4addr_t addr )
4.113 {
4.114 return sh4r.store_queue[(addr>>2)&0xF];
4.115 }
4.116
4.117 -struct mem_region_fn p4_region_storequeue = {
4.118 - p4_storequeue_read_long, p4_storequeue_write_long,
4.119 - p4_storequeue_read_long, p4_storequeue_write_long,
4.120 - p4_storequeue_read_long, p4_storequeue_write_long,
4.121 - unmapped_read_burst, unmapped_write_burst }; // No burst access.
4.122 +/**
4.123 + * Variant used when tlb is disabled - address will be the original prefetch
4.124 + * address (ie 0xE0001234). Due to the way the SQ addressing is done, it can't
4.125 + * be hardcoded on 4K page boundaries, so we manually decode it here.
4.126 + */
4.127 +void FASTCALL ccn_storequeue_prefetch( sh4addr_t addr )
4.128 +{
4.129 + int queue = (addr&0x20)>>2;
4.130 + sh4ptr_t src = (sh4ptr_t)&sh4r.store_queue[queue];
4.131 + uint32_t hi = MMIO_READ( MMU, QACR0 + (queue>>1)) << 24;
4.132 + sh4addr_t target = (addr&0x03FFFFE0) | hi;
4.133 + ext_address_space[target>>12]->write_burst( target, src );
4.134 +}
4.135 +
4.136 +/**
4.137 + * Variant used when tlb is enabled - address in this case is already
4.138 + * mapped to the external target address.
4.139 + */
4.140 +void FASTCALL ccn_storequeue_prefetch_tlb( sh4addr_t addr )
4.141 +{
4.142 + int queue = (addr&0x20)>>2;
4.143 + sh4ptr_t src = (sh4ptr_t)&sh4r.store_queue[queue];
4.144 + ext_address_space[addr>>12]->write_burst( (addr & 0x1FFFFFE0), src );
4.145 +}
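A worked example of the non-TLB decode in ccn_storequeue_prefetch, assuming QACR1
holds 0x10 (the usual Dreamcast setup pointing SQ1 at the 0x10000000 area):

/*  addr   = 0xE0001234                      PREF inside the SQ window
 *  queue  = (addr & 0x20) >> 2  = 8         selects SQ1 (store_queue[8..15])
 *  hi     = QACR1 << 24         = 0x10000000
 *  target = (addr & 0x03FFFFE0) | hi
 *         = 0x00001220 | 0x10000000 = 0x10001220
 *
 *  ext_address_space[0x10001]->write_burst( 0x10001220, src ) then
 *  flushes the 32-byte queue contents to external memory. */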
5.1 --- a/src/sh4/mmu.c Mon Jan 05 04:19:46 2009 +0000
5.2 +++ b/src/sh4/mmu.c Tue Jan 06 01:58:08 2009 +0000
5.3 @@ -47,10 +47,6 @@
5.4 mem_region_fn_t *sh4_address_space;
5.5 mem_region_fn_t *sh4_user_address_space;
5.6
5.7 -/* MMU-mapped storequeue targets. Only used with TLB on */
5.8 -mem_region_fn_t *storequeue_address_space;
5.9 -mem_region_fn_t *storequeue_user_address_space;
5.10 -
5.11 /* Accessed from the UTLB accessor methods */
5.12 uint32_t mmu_urc;
5.13 uint32_t mmu_urb;
5.14 @@ -61,6 +57,7 @@
5.15 static struct utlb_page_entry mmu_utlb_pages[UTLB_ENTRY_COUNT];
5.16 static uint32_t mmu_lrui;
5.17 static uint32_t mmu_asid; // current asid
5.18 +static struct utlb_default_regions *mmu_user_storequeue_regions;
5.19
5.20 /* Structures for 1K page handling */
5.21 static struct utlb_1k_entry mmu_utlb_1k_pages[UTLB_ENTRY_COUNT];
5.22 @@ -77,7 +74,7 @@
5.23 static void mmu_register_user_mem_region( uint32_t start, uint32_t end, mem_region_fn_t fn );
5.24 static void mmu_set_tlb_enabled( int tlb_on );
5.25 static void mmu_set_tlb_asid( uint32_t asid );
5.26 -static void mmu_set_storequeue_protected( int protected );
5.27 +static void mmu_set_storequeue_protected( int protected, int tlb_on );
5.28 static gboolean mmu_utlb_map_pages( mem_region_fn_t priv_page, mem_region_fn_t user_page, sh4addr_t start_addr, int npages );
5.29 static void mmu_utlb_remap_pages( gboolean remap_priv, gboolean remap_user, int entryNo );
5.30 static gboolean mmu_utlb_unmap_pages( gboolean unmap_priv, gboolean unmap_user, sh4addr_t start_addr, int npages );
5.31 @@ -86,12 +83,23 @@
5.32 static struct utlb_1k_entry *mmu_utlb_1k_alloc();
5.33 static void mmu_utlb_1k_free( struct utlb_1k_entry *entry );
5.34
5.35 +static void FASTCALL tlb_miss_read( sh4addr_t addr, void *exc );
5.36 static int32_t FASTCALL tlb_protected_read( sh4addr_t addr, void *exc );
5.37 static void FASTCALL tlb_protected_write( sh4addr_t addr, uint32_t val, void *exc );
5.38 static void FASTCALL tlb_initial_write( sh4addr_t addr, uint32_t val, void *exc );
5.39 static uint32_t get_tlb_size_mask( uint32_t flags );
5.40 static uint32_t get_tlb_size_pages( uint32_t flags );
5.41
5.42 +#define DEFAULT_REGIONS 0
5.43 +#define DEFAULT_STOREQUEUE_REGIONS 1
5.44 +#define DEFAULT_STOREQUEUE_SQMD_REGIONS 2
5.45 +
5.46 +static struct utlb_default_regions mmu_default_regions[3] = {
5.47 + { &mem_region_tlb_miss, &mem_region_tlb_protected, &mem_region_tlb_multihit },
5.48 + { &p4_region_storequeue_miss, &p4_region_storequeue_protected, &p4_region_storequeue_multihit },
5.49 + { &p4_region_storequeue_sqmd_miss, &p4_region_storequeue_sqmd_protected, &p4_region_storequeue_sqmd_multihit } };
5.50 +
5.51 +#define IS_STOREQUEUE_PROTECTED() (mmu_user_storequeue_regions == &mmu_default_regions[DEFAULT_STOREQUEUE_SQMD_REGIONS])
5.52
5.53 /*********************** Module public functions ****************************/
5.54
5.55 @@ -104,12 +112,11 @@
5.56 {
5.57 sh4_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 256 );
5.58 sh4_user_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 256 );
5.59 - storequeue_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 4 );
5.60 - storequeue_user_address_space = mem_alloc_pages( sizeof(mem_region_fn_t) * 4 );
5.61 + mmu_user_storequeue_regions = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
5.62
5.63 mmu_set_tlb_enabled(0);
5.64 mmu_register_user_mem_region( 0x80000000, 0x00000000, &mem_region_address_error );
5.65 - mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue);
5.66 + mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
5.67
5.68 /* Setup P4 tlb/cache access regions */
5.69 mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
5.70 @@ -186,7 +193,7 @@
5.71
5.72 uint32_t mmucr = MMIO_READ(MMU,MMUCR);
5.73 mmu_set_tlb_enabled(mmucr&MMUCR_AT);
5.74 - mmu_set_storequeue_protected(mmucr&MMUCR_SQMD);
5.75 + mmu_set_storequeue_protected(mmucr&MMUCR_SQMD, mmucr&MMUCR_AT);
5.76 return 0;
5.77 }
5.78
5.79 @@ -262,7 +269,7 @@
5.80 val &= 0x00000301;
5.81 tmp = MMIO_READ( MMU, MMUCR );
5.82 if( (val ^ tmp) & (MMUCR_SQMD) ) {
5.83 - mmu_set_storequeue_protected( val & MMUCR_SQMD );
5.84 + mmu_set_storequeue_protected( val & MMUCR_SQMD, val&MMUCR_AT );
5.85 }
5.86 if( (val ^ tmp) & (MMUCR_AT) ) {
5.87 // AT flag has changed state - flush the xlt cache as all bets
5.88 @@ -387,15 +394,16 @@
5.89 mem_region_fn_t *ptr, *uptr;
5.90 int i;
5.91
5.92 + /* Reset the storequeue area */
5.93 +
5.94 if( tlb_on ) {
5.95 mmu_register_mem_region(0x00000000, 0x80000000, &mem_region_tlb_miss );
5.96 mmu_register_mem_region(0xC0000000, 0xE0000000, &mem_region_tlb_miss );
5.97 mmu_register_user_mem_region(0x00000000, 0x80000000, &mem_region_tlb_miss );
5.98 - for( i=0, ptr = storequeue_address_space, uptr = storequeue_user_address_space;
5.99 - i<0x04000000; i+= LXDREAM_PAGE_SIZE ) {
5.100 - *ptr++ = &mem_region_tlb_miss;
5.101 - *uptr++ = &mem_region_tlb_miss;
5.102 - }
5.103 +
5.104 + /* Default SQ prefetch goes to TLB miss (?) */
5.105 + mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue_miss );
5.106 + mmu_register_user_mem_region( 0xE0000000, 0xE4000000, mmu_user_storequeue_regions->tlb_miss );
5.107 mmu_utlb_register_all();
5.108 } else {
5.109 for( i=0, ptr = sh4_address_space; i<7; i++, ptr += LXDREAM_PAGE_TABLE_ENTRIES ) {
5.110 @@ -404,16 +412,45 @@
5.111 for( i=0, ptr = sh4_user_address_space; i<4; i++, ptr += LXDREAM_PAGE_TABLE_ENTRIES ) {
5.112 memcpy( ptr, ext_address_space, sizeof(mem_region_fn_t) * LXDREAM_PAGE_TABLE_ENTRIES );
5.113 }
5.114 +
5.115 + mmu_register_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
5.116 + if( IS_STOREQUEUE_PROTECTED() ) {
5.117 + mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue_sqmd );
5.118 + } else {
5.119 + mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
5.120 + }
5.121 }
5.122 +
5.123 }
5.124
5.125 -static void mmu_set_storequeue_protected( int protected )
5.126 +/**
5.127 + * Flip the SQMD switch - this is rather expensive, so will need to be changed if
5.128 + * anything expects to do this frequently.
5.129 + */
5.130 +static void mmu_set_storequeue_protected( int protected, int tlb_on )
5.131 {
5.132 + mem_region_fn_t nontlb_region;
5.133 + int i;
5.134 +
5.135 if( protected ) {
5.136 - mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &mem_region_address_error );
5.137 + mmu_user_storequeue_regions = &mmu_default_regions[DEFAULT_STOREQUEUE_SQMD_REGIONS];
5.138 + nontlb_region = &p4_region_storequeue_sqmd;
5.139 } else {
5.140 - mmu_register_user_mem_region( 0xE0000000, 0xE4000000, &p4_region_storequeue );
5.141 + mmu_user_storequeue_regions = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
5.142 + nontlb_region = &p4_region_storequeue;
5.143 }
5.144 +
5.145 + if( tlb_on ) {
5.146 + mmu_register_user_mem_region( 0xE0000000, 0xE4000000, mmu_user_storequeue_regions->tlb_miss );
5.147 + for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
5.148 + if( (mmu_utlb[i].vpn & 0xFC000000) == 0xE0000000 ) {
5.149 + mmu_utlb_insert_entry(i);
5.150 + }
5.151 + }
5.152 + } else {
5.153 + mmu_register_user_mem_region( 0xE0000000, 0xE4000000, nontlb_region );
5.154 + }
5.155 +
5.156 }
5.157
5.158 static void mmu_set_tlb_asid( uint32_t asid )
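As the comment says, the flip is expensive in the TLB-on case: beyond swapping the
default pointer, every UTLB entry whose VPN falls in the SQ window is pushed through
mmu_utlb_insert_entry again so its user-side vtable is rebuilt against the new
defaults. Both call sites therefore pass the live AT bit along with SQMD; from the
MMUCR write handler earlier in this diff:

if( (val ^ tmp) & (MMUCR_SQMD) ) {
    mmu_set_storequeue_protected( val & MMUCR_SQMD, val & MMUCR_AT );
}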
5.159 @@ -488,13 +525,16 @@
5.160 {
5.161 mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
5.162 mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
5.163 + struct utlb_default_regions *privdefs = &mmu_default_regions[DEFAULT_REGIONS];
5.164 + struct utlb_default_regions *userdefs = privdefs;
5.165 +
5.166 gboolean mapping_ok = TRUE;
5.167 int i;
5.168
5.169 if( (start_addr & 0xFC000000) == 0xE0000000 ) {
5.170 /* Storequeue mapping */
5.171 - ptr = &storequeue_address_space[(start_addr-0xE0000000) >> 12];
5.172 - uptr = &storequeue_user_address_space[(start_addr-0xE0000000) >> 12];
5.173 + privdefs = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
5.174 + userdefs = mmu_user_storequeue_regions;
5.175 } else if( (start_addr & 0xE0000000) == 0xC0000000 ) {
5.176 user_page = NULL; /* No user access to P3 region */
5.177 } else if( start_addr >= 0x80000000 ) {
5.178 @@ -518,58 +558,47 @@
5.179 }
5.180
5.181 if( priv_page != NULL ) {
5.182 - if( ent->subpages[idx] == &mem_region_tlb_miss ) {
5.183 + if( ent->subpages[idx] == privdefs->tlb_miss ) {
5.184 ent->subpages[idx] = priv_page;
5.185 } else {
5.186 mapping_ok = FALSE;
5.187 - ent->subpages[idx] = &mem_region_tlb_multihit;
5.188 + ent->subpages[idx] = privdefs->tlb_multihit;
5.189 }
5.190 }
5.191 if( user_page != NULL ) {
5.192 - if( ent->user_subpages[idx] == &mem_region_tlb_miss ) {
5.193 + if( ent->user_subpages[idx] == userdefs->tlb_miss ) {
5.194 ent->user_subpages[idx] = user_page;
5.195 } else {
5.196 mapping_ok = FALSE;
5.197 - ent->user_subpages[idx] = &mem_region_tlb_multihit;
5.198 + ent->user_subpages[idx] = userdefs->tlb_multihit;
5.199 }
5.200 }
5.201
5.202 } else {
5.203 if( priv_page != NULL ) {
5.204 - if( user_page != NULL ) {
5.205 - for( i=0; i<npages; i++ ) {
5.206 - if( *ptr == &mem_region_tlb_miss ) {
5.207 - *ptr++ = priv_page;
5.208 - *uptr++ = user_page;
5.209 - } else {
5.210 - mapping_ok = FALSE;
5.211 - *ptr++ = &mem_region_tlb_multihit;
5.212 - *uptr++ = &mem_region_tlb_multihit;
5.213 - }
5.214 - }
5.215 - } else {
5.216 - /* Privileged mapping only */
5.217 - for( i=0; i<npages; i++ ) {
5.218 - if( *ptr == &mem_region_tlb_miss ) {
5.219 - *ptr++ = priv_page;
5.220 - } else {
5.221 - mapping_ok = FALSE;
5.222 - *ptr++ = &mem_region_tlb_multihit;
5.223 - }
5.224 + /* Privileged mapping only */
5.225 + for( i=0; i<npages; i++ ) {
5.226 + if( *ptr == privdefs->tlb_miss ) {
5.227 + *ptr++ = priv_page;
5.228 + } else {
5.229 + mapping_ok = FALSE;
5.230 + *ptr++ = privdefs->tlb_multihit;
5.231 }
5.232 }
5.233 - } else if( user_page != NULL ) {
5.234 + }
5.235 + if( user_page != NULL ) {
5.236 /* User mapping only (eg ASID change remap w/ SV=1) */
5.237 for( i=0; i<npages; i++ ) {
5.238 - if( *uptr == &mem_region_tlb_miss ) {
5.239 + if( *uptr == userdefs->tlb_miss ) {
5.240 *uptr++ = user_page;
5.241 } else {
5.242 mapping_ok = FALSE;
5.243 - *uptr++ = &mem_region_tlb_multihit;
5.244 + *uptr++ = userdefs->tlb_multihit;
5.245 }
5.246 }
5.247 }
5.248 }
5.249 +
5.250 return mapping_ok;
5.251 }
5.252
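The restructured loops above make the privileged and user sides independent - which
is now necessary, since the two can hold different default regions - while the
per-page claim rule itself is unchanged. Distilled:

/* A slot is free only while it still holds its default miss handler;
 * a second claimant demotes it to the multihit region, whose accessors
 * raise the TLB multiple-hit exception. */
if( *ptr == privdefs->tlb_miss ) {
    *ptr = priv_page;                  /* first mapping wins */
} else {
    mapping_ok = FALSE;
    *ptr = privdefs->tlb_multihit;     /* overlapping UTLB entries */
}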
5.253 @@ -621,13 +650,16 @@
5.254 {
5.255 mem_region_fn_t *ptr = &sh4_address_space[start_addr >> 12];
5.256 mem_region_fn_t *uptr = &sh4_user_address_space[start_addr >> 12];
5.257 + struct utlb_default_regions *privdefs = &mmu_default_regions[DEFAULT_REGIONS];
5.258 + struct utlb_default_regions *userdefs = privdefs;
5.259 +
5.260 gboolean unmapping_ok = TRUE;
5.261 int i;
5.262
5.263 if( (start_addr & 0xFC000000) == 0xE0000000 ) {
5.264 /* Storequeue mapping */
5.265 - ptr = &storequeue_address_space[(start_addr-0xE0000000) >> 12];
5.266 - uptr = &storequeue_user_address_space[(start_addr-0xE0000000) >> 12];
5.267 + privdefs = &mmu_default_regions[DEFAULT_STOREQUEUE_REGIONS];
5.268 + userdefs = mmu_user_storequeue_regions;
5.269 } else if( (start_addr & 0xE0000000) == 0xC0000000 ) {
5.270 unmap_user = FALSE;
5.271 } else if( start_addr >= 0x80000000 ) {
5.272 @@ -638,13 +670,13 @@
5.273 assert( IS_1K_PAGE_ENTRY( *ptr ) );
5.274 struct utlb_1k_entry *ent = (struct utlb_1k_entry *)*ptr;
5.275 int i, idx = (start_addr >> 10) & 0x03, mergeable=1;
5.276 - if( ent->subpages[idx] == &mem_region_tlb_multihit ) {
5.277 + if( ent->subpages[idx] == privdefs->tlb_multihit ) {
5.278 unmapping_ok = FALSE;
5.279 }
5.280 if( unmap_priv )
5.281 - ent->subpages[idx] = &mem_region_tlb_miss;
5.282 + ent->subpages[idx] = privdefs->tlb_miss;
5.283 if( unmap_user )
5.284 - ent->user_subpages[idx] = &mem_region_tlb_miss;
5.285 + ent->user_subpages[idx] = userdefs->tlb_miss;
5.286
5.287 /* If all 4 subpages have the same content, merge them together and
5.288 * release the 1K entry
5.289 @@ -664,30 +696,21 @@
5.290 }
5.291 } else {
5.292 if( unmap_priv ) {
5.293 - if( unmap_user ) {
5.294 - for( i=0; i<npages; i++ ) {
5.295 - if( *ptr == &mem_region_tlb_multihit ) {
5.296 - unmapping_ok = FALSE;
5.297 - }
5.298 - *ptr++ = &mem_region_tlb_miss;
5.299 - *uptr++ = &mem_region_tlb_miss;
5.300 - }
5.301 - } else {
5.302 - /* Privileged (un)mapping only */
5.303 - for( i=0; i<npages; i++ ) {
5.304 - if( *ptr == &mem_region_tlb_multihit ) {
5.305 - unmapping_ok = FALSE;
5.306 - }
5.307 - *ptr++ = &mem_region_tlb_miss;
5.308 - }
5.309 - }
5.310 - } else if( unmap_user ) {
5.311 - /* User (un)mapping only */
5.312 + /* Privileged (un)mapping */
5.313 for( i=0; i<npages; i++ ) {
5.314 - if( *uptr == &mem_region_tlb_multihit ) {
5.315 + if( *ptr == privdefs->tlb_multihit ) {
5.316 unmapping_ok = FALSE;
5.317 }
5.318 - *uptr++ = &mem_region_tlb_miss;
5.319 + *ptr++ = privdefs->tlb_miss;
5.320 + }
5.321 + }
5.322 + if( unmap_user ) {
5.323 + /* User (un)mapping */
5.324 + for( i=0; i<npages; i++ ) {
5.325 + if( *uptr == userdefs->tlb_multihit ) {
5.326 + unmapping_ok = FALSE;
5.327 + }
5.328 + *uptr++ = userdefs->tlb_miss;
5.329 }
5.330 }
5.331 }
5.332 @@ -703,28 +726,47 @@
5.333 sh4addr_t start_addr = ent->vpn & ent->mask;
5.334 int npages = get_tlb_size_pages(ent->flags);
5.335
5.336 - if( (ent->flags & TLB_USERMODE) == 0 ) {
5.337 - upage = &mem_region_user_protected;
5.338 - } else {
5.339 - upage = page;
5.340 + if( (start_addr & 0xFC000000) == 0xE0000000 ) {
5.341 + /* Store queue mappings are a bit different - normal access is fixed to
5.342 + * the store queue register block, and we only map prefetches through
5.343 + * the TLB
5.344 + */
5.345 + mmu_utlb_init_storequeue_vtable( ent, &mmu_utlb_pages[entry] );
5.346 +
5.347 + if( (ent->flags & TLB_USERMODE) == 0 ) {
5.348 + upage = mmu_user_storequeue_regions->tlb_prot;
5.349 + } else if( IS_STOREQUEUE_PROTECTED() ) {
5.350 + upage = &p4_region_storequeue_sqmd;
5.351 + } else {
5.352 + upage = page;
5.353 + }
5.354 +
5.355 + } else {
5.356 +
5.357 + if( (ent->flags & TLB_USERMODE) == 0 ) {
5.358 + upage = &mem_region_tlb_protected;
5.359 + } else {
5.360 + upage = page;
5.361 + }
5.362 +
5.363 + if( (ent->flags & TLB_WRITABLE) == 0 ) {
5.364 + page->write_long = (mem_write_fn_t)tlb_protected_write;
5.365 + page->write_word = (mem_write_fn_t)tlb_protected_write;
5.366 + page->write_byte = (mem_write_fn_t)tlb_protected_write;
5.367 + page->write_burst = (mem_write_burst_fn_t)tlb_protected_write;
5.368 + mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], FALSE );
5.369 + } else if( (ent->flags & TLB_DIRTY) == 0 ) {
5.370 + page->write_long = (mem_write_fn_t)tlb_initial_write;
5.371 + page->write_word = (mem_write_fn_t)tlb_initial_write;
5.372 + page->write_byte = (mem_write_fn_t)tlb_initial_write;
5.373 + page->write_burst = (mem_write_burst_fn_t)tlb_initial_write;
5.374 + mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], FALSE );
5.375 + } else {
5.376 + mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], TRUE );
5.377 + }
5.378 }
5.379 +
5.380 mmu_utlb_pages[entry].user_fn = upage;
5.381 -
5.382 - if( (ent->flags & TLB_WRITABLE) == 0 ) {
5.383 - page->write_long = (mem_write_fn_t)tlb_protected_write;
5.384 - page->write_word = (mem_write_fn_t)tlb_protected_write;
5.385 - page->write_byte = (mem_write_fn_t)tlb_protected_write;
5.386 - page->write_burst = (mem_write_burst_fn_t)tlb_protected_write;
5.387 - mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], FALSE );
5.388 - } else if( (ent->flags & TLB_DIRTY) == 0 ) {
5.389 - page->write_long = (mem_write_fn_t)tlb_initial_write;
5.390 - page->write_word = (mem_write_fn_t)tlb_initial_write;
5.391 - page->write_byte = (mem_write_fn_t)tlb_initial_write;
5.392 - page->write_burst = (mem_write_burst_fn_t)tlb_initial_write;
5.393 - mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], FALSE );
5.394 - } else {
5.395 - mmu_utlb_init_vtable( ent, &mmu_utlb_pages[entry], TRUE );
5.396 - }
5.397
5.398 /* Is page visible? */
5.399 if( (ent->flags & TLB_SHARE) || ent->asid == mmu_asid ) {
5.400 @@ -1124,24 +1166,6 @@
5.401 }
5.402 }
5.403
5.404 -void FASTCALL sh4_flush_store_queue( sh4addr_t addr )
5.405 -{
5.406 - int queue = (addr&0x20)>>2;
5.407 - uint32_t hi = MMIO_READ( MMU, QACR0 + (queue>>1)) << 24;
5.408 - sh4ptr_t src = (sh4ptr_t)&sh4r.store_queue[queue];
5.409 - sh4addr_t target = (addr&0x03FFFFE0) | hi;
5.410 - ext_address_space[target>>12]->write_burst( target, src );
5.411 -}
5.412 -
5.413 -void FASTCALL sh4_flush_store_queue_mmu( sh4addr_t addr, void *exc )
5.414 -{
5.415 - int queue = (addr&0x20)>>2;
5.416 - sh4ptr_t src = (sh4ptr_t)&sh4r.store_queue[queue];
5.417 - sh4addr_t target;
5.418 - /* Store queue operation */
5.419 - storequeue_address_space[(addr&0x03FFFFFE0)>>12]->write_burst( addr, src);
5.420 -}
5.421 -
5.422 /********************** TLB Direct-Access Regions ***************************/
5.423 #ifdef HAVE_FRAME_ADDRESS
5.424 #define EXCEPTION_EXIT() do{ *(((void **)__builtin_frame_address(0))+1) = exc; return; } while(0)
5.425 @@ -1308,22 +1332,26 @@
5.426 mmu_itlb_addr_read, mmu_itlb_addr_write,
5.427 mmu_itlb_addr_read, mmu_itlb_addr_write,
5.428 mmu_itlb_addr_read, mmu_itlb_addr_write,
5.429 - unmapped_read_burst, unmapped_write_burst };
5.430 + unmapped_read_burst, unmapped_write_burst,
5.431 + unmapped_prefetch };
5.432 struct mem_region_fn p4_region_itlb_data = {
5.433 mmu_itlb_data_read, mmu_itlb_data_write,
5.434 mmu_itlb_data_read, mmu_itlb_data_write,
5.435 mmu_itlb_data_read, mmu_itlb_data_write,
5.436 - unmapped_read_burst, unmapped_write_burst };
5.437 + unmapped_read_burst, unmapped_write_burst,
5.438 + unmapped_prefetch };
5.439 struct mem_region_fn p4_region_utlb_addr = {
5.440 mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
5.441 mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
5.442 mmu_utlb_addr_read, (mem_write_fn_t)mmu_utlb_addr_write,
5.443 - unmapped_read_burst, unmapped_write_burst };
5.444 + unmapped_read_burst, unmapped_write_burst,
5.445 + unmapped_prefetch };
5.446 struct mem_region_fn p4_region_utlb_data = {
5.447 mmu_utlb_data_read, mmu_utlb_data_write,
5.448 mmu_utlb_data_read, mmu_utlb_data_write,
5.449 mmu_utlb_data_read, mmu_utlb_data_write,
5.450 - unmapped_read_burst, unmapped_write_burst };
5.451 + unmapped_read_burst, unmapped_write_burst,
5.452 + unmapped_prefetch };
5.453
5.454 /********************** Error regions **************************/
5.455
5.456 @@ -1417,25 +1445,92 @@
5.457 (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
5.458 (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
5.459 (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
5.460 - (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write };
5.461 + (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
5.462 + unmapped_prefetch };
5.463
5.464 struct mem_region_fn mem_region_tlb_miss = {
5.465 (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
5.466 (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
5.467 (mem_read_fn_t)tlb_miss_read, (mem_write_fn_t)tlb_miss_write,
5.468 - (mem_read_burst_fn_t)tlb_miss_read_burst, (mem_write_burst_fn_t)tlb_miss_write };
5.469 + (mem_read_burst_fn_t)tlb_miss_read_burst, (mem_write_burst_fn_t)tlb_miss_write,
5.470 + unmapped_prefetch };
5.471
5.472 -struct mem_region_fn mem_region_user_protected = {
5.473 +struct mem_region_fn mem_region_tlb_protected = {
5.474 (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
5.475 (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
5.476 (mem_read_fn_t)tlb_protected_read, (mem_write_fn_t)tlb_protected_write,
5.477 - (mem_read_burst_fn_t)tlb_protected_read_burst, (mem_write_burst_fn_t)tlb_protected_write };
5.478 + (mem_read_burst_fn_t)tlb_protected_read_burst, (mem_write_burst_fn_t)tlb_protected_write,
5.479 + unmapped_prefetch };
5.480
5.481 struct mem_region_fn mem_region_tlb_multihit = {
5.482 (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
5.483 (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
5.484 (mem_read_fn_t)tlb_multi_hit_read, (mem_write_fn_t)tlb_multi_hit_write,
5.485 - (mem_read_burst_fn_t)tlb_multi_hit_read_burst, (mem_write_burst_fn_t)tlb_multi_hit_write };
5.486 + (mem_read_burst_fn_t)tlb_multi_hit_read_burst, (mem_write_burst_fn_t)tlb_multi_hit_write,
5.487 + (mem_prefetch_fn_t)tlb_multi_hit_read };
5.488
5.489 +
5.490 +/* Store-queue regions */
5.491 +/* These are a bit of a pain - the first 8 fields are controlled by SQMD, while
5.492 + * the final (prefetch) is controlled by the actual TLB settings (plus SQMD in
5.493 + * some cases), in contrast to the ordinary fields above.
5.494 + *
5.495 + * There is probably a simpler way to do this.
5.496 + */
5.497 +
5.498 +struct mem_region_fn p4_region_storequeue = {
5.499 + ccn_storequeue_read_long, ccn_storequeue_write_long,
5.500 + unmapped_read_long, unmapped_write_long, /* TESTME: Officially only long access is supported */
5.501 + unmapped_read_long, unmapped_write_long,
5.502 + unmapped_read_burst, unmapped_write_burst,
5.503 + ccn_storequeue_prefetch };
5.504 +
5.505 +struct mem_region_fn p4_region_storequeue_miss = {
5.506 + ccn_storequeue_read_long, ccn_storequeue_write_long,
5.507 + unmapped_read_long, unmapped_write_long, /* TESTME: Officially only long access is supported */
5.508 + unmapped_read_long, unmapped_write_long,
5.509 + unmapped_read_burst, unmapped_write_burst,
5.510 + (mem_prefetch_fn_t)tlb_miss_read };
5.511 +
5.512 +struct mem_region_fn p4_region_storequeue_multihit = {
5.513 + ccn_storequeue_read_long, ccn_storequeue_write_long,
5.514 + unmapped_read_long, unmapped_write_long, /* TESTME: Officially only long access is supported */
5.515 + unmapped_read_long, unmapped_write_long,
5.516 + unmapped_read_burst, unmapped_write_burst,
5.517 + (mem_prefetch_fn_t)tlb_multi_hit_read };
5.518 +
5.519 +struct mem_region_fn p4_region_storequeue_protected = {
5.520 + ccn_storequeue_read_long, ccn_storequeue_write_long,
5.521 + unmapped_read_long, unmapped_write_long,
5.522 + unmapped_read_long, unmapped_write_long,
5.523 + unmapped_read_burst, unmapped_write_burst,
5.524 + (mem_prefetch_fn_t)tlb_protected_read };
5.525 +
5.526 +struct mem_region_fn p4_region_storequeue_sqmd = {
5.527 + (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
5.528 + (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
5.529 + (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
5.530 + (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
5.531 + (mem_prefetch_fn_t)address_error_read };
5.532
5.533 -
5.534 \ No newline at end of file
5.535 +struct mem_region_fn p4_region_storequeue_sqmd_miss = {
5.536 + (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
5.537 + (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
5.538 + (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
5.539 + (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
5.540 + (mem_prefetch_fn_t)tlb_miss_read };
5.541 +
5.542 +struct mem_region_fn p4_region_storequeue_sqmd_multihit = {
5.543 + (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
5.544 + (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
5.545 + (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
5.546 + (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
5.547 + (mem_prefetch_fn_t)tlb_multi_hit_read };
5.548 +
5.549 +struct mem_region_fn p4_region_storequeue_sqmd_protected = {
5.550 + (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
5.551 + (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
5.552 + (mem_read_fn_t)address_error_read, (mem_write_fn_t)address_error_write,
5.553 + (mem_read_burst_fn_t)address_error_read_burst, (mem_write_burst_fn_t)address_error_write,
5.554 + (mem_prefetch_fn_t)tlb_protected_read };
5.555 +
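Taken together, the eight storequeue regions form a small matrix: the first eight
slots (read/write) depend only on SQMD, while the prefetch slot carries the TLB
lookup outcome. Summarized from the definitions above:

region                                 read/write slots   prefetch slot
p4_region_storequeue                   SQ registers       ccn_storequeue_prefetch
p4_region_storequeue_miss              SQ registers       tlb_miss_read
p4_region_storequeue_protected         SQ registers       tlb_protected_read
p4_region_storequeue_multihit          SQ registers       tlb_multi_hit_read
p4_region_storequeue_sqmd              address error      address_error_read
p4_region_storequeue_sqmd_miss         address error      tlb_miss_read
p4_region_storequeue_sqmd_protected    address error      tlb_protected_read
p4_region_storequeue_sqmd_multihit     address error      tlb_multi_hit_read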
6.1 --- a/src/sh4/mmu.h Mon Jan 05 04:19:46 2009 +0000
6.2 +++ b/src/sh4/mmu.h Tue Jan 06 01:58:08 2009 +0000
6.3 @@ -87,9 +87,9 @@
6.4
6.5 struct utlb_page_entry {
6.6 struct mem_region_fn fn;
6.7 - mem_region_fn_t user_fn;
6.8 + struct mem_region_fn *user_fn;
6.9 mem_region_fn_t target;
6.10 - unsigned char code[TLB_FUNC_SIZE*8];
6.11 + unsigned char code[TLB_FUNC_SIZE*9];
6.12 };
6.13
6.14 struct utlb_1k_entry {
6.15 @@ -97,11 +97,19 @@
6.16 struct mem_region_fn user_fn;
6.17 struct mem_region_fn *subpages[4];
6.18 struct mem_region_fn *user_subpages[4];
6.19 - unsigned char code[TLB_FUNC_SIZE*16];
6.20 + unsigned char code[TLB_FUNC_SIZE*18];
6.21 };
6.22
6.23 +struct utlb_default_regions {
6.24 + mem_region_fn_t tlb_miss;
6.25 + mem_region_fn_t tlb_prot;
6.26 + mem_region_fn_t tlb_multihit;
6.27 +};
6.28 +
6.29 +
6.30 void mmu_utlb_init_vtable( struct utlb_entry *ent, struct utlb_page_entry *page, gboolean writable );
6.31 void mmu_utlb_1k_init_vtable( struct utlb_1k_entry *ent );
6.32 +void mmu_utlb_init_storequeue_vtable( struct utlb_entry *ent, struct utlb_page_entry *page );
6.33
6.34 extern uint32_t mmu_urc;
6.35 extern uint32_t mmu_urb;
6.36 @@ -114,12 +122,36 @@
6.37 extern struct mem_region_fn **sh4_address_space;
6.38 extern struct mem_region_fn **sh4_user_address_space;
6.39
6.40 -/** Store-queue (prefetch) address space (privileged and user access)
6.41 - * Page map (4KB) of the 0xE0000000..0xE4000000 region
6.42 - * Same caveats apply as for the primary address space above.
6.43 - */
6.44 -extern struct mem_region_fn **storequeue_address_space;
6.45 -extern struct mem_region_fn **storequeue_user_address_space;
6.46 +/************ Storequeue/cache functions ***********/
6.47 +void FASTCALL ccn_storequeue_write_long( sh4addr_t addr, uint32_t val );
6.48 +int32_t FASTCALL ccn_storequeue_read_long( sh4addr_t addr );
6.49 +
6.50 +/** Default storequeue prefetch when TLB is disabled */
6.51 +void FASTCALL ccn_storequeue_prefetch( sh4addr_t addr );
6.52 +
6.53 +/** TLB-enabled variant of the storequeue prefetch */
6.54 +void FASTCALL ccn_storequeue_prefetch_tlb( sh4addr_t addr );
6.55 +
6.56 +/** Non-storequeue prefetch */
6.57 +void FASTCALL ccn_prefetch( sh4addr_t addr );
6.58 +
6.59 +/** Non-cached prefetch (ie, no-op) */
6.60 +void FASTCALL ccn_uncached_prefetch( sh4addr_t addr );
6.61 +
6.62 +
6.63 +extern struct mem_region_fn mem_region_address_error;
6.64 +extern struct mem_region_fn mem_region_tlb_miss;
6.65 +extern struct mem_region_fn mem_region_tlb_multihit;
6.66 +extern struct mem_region_fn mem_region_tlb_protected;
6.67 +
6.68 +extern struct mem_region_fn p4_region_storequeue;
6.69 +extern struct mem_region_fn p4_region_storequeue_multihit;
6.70 +extern struct mem_region_fn p4_region_storequeue_miss;
6.71 +extern struct mem_region_fn p4_region_storequeue_protected;
6.72 +extern struct mem_region_fn p4_region_storequeue_sqmd;
6.73 +extern struct mem_region_fn p4_region_storequeue_sqmd_miss;
6.74 +extern struct mem_region_fn p4_region_storequeue_sqmd_multihit;
6.75 +extern struct mem_region_fn p4_region_storequeue_sqmd_protected;
6.76
6.77 #ifdef __cplusplus
6.78 }
7.1 --- a/src/sh4/mmux86.c Mon Jan 05 04:19:46 2009 +0000
7.2 +++ b/src/sh4/mmux86.c Tue Jan 06 01:58:08 2009 +0000
7.3 @@ -49,7 +49,7 @@
7.4 uint8_t **fn = (uint8_t **)ext_address_space[ppn>>12];
7.5 uint8_t **out = (uint8_t **)&page->fn;
7.6
7.7 - for( i=0; i<8; i+= inc, fn += inc, out += inc ) {
7.8 + for( i=0; i<9; i+= inc, fn += inc, out += inc ) {
7.9 *out = xlat_output;
7.10 #if SIZEOF_VOID_P == 8
7.11 MOV_imm64_r32((uintptr_t)&mmu_urc, R_EAX );
7.12 @@ -69,6 +69,25 @@
7.13 JMP_r32disp8(R_ECX, (((uintptr_t)out) - ((uintptr_t)&page->fn)) ); // 3
7.14 }
7.15 }
7.16 +
7.17 + page->fn.prefetch = unmapped_prefetch; // FIXME
7.18 +}
7.19 +
7.20 +void mmu_utlb_init_storequeue_vtable( struct utlb_entry *ent, struct utlb_page_entry *page )
7.21 +{
7.22 + uint32_t mask = ent->mask;
7.23 + uint32_t vpn = ent->vpn & mask;
7.24 + uint32_t ppn = ent->ppn & mask;
7.25 +
7.26 + xlat_output = page->code;
7.27 +
7.28 + memcpy( page, &p4_region_storequeue, sizeof(struct mem_region_fn) );
7.29 +
7.30 + /* TESTME: Does a PREF increment the URC counter? */
7.31 + page->fn.prefetch = (mem_prefetch_fn_t)xlat_output;
7.32 + ADD_imm32_r32( ppn-vpn, ARG1 );
7.33 + int rel = ((uint8_t *)ccn_storequeue_prefetch_tlb) - xlat_output;
7.34 + JMP_rel( rel );
7.35 }
7.36
7.37 void mmu_utlb_1k_init_vtable( struct utlb_1k_entry *entry )
7.38 @@ -77,7 +96,7 @@
7.39 int i;
7.40 uint8_t **out = (uint8_t **)&entry->fn;
7.41
7.42 - for( i=0; i<8; i++, out++ ) {
7.43 + for( i=0; i<9; i++, out++ ) {
7.44 *out = xlat_output;
7.45 MOV_r32_r32( ARG1, R_ECX );
7.46 SHR_imm8_r32( 10, R_ECX );
7.47 @@ -92,7 +111,7 @@
7.48 }
7.49
7.50 out = (uint8_t **)&entry->user_fn;
7.51 - for( i=0; i<8; i++, out++ ) {
7.52 + for( i=0; i<9; i++, out++ ) {
7.53 *out = xlat_output;
7.54 MOV_r32_r32( ARG1, R_ECX );
7.55 SHR_imm8_r32( 10, R_ECX );
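The storequeue variant emits a short stub instead of copying the generic accessor
code: rebase the incoming address from the virtual page to the physical one, then
tail-jump into the common C handler. In C terms the generated stub behaves like
this sketch (ppn/vpn taken from the UTLB entry):

void FASTCALL generated_sq_prefetch( sh4addr_t addr )
{
    /* ADD_imm32_r32(ppn-vpn, ARG1) followed by JMP_rel is a tail call
     * with the address already translated */
    ccn_storequeue_prefetch_tlb( addr + (ppn - vpn) );
}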
8.1 --- a/src/sh4/sh4core.h Mon Jan 05 04:19:46 2009 +0000
8.2 +++ b/src/sh4/sh4core.h Tue Jan 06 01:58:08 2009 +0000
8.3 @@ -281,7 +281,6 @@
8.4 #define FPULi (sh4r.fpul.i)
8.5
8.6 /**************** SH4 internal memory regions *****************/
8.7 -extern struct mem_region_fn p4_region_storequeue;
8.8 extern struct mem_region_fn p4_region_itlb_addr;
8.9 extern struct mem_region_fn p4_region_itlb_data;
8.10 extern struct mem_region_fn p4_region_utlb_addr;
8.11 @@ -290,10 +289,7 @@
8.12 extern struct mem_region_fn p4_region_icache_data;
8.13 extern struct mem_region_fn p4_region_ocache_addr;
8.14 extern struct mem_region_fn p4_region_ocache_data;
8.15 -extern struct mem_region_fn mem_region_address_error;
8.16 -extern struct mem_region_fn mem_region_tlb_miss;
8.17 -extern struct mem_region_fn mem_region_tlb_multihit;
8.18 -extern struct mem_region_fn mem_region_user_protected;
8.19 +
8.20
8.21
8.22 #ifdef __cplusplus
9.1 --- a/src/sh4/sh4core.in Mon Jan 05 04:19:46 2009 +0000
9.2 +++ b/src/sh4/sh4core.in Tue Jan 06 01:58:08 2009 +0000
9.3 @@ -176,6 +176,7 @@
9.4 #define MEM_WRITE_BYTE( addr, val ) ((mem_write_exc_fn_t)ADDRSPACE[(addr)>>12]->write_byte)((addr), (val), &&except)
9.5 #define MEM_WRITE_WORD( addr, val ) ((mem_write_exc_fn_t)ADDRSPACE[(addr)>>12]->write_word)((addr), (val), &&except)
9.6 #define MEM_WRITE_LONG( addr, val ) ((mem_write_exc_fn_t)ADDRSPACE[(addr)>>12]->write_long)((addr), (val), &&except)
9.7 +#define MEM_PREFETCH( addr ) ((mem_prefetch_exc_fn_t)ADDRSPACE[(addr)>>12]->prefetch)((addr), &&except)
9.8 #else
9.9 #define INIT_EXCEPTIONS(label)
9.10 #define MEM_READ_BYTE( addr, val ) val = ADDRSPACE[(addr)>>12]->read_byte(addr)
9.11 @@ -184,6 +185,7 @@
9.12 #define MEM_WRITE_BYTE( addr, val ) ADDRSPACE[(addr)>>12]->write_byte(addr, val)
9.13 #define MEM_WRITE_WORD( addr, val ) ADDRSPACE[(addr)>>12]->write_word(addr, val)
9.14 #define MEM_WRITE_LONG( addr, val ) ADDRSPACE[(addr)>>12]->write_long(addr, val)
9.15 +#define MEM_PREFETCH( addr ) ADDRSPACE[(addr)>>12]->prefetch(addr)
9.16 #endif
9.17
9.18
9.19 @@ -354,10 +356,7 @@
9.20 NOP {: /* NOP */ :}
9.21
9.22 PREF @Rn {:
9.23 - tmp = sh4r.r[Rn];
9.24 - if( (tmp & 0xFC000000) == 0xE0000000 ) {
9.25 - sh4_flush_store_queue(tmp);
9.26 - }
9.27 + MEM_PREFETCH(sh4r.r[Rn]);
9.28 :}
9.29 OCBI @Rn {: :}
9.30 OCBP @Rn {: :}
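In the interpreter, PREF thus loses its special-case range test: the vtable does the
classification. In the non-exception build the new macro expands to a single
indirect dispatch:

/* MEM_PREFETCH(sh4r.r[Rn]) becomes: */
ADDRSPACE[(sh4r.r[Rn])>>12]->prefetch(sh4r.r[Rn]);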
10.1 --- a/src/sh4/sh4x86.in Mon Jan 05 04:19:46 2009 +0000
10.2 +++ b/src/sh4/sh4x86.in Tue Jan 06 01:58:08 2009 +0000
10.3 @@ -311,6 +311,7 @@
10.4 #define MEM_WRITE_BYTE( addr_reg, value_reg ) decode_address(addr_reg); _CALL_WRITE(addr_reg, value_reg, write_byte)
10.5 #define MEM_WRITE_WORD( addr_reg, value_reg ) decode_address(addr_reg); _CALL_WRITE(addr_reg, value_reg, write_word)
10.6 #define MEM_WRITE_LONG( addr_reg, value_reg ) decode_address(addr_reg); _CALL_WRITE(addr_reg, value_reg, write_long)
10.7 +#define MEM_PREFETCH( addr_reg ) decode_address(addr_reg); _CALL_READ(addr_reg, prefetch)
10.8
10.9 #define SLOTILLEGAL() JMP_exc(EXC_SLOT_ILLEGAL); sh4_x86.in_delay_slot = DELAY_NONE; return 2;
10.10
10.11 @@ -2501,18 +2502,7 @@
10.12 PREF @Rn {:
10.13 COUNT_INST(I_PREF);
10.14 load_reg( R_EAX, Rn );
10.15 - MOV_r32_r32( R_EAX, R_ECX );
10.16 - AND_imm32_r32( 0xFC000000, R_ECX );
10.17 - CMP_imm32_r32( 0xE0000000, R_ECX );
10.18 - JNE_rel8(end);
10.19 - if( sh4_x86.tlb_on ) {
10.20 - call_func1( sh4_flush_store_queue_mmu, R_EAX );
10.21 - TEST_r32_r32( R_EAX, R_EAX );
10.22 - JE_exc(-1);
10.23 - } else {
10.24 - call_func1( sh4_flush_store_queue, R_EAX );
10.25 - }
10.26 - JMP_TARGET(end);
10.27 + MEM_PREFETCH( R_EAX );
10.28 sh4_x86.tstate = TSTATE_NONE;
10.29 :}
10.30 SLEEP {:
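The translator gets the same simplification: the emitted sequence for PREF @Rn is
now just a register load, the standard decode_address page lookup, and one indirect
call through the prefetch slot, replacing the old inline 0xE0000000 mask/compare and
the two separate flush-helper paths. In C terms the generated code is equivalent to
this sketch (the user-mode table is selected the same way as for loads and stores):

uint32_t addr = sh4r.r[Rn];
sh4_address_space[addr >> 12]->prefetch( addr );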