changeset   971:886e1ec8447d
parent      970:44d62d0850c8
child       972:fb948057cf08
author      nkeynes
date        Thu Jan 22 02:58:13 2009 +0000 (15 years ago)
Fix 1k-entry allocation
Break asid remap into two passes for simplicity
src/sh4/cache.c
src/sh4/mmu.c
src/sh4/sh4.c
src/sh4/sh4core.h
1.1 --- a/src/sh4/cache.c Thu Jan 22 00:50:09 2009 +0000
1.2 +++ b/src/sh4/cache.c Thu Jan 22 02:58:13 2009 +0000
1.3 @@ -21,6 +21,7 @@
1.4 #include "dream.h"
1.5 #include "mem.h"
1.6 #include "mmio.h"
1.7 +#include "clock.h"
1.8 #include "sh4/sh4core.h"
1.9 #include "sh4/sh4mmio.h"
1.10 #include "sh4/xltcache.h"
1.11 @@ -43,13 +44,22 @@
1.12
1.13
1.14 static struct cache_line ccn_icache[ICACHE_ENTRY_COUNT];
1.15 -static struct cache_line ccn_ocache[OCACHE_ENTRY_COUNT];
1.16 +struct cache_line ccn_ocache[OCACHE_ENTRY_COUNT];
1.17 static unsigned char ccn_icache_data[ICACHE_ENTRY_COUNT*32];
1.18 -static unsigned char ccn_ocache_data[OCACHE_ENTRY_COUNT*32];
1.19 +unsigned char ccn_ocache_data[OCACHE_ENTRY_COUNT*32];
1.20
1.21
1.22 /*********************** General module requirements ********************/
1.23
1.24 +void CCN_reset()
1.25 +{
1.26 + /* Clear everything for consistency */
1.27 + memset( ccn_icache, 0, sizeof(ccn_icache) );
1.28 + memset( ccn_ocache, 0, sizeof(ccn_ocache) );
1.29 + memset( ccn_icache_data, 0, sizeof(ccn_icache_data) );
1.30 + memset( ccn_ocache_data, 0, sizeof(ccn_ocache_data) );
1.31 +}
1.32 +
1.33 void CCN_save_state( FILE *f )
1.34 {
1.35 fwrite( &ccn_icache, sizeof(ccn_icache), 1, f );
1.36 @@ -165,6 +175,141 @@
1.37 ocram_page1_read_burst, ocram_page1_write_burst,
1.38 unmapped_prefetch };
1.39
1.40 +/**************************** Cache functions ********************************/
1.41 +char ccn_cache_map[16 MB]; // 24 bits of address space
1.42 +
1.43 +/**
1.44 + * Load a 32-byte cache line from external memory at the given ext address.
1.45 + * @param addr external address pre-masked to 0x1FFFFFE0
1.46 + */
1.47 +sh4addr_t FASTCALL ccn_ocache_load_line( sh4addr_t addr )
1.48 +{
1.49 + int entry = addr & 0x00003FE0;
1.50 + struct cache_line *line = &ccn_ocache[entry>>5];
1.51 + char *cache_data = &ccn_ocache_data[entry];
1.52 + sh4addr_t old_addr = line->tag;
1.53 + line->tag = addr & 0x1FFFFFE0;
1.54 + char oldstate = ccn_cache_map[old_addr>>5];
1.55 + ccn_cache_map[old_addr>>5] = 0;
1.56 + ccn_cache_map[addr>>5] = CACHE_VALID;
1.57 + if( oldstate == (CACHE_VALID|CACHE_DIRTY) ) {
1.58 + // Cache line is dirty - writeback.
1.59 + ext_address_space[old_addr>>12]->write_burst(old_addr, cache_data);
1.60 + }
1.61 + ext_address_space[addr>>12]->read_burst(cache_data, addr & 0x1FFFFFE0);
1.62 + return addr;
1.63 +}
1.64 +
1.65 +/* Long read through the operand cache */
1.66 +/*
1.67 +int32_t FASTCALL ccn_ocache_read_long( sh4addr_t addr );
1.68 +int32_t FASTCALL ccn_ocache_read_word( sh4addr_t addr );
1.69 +int32_t FASTCALL ccn_ocache_read_byte( sh4addr_t addr );
1.70 +void FASTCALL ccn_ocache_write_long_copyback( sh4addr_t addr, uint32_t val );
1.71 +void FASTCALL ccn_ocache_write_word_copyback( sh4addr_t addr, uint32_t val );
1.72 +void FASTCALL ccn_ocache_write_byte_copyback( sh4addr_t addr, uint32_t val );
1.73 +
1.74 +*/
1.75 +static int32_t FASTCALL ccn_ocache_read_long( sh4addr_t addr )
1.76 +{
1.77 + addr &= 0x1FFFFFFF;
1.78 + if( (ccn_cache_map[addr>>5] & CACHE_VALID) == 0 ) {
1.79 + ccn_ocache_load_line(addr);
1.80 + }
1.81 + return *(int32_t *)&ccn_ocache_data[addr & 0x3FFF];
1.82 +}
1.83 +
1.84 +static int32_t FASTCALL ccn_ocache_read_word( sh4addr_t addr )
1.85 +{
1.86 + addr &= 0x1FFFFFFF;
1.87 + if( (ccn_cache_map[addr>>5] & CACHE_VALID) == 0 ) {
1.88 + ccn_ocache_load_line(addr);
1.89 + }
1.90 + return SIGNEXT16(*(int16_t *)&ccn_ocache_data[addr&0x3FFF]);
1.91 +}
1.92 +
1.93 +static int32_t FASTCALL ccn_ocache_read_byte( sh4addr_t addr )
1.94 +{
1.95 + addr &= 0x1FFFFFFF;
1.96 + if( (ccn_cache_map[addr>>5] & CACHE_VALID) == 0 ) {
1.97 + ccn_ocache_load_line(addr);
1.98 + }
1.99 + return SIGNEXT8(ccn_ocache_data[addr&0x3FFF]);
1.100 +}
1.101 +
1.102 +static void FASTCALL ccn_ocache_write_long_copyback( sh4addr_t addr, uint32_t value )
1.103 +{
1.104 + addr &= 0x1FFFFFFF;
1.105 + if( (ccn_cache_map[addr>>5] & CACHE_VALID) == 0 ) {
1.106 + ccn_ocache_load_line(addr);
1.107 + }
1.108 + ccn_cache_map[addr>>5] |= CACHE_DIRTY;
1.109 + *(uint32_t *)&ccn_ocache_data[addr&0x3FFF] = value;
1.110 +}
1.111 +
1.112 +static void FASTCALL ccn_ocache_write_word_copyback( sh4addr_t addr, uint32_t value )
1.113 +{
1.114 + addr &= 0x1FFFFFFF;
1.115 + if( (ccn_cache_map[addr>>5] & CACHE_VALID) == 0 ) {
1.116 + ccn_ocache_load_line(addr);
1.117 + }
1.118 + ccn_cache_map[addr>>5] |= CACHE_DIRTY;
1.119 + *(uint16_t *)&ccn_ocache_data[addr&0x3FFF] = (uint16_t)value;
1.120 +}
1.121 +
1.122 +static void FASTCALL ccn_ocache_write_byte_copyback( sh4addr_t addr, uint32_t value )
1.123 +{
1.124 + addr &= 0x1FFFFFFF;
1.125 + if( (ccn_cache_map[addr>>5] & CACHE_VALID) == 0 ) {
1.126 + ccn_ocache_load_line(addr);
1.127 + }
1.128 + ccn_cache_map[addr>>5] |= CACHE_DIRTY;
1.129 + ccn_ocache_data[addr&0x3FFF] = (uint8_t)value;
1.130 +}
1.131 +
1.132 +static void FASTCALL ccn_ocache_prefetch( sh4addr_t addr )
1.133 +{
1.134 + addr &= 0x1FFFFFFF;
1.135 + if( (ccn_cache_map[addr>>5] & CACHE_VALID) == 0 ) {
1.136 + ccn_ocache_load_line(addr);
1.137 + }
1.138 +}
1.139 +
1.140 +void FASTCALL ccn_ocache_invalidate( sh4addr_t addr )
1.141 +{
1.142 + addr &= 0x1FFFFFFF;
1.143 + ccn_cache_map[addr>>5] &= ~CACHE_VALID;
1.144 +}
1.145 +
1.146 +void FASTCALL ccn_ocache_purge( sh4addr_t addr )
1.147 +{
1.148 + addr &= 0x1FFFFFE0;
1.149 + int oldflags = ccn_cache_map[addr>>5];
1.150 + ccn_cache_map[addr>>5] &= ~CACHE_VALID;
1.151 + if( oldflags == (CACHE_VALID|CACHE_DIRTY) ) {
1.152 + char *cache_data = &ccn_ocache_data[addr & 0x3FE0];
1.153 + ext_address_space[addr>>12]->write_burst(addr, cache_data);
1.154 + }
1.155 +}
1.156 +
1.157 +void FASTCALL ccn_ocache_writeback( sh4addr_t addr )
1.158 +{
1.159 + addr &= 0x1FFFFFE0;
1.160 + if( ccn_cache_map[addr>>5] == (CACHE_VALID|CACHE_DIRTY) ) {
1.161 + ccn_cache_map[addr>>5] &= ~CACHE_DIRTY;
1.162 + char *cache_data = &ccn_ocache_data[addr & 0x3FE0];
1.163 + ext_address_space[addr>>12]->write_burst(addr, cache_data);
1.164 + }
1.165 +}
1.166 +
1.167 +struct mem_region_fn ccn_ocache_cb_region = {
1.168 + ccn_ocache_read_long, ccn_ocache_write_long_copyback,
1.169 + ccn_ocache_read_word, ccn_ocache_write_word_copyback,
1.170 + ccn_ocache_read_byte, ccn_ocache_write_byte_copyback,
1.171 + unmapped_read_burst, unmapped_write_burst,
1.172 + ccn_ocache_prefetch };
1.173 +
1.174 +
1.175 /************************** Cache direct access ******************************/
1.176
1.177 static int32_t FASTCALL ccn_icache_addr_read( sh4addr_t addr )
1.178 @@ -212,11 +357,11 @@
1.179 unmapped_read_burst, unmapped_write_burst,
1.180 unmapped_prefetch };
1.181
1.182 -
1.183 static int32_t FASTCALL ccn_ocache_addr_read( sh4addr_t addr )
1.184 {
1.185 int entry = (addr & 0x00003FE0);
1.186 - return ccn_ocache[entry>>5].tag;
1.187 + sh4addr_t tag = ccn_ocache[entry>>5].tag;
1.188 + return (tag&0x1FFFFC00) | ccn_cache_map[tag>>5];
1.189 }
1.190
1.191 static void FASTCALL ccn_ocache_addr_write( sh4addr_t addr, uint32_t val )
1.192 @@ -225,13 +370,14 @@
1.193 struct cache_line *line = &ccn_ocache[entry>>5];
1.194 if( addr & 0x08 ) { // Associative
1.195 } else {
1.196 - if( (line->tag & (CACHE_VALID|CACHE_DIRTY)) == (CACHE_VALID|CACHE_DIRTY) ) {
1.197 - unsigned char *cache_data = &ccn_ocache_data[entry&0x00003FE0];
1.198 + sh4addr_t tag = line->tag;
1.199 + if( ccn_cache_map[tag>>5] == (CACHE_VALID|CACHE_DIRTY) ) {
1.200 // Cache line is dirty - writeback.
1.201 - ext_address_space[line->tag>>12]->write_burst(line->key, cache_data);
1.202 + unsigned char *cache_data = &ccn_ocache_data[entry];
1.203 + ext_address_space[tag>>12]->write_burst(tag, cache_data);
1.204 }
1.205 - line->tag = val & 0x1FFFFC03;
1.206 - line->key = (val & 0x1FFFFC00)|(entry & 0x000003E0);
1.207 + line->tag = tag = (val & 0x1FFFFC00) | (addr & 0x3E0);
1.208 + ccn_cache_map[tag>>5] = val & 0x03;
1.209 }
1.210 }
1.211
1.212 @@ -271,12 +417,14 @@
1.213
1.214 if( reg & CCR_ICI ) { /* icache invalidate */
1.215 for( i=0; i<ICACHE_ENTRY_COUNT; i++ ) {
1.216 + ccn_icache[i].key = -1;
1.217 ccn_icache[i].tag &= ~CACHE_VALID;
1.218 }
1.219 }
1.220
1.221 if( reg & CCR_OCI ) { /* ocache invalidate */
1.222 for( i=0; i<OCACHE_ENTRY_COUNT; i++ ) {
1.223 + ccn_ocache[i].key = -1;
1.224 ccn_ocache[i].tag &= ~(CACHE_VALID|CACHE_DIRTY);
1.225 }
1.226 }
1.227 @@ -311,13 +459,55 @@
1.228
1.229 }
1.230
1.231 -/**
1.232 - * Prefetch for non-cached regions. Oddly enough, this does nothing whatsoever.
1.233 - */
1.234 +/************************** Uncached memory access ***************************/
1.235 +int32_t FASTCALL ccn_uncached_read_long( sh4addr_t addr )
1.236 +{
1.237 + sh4r.slice_cycle += (4*sh4_bus_period);
1.238 + addr &= 0x1FFFFFFF;
1.239 + return ext_address_space[addr>>12]->read_long(addr);
1.240 +}
1.241 +int32_t FASTCALL ccn_uncached_read_word( sh4addr_t addr )
1.242 +{
1.243 + sh4r.slice_cycle += (4*sh4_bus_period);
1.244 + addr &= 0x1FFFFFFF;
1.245 + return ext_address_space[addr>>12]->read_word(addr);
1.246 +}
1.247 +int32_t FASTCALL ccn_uncached_read_byte( sh4addr_t addr )
1.248 +{
1.249 + sh4r.slice_cycle += (4*sh4_bus_period);
1.250 + addr &= 0x1FFFFFFF;
1.251 + return ext_address_space[addr>>12]->read_byte(addr);
1.252 +}
1.253 +void FASTCALL ccn_uncached_write_long( sh4addr_t addr, uint32_t val )
1.254 +{
1.255 + sh4r.slice_cycle += (4*sh4_bus_period);
1.256 + addr &= 0x1FFFFFFF;
1.257 + return ext_address_space[addr>>12]->write_long(addr, val);
1.258 +}
1.259 +void FASTCALL ccn_uncached_write_word( sh4addr_t addr, uint32_t val )
1.260 +{
1.261 + sh4r.slice_cycle += (4*sh4_bus_period);
1.262 + addr &= 0x1FFFFFFF;
1.263 + return ext_address_space[addr>>12]->write_word(addr, val);
1.264 +}
1.265 +void FASTCALL ccn_uncached_write_byte( sh4addr_t addr, uint32_t val )
1.266 +{
1.267 + sh4r.slice_cycle += (4*sh4_bus_period);
1.268 + addr &= 0x1FFFFFFF;
1.269 + return ext_address_space[addr>>12]->write_byte(addr, val);
1.270 +}
1.271 void FASTCALL ccn_uncached_prefetch( sh4addr_t addr )
1.272 {
1.273 -
1.274 }
1.275 +
1.276 +struct mem_region_fn ccn_uncached_region = {
1.277 + ccn_uncached_read_long, ccn_uncached_write_long,
1.278 + ccn_uncached_read_word, ccn_uncached_write_word,
1.279 + ccn_uncached_read_byte, ccn_uncached_write_byte,
1.280 + unmapped_read_burst, unmapped_write_burst,
1.281 + ccn_uncached_prefetch };
1.282 +
1.283 +
1.284 /********************************* Store-queue *******************************/
1.285 /*
1.286 * The storequeue is strictly speaking part of the cache, but most of
1.287 @@ -358,3 +548,4 @@
1.288 sh4ptr_t src = (sh4ptr_t)&sh4r.store_queue[queue];
1.289 ext_address_space[addr>>12]->write_burst( (addr & 0x1FFFFFE0), src );
1.290 }
1.291 +
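
The copy-back operand cache added above keeps line state outside the tag word: ccn_cache_map holds one state byte (CACHE_VALID/CACHE_DIRTY) per 32-byte line of the 29-bit external address space, while the cache itself remains direct-mapped at 16KB, indexed by address bits 13:5. The standalone sketch below only illustrates that indexing arithmetic; it is not part of the changeset, and the names and the main() driver are invented for the example.

/* Sketch of the indexing used by the copy-back operand cache above.
 * Illustrative only; not lxdream code. */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t sh4addr_t;

#define OCACHE_LINE_SIZE 32          /* bytes per cache line */
#define OCACHE_ENTRIES   512         /* 16KB direct-mapped operand cache */
#define EXT_ADDR_MASK    0x1FFFFFFF  /* 29-bit external address space */

static unsigned ocache_entry( sh4addr_t addr )    /* line index: addr bits 13:5 */
{
    return (addr & 0x00003FE0) >> 5;
}

static sh4addr_t ocache_tag( sh4addr_t addr )     /* external line address */
{
    return addr & 0x1FFFFFE0;
}

static uint32_t cache_map_index( sh4addr_t addr ) /* one byte per line: 16M entries */
{
    return (addr & EXT_ADDR_MASK) >> 5;
}

int main( void )
{
    sh4addr_t addr = 0x8C0043A4 & EXT_ADDR_MASK;  /* e.g. a P1 RAM address */
    sh4addr_t alias = addr + OCACHE_ENTRIES * OCACHE_LINE_SIZE;
    printf( "entry=%u tag=%08X map=%u\n", ocache_entry(addr),
            (unsigned)ocache_tag(addr), (unsigned)cache_map_index(addr) );
    /* Addresses 16KB apart collide on the same cache entry but keep distinct
     * map indices - which is why eviction in ccn_ocache_load_line consults
     * ccn_cache_map for the line being displaced. */
    printf( "alias entry=%u map=%u\n", ocache_entry(alias),
            (unsigned)cache_map_index(alias) );
    return 0;
}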
2.1 --- a/src/sh4/mmu.c Thu Jan 22 00:50:09 2009 +0000
2.2 +++ b/src/sh4/mmu.c Thu Jan 22 02:58:13 2009 +0000
2.3 @@ -334,7 +334,7 @@
2.4 static struct utlb_1k_entry *mmu_utlb_1k_alloc()
2.5 {
2.6 assert( mmu_utlb_1k_free_index < UTLB_ENTRY_COUNT );
2.7 - struct utlb_1k_entry *entry = &mmu_utlb_1k_pages[mmu_utlb_1k_free_index++];
2.8 + struct utlb_1k_entry *entry = &mmu_utlb_1k_pages[mmu_utlb_1k_free_list[mmu_utlb_1k_free_index++]];
2.9 return entry;
2.10 }
2.11
2.12 @@ -473,35 +473,39 @@
2.13 int i;
2.14 if( IS_SV_ENABLED() ) {
2.15 for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
2.16 - if( mmu_utlb[i].flags & TLB_VALID ) {
2.17 - if( (mmu_utlb[i].flags & TLB_SHARE) == 0 ) {
2.18 - if( mmu_utlb[i].asid == mmu_asid ) { // Matches old ASID - unmap out
2.19 - if( !mmu_utlb_unmap_pages( FALSE, TRUE, mmu_utlb[i].vpn&mmu_utlb[i].mask,
2.20 - get_tlb_size_pages(mmu_utlb[i].flags) ) )
2.21 - mmu_utlb_remap_pages( FALSE, TRUE, i );
2.22 - } else if( mmu_utlb[i].asid == asid ) { // Matches new ASID - map in
2.23 - mmu_utlb_map_pages( NULL, mmu_utlb_pages[i].user_fn,
2.24 - mmu_utlb[i].vpn&mmu_utlb[i].mask,
2.25 - get_tlb_size_pages(mmu_utlb[i].flags) );
2.26 - }
2.27 - }
2.28 + if( mmu_utlb[i].asid == mmu_asid &&
2.29 + (mmu_utlb[i].flags & (TLB_VALID|TLB_SHARE)) == (TLB_VALID) ) {
2.30 + // Matches old ASID - unmap out
2.31 + if( !mmu_utlb_unmap_pages( FALSE, TRUE, mmu_utlb[i].vpn&mmu_utlb[i].mask,
2.32 + get_tlb_size_pages(mmu_utlb[i].flags) ) )
2.33 + mmu_utlb_remap_pages( FALSE, TRUE, i );
2.34 + }
2.35 + }
2.36 + for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
2.37 + if( mmu_utlb[i].asid == asid &&
2.38 + (mmu_utlb[i].flags & (TLB_VALID|TLB_SHARE)) == (TLB_VALID) ) {
2.39 + // Matches new ASID - map in
2.40 + mmu_utlb_map_pages( NULL, mmu_utlb_pages[i].user_fn,
2.41 + mmu_utlb[i].vpn&mmu_utlb[i].mask,
2.42 + get_tlb_size_pages(mmu_utlb[i].flags) );
2.43 }
2.44 }
2.45 } else {
2.46 // Remap both Priv+user pages
2.47 for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
2.48 - if( mmu_utlb[i].flags & TLB_VALID ) {
2.49 - if( (mmu_utlb[i].flags & TLB_SHARE) == 0 ) {
2.50 - if( mmu_utlb[i].asid == mmu_asid ) { // Matches old ASID - unmap out
2.51 - if( !mmu_utlb_unmap_pages( TRUE, TRUE, mmu_utlb[i].vpn&mmu_utlb[i].mask,
2.52 - get_tlb_size_pages(mmu_utlb[i].flags) ) )
2.53 - mmu_utlb_remap_pages( TRUE, TRUE, i );
2.54 - } else if( mmu_utlb[i].asid == asid ) { // Matches new ASID - map in
2.55 - mmu_utlb_map_pages( &mmu_utlb_pages[i].fn, mmu_utlb_pages[i].user_fn,
2.56 - mmu_utlb[i].vpn&mmu_utlb[i].mask,
2.57 - get_tlb_size_pages(mmu_utlb[i].flags) );
2.58 - }
2.59 - }
2.60 + if( mmu_utlb[i].asid == mmu_asid &&
2.61 + (mmu_utlb[i].flags & (TLB_VALID|TLB_SHARE)) == (TLB_VALID) ) {
2.62 + if( !mmu_utlb_unmap_pages( TRUE, TRUE, mmu_utlb[i].vpn&mmu_utlb[i].mask,
2.63 + get_tlb_size_pages(mmu_utlb[i].flags) ) )
2.64 + mmu_utlb_remap_pages( TRUE, TRUE, i );
2.65 + }
2.66 + }
2.67 + for( i=0; i<UTLB_ENTRY_COUNT; i++ ) {
2.68 + if( mmu_utlb[i].asid == asid &&
2.69 + (mmu_utlb[i].flags & (TLB_VALID|TLB_SHARE)) == (TLB_VALID) ) {
2.70 + mmu_utlb_map_pages( &mmu_utlb_pages[i].fn, mmu_utlb_pages[i].user_fn,
2.71 + mmu_utlb[i].vpn&mmu_utlb[i].mask,
2.72 + get_tlb_size_pages(mmu_utlb[i].flags) );
2.73 }
2.74 }
2.75 }
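
The first hunk above is the "1k-entry allocation" fix from the commit message: the allocator was indexing mmu_utlb_1k_pages directly with the free-list cursor instead of looking up which page index the free list actually holds at that position, so once pages had been freed and reallocated out of order it could hand out a page that was still in use. A minimal sketch of the intended free-list pattern follows; the names and the pool/free routines are hypothetical stand-ins, not lxdream's structures.

/* Minimal index-based free-list allocator, illustrating the fix above.
 * Hypothetical names; not lxdream code. */
#include <assert.h>
#include <stdio.h>

#define POOL_SIZE 64

static int pool[POOL_SIZE];       /* stand-in for mmu_utlb_1k_pages */
static int free_list[POOL_SIZE];  /* indices of free pool slots */
static int free_index = 0;        /* cursor into free_list */

static void pool_init( void )
{
    int i;
    for( i = 0; i < POOL_SIZE; i++ )
        free_list[i] = i;
    free_index = 0;
}

/* Correct: hand out the slot number stored in the free list, not the cursor. */
static int *pool_alloc( void )
{
    assert( free_index < POOL_SIZE );
    return &pool[ free_list[free_index++] ];
}

static void pool_free( int *entry )
{
    assert( free_index > 0 );
    free_list[--free_index] = (int)(entry - pool);
}

int main( void )
{
    pool_init();
    int *a = pool_alloc();        /* slot 0 */
    int *b = pool_alloc();        /* slot 1 */
    pool_free( a );               /* free_list[1] now records slot 0 */
    int *c = pool_alloc();        /* reuses slot 0; the old pool[free_index++]
                                     form would have returned slot 1 == b */
    printf( "b=slot %d, c=slot %d\n", (int)(b - pool), (int)(c - pool) );
    return 0;
}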
3.1 --- a/src/sh4/sh4.c Thu Jan 22 00:50:09 2009 +0000
3.2 +++ b/src/sh4/sh4.c Thu Jan 22 02:58:13 2009 +0000
3.3 @@ -119,6 +119,7 @@
3.4 PMM_reset();
3.5 TMU_reset();
3.6 SCIF_reset();
3.7 + CCN_reset();
3.8 MMU_reset();
3.9 }
3.10
3.11 @@ -205,7 +206,8 @@
3.12 }
3.13 }
3.14 #endif
3.15 - if( exit_code != CORE_EXIT_EXCEPTION ) {
3.16 + if( exit_code != CORE_EXIT_EXCEPTION &&
3.17 + exit_code != CORE_EXIT_BREAKPOINT ) {
3.18 sh4_finalize_instruction();
3.19 }
3.20 // longjmp back into sh4_run_slice
3.21 @@ -350,6 +352,17 @@
3.22 return sh4r.sr;
3.23 }
3.24
3.25 +void sh4_update_exception_readtowrite( void )
3.26 +{
3.27 + int exc = MMIO_READ( MMU, EXPEVT );
3.28 + if( exc == EXC_TLB_MISS_READ ) {
3.29 + MMIO_WRITE( MMU, EXPEVT, EXC_TLB_MISS_WRITE );
3.30 + } else if( exc == EXC_DATA_ADDR_READ ) {
3.31 + MMIO_WRITE( MMU, EXPEVT, EXC_DATA_ADDR_WRITE );
3.32 + }
3.33 +}
3.34 +
3.35 +
3.36 /**
3.37 * Raise a CPU reset exception with the specified exception code.
3.38 */
4.1 --- a/src/sh4/sh4core.h Thu Jan 22 00:50:09 2009 +0000
4.2 +++ b/src/sh4/sh4core.h Thu Jan 22 02:58:13 2009 +0000
4.3 @@ -242,6 +242,13 @@
4.4 void FASTCALL sh4_accept_interrupt( void );
4.5
4.6 /**
4.7 + * Convert a TLB miss or data addr read exception to a write exception
4.8 + * by updating EXPEVT. (used for instructions like AND.B that are
4.9 + * documented to raise write exceptions if the target isn't readable)
4.10 + */
4.11 +void sh4_update_exception_readtowrite( void );
4.12 +
4.13 +/**
4.14 * Complete the current instruction as part of a core exit. Prevents the
4.15 * system from being left in an inconsistent state when an exit is
4.16 * triggered during a memory write.
4.17 @@ -299,7 +306,7 @@
4.18 extern struct mem_region_fn p4_region_ocache_addr;
4.19 extern struct mem_region_fn p4_region_ocache_data;
4.20
4.21 -
4.22 +#define OC_ENABLED 1
4.23
4.24 #ifdef __cplusplus
4.25 }