revision 911:2f6ba75b84d1
parent 910:661367ef919d
child  912:c5606ea44232
author nkeynes
date   Fri Oct 31 02:57:59 2008 +0000
Declare mem_copy_* functions as FASTCALL
Split sh4_flush_store_queue into TLB/non-TLB versions, and optimize
slightly based on that
files:
  src/mem.h
  src/sh4/mmu.c
  src/sh4/sh4core.h
  src/sh4/sh4mem.c
  src/sh4/sh4x86.in
  src/test/testsh4x86.c
--- a/src/mem.h	Fri Oct 31 01:07:44 2008 +0000
+++ b/src/mem.h	Fri Oct 31 02:57:59 2008 +0000
@@ -66,8 +66,8 @@
 void mem_set_trace( const gchar *tracelist, int flag );
 void mem_init( void );
 void mem_reset( void );
-void mem_copy_from_sh4( sh4ptr_t dest, sh4addr_t src, size_t count );
-void mem_copy_to_sh4( sh4addr_t dest, sh4ptr_t src, size_t count );
+void FASTCALL mem_copy_from_sh4( sh4ptr_t dest, sh4addr_t src, size_t count );
+void FASTCALL mem_copy_to_sh4( sh4addr_t dest, sh4ptr_t src, size_t count );
 
 /**
  * Write a long value directly to SH4-addressable memory.
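For context, FASTCALL here is lxdream's calling-convention macro, not a standard C keyword. A minimal sketch of how such a macro is typically defined, assuming GCC on 32-bit x86 (the actual definition in lxdream's headers may differ):

#ifndef FASTCALL
#if defined(__GNUC__) && defined(__i386__)
/* Pass the first three integer/pointer arguments in registers
 * (EAX, EDX, ECX) instead of on the stack. */
#define FASTCALL __attribute__((regparm(3)))
#else
#define FASTCALL   /* no-op where the convention doesn't apply */
#endif
#endif

Because regparm changes the ABI, every declaration, definition, and stub of mem_copy_* has to carry the attribute consistently, which is why src/mem.h, src/sh4/sh4mem.c, and src/test/testsh4x86.c all change together in this commit.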
--- a/src/sh4/mmu.c	Fri Oct 31 01:07:44 2008 +0000
+++ b/src/sh4/mmu.c	Fri Oct 31 02:57:59 2008 +0000
@@ -920,48 +920,54 @@
     }
 }
 
-gboolean FASTCALL sh4_flush_store_queue( sh4addr_t addr )
+void FASTCALL sh4_flush_store_queue( sh4addr_t addr )
+{
+    int queue = (addr&0x20)>>2;
+    uint32_t hi = MMIO_READ( MMU, QACR0 + (queue>>1)) << 24;
+    sh4ptr_t src = (sh4ptr_t)&sh4r.store_queue[queue];
+    sh4addr_t target = (addr&0x03FFFFE0) | hi;
+    mem_copy_to_sh4( target, src, 32 );
+}
+
+gboolean FASTCALL sh4_flush_store_queue_mmu( sh4addr_t addr )
 {
     uint32_t mmucr = MMIO_READ(MMU,MMUCR);
     int queue = (addr&0x20)>>2;
     sh4ptr_t src = (sh4ptr_t)&sh4r.store_queue[queue];
     sh4addr_t target;
     /* Store queue operation */
-    if( mmucr & MMUCR_AT ) {
-        int entryNo;
-        if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
-            entryNo = mmu_utlb_lookup_vpn_asid( addr );
-        } else {
-            entryNo = mmu_utlb_lookup_vpn( addr );
-        }
-        switch(entryNo) {
-        case -1:
-            MMU_TLB_WRITE_MISS_ERROR(addr);
-            return FALSE;
-        case -2:
-            MMU_TLB_MULTI_HIT_ERROR(addr);
-            return FALSE;
-        default:
-            if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
-                : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
-                /* protection violation */
-                MMU_TLB_WRITE_PROT_ERROR(addr);
-                return FALSE;
-            }
 
-        if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
-            MMU_TLB_INITIAL_WRITE_ERROR(addr);
-            return FALSE;
-        }
+    int entryNo;
+    if( ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE() ) {
+        entryNo = mmu_utlb_lookup_vpn_asid( addr );
+    } else {
+        entryNo = mmu_utlb_lookup_vpn( addr );
+    }
+    switch(entryNo) {
+    case -1:
+        MMU_TLB_WRITE_MISS_ERROR(addr);
+        return FALSE;
+    case -2:
+        MMU_TLB_MULTI_HIT_ERROR(addr);
+        return FALSE;
+    default:
+        if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
+            : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
+            /* protection violation */
+            MMU_TLB_WRITE_PROT_ERROR(addr);
+            return FALSE;
+        }
 
-        /* finally generate the target address */
-        target = ((mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
-                  (addr & (~mmu_utlb[entryNo].mask))) & 0xFFFFFFE0;
-        }
-    } else {
-        uint32_t hi = (MMIO_READ( MMU, (queue == 0 ? QACR0 : QACR1) ) & 0x1C) << 24;
-        target = (addr&0x03FFFFE0) | hi;
+        if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
+            MMU_TLB_INITIAL_WRITE_ERROR(addr);
+            return FALSE;
+        }
+
+        /* finally generate the target address */
+        target = ((mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
+                  (addr & (~mmu_utlb[entryNo].mask))) & 0xFFFFFFE0;
     }
+
     mem_copy_to_sh4( target, src, 32 );
     return TRUE;
 }
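The new non-TLB path packs a fair amount into two lines of address arithmetic. Here is a standalone sketch of that computation, assuming (as the code above does) that sh4r.store_queue holds 32-bit words with 8 words per queue and that QACR1 sits 4 bytes after QACR0, so queue>>1 yields the right register offset; sq_target and its plain qacr0/qacr1 parameters are hypothetical stand-ins for the MMIO_READ calls:

#include <assert.h>
#include <stdint.h>

static uint32_t sq_target( uint32_t addr, uint32_t qacr0, uint32_t qacr1 )
{
    int queue = (addr & 0x20) >> 2;               /* 0 for SQ0, 8 for SQ1 */
    uint32_t qacr = (queue >> 1) ? qacr1 : qacr0; /* queue>>1 is 0 or 4 */
    return (addr & 0x03FFFFE0) | (qacr << 24);    /* QACR bits land in A[28:26] */
}

int main( void )
{
    /* SQ0 write at 0xE0000040 with QACR0 = 0x10 targets 0x10000040 */
    assert( sq_target( 0xE0000040, 0x10, 0x00 ) == 0x10000040 );
    /* address bit 5 set selects SQ1, so QACR1 supplies the high bits */
    assert( sq_target( 0xE0000060, 0x00, 0x04 ) == 0x04000060 );
    return 0;
}

Note that, unlike the removed code, the new path reads QACR without the & 0x1C mask, so it relies on the register write handler having stored only the valid area bits.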
--- a/src/sh4/sh4core.h	Fri Oct 31 01:07:44 2008 +0000
+++ b/src/sh4/sh4core.h	Fri Oct 31 02:57:59 2008 +0000
@@ -212,7 +212,8 @@
 void FASTCALL sh4_write_word( sh4addr_t addr, uint32_t val );
 void FASTCALL sh4_write_byte( sh4addr_t addr, uint32_t val );
 int32_t sh4_read_phys_word( sh4addr_t addr );
-gboolean FASTCALL sh4_flush_store_queue( sh4addr_t addr );
+void FASTCALL sh4_flush_store_queue( sh4addr_t addr );
+gboolean FASTCALL sh4_flush_store_queue_mmu( sh4addr_t addr );
 
 /* SH4 Exceptions */
 #define EXC_POWER_RESET  0x000	/* reset vector */
--- a/src/sh4/sh4mem.c	Fri Oct 31 01:07:44 2008 +0000
+++ b/src/sh4/sh4mem.c	Fri Oct 31 02:57:59 2008 +0000
@@ -400,7 +400,7 @@
 /* FIXME: Handle all the many special cases when the range doesn't fall cleanly
  * into the same memory block
  */
-void mem_copy_from_sh4( sh4ptr_t dest, sh4addr_t srcaddr, size_t count ) {
+void FASTCALL mem_copy_from_sh4( sh4ptr_t dest, sh4addr_t srcaddr, size_t count ) {
     if( srcaddr >= 0x04000000 && srcaddr < 0x05000000 ) {
         pvr2_vram64_read( dest, srcaddr, count );
     } else {
@@ -413,7 +413,7 @@
     }
 }
 
-void mem_copy_to_sh4( sh4addr_t destaddr, sh4ptr_t src, size_t count ) {
+void FASTCALL mem_copy_to_sh4( sh4addr_t destaddr, sh4ptr_t src, size_t count ) {
     if( destaddr >= 0x10000000 && destaddr < 0x14000000 ) {
         pvr2_dma_write( destaddr, src, count );
         return;
--- a/src/sh4/sh4x86.in	Fri Oct 31 01:07:44 2008 +0000
+++ b/src/sh4/sh4x86.in	Fri Oct 31 02:57:59 2008 +0000
@@ -411,16 +411,6 @@
     assert( IS_IN_ICACHE(pc) );
     ir = *(uint16_t *)GET_ICACHE_PTR(pc);
 
-    /* PC is not in the current icache - this usually means we're running
-     * with MMU on, and we've gone past the end of the page. And since
-     * sh4_translate_block is pretty careful about this, it means we're
-     * almost certainly in a delay slot.
-     *
-     * Since we can't assume the page is present (and we can't fault it in
-     * at this point, inline a call to sh4_execute_instruction (with a few
-     * small repairs to cope with the different environment).
-     */
-
     if( !sh4_x86.in_delay_slot ) {
         sh4_translate_add_recovery( (pc - sh4_x86.block_start_pc)>>1 );
     }
@@ -444,7 +434,7 @@
 ADDC Rm, Rn {:
     COUNT_INST(I_ADDC);
     if( sh4_x86.tstate != TSTATE_C ) {
-	LDC_t();
+        LDC_t();
     }
     load_reg( R_EAX, Rm );
     load_reg( R_ECX, Rn );
@@ -2567,9 +2557,13 @@
     AND_imm32_r32( 0xFC000000, R_ECX );
     CMP_imm32_r32( 0xE0000000, R_ECX );
     JNE_rel8(end);
-    call_func1( sh4_flush_store_queue, R_EAX );
-    TEST_r32_r32( R_EAX, R_EAX );
-    JE_exc(-1);
+    if( sh4_x86.tlb_on ) {
+        call_func1( sh4_flush_store_queue_mmu, R_EAX );
+        TEST_r32_r32( R_EAX, R_EAX );
+        JE_exc(-1);
+    } else {
+        call_func1( sh4_flush_store_queue, R_EAX );
+    }
     JMP_TARGET(end);
     sh4_x86.tstate = TSTATE_NONE;
:}
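The if( sh4_x86.tlb_on ) test above runs at translation time, not in the generated code, so each translated block contains only one of the two call sequences. In C terms, the sequence emitted for a store-queue PREF is roughly equivalent to this sketch, where raise_exception() is a hypothetical stand-in for the JE_exc(-1) exception path:

/* addr has already been range-checked by the AND/CMP/JNE above */
if( tlb_on ) {
    if( !sh4_flush_store_queue_mmu( addr ) )
        raise_exception();             /* TLB miss or protection fault */
} else {
    sh4_flush_store_queue( addr );     /* cannot fault, so no result test */
}

Since the non-TLB version now returns void and cannot fail, the TEST/JE_exc pair disappears from the common MMU-off case, which is presumably the slight optimization the commit message mentions.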
--- a/src/test/testsh4x86.c	Fri Oct 31 01:07:44 2008 +0000
+++ b/src/test/testsh4x86.c	Fri Oct 31 02:57:59 2008 +0000
@@ -94,7 +94,7 @@
 void FASTCALL sh4_ftrv( float *fv ) { }
 void FASTCALL signsat48(void) { }
 void sh4_switch_fr_banks() { }
-void mem_copy_to_sh4( sh4addr_t addr, sh4ptr_t src, size_t size ) { }
+void FASTCALL mem_copy_to_sh4( sh4addr_t addr, sh4ptr_t src, size_t size ) { }
 gboolean sh4_has_page( sh4vma_t vma ) { return TRUE; }
 void syscall_invoke( uint32_t val ) { }
 void dreamcast_stop() {}