# HG changeset patch
# User nkeynes
# Date 1199163537 0
# Node ID 06714bc64271e56666748bedd5d127e1abea7816
# Parent  428cf85286992ad2cfdfc8584d3a892a645ba4d1
Commit first pass at full TLB support - still needs a lot more work

--- a/src/sh4/ia32abi.h	Tue Jan 01 04:56:52 2008 +0000
+++ b/src/sh4/ia32abi.h	Tue Jan 01 04:58:57 2008 +0000
@@ -153,26 +153,9 @@
 	exit_block( pc, pc );
     }
     if( sh4_x86.backpatch_posn != 0 ) {
+	unsigned int i;
+	// Raise exception
 	uint8_t *end_ptr = xlat_output;
-	// Exception termination. Jump block for various exception codes:
-	PUSH_imm32( EXC_DATA_ADDR_READ );
-	JMP_rel8( 33, target1 );
-	PUSH_imm32( EXC_DATA_ADDR_WRITE );
-	JMP_rel8( 26, target2 );
-	PUSH_imm32( EXC_ILLEGAL );
-	JMP_rel8( 19, target3 );
-	PUSH_imm32( EXC_SLOT_ILLEGAL );
-	JMP_rel8( 12, target4 );
-	PUSH_imm32( EXC_FPU_DISABLED );
-	JMP_rel8( 5, target5 );
-	PUSH_imm32( EXC_SLOT_FPU_DISABLED );
-	// target
-	JMP_TARGET(target1);
-	JMP_TARGET(target2);
-	JMP_TARGET(target3);
-	JMP_TARGET(target4);
-	JMP_TARGET(target5);
-	// Raise exception
 	load_spreg( R_ECX, REG_OFFSET(pc) );
 	ADD_r32_r32( R_EDX, R_ECX );
 	ADD_r32_r32( R_EDX, R_ECX );
@@ -188,7 +171,34 @@
 	POP_r32(R_EBP);
 	RET();
 
-	sh4_x86_do_backpatch( end_ptr );
+	// Exception already raised - just cleanup
+	uint8_t *preexc_ptr = xlat_output;
+	load_imm32( R_ECX, sh4_x86.block_start_pc );
+	ADD_r32_r32( R_EDX, R_ECX );
+	ADD_r32_r32( R_EDX, R_ECX );
+	store_spreg( R_ECX, REG_OFFSET(spc) );
+	MOV_moff32_EAX( &sh4_cpu_period );
+	MUL_r32( R_EDX );
+	ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );
+	load_spreg( R_EAX, REG_OFFSET(pc) );
+	call_func1(xlat_get_code,R_EAX);
+	POP_r32(R_EBP);
+	RET();
+
+	for( i=0; i< sh4_x86.backpatch_posn; i++ ) {
+	    *sh4_x86.backpatch_list[i].fixup_addr =
+		xlat_output - ((uint8_t *)sh4_x86.backpatch_list[i].fixup_addr) - 4;
+	    if( sh4_x86.backpatch_list[i].exc_code == -1 ) {
+		load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
+		int rel = preexc_ptr - xlat_output;
+		JMP_rel(rel);
+	    } else {
+		PUSH_imm32( sh4_x86.backpatch_list[i].exc_code );
+		load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
+		int rel = end_ptr - xlat_output;
+		JMP_rel(rel);
+	    }
+	}
     }
 }
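
(Editorial sketch, not part of the changeset.) The backpatch loop above rewrites the rel32 displacement of each previously emitted jump: an x86 rel32 is measured from the first byte *after* the 4-byte displacement field, hence the `- 4`. With hypothetical names, the arithmetic is:

    /* Sketch only: point an already-emitted jcc/jmp rel32 at `target`
     * (here, the exception stub being emitted at xlat_output). */
    static void fixup_rel32( uint32_t *fixup_addr, uint8_t *target )
    {
        /* the displacement is relative to the end of the instruction,
         * i.e. to fixup_addr + 4 */
        *fixup_addr = (uint32_t)(target - ((uint8_t *)fixup_addr + 4));
    }
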
--- a/src/sh4/ia32mac.h	Tue Jan 01 04:56:52 2008 +0000
+++ b/src/sh4/ia32mac.h	Tue Jan 01 04:58:57 2008 +0000
@@ -176,26 +176,9 @@
 	exit_block( pc, pc );
     }
     if( sh4_x86.backpatch_posn != 0 ) {
+	unsigned int i;
+	// Raise exception
 	uint8_t *end_ptr = xlat_output;
-	// Exception termination. Jump block for various exception codes:
-	PUSH_imm32( EXC_DATA_ADDR_READ );
-	JMP_rel8( 33, target1 );
-	PUSH_imm32( EXC_DATA_ADDR_WRITE );
-	JMP_rel8( 26, target2 );
-	PUSH_imm32( EXC_ILLEGAL );
-	JMP_rel8( 19, target3 );
-	PUSH_imm32( EXC_SLOT_ILLEGAL );
-	JMP_rel8( 12, target4 );
-	PUSH_imm32( EXC_FPU_DISABLED );
-	JMP_rel8( 5, target5 );
-	PUSH_imm32( EXC_SLOT_FPU_DISABLED );
-	// target
-	JMP_TARGET(target1);
-	JMP_TARGET(target2);
-	JMP_TARGET(target3);
-	JMP_TARGET(target4);
-	JMP_TARGET(target5);
-	// Raise exception
 	load_spreg( R_ECX, REG_OFFSET(pc) );
 	ADD_r32_r32( R_EDX, R_ECX );
 	ADD_r32_r32( R_EDX, R_ECX );
@@ -204,14 +187,41 @@
 	MUL_r32( R_EDX );
 	ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );
 
-	POP_r32(R_EDX);
-	call_func1( sh4_raise_exception, R_EDX );
+	POP_r32(R_EDX);
+	call_func1( sh4_raise_exception, R_EDX );
 	load_spreg( R_EAX, REG_OFFSET(pc) );
 	call_func1(xlat_get_code,R_EAX);
 	POP_r32(R_EBP);
 	RET();
 
-	sh4_x86_do_backpatch( end_ptr );
+	// Exception already raised - just cleanup
+	uint8_t *preexc_ptr = xlat_output;
+	load_imm32( R_ECX, sh4_x86.block_start_pc );
+	ADD_r32_r32( R_EDX, R_ECX );
+	ADD_r32_r32( R_EDX, R_ECX );
+	store_spreg( R_ECX, REG_OFFSET(spc) );
+	MOV_moff32_EAX( &sh4_cpu_period );
+	MUL_r32( R_EDX );
+	ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );
+	load_spreg( R_EAX, REG_OFFSET(pc) );
+	call_func1(xlat_get_code,R_EAX);
+	POP_r32(R_EBP);
+	RET();
+
+	for( i=0; i< sh4_x86.backpatch_posn; i++ ) {
+	    *sh4_x86.backpatch_list[i].fixup_addr =
+		xlat_output - ((uint8_t *)sh4_x86.backpatch_list[i].fixup_addr) - 4;
+	    if( sh4_x86.backpatch_list[i].exc_code == -1 ) {
+		load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
+		int rel = preexc_ptr - xlat_output;
+		JMP_rel(rel);
+	    } else {
+		PUSH_imm32( sh4_x86.backpatch_list[i].exc_code );
+		load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
+		int rel = end_ptr - xlat_output;
+		JMP_rel(rel);
+	    }
+	}
     }
 }
--- a/src/sh4/ia64abi.h	Tue Jan 01 04:56:52 2008 +0000
+++ b/src/sh4/ia64abi.h	Tue Jan 01 04:58:57 2008 +0000
@@ -135,7 +135,6 @@
     RET();
 }
 
-
 /**
  * Write the block trailer (exception handling block)
  */
@@ -145,26 +144,9 @@
 	exit_block( pc, pc );
     }
     if( sh4_x86.backpatch_posn != 0 ) {
+	unsigned int i;
+	// Raise exception
 	uint8_t *end_ptr = xlat_output;
-	// Exception termination. Jump block for various exception codes:
-	load_imm32( R_EDI, EXC_DATA_ADDR_READ );
-	JMP_rel8( 33, target1 );
-	load_imm32( R_EDI, EXC_DATA_ADDR_WRITE );
-	JMP_rel8( 26, target2 );
-	load_imm32( R_EDI, EXC_ILLEGAL );
-	JMP_rel8( 19, target3 );
-	load_imm32( R_EDI, EXC_SLOT_ILLEGAL );
-	JMP_rel8( 12, target4 );
-	load_imm32( R_EDI, EXC_FPU_DISABLED );
-	JMP_rel8( 5, target5 );
-	load_imm32( R_EDI, EXC_SLOT_FPU_DISABLED );
-	// target
-	JMP_TARGET(target1);
-	JMP_TARGET(target2);
-	JMP_TARGET(target3);
-	JMP_TARGET(target4);
-	JMP_TARGET(target5);
-	// Raise exception
 	load_spreg( R_ECX, REG_OFFSET(pc) );
 	ADD_r32_r32( R_EDX, R_ECX );
 	ADD_r32_r32( R_EDX, R_ECX );
@@ -179,7 +161,34 @@
 	POP_r32(R_EBP);
 	RET();
 
-	sh4_x86_do_backpatch( end_ptr );
+	// Exception already raised - just cleanup
+	uint8_t *preexc_ptr = xlat_output;
+	load_imm32( R_ECX, sh4_x86.block_start_pc );
+	ADD_r32_r32( R_EDX, R_ECX );
+	ADD_r32_r32( R_EDX, R_ECX );
+	store_spreg( R_ECX, REG_OFFSET(spc) );
+	MOV_moff32_EAX( &sh4_cpu_period );
+	MUL_r32( R_EDX );
+	ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );
+	load_spreg( R_EAX, REG_OFFSET(pc) );
+	call_func1(xlat_get_code,R_EAX);
+	POP_r32(R_EBP);
+	RET();
+
+	for( i=0; i< sh4_x86.backpatch_posn; i++ ) {
+	    *sh4_x86.backpatch_list[i].fixup_addr =
+		xlat_output - ((uint8_t *)sh4_x86.backpatch_list[i].fixup_addr) - 4;
+	    if( sh4_x86.backpatch_list[i].exc_code == -1 ) {
+		load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
+		int rel = preexc_ptr - xlat_output;
+		JMP_rel(rel);
+	    } else {
+		load_imm32( R_EDI, sh4_x86.backpatch_list[i].exc_code );
+		load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
+		int rel = end_ptr - xlat_output;
+		JMP_rel(rel);
+	    }
+	}
     }
 }
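
(Editorial sketch, assuming uniform 2-byte SH4 instructions.) In all three ABI trailers above, each backpatched jump hands an instruction count to the stub in EDX. The two ADDs of EDX recover the faulting PC (pc + 2*icount), and the MUL against sh4_cpu_period recovers the time the completed instructions consumed. In plain C, the emitted stub computes roughly:

    /* Sketch only: what the cleanup stub derives from fixup_icount. */
    static void account_partial_block( uint32_t block_start_pc, uint32_t icount )
    {
        sh4r.spc = block_start_pc + (icount << 1);   /* two ADDs of EDX == +2*icount */
        sh4r.slice_cycle += icount * sh4_cpu_period; /* MUL_r32 against sh4_cpu_period */
    }
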
--- a/src/sh4/mmu.c	Tue Jan 01 04:56:52 2008 +0000
+++ b/src/sh4/mmu.c	Tue Jan 01 04:58:57 2008 +0000
@@ -32,6 +32,7 @@
 #define TLB_VALID     0x00000100
 #define TLB_USERMODE  0x00000040
 #define TLB_WRITABLE  0x00000020
+#define TLB_USERWRITABLE (TLB_WRITABLE|TLB_USERMODE)
 #define TLB_SIZE_MASK 0x00000090
 #define TLB_SIZE_1K   0x00000000
 #define TLB_SIZE_4K   0x00000010
@@ -42,16 +43,22 @@
 #define TLB_SHARE     0x00000002
 #define TLB_WRITETHRU 0x00000001
 
+#define MASK_1K  0xFFFFFC00
+#define MASK_4K  0xFFFFF000
+#define MASK_64K 0xFFFF0000
+#define MASK_1M  0xFFF00000
 
 struct itlb_entry {
     sh4addr_t vpn; // Virtual Page Number
     uint32_t asid; // Process ID
+    uint32_t mask;
     sh4addr_t ppn; // Physical Page Number
     uint32_t flags;
 };
 
 struct utlb_entry {
     sh4addr_t vpn; // Virtual Page Number
+    uint32_t mask; // Page size mask
     uint32_t asid; // Process ID
     sh4addr_t ppn; // Physical Page Number
     uint32_t flags;
@@ -69,6 +76,16 @@
 
 static void mmu_invalidate_tlb();
 
+
+static uint32_t get_mask_for_flags( uint32_t flags )
+{
+    switch( flags & TLB_SIZE_MASK ) {
+    case TLB_SIZE_1K: return MASK_1K;
+    case TLB_SIZE_4K: return MASK_4K;
+    case TLB_SIZE_64K: return MASK_64K;
+    case TLB_SIZE_1M: return MASK_1M;
+    }
+}
+
 int32_t mmio_region_MMU_read( uint32_t reg )
 {
     switch( reg ) {
@@ -125,6 +142,9 @@
     fwrite( cache, 4096, 2, f );
     fwrite( &mmu_itlb, sizeof(mmu_itlb), 1, f );
     fwrite( &mmu_utlb, sizeof(mmu_utlb), 1, f );
+    fwrite( &mmu_urc, sizeof(mmu_urc), 1, f );
+    fwrite( &mmu_urb, sizeof(mmu_urb), 1, f );
+    fwrite( &mmu_lrui, sizeof(mmu_lrui), 1, f );
 }
 
 int MMU_load_state( FILE *f )
@@ -142,6 +162,15 @@
     if( fread( &mmu_utlb, sizeof(mmu_utlb), 1, f ) != 1 ) {
 	return 1;
     }
+    if( fread( &mmu_urc, sizeof(mmu_urc), 1, f ) != 1 ) {
+	return 1;
+    }
+    if( fread( &mmu_urb, sizeof(mmu_urb), 1, f ) != 1 ) {
+	return 1;
+    }
+    if( fread( &mmu_lrui, sizeof(mmu_lrui), 1, f ) != 1 ) {
+	return 1;
+    }
     return 0;
 }
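
(Editorial sketch, not part of the changeset.) The cached mask field is what makes variable page sizes cheap: an entry matches when the probe address and the entry's VPN agree on every bit covered by the mask, and a hit translates by splicing the physical page number onto the untranslated in-page offset — exactly the pattern repeated by the lookup and vma_to_phys functions below.

    /* Sketch only: match and translate against a single TLB entry. */
    static int entry_matches( uint32_t vpn, uint32_t addr, uint32_t mask )
    {
        return ((vpn ^ addr) & mask) == 0;    /* page bits equal under the mask */
    }
    static uint32_t entry_translate( uint32_t ppn, uint32_t addr, uint32_t mask )
    {
        return (ppn & mask) | (addr & ~mask); /* page base | in-page offset */
    }
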
@@ -177,41 +206,389 @@
     mmu_utlb[mmu_urc].ppn = MMIO_READ(MMU, PTEL) & 0x1FFFFC00;
     mmu_utlb[mmu_urc].flags = MMIO_READ(MMU, PTEL) & 0x00001FF;
     mmu_utlb[mmu_urc].pcmcia = MMIO_READ(MMU, PTEA);
+    mmu_utlb[mmu_urc].mask = get_mask_for_flags(mmu_utlb[mmu_urc].flags);
 }
 
-uint64_t mmu_translate_read( sh4addr_t addr )
+static inline void mmu_flush_pages( struct utlb_entry *ent )
 {
-    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
-    if( IS_SH4_PRIVMODE() ) {
-	switch( addr & 0xE0000000 ) {
-	case 0x80000000: case 0xA0000000:
-	    /* Non-translated read P1,P2 */
-	    break;
-	case 0xE0000000:
-	    /* Non-translated read P4 */
-	    break;
-	default:
-	    if( mmucr&MMUCR_AT ) {
-	    } else {
-		// direct read
+    unsigned int vpn;
+    switch( ent->flags & TLB_SIZE_MASK ) {
+    case TLB_SIZE_1K: xlat_flush_page( ent->vpn ); break;
+    case TLB_SIZE_4K: xlat_flush_page( ent->vpn ); break;
+    case TLB_SIZE_64K:
+	for( vpn = ent->vpn; vpn < ent->vpn + 0x10000; vpn += 0x1000 ) {
+	    xlat_flush_page( vpn );
+	}
+	break;
+    case TLB_SIZE_1M:
+	for( vpn = ent->vpn; vpn < ent->vpn + 0x100000; vpn += 0x1000 ) {
+	    xlat_flush_page( vpn );
+	}
+	break;
+    }
+}
+
+/**
+ * The translations are excessively complicated, but unfortunately it's a
+ * complicated system. It can undoubtedly be better optimized too.
+ */
+
+/**
+ * Perform the actual utlb lookup.
+ * Possible outcomes are:
+ *   0..63 Single match - good, return entry found
+ *   -1 No match - raise a tlb data miss exception
+ *   -2 Multiple matches - raise a multi-hit exception (reset)
+ * @param vpn virtual address to resolve
+ * @param asid Address space identifier
+ * @param use_asid whether to require an asid match on non-shared pages.
+ * @return the resultant UTLB entry, or an error.
+ */
+static inline int mmu_utlb_lookup_vpn( uint32_t vpn, uint32_t asid, int use_asid )
+{
+    int result = -1;
+    unsigned int i;
+
+    mmu_urc++;
+    if( mmu_urc == mmu_urb || mmu_urc == 0x40 ) {
+	mmu_urc = 0;
+    }
+
+    if( use_asid ) {
+	for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
+	    if( (mmu_utlb[i].flags & TLB_VALID) &&
+		((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
+		((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
+		if( result != -1 ) {
+		    return -2;
+		}
+		result = i;
 	    }
 	}
     } else {
-	if( addr & 0x80000000 ) {
-	    if( ((addr&0xFC000000) == 0xE0000000 ) &&
-		((mmucr&MMUCR_SQMD) == 0) ) {
-		// Store queue
-		return 0;
+	for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
+	    if( (mmu_utlb[i].flags & TLB_VALID) &&
+		((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
+		if( result != -1 ) {
+		    return -2;
+		}
+		result = i;
 	    }
-//	    MMU_READ_ADDR_ERROR();
-	}
-	if( mmucr&MMUCR_AT ) {
-	    uint32_t vpn = addr & 0xFFFFFC00;
-	    uint32_t asid = MMIO_READ(MMU,PTEH)&0xFF;
-	} else {
-	    // direct read
 	}
     }
+    return result;
+}
+
+/**
+ * Find a UTLB entry for the associative TLB write - same as the normal
+ * lookup but ignores the valid bit.
+ */
+static inline int mmu_utlb_lookup_assoc( uint32_t vpn, uint32_t asid )
+{
+    int result = -1;
+    unsigned int i;
+    for( i = 0; i < UTLB_ENTRY_COUNT; i++ ) {
+	if( ((mmu_utlb[i].flags & TLB_SHARE) || asid == mmu_utlb[i].asid) &&
+	    ((mmu_utlb[i].vpn ^ vpn) & mmu_utlb[i].mask) == 0 ) {
+	    if( result != -1 ) {
+		return -2;
+	    }
+	    result = i;
+	}
+    }
+    return result;
+}
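
(Editorial sketch, not part of the changeset.) The four lookup helpers in this hunk all share one return convention, which every caller has to decode before touching the TLB arrays:

    /* Sketch only: the contract of mmu_utlb_lookup_vpn() and friends. */
    static const char *lookup_outcome( int entryNo )
    {
        if( entryNo == -1 ) return "no match: raise a TLB miss";
        if( entryNo == -2 ) return "multiple matches: raise a multi-hit (reset)";
        return "single match: entryNo indexes the matching entry (0..63)";
    }
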
+
+/**
+ * Perform the actual itlb lookup.
+ * Possible outcomes are:
+ *   0..63 Single match - good, return entry found
+ *   -1 No match - raise a tlb data miss exception
+ *   -2 Multiple matches - raise a multi-hit exception (reset)
+ * @param vpn virtual address to resolve
+ * @param asid Address space identifier
+ * @param use_asid whether to require an asid match on non-shared pages.
+ * @return the resultant ITLB entry, or an error.
+ */
+static inline int mmu_itlb_lookup_vpn( uint32_t vpn, uint32_t asid, int use_asid )
+{
+    int result = -1;
+    unsigned int i;
+    if( use_asid ) {
+	for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
+	    if( (mmu_itlb[i].flags & TLB_VALID) &&
+		((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
+		((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
+		if( result != -1 ) {
+		    return -2;
+		}
+		result = i;
+	    }
+	}
+    } else {
+	for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
+	    if( (mmu_itlb[i].flags & TLB_VALID) &&
+		((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
+		if( result != -1 ) {
+		    return -2;
+		}
+		result = i;
+	    }
+	}
+    }
+
+    switch( result ) {
+    case 0: mmu_lrui = (mmu_lrui & 0x07); break;
+    case 1: mmu_lrui = (mmu_lrui & 0x19) | 0x20; break;
+    case 2: mmu_lrui = (mmu_lrui & 0x3E) | 0x14; break;
+    case 3: mmu_lrui = (mmu_lrui | 0x0B); break;
+    }
+
+    return result;
+}
+
+static inline int mmu_itlb_update_from_utlb( int entryNo )
+{
+    int replace;
+    /* Determine entry to replace based on lrui */
+    if( (mmu_lrui & 0x38) == 0x38 ) {
+	replace = 0;
+	mmu_lrui = mmu_lrui & 0x07;
+    } else if( (mmu_lrui & 0x26) == 0x06 ) {
+	replace = 1;
+	mmu_lrui = (mmu_lrui & 0x19) | 0x20;
+    } else if( (mmu_lrui & 0x15) == 0x01 ) {
+	replace = 2;
+	mmu_lrui = (mmu_lrui & 0x3E) | 0x14;
+    } else { // Note - gets invalid entries too
+	replace = 3;
+	mmu_lrui = (mmu_lrui | 0x0B);
+    }
+
+    mmu_itlb[replace].vpn = mmu_utlb[entryNo].vpn;
+    mmu_itlb[replace].mask = mmu_utlb[entryNo].mask;
+    mmu_itlb[replace].ppn = mmu_utlb[entryNo].ppn;
+    mmu_itlb[replace].asid = mmu_utlb[entryNo].asid;
+    mmu_itlb[replace].flags = mmu_utlb[entryNo].flags & 0x01DA;
+    return replace;
+}
+
+/**
+ * Find a ITLB entry for the associative TLB write - same as the normal
+ * lookup but ignores the valid bit.
+ */
+static inline int mmu_itlb_lookup_assoc( uint32_t vpn, uint32_t asid )
+{
+    int result = -1;
+    unsigned int i;
+    for( i = 0; i < ITLB_ENTRY_COUNT; i++ ) {
+	if( ((mmu_itlb[i].flags & TLB_SHARE) || asid == mmu_itlb[i].asid) &&
+	    ((mmu_itlb[i].vpn ^ vpn) & mmu_itlb[i].mask) == 0 ) {
+	    if( result != -1 ) {
+		return -2;
+	    }
+	    result = i;
+	}
+    }
+    return result;
+}
+
+#define RAISE_TLB_ERROR(code, vpn) \
+    MMIO_WRITE(MMU, TEA, vpn); \
+    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
+    sh4_raise_tlb_exception(code); \
+    return (((uint64_t)code)<<32)
+
+#define RAISE_MEM_ERROR(code, vpn) \
+    MMIO_WRITE(MMU, TEA, vpn); \
+    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
+    sh4_raise_exception(code); \
+    return (((uint64_t)code)<<32)
+
+#define RAISE_OTHER_ERROR(code) \
+    sh4_raise_exception(code); \
+    return (((uint64_t)EXV_EXCEPTION)<<32)
+
+/**
+ * Abort with a non-MMU address error. Caused by user-mode code attempting
+ * to access privileged regions, or alignment faults.
+ */
+#define MMU_READ_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_READ)
+#define MMU_WRITE_ADDR_ERROR() RAISE_OTHER_ERROR(EXC_DATA_ADDR_WRITE)
+
+#define MMU_TLB_READ_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_READ, vpn)
+#define MMU_TLB_WRITE_MISS_ERROR(vpn) RAISE_TLB_ERROR(EXC_TLB_MISS_WRITE, vpn)
+#define MMU_TLB_INITIAL_WRITE_ERROR(vpn) RAISE_MEM_ERROR(EXC_INIT_PAGE_WRITE, vpn)
+#define MMU_TLB_READ_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_READ, vpn)
+#define MMU_TLB_WRITE_PROT_ERROR(vpn) RAISE_MEM_ERROR(EXC_TLB_PROT_WRITE, vpn)
+#define MMU_TLB_MULTI_HIT_ERROR(vpn) sh4_raise_reset(EXC_TLB_MULTI_HIT); \
+    MMIO_WRITE(MMU, TEA, vpn); \
+    MMIO_WRITE(MMU, PTEH, ((MMIO_READ(MMU, PTEH) & 0x000003FF) | (vpn&0xFFFFFC00))); \
+    return (((uint64_t)EXC_TLB_MULTI_HIT)<<32)
+
+uint64_t mmu_vma_to_phys_write( sh4addr_t addr )
+{
+    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
+    if( addr & 0x80000000 ) {
+	if( IS_SH4_PRIVMODE() ) {
+	    if( addr < 0xC0000000 || addr >= 0xE0000000 ) {
+		/* P1, P2 and P4 regions are pass-through (no translation) */
+		return (uint64_t)addr;
+	    }
+	} else {
+	    if( addr >= 0xE0000000 && addr < 0xE4000000 &&
+		((mmucr&MMUCR_SQMD) == 0) ) {
+		/* Conditional user-mode access to the store-queue (no translation) */
+		return (uint64_t)addr;
+	    }
+	    MMU_WRITE_ADDR_ERROR();
+	}
+    }
+
+    if( (mmucr & MMUCR_AT) == 0 ) {
+	return (uint64_t)addr;
+    }
+
+    /* If we get this far, translation is required */
+
+    int use_asid = ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE();
+    uint32_t asid = MMIO_READ( MMU, PTEH ) & 0xFF;
+
+    int entryNo = mmu_utlb_lookup_vpn( addr, asid, use_asid );
+
+    switch(entryNo) {
+    case -1:
+	MMU_TLB_WRITE_MISS_ERROR(addr);
+	break;
+    case -2:
+	MMU_TLB_MULTI_HIT_ERROR(addr);
+	break;
+    default:
+	if( IS_SH4_PRIVMODE() ? ((mmu_utlb[entryNo].flags & TLB_WRITABLE) == 0)
+	    : ((mmu_utlb[entryNo].flags & TLB_USERWRITABLE) != TLB_USERWRITABLE) ) {
+	    /* protection violation */
+	    MMU_TLB_WRITE_PROT_ERROR(addr);
+	}
+
+	if( (mmu_utlb[entryNo].flags & TLB_DIRTY) == 0 ) {
+	    MMU_TLB_INITIAL_WRITE_ERROR(addr);
+	}
+
+	/* finally generate the target address */
+	return (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
+	    (addr & (~mmu_utlb[entryNo].mask));
+    }
+    return -1;
+}
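
(Editorial sketch, not part of the changeset.) The mmu_vma_to_phys_* functions pack both possible outcomes into one uint64_t: on success the low word carries the physical address and the high word is zero; on failure the RAISE_* macros above return the exception code shifted into the high word. The caller-side decode, as sh4mem.c performs it further down:

    /* Sketch only: consuming the packed result. */
    static int32_t write_through_mmu( sh4addr_t vma, uint32_t val )
    {
        uint64_t ppa = mmu_vma_to_phys_write( vma );
        if( ppa >> 32 ) {        /* non-zero high word: exception was raised */
            return ppa >> 32;    /* propagate the exception code */
        }
        sh4addr_t addr = (sh4addr_t)ppa; /* low word: physical address */
        /* ... perform the actual store to addr ... */
        return 0;
    }
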
+
+uint64_t mmu_vma_to_phys_exec( sh4addr_t addr )
+{
+    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
+    if( addr & 0x80000000 ) {
+	if( IS_SH4_PRIVMODE() ) {
+	    if( addr < 0xC0000000 ) {
+		/* P1 and P2 regions are pass-through (no translation) */
+		return (uint64_t)addr;
+	    } else if( addr >= 0xE0000000 ) {
+		MMU_READ_ADDR_ERROR();
+	    }
+	} else {
+	    MMU_READ_ADDR_ERROR();
+	}
+    }
+
+    if( (mmucr & MMUCR_AT) == 0 ) {
+	return (uint64_t)addr;
+    }
+
+    /* If we get this far, translation is required */
+    int use_asid = ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE();
+    uint32_t asid = MMIO_READ( MMU, PTEH ) & 0xFF;
+
+    int entryNo = mmu_itlb_lookup_vpn( addr, asid, use_asid );
+    if( entryNo == -1 ) {
+	entryNo = mmu_utlb_lookup_vpn( addr, asid, use_asid );
+	if( entryNo >= 0 ) {
+	    entryNo = mmu_itlb_update_from_utlb( entryNo );
+	}
+    }
+    switch(entryNo) {
+    case -1:
+	MMU_TLB_READ_MISS_ERROR(addr);
+	break;
+    case -2:
+	MMU_TLB_MULTI_HIT_ERROR(addr);
+	break;
+    default:
+	if( (mmu_itlb[entryNo].flags & TLB_USERMODE) == 0 &&
+	    !IS_SH4_PRIVMODE() ) {
+	    /* protection violation */
+	    MMU_TLB_READ_PROT_ERROR(addr);
+	}
+
+	/* finally generate the target address */
+	return (mmu_itlb[entryNo].ppn & mmu_itlb[entryNo].mask) |
+	    (addr & (~mmu_itlb[entryNo].mask));
+    }
+    return -1;
+}
+
+uint64_t mmu_vma_to_phys_read_noexc( sh4addr_t addr ) {
+
+
+}
+
+
+uint64_t mmu_vma_to_phys_read( sh4addr_t addr )
+{
+    uint32_t mmucr = MMIO_READ(MMU,MMUCR);
+    if( addr & 0x80000000 ) {
+	if( IS_SH4_PRIVMODE() ) {
+	    if( addr < 0xC0000000 || addr >= 0xE0000000 ) {
+		/* P1, P2 and P4 regions are pass-through (no translation) */
+		return (uint64_t)addr;
+	    }
+	} else {
+	    if( addr >= 0xE0000000 && addr < 0xE4000000 &&
+		((mmucr&MMUCR_SQMD) == 0) ) {
+		/* Conditional user-mode access to the store-queue (no translation) */
+		return (uint64_t)addr;
+	    }
+	    MMU_READ_ADDR_ERROR();
+	}
+    }
+
+    if( (mmucr & MMUCR_AT) == 0 ) {
+	return (uint64_t)addr;
+    }
+
+    /* If we get this far, translation is required */
+
+    int use_asid = ((mmucr & MMUCR_SV) == 0) || !IS_SH4_PRIVMODE();
+    uint32_t asid = MMIO_READ( MMU, PTEH ) & 0xFF;
+
+    int entryNo = mmu_utlb_lookup_vpn( addr, asid, use_asid );
+
+    switch(entryNo) {
+    case -1:
+	MMU_TLB_READ_MISS_ERROR(addr);
+	break;
+    case -2:
+	MMU_TLB_MULTI_HIT_ERROR(addr);
+	break;
+    default:
+	if( (mmu_utlb[entryNo].flags & TLB_USERMODE) == 0 &&
+	    !IS_SH4_PRIVMODE() ) {
+	    /* protection violation */
+	    MMU_TLB_READ_PROT_ERROR(addr);
+	}
+
+	/* finally generate the target address */
+	return (mmu_utlb[entryNo].ppn & mmu_utlb[entryNo].mask) |
+	    (addr & (~mmu_utlb[entryNo].mask));
+    }
+    return -1;
 }
 
 static void mmu_invalidate_tlb()
@@ -251,6 +628,7 @@
     struct itlb_entry *ent = &mmu_itlb[ITLB_ENTRY(addr)];
     ent->ppn = val & 0x1FFFFC00;
     ent->flags = val & 0x00001DA;
+    ent->mask = get_mask_for_flags(val);
 }
 
 #define UTLB_ENTRY(addr) ((addr>>8)&0x3F)
@@ -276,6 +654,16 @@
 void mmu_utlb_addr_write( sh4addr_t addr, uint32_t val )
 {
     if( UTLB_ASSOC(addr) ) {
+	uint32_t asid = MMIO_READ( MMU, PTEH ) & 0xFF;
+	int entryNo = mmu_utlb_lookup_assoc( val, asid );
+	if( entryNo >= 0 ) {
+	    struct utlb_entry *ent = &mmu_utlb[entryNo];
+	    ent->flags = ent->flags & ~(TLB_DIRTY|TLB_VALID);
+	    ent->flags |= (val & TLB_VALID);
+	    ent->flags |= ((val & 0x200)>>7);
+	} else if( entryNo == -2 ) {
+	    MMU_TLB_MULTI_HIT_ERROR(addr);
+	}
     } else {
 	struct utlb_entry *ent = &mmu_utlb[UTLB_ENTRY(addr)];
 	ent->vpn = (val & 0xFFFFFC00);
@@ -294,6 +682,7 @@
     } else {
 	ent->ppn = (val & 0x1FFFFC00);
 	ent->flags = (val & 0x000001FF);
+	ent->mask = get_mask_for_flags(val);
     }
 }
--- a/src/sh4/sh4.c	Tue Jan 01 04:56:52 2008 +0000
+++ b/src/sh4/sh4.c	Tue Jan 01 04:58:57 2008 +0000
@@ -30,10 +30,6 @@
 #include "clock.h"
 #include "syscall.h"
 
-#define EXV_EXCEPTION    0x100  /* General exception vector */
-#define EXV_TLBMISS      0x400  /* TLB-miss exception vector */
-#define EXV_INTERRUPT    0x600  /* External interrupt vector */
-
 void sh4_init( void );
 void sh4_xlat_init( void );
 void sh4_reset( void );
@@ -254,6 +250,21 @@
     RAISE( code, EXV_EXCEPTION );
 }
 
+/**
+ * Raise a CPU reset exception with the specified exception code.
+ */
+gboolean sh4_raise_reset( int code )
+{
+    // FIXME: reset modules as per "manual reset"
+    sh4_reset();
+    MMIO_WRITE(MMU,EXPEVT,code);
+    sh4r.vbr = 0;
+    sh4r.pc = 0xA0000000;
+    sh4r.new_pc = sh4r.pc + 2;
+    sh4_write_sr( (sh4r.sr|SR_MD|SR_BL|SR_RB|SR_IMASK)
+		  &(~SR_FD) );
+}
+
 gboolean sh4_raise_trap( int trap )
 {
     MMIO_WRITE( MMU, TRA, trap<<2 );
--- a/src/sh4/sh4core.c	Tue Jan 01 04:56:52 2008 +0000
+++ b/src/sh4/sh4core.c	Tue Jan 01 04:58:57 2008 +0000
@@ -164,12 +164,12 @@
 #define TRACE_RETURN( source, dest )
 #endif
 
-#define MEM_READ_BYTE( addr ) sh4_read_byte(addr)
-#define MEM_READ_WORD( addr ) sh4_read_word(addr)
-#define MEM_READ_LONG( addr ) sh4_read_long(addr)
-#define MEM_WRITE_BYTE( addr, val ) sh4_write_byte(addr, val)
-#define MEM_WRITE_WORD( addr, val ) sh4_write_word(addr, val)
-#define MEM_WRITE_LONG( addr, val ) sh4_write_long(addr, val)
+#define MEM_READ_BYTE( addr, val ) memtmp = sh4_read_byte(addr); if( memtmp >> 32 ) { return TRUE; } else { val = ((uint32_t)memtmp); }
+#define MEM_READ_WORD( addr, val ) memtmp = sh4_read_word(addr); if( memtmp >> 32 ) { return TRUE; } else { val = ((uint32_t)memtmp); }
+#define MEM_READ_LONG( addr, val ) memtmp = sh4_read_long(addr); if( memtmp >> 32 ) { return TRUE; } else { val = ((uint32_t)memtmp); }
+#define MEM_WRITE_BYTE( addr, val ) if( sh4_write_byte(addr, val) ) { return TRUE; }
+#define MEM_WRITE_WORD( addr, val ) if( sh4_write_word(addr, val) ) { return TRUE; }
+#define MEM_WRITE_LONG( addr, val ) if( sh4_write_long(addr, val) ) { return TRUE; }
 
 #define FP_WIDTH (IS_FPU_DOUBLESIZE() ? 8 : 4)
 
@@ -223,6 +223,7 @@
     uint32_t tmp;
     float ftmp;
     double dtmp;
+    int64_t memtmp; // temporary holder for memory reads
 
 #define R0 sh4r.r[0]
     pc = sh4r.pc;
@@ -246,7 +247,7 @@
 	     * region, fall back on the full-blown memory read
 	     */
 	    sh4_icache = NULL;
-	    ir = MEM_READ_WORD(pc);
+	    MEM_READ_WORD(pc, ir);
 	} else {
 	    sh4_icache_addr = pageaddr;
 	    ir = sh4_icache[(pc&0xFFF)>>1];
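
(Editorial sketch, not part of the changeset.) The reworked macros turn every interpreted memory access into an early-exit point: a read whose high word is non-zero, or a write returning non-zero, means the MMU already raised the exception, so the instruction simply abandons execution and lets the caller re-dispatch at the handler. MEM_READ_LONG(addr, dst) expands to the equivalent of:

    memtmp = sh4_read_long( addr );
    if( memtmp >> 32 ) {
        return TRUE;               /* exception raised - abort this instruction */
    } else {
        dst = (uint32_t)memtmp;    /* low word carries the loaded value */
    }
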
@@ -551,21 +552,21 @@
 case 0xC:
     { /* MOV.B @(R0, Rm), Rn */
     uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
-    sh4r.r[Rn] = MEM_READ_BYTE( R0 + sh4r.r[Rm] );
+    MEM_READ_BYTE( R0 + sh4r.r[Rm], sh4r.r[Rn] );
     }
     break;
 case 0xD:
     { /* MOV.W @(R0, Rm), Rn */
     uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
     CHECKRALIGN16( R0 + sh4r.r[Rm] );
-    sh4r.r[Rn] = MEM_READ_WORD( R0 + sh4r.r[Rm] );
+    MEM_READ_WORD( R0 + sh4r.r[Rm], sh4r.r[Rn] );
     }
     break;
 case 0xE:
     { /* MOV.L @(R0, Rm), Rn */
     uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
     CHECKRALIGN32( R0 + sh4r.r[Rm] );
-    sh4r.r[Rn] = MEM_READ_LONG( R0 + sh4r.r[Rm] );
+    MEM_READ_LONG( R0 + sh4r.r[Rm], sh4r.r[Rn] );
     }
     break;
 case 0xF:
@@ -573,9 +574,11 @@
     uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
     CHECKRALIGN32( sh4r.r[Rm] );
     CHECKRALIGN32( sh4r.r[Rn] );
-    int64_t tmpl = SIGNEXT32(MEM_READ_LONG(sh4r.r[Rn]));
+    MEM_READ_LONG(sh4r.r[Rn], tmp);
+    int64_t tmpl = SIGNEXT32(tmp);
     sh4r.r[Rn] += 4;
-    tmpl = tmpl * SIGNEXT32(MEM_READ_LONG(sh4r.r[Rm])) + sh4r.mac;
+    MEM_READ_LONG(sh4r.r[Rm], tmp);
+    tmpl = tmpl * SIGNEXT32(tmp) + sh4r.mac;
     sh4r.r[Rm] += 4;
     if( sh4r.s ) {
 	/* 48-bit Saturation. Yuch */
@@ -1058,8 +1061,9 @@
     { /* LDS.L @Rm+, MACH */
     uint32_t Rm = ((ir>>8)&0xF);
     CHECKRALIGN32( sh4r.r[Rm] );
+    MEM_READ_LONG(sh4r.r[Rm], tmp);
     sh4r.mac = (sh4r.mac & 0x00000000FFFFFFFF) |
-	(((uint64_t)MEM_READ_LONG(sh4r.r[Rm]))<<32);
+	(((uint64_t)tmp)<<32);
     sh4r.r[Rm] += 4;
     }
     break;
@@ -1067,8 +1071,9 @@
     { /* LDS.L @Rm+, MACL */
     uint32_t Rm = ((ir>>8)&0xF);
     CHECKRALIGN32( sh4r.r[Rm] );
+    MEM_READ_LONG(sh4r.r[Rm], tmp);
     sh4r.mac = (sh4r.mac & 0xFFFFFFFF00000000LL) |
-	(uint64_t)((uint32_t)MEM_READ_LONG(sh4r.r[Rm]));
+	(uint64_t)((uint32_t)tmp);
     sh4r.r[Rm] += 4;
     }
     break;
@@ -1076,7 +1081,7 @@
     { /* LDS.L @Rm+, PR */
     uint32_t Rm = ((ir>>8)&0xF);
     CHECKRALIGN32( sh4r.r[Rm] );
-    sh4r.pr = MEM_READ_LONG( sh4r.r[Rm] );
+    MEM_READ_LONG( sh4r.r[Rm], sh4r.pr );
     sh4r.r[Rm] += 4;
     }
     break;
@@ -1085,7 +1090,7 @@
     uint32_t Rm = ((ir>>8)&0xF);
     CHECKPRIV();
     CHECKRALIGN32( sh4r.r[Rm] );
-    sh4r.sgr = MEM_READ_LONG(sh4r.r[Rm]);
+    MEM_READ_LONG(sh4r.r[Rm], sh4r.sgr);
    sh4r.r[Rm] +=4;
     }
     break;
@@ -1093,7 +1098,7 @@
     { /* LDS.L @Rm+, FPUL */
     uint32_t Rm = ((ir>>8)&0xF);
     CHECKRALIGN32( sh4r.r[Rm] );
-    sh4r.fpul = MEM_READ_LONG(sh4r.r[Rm]);
+    MEM_READ_LONG(sh4r.r[Rm], sh4r.fpul);
     sh4r.r[Rm] +=4;
     }
     break;
@@ -1101,7 +1106,7 @@
     { /* LDS.L @Rm+, FPSCR */
     uint32_t Rm = ((ir>>8)&0xF);
     CHECKRALIGN32( sh4r.r[Rm] );
-    sh4r.fpscr = MEM_READ_LONG(sh4r.r[Rm]);
+    MEM_READ_LONG(sh4r.r[Rm], sh4r.fpscr);
     sh4r.r[Rm] +=4;
     sh4r.fr_bank = &sh4r.fr[(sh4r.fpscr&FPSCR_FR)>>21][0];
     }
@@ -1111,7 +1116,7 @@
     uint32_t Rm = ((ir>>8)&0xF);
     CHECKPRIV();
     CHECKRALIGN32( sh4r.r[Rm] );
-    sh4r.dbr = MEM_READ_LONG(sh4r.r[Rm]);
+    MEM_READ_LONG(sh4r.r[Rm], sh4r.dbr);
     sh4r.r[Rm] +=4;
     }
     break;
@@ -1130,7 +1135,8 @@
     CHECKSLOTILLEGAL();
     CHECKPRIV();
     CHECKWALIGN32( sh4r.r[Rm] );
-    sh4_write_sr( MEM_READ_LONG(sh4r.r[Rm]) );
+    MEM_READ_LONG(sh4r.r[Rm], tmp);
+    sh4_write_sr( tmp );
     sh4r.r[Rm] +=4;
     }
     break;
@@ -1138,7 +1144,7 @@
     { /* LDC.L @Rm+, GBR */
     uint32_t Rm = ((ir>>8)&0xF);
     CHECKRALIGN32( sh4r.r[Rm] );
-    sh4r.gbr = MEM_READ_LONG(sh4r.r[Rm]);
+    MEM_READ_LONG(sh4r.r[Rm], sh4r.gbr);
     sh4r.r[Rm] +=4;
     }
     break;
@@ -1147,7 +1153,7 @@
     uint32_t Rm = ((ir>>8)&0xF);
     CHECKPRIV();
     CHECKRALIGN32( sh4r.r[Rm] );
-    sh4r.vbr = MEM_READ_LONG(sh4r.r[Rm]);
+    MEM_READ_LONG(sh4r.r[Rm], sh4r.vbr);
     sh4r.r[Rm] +=4;
     }
     break;
@@ -1156,7 +1162,7 @@
     uint32_t Rm = ((ir>>8)&0xF);
     CHECKPRIV();
     CHECKRALIGN32( sh4r.r[Rm] );
-    sh4r.ssr = MEM_READ_LONG(sh4r.r[Rm]);
+    MEM_READ_LONG(sh4r.r[Rm], sh4r.ssr);
     sh4r.r[Rm] +=4;
     }
     break;
@@ -1165,7 +1171,7 @@
     uint32_t Rm = ((ir>>8)&0xF);
     CHECKPRIV();
     CHECKRALIGN32( sh4r.r[Rm] );
-    sh4r.spc = MEM_READ_LONG(sh4r.r[Rm]);
+    MEM_READ_LONG(sh4r.r[Rm], sh4r.spc);
     sh4r.r[Rm] +=4;
     }
     break;
@@ -1179,7 +1185,7 @@
     uint32_t Rm = ((ir>>8)&0xF); uint32_t Rn_BANK = ((ir>>4)&0x7);
     CHECKPRIV();
     CHECKRALIGN32( sh4r.r[Rm] );
-    sh4r.r_bank[Rn_BANK] = MEM_READ_LONG( sh4r.r[Rm] );
+    MEM_READ_LONG( sh4r.r[Rm], sh4r.r_bank[Rn_BANK] );
     sh4r.r[Rm] += 4;
     }
     break;
@@ -1307,7 +1313,7 @@
 case 0x1:
     { /* TAS.B @Rn */
     uint32_t Rn = ((ir>>8)&0xF);
-    tmp = MEM_READ_BYTE( sh4r.r[Rn] );
+    MEM_READ_BYTE( sh4r.r[Rn], tmp );
     sh4r.t = ( tmp == 0 ? 1 : 0 );
     MEM_WRITE_BYTE( sh4r.r[Rn], tmp | 0x80 );
     }
@@ -1406,9 +1412,11 @@
     uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
     CHECKRALIGN16( sh4r.r[Rn] );
     CHECKRALIGN16( sh4r.r[Rm] );
-    int32_t stmp = SIGNEXT16(MEM_READ_WORD(sh4r.r[Rn]));
+    MEM_READ_WORD(sh4r.r[Rn], tmp);
+    int32_t stmp = SIGNEXT16(tmp);
     sh4r.r[Rn] += 2;
-    stmp = stmp * SIGNEXT16(MEM_READ_WORD(sh4r.r[Rm]));
+    MEM_READ_WORD(sh4r.r[Rm], tmp);
+    stmp = stmp * SIGNEXT16(tmp);
     sh4r.r[Rm] += 2;
     if( sh4r.s ) {
 	int64_t tmpl = (int64_t)((int32_t)sh4r.mac) + (int64_t)stmp;
@@ -1432,7 +1440,7 @@
     uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF); uint32_t disp = (ir&0xF)<<2;
     tmp = sh4r.r[Rm] + disp;
     CHECKRALIGN32( tmp );
-    sh4r.r[Rn] = MEM_READ_LONG( tmp );
+    MEM_READ_LONG( tmp, sh4r.r[Rn] );
     }
     break;
 case 0x6:
@@ -1440,19 +1448,19 @@
 case 0x0:
     { /* MOV.B @Rm, Rn */
     uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
-    sh4r.r[Rn] = MEM_READ_BYTE( sh4r.r[Rm] );
+    MEM_READ_BYTE( sh4r.r[Rm], sh4r.r[Rn] );
     }
     break;
 case 0x1:
     { /* MOV.W @Rm, Rn */
     uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
-    CHECKRALIGN16( sh4r.r[Rm] ); sh4r.r[Rn] = MEM_READ_WORD( sh4r.r[Rm] );
+    CHECKRALIGN16( sh4r.r[Rm] ); MEM_READ_WORD( sh4r.r[Rm], sh4r.r[Rn] );
     }
     break;
 case 0x2:
     { /* MOV.L @Rm, Rn */
     uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
-    CHECKRALIGN32( sh4r.r[Rm] ); sh4r.r[Rn] = MEM_READ_LONG( sh4r.r[Rm] );
+    CHECKRALIGN32( sh4r.r[Rm] ); MEM_READ_LONG( sh4r.r[Rm], sh4r.r[Rn] );
     }
     break;
 case 0x3:
@@ -1464,19 +1472,19 @@
 case 0x4:
     { /* MOV.B @Rm+, Rn */
     uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
-    sh4r.r[Rn] = MEM_READ_BYTE( sh4r.r[Rm] ); sh4r.r[Rm] ++;
+    MEM_READ_BYTE( sh4r.r[Rm], sh4r.r[Rn] ); sh4r.r[Rm] ++;
     }
     break;
 case 0x5:
     { /* MOV.W @Rm+, Rn */
     uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
-    CHECKRALIGN16( sh4r.r[Rm] ); sh4r.r[Rn] = MEM_READ_WORD( sh4r.r[Rm] ); sh4r.r[Rm] += 2;
+    CHECKRALIGN16( sh4r.r[Rm] ); MEM_READ_WORD( sh4r.r[Rm], sh4r.r[Rn] ); sh4r.r[Rm] += 2;
     }
     break;
 case 0x6:
     { /* MOV.L @Rm+, Rn */
     uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
-    CHECKRALIGN32( sh4r.r[Rm] ); sh4r.r[Rn] = MEM_READ_LONG( sh4r.r[Rm] ); sh4r.r[Rm] += 4;
+    CHECKRALIGN32( sh4r.r[Rm] ); MEM_READ_LONG( sh4r.r[Rm], sh4r.r[Rn] ); sh4r.r[Rm] += 4;
     }
     break;
 case 0x7:
@@ -1562,7 +1570,7 @@
 case 0x4:
     { /* MOV.B @(disp, Rm), R0 */
     uint32_t Rm = ((ir>>4)&0xF); uint32_t disp = (ir&0xF);
-    R0 = MEM_READ_BYTE( sh4r.r[Rm] + disp );
+    MEM_READ_BYTE( sh4r.r[Rm] + disp, R0 );
     }
     break;
 case 0x5:
@@ -1570,7 +1578,7 @@
     uint32_t Rm = ((ir>>4)&0xF); uint32_t disp = (ir&0xF)<<1;
     tmp = sh4r.r[Rm] + disp;
     CHECKRALIGN16( tmp );
-    R0 = MEM_READ_WORD( tmp );
+    MEM_READ_WORD( tmp, R0 );
     }
     break;
 case 0x8:
@@ -1640,7 +1648,7 @@
     uint32_t Rn = ((ir>>8)&0xF); uint32_t disp = (ir&0xFF)<<1;
     CHECKSLOTILLEGAL();
     tmp = pc + 4 + disp;
-    sh4r.r[Rn] = MEM_READ_WORD( tmp );
+    MEM_READ_WORD( tmp, sh4r.r[Rn] );
     }
     break;
 case 0xA:
@@ -1703,7 +1711,7 @@
 case 0x4:
     { /* MOV.B @(disp, GBR), R0 */
     uint32_t disp = (ir&0xFF);
-    R0 = MEM_READ_BYTE( sh4r.gbr + disp );
+    MEM_READ_BYTE( sh4r.gbr + disp, R0 );
     }
     break;
 case 0x5:
@@ -1711,7 +1719,7 @@
     uint32_t disp = (ir&0xFF)<<1;
     tmp = sh4r.gbr + disp;
     CHECKRALIGN16( tmp );
-    R0 = MEM_READ_WORD( tmp );
+    MEM_READ_WORD( tmp, R0 );
     }
     break;
 case 0x6:
@@ -1719,7 +1727,7 @@
     uint32_t disp = (ir&0xFF)<<2;
     tmp = sh4r.gbr + disp;
     CHECKRALIGN32( tmp );
-    R0 = MEM_READ_LONG( tmp );
+    MEM_READ_LONG( tmp, R0 );
     }
     break;
 case 0x7:
@@ -1756,25 +1764,25 @@
 case 0xC:
     { /* TST.B #imm, @(R0, GBR) */
     uint32_t imm = (ir&0xFF);
-    sh4r.t = ( MEM_READ_BYTE(R0 + sh4r.gbr) & imm ? 0 : 1 );
+    MEM_READ_BYTE(R0+sh4r.gbr, tmp); sh4r.t = ( tmp & imm ? 0 : 1 );
     }
     break;
 case 0xD:
     { /* AND.B #imm, @(R0, GBR) */
     uint32_t imm = (ir&0xFF);
-    MEM_WRITE_BYTE( R0 + sh4r.gbr, imm & MEM_READ_BYTE(R0 + sh4r.gbr) );
+    MEM_READ_BYTE(R0+sh4r.gbr, tmp); MEM_WRITE_BYTE( R0 + sh4r.gbr, imm & tmp );
     }
     break;
 case 0xE:
     { /* XOR.B #imm, @(R0, GBR) */
     uint32_t imm = (ir&0xFF);
-    MEM_WRITE_BYTE( R0 + sh4r.gbr, imm ^ MEM_READ_BYTE(R0 + sh4r.gbr) );
+    MEM_READ_BYTE(R0+sh4r.gbr, tmp); MEM_WRITE_BYTE( R0 + sh4r.gbr, imm ^ tmp );
     }
     break;
 case 0xF:
     { /* OR.B #imm, @(R0, GBR) */
     uint32_t imm = (ir&0xFF);
-    MEM_WRITE_BYTE( R0 + sh4r.gbr, imm | MEM_READ_BYTE(R0 + sh4r.gbr) );
+    MEM_READ_BYTE(R0+sh4r.gbr, tmp); MEM_WRITE_BYTE( R0 + sh4r.gbr, imm | tmp );
     }
     break;
 }
@@ -1784,7 +1792,7 @@
     uint32_t Rn = ((ir>>8)&0xF); uint32_t disp = (ir&0xFF)<<2;
     CHECKSLOTILLEGAL();
     tmp = (pc&0xFFFFFFFC) + disp + 4;
-    sh4r.r[Rn] = MEM_READ_LONG( tmp );
+    MEM_READ_LONG( tmp, sh4r.r[Rn] );
     }
     break;
 case 0xE:
--- a/src/sh4/sh4core.h	Tue Jan 01 04:56:52 2008 +0000
+++ b/src/sh4/sh4core.h	Tue Jan 01 04:58:57 2008 +0000
@@ -102,6 +102,7 @@
 gboolean sh4_execute_instruction( void );
 gboolean sh4_raise_exception( int );
+gboolean sh4_raise_reset( int );
 gboolean sh4_raise_trap( int );
 gboolean sh4_raise_slot_exception( int, int );
 gboolean sh4_raise_tlb_exception( int );
@@ -114,16 +115,21 @@
 #define BREAK_PERM 2
 
 /* SH4 Memory */
+uint64_t mmu_vma_to_phys_read( sh4addr_t addr );
+uint64_t mmu_vma_to_phys_write( sh4addr_t addr );
+uint64_t mmu_vma_to_phys_exec( sh4addr_t addr );
+
 int64_t sh4_read_quad( sh4addr_t addr );
-int32_t sh4_read_long( sh4addr_t addr );
-int32_t sh4_read_word( sh4addr_t addr );
-int32_t sh4_read_byte( sh4addr_t addr );
+int64_t sh4_read_long( sh4addr_t addr );
+int64_t sh4_read_word( sh4addr_t addr );
+int64_t sh4_read_byte( sh4addr_t addr );
 void sh4_write_quad( sh4addr_t addr, uint64_t val );
-void sh4_write_long( sh4addr_t addr, uint32_t val );
-void sh4_write_word( sh4addr_t addr, uint32_t val );
-void sh4_write_byte( sh4addr_t addr, uint32_t val );
+int32_t sh4_write_long( sh4addr_t addr, uint32_t val );
+int32_t sh4_write_word( sh4addr_t addr, uint32_t val );
+int32_t sh4_write_byte( sh4addr_t addr, uint32_t val );
 int32_t sh4_read_phys_word( sh4addr_t addr );
 void sh4_flush_store_queue( sh4addr_t addr );
+sh4ptr_t sh4_get_region_by_vma( sh4addr_t addr );
 
 /* SH4 Support methods */
 uint32_t sh4_read_sr(void);
@@ -160,6 +166,7 @@
 #define SIGNEXT16(n) ((int32_t)((int16_t)(n)))
 #define SIGNEXT32(n) ((int64_t)((int32_t)(n)))
 #define SIGNEXT48(n) ((((int64_t)(n))<<16)>>16)
+#define ZEROEXT32(n) ((int64_t)((uint64_t)((uint32_t)(n))))
 
 /* Status Register (SR) bits */
 #define SR_MD    0x40000000 /* Processor mode ( User=0, Privileged=1 ) */
@@ -201,16 +208,26 @@
 #define FPULi    (sh4r.fpul)
 
 /* CPU-generated exception code/vector pairs */
-#define EXC_POWER_RESET     0x000 /* vector special */
-#define EXC_MANUAL_RESET    0x020
-#define EXC_DATA_ADDR_READ  0x0E0
+#define EXC_POWER_RESET     0x000 /* vector special */
+#define EXC_MANUAL_RESET    0x020
+#define EXC_TLB_MISS_READ   0x040
+#define EXC_TLB_MISS_WRITE  0x060
+#define EXC_INIT_PAGE_WRITE 0x080
+#define EXC_TLB_PROT_READ   0x0A0
+#define EXC_TLB_PROT_WRITE  0x0C0
+#define EXC_DATA_ADDR_READ  0x0E0
 #define EXC_DATA_ADDR_WRITE 0x100
-#define EXC_SLOT_ILLEGAL    0x1A0
-#define EXC_ILLEGAL         0x180
-#define EXC_TRAP            0x160
-#define EXC_FPU_DISABLED    0x800
+#define EXC_TLB_MULTI_HIT   0x140
+#define EXC_SLOT_ILLEGAL    0x1A0
+#define EXC_ILLEGAL         0x180
+#define EXC_TRAP            0x160
+#define EXC_FPU_DISABLED    0x800
 #define EXC_SLOT_FPU_DISABLED 0x820
+
+#define EXV_EXCEPTION    0x100  /* General exception vector */
+#define EXV_TLBMISS      0x400  /* TLB-miss exception vector */
+#define EXV_INTERRUPT    0x600  /* External interrupt vector */
 
 /* Exceptions (for use with sh4_raise_exception) */
 #define EX_ILLEGAL_INSTRUCTION 0x180, 0x100
--- a/src/sh4/sh4core.in	Tue Jan 01 04:56:52 2008 +0000
+++ b/src/sh4/sh4core.in	Tue Jan 01 04:58:57 2008 +0000
@@ -164,12 +164,12 @@
 #define TRACE_RETURN( source, dest )
 #endif
 
-#define MEM_READ_BYTE( addr ) sh4_read_byte(addr)
-#define MEM_READ_WORD( addr ) sh4_read_word(addr)
-#define MEM_READ_LONG( addr ) sh4_read_long(addr)
-#define MEM_WRITE_BYTE( addr, val ) sh4_write_byte(addr, val)
-#define MEM_WRITE_WORD( addr, val ) sh4_write_word(addr, val)
-#define MEM_WRITE_LONG( addr, val ) sh4_write_long(addr, val)
+#define MEM_READ_BYTE( addr, val ) memtmp = sh4_read_byte(addr); if( memtmp >> 32 ) { return TRUE; } else { val = ((uint32_t)memtmp); }
+#define MEM_READ_WORD( addr, val ) memtmp = sh4_read_word(addr); if( memtmp >> 32 ) { return TRUE; } else { val = ((uint32_t)memtmp); }
+#define MEM_READ_LONG( addr, val ) memtmp = sh4_read_long(addr); if( memtmp >> 32 ) { return TRUE; } else { val = ((uint32_t)memtmp); }
+#define MEM_WRITE_BYTE( addr, val ) if( sh4_write_byte(addr, val) ) { return TRUE; }
+#define MEM_WRITE_WORD( addr, val ) if( sh4_write_word(addr, val) ) { return TRUE; }
+#define MEM_WRITE_LONG( addr, val ) if( sh4_write_long(addr, val) ) { return TRUE; }
 
 #define FP_WIDTH (IS_FPU_DOUBLESIZE() ? 8 : 4)
 
@@ -223,6 +223,7 @@
     uint32_t tmp;
     float ftmp;
     double dtmp;
+    int64_t memtmp; // temporary holder for memory reads
 
#define R0 sh4r.r[0]
     pc = sh4r.pc;
@@ -246,7 +247,7 @@
 	     * region, fall back on the full-blown memory read
 	     */
 	    sh4_icache = NULL;
-	    ir = MEM_READ_WORD(pc);
+	    MEM_READ_WORD(pc, ir);
 	} else {
 	    sh4_icache_addr = pageaddr;
 	    ir = sh4_icache[(pc&0xFFF)>>1];
@@ -255,22 +256,22 @@
 %%
 AND Rm, Rn {: sh4r.r[Rn] &= sh4r.r[Rm]; :}
 AND #imm, R0 {: R0 &= imm; :}
-AND.B #imm, @(R0, GBR) {: MEM_WRITE_BYTE( R0 + sh4r.gbr, imm & MEM_READ_BYTE(R0 + sh4r.gbr) ); :}
+AND.B #imm, @(R0, GBR) {: MEM_READ_BYTE(R0+sh4r.gbr, tmp); MEM_WRITE_BYTE( R0 + sh4r.gbr, imm & tmp ); :}
 NOT Rm, Rn {: sh4r.r[Rn] = ~sh4r.r[Rm]; :}
 OR Rm, Rn {: sh4r.r[Rn] |= sh4r.r[Rm]; :}
 OR #imm, R0 {: R0 |= imm; :}
-OR.B #imm, @(R0, GBR) {: MEM_WRITE_BYTE( R0 + sh4r.gbr, imm | MEM_READ_BYTE(R0 + sh4r.gbr) ); :}
+OR.B #imm, @(R0, GBR) {: MEM_READ_BYTE(R0+sh4r.gbr, tmp); MEM_WRITE_BYTE( R0 + sh4r.gbr, imm | tmp ); :}
 TAS.B @Rn {:
-    tmp = MEM_READ_BYTE( sh4r.r[Rn] );
+    MEM_READ_BYTE( sh4r.r[Rn], tmp );
     sh4r.t = ( tmp == 0 ? 1 : 0 );
     MEM_WRITE_BYTE( sh4r.r[Rn], tmp | 0x80 );
 :}
 TST Rm, Rn {: sh4r.t = (sh4r.r[Rn]&sh4r.r[Rm] ? 0 : 1); :}
 TST #imm, R0 {: sh4r.t = (R0 & imm ? 0 : 1); :}
-TST.B #imm, @(R0, GBR) {: sh4r.t = ( MEM_READ_BYTE(R0 + sh4r.gbr) & imm ? 0 : 1 ); :}
+TST.B #imm, @(R0, GBR) {: MEM_READ_BYTE(R0+sh4r.gbr, tmp); sh4r.t = ( tmp & imm ? 0 : 1 ); :}
 XOR Rm, Rn {: sh4r.r[Rn] ^= sh4r.r[Rm]; :}
 XOR #imm, R0 {: R0 ^= imm; :}
-XOR.B #imm, @(R0, GBR) {: MEM_WRITE_BYTE( R0 + sh4r.gbr, imm ^ MEM_READ_BYTE(R0 + sh4r.gbr) ); :}
+XOR.B #imm, @(R0, GBR) {: MEM_READ_BYTE(R0+sh4r.gbr, tmp); MEM_WRITE_BYTE( R0 + sh4r.gbr, imm ^ tmp ); :}
 XTRCT Rm, Rn {: sh4r.r[Rn] = (sh4r.r[Rn]>>16) | (sh4r.r[Rm]<<16); :}
 ROTL Rn {:
@@ -365,12 +366,12 @@
     CHECKWALIGN32( R0 + sh4r.r[Rn] );
     MEM_WRITE_LONG( R0 + sh4r.r[Rn], sh4r.r[Rm] );
 :}
-MOV.B @(R0, Rm), Rn {: sh4r.r[Rn] = MEM_READ_BYTE( R0 + sh4r.r[Rm] ); :}
+MOV.B @(R0, Rm), Rn {: MEM_READ_BYTE( R0 + sh4r.r[Rm], sh4r.r[Rn] ); :}
 MOV.W @(R0, Rm), Rn {:
     CHECKRALIGN16( R0 + sh4r.r[Rm] );
-    sh4r.r[Rn] = MEM_READ_WORD( R0 + sh4r.r[Rm] );
+    MEM_READ_WORD( R0 + sh4r.r[Rm], sh4r.r[Rn] );
 :}
 MOV.L @(R0, Rm), Rn {:
     CHECKRALIGN32( R0 + sh4r.r[Rm] );
-    sh4r.r[Rn] = MEM_READ_LONG( R0 + sh4r.r[Rm] );
+    MEM_READ_LONG( R0 + sh4r.r[Rm], sh4r.r[Rn] );
 :}
 MOV.L Rm, @(disp, Rn) {:
     tmp = sh4r.r[Rn] + disp;
@@ -386,19 +387,19 @@
 MOV.L @(disp, Rm), Rn {:
     tmp = sh4r.r[Rm] + disp;
     CHECKRALIGN32( tmp );
-    sh4r.r[Rn] = MEM_READ_LONG( tmp );
+    MEM_READ_LONG( tmp, sh4r.r[Rn] );
 :}
-MOV.B @Rm, Rn {: sh4r.r[Rn] = MEM_READ_BYTE( sh4r.r[Rm] ); :}
-MOV.W @Rm, Rn {: CHECKRALIGN16( sh4r.r[Rm] ); sh4r.r[Rn] = MEM_READ_WORD( sh4r.r[Rm] ); :}
-MOV.L @Rm, Rn {: CHECKRALIGN32( sh4r.r[Rm] ); sh4r.r[Rn] = MEM_READ_LONG( sh4r.r[Rm] ); :}
+MOV.B @Rm, Rn {: MEM_READ_BYTE( sh4r.r[Rm], sh4r.r[Rn] ); :}
+MOV.W @Rm, Rn {: CHECKRALIGN16( sh4r.r[Rm] ); MEM_READ_WORD( sh4r.r[Rm], sh4r.r[Rn] ); :}
+MOV.L @Rm, Rn {: CHECKRALIGN32( sh4r.r[Rm] ); MEM_READ_LONG( sh4r.r[Rm], sh4r.r[Rn] ); :}
 MOV Rm, Rn {: sh4r.r[Rn] = sh4r.r[Rm]; :}
-MOV.B @Rm+, Rn {: sh4r.r[Rn] = MEM_READ_BYTE( sh4r.r[Rm] ); sh4r.r[Rm] ++; :}
-MOV.W @Rm+, Rn {: CHECKRALIGN16( sh4r.r[Rm] ); sh4r.r[Rn] = MEM_READ_WORD( sh4r.r[Rm] ); sh4r.r[Rm] += 2; :}
-MOV.L @Rm+, Rn {: CHECKRALIGN32( sh4r.r[Rm] ); sh4r.r[Rn] = MEM_READ_LONG( sh4r.r[Rm] ); sh4r.r[Rm] += 4; :}
+MOV.B @Rm+, Rn {: MEM_READ_BYTE( sh4r.r[Rm], sh4r.r[Rn] ); sh4r.r[Rm] ++; :}
+MOV.W @Rm+, Rn {: CHECKRALIGN16( sh4r.r[Rm] ); MEM_READ_WORD( sh4r.r[Rm], sh4r.r[Rn] ); sh4r.r[Rm] += 2; :}
+MOV.L @Rm+, Rn {: CHECKRALIGN32( sh4r.r[Rm] ); MEM_READ_LONG( sh4r.r[Rm], sh4r.r[Rn] ); sh4r.r[Rm] += 4; :}
 MOV.L @(disp, PC), Rn {:
     CHECKSLOTILLEGAL();
     tmp = (pc&0xFFFFFFFC) + disp + 4;
-    sh4r.r[Rn] = MEM_READ_LONG( tmp );
+    MEM_READ_LONG( tmp, sh4r.r[Rn] );
 :}
 MOV.B R0, @(disp, GBR) {: MEM_WRITE_BYTE( sh4r.gbr + disp, R0 ); :}
 MOV.W R0, @(disp, GBR) {:
@@ -411,16 +412,16 @@
     CHECKWALIGN32( tmp );
     MEM_WRITE_LONG( tmp, R0 );
 :}
-MOV.B @(disp, GBR), R0 {: R0 = MEM_READ_BYTE( sh4r.gbr + disp ); :}
+MOV.B @(disp, GBR), R0 {: MEM_READ_BYTE( sh4r.gbr + disp, R0 ); :}
 MOV.W @(disp, GBR), R0 {:
     tmp = sh4r.gbr + disp;
     CHECKRALIGN16( tmp );
-    R0 = MEM_READ_WORD( tmp );
+    MEM_READ_WORD( tmp, R0 );
 :}
 MOV.L @(disp, GBR), R0 {:
     tmp = sh4r.gbr + disp;
     CHECKRALIGN32( tmp );
-    R0 = MEM_READ_LONG( tmp );
+    MEM_READ_LONG( tmp, R0 );
 :}
 MOV.B R0, @(disp, Rn) {: MEM_WRITE_BYTE( sh4r.r[Rn] + disp, R0 ); :}
 MOV.W R0, @(disp, Rn) {:
@@ -428,16 +429,16 @@
     CHECKWALIGN16( tmp );
     MEM_WRITE_WORD( tmp, R0 );
 :}
-MOV.B @(disp, Rm), R0 {: R0 = MEM_READ_BYTE( sh4r.r[Rm] + disp ); :}
+MOV.B @(disp, Rm), R0 {: MEM_READ_BYTE( sh4r.r[Rm] + disp, R0 ); :}
 MOV.W @(disp, Rm), R0 {:
     tmp = sh4r.r[Rm] + disp;
     CHECKRALIGN16( tmp );
-    R0 = MEM_READ_WORD( tmp );
+    MEM_READ_WORD( tmp, R0 );
 :}
 MOV.W @(disp, PC), Rn {:
     CHECKSLOTILLEGAL();
     tmp = pc + 4 + disp;
-    sh4r.r[Rn] = MEM_READ_WORD( tmp );
+    MEM_READ_WORD( tmp, sh4r.r[Rn] );
 :}
 MOVA @(disp, PC), R0 {:
     CHECKSLOTILLEGAL();
@@ -506,9 +507,11 @@
 MAC.W @Rm+, @Rn+ {:
     CHECKRALIGN16( sh4r.r[Rn] );
     CHECKRALIGN16( sh4r.r[Rm] );
-    int32_t stmp = SIGNEXT16(MEM_READ_WORD(sh4r.r[Rn]));
+    MEM_READ_WORD(sh4r.r[Rn], tmp);
+    int32_t stmp = SIGNEXT16(tmp);
     sh4r.r[Rn] += 2;
-    stmp = stmp * SIGNEXT16(MEM_READ_WORD(sh4r.r[Rm]));
+    MEM_READ_WORD(sh4r.r[Rm], tmp);
+    stmp = stmp * SIGNEXT16(tmp);
     sh4r.r[Rm] += 2;
     if( sh4r.s ) {
 	int64_t tmpl = (int64_t)((int32_t)sh4r.mac) + (int64_t)stmp;
@@ -527,9 +530,11 @@
 MAC.L @Rm+, @Rn+ {:
     CHECKRALIGN32( sh4r.r[Rm] );
     CHECKRALIGN32( sh4r.r[Rn] );
-    int64_t tmpl = SIGNEXT32(MEM_READ_LONG(sh4r.r[Rn]));
+    MEM_READ_LONG(sh4r.r[Rn], tmp);
+    int64_t tmpl = SIGNEXT32(tmp);
     sh4r.r[Rn] += 4;
-    tmpl = tmpl * SIGNEXT32(MEM_READ_LONG(sh4r.r[Rm])) + sh4r.mac;
+    MEM_READ_LONG(sh4r.r[Rm], tmp);
+    tmpl = tmpl * SIGNEXT32(tmp) + sh4r.mac;
     sh4r.r[Rm] += 4;
     if( sh4r.s ) {
 	/* 48-bit Saturation. Yuch */
@@ -703,15 +708,17 @@
 :}
 LDS.L @Rm+, MACH {:
     CHECKRALIGN32( sh4r.r[Rm] );
+    MEM_READ_LONG(sh4r.r[Rm], tmp);
     sh4r.mac = (sh4r.mac & 0x00000000FFFFFFFF) |
-	(((uint64_t)MEM_READ_LONG(sh4r.r[Rm]))<<32);
+	(((uint64_t)tmp)<<32);
     sh4r.r[Rm] += 4;
 :}
 LDC.L @Rm+, SR {:
     CHECKSLOTILLEGAL();
     CHECKPRIV();
     CHECKWALIGN32( sh4r.r[Rm] );
-    sh4_write_sr( MEM_READ_LONG(sh4r.r[Rm]) );
+    MEM_READ_LONG(sh4r.r[Rm], tmp);
+    sh4_write_sr( tmp );
     sh4r.r[Rm] +=4;
 :}
 LDS Rm, MACH {:
@@ -730,7 +737,7 @@
 LDC.L @Rm+, SGR {:
     CHECKPRIV();
     CHECKRALIGN32( sh4r.r[Rm] );
-    sh4r.sgr = MEM_READ_LONG(sh4r.r[Rm]);
+    MEM_READ_LONG(sh4r.r[Rm], sh4r.sgr);
     sh4r.r[Rm] +=4;
 :}
 STS MACL, Rn {: sh4r.r[Rn] = (uint32_t)sh4r.mac; :}
@@ -746,13 +753,14 @@
 :}
 LDS.L @Rm+, MACL {:
     CHECKRALIGN32( sh4r.r[Rm] );
+    MEM_READ_LONG(sh4r.r[Rm], tmp);
     sh4r.mac = (sh4r.mac & 0xFFFFFFFF00000000LL) |
-	(uint64_t)((uint32_t)MEM_READ_LONG(sh4r.r[Rm]));
+	(uint64_t)((uint32_t)tmp);
     sh4r.r[Rm] += 4;
 :}
 LDC.L @Rm+, GBR {:
     CHECKRALIGN32( sh4r.r[Rm] );
-    sh4r.gbr = MEM_READ_LONG(sh4r.r[Rm]);
+    MEM_READ_LONG(sh4r.r[Rm], sh4r.gbr);
     sh4r.r[Rm] +=4;
 :}
 LDS Rm, MACL {:
@@ -774,13 +782,13 @@
 :}
 LDS.L @Rm+, PR {:
     CHECKRALIGN32( sh4r.r[Rm] );
-    sh4r.pr = MEM_READ_LONG( sh4r.r[Rm] );
+    MEM_READ_LONG( sh4r.r[Rm], sh4r.pr );
     sh4r.r[Rm] += 4;
 :}
 LDC.L @Rm+, VBR {:
     CHECKPRIV();
     CHECKRALIGN32( sh4r.r[Rm] );
-    sh4r.vbr = MEM_READ_LONG(sh4r.r[Rm]);
+    MEM_READ_LONG(sh4r.r[Rm], sh4r.vbr);
     sh4r.r[Rm] +=4;
 :}
 LDS Rm, PR {: sh4r.pr = sh4r.r[Rm]; :}
@@ -807,7 +815,7 @@
 LDC.L @Rm+, SSR {:
     CHECKPRIV();
     CHECKRALIGN32( sh4r.r[Rm] );
-    sh4r.ssr = MEM_READ_LONG(sh4r.r[Rm]);
+    MEM_READ_LONG(sh4r.r[Rm], sh4r.ssr);
     sh4r.r[Rm] +=4;
 :}
 LDC Rm, SSR {:
@@ -823,7 +831,7 @@
 LDC.L @Rm+, SPC {:
     CHECKPRIV();
     CHECKRALIGN32( sh4r.r[Rm] );
-    sh4r.spc = MEM_READ_LONG(sh4r.r[Rm]);
+    MEM_READ_LONG(sh4r.r[Rm], sh4r.spc);
     sh4r.r[Rm] +=4;
 :}
 LDC Rm, SPC {:
@@ -838,7 +846,7 @@
 :}
 LDS.L @Rm+, FPUL {:
     CHECKRALIGN32( sh4r.r[Rm] );
-    sh4r.fpul = MEM_READ_LONG(sh4r.r[Rm]);
+    MEM_READ_LONG(sh4r.r[Rm], sh4r.fpul);
     sh4r.r[Rm] +=4;
 :}
 LDS Rm, FPUL {: sh4r.fpul = sh4r.r[Rm]; :}
@@ -850,7 +858,7 @@
 :}
 LDS.L @Rm+, FPSCR {:
     CHECKRALIGN32( sh4r.r[Rm] );
-    sh4r.fpscr = MEM_READ_LONG(sh4r.r[Rm]);
+    MEM_READ_LONG(sh4r.r[Rm], sh4r.fpscr);
     sh4r.r[Rm] +=4;
     sh4r.fr_bank = &sh4r.fr[(sh4r.fpscr&FPSCR_FR)>>21][0];
 :}
@@ -868,7 +876,7 @@
 LDC.L @Rm+, DBR {:
     CHECKPRIV();
     CHECKRALIGN32( sh4r.r[Rm] );
-    sh4r.dbr = MEM_READ_LONG(sh4r.r[Rm]);
+    MEM_READ_LONG(sh4r.r[Rm], sh4r.dbr);
     sh4r.r[Rm] +=4;
 :}
 LDC Rm, DBR {:
@@ -884,7 +892,7 @@
 LDC.L @Rm+, Rn_BANK {:
     CHECKPRIV();
     CHECKRALIGN32( sh4r.r[Rm] );
-    sh4r.r_bank[Rn_BANK] = MEM_READ_LONG( sh4r.r[Rm] );
+    MEM_READ_LONG( sh4r.r[Rm], sh4r.r_bank[Rn_BANK] );
     sh4r.r[Rm] += 4;
 :}
 LDC Rm, Rn_BANK {:
--- a/src/sh4/sh4mem.c	Tue Jan 01 04:56:52 2008 +0000
+++ b/src/sh4/sh4mem.c	Tue Jan 01 04:58:57 2008 +0000
@@ -156,16 +162,22 @@
 	(((int64_t)((uint32_t)sh4_read_long(addr+4))) << 32);
 }
 
-int32_t sh4_read_long( sh4addr_t addr )
+int64_t sh4_read_long( sh4addr_t vma )
 {
     sh4ptr_t page;
 
     CHECK_READ_WATCH(addr,4);
 
+    uint64_t ppa = mmu_vma_to_phys_read(vma);
+    if( ppa>>32 ) {
+	return ppa;
+    }
+    sh4addr_t addr = (sh4addr_t)ppa;
+
     if( addr >= 0xE0000000 ) { /* P4 Area, handled specially */
-	return sh4_read_p4( addr );
+	return ZEROEXT32(sh4_read_p4( addr ));
     } else if( (addr&0x1C000000) == 0x0C000000 ) {
-	return *(int32_t *)(sh4_main_ram + (addr&0x00FFFFFF));
+	return ZEROEXT32(*(int32_t *)(sh4_main_ram + (addr&0x00FFFFFF)));
     } else if( (addr&0x1F800000) == 0x04000000 ) {
 	addr = TRANSLATE_VIDEO_64BIT_ADDRESS(addr);
 	pvr2_render_buffer_invalidate(addr, FALSE);
@@ -182,22 +188,28 @@
 	}
 	val = io_rgn[(uintptr_t)page]->io_read(addr&0xFFF);
 	TRACE_IO( "Long read %08X <= %08X", page, (addr&0xFFF), val, addr );
-	return val;
+	return ZEROEXT32(val);
     } else {
-	return *(int32_t *)(page+(addr&0xFFF));
+	return ZEROEXT32(*(int32_t *)(page+(addr&0xFFF)));
     }
 }
 
-int32_t sh4_read_word( sh4addr_t addr )
+int64_t sh4_read_word( sh4addr_t vma )
 {
     sh4ptr_t page;
 
     CHECK_READ_WATCH(addr,2);
 
+    uint64_t ppa = mmu_vma_to_phys_read(vma);
+    if( ppa>>32 ) {
+	return ppa;
+    }
+    sh4addr_t addr = (sh4addr_t)ppa;
+
     if( addr >= 0xE0000000 ) { /* P4 Area, handled specially */
-	return SIGNEXT16(sh4_read_p4( addr ));
+	return ZEROEXT32(SIGNEXT16(sh4_read_p4( addr )));
     } else if( (addr&0x1C000000) == 0x0C000000 ) {
-	return SIGNEXT16(*(int16_t *)(sh4_main_ram + (addr&0x00FFFFFF)));
+	return ZEROEXT32(SIGNEXT16(*(int16_t *)(sh4_main_ram + (addr&0x00FFFFFF))));
     } else if( (addr&0x1F800000) == 0x04000000 ) {
 	addr = TRANSLATE_VIDEO_64BIT_ADDRESS(addr);
 	pvr2_render_buffer_invalidate(addr, FALSE);
@@ -214,22 +226,28 @@
 	}
 	val = SIGNEXT16(io_rgn[(uintptr_t)page]->io_read(addr&0xFFF));
 	TRACE_IO( "Word read %04X <= %08X", page, (addr&0xFFF), val&0xFFFF, addr );
-	return val;
+	return ZEROEXT32(val);
     } else {
-	return SIGNEXT16(*(int16_t *)(page+(addr&0xFFF)));
+	return ZEROEXT32(SIGNEXT16(*(int16_t *)(page+(addr&0xFFF))));
     }
 }
 
-int32_t sh4_read_byte( sh4addr_t addr )
+int64_t sh4_read_byte( sh4addr_t vma )
 {
     sh4ptr_t page;
 
     CHECK_READ_WATCH(addr,1);
 
+    uint64_t ppa = mmu_vma_to_phys_read(vma);
+    if( ppa>>32 ) {
+	return ppa;
+    }
+    sh4addr_t addr = (sh4addr_t)ppa;
+
     if( addr >= 0xE0000000 ) { /* P4 Area, handled specially */
-	return SIGNEXT8(sh4_read_p4( addr ));
+	return ZEROEXT32(SIGNEXT8(sh4_read_p4( addr )));
     } else if( (addr&0x1C000000) == 0x0C000000 ) {
-	return SIGNEXT8(*(int8_t *)(sh4_main_ram + (addr&0x00FFFFFF)));
+	return ZEROEXT32(SIGNEXT8(*(int8_t *)(sh4_main_ram + (addr&0x00FFFFFF))));
     } else if( (addr&0x1F800000) == 0x04000000 ) {
 	addr = TRANSLATE_VIDEO_64BIT_ADDRESS(addr);
 	pvr2_render_buffer_invalidate(addr, FALSE);
@@ -247,9 +265,9 @@
 	}
 	val = SIGNEXT8(io_rgn[(uintptr_t)page]->io_read(addr&0xFFF));
 	TRACE_IO( "Byte read %02X <= %08X", page, (addr&0xFFF), val&0xFF, addr );
-	return val;
+	return ZEROEXT32(val);
     } else {
-	return SIGNEXT8(*(int8_t *)(page+(addr&0xFFF)));
+	return ZEROEXT32(SIGNEXT8(*(int8_t *)(page+(addr&0xFFF))));
     }
 }
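
(Editorial sketch, not part of the changeset.) ZEROEXT32 exists because the read functions now return int64_t with the high word reserved for exception codes: a sign-extended 8- or 16-bit load would otherwise smear into the high word and masquerade as an error.

    /* Sketch only: SIGNEXT16(0x8000) is (int32_t)0xFFFF8000; widening that
     * to int64_t directly gives 0xFFFFFFFFFFFF8000 (non-zero high word,
     * indistinguishable from a fault), while ZEROEXT32 yields
     * 0x00000000FFFF8000 (a clean success value). */
    int64_t ok = ZEROEXT32( SIGNEXT16(0x8000) );
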
@@ -262,19 +280,25 @@
     sh4_write_long( addr+4, (uint32_t)(val>>32) );
 }
 
-void sh4_write_long( sh4addr_t addr, uint32_t val )
+int32_t sh4_write_long( sh4addr_t vma, uint32_t val )
 {
     sh4ptr_t page;
 
+    uint64_t ppa = mmu_vma_to_phys_write(vma);
+    if( ppa>>32 ) {
+	return ppa>>32;
+    }
+    sh4addr_t addr = (sh4addr_t)ppa;
+
     CHECK_WRITE_WATCH(addr,4,val);
 
     if( addr >= 0xE0000000 ) {
 	sh4_write_p4( addr, val );
-	return;
+	return 0;
     } else if( (addr&0x1C000000) == 0x0C000000 ) {
 	*(uint32_t *)(sh4_main_ram + (addr&0x00FFFFFF)) = val;
 	xlat_invalidate_long(addr);
-	return;
+	return 0;
     } else if( (addr&0x1F800000) == 0x04000000 ||
 	       (addr&0x1F800000) == 0x11000000 ) {
 	texcache_invalidate_page(addr& 0x7FFFFF);
@@ -287,7 +311,7 @@
     if( (addr&0x1FFFFFFF) < 0x200000 ) {
 	WARN( "Attempted write to read-only memory: %08X => %08X", val, addr);
 	sh4_stop();
-	return;
+	return 0;
     }
     if( (addr&0x1F800000) == 0x00800000 )
 	asic_g2_write_word();
@@ -297,30 +321,37 @@
 	if( page == NULL ) {
 	    if( (addr & 0x1F000000) >= 0x04000000 &&
 		(addr & 0x1F000000) < 0x07000000 )
-		return;
+		return 0;
 	    WARN( "Long write to missing page: %08X => %08X", val, addr );
-	    return;
+	    return 0;
 	}
 	TRACE_IO( "Long write %08X => %08X", page, (addr&0xFFF), val, addr );
 	io_rgn[(uintptr_t)page]->io_write(addr&0xFFF, val);
     } else {
 	*(uint32_t *)(page+(addr&0xFFF)) = val;
     }
+    return 0;
 }
 
-void sh4_write_word( sh4addr_t addr, uint32_t val )
+int32_t sh4_write_word( sh4addr_t vma, uint32_t val )
 {
     sh4ptr_t page;
 
+    uint64_t ppa = mmu_vma_to_phys_write(vma);
+    if( ppa>>32 ) {
+	return ppa>>32;
+    }
+    sh4addr_t addr = (sh4addr_t)ppa;
+
     CHECK_WRITE_WATCH(addr,2,val);
 
     if( addr >= 0xE0000000 ) {
 	sh4_write_p4( addr, (int16_t)val );
-	return;
+	return 0;
     } else if( (addr&0x1C000000) == 0x0C000000 ) {
 	*(uint16_t *)(sh4_main_ram + (addr&0x00FFFFFF)) = val;
 	xlat_invalidate_word(addr);
-	return;
+	return 0;
     } else if( (addr&0x1F800000) == 0x04000000 ||
 	       (addr&0x1F800000) == 0x11000000 ) {
 	texcache_invalidate_page(addr& 0x7FFFFF);
@@ -333,34 +364,41 @@
     if( (addr&0x1FFFFFFF) < 0x200000 ) {
 	WARN( "Attempted write to read-only memory: %08X => %08X", val, addr);
 	sh4_stop();
-	return;
+	return 0;
     }
     page = page_map[ (addr & 0x1FFFFFFF) >> 12 ];
     if( ((uintptr_t)page) < MAX_IO_REGIONS ) { /* IO Region */
 	if( page == NULL ) {
 	    WARN( "Attempted word write to missing page: %08X", addr );
-	    return;
+	    return 0;
 	}
 	TRACE_IO( "Word write %04X => %08X", page, (addr&0xFFF), val&0xFFFF, addr );
 	io_rgn[(uintptr_t)page]->io_write(addr&0xFFF, val);
     } else {
 	*(uint16_t *)(page+(addr&0xFFF)) = val;
     }
+    return 0;
 }
 
-void sh4_write_byte( sh4addr_t addr, uint32_t val )
+int32_t sh4_write_byte( sh4addr_t vma, uint32_t val )
 {
     sh4ptr_t page;
 
+    uint64_t ppa = mmu_vma_to_phys_write(vma);
+    if( ppa>>32 ) {
+	return ppa>>32;
+    }
+    sh4addr_t addr = (sh4addr_t)ppa;
+
     CHECK_WRITE_WATCH(addr,1,val);
 
     if( addr >= 0xE0000000 ) {
 	sh4_write_p4( addr, (int8_t)val );
-	return;
+	return 0;
     } else if( (addr&0x1C000000) == 0x0C000000 ) {
 	*(uint8_t *)(sh4_main_ram + (addr&0x00FFFFFF)) = val;
 	xlat_invalidate_word(addr);
-	return;
+	return 0;
    } else if( (addr&0x1F800000) == 0x04000000 ||
 	       (addr&0x1F800000) == 0x11000000 ) {
 	texcache_invalidate_page(addr& 0x7FFFFF);
@@ -373,19 +411,20 @@
     if( (addr&0x1FFFFFFF) < 0x200000 ) {
 	WARN( "Attempted write to read-only memory: %08X => %08X", val, addr);
 	sh4_stop();
-	return;
+	return 0;
     }
     page = page_map[ (addr & 0x1FFFFFFF) >> 12 ];
     if( ((uintptr_t)page) < MAX_IO_REGIONS ) { /* IO Region */
 	if( page == NULL ) {
 	    WARN( "Attempted byte write to missing page: %08X", addr );
-	    return;
+	    return 0;
 	}
 	TRACE_IO( "Byte write %02X => %08X", page, (addr&0xFFF), val&0xFF, addr );
 	io_rgn[(uintptr_t)page]->io_write( (addr&0xFFF), val);
     } else {
 	*(uint8_t *)(page+(addr&0xFFF)) = val;
     }
+    return 0;
 }
 
@@ -434,3 +473,20 @@
     uint32_t target = (addr&0x03FFFFE0) | hi;
     mem_copy_to_sh4( target, src, 32 );
 }
+
+sh4ptr_t sh4_get_region_by_vma( sh4addr_t vma )
+{
+    uint64_t ppa = mmu_vma_to_phys_read(vma);
+    if( ppa>>32 ) {
+	return 0;
+    }
+
+    sh4addr_t addr = (sh4addr_t)ppa;
+    sh4ptr_t page = page_map[ (addr & 0x1FFFFFFF) >> 12 ];
+    if( ((uintptr_t)page) < MAX_IO_REGIONS ) { /* IO Region */
+	return NULL;
+    } else {
+	return page+(addr&0xFFF);
+    }
+}
+
--- a/src/sh4/sh4trans.c	Tue Jan 01 04:56:52 2008 +0000
+++ b/src/sh4/sh4trans.c	Tue Jan 01 04:58:57 2008 +0000
@@ -23,6 +23,9 @@
 #include "sh4/sh4trans.h"
 #include "sh4/xltcache.h"
 
+
+uint32_t last_pc;
+void *last_code;
 /**
  * Execute a timeslice using translated code only (ie translate/execute loop)
  * Note this version does not support breakpoints
@@ -60,9 +63,22 @@
 	    code = xlat_get_code(sh4r.pc);
 	    if( code == NULL ) {
+		uint64_t ppa = mmu_vma_to_phys_exec( sh4r.pc );
+		if( ppa>>32 ) {
+		    // not found, exception
+		    ppa = mmu_vma_to_phys_exec( sh4r.pc );
+		    if( ppa>>32 ) {
+			// double fault - halt
+			dreamcast_stop();
+			ERROR( "Double fault - halting" );
+			return nanosecs;
+		    }
+		}
 		code = sh4_translate_basic_block( sh4r.pc );
 	    }
 	}
+	last_pc = sh4r.pc;
+	last_code = code;
 	code = code();
     }
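
(Editorial reading of the retry above, not part of the changeset.) The first failed mmu_vma_to_phys_exec() raises the TLB-miss exception as a side effect, which redirects sh4r.pc to the exception vector; the second call then re-reads sh4r.pc and translates the handler address itself. Only when the handler cannot be translated either is it treated as an unrecoverable double fault:

    if( mmu_vma_to_phys_exec( sh4r.pc ) >> 32 ) {      /* faulted: pc moved to the vector */
        if( mmu_vma_to_phys_exec( sh4r.pc ) >> 32 ) {  /* the vector faults too */
            dreamcast_stop();                          /* double fault - give up */
        }
    }
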
--- a/src/sh4/sh4x86.c	Tue Jan 01 04:56:52 2008 +0000
+++ b/src/sh4/sh4x86.c	Tue Jan 01 04:58:57 2008 +0000
@@ -34,6 +34,12 @@
 
 #define DEFAULT_BACKPATCH_SIZE 4096
 
+struct backpatch_record {
+    uint32_t *fixup_addr;
+    uint32_t fixup_icount;
+    uint32_t exc_code;
+};
+
 /**
  * Struct to manage internal translation state. This state is not saved -
  * it is only valid between calls to sh4_translate_begin_block() and
@@ -49,7 +55,7 @@
     int tstate;
 
     /* Allocated memory for the (block-wide) back-patch list */
-    uint32_t **backpatch_list;
+    struct backpatch_record *backpatch_list;
     uint32_t backpatch_posn;
     uint32_t backpatch_size;
 };
@@ -75,14 +81,6 @@
     OP(0x70+ (sh4_x86.tstate^1)); OP(rel8); \
     MARK_JMP(rel8, label)
 
-
-#define EXIT_DATA_ADDR_READ 0
-#define EXIT_DATA_ADDR_WRITE 7
-#define EXIT_ILLEGAL 14
-#define EXIT_SLOT_ILLEGAL 21
-#define EXIT_FPU_DISABLED 28
-#define EXIT_SLOT_FPU_DISABLED 35
-
 static struct sh4_x86_state sh4_x86;
 
 static uint32_t max_int = 0x7FFFFFFF;
@@ -93,26 +91,25 @@
 void sh4_x86_init()
 {
     sh4_x86.backpatch_list = malloc(DEFAULT_BACKPATCH_SIZE);
-    sh4_x86.backpatch_size = DEFAULT_BACKPATCH_SIZE / sizeof(uint32_t *);
+    sh4_x86.backpatch_size = DEFAULT_BACKPATCH_SIZE / sizeof(struct backpatch_record);
 }
 
-static void sh4_x86_add_backpatch( uint8_t *ptr )
+static void sh4_x86_add_backpatch( uint8_t *fixup_addr, uint32_t fixup_pc, uint32_t exc_code )
 {
     if( sh4_x86.backpatch_posn == sh4_x86.backpatch_size ) {
 	sh4_x86.backpatch_size <<= 1;
-	sh4_x86.backpatch_list = realloc( sh4_x86.backpatch_list, sh4_x86.backpatch_size * sizeof(uint32_t *) );
+	sh4_x86.backpatch_list = realloc( sh4_x86.backpatch_list,
+					  sh4_x86.backpatch_size * sizeof(struct backpatch_record));
 	assert( sh4_x86.backpatch_list != NULL );
     }
-    sh4_x86.backpatch_list[sh4_x86.backpatch_posn++] = (uint32_t *)ptr;
-}
-
-static void sh4_x86_do_backpatch( uint8_t *reloc_base )
-{
-    unsigned int i;
-    for( i=0; i<sh4_x86.backpatch_posn; i++ ) {
-	*sh4_x86.backpatch_list[i] += (uint32_t)reloc_base;
-    }
-}
+    sh4_x86.backpatch_list[sh4_x86.backpatch_posn].fixup_addr = (uint32_t *)fixup_addr;
+    sh4_x86.backpatch_list[sh4_x86.backpatch_posn].fixup_icount = (fixup_pc - sh4_x86.block_start_pc)>>1;
+    sh4_x86.backpatch_list[sh4_x86.backpatch_posn].exc_code = exc_code;
+    sh4_x86.backpatch_posn++;
 }
 
 /**
@@ -266,97 +263,60 @@
 }
 
 /* Exception checks - Note that all exception checks will clobber EAX */
-#define precheck() load_imm32(R_EDX, (pc-sh4_x86.block_start_pc-(sh4_x86.in_delay_slot?2:0))>>1)
-
 #define check_priv( ) \
     if( !sh4_x86.priv_checked ) { \
 	sh4_x86.priv_checked = TRUE;\
-	precheck();\
 	load_spreg( R_EAX, R_SR );\
 	AND_imm32_r32( SR_MD, R_EAX );\
 	if( sh4_x86.in_delay_slot ) {\
-	    JE_exit( EXIT_SLOT_ILLEGAL );\
+	    JE_exc( EXC_SLOT_ILLEGAL );\
 	} else {\
-	    JE_exit( EXIT_ILLEGAL );\
+	    JE_exc( EXC_ILLEGAL );\
 	}\
     }\
 
-static void check_priv_no_precheck()
-{
-    if( !sh4_x86.priv_checked ) {
-	sh4_x86.priv_checked = TRUE;
-	load_spreg( R_EAX, R_SR );
-	AND_imm32_r32( SR_MD, R_EAX );
-	if( sh4_x86.in_delay_slot ) {
-	    JE_exit( EXIT_SLOT_ILLEGAL );
-	} else {
-	    JE_exit( EXIT_ILLEGAL );
-	}
-    }
-}
-
 #define check_fpuen( ) \
     if( !sh4_x86.fpuen_checked ) {\
 	sh4_x86.fpuen_checked = TRUE;\
-	precheck();\
 	load_spreg( R_EAX, R_SR );\
 	AND_imm32_r32( SR_FD, R_EAX );\
 	if( sh4_x86.in_delay_slot ) {\
-	    JNE_exit(EXIT_SLOT_FPU_DISABLED);\
+	    JNE_exc(EXC_SLOT_FPU_DISABLED);\
 	} else {\
-	    JNE_exit(EXIT_FPU_DISABLED);\
+	    JNE_exc(EXC_FPU_DISABLED);\
 	}\
     }
 
-static void check_fpuen_no_precheck()
-{
-    if( !sh4_x86.fpuen_checked ) {
-	sh4_x86.fpuen_checked = TRUE;
-	load_spreg( R_EAX, R_SR );
-	AND_imm32_r32( SR_FD, R_EAX );
-	if( sh4_x86.in_delay_slot ) {
-	    JNE_exit(EXIT_SLOT_FPU_DISABLED);
-	} else {
-	    JNE_exit(EXIT_FPU_DISABLED);
-	}
-    }
-}
-
-static void check_ralign16( int x86reg )
-{
-    TEST_imm32_r32( 0x00000001, x86reg );
-    JNE_exit(EXIT_DATA_ADDR_READ);
-}
+#define check_ralign16( x86reg ) \
+    TEST_imm32_r32( 0x00000001, x86reg ); \
+    JNE_exc(EXC_DATA_ADDR_READ)
 
-static void check_walign16( int x86reg )
-{
-    TEST_imm32_r32( 0x00000001, x86reg );
-    JNE_exit(EXIT_DATA_ADDR_WRITE);
-}
+#define check_walign16( x86reg ) \
+    TEST_imm32_r32( 0x00000001, x86reg ); \
+    JNE_exc(EXC_DATA_ADDR_WRITE);
 
-static void check_ralign32( int x86reg )
-{
-    TEST_imm32_r32( 0x00000003, x86reg );
-    JNE_exit(EXIT_DATA_ADDR_READ);
-}
+#define check_ralign32( x86reg ) \
+    TEST_imm32_r32( 0x00000003, x86reg ); \
+    JNE_exc(EXC_DATA_ADDR_READ)
 
-static void check_walign32( int x86reg )
-{
-    TEST_imm32_r32( 0x00000003, x86reg );
-    JNE_exit(EXIT_DATA_ADDR_WRITE);
-}
+#define check_walign32( x86reg ) \
+    TEST_imm32_r32( 0x00000003, x86reg ); \
+    JNE_exc(EXC_DATA_ADDR_WRITE);
 
 #define UNDEF()
 #define MEM_RESULT(value_reg) if(value_reg != R_EAX) { MOV_r32_r32(R_EAX,value_reg); }
-#define MEM_READ_BYTE( addr_reg, value_reg ) call_func1(sh4_read_byte, addr_reg ); MEM_RESULT(value_reg)
-#define MEM_READ_WORD( addr_reg, value_reg ) call_func1(sh4_read_word, addr_reg ); MEM_RESULT(value_reg)
-#define MEM_READ_LONG( addr_reg, value_reg ) call_func1(sh4_read_long, addr_reg ); MEM_RESULT(value_reg)
-#define MEM_WRITE_BYTE( addr_reg, value_reg ) call_func2(sh4_write_byte, addr_reg, value_reg)
-#define MEM_WRITE_WORD( addr_reg, value_reg ) call_func2(sh4_write_word, addr_reg, value_reg)
-#define MEM_WRITE_LONG( addr_reg, value_reg ) call_func2(sh4_write_long, addr_reg, value_reg)
+#define MEM_READ_BYTE( addr_reg, value_reg ) call_func1(sh4_read_byte, addr_reg ); TEST_r32_r32( R_EDX, R_EDX ); JNE_exc(-1); MEM_RESULT(value_reg)
+#define MEM_READ_WORD( addr_reg, value_reg ) call_func1(sh4_read_word, addr_reg ); TEST_r32_r32( R_EDX, R_EDX ); JNE_exc(-1); MEM_RESULT(value_reg)
+#define MEM_READ_LONG( addr_reg, value_reg ) call_func1(sh4_read_long, addr_reg ); TEST_r32_r32( R_EDX, R_EDX ); JNE_exc(-1); MEM_RESULT(value_reg)
+#define MEM_WRITE_BYTE( addr_reg, value_reg ) call_func2(sh4_write_byte, addr_reg, value_reg); TEST_r32_r32( R_EAX, R_EAX ); JNE_exc(-1);
+#define MEM_WRITE_WORD( addr_reg, value_reg ) call_func2(sh4_write_word, addr_reg, value_reg); TEST_r32_r32( R_EAX, R_EAX ); JNE_exc(-1);
+#define MEM_WRITE_LONG( addr_reg, value_reg ) call_func2(sh4_write_long, addr_reg, value_reg); TEST_r32_r32( R_EAX, R_EAX ); JNE_exc(-1);
 
-#define SLOTILLEGAL() precheck(); JMP_exit(EXIT_SLOT_ILLEGAL); sh4_x86.in_delay_slot = FALSE; return 1;
+#define MEM_READ_SIZE (CALL_FUNC1_SIZE+8)
+#define MEM_WRITE_SIZE (CALL_FUNC2_SIZE+8)
+
+#define SLOTILLEGAL() JMP_exc(EXC_SLOT_ILLEGAL); sh4_x86.in_delay_slot = FALSE; return 1;
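
(Editorial sketch; the JE_exc/JNE_exc/JMP_exc emitter bodies are outside this diff, and OUT32 is an assumed output helper.) Each check now emits a conditional jump with a placeholder rel32 and registers it via sh4_x86_add_backpatch(); the block trailers shown earlier then point it at the raise-exception stub (exc_code >= 0) or at the pre-raised cleanup stub (exc_code == -1). Plausibly:

    static void emit_jne_exc( int exc_code, uint32_t pc )
    {
        OP(0x0F); OP(0x85);           /* jne rel32 */
        uint8_t *fixup = xlat_output; /* address of the displacement field */
        OUT32(0);                     /* placeholder, patched by the trailer loop */
        sh4_x86_add_backpatch( fixup, pc, exc_code );
    }
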
@@ -389,7 +349,8 @@
 if( sh4_icache != NULL && pageaddr == sh4_icache_addr ) {
 ir = sh4_icache[(pc&0xFFF)>>1];
 } else {
- sh4_icache = (uint16_t *)mem_get_page(pc);
+ uint64_t phys = mmu_vma_to_phys_exec(pc);
+ sh4_icache = (uint16_t *)mem_get_page((uint32_t)phys);
 if( ((uintptr_t)sh4_icache) < MAX_IO_REGIONS ) {
 /* If someone's actually been so daft as to try to execute out of an IO
  * region, fallback on the full-blown memory read
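Instruction fetch now goes through the ITLB as well: the decoder's one-page icache is refilled by first translating pc with mmu_vma_to_phys_exec() and only then asking mem_get_page() for the backing host page. Note that this hunk does not yet test the failure bit before using the result, consistent with the "first pass" caveat in the commit message; a more defensive refill under the same conventions might look like the sketch below:

    #include <stdint.h>

    extern uint64_t mmu_vma_to_phys_exec( uint32_t vma );  /* from this patch */
    extern void *mem_get_page( uint32_t addr );            /* existing helper */

    /* Translate the fetch address first, then look up the host page for the
     * physical address. A failed translation (high word set) has already
     * raised the ITLB exception, so the caller just bails out. */
    static uint16_t *icache_refill( uint32_t pc )
    {
        uint64_t phys = mmu_vma_to_phys_exec( pc );
        if( phys >> 32 ) {
            return NULL;
        }
        return (uint16_t *)mem_get_page( (uint32_t)phys );
    }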
@@ -540,7 +501,6 @@
 uint32_t Rn = ((ir>>8)&0xF);
 load_reg( R_EAX, 0 );
 load_reg( R_ECX, Rn );
- precheck();
 check_walign32( R_ECX );
 MEM_WRITE_LONG( R_ECX, R_EAX );
 sh4_x86.tstate = TSTATE_NONE;
@@ -568,7 +528,6 @@
 load_reg( R_EAX, 0 );
 load_reg( R_ECX, Rn );
 ADD_r32_r32( R_EAX, R_ECX );
- precheck();
 check_walign16( R_ECX );
 load_reg( R_EAX, Rm );
 MEM_WRITE_WORD( R_ECX, R_EAX );
@@ -581,7 +540,6 @@
 load_reg( R_EAX, 0 );
 load_reg( R_ECX, Rn );
 ADD_r32_r32( R_EAX, R_ECX );
- precheck();
 check_walign32( R_ECX );
 load_reg( R_EAX, Rm );
 MEM_WRITE_LONG( R_ECX, R_EAX );
@@ -803,7 +761,6 @@
 load_reg( R_EAX, 0 );
 load_reg( R_ECX, Rm );
 ADD_r32_r32( R_EAX, R_ECX );
- precheck();
 check_ralign16( R_ECX );
 MEM_READ_WORD( R_ECX, R_EAX );
 store_reg( R_EAX, Rn );
@@ -816,7 +773,6 @@
 load_reg( R_EAX, 0 );
 load_reg( R_ECX, Rm );
 ADD_r32_r32( R_EAX, R_ECX );
- precheck();
 check_ralign32( R_ECX );
 MEM_READ_LONG( R_ECX, R_EAX );
 store_reg( R_EAX, Rn );
@@ -827,7 +783,6 @@
 { /* MAC.L @Rm+, @Rn+ */
 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
 load_reg( R_ECX, Rm );
- precheck();
 check_ralign32( R_ECX );
 load_reg( R_ECX, Rn );
 check_ralign32( R_ECX );
@@ -861,7 +816,6 @@
 load_reg( R_ECX, Rn );
 load_reg( R_EAX, Rm );
 ADD_imm32_r32( disp, R_ECX );
- precheck();
 check_walign32( R_ECX );
 MEM_WRITE_LONG( R_ECX, R_EAX );
 sh4_x86.tstate = TSTATE_NONE;
@@ -882,7 +836,6 @@
 { /* MOV.W Rm, @Rn */
 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
 load_reg( R_ECX, Rn );
- precheck();
 check_walign16( R_ECX );
 load_reg( R_EAX, Rm );
 MEM_WRITE_WORD( R_ECX, R_EAX );
@@ -894,7 +847,6 @@
 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
 load_reg( R_EAX, Rm );
 load_reg( R_ECX, Rn );
- precheck();
 check_walign32(R_ECX);
 MEM_WRITE_LONG( R_ECX, R_EAX );
 sh4_x86.tstate = TSTATE_NONE;
@@ -915,7 +867,6 @@
 { /* MOV.W Rm, @-Rn */
 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
 load_reg( R_ECX, Rn );
- precheck();
 check_walign16( R_ECX );
 load_reg( R_EAX, Rm );
 ADD_imm8s_r32( -2, R_ECX );
@@ -929,7 +880,6 @@
 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
 load_reg( R_EAX, Rm );
 load_reg( R_ECX, Rn );
- precheck();
 check_walign32( R_ECX );
 ADD_imm8s_r32( -4, R_ECX );
 store_reg( R_ECX, Rn );
@@ -1307,7 +1257,6 @@
 { /* STS.L MACH, @-Rn */
 uint32_t Rn = ((ir>>8)&0xF);
 load_reg( R_ECX, Rn );
- precheck();
 check_walign32( R_ECX );
 ADD_imm8s_r32( -4, R_ECX );
 store_reg( R_ECX, Rn );
@@ -1320,7 +1269,6 @@
 { /* STS.L MACL, @-Rn */
 uint32_t Rn = ((ir>>8)&0xF);
 load_reg( R_ECX, Rn );
- precheck();
 check_walign32( R_ECX );
 ADD_imm8s_r32( -4, R_ECX );
 store_reg( R_ECX, Rn );
@@ -1333,7 +1281,6 @@
 { /* STS.L PR, @-Rn */
 uint32_t Rn = ((ir>>8)&0xF);
 load_reg( R_ECX, Rn );
- precheck();
 check_walign32( R_ECX );
 ADD_imm8s_r32( -4, R_ECX );
 store_reg( R_ECX, Rn );
@@ -1345,8 +1292,7 @@
 case 0x3:
 { /* STC.L SGR, @-Rn */
 uint32_t Rn = ((ir>>8)&0xF);
- precheck();
- check_priv_no_precheck();
+ check_priv();
 load_reg( R_ECX, Rn );
 check_walign32( R_ECX );
 ADD_imm8s_r32( -4, R_ECX );
@@ -1360,7 +1306,6 @@
 { /* STS.L FPUL, @-Rn */
 uint32_t Rn = ((ir>>8)&0xF);
 load_reg( R_ECX, Rn );
- precheck();
 check_walign32( R_ECX );
 ADD_imm8s_r32( -4, R_ECX );
 store_reg( R_ECX, Rn );
@@ -1373,7 +1318,6 @@
 { /* STS.L FPSCR, @-Rn */
 uint32_t Rn = ((ir>>8)&0xF);
 load_reg( R_ECX, Rn );
- precheck();
 check_walign32( R_ECX );
 ADD_imm8s_r32( -4, R_ECX );
 store_reg( R_ECX, Rn );
@@ -1385,8 +1329,7 @@
 case 0xF:
 { /* STC.L DBR, @-Rn */
 uint32_t Rn = ((ir>>8)&0xF);
- precheck();
- check_priv_no_precheck();
+ check_priv();
 load_reg( R_ECX, Rn );
 check_walign32( R_ECX );
 ADD_imm8s_r32( -4, R_ECX );
@@ -1408,8 +1351,7 @@
 case 0x0:
 { /* STC.L SR, @-Rn */
 uint32_t Rn = ((ir>>8)&0xF);
- precheck();
- check_priv_no_precheck();
+ check_priv();
 call_func0( sh4_read_sr );
 load_reg( R_ECX, Rn );
 check_walign32( R_ECX );
@@ -1423,7 +1365,6 @@
 { /* STC.L GBR, @-Rn */
 uint32_t Rn = ((ir>>8)&0xF);
 load_reg( R_ECX, Rn );
- precheck();
 check_walign32( R_ECX );
 ADD_imm8s_r32( -4, R_ECX );
 store_reg( R_ECX, Rn );
@@ -1435,8 +1376,7 @@
 case 0x2:
 { /* STC.L VBR, @-Rn */
 uint32_t Rn = ((ir>>8)&0xF);
- precheck();
- check_priv_no_precheck();
+ check_priv();
 load_reg( R_ECX, Rn );
 check_walign32( R_ECX );
 ADD_imm8s_r32( -4, R_ECX );
@@ -1449,8 +1389,7 @@
 case 0x3:
 { /* STC.L SSR, @-Rn */
 uint32_t Rn = ((ir>>8)&0xF);
- precheck();
- check_priv_no_precheck();
+ check_priv();
 load_reg( R_ECX, Rn );
 check_walign32( R_ECX );
 ADD_imm8s_r32( -4, R_ECX );
@@ -1463,8 +1402,7 @@
 case 0x4:
 { /* STC.L SPC, @-Rn */
 uint32_t Rn = ((ir>>8)&0xF);
- precheck();
- check_priv_no_precheck();
+ check_priv();
 load_reg( R_ECX, Rn );
 check_walign32( R_ECX );
 ADD_imm8s_r32( -4, R_ECX );
@@ -1482,8 +1420,7 @@
 case 0x1:
 { /* STC.L Rm_BANK, @-Rn */
 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm_BANK = ((ir>>4)&0x7);
- precheck();
- check_priv_no_precheck();
+ check_priv();
 load_reg( R_ECX, Rn );
 check_walign32( R_ECX );
 ADD_imm8s_r32( -4, R_ECX );
@@ -1570,7 +1507,6 @@
 { /* LDS.L @Rm+, MACH */
 uint32_t Rm = ((ir>>8)&0xF);
 load_reg( R_EAX, Rm );
- precheck();
 check_ralign32( R_EAX );
 MOV_r32_r32( R_EAX, R_ECX );
 ADD_imm8s_r32( 4, R_EAX );
@@ -1584,7 +1520,6 @@
 { /* LDS.L @Rm+, MACL */
 uint32_t Rm = ((ir>>8)&0xF);
 load_reg( R_EAX, Rm );
- precheck();
 check_ralign32( R_EAX );
 MOV_r32_r32( R_EAX, R_ECX );
 ADD_imm8s_r32( 4, R_EAX );
@@ -1598,7 +1533,6 @@
 { /* LDS.L @Rm+, PR */
 uint32_t Rm = ((ir>>8)&0xF);
 load_reg( R_EAX, Rm );
- precheck();
 check_ralign32( R_EAX );
 MOV_r32_r32( R_EAX, R_ECX );
 ADD_imm8s_r32( 4, R_EAX );
@@ -1611,8 +1545,7 @@
 case 0x3:
 { /* LDC.L @Rm+, SGR */
 uint32_t Rm = ((ir>>8)&0xF);
- precheck();
- check_priv_no_precheck();
+ check_priv();
 load_reg( R_EAX, Rm );
 check_ralign32( R_EAX );
 MOV_r32_r32( R_EAX, R_ECX );
@@ -1627,7 +1560,6 @@
 { /* LDS.L @Rm+, FPUL */
 uint32_t Rm = ((ir>>8)&0xF);
 load_reg( R_EAX, Rm );
- precheck();
 check_ralign32( R_EAX );
 MOV_r32_r32( R_EAX, R_ECX );
 ADD_imm8s_r32( 4, R_EAX );
@@ -1641,7 +1573,6 @@
 { /* LDS.L @Rm+, FPSCR */
 uint32_t Rm = ((ir>>8)&0xF);
 load_reg( R_EAX, Rm );
- precheck();
 check_ralign32( R_EAX );
 MOV_r32_r32( R_EAX, R_ECX );
 ADD_imm8s_r32( 4, R_EAX );
@@ -1655,8 +1586,7 @@
 case 0xF:
 { /* LDC.L @Rm+, DBR */
 uint32_t Rm = ((ir>>8)&0xF);
- precheck();
- check_priv_no_precheck();
+ check_priv();
 load_reg( R_EAX, Rm );
 check_ralign32( R_EAX );
 MOV_r32_r32( R_EAX, R_ECX );
@@ -1682,8 +1612,7 @@
 if( sh4_x86.in_delay_slot ) {
 SLOTILLEGAL();
 } else {
- precheck();
- check_priv_no_precheck();
+ check_priv();
 load_reg( R_EAX, Rm );
 check_ralign32( R_EAX );
 MOV_r32_r32( R_EAX, R_ECX );
@@ -1701,7 +1630,6 @@
 { /* LDC.L @Rm+, GBR */
 uint32_t Rm = ((ir>>8)&0xF);
 load_reg( R_EAX, Rm );
- precheck();
 check_ralign32( R_EAX );
 MOV_r32_r32( R_EAX, R_ECX );
 ADD_imm8s_r32( 4, R_EAX );
@@ -1714,8 +1642,7 @@
 case 0x2:
 { /* LDC.L @Rm+, VBR */
 uint32_t Rm = ((ir>>8)&0xF);
- precheck();
- check_priv_no_precheck();
+ check_priv();
 load_reg( R_EAX, Rm );
 check_ralign32( R_EAX );
 MOV_r32_r32( R_EAX, R_ECX );
@@ -1729,8 +1656,7 @@
 case 0x3:
 { /* LDC.L @Rm+, SSR */
 uint32_t Rm = ((ir>>8)&0xF);
- precheck();
- check_priv_no_precheck();
+ check_priv();
 load_reg( R_EAX, Rm );
 check_ralign32( R_EAX );
 MOV_r32_r32( R_EAX, R_ECX );
@@ -1744,8 +1670,7 @@
 case 0x4:
 { /* LDC.L @Rm+, SPC */
 uint32_t Rm = ((ir>>8)&0xF);
- precheck();
- check_priv_no_precheck();
+ check_priv();
 load_reg( R_EAX, Rm );
 check_ralign32( R_EAX );
 MOV_r32_r32( R_EAX, R_ECX );
@@ -1764,8 +1689,7 @@
 case 0x1:
 { /* LDC.L @Rm+, Rn_BANK */
 uint32_t Rm = ((ir>>8)&0xF); uint32_t Rn_BANK = ((ir>>4)&0x7);
- precheck();
- check_priv_no_precheck();
+ check_priv();
 load_reg( R_EAX, Rm );
 check_ralign32( R_EAX );
 MOV_r32_r32( R_EAX, R_ECX );
@@ -2090,7 +2014,6 @@
 { /* MAC.W @Rm+, @Rn+ */
 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
 load_reg( R_ECX, Rm );
- precheck();
 check_ralign16( R_ECX );
 load_reg( R_ECX, Rn );
 check_ralign16( R_ECX );
@@ -2137,7 +2060,6 @@
 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF); uint32_t disp = (ir&0xF)<<2;
 load_reg( R_ECX, Rm );
 ADD_imm8s_r32( disp, R_ECX );
- precheck();
 check_ralign32( R_ECX );
 MEM_READ_LONG( R_ECX, R_EAX );
 store_reg( R_EAX, Rn );
@@ -2159,7 +2081,6 @@
 { /* MOV.W @Rm, Rn */
 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
 load_reg( R_ECX, Rm );
- precheck();
 check_ralign16( R_ECX );
 MEM_READ_WORD( R_ECX, R_EAX );
 store_reg( R_EAX, Rn );
@@ -2170,7 +2091,6 @@
 { /* MOV.L @Rm, Rn */
 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
 load_reg( R_ECX, Rm );
- precheck();
 check_ralign32( R_ECX );
 MEM_READ_LONG( R_ECX, R_EAX );
 store_reg( R_EAX, Rn );
@@ -2200,7 +2120,6 @@
 { /* MOV.W @Rm+, Rn */
 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
 load_reg( R_EAX, Rm );
- precheck();
 check_ralign16( R_EAX );
 MOV_r32_r32( R_EAX, R_ECX );
 ADD_imm8s_r32( 2, R_EAX );
@@ -2214,7 +2133,6 @@
 { /* MOV.L @Rm+, Rn */
 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
 load_reg( R_EAX, Rm );
- precheck();
 check_ralign32( R_EAX );
 MOV_r32_r32( R_EAX, R_ECX );
 ADD_imm8s_r32( 4, R_EAX );
@@ -2335,7 +2253,6 @@
 load_reg( R_ECX, Rn );
 load_reg( R_EAX, 0 );
 ADD_imm32_r32( disp, R_ECX );
- precheck();
 check_walign16( R_ECX );
 MEM_WRITE_WORD( R_ECX, R_EAX );
 sh4_x86.tstate = TSTATE_NONE;
@@ -2356,7 +2273,6 @@
 uint32_t Rm = ((ir>>4)&0xF); uint32_t disp = (ir&0xF)<<1;
 load_reg( R_ECX, Rm );
 ADD_imm32_r32( disp, R_ECX );
- precheck();
 check_ralign16( R_ECX );
 MEM_READ_WORD( R_ECX, R_EAX );
 store_reg( R_EAX, 0 );
@@ -2506,7 +2422,6 @@
 load_spreg( R_ECX, R_GBR );
 load_reg( R_EAX, 0 );
 ADD_imm32_r32( disp, R_ECX );
- precheck();
 check_walign16( R_ECX );
 MEM_WRITE_WORD( R_ECX, R_EAX );
 sh4_x86.tstate = TSTATE_NONE;
@@ -2518,7 +2433,6 @@
 load_spreg( R_ECX, R_GBR );
 load_reg( R_EAX, 0 );
 ADD_imm32_r32( disp, R_ECX );
- precheck();
 check_walign32( R_ECX );
 MEM_WRITE_LONG( R_ECX, R_EAX );
 sh4_x86.tstate = TSTATE_NONE;
@@ -2556,7 +2470,6 @@
 uint32_t disp = (ir&0xFF)<<1;
 load_spreg( R_ECX, R_GBR );
 ADD_imm32_r32( disp, R_ECX );
- precheck();
 check_ralign16( R_ECX );
 MEM_READ_WORD( R_ECX, R_EAX );
 store_reg( R_EAX, 0 );
@@ -2568,7 +2481,6 @@
 uint32_t disp = (ir&0xFF)<<2;
 load_spreg( R_ECX, R_GBR );
 ADD_imm32_r32( disp, R_ECX );
- precheck();
 check_ralign32( R_ECX );
 MEM_READ_LONG( R_ECX, R_EAX );
 store_reg( R_EAX, 0 );
@@ -2685,7 +2597,7 @@
 SLOTILLEGAL();
 } else {
 uint32_t target = (pc & 0xFFFFFFFC) + disp + 4;
- sh4ptr_t ptr = mem_get_region(target);
+ sh4ptr_t ptr = sh4_get_region_by_vma(target);
 if( ptr != NULL ) {
 MOV_moff32_EAX( ptr );
 } else {
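The mem_get_region() to sh4_get_region_by_vma() switch above matters because MOV.L @(disp, PC) is how SH4 code loads constants, and the translator inlines such constants directly into the generated block when it can. With the TLB enabled the PC-relative target is a virtual address, so it must be translated before a host pointer can be baked in; sh4_get_region_by_vma() returns NULL for IO regions and untranslatable addresses, in which case the translator falls back to a runtime read. A sketch of the decision, with emit_inline_load()/emit_runtime_load() as hypothetical stand-ins for the MOV_moff32_EAX and fallback paths:

    #include <stdint.h>

    typedef uint32_t sh4addr_t;
    typedef unsigned char *sh4ptr_t;

    extern sh4ptr_t sh4_get_region_by_vma( sh4addr_t vma );  /* from this patch */
    extern void emit_inline_load( sh4ptr_t host_ptr );       /* hypothetical */
    extern void emit_runtime_load( sh4addr_t vma );          /* hypothetical */

    static void translate_pc_relative_load( uint32_t pc, uint32_t disp )
    {
        sh4addr_t target = (pc & 0xFFFFFFFC) + disp + 4;  /* SH4 addressing rule */
        sh4ptr_t ptr = sh4_get_region_by_vma( target );
        if( ptr != NULL ) {
            emit_inline_load( ptr );      /* constant lives in plain RAM */
        } else {
            emit_runtime_load( target );  /* IO or unmapped: read at runtime */
        }
    }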
@@ -2839,14 +2751,13 @@
 case 0x6:
 { /* FMOV @(R0, Rm), FRn */
 uint32_t FRn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
- precheck();
- check_fpuen_no_precheck();
+ check_fpuen();
 load_reg( R_ECX, Rm );
 ADD_sh4r_r32( REG_OFFSET(r[0]), R_ECX );
 check_ralign32( R_ECX );
 load_spreg( R_EDX, R_FPSCR );
 TEST_imm32_r32( FPSCR_SZ, R_EDX );
- JNE_rel8(8 + CALL_FUNC1_SIZE, doublesize);
+ JNE_rel8(8 + MEM_READ_SIZE, doublesize);
 MEM_READ_LONG( R_ECX, R_EAX );
 load_fr_bank( R_EDX );
 store_fr( R_EDX, R_EAX, FRn );
@@ -2874,14 +2785,13 @@
 case 0x7:
 { /* FMOV FRm, @(R0, Rn) */
 uint32_t Rn = ((ir>>8)&0xF); uint32_t FRm = ((ir>>4)&0xF);
- precheck();
- check_fpuen_no_precheck();
+ check_fpuen();
 load_reg( R_ECX, Rn );
 ADD_sh4r_r32( REG_OFFSET(r[0]), R_ECX );
 check_walign32( R_ECX );
 load_spreg( R_EDX, R_FPSCR );
 TEST_imm32_r32( FPSCR_SZ, R_EDX );
- JNE_rel8(8 + CALL_FUNC2_SIZE, doublesize);
+ JNE_rel8(8 + MEM_WRITE_SIZE, doublesize);
 load_fr_bank( R_EDX );
 load_fr( R_EDX, R_EAX, FRm );
 MEM_WRITE_LONG( R_ECX, R_EAX ); // 12
@@ -2908,13 +2818,12 @@
 case 0x8:
 { /* FMOV @Rm, FRn */
 uint32_t FRn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
- precheck();
- check_fpuen_no_precheck();
+ check_fpuen();
 load_reg( R_ECX, Rm );
 check_ralign32( R_ECX );
 load_spreg( R_EDX, R_FPSCR );
 TEST_imm32_r32( FPSCR_SZ, R_EDX );
- JNE_rel8(8 + CALL_FUNC1_SIZE, doublesize);
+ JNE_rel8(8 + MEM_READ_SIZE, doublesize);
 MEM_READ_LONG( R_ECX, R_EAX );
 load_fr_bank( R_EDX );
 store_fr( R_EDX, R_EAX, FRn );
@@ -2942,14 +2851,13 @@
 case 0x9:
 { /* FMOV @Rm+, FRn */
 uint32_t FRn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
- precheck();
- check_fpuen_no_precheck();
+ check_fpuen();
 load_reg( R_ECX, Rm );
 check_ralign32( R_ECX );
 MOV_r32_r32( R_ECX, R_EAX );
 load_spreg( R_EDX, R_FPSCR );
 TEST_imm32_r32( FPSCR_SZ, R_EDX );
- JNE_rel8(14 + CALL_FUNC1_SIZE, doublesize);
+ JNE_rel8(14 + MEM_READ_SIZE, doublesize);
 ADD_imm8s_r32( 4, R_EAX );
 store_reg( R_EAX, Rm );
 MEM_READ_LONG( R_ECX, R_EAX );
@@ -2982,13 +2890,12 @@
 case 0xA:
 { /* FMOV FRm, @Rn */
 uint32_t Rn = ((ir>>8)&0xF); uint32_t FRm = ((ir>>4)&0xF);
- precheck();
- check_fpuen_no_precheck();
+ check_fpuen();
 load_reg( R_ECX, Rn );
 check_walign32( R_ECX );
 load_spreg( R_EDX, R_FPSCR );
 TEST_imm32_r32( FPSCR_SZ, R_EDX );
- JNE_rel8(8 + CALL_FUNC2_SIZE, doublesize);
+ JNE_rel8(8 + MEM_WRITE_SIZE, doublesize);
 load_fr_bank( R_EDX );
 load_fr( R_EDX, R_EAX, FRm );
 MEM_WRITE_LONG( R_ECX, R_EAX ); // 12
@@ -3015,13 +2922,12 @@
 case 0xB:
 { /* FMOV FRm, @-Rn */
 uint32_t Rn = ((ir>>8)&0xF); uint32_t FRm = ((ir>>4)&0xF);
- precheck();
- check_fpuen_no_precheck();
+ check_fpuen();
 load_reg( R_ECX, Rn );
 check_walign32( R_ECX );
 load_spreg( R_EDX, R_FPSCR );
 TEST_imm32_r32( FPSCR_SZ, R_EDX );
- JNE_rel8(14 + CALL_FUNC2_SIZE, doublesize);
+ JNE_rel8(14 + MEM_WRITE_SIZE, doublesize);
 load_fr_bank( R_EDX );
 load_fr( R_EDX, R_EAX, FRm );
 ADD_imm8s_r32(-4,R_ECX);
@@ -3405,8 +3311,7 @@
 if( sh4_x86.in_delay_slot ) {
 SLOTILLEGAL();
 } else {
- precheck();
- JMP_exit(EXIT_ILLEGAL);
+ JMP_exc(EXC_ILLEGAL);
 return 2;
 }
 }
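Taken together with the JMP_exc/JNE_exc macros in x86op.h below, the backpatch scheme works in two phases. While translating, every check emits a conditional jump with a zero rel32 placeholder and records three things in a backpatch_record: where the placeholder lives, the faulting instruction's offset into the block (fixup_icount, counted in instructions, hence the >>1), and which exception to raise, with -1 reserved for "already raised, just clean up". When the block is complete, the trailer walks the list and points each placeholder at the appropriate stub using the usual x86 relative-branch arithmetic. A compact model of one fixup, reusing the field names from struct backpatch_record:

    #include <stdint.h>

    struct backpatch_record {
        uint32_t *fixup_addr;   /* where the zero rel32 placeholder was emitted */
        uint32_t fixup_icount;  /* (faulting_pc - block_start_pc) >> 1 */
        uint32_t exc_code;      /* EXC_* to raise, or -1 if already raised */
    };

    /* An x86 rel32 displacement is relative to the end of the branch
     * instruction, i.e. the byte just past the 4-byte displacement field. */
    static void fixup_one( struct backpatch_record *r, uint8_t *stub )
    {
        *r->fixup_addr = (uint32_t)(stub - (uint8_t *)r->fixup_addr - 4);
    }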
--- a/src/sh4/sh4x86.in	Tue Jan 01 04:56:52 2008 +0000
+++ b/src/sh4/sh4x86.in	Tue Jan 01 04:58:57 2008 +0000
@@ -34,6 +34,12 @@
 #define DEFAULT_BACKPATCH_SIZE 4096
 
+struct backpatch_record {
+ uint32_t *fixup_addr;
+ uint32_t fixup_icount;
+ uint32_t exc_code;
+};
+
 /**
  * Struct to manage internal translation state. This state is not saved -
  * it is only valid between calls to sh4_translate_begin_block() and
@@ -49,7 +55,7 @@
 int tstate;
 
 /* Allocated memory for the (block-wide) back-patch list */
- uint32_t **backpatch_list;
+ struct backpatch_record *backpatch_list;
 uint32_t backpatch_posn;
 uint32_t backpatch_size;
 };
@@ -75,14 +81,6 @@
 OP(0x70+ (sh4_x86.tstate^1)); OP(rel8); \
 MARK_JMP(rel8, label)
 
-
-#define EXIT_DATA_ADDR_READ 0
-#define EXIT_DATA_ADDR_WRITE 7
-#define EXIT_ILLEGAL 14
-#define EXIT_SLOT_ILLEGAL 21
-#define EXIT_FPU_DISABLED 28
-#define EXIT_SLOT_FPU_DISABLED 35
-
 static struct sh4_x86_state sh4_x86;
 
 static uint32_t max_int = 0x7FFFFFFF;
@@ -93,26 +91,25 @@
 void sh4_x86_init()
 {
 sh4_x86.backpatch_list = malloc(DEFAULT_BACKPATCH_SIZE);
- sh4_x86.backpatch_size = DEFAULT_BACKPATCH_SIZE / sizeof(uint32_t *);
+ sh4_x86.backpatch_size = DEFAULT_BACKPATCH_SIZE / sizeof(struct backpatch_record);
 }
 
-static void sh4_x86_add_backpatch( uint8_t *ptr )
+static void sh4_x86_add_backpatch( uint8_t *fixup_addr, uint32_t fixup_pc, uint32_t exc_code )
 {
 if( sh4_x86.backpatch_posn == sh4_x86.backpatch_size ) {
 sh4_x86.backpatch_size <<= 1;
- sh4_x86.backpatch_list = realloc( sh4_x86.backpatch_list, sh4_x86.backpatch_size * sizeof(uint32_t *) );
+ sh4_x86.backpatch_list = realloc( sh4_x86.backpatch_list,
+ sh4_x86.backpatch_size * sizeof(struct backpatch_record));
 assert( sh4_x86.backpatch_list != NULL );
 }
- sh4_x86.backpatch_list[sh4_x86.backpatch_posn++] = (uint32_t *)ptr;
-}
-
-static void sh4_x86_do_backpatch( uint8_t *reloc_base )
-{
- unsigned int i;
- for( i=0; i< sh4_x86.backpatch_posn; i++ ) {
- *sh4_x86.backpatch_list[i] += (reloc_base - (uint8_t *)sh4_x86.backpatch_list[i]) - 4;
- }
+ sh4_x86.backpatch_list[sh4_x86.backpatch_posn].fixup_addr = (uint32_t *)fixup_addr;
+ sh4_x86.backpatch_list[sh4_x86.backpatch_posn].fixup_icount = (fixup_pc - sh4_x86.block_start_pc)>>1;
+ sh4_x86.backpatch_list[sh4_x86.backpatch_posn].exc_code = exc_code;
+ sh4_x86.backpatch_posn++;
 }
 
 /**
@@ -266,97 +263,60 @@
 }
 
 /* Exception checks - Note that all exception checks will clobber EAX */
-#define precheck() load_imm32(R_EDX, (pc-sh4_x86.block_start_pc-(sh4_x86.in_delay_slot?2:0))>>1)
 
 #define check_priv( ) \
 if( !sh4_x86.priv_checked ) { \
 sh4_x86.priv_checked = TRUE;\
- precheck();\
 load_spreg( R_EAX, R_SR );\
 AND_imm32_r32( SR_MD, R_EAX );\
 if( sh4_x86.in_delay_slot ) {\
- JE_exit( EXIT_SLOT_ILLEGAL );\
+ JE_exc( EXC_SLOT_ILLEGAL );\
 } else {\
- JE_exit( EXIT_ILLEGAL );\
+ JE_exc( EXC_ILLEGAL );\
 }\
 }\
 
-
-static void check_priv_no_precheck()
-{
- if( !sh4_x86.priv_checked ) {
- sh4_x86.priv_checked = TRUE;
- load_spreg( R_EAX, R_SR );
- AND_imm32_r32( SR_MD, R_EAX );
- if( sh4_x86.in_delay_slot ) {
- JE_exit( EXIT_SLOT_ILLEGAL );
- } else {
- JE_exit( EXIT_ILLEGAL );
- }
- }
-}
-
 #define check_fpuen( ) \
 if( !sh4_x86.fpuen_checked ) {\
 sh4_x86.fpuen_checked = TRUE;\
- precheck();\
 load_spreg( R_EAX, R_SR );\
 AND_imm32_r32( SR_FD, R_EAX );\
 if( sh4_x86.in_delay_slot ) {\
- JNE_exit(EXIT_SLOT_FPU_DISABLED);\
+ JNE_exc(EXC_SLOT_FPU_DISABLED);\
 } else {\
- JNE_exit(EXIT_FPU_DISABLED);\
+ JNE_exc(EXC_FPU_DISABLED);\
 }\
 }
 
-static void check_fpuen_no_precheck()
-{
- if( !sh4_x86.fpuen_checked ) {
- sh4_x86.fpuen_checked = TRUE;
- load_spreg( R_EAX, R_SR );
- AND_imm32_r32( SR_FD, R_EAX );
- if( sh4_x86.in_delay_slot ) {
- JNE_exit(EXIT_SLOT_FPU_DISABLED);
- } else {
- JNE_exit(EXIT_FPU_DISABLED);
- }
- }
+#define check_ralign16( x86reg ) \
+ TEST_imm32_r32( 0x00000001, x86reg ); \
+ JNE_exc(EXC_DATA_ADDR_READ)
 
-}
+#define check_walign16( x86reg ) \
+ TEST_imm32_r32( 0x00000001, x86reg ); \
+ JNE_exc(EXC_DATA_ADDR_WRITE);
 
-static void check_ralign16( int x86reg )
-{
- TEST_imm32_r32( 0x00000001, x86reg );
- JNE_exit(EXIT_DATA_ADDR_READ);
-}
+#define check_ralign32( x86reg ) \
+ TEST_imm32_r32( 0x00000003, x86reg ); \
+ JNE_exc(EXC_DATA_ADDR_READ)
 
-static void check_walign16( int x86reg )
-{
- TEST_imm32_r32( 0x00000001, x86reg );
- JNE_exit(EXIT_DATA_ADDR_WRITE);
-}
-
-static void check_ralign32( int x86reg )
-{
- TEST_imm32_r32( 0x00000003, x86reg );
- JNE_exit(EXIT_DATA_ADDR_READ);
-}
-static void check_walign32( int x86reg )
-{
- TEST_imm32_r32( 0x00000003, x86reg );
- JNE_exit(EXIT_DATA_ADDR_WRITE);
-}
+#define check_walign32( x86reg ) \
+ TEST_imm32_r32( 0x00000003, x86reg ); \
+ JNE_exc(EXC_DATA_ADDR_WRITE);
 
 #define UNDEF()
 #define MEM_RESULT(value_reg) if(value_reg != R_EAX) { MOV_r32_r32(R_EAX,value_reg); }
-#define MEM_READ_BYTE( addr_reg, value_reg ) call_func1(sh4_read_byte, addr_reg ); MEM_RESULT(value_reg)
-#define MEM_READ_WORD( addr_reg, value_reg ) call_func1(sh4_read_word, addr_reg ); MEM_RESULT(value_reg)
-#define MEM_READ_LONG( addr_reg, value_reg ) call_func1(sh4_read_long, addr_reg ); MEM_RESULT(value_reg)
-#define MEM_WRITE_BYTE( addr_reg, value_reg ) call_func2(sh4_write_byte, addr_reg, value_reg)
-#define MEM_WRITE_WORD( addr_reg, value_reg ) call_func2(sh4_write_word, addr_reg, value_reg)
-#define MEM_WRITE_LONG( addr_reg, value_reg ) call_func2(sh4_write_long, addr_reg, value_reg)
+#define MEM_READ_BYTE( addr_reg, value_reg ) call_func1(sh4_read_byte, addr_reg ); TEST_r32_r32( R_EDX, R_EDX ); JNE_exc(-1); MEM_RESULT(value_reg)
+#define MEM_READ_WORD( addr_reg, value_reg ) call_func1(sh4_read_word, addr_reg ); TEST_r32_r32( R_EDX, R_EDX ); JNE_exc(-1); MEM_RESULT(value_reg)
+#define MEM_READ_LONG( addr_reg, value_reg ) call_func1(sh4_read_long, addr_reg ); TEST_r32_r32( R_EDX, R_EDX ); JNE_exc(-1); MEM_RESULT(value_reg)
+#define MEM_WRITE_BYTE( addr_reg, value_reg ) call_func2(sh4_write_byte, addr_reg, value_reg); TEST_r32_r32( R_EAX, R_EAX ); JNE_exc(-1);
+#define MEM_WRITE_WORD( addr_reg, value_reg ) call_func2(sh4_write_word, addr_reg, value_reg); TEST_r32_r32( R_EAX, R_EAX ); JNE_exc(-1);
+#define MEM_WRITE_LONG( addr_reg, value_reg ) call_func2(sh4_write_long, addr_reg, value_reg); TEST_r32_r32( R_EAX, R_EAX ); JNE_exc(-1);
 
-#define SLOTILLEGAL() precheck(); JMP_exit(EXIT_SLOT_ILLEGAL); sh4_x86.in_delay_slot = FALSE; return 1;
+#define MEM_READ_SIZE (CALL_FUNC1_SIZE+8)
+#define MEM_WRITE_SIZE (CALL_FUNC2_SIZE+8)
+
+#define SLOTILLEGAL() JMP_exc(EXC_SLOT_ILLEGAL); sh4_x86.in_delay_slot = FALSE; return 1;
 
 extern uint16_t *sh4_icache;
 extern uint32_t sh4_icache_addr;
@@ -389,7 +349,8 @@
 if( sh4_icache != NULL && pageaddr == sh4_icache_addr ) {
 ir = sh4_icache[(pc&0xFFF)>>1];
 } else {
- sh4_icache = (uint16_t *)mem_get_page(pc);
+ uint64_t phys = mmu_vma_to_phys_exec(pc);
+ sh4_icache = (uint16_t *)mem_get_page((uint32_t)phys);
 if( ((uintptr_t)sh4_icache) < MAX_IO_REGIONS ) {
 /* If someone's actually been so daft as to try to execute out of an IO
  * region, fallback on the full-blown memory read
@@ -619,7 +580,6 @@
 :}
 MAC.L @Rm+, @Rn+ {:
 load_reg( R_ECX, Rm );
- precheck();
 check_ralign32( R_ECX );
 load_reg( R_ECX, Rn );
 check_ralign32( R_ECX );
@@ -643,7 +603,6 @@
 :}
 MAC.W @Rm+, @Rn+ {:
 load_reg( R_ECX, Rm );
- precheck();
 check_ralign16( R_ECX );
 load_reg( R_ECX, Rn );
 check_ralign16( R_ECX );
@@ -1090,7 +1049,6 @@
 MOV.L Rm, @Rn {:
 load_reg( R_EAX, Rm );
 load_reg( R_ECX, Rn );
- precheck();
 check_walign32(R_ECX);
 MEM_WRITE_LONG( R_ECX, R_EAX );
 sh4_x86.tstate = TSTATE_NONE;
@@ -1098,7 +1056,6 @@
 MOV.L Rm, @-Rn {:
 load_reg( R_EAX, Rm );
 load_reg( R_ECX, Rn );
- precheck();
 check_walign32( R_ECX );
 ADD_imm8s_r32( -4, R_ECX );
 store_reg( R_ECX, Rn );
@@ -1109,7 +1066,6 @@
 load_reg( R_EAX, 0 );
 load_reg( R_ECX, Rn );
 ADD_r32_r32( R_EAX, R_ECX );
- precheck();
 check_walign32( R_ECX );
 load_reg( R_EAX, Rm );
 MEM_WRITE_LONG( R_ECX, R_EAX );
@@ -1119,7 +1075,6 @@
 load_spreg( R_ECX, R_GBR );
 load_reg( R_EAX, 0 );
 ADD_imm32_r32( disp, R_ECX );
- precheck();
 check_walign32( R_ECX );
 MEM_WRITE_LONG( R_ECX, R_EAX );
 sh4_x86.tstate = TSTATE_NONE;
@@ -1128,14 +1083,12 @@
 load_reg( R_ECX, Rn );
 load_reg( R_EAX, Rm );
 ADD_imm32_r32( disp, R_ECX );
- precheck();
 check_walign32( R_ECX );
 MEM_WRITE_LONG( R_ECX, R_EAX );
 sh4_x86.tstate = TSTATE_NONE;
 :}
 MOV.L @Rm, Rn {:
 load_reg( R_ECX, Rm );
- precheck();
 check_ralign32( R_ECX );
 MEM_READ_LONG( R_ECX, R_EAX );
 store_reg( R_EAX, Rn );
@@ -1143,7 +1096,6 @@
 :}
 MOV.L @Rm+, Rn {:
 load_reg( R_EAX, Rm );
- precheck();
 check_ralign32( R_EAX );
 MOV_r32_r32( R_EAX, R_ECX );
 ADD_imm8s_r32( 4, R_EAX );
@@ -1156,7 +1108,6 @@
 load_reg( R_EAX, 0 );
 load_reg( R_ECX, Rm );
 ADD_r32_r32( R_EAX, R_ECX );
- precheck();
 check_ralign32( R_ECX );
 MEM_READ_LONG( R_ECX, R_EAX );
 store_reg( R_EAX, Rn );
@@ -1165,7 +1116,6 @@
 MOV.L @(disp, GBR), R0 {:
 load_spreg( R_ECX, R_GBR );
 ADD_imm32_r32( disp, R_ECX );
- precheck();
 check_ralign32( R_ECX );
 MEM_READ_LONG( R_ECX, R_EAX );
 store_reg( R_EAX, 0 );
@@ -1176,7 +1126,7 @@
 SLOTILLEGAL();
 } else {
 uint32_t target = (pc & 0xFFFFFFFC) + disp + 4;
- sh4ptr_t ptr = mem_get_region(target);
+ sh4ptr_t ptr = sh4_get_region_by_vma(target);
 if( ptr != NULL ) {
 MOV_moff32_EAX( ptr );
 } else {
@@ -1190,7 +1140,6 @@
 MOV.L @(disp, Rm), Rn {:
 load_reg( R_ECX, Rm );
 ADD_imm8s_r32( disp, R_ECX );
- precheck();
 check_ralign32( R_ECX );
 MEM_READ_LONG( R_ECX, R_EAX );
 store_reg( R_EAX, Rn );
@@ -1198,7 +1147,6 @@
 :}
 MOV.W Rm, @Rn {:
 load_reg( R_ECX, Rn );
- precheck();
 check_walign16( R_ECX );
 load_reg( R_EAX, Rm );
 MEM_WRITE_WORD( R_ECX, R_EAX );
@@ -1206,7 +1154,6 @@
 :}
 MOV.W Rm, @-Rn {:
 load_reg( R_ECX, Rn );
- precheck();
 check_walign16( R_ECX );
 load_reg( R_EAX, Rm );
 ADD_imm8s_r32( -2, R_ECX );
@@ -1218,7 +1165,6 @@
 load_reg( R_EAX, 0 );
 load_reg( R_ECX, Rn );
 ADD_r32_r32( R_EAX, R_ECX );
- precheck();
 check_walign16( R_ECX );
 load_reg( R_EAX, Rm );
 MEM_WRITE_WORD( R_ECX, R_EAX );
@@ -1228,7 +1174,6 @@
 load_spreg( R_ECX, R_GBR );
 load_reg( R_EAX, 0 );
 ADD_imm32_r32( disp, R_ECX );
- precheck();
 check_walign16( R_ECX );
 MEM_WRITE_WORD( R_ECX, R_EAX );
 sh4_x86.tstate = TSTATE_NONE;
@@ -1237,14 +1182,12 @@
 load_reg( R_ECX, Rn );
 load_reg( R_EAX, 0 );
 ADD_imm32_r32( disp, R_ECX );
- precheck();
 check_walign16( R_ECX );
 MEM_WRITE_WORD( R_ECX, R_EAX );
 sh4_x86.tstate = TSTATE_NONE;
 :}
 MOV.W @Rm, Rn {:
 load_reg( R_ECX, Rm );
- precheck();
 check_ralign16( R_ECX );
 MEM_READ_WORD( R_ECX, R_EAX );
 store_reg( R_EAX, Rn );
@@ -1252,7 +1195,6 @@
 :}
 MOV.W @Rm+, Rn {:
 load_reg( R_EAX, Rm );
- precheck();
 check_ralign16( R_EAX );
 MOV_r32_r32( R_EAX, R_ECX );
 ADD_imm8s_r32( 2, R_EAX );
@@ -1265,7 +1207,6 @@
 load_reg( R_EAX, 0 );
 load_reg( R_ECX, Rm );
 ADD_r32_r32( R_EAX, R_ECX );
- precheck();
 check_ralign16( R_ECX );
 MEM_READ_WORD( R_ECX, R_EAX );
 store_reg( R_EAX, Rn );
@@ -1274,7 +1215,6 @@
 MOV.W @(disp, GBR), R0 {:
 load_spreg( R_ECX, R_GBR );
 ADD_imm32_r32( disp, R_ECX );
- precheck();
 check_ralign16( R_ECX );
 MEM_READ_WORD( R_ECX, R_EAX );
 store_reg( R_EAX, 0 );
@@ -1293,7 +1233,6 @@
 MOV.W @(disp, Rm), R0 {:
 load_reg( R_ECX, Rm );
 ADD_imm32_r32( disp, R_ECX );
- precheck();
 check_ralign16( R_ECX );
 MEM_READ_WORD( R_ECX, R_EAX );
 store_reg( R_EAX, 0 );
@@ -1310,7 +1249,6 @@
 MOVCA.L R0, @Rn {:
 load_reg( R_EAX, 0 );
 load_reg( R_ECX, Rn );
- precheck();
 check_walign32( R_ECX );
 MEM_WRITE_LONG( R_ECX, R_EAX );
 sh4_x86.tstate = TSTATE_NONE;
@@ -1506,8 +1444,7 @@
 if( sh4_x86.in_delay_slot ) {
 SLOTILLEGAL();
 } else {
- precheck();
- JMP_exit(EXIT_ILLEGAL);
+ JMP_exc(EXC_ILLEGAL);
 return 2;
 }
 :}
@@ -1591,13 +1528,12 @@
 sh4_x86.tstate = TSTATE_NONE;
 :}
 FMOV FRm, @Rn {:
- precheck();
- check_fpuen_no_precheck();
+ check_fpuen();
 load_reg( R_ECX, Rn );
 check_walign32( R_ECX );
 load_spreg( R_EDX, R_FPSCR );
 TEST_imm32_r32( FPSCR_SZ, R_EDX );
- JNE_rel8(8 + CALL_FUNC2_SIZE, doublesize);
+ JNE_rel8(8 + MEM_WRITE_SIZE, doublesize);
 load_fr_bank( R_EDX );
 load_fr( R_EDX, R_EAX, FRm );
 MEM_WRITE_LONG( R_ECX, R_EAX ); // 12
@@ -1621,13 +1557,12 @@
 sh4_x86.tstate = TSTATE_NONE;
 :}
 FMOV @Rm, FRn {:
- precheck();
- check_fpuen_no_precheck();
+ check_fpuen();
 load_reg( R_ECX, Rm );
 check_ralign32( R_ECX );
 load_spreg( R_EDX, R_FPSCR );
 TEST_imm32_r32( FPSCR_SZ, R_EDX );
- JNE_rel8(8 + CALL_FUNC1_SIZE, doublesize);
+ JNE_rel8(8 + MEM_READ_SIZE, doublesize);
 MEM_READ_LONG( R_ECX, R_EAX );
 load_fr_bank( R_EDX );
 store_fr( R_EDX, R_EAX, FRn );
@@ -1652,13 +1587,12 @@
 sh4_x86.tstate = TSTATE_NONE;
 :}
 FMOV FRm, @-Rn {:
- precheck();
- check_fpuen_no_precheck();
+ check_fpuen();
 load_reg( R_ECX, Rn );
 check_walign32( R_ECX );
 load_spreg( R_EDX, R_FPSCR );
 TEST_imm32_r32( FPSCR_SZ, R_EDX );
- JNE_rel8(14 + CALL_FUNC2_SIZE, doublesize);
+ JNE_rel8(14 + MEM_WRITE_SIZE, doublesize);
 load_fr_bank( R_EDX );
 load_fr( R_EDX, R_EAX, FRm );
 ADD_imm8s_r32(-4,R_ECX);
@@ -1688,14 +1622,13 @@
 sh4_x86.tstate = TSTATE_NONE;
 :}
 FMOV @Rm+, FRn {:
- precheck();
- check_fpuen_no_precheck();
+ check_fpuen();
 load_reg( R_ECX, Rm );
 check_ralign32( R_ECX );
 MOV_r32_r32( R_ECX, R_EAX );
 load_spreg( R_EDX, R_FPSCR );
 TEST_imm32_r32( FPSCR_SZ, R_EDX );
- JNE_rel8(14 + CALL_FUNC1_SIZE, doublesize);
+ JNE_rel8(14 + MEM_READ_SIZE, doublesize);
 ADD_imm8s_r32( 4, R_EAX );
 store_reg( R_EAX, Rm );
 MEM_READ_LONG( R_ECX, R_EAX );
@@ -1725,14 +1658,13 @@
 sh4_x86.tstate = TSTATE_NONE;
 :}
 FMOV FRm, @(R0, Rn) {:
- precheck();
- check_fpuen_no_precheck();
+ check_fpuen();
 load_reg( R_ECX, Rn );
 ADD_sh4r_r32( REG_OFFSET(r[0]), R_ECX );
 check_walign32( R_ECX );
 load_spreg( R_EDX, R_FPSCR );
 TEST_imm32_r32( FPSCR_SZ, R_EDX );
- JNE_rel8(8 + CALL_FUNC2_SIZE, doublesize);
+ JNE_rel8(8 + MEM_WRITE_SIZE, doublesize);
 load_fr_bank( R_EDX );
 load_fr( R_EDX, R_EAX, FRm );
 MEM_WRITE_LONG( R_ECX, R_EAX ); // 12
@@ -1756,14 +1688,13 @@
 sh4_x86.tstate = TSTATE_NONE;
 :}
 FMOV @(R0, Rm), FRn {:
- precheck();
- check_fpuen_no_precheck();
+ check_fpuen();
 load_reg( R_ECX, Rm );
 ADD_sh4r_r32( REG_OFFSET(r[0]), R_ECX );
 check_ralign32( R_ECX );
 load_spreg( R_EDX, R_FPSCR );
 TEST_imm32_r32( FPSCR_SZ, R_EDX );
- JNE_rel8(8 + CALL_FUNC1_SIZE, doublesize);
+ JNE_rel8(8 + MEM_READ_SIZE, doublesize);
 MEM_READ_LONG( R_ECX, R_EAX );
 load_fr_bank( R_EDX );
 store_fr( R_EDX, R_EAX, FRn );
@@ -2222,7 +2153,6 @@
 :}
 LDC.L @Rm+, GBR {:
 load_reg( R_EAX, Rm );
- precheck();
 check_ralign32( R_EAX );
 MOV_r32_r32( R_EAX, R_ECX );
 ADD_imm8s_r32( 4, R_EAX );
@@ -2235,8 +2165,7 @@
 if( sh4_x86.in_delay_slot ) {
 SLOTILLEGAL();
 } else {
- precheck();
- check_priv_no_precheck();
+ check_priv();
 load_reg( R_EAX, Rm );
 check_ralign32( R_EAX );
 MOV_r32_r32( R_EAX, R_ECX );
@@ -2250,8 +2179,7 @@
 }
 :}
 LDC.L @Rm+, VBR {:
- precheck();
- check_priv_no_precheck();
+ check_priv();
 load_reg( R_EAX, Rm );
 check_ralign32( R_EAX );
 MOV_r32_r32( R_EAX, R_ECX );
@@ -2262,8 +2190,7 @@
 sh4_x86.tstate = TSTATE_NONE;
 :}
 LDC.L @Rm+, SSR {:
- precheck();
- check_priv_no_precheck();
+ check_priv();
 load_reg( R_EAX, Rm );
 check_ralign32( R_EAX );
 MOV_r32_r32( R_EAX, R_ECX );
@@ -2274,8 +2201,7 @@
 sh4_x86.tstate = TSTATE_NONE;
 :}
 LDC.L @Rm+, SGR {:
- precheck();
- check_priv_no_precheck();
+ check_priv();
 load_reg( R_EAX, Rm );
 check_ralign32( R_EAX );
 MOV_r32_r32( R_EAX, R_ECX );
@@ -2286,8 +2212,7 @@
 sh4_x86.tstate = TSTATE_NONE;
 :}
 LDC.L @Rm+, SPC {:
- precheck();
- check_priv_no_precheck();
+ check_priv();
 load_reg( R_EAX, Rm );
 check_ralign32( R_EAX );
 MOV_r32_r32( R_EAX, R_ECX );
@@ -2298,8 +2223,7 @@
 sh4_x86.tstate = TSTATE_NONE;
 :}
 LDC.L @Rm+, DBR {:
- precheck();
- check_priv_no_precheck();
+ check_priv();
 load_reg( R_EAX, Rm );
 check_ralign32( R_EAX );
 MOV_r32_r32( R_EAX, R_ECX );
@@ -2310,8 +2234,7 @@
 sh4_x86.tstate = TSTATE_NONE;
 :}
 LDC.L @Rm+, Rn_BANK {:
- precheck();
- check_priv_no_precheck();
+ check_priv();
 load_reg( R_EAX, Rm );
 check_ralign32( R_EAX );
 MOV_r32_r32( R_EAX, R_ECX );
@@ -2329,7 +2252,6 @@
 :}
 LDS.L @Rm+, FPSCR {:
 load_reg( R_EAX, Rm );
- precheck();
 check_ralign32( R_EAX );
 MOV_r32_r32( R_EAX, R_ECX );
 ADD_imm8s_r32( 4, R_EAX );
@@ -2345,7 +2267,6 @@
 :}
 LDS.L @Rm+, FPUL {:
 load_reg( R_EAX, Rm );
- precheck();
 check_ralign32( R_EAX );
 MOV_r32_r32( R_EAX, R_ECX );
 ADD_imm8s_r32( 4, R_EAX );
@@ -2360,7 +2281,6 @@
 :}
 LDS.L @Rm+, MACH {:
 load_reg( R_EAX, Rm );
- precheck();
 check_ralign32( R_EAX );
 MOV_r32_r32( R_EAX, R_ECX );
 ADD_imm8s_r32( 4, R_EAX );
@@ -2375,7 +2295,6 @@
 :}
 LDS.L @Rm+, MACL {:
 load_reg( R_EAX, Rm );
- precheck();
 check_ralign32( R_EAX );
 MOV_r32_r32( R_EAX, R_ECX );
 ADD_imm8s_r32( 4, R_EAX );
@@ -2390,7 +2309,6 @@
 :}
 LDS.L @Rm+, PR {:
 load_reg( R_EAX, Rm );
- precheck();
 check_ralign32( R_EAX );
 MOV_r32_r32( R_EAX, R_ECX );
 ADD_imm8s_r32( 4, R_EAX );
@@ -2469,8 +2387,7 @@
 sh4_x86.tstate = TSTATE_NONE;
 :}
 STC.L SR, @-Rn {:
- precheck();
- check_priv_no_precheck();
+ check_priv();
 call_func0( sh4_read_sr );
 load_reg( R_ECX, Rn );
 check_walign32( R_ECX );
@@ -2480,8 +2397,7 @@
 sh4_x86.tstate = TSTATE_NONE;
 :}
 STC.L VBR, @-Rn {:
- precheck();
- check_priv_no_precheck();
+ check_priv();
 load_reg( R_ECX, Rn );
 check_walign32( R_ECX );
 ADD_imm8s_r32( -4, R_ECX );
@@ -2491,8 +2407,7 @@
 sh4_x86.tstate = TSTATE_NONE;
 :}
 STC.L SSR, @-Rn {:
- precheck();
- check_priv_no_precheck();
+ check_priv();
 load_reg( R_ECX, Rn );
 check_walign32( R_ECX );
 ADD_imm8s_r32( -4, R_ECX );
@@ -2502,8 +2417,7 @@
 sh4_x86.tstate = TSTATE_NONE;
 :}
 STC.L SPC, @-Rn {:
- precheck();
- check_priv_no_precheck();
+ check_priv();
 load_reg( R_ECX, Rn );
 check_walign32( R_ECX );
 ADD_imm8s_r32( -4, R_ECX );
@@ -2513,8 +2427,7 @@
 sh4_x86.tstate = TSTATE_NONE;
 :}
 STC.L SGR, @-Rn {:
- precheck();
- check_priv_no_precheck();
+ check_priv();
 load_reg( R_ECX, Rn );
 check_walign32( R_ECX );
 ADD_imm8s_r32( -4, R_ECX );
@@ -2524,8 +2437,7 @@
 sh4_x86.tstate = TSTATE_NONE;
 :}
 STC.L DBR, @-Rn {:
- precheck();
- check_priv_no_precheck();
+ check_priv();
 load_reg( R_ECX, Rn );
 check_walign32( R_ECX );
 ADD_imm8s_r32( -4, R_ECX );
@@ -2535,8 +2447,7 @@
 sh4_x86.tstate = TSTATE_NONE;
 :}
 STC.L Rm_BANK, @-Rn {:
- precheck();
- check_priv_no_precheck();
+ check_priv();
 load_reg( R_ECX, Rn );
 check_walign32( R_ECX );
 ADD_imm8s_r32( -4, R_ECX );
@@ -2547,7 +2458,6 @@
 :}
 STC.L GBR, @-Rn {:
 load_reg( R_ECX, Rn );
- precheck();
 check_walign32( R_ECX );
 ADD_imm8s_r32( -4, R_ECX );
 store_reg( R_ECX, Rn );
@@ -2561,7 +2471,6 @@
 :}
 STS.L FPSCR, @-Rn {:
 load_reg( R_ECX, Rn );
- precheck();
 check_walign32( R_ECX );
 ADD_imm8s_r32( -4, R_ECX );
 store_reg( R_ECX, Rn );
@@ -2575,7 +2484,6 @@
 :}
 STS.L FPUL, @-Rn {:
 load_reg( R_ECX, Rn );
- precheck();
 check_walign32( R_ECX );
 ADD_imm8s_r32( -4, R_ECX );
 store_reg( R_ECX, Rn );
@@ -2589,7 +2497,6 @@
 :}
 STS.L MACH, @-Rn {:
 load_reg( R_ECX, Rn );
- precheck();
 check_walign32( R_ECX );
 ADD_imm8s_r32( -4, R_ECX );
 store_reg( R_ECX, Rn );
@@ -2603,7 +2510,6 @@
 :}
 STS.L MACL, @-Rn {:
 load_reg( R_ECX, Rn );
- precheck();
 check_walign32( R_ECX );
 ADD_imm8s_r32( -4, R_ECX );
 store_reg( R_ECX, Rn );
@@ -2617,7 +2523,6 @@
 :}
 STS.L PR, @-Rn {:
 load_reg( R_ECX, Rn );
- precheck();
 check_walign32( R_ECX );
 ADD_imm8s_r32( -4, R_ECX );
 store_reg( R_ECX, Rn );
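The CALL_FUNC*_SIZE to MEM_*_SIZE renames in the FMOV cases are forced by the new fault checks: JNE_rel8(n + ..., doublesize) skips over a fixed number of emitted bytes, and every MEM_* macro now appends a TEST r32,r32 (2 bytes) plus a long-form JNE (0x0F 0x85 and a 4-byte displacement, 6 bytes) after the call. Hence the extra 8 bytes in the definitions, as accounted for below; any case that hard-codes a skip over a memory access must use the new constants or it would jump into the middle of the fault check:

    /* Byte accounting behind MEM_READ_SIZE/MEM_WRITE_SIZE (32-bit x86):
     *
     *   call_funcN(...)           CALL_FUNCn_SIZE bytes (unchanged)
     *   TEST r32, r32             2 bytes (0x85 /r)
     *   JNE rel32 (backpatched)   6 bytes (0x0F 0x85 + 4-byte displacement)
     *
     * so the fixed size to skip over becomes:
     */
    #define MEM_READ_SIZE (CALL_FUNC1_SIZE+8)
    #define MEM_WRITE_SIZE (CALL_FUNC2_SIZE+8)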
--- a/src/sh4/x86op.h	Tue Jan 01 04:56:52 2008 +0000
+++ b/src/sh4/x86op.h	Tue Jan 01 04:58:57 2008 +0000
@@ -233,23 +233,27 @@
 #define JNS_rel8(rel,label) OP(0x79); OP(rel); MARK_JMP(rel,label)
 #define JS_rel8(rel,label) OP(0x78); OP(rel); MARK_JMP(rel,label)
 
+/** JMP relative 8 or 32 depending on size of rel. rel is the offset
+ * from the start of the instruction (not the end)
+ */
+#define JMP_rel(rel) if((rel)<-126||(rel)>129) { OP(0xE9); OP32((rel)-5); } else { OP(0xEB); OP((rel)-2); }
 
-/* 32-bit long forms w/ backpatching to an exit routine */
-#define JMP_exit(rel) OP(0xE9); sh4_x86_add_backpatch(xlat_output); OP32(rel)
-#define JE_exit(rel) OP(0x0F); OP(0x84); sh4_x86_add_backpatch(xlat_output); OP32(rel)
-#define JA_exit(rel) OP(0x0F); OP(0x87); sh4_x86_add_backpatch(xlat_output); OP32(rel)
-#define JAE_exit(rel) OP(0x0F); OP(0x83); sh4_x86_add_backpatch(xlat_output); OP32(rel)
-#define JG_exit(rel) OP(0x0F); OP(0x8F); sh4_x86_add_backpatch(xlat_output); OP32(rel)
-#define JGE_exit(rel) OP(0x0F); OP(0x8D); sh4_x86_add_backpatch(xlat_output); OP32(rel)
-#define JC_exit(rel) OP(0x0F); OP(0x82); sh4_x86_add_backpatch(xlat_output); OP32(rel)
-#define JO_exit(rel) OP(0x0F); OP(0x80); sh4_x86_add_backpatch(xlat_output); OP32(rel)
-#define JNE_exit(rel) OP(0x0F); OP(0x85); sh4_x86_add_backpatch(xlat_output); OP32(rel)
-#define JNA_exit(rel) OP(0x0F); OP(0x86); sh4_x86_add_backpatch(xlat_output); OP32(rel)
-#define JNAE_exit(rel) OP(0x0F);OP(0x82); sh4_x86_add_backpatch(xlat_output); OP32(rel)
-#define JNG_exit(rel) OP(0x0F); OP(0x8E); sh4_x86_add_backpatch(xlat_output); OP32(rel)
-#define JNGE_exit(rel) OP(0x0F);OP(0x8C); sh4_x86_add_backpatch(xlat_output); OP32(rel)
-#define JNC_exit(rel) OP(0x0F); OP(0x83); sh4_x86_add_backpatch(xlat_output); OP32(rel)
-#define JNO_exit(rel) OP(0x0F); OP(0x81); sh4_x86_add_backpatch(xlat_output); OP32(rel)
+/* 32-bit long forms w/ backpatching to an exception routine */
+#define JMP_exc(exc) OP(0xE9); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
+#define JE_exc(exc) OP(0x0F); OP(0x84); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
+#define JA_exc(exc) OP(0x0F); OP(0x87); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
+#define JAE_exc(exc) OP(0x0F); OP(0x83); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
+#define JG_exc(exc) OP(0x0F); OP(0x8F); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
+#define JGE_exc(exc) OP(0x0F); OP(0x8D); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
+#define JC_exc(exc) OP(0x0F); OP(0x82); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
+#define JO_exc(exc) OP(0x0F); OP(0x80); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
+#define JNE_exc(exc) OP(0x0F); OP(0x85); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
+#define JNA_exc(exc) OP(0x0F); OP(0x86); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
+#define JNAE_exc(exc) OP(0x0F);OP(0x82); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
+#define JNG_exc(exc) OP(0x0F); OP(0x8E); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
+#define JNGE_exc(exc) OP(0x0F);OP(0x8C); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
+#define JNC_exc(exc) OP(0x0F); OP(0x83); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
+#define JNO_exc(exc) OP(0x0F); OP(0x81); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
 
 /* Conditional moves ebp-rel */
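A closing note on the JMP_rel bounds above: rel is measured from the first byte of the jump, while the CPU applies the displacement relative to the next instruction. The short form (0xEB) is 2 bytes long, so its 8-bit displacement is rel-2 and must fit in -128..127, which gives the usable rel range of -126..129 tested by the macro; everything else takes the 5-byte near form (0xE9) with a displacement of rel-5. The same decision written out as a function, with emit8()/emit32() as hypothetical stand-ins for OP()/OP32():

    #include <stdint.h>

    extern void emit8( uint8_t b );    /* stand-in for OP() */
    extern void emit32( uint32_t v );  /* stand-in for OP32() */

    /* Emit a JMP whose target lies rel bytes from the start of the JMP itself. */
    static void emit_jmp_rel( int32_t rel )
    {
        if( rel < -126 || rel > 129 ) {
            emit8( 0xE9 );                    /* JMP rel32: 5 bytes total */
            emit32( (uint32_t)(rel - 5) );
        } else {
            emit8( 0xEB );                    /* JMP rel8: 2 bytes total */
            emit8( (uint8_t)(rel - 2) );
        }
    }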