Search
lxdream.org :: lxdream/src/sh4/mmux86.c :: diff
lxdream 0.9.1
released Jun 29
Download Now
filename src/sh4/mmux86.c
changeset 991:60c7fab9c880
prev 975:007bf7eb944f
next 995:eb9d43e8aa08
author nkeynes
date Wed Mar 04 23:12:21 2009 +0000 (13 years ago)
permissions -rw-r--r--
last change Move xltcache to xlat/ src directory
Commit new and improved x86 opcode file - cleaned up and added support for amd64 extended registers
file annotate diff log raw
1.1 --- a/src/sh4/mmux86.c Mon Jan 26 07:26:24 2009 +0000
1.2 +++ b/src/sh4/mmux86.c Wed Mar 04 23:12:21 2009 +0000
1.3 @@ -22,19 +22,24 @@
1.4 #include "sh4/sh4mmio.h"
1.5 #include "sh4/sh4trans.h"
1.6 #include "sh4/mmu.h"
1.7 -#include "sh4/x86op.h"
1.8 +#include "xlat/x86/x86op.h"
1.9
1.10 #if SIZEOF_VOID_P == 8
1.11 -#define ARG1 R_EDI
1.12 -#define ARG2 R_ESI
1.13 -#define DECODE() \
1.14 - MOV_imm64_r32((uintptr_t)addr_space, R_EAX); /* movq ptr, %rax */ \
1.15 - REXW(); OP(0x8B); OP(0x0C); OP(0xC8) /* movq [%rax + %rcx*8], %rcx */
1.16 +#define ARG1 REG_RDI
1.17 +#define ARG2 REG_RSI
1.18 +#define XLAT(addr_space, reg) \
1.19 + MOVQ_imm64_r64( (uintptr_t)addr_space, REG_RAX ); \
1.20 + MOVP_sib_rptr( 3, reg, REG_RAX, 0, reg );
1.21 +#define ADDP_imms_ptr(imm,p) \
1.22 + MOVQ_imm64_r64((uintptr_t)p, REG_EAX ); \
1.23 + ADDL_imms_r32disp(imm, REG_EAX, 0);
1.24 #else
1.25 -#define ARG1 R_EAX
1.26 +#define ARG1 REG_EAX
1.27 #define ARG2 R_EDX
1.28 -#define DECODE() \
1.29 - MOV_r32disp32x4_r32( R_ECX, (uintptr_t)addr_space, R_ECX );
1.30 +#define XLAT(addr_space, reg) \
1.31 + MOVP_sib_rptr( 2, reg, -1, (uintptr_t)addr_space, reg );
1.32 +#define ADDP_imms_ptr(imm,p) \
1.33 + ADDL_imms_r32disp(imm, -1, (uintptr_t)p);
1.34 #endif
1.35
1.36 void mmu_utlb_init_vtable( struct utlb_entry *ent, struct utlb_page_entry *page, gboolean writable )
1.37 @@ -61,23 +66,18 @@
1.38 for( i=0; i<10; i+= inc, fn += inc, out += inc ) {
1.39 *out = xlat_output;
1.40 if( i != 9 ) { /* read_byte_for_write doesn't increment mmu_urc, everything else does */
1.41 -#if SIZEOF_VOID_P == 8
1.42 - MOV_imm64_r32((uintptr_t)&mmu_urc, R_EAX );
1.43 - OP(0x83); OP(0x00); OP(0x01); // ADD #1, [RAX]
1.44 -#else
1.45 - OP(0x83); MODRM_r32_disp32(0, (uintptr_t)&mmu_urc); OP(0x01); // ADD #1, mmu_urc
1.46 -#endif
1.47 + ADDP_imms_ptr(1, &mmu_urc);
1.48 }
1.49 - ADD_imm32_r32( ppn-vpn, ARG1 ); // 6
1.50 + ADDL_imms_r32( ppn-vpn, ARG1 ); // 6
1.51 if( ent->mask >= 0xFFFFF000 ) {
1.52 // Maps to a single page, so jump directly there
1.53 int rel = (*fn - xlat_output);
1.54 - JMP_rel( rel ); // 5
1.55 + JMP_prerel( rel ); // 5
1.56 } else {
1.57 - MOV_r32_r32( ARG1, R_ECX ); // 2
1.58 - SHR_imm8_r32( 12, R_ECX ); // 3
1.59 - DECODE(); // 14
1.60 - JMP_r32disp8(R_ECX, (((uintptr_t)out) - ((uintptr_t)&page->fn)) ); // 3
1.61 + MOVL_r32_r32( ARG1, REG_ECX ); // 2
1.62 + SHRL_imm_r32( 12, REG_ECX ); // 3
1.63 + XLAT(addr_space, REG_ECX); // 14
1.64 + JMP_r32disp(REG_ECX, (((uintptr_t)out) - ((uintptr_t)&page->fn)) ); // 3
1.65 }
1.66 }
1.67
1.68 @@ -95,15 +95,10 @@
1.69 memcpy( page, &p4_region_storequeue, sizeof(struct mem_region_fn) );
1.70
1.71 page->fn.prefetch = (mem_prefetch_fn_t)xlat_output;
1.72 -#if SIZEOF_VOID_P == 8
1.73 - MOV_imm64_r32((uintptr_t)&mmu_urc, R_EAX );
1.74 - OP(0x83); OP(0x00); OP(0x01); // ADD #1, [RAX]
1.75 -#else
1.76 - OP(0x83); MODRM_r32_disp32(0, (uintptr_t)&mmu_urc); OP(0x01); // ADD #1, mmu_urc
1.77 -#endif
1.78 - ADD_imm32_r32( ppn-vpn, ARG1 );
1.79 + ADDP_imms_ptr(1, &mmu_urc);
1.80 + ADDL_imms_r32( ppn-vpn, ARG1 );
1.81 int rel = ((uint8_t *)ccn_storequeue_prefetch_tlb) - xlat_output;
1.82 - JMP_rel( rel );
1.83 + JMP_prerel( rel );
1.84 }
1.85
1.86 void mmu_utlb_1k_init_vtable( struct utlb_1k_entry *entry )
1.87 @@ -114,31 +109,21 @@
1.88
1.89 for( i=0; i<9; i++, out++ ) {
1.90 *out = xlat_output;
1.91 - MOV_r32_r32( ARG1, R_ECX );
1.92 - SHR_imm8_r32( 10, R_ECX );
1.93 - AND_imm8s_r32( 0x3, R_ECX );
1.94 -#if SIZEOF_VOID_P == 8
1.95 - MOV_imm64_r32( (uintptr_t)&entry->subpages[0], R_EAX );
1.96 - REXW(); OP(0x8B); OP(0x0C); OP(0xC8); /* movq [%rax + %rcx*8], %rcx */
1.97 -#else
1.98 - MOV_r32disp32x4_r32( R_ECX, ((uintptr_t)&entry->subpages[0]), R_ECX );
1.99 -#endif
1.100 - JMP_r32disp8(R_ECX, (((uintptr_t)out) - ((uintptr_t)&entry->fn)) ); // 3
1.101 + MOVL_r32_r32( ARG1, REG_ECX );
1.102 + SHRL_imm_r32( 10, REG_ECX );
1.103 + ANDL_imms_r32( 0x3, REG_ECX );
1.104 + XLAT( (uintptr_t)&entry->subpages[0], REG_ECX );
1.105 + JMP_r32disp(REG_ECX, (((uintptr_t)out) - ((uintptr_t)&entry->fn)) ); // 3
1.106 }
1.107
1.108 out = (uint8_t **)&entry->user_fn;
1.109 for( i=0; i<9; i++, out++ ) {
1.110 *out = xlat_output;
1.111 - MOV_r32_r32( ARG1, R_ECX );
1.112 - SHR_imm8_r32( 10, R_ECX );
1.113 - AND_imm8s_r32( 0x3, R_ECX );
1.114 -#if SIZEOF_VOID_P == 8
1.115 - MOV_imm64_r32( (uintptr_t)&entry->user_subpages[0], R_EAX );
1.116 - REXW(); OP(0x8B); OP(0x0C); OP(0xC8); /* movq [%rax + %rcx*8], %rcx */
1.117 -#else
1.118 - MOV_r32disp32x4_r32( R_ECX, ((uintptr_t)&entry->user_subpages[0]), R_ECX );
1.119 -#endif
1.120 - JMP_r32disp8(R_ECX, (((uintptr_t)out) - ((uintptr_t)&entry->user_fn)) ); // 3
1.121 + MOVL_r32_r32( ARG1, REG_ECX );
1.122 + SHRL_imm_r32( 10, REG_ECX );
1.123 + ANDL_imms_r32( 0x3, REG_ECX );
1.124 + XLAT( (uintptr_t)&entry->user_subpages[0], REG_ECX );
1.125 + JMP_r32disp(REG_ECX, (((uintptr_t)out) - ((uintptr_t)&entry->user_fn)) ); // 3
1.126 }
1.127
1.128 }
.