filename src/sh4/ia64abi.h
changeset 991:60c7fab9c880
prev957:0f6131f6cc3a
author nkeynes
date Wed Mar 04 23:12:21 2009 +0000
permissions -rw-r--r--
last change Move xltcache to xlat/ src directory
Commit new and improved x86 opcode file - cleaned up and added support for amd64 extended registers
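
For context: the bulk of this diff swaps hand-rolled byte sequences such as REXW(); OP(0x8B); ... for typed emitter macros (MOVQ_r64_r64, MOVP_sib_rptr, ...) from the new opcode file, which encode the REX prefix themselves and can therefore reach the amd64 extended registers r8-r15. A minimal sketch of how such a macro could expand, assuming only that OP() appends one byte to the output stream, as it does in the old-style code below; emit_rex_modrm64 is a hypothetical helper, not the actual definition from the new opcode file:

    /* Hypothetical sketch only - the real definitions live in the new
     * x86 opcode file, which is not part of this diff. */
    static inline void emit_rex_modrm64( int opcode, int reg, int rm )
    {
        /* REX prefix 0100WRXB: W=1 selects 64-bit operand size; R and B
         * extend the ModRM reg/rm fields so r8-r15 become encodable. */
        OP( 0x48 | ((reg >> 3) << 2) | (rm >> 3) );
        OP( opcode );
        OP( 0xC0 | ((reg & 7) << 3) | (rm & 7) );  /* mod=11: reg-to-reg */
    }
    #define MOVQ_r64_r64( src, dst ) emit_rex_modrm64( 0x89, src, dst )  /* mov %src,%dst */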
1.1 --- a/src/sh4/ia64abi.h Wed Jan 14 00:16:44 2009 +0000
1.2 +++ b/src/sh4/ia64abi.h Wed Mar 04 23:12:21 2009 +0000
1.3 @@ -24,13 +24,13 @@
1.4
1.5 #define load_ptr( reg, ptr ) load_imm64( reg, (uint64_t)ptr );
1.6
1.7 -static inline decode_address( int addr_reg )
1.8 +static inline void decode_address( int addr_reg )
1.9 {
1.10 uintptr_t base = (sh4r.xlat_sh4_mode&SR_MD) ? (uintptr_t)sh4_address_space : (uintptr_t)sh4_user_address_space;
1.11 - MOV_r32_r32( addr_reg, R_ECX );
1.12 - SHR_imm8_r32( 12, R_ECX );
1.13 - load_ptr( R_EDI, base );
1.14 - REXW(); OP(0x8B); OP(0x0C); OP(0xCF); // mov.q [%rdi + %rcx*8], %rcx
1.15 + MOVL_r32_r32( addr_reg, REG_RCX );
1.16 + SHRL_imm_r32( 12, REG_RCX );
1.17 + MOVP_immptr_rptr( base, REG_RDI );
1.18 + MOVP_sib_rptr(3, REG_RCX, REG_RDI, 0, REG_RCX);
1.19 }
1.20
1.21 /**
1.22 @@ -41,58 +41,59 @@
1.23 #define CALL_FUNC0_SIZE 12
1.24 static inline void call_func0( void *ptr )
1.25 {
1.26 - load_imm64(R_EAX, (uint64_t)ptr);
1.27 - CALL_r32(R_EAX);
1.28 + MOVQ_imm64_r64((uint64_t)ptr, REG_RAX);
1.29 + CALL_r32(REG_RAX);
1.30 }
1.31
1.32 -#define CALL_FUNC1_SIZE 14
1.33 static inline void call_func1( void *ptr, int arg1 )
1.34 {
1.35 - REXW(); MOV_r32_r32(arg1, R_EDI);
1.36 + MOVQ_r64_r64(arg1, REG_RDI);
1.37 call_func0(ptr);
1.38 }
1.39
1.40 static inline void call_func1_exc( void *ptr, int arg1, int pc )
1.41 {
1.42 - REXW(); MOV_r32_r32(arg1, R_EDI);
1.43 - load_exc_backpatch(R_ESI);
1.44 + MOVQ_r64_r64(arg1, REG_RDI);
1.45 + MOVP_immptr_rptr(0, REG_RSI);
1.46 + sh4_x86_add_backpatch( xlat_output, pc, -2 );
1.47 call_func0(ptr);
1.48 }
1.49
1.50 static inline void call_func1_r32disp8( int preg, uint32_t disp8, int arg1 )
1.51 {
1.52 - REXW(); MOV_r32_r32(arg1, R_EDI);
1.53 - CALL_r32disp8(preg, disp8);
1.54 + MOVQ_r64_r64(arg1, REG_RDI);
1.55 + CALL_r32disp(preg, disp8);
1.56 }
1.57
1.58 static inline void call_func1_r32disp8_exc( int preg, uint32_t disp8, int arg1, int pc )
1.59 {
1.60 - REXW(); MOV_r32_r32(arg1, R_EDI);
1.61 - load_exc_backpatch(R_ESI);
1.62 - CALL_r32disp8(preg, disp8);
1.63 + MOVQ_r64_r64(arg1, REG_RDI);
1.64 + MOVP_immptr_rptr(0, REG_RSI);
1.65 + sh4_x86_add_backpatch( xlat_output, pc, -2 );
1.66 + CALL_r32disp(preg, disp8);
1.67 }
1.68
1.69 -#define CALL_FUNC2_SIZE 16
1.70 static inline void call_func2( void *ptr, int arg1, int arg2 )
1.71 {
1.72 - REXW(); MOV_r32_r32(arg1, R_EDI);
1.73 - REXW(); MOV_r32_r32(arg2, R_ESI);
1.74 + MOVQ_r64_r64(arg1, REG_RDI);
1.75 + MOVQ_r64_r64(arg2, REG_RSI);
1.76 call_func0(ptr);
1.77 }
1.78
1.79 static inline void call_func2_r32disp8( int preg, uint32_t disp8, int arg1, int arg2 )
1.80 {
1.81 - REXW(); MOV_r32_r32(arg1, R_EDI);
1.82 - REXW(); MOV_r32_r32(arg2, R_ESI);
1.83 - CALL_r32disp8(preg, disp8);
1.84 + MOVQ_r64_r64(arg1, REG_RDI);
1.85 + MOVQ_r64_r64(arg2, REG_RSI);
1.86 + CALL_r32disp(preg, disp8);
1.87 }
1.88
1.89 static inline void call_func2_r32disp8_exc( int preg, uint32_t disp8, int arg1, int arg2, int pc )
1.90 {
1.91 - REXW(); MOV_r32_r32(arg1, R_EDI);
1.92 - REXW(); MOV_r32_r32(arg2, R_ESI);
1.93 - load_exc_backpatch(R_EDX);
1.94 - CALL_r32disp8(preg, disp8);
1.95 + MOVQ_r64_r64(arg1, REG_RDI);
1.96 + MOVQ_r64_r64(arg2, REG_RSI);
1.97 + MOVP_immptr_rptr(0, REG_RDX);
1.98 + sh4_x86_add_backpatch( xlat_output, pc, -2 );
1.99 + CALL_r32disp(preg, disp8);
1.100 }
1.101
1.102
1.103 @@ -103,16 +104,16 @@
1.104 */
1.105 void enter_block( )
1.106 {
1.107 - PUSH_r32(R_EBP);
1.108 - load_ptr( R_EBP, ((uint8_t *)&sh4r) + 128 );
1.109 + PUSH_r32(REG_RBP);
1.110 + load_ptr( REG_RBP, ((uint8_t *)&sh4r) + 128 );
1.111 // Minimum aligned allocation is 16 bytes
1.112 - REXW(); SUB_imm8s_r32( 16, R_ESP );
1.113 + SUBQ_imms_r64( 16, REG_RSP );
1.114 }
1.115
1.116 static inline void exit_block( )
1.117 {
1.118 - REXW(); ADD_imm8s_r32( 16, R_ESP );
1.119 - POP_r32(R_EBP);
1.120 + ADDQ_imms_r64( 16, REG_RSP );
1.121 + POP_r32(REG_RBP);
1.122 RET();
1.123 }
1.124
1.125 @@ -121,13 +122,13 @@
1.126 */
1.127 void exit_block_pcset( sh4addr_t pc )
1.128 {
1.129 - load_imm32( R_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
1.130 - ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) ); // 6
1.131 - load_spreg( R_EAX, R_PC );
1.132 + load_imm32( REG_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
1.133 + ADDL_r32_rbpdisp( REG_ECX, REG_OFFSET(slice_cycle) ); // 6
1.134 + load_spreg( REG_RAX, R_PC );
1.135 if( sh4_x86.tlb_on ) {
1.136 - call_func1(xlat_get_code_by_vma,R_EAX);
1.137 + call_func1(xlat_get_code_by_vma,REG_RAX);
1.138 } else {
1.139 - call_func1(xlat_get_code,R_EAX);
1.140 + call_func1(xlat_get_code,REG_RAX);
1.141 }
1.142 exit_block();
1.143 }
1.144 @@ -137,14 +138,14 @@
1.145 */
1.146 void exit_block_newpcset( sh4addr_t pc )
1.147 {
1.148 - load_imm32( R_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
1.149 - ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) ); // 6
1.150 - load_spreg( R_EAX, R_NEW_PC );
1.151 - store_spreg( R_EAX, R_PC );
1.152 + load_imm32( REG_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
1.153 + ADDL_r32_rbpdisp( REG_ECX, REG_OFFSET(slice_cycle) ); // 6
1.154 + load_spreg( REG_RAX, R_NEW_PC );
1.155 + store_spreg( REG_RAX, R_PC );
1.156 if( sh4_x86.tlb_on ) {
1.157 - call_func1(xlat_get_code_by_vma,R_EAX);
1.158 + call_func1(xlat_get_code_by_vma,REG_RAX);
1.159 } else {
1.160 - call_func1(xlat_get_code,R_EAX);
1.161 + call_func1(xlat_get_code,REG_RAX);
1.162 }
1.163 exit_block();
1.164 }
1.165 @@ -155,18 +156,18 @@
1.166 */
1.167 void exit_block_abs( sh4addr_t pc, sh4addr_t endpc )
1.168 {
1.169 - load_imm32( R_ECX, pc ); // 5
1.170 - store_spreg( R_ECX, REG_OFFSET(pc) ); // 3
1.171 + load_imm32( REG_RCX, pc ); // 5
1.172 + store_spreg( REG_RCX, REG_OFFSET(pc) ); // 3
1.173 if( IS_IN_ICACHE(pc) ) {
1.174 - REXW(); MOV_moff32_EAX( xlat_get_lut_entry(pc) );
1.175 - REXW(); AND_imm8s_r32( 0xFC, R_EAX ); // 4
1.176 + MOVP_moffptr_rax( xlat_get_lut_entry(pc) );
1.177 + ANDQ_imms_r64( 0xFFFFFFFC, REG_RAX ); // 4
1.178 } else if( sh4_x86.tlb_on ) {
1.179 - call_func1(xlat_get_code_by_vma, R_ECX);
1.180 + call_func1(xlat_get_code_by_vma, REG_RCX);
1.181 } else {
1.182 - call_func1(xlat_get_code,R_ECX);
1.183 + call_func1(xlat_get_code,REG_RCX);
1.184 }
1.185 - load_imm32( R_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
1.186 - ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) ); // 6
1.187 + load_imm32( REG_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
1.188 + ADDL_r32_rbpdisp( REG_ECX, REG_OFFSET(slice_cycle) ); // 6
1.189 exit_block();
1.190 }
1.191
1.192 @@ -178,19 +179,19 @@
1.193 */
1.194 void exit_block_rel( sh4addr_t pc, sh4addr_t endpc )
1.195 {
1.196 - load_imm32( R_ECX, pc - sh4_x86.block_start_pc ); // 5
1.197 - ADD_sh4r_r32( R_PC, R_ECX );
1.198 - store_spreg( R_ECX, REG_OFFSET(pc) ); // 3
1.199 + load_imm32( REG_ECX, pc - sh4_x86.block_start_pc ); // 5
1.200 + ADDL_rbpdisp_r32( R_PC, REG_ECX );
1.201 + store_spreg( REG_ECX, REG_OFFSET(pc) ); // 3
1.202 if( IS_IN_ICACHE(pc) ) {
1.203 - REXW(); MOV_moff32_EAX( xlat_get_lut_entry(GET_ICACHE_PHYS(pc)) ); // 5
1.204 - REXW(); AND_imm8s_r32( 0xFC, R_EAX ); // 4
1.205 + MOVP_moffptr_rax( xlat_get_lut_entry(GET_ICACHE_PHYS(pc)) ); // 5
1.206 + ANDQ_imms_r64( 0xFFFFFFFC, REG_RAX ); // 4
1.207 } else if( sh4_x86.tlb_on ) {
1.208 - call_func1(xlat_get_code_by_vma,R_ECX);
1.209 + call_func1(xlat_get_code_by_vma,REG_RCX);
1.210 } else {
1.211 - call_func1(xlat_get_code,R_ECX);
1.212 + call_func1(xlat_get_code,REG_RCX);
1.213 }
1.214 - load_imm32( R_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
1.215 - ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) ); // 6
1.216 + load_imm32( REG_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
1.217 + ADDL_r32_rbpdisp( REG_ECX, REG_OFFSET(slice_cycle) ); // 6
1.218 exit_block();
1.219 }
1.220
1.221 @@ -199,18 +200,18 @@
1.222 */
1.223 void exit_block_exc( int code, sh4addr_t pc )
1.224 {
1.225 - load_imm32( R_ECX, pc - sh4_x86.block_start_pc ); // 5
1.226 - ADD_r32_sh4r( R_ECX, R_PC );
1.227 - load_imm32( R_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
1.228 - ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) ); // 6
1.229 - load_imm32( R_EAX, code );
1.230 - call_func1( sh4_raise_exception, R_EAX );
1.231 + load_imm32( REG_ECX, pc - sh4_x86.block_start_pc ); // 5
1.232 + ADDL_r32_rbpdisp( REG_ECX, R_PC );
1.233 + load_imm32( REG_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
1.234 + ADDL_r32_rbpdisp( REG_ECX, REG_OFFSET(slice_cycle) ); // 6
1.235 + load_imm32( REG_RAX, code );
1.236 + call_func1( sh4_raise_exception, REG_RAX );
1.237
1.238 - load_spreg( R_EAX, R_PC );
1.239 + load_spreg( REG_RAX, R_PC );
1.240 if( sh4_x86.tlb_on ) {
1.241 - call_func1(xlat_get_code_by_vma,R_EAX);
1.242 + call_func1(xlat_get_code_by_vma,REG_RAX);
1.243 } else {
1.244 - call_func1(xlat_get_code,R_EAX);
1.245 + call_func1(xlat_get_code,REG_RAX);
1.246 }
1.247
1.248 exit_block();
1.249 @@ -229,31 +230,31 @@
1.250 unsigned int i;
1.251 // Raise exception
1.252 uint8_t *end_ptr = xlat_output;
1.253 - MOV_r32_r32( R_EDX, R_ECX );
1.254 - ADD_r32_r32( R_EDX, R_ECX );
1.255 - ADD_r32_sh4r( R_ECX, R_PC );
1.256 - MOV_moff32_EAX( &sh4_cpu_period );
1.257 - MUL_r32( R_EDX );
1.258 - ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );
1.259 + MOVL_r32_r32( REG_RDX, REG_RCX );
1.260 + ADDL_r32_r32( REG_RDX, REG_RCX );
1.261 + ADDL_r32_rbpdisp( REG_RCX, R_PC );
1.262 + MOVL_moffptr_eax( &sh4_cpu_period );
1.263 + MULL_r32( REG_RDX );
1.264 + ADDL_r32_rbpdisp( REG_RAX, REG_OFFSET(slice_cycle) );
1.265
1.266 call_func0( sh4_raise_exception );
1.267 - load_spreg( R_EAX, R_PC );
1.268 + load_spreg( REG_RAX, R_PC );
1.269 if( sh4_x86.tlb_on ) {
1.270 - call_func1(xlat_get_code_by_vma,R_EAX);
1.271 + call_func1(xlat_get_code_by_vma,REG_RAX);
1.272 } else {
1.273 - call_func1(xlat_get_code,R_EAX);
1.274 + call_func1(xlat_get_code,REG_RAX);
1.275 }
1.276 exit_block();
1.277
1.278 // Exception already raised - just cleanup
1.279 uint8_t *preexc_ptr = xlat_output;
1.280 - MOV_r32_r32( R_EDX, R_ECX );
1.281 - ADD_r32_r32( R_EDX, R_ECX );
1.282 - ADD_r32_sh4r( R_ECX, R_SPC );
1.283 - MOV_moff32_EAX( &sh4_cpu_period );
1.284 - MUL_r32( R_EDX );
1.285 - ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );
1.286 - load_spreg( R_EDI, R_PC );
1.287 + MOVL_r32_r32( REG_EDX, REG_ECX );
1.288 + ADDL_r32_r32( REG_EDX, REG_ECX );
1.289 + ADDL_r32_rbpdisp( REG_ECX, R_SPC );
1.290 + MOVL_moffptr_eax( &sh4_cpu_period );
1.291 + MULL_r32( REG_EDX );
1.292 + ADDL_r32_rbpdisp( REG_EAX, REG_OFFSET(slice_cycle) );
1.293 + load_spreg( REG_RDI, R_PC );
1.294 if( sh4_x86.tlb_on ) {
1.295 call_func0(xlat_get_code_by_vma);
1.296 } else {
1.297 @@ -269,15 +270,15 @@
1.298 } else {
1.299 *fixup_addr = xlat_output - (uint8_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset] - 4;
1.300 }
1.301 - load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
1.302 + load_imm32( REG_RDX, sh4_x86.backpatch_list[i].fixup_icount );
1.303 int rel = preexc_ptr - xlat_output;
1.304 - JMP_rel(rel);
1.305 + JMP_prerel(rel);
1.306 } else {
1.307 *fixup_addr = xlat_output - (uint8_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset] - 4;
1.308 - load_imm32( R_EDI, sh4_x86.backpatch_list[i].exc_code );
1.309 - load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
1.310 + load_imm32( REG_RDI, sh4_x86.backpatch_list[i].exc_code );
1.311 + load_imm32( REG_RDX, sh4_x86.backpatch_list[i].fixup_icount );
1.312 int rel = end_ptr - xlat_output;
1.313 - JMP_rel(rel);
1.314 + JMP_prerel(rel);
1.315 }
1.316 }
1.317 }
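
Two notes on the patterns above, for readers of the diff. First, the rewritten decode_address() is a straight per-page table lookup: SHRL_imm_r32( 12, REG_RCX ) takes the 4KB page number, and MOVP_sib_rptr( 3, REG_RCX, REG_RDI, 0, REG_RCX ) loads the 8-byte entry at [%rdi + %rcx*8] (a scale field of 3 means a factor of 2^3 = 8), exactly the instruction the old commented byte sequence spelled out. In C the computation amounts to the following sketch; the void** typing is an assumption, justified only by the *8 scale implying one pointer per page:

    /* 'addr' stands for the 32-bit value held in addr_reg */
    void **space = (sh4r.xlat_sh4_mode & SR_MD) ? (void **)sh4_address_space
                                                : (void **)sh4_user_address_space;
    void *entry = space[ addr >> 12 ];  /* one 8-byte pointer per 4KB page */

Second, the call_func*_exc helpers no longer go through a load_exc_backpatch() wrapper: they emit MOVP_immptr_rptr( 0, reg ) as a placeholder for the exception return address and record the site with sh4_x86_add_backpatch( xlat_output, pc, -2 ), so the immediate can be patched in once the final address is known.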