/**
 * Provides the implementation for the AMD64 ABI (e.g. prologue, epilogue, and
 * calling conventions)
 *
 * Copyright (c) 2007 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#ifndef lxdream_ia64abi_H
#define lxdream_ia64abi_H 1

#include <unwind.h>
#define load_ptr( reg, ptr ) load_imm64( reg, (uint64_t)ptr );
static inline void decode_address( int addr_reg )
{
    MOV_r32_r32( addr_reg, R_ECX );
    SHR_imm8_r32( 12, R_ECX );
    load_ptr( R_EDI, sh4_address_space );
    REXW(); OP(0x8B); OP(0x0C); OP(0xCF);   // mov.q [%rdi + %rcx*8], %rcx
}
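/* Illustrative note: the sequence above is the emitted equivalent of
 *
 *     RCX = sh4_address_space[ addr >> 12 ];
 *
 * i.e. an index into the per-4KB-page region table, leaving the region
 * pointer in RCX (and clobbering RDI with the table base along the way).
 */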
/**
 * Note: clobbers EAX to make the indirect call - this isn't usually
 * a problem since the callee will usually clobber it anyway.
 */
#define CALL_FUNC0_SIZE 12
static inline void call_func0( void *ptr )
{
    load_imm64(R_EAX, (uint64_t)ptr);
    CALL_r32(R_EAX);
}
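/* Byte accounting for CALL_FUNC0_SIZE: the emitted sequence is
 *
 *     mov  $ptr, %rax      ; movabs = REX.W + opcode + imm64 = 10 bytes
 *     call *%rax           ; FF /2 = 2 bytes
 *
 * for 12 bytes in total.
 */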
#define CALL_FUNC1_SIZE 14
static inline void call_func1( void *ptr, int arg1 )
{
    REXW(); MOV_r32_r32(arg1, R_EDI);
    call_func0(ptr);
}
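/* The move places arg1 in RDI, the first integer argument register in the
 * System V AMD64 calling convention (RDI, RSI, RDX, RCX, R8, R9). A typical
 * use site elsewhere in this file:
 *
 *     call_func1( sh4_read_long, addr );   // addr -> %rdi, then call
 */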
static inline void call_func1_exc( void *ptr, int arg1, int pc )
{
    REXW(); MOV_r32_r32(arg1, R_EDI);
    load_exc_backpatch(R_ESI);
    call_func0(ptr);
}
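/* Assumption (inferred from the exc_code == -2 fixup case in
 * sh4_translate_end_block below): load_exc_backpatch emits a placeholder
 * 64-bit immediate load into RSI, which the block trailer later patches with
 * the absolute address of the exception-recovery stub, so the callee
 * receives that address as its second argument.
 */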
static inline void call_func1_r32disp8( int preg, uint32_t disp8, int arg1 )
{
    REXW(); MOV_r32_r32(arg1, R_EDI);
    CALL_r32disp8(preg, disp8);
}
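/* CALL_r32disp8 emits an indirect call through [preg + disp8], i.e.
 * `call *disp8(%preg)` - suitable for dispatching through a function-pointer
 * table such as the region pointer loaded by decode_address.
 */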
#define CALL_FUNC2_SIZE 16
static inline void call_func2( void *ptr, int arg1, int arg2 )
{
    REXW(); MOV_r32_r32(arg1, R_EDI);
    REXW(); MOV_r32_r32(arg2, R_ESI);
    call_func0(ptr);
}
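/* As with call_func1, the two arguments land in RDI and RSI per the ABI;
 * e.g. call_func2( sh4_write_long, addr, val ) behaves like the C call
 * sh4_write_long( addr, val ) with both operands taken from host registers.
 */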
static inline void call_func2_r32disp8( int preg, uint32_t disp8, int arg1, int arg2 )
{
    REXW(); MOV_r32_r32(arg1, R_EDI);
    REXW(); MOV_r32_r32(arg2, R_ESI);
    CALL_r32disp8(preg, disp8);
}
#define MEM_WRITE_DOUBLE_SIZE 35
/**
 * Write a double (64-bit) value into memory, with the first word in arg2a and
 * the second in arg2b.
 */
static inline void MEM_WRITE_DOUBLE( int addr, int arg2a, int arg2b )
{
    PUSH_r32(arg2b);
    PUSH_r32(addr);
    call_func2(sh4_write_long, addr, arg2a);
    POP_r32(R_EDI);
    POP_r32(R_ESI);
    ADD_imm8s_r32(4, R_EDI);
    call_func0(sh4_write_long);
}
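/* C-level sketch of the emitted sequence:
 *
 *     sh4_write_long( addr, arg2a );
 *     sh4_write_long( addr + 4, arg2b );
 *
 * addr and arg2b are saved on the stack across the first call because the
 * argument registers are all caller-saved.
 */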
#define MEM_READ_DOUBLE_SIZE 43
/**
 * Read a double (64-bit) value from memory, writing the first word into arg2a
 * and the second into arg2b. addr must not be in EAX.
 */
static inline void MEM_READ_DOUBLE( int addr, int arg2a, int arg2b )
{
    REXW(); SUB_imm8s_r32( 8, R_ESP );
    PUSH_r32(addr);
    call_func1(sh4_read_long, addr);
    POP_r32(R_EDI);
    PUSH_r32(R_EAX);
    ADD_imm8s_r32(4, R_EDI);
    call_func0(sh4_read_long);
    MOV_r32_r32(R_EAX, arg2b);
    POP_r32(arg2a);
    REXW(); ADD_imm8s_r32( 8, R_ESP );
}
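/* C-level sketch of the emitted sequence:
 *
 *     arg2a = sh4_read_long( addr );
 *     arg2b = sh4_read_long( addr + 4 );
 *
 * The extra 8-byte SUB/ADD keeps RSP 16-byte aligned at each call while a
 * single saved value is outstanding on the stack.
 */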
/**
 * Emit the 'start of block' assembly. Sets up the stack frame and saves
 * SI/DI as required.
 */
void enter_block( )
{
    PUSH_r32(R_EBP);
    load_ptr( R_EBP, ((uint8_t *)&sh4r) + 128 );
    // Minimum aligned allocation is 16 bytes
    REXW(); SUB_imm8s_r32( 16, R_ESP );
}
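/* The 128-byte bias on the RBP base pointer is presumably so that sh4r
 * fields in the first 256 bytes of the structure can be addressed with
 * signed 8-bit displacements (-128..+127), keeping emitted accesses short.
 */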
static inline void exit_block( )
{
    REXW(); ADD_imm8s_r32( 16, R_ESP );
    POP_r32(R_EBP);
    RET();
}
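/* Stack accounting: the block is entered by a CALL, so RSP % 16 == 8 on
 * entry; pushing RBP and subtracting 16 leave RSP 16-byte aligned, as the
 * ABI requires at every call emitted within the block body.
 */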
/**
 * Exit the block with sh4r.pc already written
 */
void exit_block_pcset( sh4addr_t pc )
{
    load_imm32( R_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );    // 6
    load_spreg( R_EAX, R_PC );
    if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma,R_EAX);
    } else {
        call_func1(xlat_get_code,R_EAX);
    }
    exit_block();
}
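/* Cycle accounting: SH4 instructions are 2 bytes each, so
 * (pc - block_start_pc) >> 1 is the number of instructions executed in this
 * block; multiplied by sh4_cpu_period it becomes the time charged to
 * sh4r.slice_cycle on exit.
 */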
/**
 * Exit the block with sh4r.new_pc written with the target address
 */
void exit_block_newpcset( sh4addr_t pc )
{
    load_imm32( R_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );    // 6
    load_spreg( R_EAX, R_NEW_PC );
    store_spreg( R_EAX, R_PC );
    if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma,R_EAX);
    } else {
        call_func1(xlat_get_code,R_EAX);
    }
    exit_block();
}
#define EXIT_BLOCK_SIZE(pc) (25 + (IS_IN_ICACHE(pc)?10:CALL_FUNC1_SIZE))
/**
 * Exit the block to an absolute PC
 */
void exit_block_abs( sh4addr_t pc, sh4addr_t endpc )
{
    load_imm32( R_ECX, pc );                            // 5
    store_spreg( R_ECX, REG_OFFSET(pc) );               // 3
    if( IS_IN_ICACHE(pc) ) {
        REXW(); MOV_moff32_EAX( xlat_get_lut_entry(pc) );
        REXW(); AND_imm8s_r32( 0xFC, R_EAX );           // 4
    } else if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma, R_ECX);
    } else {
        call_func1(xlat_get_code,R_ECX);
    }
    load_imm32( R_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );     // 6
    exit_block();
}
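/* In the IS_IN_ICACHE fast path, the next block's entry is fetched directly
 * from the translation LUT; the REX.W AND with sign-extended 0xFC clears the
 * low two bits of the entry (apparently used as flags), leaving a clean code
 * pointer in RAX as the block's return value.
 */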
#define EXIT_BLOCK_REL_SIZE(pc) (28 + (IS_IN_ICACHE(pc)?10:CALL_FUNC1_SIZE))
/**
 * Exit the block to a relative PC
 */
void exit_block_rel( sh4addr_t pc, sh4addr_t endpc )
{
    load_imm32( R_ECX, pc - sh4_x86.block_start_pc );   // 5
    ADD_sh4r_r32( R_PC, R_ECX );
    store_spreg( R_ECX, REG_OFFSET(pc) );               // 3
    if( IS_IN_ICACHE(pc) ) {
        REXW(); MOV_moff32_EAX( xlat_get_lut_entry(GET_ICACHE_PHYS(pc)) ); // 5
        REXW(); AND_imm8s_r32( 0xFC, R_EAX );           // 4
    } else if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma,R_ECX);
    } else {
        call_func1(xlat_get_code,R_ECX);
    }
    load_imm32( R_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );     // 6
    exit_block();
}
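/* This differs from exit_block_abs only in forming the target PC relative to
 * the current sh4r.pc (the extra 3-byte ADD_sh4r_r32 accounts for the 28 in
 * EXIT_BLOCK_REL_SIZE versus 25) and in looking the LUT entry up by the
 * block's physical icache address.
 */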
/**
 * Write the block trailer (exception handling block)
 */
void sh4_translate_end_block( sh4addr_t pc ) {
    if( sh4_x86.branch_taken == FALSE ) {
        // Didn't exit unconditionally already, so write the termination here
        exit_block_rel( pc, pc );
    }
    if( sh4_x86.backpatch_posn != 0 ) {
        unsigned int i;
        // Raise exception
        uint8_t *end_ptr = xlat_output;
        // Entry: EDX = instruction count within the block, EDI = exception code
        MOV_r32_r32( R_EDX, R_ECX );
        ADD_r32_r32( R_EDX, R_ECX );        // ECX = icount * 2 = byte offset
        ADD_r32_sh4r( R_ECX, R_PC );        // sh4r.pc += offset
        MOV_moff32_EAX( &sh4_cpu_period );
        MUL_r32( R_EDX );                   // EAX = icount * sh4_cpu_period
        ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );

        call_func0( sh4_raise_exception );
        load_spreg( R_EAX, R_PC );
        if( sh4_x86.tlb_on ) {
            call_func1(xlat_get_code_by_vma,R_EAX);
        } else {
            call_func1(xlat_get_code,R_EAX);
        }
        exit_block();
        // Exception already raised - just cleanup
        uint8_t *preexc_ptr = xlat_output;
        // Entry: EDX = instruction count within the block
        MOV_r32_r32( R_EDX, R_ECX );
        ADD_r32_r32( R_EDX, R_ECX );        // ECX = icount * 2 = byte offset
        ADD_r32_sh4r( R_ECX, R_SPC );       // sh4r.spc += offset
        MOV_moff32_EAX( &sh4_cpu_period );
        MUL_r32( R_EDX );                   // EAX = icount * sh4_cpu_period
        ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );
        load_spreg( R_EDI, R_PC );
        if( sh4_x86.tlb_on ) {
            call_func0(xlat_get_code_by_vma);
        } else {
            call_func0(xlat_get_code);
        }
        exit_block();
        for( i=0; i< sh4_x86.backpatch_posn; i++ ) {
            uint32_t *fixup_addr = (uint32_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset];
            if( sh4_x86.backpatch_list[i].exc_code < 0 ) {
                if( sh4_x86.backpatch_list[i].exc_code == -2 ) {
                    // Absolute pointer fixup - patch in the stub's address
                    *((uintptr_t *)fixup_addr) = (uintptr_t)xlat_output;
                } else {
                    // rel32 fixup - patch in the displacement to the stub
                    *fixup_addr = xlat_output - (uint8_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset] - 4;
                }
                load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
                int rel = preexc_ptr - xlat_output;
                JMP_rel(rel);
            } else {
                *fixup_addr = xlat_output - (uint8_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset] - 4;
                load_imm32( R_EDI, sh4_x86.backpatch_list[i].exc_code );
                load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
                int rel = end_ptr - xlat_output;
                JMP_rel(rel);
            }
        }
    }
}
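/* Summary of the backpatch scheme: each list entry is patched either with an
 * absolute pointer to its jump stub (exc_code == -2, matching the imm64
 * placeholder emitted by load_exc_backpatch) or with a rel32 displacement
 * (the -4 adjusts for the rel32 field itself). Each stub loads the faulting
 * instruction count into EDX - plus the exception code into EDI when the
 * exception still needs to be raised - and jumps to the shared trailer at
 * end_ptr or preexc_ptr above.
 */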
struct UnwindInfo {
    uintptr_t block_start;
    uintptr_t block_end;
    void *pc;
};
_Unwind_Reason_Code xlat_check_frame( struct _Unwind_Context *context, void *arg )
{
    struct UnwindInfo *info = arg;
    void *pc = (void *)_Unwind_GetIP(context);
    if( ((uintptr_t)pc) >= info->block_start && ((uintptr_t)pc) < info->block_end ) {
        info->pc = pc;
        return _URC_NORMAL_STOP;
    }
    return _URC_NO_REASON;
}
void *xlat_get_native_pc( void *code, uint32_t code_size )
{
    struct _Unwind_Exception exc;
    struct UnwindInfo info;

    info.pc = NULL;
    info.block_start = (uintptr_t)code;
    info.block_end = info.block_start + code_size;
    _Unwind_Backtrace( xlat_check_frame, &info );
    return info.pc;
}
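/* This walks the host stack with the libgcc unwinder: xlat_check_frame stops
 * at the first frame whose IP falls inside [code, code+code_size), i.e. the
 * interrupted position within the translated block. Hypothetical call site,
 * for illustration only:
 *
 *     void *native_pc = xlat_get_native_pc( block->code, block->size );
 *     // map native_pc back to an SH4 PC for exception handling
 */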
#endif /* !lxdream_ia64abi_H */