/**
 * Provides the implementation for the AMD64 ABI (eg prologue, epilogue, and
 * calling conventions)
 *
 * Copyright (c) 2007 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef lxdream_ia64abi_H
#define lxdream_ia64abi_H 1

#include <unwind.h>

#define load_ptr( reg, ptr ) load_imm64( reg, (uint64_t)ptr );

static inline void decode_address( int addr_reg )
{
    uintptr_t base = (sh4r.xlat_sh4_mode&SR_MD) ? (uintptr_t)sh4_address_space : (uintptr_t)sh4_user_address_space;
    MOV_r32_r32( addr_reg, R_ECX );
    SHR_imm8_r32( 12, R_ECX );
    load_ptr( R_EDI, base );
    REXW(); OP(0x8B); OP(0x0C); OP(0xCF);   // mov.q [%rdi + %rcx*8], %rcx
}
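
/* Usage sketch (illustrative only - the real memory emitters live elsewhere
 * in the translator): after decode_address(addr_reg), %rcx holds the
 * mem_region_fn pointer for the page containing the target address, so an
 * access can be emitted as an indirect call through that table, e.g.:
 *
 *     decode_address( R_EAX );
 *     call_func1_r32disp8( R_ECX, offsetof(struct mem_region_fn, read_long), R_EAX );
 */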

/**
 * Note: clobbers EAX to make the indirect call - this isn't usually
 * a problem since the callee will usually clobber it anyway.
 */
#define CALL_FUNC0_SIZE 12
static inline void call_func0( void *ptr )
{
    load_imm64(R_EAX, (uint64_t)ptr);
    CALL_r32(R_EAX);
}
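
/* The emitted sequence is a 10-byte mov $imm64, %rax followed by a 2-byte
 * call *%rax - hence CALL_FUNC0_SIZE == 12. */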

#define CALL_FUNC1_SIZE 14
static inline void call_func1( void *ptr, int arg1 )
{
    REXW(); MOV_r32_r32(arg1, R_EDI);
    call_func0(ptr);
}

static inline void call_func1_exc( void *ptr, int arg1, int pc )
{
    REXW(); MOV_r32_r32(arg1, R_EDI);
    load_exc_backpatch(R_ESI);
    call_func0(ptr);
}

static inline void call_func1_r32disp8( int preg, uint32_t disp8, int arg1 )
{
    REXW(); MOV_r32_r32(arg1, R_EDI);
    CALL_r32disp8(preg, disp8);
}

static inline void call_func1_r32disp8_exc( int preg, uint32_t disp8, int arg1, int pc )
{
    REXW(); MOV_r32_r32(arg1, R_EDI);
    load_exc_backpatch(R_ESI);
    CALL_r32disp8(preg, disp8);
}

#define CALL_FUNC2_SIZE 16
static inline void call_func2( void *ptr, int arg1, int arg2 )
{
    REXW(); MOV_r32_r32(arg1, R_EDI);
    REXW(); MOV_r32_r32(arg2, R_ESI);
    call_func0(ptr);
}

static inline void call_func2_r32disp8( int preg, uint32_t disp8, int arg1, int arg2 )
{
    REXW(); MOV_r32_r32(arg1, R_EDI);
    REXW(); MOV_r32_r32(arg2, R_ESI);
    CALL_r32disp8(preg, disp8);
}

static inline void call_func2_r32disp8_exc( int preg, uint32_t disp8, int arg1, int arg2, int pc )
{
    REXW(); MOV_r32_r32(arg1, R_EDI);
    REXW(); MOV_r32_r32(arg2, R_ESI);
    load_exc_backpatch(R_EDX);
    CALL_r32disp8(preg, disp8);
}
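
/* The _exc variants pass one extra argument in the next free argument
 * register (%rsi for 1-arg calls, %rdx for 2-arg calls): a native code
 * address emitted as a placeholder by load_exc_backpatch() and patched in
 * sh4_translate_end_block() below, apparently giving the callee a recovery
 * point if the access raises an SH4 exception. */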

/**
 * Emit the 'start of block' assembly. Sets up the stack frame and save
 * SI/DI as required.
 */
void enter_block( )
{
    PUSH_r32(R_EBP);
    load_ptr( R_EBP, ((uint8_t *)&sh4r) + 128 );
    // Minimum aligned allocation is 16 bytes
    REXW(); SUB_imm8s_r32( 16, R_ESP );
}
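
/* %rbp is pointed 128 bytes into sh4r so that the whole register block is
 * reachable with signed 8-bit displacements in either direction; the 16-byte
 * SUB keeps %rsp 16-byte aligned at call sites, as the AMD64 ABI requires. */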

static inline void exit_block( )
{
    REXW(); ADD_imm8s_r32( 16, R_ESP );
    POP_r32(R_EBP);
    RET();
}

/**
 * Exit the block with sh4r.pc already written
 */
void exit_block_pcset( sh4addr_t pc )
{
    load_imm32( R_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );    // 6
    load_spreg( R_EAX, R_PC );
    if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma,R_EAX);
    } else {
        call_func1(xlat_get_code,R_EAX);
    }
    exit_block();
}
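
/* With the MMU enabled (tlb_on), the next block must be looked up by virtual
 * address via xlat_get_code_by_vma(), which goes through TLB translation;
 * otherwise xlat_get_code() is sufficient. The same dispatch recurs in every
 * exit path below. */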

/**
 * Exit the block with sh4r.new_pc written with the target address
 */
void exit_block_newpcset( sh4addr_t pc )
{
    load_imm32( R_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );    // 6
    load_spreg( R_EAX, R_NEW_PC );
    store_spreg( R_EAX, R_PC );
    if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma,R_EAX);
    } else {
        call_func1(xlat_get_code,R_EAX);
    }
    exit_block();
}

#define EXIT_BLOCK_SIZE(pc) (25 + (IS_IN_ICACHE(pc)?10:CALL_FUNC1_SIZE))

/**
 * Exit the block to an absolute PC
 */
void exit_block_abs( sh4addr_t pc, sh4addr_t endpc )
{
    load_imm32( R_ECX, pc );                            // 5
    store_spreg( R_ECX, REG_OFFSET(pc) );               // 3
    if( IS_IN_ICACHE(pc) ) {
        REXW(); MOV_moff32_EAX( xlat_get_lut_entry(pc) );
        REXW(); AND_imm8s_r32( 0xFC, R_EAX );           // 4
    } else if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma, R_ECX);
    } else {
        call_func1(xlat_get_code,R_ECX);
    }
    load_imm32( R_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );     // 6
    exit_block();
}
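
/* The trailing "// 5", "// 3" etc. comments record the size in bytes of each
 * emitted instruction; the EXIT_BLOCK_*SIZE macros sum these so the
 * translator knows a stub's total length before emitting it (e.g. to emit a
 * conditional short jump over it). */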

#define EXIT_BLOCK_REL_SIZE(pc) (28 + (IS_IN_ICACHE(pc)?10:CALL_FUNC1_SIZE))

/**
 * Exit the block to a relative PC
 */
void exit_block_rel( sh4addr_t pc, sh4addr_t endpc )
{
    load_imm32( R_ECX, pc - sh4_x86.block_start_pc );   // 5
    ADD_sh4r_r32( R_PC, R_ECX );
    store_spreg( R_ECX, REG_OFFSET(pc) );               // 3
    if( IS_IN_ICACHE(pc) ) {
        REXW(); MOV_moff32_EAX( xlat_get_lut_entry(GET_ICACHE_PHYS(pc)) ); // 5
        REXW(); AND_imm8s_r32( 0xFC, R_EAX );           // 4
    } else if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma,R_ECX);
    } else {
        call_func1(xlat_get_code,R_ECX);
    }
    load_imm32( R_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );     // 6
    exit_block();
}
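
/* When the target is already in the icache (IS_IN_ICACHE), the code pointer
 * is fetched straight from the translation LUT; the sign-extended AND with
 * 0xFC clears the low two bits of the 64-bit entry, which appear to be used
 * as flag bits, avoiding a lookup call entirely. */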

/**
 * Exit unconditionally with a general exception
 */
void exit_block_exc( int code, sh4addr_t pc )
{
    load_imm32( R_ECX, pc - sh4_x86.block_start_pc );   // 5
    ADD_r32_sh4r( R_ECX, R_PC );
    load_imm32( R_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );     // 6
    load_imm32( R_EAX, code );
    call_func1( sh4_raise_exception, R_EAX );

    load_spreg( R_EAX, R_PC );
    if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma,R_EAX);
    } else {
        call_func1(xlat_get_code,R_EAX);
    }
    exit_block();
}

/**
 * Write the block trailer (exception handling block)
 */
void sh4_translate_end_block( sh4addr_t pc ) {
    if( sh4_x86.branch_taken == FALSE ) {
        // Didn't exit unconditionally already, so write the termination here
        exit_block_rel( pc, pc );
    }
    if( sh4_x86.backpatch_posn != 0 ) {
        unsigned int i;
        // Raise exception: on entry %edx holds the instruction count at the
        // faulting point (loaded by the backpatch stubs emitted below)
        uint8_t *end_ptr = xlat_output;
        MOV_r32_r32( R_EDX, R_ECX );
        ADD_r32_r32( R_EDX, R_ECX );
        ADD_r32_sh4r( R_ECX, R_PC );                    // pc += icount*2
        MOV_moff32_EAX( &sh4_cpu_period );
        MUL_r32( R_EDX );                               // eax = icount * cpu_period
        ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );

        call_func0( sh4_raise_exception );
        load_spreg( R_EAX, R_PC );
        if( sh4_x86.tlb_on ) {
            call_func1(xlat_get_code_by_vma,R_EAX);
        } else {
            call_func1(xlat_get_code,R_EAX);
        }
        exit_block();

        // Exception already raised - just cleanup
        uint8_t *preexc_ptr = xlat_output;
        MOV_r32_r32( R_EDX, R_ECX );
        ADD_r32_r32( R_EDX, R_ECX );
        ADD_r32_sh4r( R_ECX, R_SPC );                   // spc += icount*2
        MOV_moff32_EAX( &sh4_cpu_period );
        MUL_r32( R_EDX );                               // eax = icount * cpu_period
        ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );
        load_spreg( R_EDI, R_PC );
        if( sh4_x86.tlb_on ) {
            call_func0(xlat_get_code_by_vma);
        } else {
            call_func0(xlat_get_code);
        }
        exit_block();

        for( i=0; i< sh4_x86.backpatch_posn; i++ ) {
            uint32_t *fixup_addr = (uint32_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset];
            if( sh4_x86.backpatch_list[i].exc_code < 0 ) {
                if( sh4_x86.backpatch_list[i].exc_code == -2 ) {
                    // 64-bit pointer fixup (from load_exc_backpatch)
                    *((uintptr_t *)fixup_addr) = (uintptr_t)xlat_output;
                } else {
                    // 32-bit relative branch fixup
                    *fixup_addr = xlat_output - (uint8_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset] - 4;
                }
                load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
                int rel = preexc_ptr - xlat_output;
                JMP_rel(rel);
            } else {
                *fixup_addr = xlat_output - (uint8_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset] - 4;
                load_imm32( R_EDI, sh4_x86.backpatch_list[i].exc_code );
                load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
                int rel = end_ptr - xlat_output;
                JMP_rel(rel);
            }
        }
    }
}
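
/* Backpatch record semantics, as used above: each entry names a fixup site in
 * the emitted block plus the SH4 instruction count (fixup_icount) at that
 * point. exc_code >= 0 raises a new exception - the stub loads the code into
 * %edi and jumps to the raise sequence at end_ptr. A negative exc_code other
 * than -2 jumps to the cleanup sequence at preexc_ptr, for an exception the
 * callee has already raised. exc_code == -2 marks a 64-bit pointer fixup
 * written on behalf of load_exc_backpatch(). */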

struct UnwindInfo {
    uintptr_t block_start;
    uintptr_t block_end;
    void *pc;
};

_Unwind_Reason_Code xlat_check_frame( struct _Unwind_Context *context, void *arg )
{
    struct UnwindInfo *info = arg;
    void *pc = (void *)_Unwind_GetIP(context);
    if( ((uintptr_t)pc) >= info->block_start && ((uintptr_t)pc) < info->block_end ) {
        info->pc = pc;
        return _URC_NORMAL_STOP;
    }
    return _URC_NO_REASON;
}

void *xlat_get_native_pc( void *code, uint32_t code_size )
{
    struct _Unwind_Exception exc;
    struct UnwindInfo info;

    info.pc = NULL;
    info.block_start = (uintptr_t)code;
    info.block_end = info.block_start + code_size;
    _Unwind_Backtrace( xlat_check_frame, &info );
    return info.pc;
}
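
/* Walks the host call stack via libgcc's _Unwind_Backtrace and returns the
 * native PC if some frame lies within [code, code+code_size), or NULL if none
 * does - letting callers map a fault inside generated code back to a precise
 * location within the block. */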

#endif /* !lxdream_ia64abi_H */