/**
 * Provides the implementation for the AMD64 ABI (e.g. prologue, epilogue, and
 * calling conventions)
 *
 * Copyright (c) 2007 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#ifndef lxdream_ia64abi_H
#define lxdream_ia64abi_H 1

#include <unwind.h>

#define load_ptr( reg, ptr ) load_imm64( reg, (uint64_t)ptr );

static inline void decode_address( int addr_reg )
{
    MOV_r32_r32( addr_reg, R_ECX );
    SHR_imm8_r32( 12, R_ECX );
    load_ptr( R_EDI, sh4_address_space );
    REXW(); OP(0x8B); OP(0x0C); OP(0xCF);   // mov.q [%rdi + %rcx*8], %rcx
}
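
/* Illustrative note (a sketch, not part of the original source): at runtime
 * the sequence emitted above amounts to
 *
 *     %rcx = sh4_address_space[ addr >> 12 ];
 *
 * i.e. it indexes a table with one entry per 4KB page and leaves the
 * per-page entry pointer in %rcx, ready for an indirect dispatch via
 * CALL_r32disp8( R_ECX, disp8 ).
 */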

/**
 * Note: clobbers EAX to make the indirect call - this isn't usually
 * a problem since the callee will usually clobber it anyway.
 */
#define CALL_FUNC0_SIZE 12
static inline void call_func0( void *ptr )
{
    load_imm64(R_EAX, (uint64_t)ptr);
    CALL_r32(R_EAX);
}
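
/* Size note: load_imm64 emits REX.W + B8+rd imm64 (10 bytes) and the
 * indirect call emits FF /2 (2 bytes), which is where
 * CALL_FUNC0_SIZE == 12 comes from.
 */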

#define CALL_FUNC1_SIZE 14
static inline void call_func1( void *ptr, int arg1 )
{
    REXW(); MOV_r32_r32(arg1, R_EDI);
    call_func0(ptr);
}
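
/* ABI note: %rdi, %rsi and %rdx are the first three integer argument
 * registers in the SysV AMD64 calling convention, which is why the
 * call_func* helpers stage their arguments (and the exception backpatch
 * address) in R_EDI, R_ESI and R_EDX before making the call.
 */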

static inline void call_func1_exc( void *ptr, int arg1, int pc )
{
    REXW(); MOV_r32_r32(arg1, R_EDI);
    load_exc_backpatch(R_ESI);
    call_func0(ptr);
}

static inline void call_func1_r32disp8( int preg, uint32_t disp8, int arg1 )
{
    REXW(); MOV_r32_r32(arg1, R_EDI);
    CALL_r32disp8(preg, disp8);
}

static inline void call_func1_r32disp8_exc( int preg, uint32_t disp8, int arg1, int pc )
{
    REXW(); MOV_r32_r32(arg1, R_EDI);
    load_exc_backpatch(R_ESI);
    CALL_r32disp8(preg, disp8);
}

#define CALL_FUNC2_SIZE 16
static inline void call_func2( void *ptr, int arg1, int arg2 )
{
    REXW(); MOV_r32_r32(arg1, R_EDI);
    REXW(); MOV_r32_r32(arg2, R_ESI);
    call_func0(ptr);
}

static inline void call_func2_r32disp8( int preg, uint32_t disp8, int arg1, int arg2 )
{
    REXW(); MOV_r32_r32(arg1, R_EDI);
    REXW(); MOV_r32_r32(arg2, R_ESI);
    CALL_r32disp8(preg, disp8);
}

static inline void call_func2_r32disp8_exc( int preg, uint32_t disp8, int arg1, int arg2, int pc )
{
    REXW(); MOV_r32_r32(arg1, R_EDI);
    REXW(); MOV_r32_r32(arg2, R_ESI);
    load_exc_backpatch(R_EDX);
    CALL_r32disp8(preg, disp8);
}
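
/* Usage sketch (for illustration; not taken from the original source): a
 * translated memory access would typically pair decode_address with one of
 * the r32disp8 helpers, so the emitted code dispatches through the per-page
 * table entry left in %rcx:
 *
 *     decode_address( R_EAX );                      // %rcx = page entry
 *     call_func1_r32disp8( R_ECX, disp8, R_EAX );   // call [%rcx + disp8]
 *
 * where disp8 selects a handler slot (e.g. a read/write callback) within
 * the entry - the exact slot offsets are an assumption here.
 */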

/**
 * Emit the 'start of block' assembly. Sets up the stack frame and saves
 * registers as required.
 */
void enter_block( )
{
    PUSH_r32(R_EBP);
    load_ptr( R_EBP, ((uint8_t *)&sh4r) + 128 );
    // Minimum aligned allocation is 16 bytes
    REXW(); SUB_imm8s_r32( 16, R_ESP );
}

static inline void exit_block( )
{
    REXW(); ADD_imm8s_r32( 16, R_ESP );
    POP_r32(R_EBP);
    RET();
}
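
/* Alignment note: the SysV AMD64 ABI requires %rsp to be 16-byte aligned
 * at every call site. On entry to a block %rsp is 8 mod 16 (the return
 * address has just been pushed); pushing %rbp restores 16-byte alignment,
 * and the fixed 16-byte frame then preserves it for every call the
 * translated code makes.
 */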

/**
 * Exit the block with sh4r.pc already written
 */
void exit_block_pcset( sh4addr_t pc )
{
    load_imm32( R_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );     // 6
    load_spreg( R_EAX, R_PC );
    if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma,R_EAX);
    } else {
        call_func1(xlat_get_code,R_EAX);
    }
    exit_block();
}
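
/* Cycle-accounting note: SH4 instructions are uniformly 2 bytes, so
 * (pc - block_start_pc) >> 1 is the number of instructions executed in
 * this block, and multiplying by sh4_cpu_period converts that count into
 * the time credited to sh4r.slice_cycle on exit.
 */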

/**
 * Exit the block with sh4r.new_pc written with the target address
 */
void exit_block_newpcset( sh4addr_t pc )
{
    load_imm32( R_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );     // 6
    load_spreg( R_EAX, R_NEW_PC );
    store_spreg( R_EAX, R_PC );
    if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma,R_EAX);
    } else {
        call_func1(xlat_get_code,R_EAX);
    }
    exit_block();
}

#define EXIT_BLOCK_SIZE(pc) (25 + (IS_IN_ICACHE(pc)?10:CALL_FUNC1_SIZE))

/**
 * Exit the block to an absolute PC
 */
void exit_block_abs( sh4addr_t pc, sh4addr_t endpc )
{
    load_imm32( R_ECX, pc );                            // 5
    store_spreg( R_ECX, REG_OFFSET(pc) );               // 3
    if( IS_IN_ICACHE(pc) ) {
        REXW(); MOV_moff32_EAX( xlat_get_lut_entry(pc) );
        REXW(); AND_imm8s_r32( 0xFC, R_EAX );           // 4
    } else if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma, R_ECX);
    } else {
        call_func1(xlat_get_code,R_ECX);
    }
    load_imm32( R_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );     // 6
    exit_block();
}

#define EXIT_BLOCK_REL_SIZE(pc) (28 + (IS_IN_ICACHE(pc)?10:CALL_FUNC1_SIZE))

/**
 * Exit the block to a relative PC
 */
void exit_block_rel( sh4addr_t pc, sh4addr_t endpc )
{
    load_imm32( R_ECX, pc - sh4_x86.block_start_pc );   // 5
    ADD_sh4r_r32( R_PC, R_ECX );
    store_spreg( R_ECX, REG_OFFSET(pc) );               // 3
    if( IS_IN_ICACHE(pc) ) {
        REXW(); MOV_moff32_EAX( xlat_get_lut_entry(GET_ICACHE_PHYS(pc)) ); // 5
        REXW(); AND_imm8s_r32( 0xFC, R_EAX );           // 4
    } else if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma,R_ECX);
    } else {
        call_func1(xlat_get_code,R_ECX);
    }
    load_imm32( R_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );     // 6
    exit_block();
}

/**
 * Write the block trailer (exception handling block)
 */
void sh4_translate_end_block( sh4addr_t pc ) {
    if( sh4_x86.branch_taken == FALSE ) {
        // Didn't exit unconditionally already, so write the termination here
        exit_block_rel( pc, pc );
    }
    if( sh4_x86.backpatch_posn != 0 ) {
        unsigned int i;
        // Raise exception
        uint8_t *end_ptr = xlat_output;
        MOV_r32_r32( R_EDX, R_ECX );
        ADD_r32_r32( R_EDX, R_ECX );                    // %ecx = 2 * icount
        ADD_r32_sh4r( R_ECX, R_PC );
        MOV_moff32_EAX( &sh4_cpu_period );
        MUL_r32( R_EDX );                               // %eax = icount * sh4_cpu_period
        ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );

        call_func0( sh4_raise_exception );
        load_spreg( R_EAX, R_PC );
        if( sh4_x86.tlb_on ) {
            call_func1(xlat_get_code_by_vma,R_EAX);
        } else {
            call_func1(xlat_get_code,R_EAX);
        }
        exit_block();

        // Exception already raised - just cleanup
        uint8_t *preexc_ptr = xlat_output;
        MOV_r32_r32( R_EDX, R_ECX );
        ADD_r32_r32( R_EDX, R_ECX );                    // %ecx = 2 * icount
        ADD_r32_sh4r( R_ECX, R_SPC );
        MOV_moff32_EAX( &sh4_cpu_period );
        MUL_r32( R_EDX );                               // %eax = icount * sh4_cpu_period
        ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );
        load_spreg( R_EDI, R_PC );
        if( sh4_x86.tlb_on ) {
            call_func0(xlat_get_code_by_vma);
        } else {
            call_func0(xlat_get_code);
        }
        exit_block();

        for( i=0; i< sh4_x86.backpatch_posn; i++ ) {
            uint32_t *fixup_addr = (uint32_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset];
            if( sh4_x86.backpatch_list[i].exc_code < 0 ) {
                if( sh4_x86.backpatch_list[i].exc_code == -2 ) {
                    *((uintptr_t *)fixup_addr) = (uintptr_t)xlat_output;
                } else {
                    *fixup_addr = xlat_output - (uint8_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset] - 4;
                }
                load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
                int rel = preexc_ptr - xlat_output;
                JMP_rel(rel);
            } else {
                *fixup_addr = xlat_output - (uint8_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset] - 4;
                load_imm32( R_EDI, sh4_x86.backpatch_list[i].exc_code );
                load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
                int rel = end_ptr - xlat_output;
                JMP_rel(rel);
            }
        }
    }
}
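
/* Layout note: each backpatched guard jumps into one of the two stubs above
 * with %edx holding the instruction count at the faulting point. Both stubs
 * add 2*%edx to the PC (instructions are 2 bytes) and %edx * sh4_cpu_period
 * to slice_cycle; the end_ptr stub additionally raises the exception whose
 * code was staged in %edi before looking up the next code block.
 */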

struct UnwindInfo {
    uintptr_t block_start;
    uintptr_t block_end;
    void *pc;
};

_Unwind_Reason_Code xlat_check_frame( struct _Unwind_Context *context, void *arg )
{
    struct UnwindInfo *info = arg;
    void *pc = (void *)_Unwind_GetIP(context);
    if( ((uintptr_t)pc) >= info->block_start && ((uintptr_t)pc) < info->block_end ) {
        info->pc = pc;
        return _URC_NORMAL_STOP;
    }
    return _URC_NO_REASON;
}

void *xlat_get_native_pc( void *code, uint32_t code_size )
{
    struct UnwindInfo info;

    info.pc = NULL;
    info.block_start = (uintptr_t)code;
    info.block_end = info.block_start + code_size;
    _Unwind_Backtrace( xlat_check_frame, &info );
    return info.pc;
}
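
/* Usage note: xlat_get_native_pc relies on _Unwind_Backtrace from the
 * system unwinder (<unwind.h>) to walk the current call stack; the callback
 * stops the walk as soon as a frame's IP falls inside the given translated
 * block, so the function returns the native PC within that block, or NULL
 * if no frame matches.
 */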

#endif /* !lxdream_ia64abi_H */