/**
 * Provides the implementation for the AMD64 ABI (e.g. prologue, epilogue, and
 * calling conventions)
 *
 * Copyright (c) 2007 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef lxdream_ia64abi_H
#define lxdream_ia64abi_H 1

#include <unwind.h>

#define load_ptr( reg, ptr ) load_imm64( reg, (uint64_t)ptr )

/**
 * Note: clobbers EAX to make the indirect call - this isn't usually
 * a problem since the callee will usually clobber it anyway.
 */
#define CALL_FUNC0_SIZE 12
static inline void call_func0( void *ptr )
{
    load_imm64(R_EAX, (uint64_t)ptr);
    CALL_r32(R_EAX);
}

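/* The sequence above assembles to "mov rax, imm64; call rax":
 *   48 B8 <8-byte immediate>   (10 bytes)
 *   FF D0                      ( 2 bytes)
 * which is where CALL_FUNC0_SIZE = 12 comes from.
 */
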
#define CALL_FUNC1_SIZE 14
static inline void call_func1( void *ptr, int arg1 )
{
    REXW(); MOV_r32_r32(arg1, R_EDI);
    call_func0(ptr);
}

#define CALL_FUNC2_SIZE 16
static inline void call_func2( void *ptr, int arg1, int arg2 )
{
    REXW(); MOV_r32_r32(arg1, R_EDI);
    REXW(); MOV_r32_r32(arg2, R_ESI);
    call_func0(ptr);
}

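/* Arguments follow the System V AMD64 calling convention: the first two
 * integer arguments are passed in RDI and RSI. The REXW() prefix widens
 * each MOV to 64 bits so that pointer-sized values are copied intact.
 */
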
#define MEM_WRITE_DOUBLE_SIZE 35
/**
 * Write a double (64-bit) value into memory, with the first word in arg2a, and
 * the second in arg2b.
 */
static inline void MEM_WRITE_DOUBLE( int addr, int arg2a, int arg2b )
{
    PUSH_r32(arg2b);
    PUSH_r32(addr);
    call_func2(sh4_write_long, addr, arg2a);
    POP_r32(R_EDI);
    POP_r32(R_ESI);
    ADD_imm8s_r32(4, R_EDI);
    call_func0(sh4_write_long);
}

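/* addr and arg2b are spilled to the stack around the first call because
 * every argument register is caller-saved under the AMD64 ABI; the POPs
 * then land them directly in RDI/RSI, already in place for the second
 * sh4_write_long call (the two pushes also keep RSP 16-byte aligned).
 */
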
#define MEM_READ_DOUBLE_SIZE 43
/**
 * Read a double (64-bit) value from memory, writing the first word into arg2a
 * and the second into arg2b. The addr must not be in EAX.
 */
static inline void MEM_READ_DOUBLE( int addr, int arg2a, int arg2b )
{
    REXW(); SUB_imm8s_r32( 8, R_ESP );
    PUSH_r32(addr);
    call_func1(sh4_read_long, addr);
    POP_r32(R_EDI);
    PUSH_r32(R_EAX);
    ADD_imm8s_r32(4, R_EDI);
    call_func0(sh4_read_long);
    MOV_r32_r32(R_EAX, arg2b);
    POP_r32(arg2a);
    REXW(); ADD_imm8s_r32( 8, R_ESP );
}

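/* The SUB/ADD pair reserves 8 bytes of padding so that, together with the
 * single 8-byte push live at each call, RSP stays 16-byte aligned at the
 * call sites as the AMD64 ABI requires. The first read's result is parked
 * on the stack (PUSH_r32(R_EAX)) while the second read runs.
 */
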
/**
 * Emit the 'start of block' assembly. Sets up the stack frame and saves
 * SI/DI as required
 */
void sh4_translate_begin_block( sh4addr_t pc )
{
    PUSH_r32(R_EBP);
    /* mov &sh4r+128, ebp */
    load_ptr( R_EBP, ((uint8_t *)&sh4r) + 128 );

    sh4_x86.in_delay_slot = FALSE;
    sh4_x86.priv_checked = FALSE;
    sh4_x86.fpuen_checked = FALSE;
    sh4_x86.branch_taken = FALSE;
    sh4_x86.backpatch_posn = 0;
    sh4_x86.block_start_pc = pc;
    sh4_x86.tlb_on = IS_MMU_ENABLED();
    sh4_x86.tstate = TSTATE_NONE;
}

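/* Keeping &sh4r + 128, rather than &sh4r itself, in EBP means the first
 * 256 bytes of sh4r are reachable with a one-byte disp8 displacement
 * (disp8 spans -128..+127 around the base register), shrinking most of
 * the register accesses the translator emits.
 */
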
/**
 * Exit the block with sh4r.pc already written
 */
void exit_block_pcset( sh4addr_t pc )
{
    load_imm32( R_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );     // 6
    load_spreg( R_EAX, R_PC );
    if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma,R_EAX);
    } else {
        call_func1(xlat_get_code,R_EAX);
    }
    POP_r32(R_EBP);
    RET();
}

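/* (pc - block_start_pc) is a byte offset and SH4 instructions are 2 bytes
 * each, so the >>1 converts it to an instruction count before scaling by
 * sh4_cpu_period to charge the elapsed cycles. Each exit stub also returns
 * with the native address of the next block (from xlat_get_code*) in EAX,
 * presumably for the dispatch loop to jump to after the epilogue.
 */
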
/**
 * Exit the block with sh4r.new_pc written with the target address
 */
void exit_block_newpcset( sh4addr_t pc )
{
    load_imm32( R_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );     // 6
    load_spreg( R_EAX, R_NEW_PC );
    store_spreg( R_EAX, R_PC );
    if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma,R_EAX);
    } else {
        call_func1(xlat_get_code,R_EAX);
    }
    POP_r32(R_EBP);
    RET();
}

#define EXIT_BLOCK_SIZE(pc) (25 + (IS_IN_ICACHE(pc)?10:CALL_FUNC1_SIZE))
/**
 * Exit the block to an absolute PC
 */
void exit_block( sh4addr_t pc, sh4addr_t endpc )
{
    load_imm32( R_ECX, pc );                            // 5
    store_spreg( R_ECX, REG_OFFSET(pc) );               // 3
    if( IS_IN_ICACHE(pc) ) {
        REXW(); MOV_moff32_EAX( xlat_get_lut_entry(pc) ); // 10
    } else if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma, R_ECX);
    } else {
        call_func1(xlat_get_code,R_ECX);
    }
    REXW(); AND_imm8s_r32( 0xFC, R_EAX );               // 4
    load_imm32( R_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );     // 6
    POP_r32(R_EBP);
    RET();
}

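/* The imm8s value 0xFC sign-extends to 0xFFFFFFFFFFFFFFFC under REX.W, so
 * the AND clears only the low two bits of the looked-up entry. The
 * translation LUT appears to keep flag bits there; masking them off leaves
 * a clean native code address in RAX to return.
 */
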
#define EXIT_BLOCK_REL_SIZE(pc) (28 + (IS_IN_ICACHE(pc)?10:CALL_FUNC1_SIZE))
/**
 * Exit the block to a relative PC
 */
void exit_block_rel( sh4addr_t pc, sh4addr_t endpc )
{
    load_imm32( R_ECX, pc - sh4_x86.block_start_pc );   // 5
    ADD_sh4r_r32( R_PC, R_ECX );
    store_spreg( R_ECX, REG_OFFSET(pc) );               // 3
    if( IS_IN_ICACHE(pc) ) {
        REXW(); MOV_moff32_EAX( xlat_get_lut_entry(GET_ICACHE_PHYS(pc)) ); // 10
    } else if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma,R_ECX);
    } else {
        call_func1(xlat_get_code,R_ECX);
    }
    REXW(); AND_imm8s_r32( 0xFC, R_EAX );               // 4
    load_imm32( R_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );     // 6
    POP_r32(R_EBP);
    RET();
}

/**
 * Write the block trailer (exception handling block)
 */
void sh4_translate_end_block( sh4addr_t pc ) {
    if( sh4_x86.branch_taken == FALSE ) {
        // Didn't exit unconditionally already, so write the termination here
        exit_block_rel( pc, pc );
    }
    if( sh4_x86.backpatch_posn != 0 ) {
        unsigned int i;
        // Raise exception
        uint8_t *end_ptr = xlat_output;
        MOV_r32_r32( R_EDX, R_ECX );
        ADD_r32_r32( R_EDX, R_ECX );
        ADD_r32_sh4r( R_ECX, R_PC );
        MOV_moff32_EAX( &sh4_cpu_period );
        MUL_r32( R_EDX );
        ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );

        call_func0( sh4_raise_exception );
        load_spreg( R_EAX, R_PC );
        if( sh4_x86.tlb_on ) {
            call_func1(xlat_get_code_by_vma,R_EAX);
        } else {
            call_func1(xlat_get_code,R_EAX);
        }
        POP_r32(R_EBP);
        RET();

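        /* On entry to the stub above, EDX holds the faulting instruction
         * count and EDI the exception code, both loaded at the backpatch
         * site emitted below. ECX = 2*EDX converts the instruction count
         * to a byte offset for the PC adjustment, and the MUL leaves
         * sh4_cpu_period * EDX in EAX to charge the cycles consumed.
         */
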
        // Exception already raised - just cleanup
        uint8_t *preexc_ptr = xlat_output;
        MOV_r32_r32( R_EDX, R_ECX );
        ADD_r32_r32( R_EDX, R_ECX );
        ADD_r32_sh4r( R_ECX, R_SPC );
        MOV_moff32_EAX( &sh4_cpu_period );
        MUL_r32( R_EDX );
        ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );
        load_spreg( R_EDI, R_PC );
        if( sh4_x86.tlb_on ) {
            call_func0(xlat_get_code_by_vma);
        } else {
            call_func0(xlat_get_code);
        }
        POP_r32(R_EBP);
        RET();

        for( i=0; i< sh4_x86.backpatch_posn; i++ ) {
            uint32_t *fixup_addr = (uint32_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset];
            *fixup_addr = xlat_output - (uint8_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset] - 4;
            if( sh4_x86.backpatch_list[i].exc_code < 0 ) {
                load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
                int stack_adj = -1 - sh4_x86.backpatch_list[i].exc_code;
                if( stack_adj > 0 ) {
                    ADD_imm8s_r32( stack_adj*4, R_ESP );
                }
                int rel = preexc_ptr - xlat_output;
                JMP_rel(rel);
            } else {
                load_imm32( R_EDI, sh4_x86.backpatch_list[i].exc_code );
                load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
                int rel = end_ptr - xlat_output;
                JMP_rel(rel);
            }
        }
    }
}

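/* A note on the fixups above: each backpatched branch ends with a 4-byte
 * rel32 operand at fixup_offset, and rel32 displacements are measured from
 * the end of that operand. Storing (xlat_output - operand address - 4)
 * therefore retargets the branch at the stub being emitted here.
 */
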
_Unwind_Reason_Code xlat_check_frame( struct _Unwind_Context *context, void *arg )
{
    void *rbp = (void *)_Unwind_GetGR(context, 6);
    void *expect = (((uint8_t *)&sh4r) + 128 );
    if( rbp == expect ) {
        void **result = (void **)arg;
        *result = (void *)_Unwind_GetIP(context);
        return _URC_NORMAL_STOP;
    }

    return _URC_NO_REASON;
}

void *xlat_get_native_pc()
{
    void *result = NULL;
    _Unwind_Backtrace( xlat_check_frame, &result );
    return result;
}

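/* DWARF register number 6 is RBP on x86-64, so the backtrace above scans
 * for the frame whose RBP still holds &sh4r + 128 - i.e. the currently
 * executing translated block - and reports that frame's instruction
 * pointer as the native PC.
 */
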
#endif /* !lxdream_ia64abi_H */