filename    | src/sh4/ia64abi.h
changeset   | 991:60c7fab9c880
prev        | 957:0f6131f6cc3a
author      | nkeynes
date        | Wed Mar 04 23:12:21 2009 +0000
permissions | -rw-r--r--
last change | Move xltcache to xlat/ src directory. Commit new and improved x86 opcode file - cleaned up and added support for amd64 extended registers.
/**
 * $Id$
 *
 * Provides the implementation for the AMD64 ABI (e.g. prologue, epilogue, and
 * calling conventions)
 *
 * Copyright (c) 2007 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#ifndef lxdream_ia64abi_H
#define lxdream_ia64abi_H 1

#include <unwind.h>
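
/* Load an immediate 64-bit pointer value into reg */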
#define load_ptr( reg, ptr ) load_imm64( reg, (uint64_t)ptr );
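
/**
 * Look up the memory region for the SH4 address in addr_reg: index the
 * current (privileged or user) address-space table by the 4K page number,
 * leaving the table entry in RCX.
 */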
static inline void decode_address( int addr_reg )
{
    uintptr_t base = (sh4r.xlat_sh4_mode&SR_MD) ? (uintptr_t)sh4_address_space : (uintptr_t)sh4_user_address_space;
    MOVL_r32_r32( addr_reg, REG_RCX );
    SHRL_imm_r32( 12, REG_RCX );
    MOVP_immptr_rptr( base, REG_RDI );
    MOVP_sib_rptr(3, REG_RCX, REG_RDI, 0, REG_RCX);
}
/**
 * Note: clobbers RAX to make the indirect call - this isn't usually
 * a problem since the callee will usually clobber it anyway.
 * Size: 12 bytes
 */
#define CALL_FUNC0_SIZE 12
static inline void call_func0( void *ptr )
{
    MOVQ_imm64_r64((uint64_t)ptr, REG_RAX);
    CALL_r32(REG_RAX);
}
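
/* Call a 1-argument function, passing arg1 in RDI per the AMD64 calling convention */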
static inline void call_func1( void *ptr, int arg1 )
{
    MOVQ_r64_r64(arg1, REG_RDI);
    call_func0(ptr);
}
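
/* Call a 1-argument function that may raise an SH4 exception. The immptr
 * loaded into RSI is a placeholder that the block trailer backpatches with
 * the address of the exception recovery code (see sh4_translate_end_block).
 */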
static inline void call_func1_exc( void *ptr, int arg1, int pc )
{
    MOVQ_r64_r64(arg1, REG_RDI);
    MOVP_immptr_rptr(0, REG_RSI);
    sh4_x86_add_backpatch( xlat_output, pc, -2 );
    call_func0(ptr);
}
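
/* Call a 1-argument function indirectly through [preg + disp8] */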
static inline void call_func1_r32disp8( int preg, uint32_t disp8, int arg1 )
{
    MOVQ_r64_r64(arg1, REG_RDI);
    CALL_r32disp(preg, disp8);
}
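
/* As call_func1_r32disp8, but with the exception-return placeholder in RSI */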
static inline void call_func1_r32disp8_exc( int preg, uint32_t disp8, int arg1, int pc )
{
    MOVQ_r64_r64(arg1, REG_RDI);
    MOVP_immptr_rptr(0, REG_RSI);
    sh4_x86_add_backpatch( xlat_output, pc, -2 );
    CALL_r32disp(preg, disp8);
}
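
/* Call a 2-argument function, passing arg1/arg2 in RDI/RSI */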
static inline void call_func2( void *ptr, int arg1, int arg2 )
{
    MOVQ_r64_r64(arg1, REG_RDI);
    MOVQ_r64_r64(arg2, REG_RSI);
    call_func0(ptr);
}
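
/* Call a 2-argument function indirectly through [preg + disp8] */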
static inline void call_func2_r32disp8( int preg, uint32_t disp8, int arg1, int arg2 )
{
    MOVQ_r64_r64(arg1, REG_RDI);
    MOVQ_r64_r64(arg2, REG_RSI);
    CALL_r32disp(preg, disp8);
}
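
/* As call_func2_r32disp8, but with the exception-return placeholder in RDX */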
static inline void call_func2_r32disp8_exc( int preg, uint32_t disp8, int arg1, int arg2, int pc )
{
    MOVQ_r64_r64(arg1, REG_RDI);
    MOVQ_r64_r64(arg2, REG_RSI);
    MOVP_immptr_rptr(0, REG_RDX);
    sh4_x86_add_backpatch( xlat_output, pc, -2 );
    CALL_r32disp(preg, disp8);
}
/**
 * Emit the 'start of block' assembly: save RBP, point it at sh4r + 128
 * (biased so that 8-bit displacements reach more of the structure), and
 * reserve the minimum 16-byte aligned stack allocation.
 */
void enter_block( )
{
    PUSH_r32(REG_RBP);
    load_ptr( REG_RBP, ((uint8_t *)&sh4r) + 128 );
    // Minimum aligned allocation is 16 bytes
    SUBQ_imms_r64( 16, REG_RSP );
}
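
/* Unwind the stack frame and return from the translated block */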
static inline void exit_block( )
{
    ADDQ_imms_r64( 16, REG_RSP );
    POP_r32(REG_RBP);
    RET();
}
/**
 * Exit the block with sh4r.pc already written
 */
void exit_block_pcset( sh4addr_t pc )
{
    load_imm32( REG_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADDL_r32_rbpdisp( REG_ECX, REG_OFFSET(slice_cycle) );    // 6
    load_spreg( REG_RAX, R_PC );
    if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma,REG_RAX);
    } else {
        call_func1(xlat_get_code,REG_RAX);
    }
    exit_block();
}
/**
 * Exit the block with sh4r.new_pc written with the target address
 */
void exit_block_newpcset( sh4addr_t pc )
{
    load_imm32( REG_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADDL_r32_rbpdisp( REG_ECX, REG_OFFSET(slice_cycle) );    // 6
    load_spreg( REG_RAX, R_NEW_PC );
    store_spreg( REG_RAX, R_PC );
    if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma,REG_RAX);
    } else {
        call_func1(xlat_get_code,REG_RAX);
    }
    exit_block();
}
#define EXIT_BLOCK_SIZE(pc) (25 + (IS_IN_ICACHE(pc)?10:CALL_FUNC1_SIZE))
/**
 * Exit the block to an absolute PC
 */
void exit_block_abs( sh4addr_t pc, sh4addr_t endpc )
{
    load_imm32( REG_RCX, pc );                               // 5
    store_spreg( REG_RCX, REG_OFFSET(pc) );                  // 3
    if( IS_IN_ICACHE(pc) ) {
        MOVP_moffptr_rax( xlat_get_lut_entry(pc) );
        ANDQ_imms_r64( 0xFFFFFFFC, REG_RAX );                // 4
    } else if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma, REG_RCX);
    } else {
        call_func1(xlat_get_code,REG_RCX);
    }
    load_imm32( REG_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADDL_r32_rbpdisp( REG_ECX, REG_OFFSET(slice_cycle) );    // 6
    exit_block();
}
#define EXIT_BLOCK_REL_SIZE(pc)  (28 + (IS_IN_ICACHE(pc)?10:CALL_FUNC1_SIZE))

/**
 * Exit the block to a relative PC
 */
void exit_block_rel( sh4addr_t pc, sh4addr_t endpc )
{
    load_imm32( REG_ECX, pc - sh4_x86.block_start_pc );      // 5
    ADDL_rbpdisp_r32( R_PC, REG_ECX );
    store_spreg( REG_ECX, REG_OFFSET(pc) );                  // 3
    if( IS_IN_ICACHE(pc) ) {
        MOVP_moffptr_rax( xlat_get_lut_entry(GET_ICACHE_PHYS(pc)) ); // 5
        ANDQ_imms_r64( 0xFFFFFFFC, REG_RAX );                // 4
    } else if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma,REG_RCX);
    } else {
        call_func1(xlat_get_code,REG_RCX);
    }
    load_imm32( REG_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADDL_r32_rbpdisp( REG_ECX, REG_OFFSET(slice_cycle) );    // 6
    exit_block();
}
/**
 * Exit unconditionally with a general exception
 */
void exit_block_exc( int code, sh4addr_t pc )
{
    load_imm32( REG_ECX, pc - sh4_x86.block_start_pc );      // 5
    ADDL_r32_rbpdisp( REG_ECX, R_PC );
    load_imm32( REG_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADDL_r32_rbpdisp( REG_ECX, REG_OFFSET(slice_cycle) );    // 6
    load_imm32( REG_RAX, code );
    call_func1( sh4_raise_exception, REG_RAX );

    load_spreg( REG_RAX, R_PC );
    if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma,REG_RAX);
    } else {
        call_func1(xlat_get_code,REG_RAX);
    }

    exit_block();
}
/**
 * Write the block trailer (exception handling block)
 */
void sh4_translate_end_block( sh4addr_t pc ) {
    if( sh4_x86.branch_taken == FALSE ) {
        // Didn't exit unconditionally already, so write the termination here
        exit_block_rel( pc, pc );
    }
    if( sh4_x86.backpatch_posn != 0 ) {
        unsigned int i;
        // Raise exception
        uint8_t *end_ptr = xlat_output;
        MOVL_r32_r32( REG_RDX, REG_RCX );
        ADDL_r32_r32( REG_RDX, REG_RCX );
        ADDL_r32_rbpdisp( REG_RCX, R_PC );
        MOVL_moffptr_eax( &sh4_cpu_period );
        MULL_r32( REG_RDX );
        ADDL_r32_rbpdisp( REG_RAX, REG_OFFSET(slice_cycle) );

        call_func0( sh4_raise_exception );
        load_spreg( REG_RAX, R_PC );
        if( sh4_x86.tlb_on ) {
            call_func1(xlat_get_code_by_vma,REG_RAX);
        } else {
            call_func1(xlat_get_code,REG_RAX);
        }
        exit_block();

        // Exception already raised - just cleanup
        uint8_t *preexc_ptr = xlat_output;
        MOVL_r32_r32( REG_EDX, REG_ECX );
        ADDL_r32_r32( REG_EDX, REG_ECX );
        ADDL_r32_rbpdisp( REG_ECX, R_SPC );
        MOVL_moffptr_eax( &sh4_cpu_period );
        MULL_r32( REG_EDX );
        ADDL_r32_rbpdisp( REG_EAX, REG_OFFSET(slice_cycle) );
        load_spreg( REG_RDI, R_PC );
        if( sh4_x86.tlb_on ) {
            call_func0(xlat_get_code_by_vma);
        } else {
            call_func0(xlat_get_code);
        }
        exit_block();
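
        // Emit a fixup thunk for each backpatched call site. Negative
        // exc_codes (exception already raised) jump to the cleanup path
        // above: -2 patches an absolute pointer into the call site, other
        // negative codes a 32-bit relative offset. Codes >= 0 jump to the
        // raise-exception path with the exception code in RDI.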
        for( i=0; i< sh4_x86.backpatch_posn; i++ ) {
            uint32_t *fixup_addr = (uint32_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset];
            if( sh4_x86.backpatch_list[i].exc_code < 0 ) {
                if( sh4_x86.backpatch_list[i].exc_code == -2 ) {
                    *((uintptr_t *)fixup_addr) = (uintptr_t)xlat_output;
                } else {
                    *fixup_addr = xlat_output - (uint8_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset] - 4;
                }
                load_imm32( REG_RDX, sh4_x86.backpatch_list[i].fixup_icount );
                int rel = preexc_ptr - xlat_output;
                JMP_prerel(rel);
            } else {
                *fixup_addr = xlat_output - (uint8_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset] - 4;
                load_imm32( REG_RDI, sh4_x86.backpatch_list[i].exc_code );
                load_imm32( REG_RDX, sh4_x86.backpatch_list[i].fixup_icount );
                int rel = end_ptr - xlat_output;
                JMP_prerel(rel);
            }
        }
    }
}
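
/* Native code range to search, plus the result slot filled in by the
 * xlat_check_frame callback */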
struct UnwindInfo {
    uintptr_t block_start;
    uintptr_t block_end;
    void *pc;
};
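
/**
 * _Unwind_Backtrace callback: stop the stack walk when a frame's IP falls
 * within the target code block, recording the IP in info->pc.
 */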
_Unwind_Reason_Code xlat_check_frame( struct _Unwind_Context *context, void *arg )
{
    struct UnwindInfo *info = arg;
    void *pc = (void *)_Unwind_GetIP(context);
    if( ((uintptr_t)pc) >= info->block_start && ((uintptr_t)pc) < info->block_end ) {
        info->pc = pc;
        return _URC_NORMAL_STOP;
    }
    return _URC_NO_REASON;
}
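
/**
 * Walk the call stack with the libgcc unwinder to find the native PC
 * within the given code block, or NULL if no frame lies within it.
 */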
void *xlat_get_native_pc( void *code, uint32_t code_size )
{
    struct UnwindInfo info;

    info.pc = NULL;
    info.block_start = (uintptr_t)code;
    info.block_end = info.block_start + code_size;
    _Unwind_Backtrace( xlat_check_frame, &info );
    return info.pc;
}
#endif /* !lxdream_ia64abi_H */