/**
 * Provides the implementation for the ia32 ABI (eg prologue, epilogue, and
 * calling conventions)
 *
 * Copyright (c) 2007 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#ifndef __lxdream_ia32abi_H
#define __lxdream_ia32abi_H 1

#define load_ptr( reg, ptr ) load_imm32( reg, (uint32_t)ptr );

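/*
 * Stack alignment note: sh4_x86.stack_posn tracks how many bytes the emitted
 * code has pushed below the 16-byte-aligned block entry point. The helpers
 * below compute adj = (-argbytes - stack_posn) & 0x0F and drop ESP by that
 * much before pushing arguments, so ESP lands back on a 16-byte boundary at
 * each CALL - presumably to satisfy a 16-byte call alignment requirement such
 * as the Darwin ia32 ABI's.
 */
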
/**
 * Note: clobbers EAX to make the indirect call - this isn't usually
 * a problem since the callee will usually clobber it anyway.
 */
#define CALL_FUNC0_SIZE 13
static inline void call_func0( void *ptr )
{
    int adj = (-sh4_x86.stack_posn)&0x0F;
    SUB_imm8s_r32( adj, R_ESP );
    load_imm32(R_EAX, (uint32_t)ptr);
    CALL_r32(R_EAX);
    ADD_imm8s_r32( adj, R_ESP );
}

#define CALL_FUNC1_SIZE 14
static inline void call_func1( void *ptr, int arg1 )
{
    int adj = (-4-sh4_x86.stack_posn)&0x0F;
    SUB_imm8s_r32( adj, R_ESP );
    PUSH_r32(arg1);
    load_imm32(R_EAX, (uint32_t)ptr);
    CALL_r32(R_EAX);
    ADD_imm8s_r32( adj+4, R_ESP );
    sh4_x86.stack_posn -= 4;
}

#define CALL_FUNC2_SIZE 15
static inline void call_func2( void *ptr, int arg1, int arg2 )
{
    int adj = (-8-sh4_x86.stack_posn)&0x0F;
    SUB_imm8s_r32( adj, R_ESP );
    PUSH_r32(arg2);
    PUSH_r32(arg1);
    load_imm32(R_EAX, (uint32_t)ptr);
    CALL_r32(R_EAX);
    ADD_imm8s_r32( adj+8, R_ESP );
    sh4_x86.stack_posn -= 8;
}

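/*
 * Illustrative use of the helpers above (register choice arbitrary): something
 * like call_func2( sh4_write_long, R_ECX, R_EDX ) emits code that pushes EDX,
 * then ECX, and calls sh4_write_long(addr, value), leaving the stack realigned
 * on return.
 */
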
/**
 * Write a double (64-bit) value into memory, with the first word in arg2a, and
 * the second in arg2b.
 */
#define MEM_WRITE_DOUBLE_SIZE 36
static inline void MEM_WRITE_DOUBLE( int addr, int arg2a, int arg2b )
{
    int adj = (-8-sh4_x86.stack_posn)&0x0F;
    SUB_imm8s_r32( adj, R_ESP );
    ADD_imm8s_r32( 4, addr );
    PUSH_r32(arg2b);               /* arguments for the second call: (addr+4, arg2b) */
    PUSH_r32(addr);
    ADD_imm8s_r32( -4, addr );
    SUB_imm8s_r32( 8, R_ESP );     /* pad so the first call is also 16-byte aligned */
    PUSH_r32(arg2a);               /* arguments for the first call: (addr, arg2a) */
    PUSH_r32(addr);
    load_imm32(R_EAX, (uint32_t)sh4_write_long);
    CALL_r32(R_EAX);
    ADD_imm8s_r32( 16, R_ESP );    /* drop the first call's args + padding */
    load_imm32(R_EAX, (uint32_t)sh4_write_long);
    CALL_r32(R_EAX);
    ADD_imm8s_r32( adj+8, R_ESP );
    sh4_x86.stack_posn -= 16;
}

/**
 * Read a double (64-bit) value from memory, writing the first word into arg2a
 * and the second into arg2b. The addr must not be in EAX
 */
#define MEM_READ_DOUBLE_SIZE 36
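/*
 * The sequence below makes two sh4_read_long calls: the first reads the word
 * at addr and parks the result on the stack across the second call, which
 * reads the word at addr+4; the two results are then moved into arg2a and
 * arg2b respectively.
 */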
static inline void MEM_READ_DOUBLE( int addr, int arg2a, int arg2b )
{
    int adj = (-4-sh4_x86.stack_posn)&0x0F;
    int adj2 = (-8-sh4_x86.stack_posn)&0x0F;
    SUB_imm8s_r32( adj, R_ESP );
    PUSH_r32(addr);
    load_imm32(R_EAX, (uint32_t)sh4_read_long);
    CALL_r32(R_EAX);
    POP_r32(R_ECX);
    SUB_imm8s_r32( adj2-adj, R_ESP );
    PUSH_r32(R_EAX);
    ADD_imm8s_r32( 4, R_ECX );
    PUSH_r32(R_ECX);
    load_imm32(R_EAX, (uint32_t)sh4_read_long);
    CALL_r32(R_EAX);
    ADD_imm8s_r32( 4, R_ESP );
    MOV_r32_r32( R_EAX, arg2b );
    POP_r32(arg2a);
    ADD_imm8s_r32( adj2, R_ESP );
    sh4_x86.stack_posn -= 4;
}

/**
 * Emit the 'start of block' assembly. Sets up the stack frame and saves
 * registers as required, then initialises the per-block translation state.
 */
void sh4_translate_begin_block( sh4addr_t pc )
{
    PUSH_r32(R_EBP);
    /* mov &sh4r, ebp */
    load_ptr( R_EBP, &sh4r );

    sh4_x86.in_delay_slot = FALSE;
    sh4_x86.priv_checked = FALSE;
    sh4_x86.fpuen_checked = FALSE;
    sh4_x86.branch_taken = FALSE;
    sh4_x86.backpatch_posn = 0;
    sh4_x86.recovery_posn = 0;
    sh4_x86.block_start_pc = pc;
    sh4_x86.tstate = TSTATE_NONE;
    sh4_x86.tlb_on = IS_MMU_ENABLED();
    sh4_x86.stack_posn = 8;        /* return address + saved EBP */
}

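/*
 * After the prologue above, every translated block runs with &sh4r in EBP and
 * 8 bytes (return address + saved EBP) on the stack - hence stack_posn = 8.
 * The exit_block_* routines below undo this with a matching POP_r32(R_EBP) /
 * RET() when control returns to the dispatcher.
 */
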
/**
 * Exit the block with sh4r.pc already written
 */
void exit_block_pcset( sh4addr_t pc )
{
    load_imm32( R_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );    // 6
    load_spreg( R_EAX, R_PC );
    if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma,R_EAX);
    } else {
        call_func1(xlat_get_code,R_EAX);
    }
    POP_r32(R_EBP);
    RET();
}

/**
 * Exit the block with sh4r.new_pc written with the target pc
 */
void exit_block_newpcset( sh4addr_t pc )
{
    load_imm32( R_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );    // 6
    load_spreg( R_EAX, R_NEW_PC );
    store_spreg( R_EAX, R_PC );
    if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma,R_EAX);
    } else {
        call_func1(xlat_get_code,R_EAX);
    }
    POP_r32(R_EBP);
    RET();
}

#define EXIT_BLOCK_SIZE(pc) (24 + (IS_IN_ICACHE(pc)?5:CALL_FUNC1_SIZE))
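/*
 * Size accounting (from the byte-count annotations in exit_block below): the
 * fixed part appears to be 5+3+3+5+6 bytes of instructions plus the 1-byte
 * POP and RET = 24, and the variable part is either the 5-byte MOV from the
 * lut entry when the target is in the icache, or a call_func1 sequence
 * otherwise.
 */
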
/**
 * Exit the block to an absolute PC
 */
void exit_block( sh4addr_t pc, sh4addr_t endpc )
{
    load_imm32( R_ECX, pc );                            // 5
    store_spreg( R_ECX, REG_OFFSET(pc) );               // 3
    if( IS_IN_ICACHE(pc) ) {
        MOV_moff32_EAX( xlat_get_lut_entry(GET_ICACHE_PHYS(pc)) ); // 5
    } else if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma,R_ECX);
    } else {
        call_func1(xlat_get_code,R_ECX);
    }
    AND_imm8s_r32( 0xFC, R_EAX ); // 3
    load_imm32( R_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );     // 6
    POP_r32(R_EBP);
    RET();
}

#define EXIT_BLOCK_REL_SIZE(pc) (27 + (IS_IN_ICACHE(pc)?5:CALL_FUNC1_SIZE))

/**
 * Exit the block to a relative PC
 */
void exit_block_rel( sh4addr_t pc, sh4addr_t endpc )
{
    load_imm32( R_ECX, pc - sh4_x86.block_start_pc );   // 5
    ADD_sh4r_r32( R_PC, R_ECX );
    store_spreg( R_ECX, REG_OFFSET(pc) );               // 3
    if( IS_IN_ICACHE(pc) ) {
        MOV_moff32_EAX( xlat_get_lut_entry(GET_ICACHE_PHYS(pc)) ); // 5
    } else if( sh4_x86.tlb_on ) {
        call_func1(xlat_get_code_by_vma,R_ECX);
    } else {
        call_func1(xlat_get_code,R_ECX);
    }
    AND_imm8s_r32( 0xFC, R_EAX ); // 3
    load_imm32( R_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );     // 6
    POP_r32(R_EBP);
    RET();
}

/**
 * Write the block trailer (exception handling block)
 */
void sh4_translate_end_block( sh4addr_t pc ) {
    if( sh4_x86.branch_taken == FALSE ) {
        // Didn't exit unconditionally already, so write the termination here
        exit_block_rel( pc, pc );
    }
    if( sh4_x86.backpatch_posn != 0 ) {
        unsigned int i;
        // Exception raised: EDX holds the fixup instruction count, and the
        // exception code is on the stack (pushed at the backpatch site).
        uint8_t *end_ptr = xlat_output;
        MOV_r32_r32( R_EDX, R_ECX );
        ADD_r32_r32( R_EDX, R_ECX );
        ADD_r32_sh4r( R_ECX, R_PC );    // pc += icount*2
        MOV_moff32_EAX( &sh4_cpu_period );
        MUL_r32( R_EDX );               // period * icount
        ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );

        POP_r32(R_EDX);
        call_func1( sh4_raise_exception, R_EDX );
        load_spreg( R_EAX, R_PC );
        if( sh4_x86.tlb_on ) {
            call_func1(xlat_get_code_by_vma,R_EAX);
        } else {
            call_func1(xlat_get_code,R_EAX);
        }
        POP_r32(R_EBP);
        RET();

        // Exception already raised - just cleanup
        uint8_t *preexc_ptr = xlat_output;
        MOV_r32_r32( R_EDX, R_ECX );
        ADD_r32_r32( R_EDX, R_ECX );
        ADD_r32_sh4r( R_ECX, R_SPC );   // spc += icount*2
        MOV_moff32_EAX( &sh4_cpu_period );
        MUL_r32( R_EDX );
        ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );
        load_spreg( R_EAX, R_PC );
        if( sh4_x86.tlb_on ) {
            call_func1(xlat_get_code_by_vma,R_EAX);
        } else {
            call_func1(xlat_get_code,R_EAX);
        }
        POP_r32(R_EBP);
        RET();

        for( i=0; i< sh4_x86.backpatch_posn; i++ ) {
            uint32_t *fixup_addr = (uint32_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset];
            // Patch the rel32 at the fixup site to point at the stub emitted below
            *fixup_addr = xlat_output - (uint8_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset] - 4;
            if( sh4_x86.backpatch_list[i].exc_code < 0 ) {
                load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
                int stack_adj = -1 - sh4_x86.backpatch_list[i].exc_code;
                if( stack_adj > 0 ) {
                    ADD_imm8s_r32( stack_adj, R_ESP );
                }
                int rel = preexc_ptr - xlat_output;
                JMP_rel(rel);
            } else {
                PUSH_imm32( sh4_x86.backpatch_list[i].exc_code );
                load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
                int rel = end_ptr - xlat_output;
                JMP_rel(rel);
            }
        }
    }
}
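
/**
 * Find the native return address within the translated block currently on the
 * call stack, by walking up the frame-pointer chain (at most 8 frames) for a
 * frame whose saved EBP is &sh4r - translated code keeps &sh4r in EBP - and
 * returning the return address stored above it. Returns NULL if no such frame
 * is found.
 */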
void *xlat_get_native_pc()
{
    void *result = NULL;
    asm(
        "mov %%ebp, %%eax\n\t"
        "mov $0x8, %%ecx\n\t"
        "mov %1, %%edx\n\t"
        "frame_loop: test %%eax, %%eax\n\t"
        "je frame_not_found\n\t"
        "cmp (%%eax), %%edx\n\t"
        "je frame_found\n\t"
        "sub $0x1, %%ecx\n\t"
        "je frame_not_found\n\t"
        "movl (%%eax), %%eax\n\t"
        "jmp frame_loop\n\t"
        "frame_found: movl 0x4(%%eax), %0\n"
        "frame_not_found:"
        : "=r" (result)
        : "r" (&sh4r)
        : "eax", "ecx", "edx" );
    return result;
}

#endif