filename     src/sh4/ia32mac.h
changeset    569:a1c49e1e8776
prev         561:533f6b478071
next         570:d2893980fbf5
author       nkeynes
date         Fri Jan 04 11:54:17 2008 +0000
branch       lxdream-mmu
permissions  -rw-r--r--
last change  Bring icache partially into line with the mmu, a little less slow with AT off now.
/**
 * $Id$
 *
 * Provides the implementation for the ia32 ABI (e.g. prologue, epilogue, and
 * calling conventions)
 *
 * Copyright (c) 2007 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef __lxdream_ia32abi_H
#define __lxdream_ia32abi_H 1

#define load_ptr( reg, ptr ) load_imm32( reg, (uint32_t)ptr );

/**
 * Note: clobbers EAX to make the indirect call - this isn't usually
 * a problem since the callee will usually clobber it anyway.
 */
#define CALL_FUNC0_SIZE 13
static inline void call_func0( void *ptr )
{
    int adj = (-sh4_x86.stack_posn)&0x0F;
    SUB_imm8s_r32( adj, R_ESP );
    load_imm32(R_EAX, (uint32_t)ptr);
    CALL_r32(R_EAX);
    ADD_imm8s_r32( adj, R_ESP );
}
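
/* Note: the adj adjustment above keeps ESP 16-byte aligned at the CALL, as
 * the Mac OS X ia32 ABI requires at call sites. For example, with the
 * stack_posn == 8 set in sh4_translate_begin_block, adj = (-8)&0x0F = 8, so
 * ESP drops a further 8 bytes and sits on a 16-byte boundary before the CALL. */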

#define CALL_FUNC1_SIZE 14
static inline void call_func1( void *ptr, int arg1 )
{
    int adj = (-4-sh4_x86.stack_posn)&0x0F;
    SUB_imm8s_r32( adj, R_ESP );
    PUSH_r32(arg1);
    load_imm32(R_EAX, (uint32_t)ptr);
    CALL_r32(R_EAX);
    ADD_imm8s_r32( adj+4, R_ESP );
    sh4_x86.stack_posn -= 4;
}

#define CALL_FUNC2_SIZE 15
static inline void call_func2( void *ptr, int arg1, int arg2 )
{
    int adj = (-8-sh4_x86.stack_posn)&0x0F;
    SUB_imm8s_r32( adj, R_ESP );
    PUSH_r32(arg2);
    PUSH_r32(arg1);
    load_imm32(R_EAX, (uint32_t)ptr);
    CALL_r32(R_EAX);
    ADD_imm8s_r32( adj+8, R_ESP );
    sh4_x86.stack_posn -= 8;
}

/**
 * Write a double (64-bit) value into memory, with the first word in arg2a
 * and the second in arg2b.
 * NB: 36 bytes
 */
#define MEM_WRITE_DOUBLE_SIZE 36
static inline void MEM_WRITE_DOUBLE( int addr, int arg2a, int arg2b )
{
    int adj = (-8-sh4_x86.stack_posn)&0x0F;
    SUB_imm8s_r32( adj, R_ESP );
    ADD_imm8s_r32( 4, addr );
    PUSH_r32(arg2b);
    PUSH_r32(addr);
    ADD_imm8s_r32( -4, addr );
    SUB_imm8s_r32( 8, R_ESP );
    PUSH_r32(arg2a);
    PUSH_r32(addr);
    load_imm32(R_EAX, (uint32_t)sh4_write_long);
    CALL_r32(R_EAX);
    ADD_imm8s_r32( 16, R_ESP );
    load_imm32(R_EAX, (uint32_t)sh4_write_long);
    CALL_r32(R_EAX);
    ADD_imm8s_r32( adj+8, R_ESP );
    sh4_x86.stack_posn -= 16;
}
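
/* Note: MEM_WRITE_DOUBLE lays out both argument pairs before the first call -
 * (addr, arg2a) at ESP and (addr+4, arg2b) starting 16 bytes above it - so the
 * ADD of 16 between the two sh4_write_long calls lands ESP on the second pair
 * while keeping the stack 16-byte aligned for both calls. */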

/**
 * Read a double (64-bit) value from memory, writing the first word into arg2a
 * and the second into arg2b. The addr must not be in EAX.
 * NB: 36 bytes
 */
#define MEM_READ_DOUBLE_SIZE 36
static inline void MEM_READ_DOUBLE( int addr, int arg2a, int arg2b )
{
    int adj = (-4-sh4_x86.stack_posn)&0x0F;
    int adj2 = (-8-sh4_x86.stack_posn)&0x0F;
    SUB_imm8s_r32( adj, R_ESP );
    PUSH_r32(addr);
    load_imm32(R_EAX, (uint32_t)sh4_read_long);
    CALL_r32(R_EAX);
    POP_r32(addr);
    SUB_imm8s_r32( adj2-adj, R_ESP );
    PUSH_r32(R_EAX);
    ADD_imm8s_r32( 4, addr );
    PUSH_r32(addr);
    load_imm32(R_EAX, (uint32_t)sh4_read_long);
    CALL_r32(R_EAX);
    ADD_imm8s_r32( 4, R_ESP );
    MOV_r32_r32( R_EAX, arg2b );
    POP_r32(arg2a);
    ADD_imm8s_r32( adj2, R_ESP );
    sh4_x86.stack_posn -= 4;
}
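
/* Note: addr must not be EAX because EAX is used both as the indirect call
 * target and as sh4_read_long's return register; the first result is parked
 * on the stack (PUSH_r32(R_EAX)) until the second read completes. */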

#define EXIT_BLOCK_SIZE 29

/**
 * Emit the 'start of block' assembly. Sets up the stack frame and saves
 * SI/DI as required.
 */
void sh4_translate_begin_block( sh4addr_t pc )
{
    PUSH_r32(R_EBP);
    /* mov &sh4r, ebp */
    load_ptr( R_EBP, &sh4r );

    sh4_x86.in_delay_slot = FALSE;
    sh4_x86.priv_checked = FALSE;
    sh4_x86.fpuen_checked = FALSE;
    sh4_x86.branch_taken = FALSE;
    sh4_x86.backpatch_posn = 0;
    sh4_x86.block_start_pc = pc;
    sh4_x86.tstate = TSTATE_NONE;
    sh4_x86.stack_posn = 8;
}
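
/* Note: stack_posn starts at 8 because the CALL into this block plus the
 * PUSH of EBP leave ESP 8 bytes below the 16-byte alignment point the
 * Mac OS X ABI guarantees at the original call site; the call_func* and
 * MEM_*_DOUBLE adjustments are all computed relative to this value. */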

/**
 * Exit the block with sh4r.pc already written
 * Bytes: 15
 */
void exit_block_pcset( sh4addr_t pc )
{
    load_imm32( R_ECX, ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );    // 6
    load_spreg( R_EAX, REG_OFFSET(pc) );
    call_func1(xlat_get_code_by_vma,R_EAX);
    POP_r32(R_EBP);
    RET();
}

/**
 * Exit the block to an absolute PC
 */
void exit_block( sh4addr_t pc, sh4addr_t endpc )
{
    load_imm32( R_ECX, pc );                            // 5
    store_spreg( R_ECX, REG_OFFSET(pc) );               // 3
    MOV_moff32_EAX( xlat_get_lut_entry(pc) );           // 5
    AND_imm8s_r32( 0xFC, R_EAX );                       // 3
    load_imm32( R_ECX, ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period ); // 5
    ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) );     // 6
    POP_r32(R_EBP);
    RET();
}
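
/* Note: translated blocks appear to return the native code pointer of the
 * next block (or NULL) in EAX, as exit_block_pcset does via
 * xlat_get_code_by_vma; here the pointer comes from the translation LUT, with
 * the AND of 0xFC presumably masking off low-order flag bits in the entry. */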

/**
 * Write the block trailer (exception handling block)
 */
void sh4_translate_end_block( sh4addr_t pc ) {
    if( sh4_x86.branch_taken == FALSE ) {
        // Didn't exit unconditionally already, so write the termination here
        exit_block( pc, pc );
    }
    if( sh4_x86.backpatch_posn != 0 ) {
        unsigned int i;
        // Raise exception
        uint8_t *end_ptr = xlat_output;
        load_spreg( R_ECX, REG_OFFSET(pc) );
        ADD_r32_r32( R_EDX, R_ECX );
        ADD_r32_r32( R_EDX, R_ECX );
        store_spreg( R_ECX, REG_OFFSET(pc) );
        MOV_moff32_EAX( &sh4_cpu_period );
        MUL_r32( R_EDX );
        ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );

        POP_r32(R_EDX);
        call_func1( sh4_raise_exception, R_EDX );
        load_spreg( R_EAX, REG_OFFSET(pc) );
        call_func1(xlat_get_code_by_vma,R_EAX);
        POP_r32(R_EBP);
        RET();

        // Exception already raised - just cleanup
        uint8_t *preexc_ptr = xlat_output;
        load_imm32( R_ECX, sh4_x86.block_start_pc );
        ADD_r32_r32( R_EDX, R_ECX );
        ADD_r32_r32( R_EDX, R_ECX );
        store_spreg( R_ECX, REG_OFFSET(spc) );
        MOV_moff32_EAX( &sh4_cpu_period );
        MUL_r32( R_EDX );
        ADD_r32_sh4r( R_EAX, REG_OFFSET(slice_cycle) );
        load_spreg( R_EAX, REG_OFFSET(pc) );
        call_func1(xlat_get_code_by_vma,R_EAX);
        POP_r32(R_EBP);
        RET();

        for( i=0; i< sh4_x86.backpatch_posn; i++ ) {
            *sh4_x86.backpatch_list[i].fixup_addr =
                xlat_output - ((uint8_t *)sh4_x86.backpatch_list[i].fixup_addr) - 4;
            if( sh4_x86.backpatch_list[i].exc_code == -1 ) {
                load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
                int rel = preexc_ptr - xlat_output;
                JMP_rel(rel);
            } else {
                PUSH_imm32( sh4_x86.backpatch_list[i].exc_code );
                load_imm32( R_EDX, sh4_x86.backpatch_list[i].fixup_icount );
                int rel = end_ptr - xlat_output;
                JMP_rel(rel);
            }
        }
    }
}
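
/* Note on the fixup loop above: each backpatch_list entry holds the address
 * of a 4-byte relative operand emitted earlier in the block, which is patched
 * here to branch into this trailer. The stubs emitted in the loop load the
 * recorded fixup_icount into EDX (used above to reconstruct the faulting PC
 * and the cycle charge); entries with a real exc_code push it and jump to
 * end_ptr, where it is POPped and passed to sh4_raise_exception, while
 * exc_code == -1 (exception already raised) jumps to the cleanup path at
 * preexc_ptr. */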

#endif