/**
 * $Id: x86op.h,v 1.10 2007-09-19 09:15:18 nkeynes Exp $
 *
 * Definitions of x86 opcodes for use by the translator.
 *
 * Copyright (c) 2007 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#ifndef __lxdream_x86op_H
#define __lxdream_x86op_H

/* Debug jump markers: MARK_JMP(n,x) remembers where a rel8 branch of
 * displacement n expects label x to land, and JMP_TARGET(x) asserts that
 * the label really is emitted at that position in the output stream.
 * NOTE(review): MARK_JMP is defined twice below with no #ifdef/#else
 * between the definitions -- the debug/release conditional directives
 * (and the matching empty JMP_TARGET) appear to have been lost from this
 * chunk and must be restored for the header to preprocess cleanly. */
#define MARK_JMP(n,x) uint8_t *_mark_jmp_##x = xlat_output + n
#define JMP_TARGET(x) assert( _mark_jmp_##x == xlat_output )
#define MARK_JMP(n, x)
/* Append a single opcode byte to the translation output buffer.
 * xlat_output is the translator's output cursor (uint8_t *). */
#define OP(x) *xlat_output++ = (x)
/* Append a 32-bit little-endian immediate. Emitted byte-by-byte from a
 * single-evaluation temporary: the previous *((uint32_t *)xlat_output)
 * store was an unaligned write through an incompatible pointer type
 * (undefined behavior); the byte stores produce the identical output
 * stream and advance the cursor by 4 exactly as before. */
#define OP32(x) do { uint32_t v32_ = (x); \
        *xlat_output++ = (uint8_t)v32_; \
        *xlat_output++ = (uint8_t)(v32_ >> 8); \
        *xlat_output++ = (uint8_t)(v32_ >> 16); \
        *xlat_output++ = (uint8_t)(v32_ >> 24); } while(0)
/* Append a 64-bit little-endian immediate (low dword first), advancing
 * the cursor by 8. */
#define OP64(x) do { uint64_t v64_ = (x); \
        OP32((uint32_t)v64_); \
        OP32((uint32_t)(v64_ >> 32)); } while(0)
#if SH4_TRANSLATOR == TARGET_X86_64
/* x86-64: pointers are emitted as 64-bit immediates, and the ABI requires
 * 16-byte stack alignment at call sites, so the "realigned" push/pop
 * variants adjust RSP by an extra 8 bytes around the 1-byte push/pop. */
#define OPPTR(x) OP64((uint64_t)(x))
#define STACK_ALIGN 16
#define POP_r32(r1) OP(0x58 + r1);
#define POP_realigned_r32(r1) OP(0x58 + r1); REXW(); ADD_imm8s_r32(8,R_ESP)
#define PUSH_r32(r1) OP(0x50 + r1);
#define PUSH_realigned_r32(r1) REXW(); SUB_imm8s_r32(8, R_ESP); OP(0x50 + r1)
#define PUSH_imm32(imm) OP(0x68); OP32(imm);
/* NOTE(review): there is no PUSH imm64 encoding -- REX.W + 0x68 still takes
 * an imm32 (sign-extended to 64 bits), so this macro emits 4 stray bytes
 * that would be decoded as instructions. Verify PUSH_imm64 is never used,
 * or re-implement it (e.g. MOV imm64 to a scratch register, then push). */
#define PUSH_imm64(imm) REXW(); OP(0x68); OP64(imm);
/* NOTE(review): the redefinitions below conflict with the x86-64
 * definitions above; the original #else (32-bit branch), the nested
 * #ifdef APPLE_BUILD/#else, and the closing #endif directives appear to
 * have been lost from this chunk and must be restored before this header
 * will preprocess. The sh4_x86.stack_posn variants track emitted stack
 * movement for 32-bit builds that need 16-byte alignment bookkeeping. */
#define OPPTR(x) OP32((uint32_t)(x))
#define POP_realigned_r32(r1) POP_r32(r1)
#define PUSH_realigned_r32(r1) PUSH_r32(r1)
#define STACK_ALIGN 16
#define POP_r32(r1) OP(0x58 + r1); sh4_x86.stack_posn -= 4;
#define PUSH_r32(r1) OP(0x50 + r1); sh4_x86.stack_posn += 4;
#define PUSH_imm32(imm) OP(0x68); OP32(imm); sh4_x86.stack_posn += 4;
#define POP_r32(r1) OP(0x58 + r1)
#define PUSH_r32(r1) OP(0x50 + r1)
#define PUSH_imm32(imm) OP(0x68); OP32(imm)
#define POP_r32(r1) OP(0x58 + r1)
#define PUSH_r32(r1) OP(0x50 + r1)
/* Offset of a reg relative to the sh4r structure */
/* These offsets are used as EBP-relative displacements by the *_sh4r
 * macros below (the generated code addresses the register file through
 * EBP). NOTE(review): offsetof() would express this more directly. */
#define REG_OFFSET(reg) (((char *)&sh4r.reg) - ((char *)&sh4r))

#define R_T REG_OFFSET(t)
#define R_Q REG_OFFSET(q)
#define R_S REG_OFFSET(s)
#define R_M REG_OFFSET(m)
#define R_SR REG_OFFSET(sr)
#define R_GBR REG_OFFSET(gbr)
#define R_SSR REG_OFFSET(ssr)
#define R_SPC REG_OFFSET(spc)
#define R_VBR REG_OFFSET(vbr)
/* MACH/MACL are the two 32-bit halves of the 64-bit 'mac' field; placing
 * MACH at +4 assumes a little-endian host layout (true of the x86 hosts
 * this translator emits code for). */
#define R_MACH REG_OFFSET(mac)+4
#define R_MACL REG_OFFSET(mac)
#define R_PR REG_OFFSET(pr)
#define R_SGR REG_OFFSET(sgr)
#define R_FPUL REG_OFFSET(fpul)
#define R_FPSCR REG_OFFSET(fpscr)
#define R_DBR REG_OFFSET(dbr)
108 /**************** Basic X86 operations *********************/
/* Note: operands follow SH4 convention (source, dest) rather than x86
 * conventions (dest, source)
 */
/* Two-reg modrm form - first arg is the r32 reg, second arg is the r/m32 reg.
 * Arguments are parenthesized so that expression arguments (e.g. r+1)
 * encode correctly: without parentheses, '<<' would bind into the argument
 * expression and produce a wrong ModRM byte. */
#define MODRM_r32_rm32(r1,r2) OP(0xC0 | ((r1)<<3) | (r2))
#define MODRM_rm32_r32(r1,r2) OP(0xC0 | ((r2)<<3) | (r1))
/* ebp+disp8 modrm form (mod=01, r/m=EBP): register r1 with [EBP+disp8].
 * Arguments parenthesized so expression arguments encode correctly. */
#define MODRM_r32_ebp8(r1,disp) OP(0x45 | ((r1)<<3)); OP(disp)

/* ebp+disp32 modrm form (mod=10, r/m=EBP): register r1 with [EBP+disp32]. */
#define MODRM_r32_ebp32(r1,disp) OP(0x85 | ((r1)<<3)); OP32(disp)

/* Emit the shortest EBP-relative encoding for an sh4r-struct offset:
 * disp8 when the (non-negative REG_OFFSET) displacement fits, else disp32. */
#define MODRM_r32_sh4r(r1,disp) if((disp)>127){ MODRM_r32_ebp32(r1,disp);}else{ MODRM_r32_ebp8(r1,(unsigned char)(disp)); }

/* REX.W prefix: promotes the following instruction to 64-bit operand size. */
#define REXW() OP(0x48)
/* Integer ALU operations. Opcode bytes follow the IA-32 encoding
 * (0x03/0x01 ADD, 0x13/0x11 ADC, 0x23 AND, 0x3B CMP); the 0x80/0x81/0x83
 * immediate forms use the ModRM reg field as an opcode extension.
 * Operands follow SH4 order: (source, dest). */
#define ADD_sh4r_r32(disp,r1) OP(0x03); MODRM_r32_sh4r(r1,disp)   /* r1 += sh4r field */
#define ADD_r32_sh4r(r1,disp) OP(0x01); MODRM_r32_sh4r(r1,disp)   /* sh4r field += r1 */
#define ADD_r32_r32(r1,r2) OP(0x03); MODRM_rm32_r32(r1,r2)        /* r2 += r1 */
#define ADD_imm8s_r32(imm,r1) OP(0x83); MODRM_rm32_r32(r1, 0); OP(imm)    /* r1 += sign-ext imm8 */
#define ADD_imm8s_sh4r(imm,disp) OP(0x83); MODRM_r32_sh4r(0,disp); OP(imm)
#define ADD_imm32_r32(imm32,r1) OP(0x81); MODRM_rm32_r32(r1,0); OP32(imm32)
#define ADC_r32_r32(r1,r2) OP(0x13); MODRM_rm32_r32(r1,r2)        /* r2 += r1 + CF */
#define ADC_sh4r_r32(disp,r1) OP(0x13); MODRM_r32_sh4r(r1,disp)
#define ADC_r32_sh4r(r1,disp) OP(0x11); MODRM_r32_sh4r(r1,disp)
#define AND_r32_r32(r1,r2) OP(0x23); MODRM_rm32_r32(r1,r2)        /* r2 &= r1 */
#define AND_imm8_r8(imm8, r1) OP(0x80); MODRM_rm32_r32(r1,4); OP(imm8)    /* byte register form */
#define AND_imm8s_r32(imm8,r1) OP(0x83); MODRM_rm32_r32(r1,4); OP(imm8)
#define AND_imm32_r32(imm,r1) OP(0x81); MODRM_rm32_r32(r1,4); OP32(imm)
#define CALL_r32(r1) OP(0xFF); MODRM_rm32_r32(r1,2)               /* indirect call through r1 */
#define CLC() OP(0xF8)                                            /* clear carry flag */
#define CMC() OP(0xF5)                                            /* complement carry flag */
#define CMP_sh4r_r32(disp,r1) OP(0x3B); MODRM_r32_sh4r(r1,disp)   /* flags from r1 - field */
#define CMP_r32_r32(r1,r2) OP(0x3B); MODRM_rm32_r32(r1,r2)        /* flags from r2 - r1 */
#define CMP_imm32_r32(imm32, r1) OP(0x81); MODRM_rm32_r32(r1,7); OP32(imm32)
#define CMP_imm8s_r32(imm,r1) OP(0x83); MODRM_rm32_r32(r1,7); OP(imm)
/* CMP [sh4r field], imm8s (0x83 /7 ib). A ';' is added between the modrm
 * and immediate emitters -- it was missing, inconsistently with every
 * sibling macro (harmless only because the if/else block happens to end
 * in '}'); the emitted byte sequence is unchanged. */
#define CMP_imm8s_sh4r(imm,disp) OP(0x83); MODRM_r32_sh4r(7,disp); OP(imm)
/* One-byte INC/DEC register encodings.
 * NOTE(review): bytes 0x40-0x4F are REX prefixes in 64-bit mode, so these
 * single-byte forms are valid only in the 32-bit translator -- confirm the
 * x86-64 build never emits them. */
#define DEC_r32(r1) OP(0x48+r1)
#define IMUL_r32(r1) OP(0xF7); MODRM_rm32_r32(r1,5)   /* EDX:EAX = EAX * r1 (signed) */
#define INC_r32(r1) OP(0x40+r1)
/* Unconditional rel8 jump; MARK_JMP pairs with a later JMP_TARGET(label). */
#define JMP_rel8(rel, label) OP(0xEB); OP(rel); MARK_JMP(rel,label)
/* Data movement and widening loads (source, dest order). */
#define MOV_r32_r32(r1,r2) OP(0x89); MODRM_r32_rm32(r1,r2)        /* r2 = r1 */
#define MOV_r32_sh4r(r1,disp) OP(0x89); MODRM_r32_sh4r(r1,disp)   /* sh4r field = r1 */
#define MOV_moff32_EAX(off) OP(0xA1); OPPTR(off)                  /* EAX = [absolute moffs] */
#define MOV_sh4r_r32(disp, r1) OP(0x8B); MODRM_r32_sh4r(r1,disp)  /* r1 = sh4r field */
/* r2 = [r1], hand-built mod=00 ModRM byte.
 * NOTE(review): with mod=00, r/m=ESP requires a SIB byte and r/m=EBP means
 * disp32 -- r1 must be neither for this to encode what it says. */
#define MOV_r32ind_r32(r1,r2) OP(0x8B); OP(0 + (r2<<3) + r1 )
#define MOVSX_r8_r32(r1,r2) OP(0x0F); OP(0xBE); MODRM_rm32_r32(r1,r2)   /* r2 = sign-extended low byte of r1 */
#define MOVSX_r16_r32(r1,r2) OP(0x0F); OP(0xBF); MODRM_rm32_r32(r1,r2)  /* r2 = sign-extended low word of r1 */
#define MOVZX_r8_r32(r1,r2) OP(0x0F); OP(0xB6); MODRM_rm32_r32(r1,r2)   /* r2 = zero-extended low byte of r1 */
#define MOVZX_r16_r32(r1,r2) OP(0x0F); OP(0xB7); MODRM_rm32_r32(r1,r2)  /* r2 = zero-extended low word of r1 */
#define MUL_r32(r1) OP(0xF7); MODRM_rm32_r32(r1,4)   /* EDX:EAX = EAX * r1 (unsigned) */
#define NEG_r32(r1) OP(0xF7); MODRM_rm32_r32(r1,3)   /* r1 = -r1 */
#define NOT_r32(r1) OP(0xF7); MODRM_rm32_r32(r1,2)   /* r1 = ~r1 */
/* Logical ops, shifts/rotates and tests. Immediate forms use the ModRM reg
 * field as opcode extension: /0 ROL-or-ADD-or-TEST, /1 OR/ROR, /4 AND/SHL,
 * /5 SHR, /6 XOR, /7 CMP/SAR, depending on the opcode byte. */
#define OR_r32_r32(r1,r2) OP(0x0B); MODRM_rm32_r32(r1,r2)         /* r2 |= r1 */
#define OR_imm8_r8(imm,r1) OP(0x80); MODRM_rm32_r32(r1,1); OP(imm)
#define OR_imm32_r32(imm,r1) OP(0x81); MODRM_rm32_r32(r1,1); OP32(imm)
#define OR_sh4r_r32(disp,r1) OP(0x0B); MODRM_r32_sh4r(r1,disp)
#define RCL1_r32(r1) OP(0xD1); MODRM_rm32_r32(r1,2)   /* rotate left through carry, 1 bit */
#define RCR1_r32(r1) OP(0xD1); MODRM_rm32_r32(r1,3)   /* rotate right through carry, 1 bit */
#define RET() OP(0xC3)
#define ROL1_r32(r1) OP(0xD1); MODRM_rm32_r32(r1,0)
#define ROR1_r32(r1) OP(0xD1); MODRM_rm32_r32(r1,1)
#define SAR1_r32(r1) OP(0xD1); MODRM_rm32_r32(r1,7)   /* arithmetic shift right 1 */
#define SAR_imm8_r32(imm,r1) OP(0xC1); MODRM_rm32_r32(r1,7); OP(imm)
#define SAR_r32_CL(r1) OP(0xD3); MODRM_rm32_r32(r1,7)
#define SBB_r32_r32(r1,r2) OP(0x1B); MODRM_rm32_r32(r1,r2)        /* r2 -= r1 + CF */
#define SHL1_r32(r1) OP(0xD1); MODRM_rm32_r32(r1,4)
#define SHL_r32_CL(r1) OP(0xD3); MODRM_rm32_r32(r1,4)
#define SHL_imm8_r32(imm,r1) OP(0xC1); MODRM_rm32_r32(r1,4); OP(imm)
#define SHR1_r32(r1) OP(0xD1); MODRM_rm32_r32(r1,5)
#define SHR_r32_CL(r1) OP(0xD3); MODRM_rm32_r32(r1,5)
#define SHR_imm8_r32(imm,r1) OP(0xC1); MODRM_rm32_r32(r1,5); OP(imm)
#define STC() OP(0xF9)                                            /* set carry flag */
#define SUB_r32_r32(r1,r2) OP(0x2B); MODRM_rm32_r32(r1,r2)        /* r2 -= r1 */
#define SUB_sh4r_r32(disp,r1) OP(0x2B); MODRM_r32_sh4r(r1, disp)
/* Implemented as ADD of the negated immediate.
 * NOTE(review): imm == -128 cannot be negated within imm8s range. */
#define SUB_imm8s_r32(imm,r1) ADD_imm8s_r32(-(imm),r1)
#define TEST_r8_r8(r1,r2) OP(0x84); MODRM_r32_rm32(r1,r2)         /* flags from r1 & r2 (byte) */
#define TEST_r32_r32(r1,r2) OP(0x85); MODRM_rm32_r32(r1,r2)       /* flags from r1 & r2 */
#define TEST_imm8_r8(imm8,r1) OP(0xF6); MODRM_rm32_r32(r1,0); OP(imm8)
#define TEST_imm32_r32(imm,r1) OP(0xF7); MODRM_rm32_r32(r1,0); OP32(imm)
#define XCHG_r8_r8(r1,r2) OP(0x86); MODRM_rm32_r32(r1,r2)
#define XOR_r8_r8(r1,r2) OP(0x32); MODRM_rm32_r32(r1,r2)
#define XOR_imm8s_r32(imm,r1) OP(0x83); MODRM_rm32_r32(r1,6); OP(imm)
#define XOR_r32_r32(r1,r2) OP(0x33); MODRM_rm32_r32(r1,r2)        /* r2 ^= r1 */
#define XOR_sh4r_r32(disp,r1) OP(0x33); MODRM_r32_sh4r(r1,disp)
#define XOR_imm32_r32(imm,r1) OP(0x81); MODRM_rm32_r32(r1,6); OP32(imm)
/* Floating point ops */
/* x87 FPU stack operations used to implement the SH4 FPU. */
#define FABS_st0() OP(0xD9); OP(0xE1)          /* st(0) = |st(0)| */
#define FADDP_st(st) OP(0xDE); OP(0xC0+st)     /* st(st) += st(0); pop */
#define FCHS_st0() OP(0xD9); OP(0xE0)          /* st(0) = -st(0) */
#define FCOMIP_st(st) OP(0xDF); OP(0xF0+st)    /* compare st(0),st(st) into EFLAGS; pop */
#define FDIVP_st(st) OP(0xDE); OP(0xF8+st)     /* st(st) /= st(0); pop */
#define FILD_sh4r(disp) OP(0xDB); MODRM_r32_sh4r(0, disp)    /* push int32 from sh4r field */
/* Push int32 from [r32]; hand-built mod=00 ModRM.
 * NOTE(review): r32 must not be ESP(4)/EBP(5) -- those r/m values mean
 * SIB / disp32 respectively. */
#define FILD_r32ind(r32) OP(0xDB); OP(0x00+r32)
#define FISTP_sh4r(disp) OP(0xDB); MODRM_r32_sh4r(3, disp)   /* store int32 to sh4r field; pop */
#define FLD0_st0() OP(0xD9); OP(0xEE);         /* push +0.0 */
#define FLD1_st0() OP(0xD9); OP(0xE8);         /* push +1.0 */
#define FLDCW_r32ind(r32) OP(0xD9); OP(0x28+r32)    /* load FPU control word from [r32] */
#define FMULP_st(st) OP(0xDE); OP(0xC8+st)     /* st(st) *= st(0); pop */
#define FNSTCW_r32ind(r32) OP(0xD9); OP(0x38+r32)   /* store FPU control word to [r32] (no WAIT) */
#define FPOP_st() OP(0xDD); OP(0xC0); OP(0xD9); OP(0xF7)   /* FFREE st(0); FINCSTP */
#define FSUBP_st(st) OP(0xDE); OP(0xE8+st)     /* st(st) -= st(0); pop */
#define FSQRT_st0() OP(0xD9); OP(0xFA)         /* st(0) = sqrt(st(0)) */
/* Conditional branches */
/* Short (rel8) Jcc forms; MARK_JMP pairs each with a JMP_TARGET(label)
 * check in debug builds. Note the encodings make JC/JNAE aliases (0x72)
 * and JAE/JNC aliases (0x73), as per the x86 condition-code table. */
#define JE_rel8(rel,label) OP(0x74); OP(rel); MARK_JMP(rel,label)
#define JA_rel8(rel,label) OP(0x77); OP(rel); MARK_JMP(rel,label)
#define JAE_rel8(rel,label) OP(0x73); OP(rel); MARK_JMP(rel,label)
#define JG_rel8(rel,label) OP(0x7F); OP(rel); MARK_JMP(rel,label)
#define JGE_rel8(rel,label) OP(0x7D); OP(rel); MARK_JMP(rel,label)
#define JC_rel8(rel,label) OP(0x72); OP(rel); MARK_JMP(rel,label)
#define JO_rel8(rel,label) OP(0x70); OP(rel); MARK_JMP(rel,label)
#define JNE_rel8(rel,label) OP(0x75); OP(rel); MARK_JMP(rel,label)
#define JNA_rel8(rel,label) OP(0x76); OP(rel); MARK_JMP(rel,label)
#define JNAE_rel8(rel,label) OP(0x72); OP(rel); MARK_JMP(rel,label)
#define JNG_rel8(rel,label) OP(0x7E); OP(rel); MARK_JMP(rel,label)
#define JNGE_rel8(rel,label) OP(0x7C); OP(rel); MARK_JMP(rel,label)
#define JNC_rel8(rel,label) OP(0x73); OP(rel); MARK_JMP(rel,label)
#define JNO_rel8(rel,label) OP(0x71); OP(rel); MARK_JMP(rel,label)
#define JNS_rel8(rel,label) OP(0x79); OP(rel); MARK_JMP(rel,label)
#define JS_rel8(rel,label) OP(0x78); OP(rel); MARK_JMP(rel,label)
/* 32-bit long forms w/ backpatching to an exit routine */
/* Each emits the 0F 8x (or E9) opcode, registers the position of the rel32
 * field with sh4_x86_add_backpatch() before writing it, so the displacement
 * can be patched once the exit stub's address is known. */
#define JMP_exit(rel) OP(0xE9); sh4_x86_add_backpatch(xlat_output); OP32(rel)
#define JE_exit(rel) OP(0x0F); OP(0x84); sh4_x86_add_backpatch(xlat_output); OP32(rel)
#define JA_exit(rel) OP(0x0F); OP(0x87); sh4_x86_add_backpatch(xlat_output); OP32(rel)
#define JAE_exit(rel) OP(0x0F); OP(0x83); sh4_x86_add_backpatch(xlat_output); OP32(rel)
#define JG_exit(rel) OP(0x0F); OP(0x8F); sh4_x86_add_backpatch(xlat_output); OP32(rel)
#define JGE_exit(rel) OP(0x0F); OP(0x8D); sh4_x86_add_backpatch(xlat_output); OP32(rel)
#define JC_exit(rel) OP(0x0F); OP(0x82); sh4_x86_add_backpatch(xlat_output); OP32(rel)
#define JO_exit(rel) OP(0x0F); OP(0x80); sh4_x86_add_backpatch(xlat_output); OP32(rel)
#define JNE_exit(rel) OP(0x0F); OP(0x85); sh4_x86_add_backpatch(xlat_output); OP32(rel)
#define JNA_exit(rel) OP(0x0F); OP(0x86); sh4_x86_add_backpatch(xlat_output); OP32(rel)
#define JNAE_exit(rel) OP(0x0F);OP(0x82); sh4_x86_add_backpatch(xlat_output); OP32(rel)
#define JNG_exit(rel) OP(0x0F); OP(0x8E); sh4_x86_add_backpatch(xlat_output); OP32(rel)
#define JNGE_exit(rel) OP(0x0F);OP(0x8C); sh4_x86_add_backpatch(xlat_output); OP32(rel)
#define JNC_exit(rel) OP(0x0F); OP(0x83); sh4_x86_add_backpatch(xlat_output); OP32(rel)
#define JNO_exit(rel) OP(0x0F); OP(0x81); sh4_x86_add_backpatch(xlat_output); OP32(rel)
/* Conditional register-to-register moves: r2 = r1 if the condition holds
 * (0F 4x /r CMOVcc encodings). */
#define CMOVE_r32_r32(r1,r2) OP(0x0F); OP(0x44); MODRM_rm32_r32(r1,r2)
#define CMOVA_r32_r32(r1,r2) OP(0x0F); OP(0x47); MODRM_rm32_r32(r1,r2)
#define CMOVAE_r32_r32(r1,r2) OP(0x0F); OP(0x43); MODRM_rm32_r32(r1,r2)
#define CMOVG_r32_r32(r1,r2) OP(0x0F); OP(0x4F); MODRM_rm32_r32(r1,r2)
#define CMOVGE_r32_r32(r1,r2) OP(0x0F); OP(0x4D); MODRM_rm32_r32(r1,r2)
#define CMOVC_r32_r32(r1,r2) OP(0x0F); OP(0x42); MODRM_rm32_r32(r1,r2)
#define CMOVO_r32_r32(r1,r2) OP(0x0F); OP(0x40); MODRM_rm32_r32(r1,r2)
/* Conditional setcc - writeback to sh4r.t */
/* SETcc stores a single byte (0 or 1) at [EBP+disp]; the upper bytes of
 * the target field are untouched -- assumes sh4r.t's upper bytes remain
 * zero (TODO confirm against the sh4r declaration). Encoding aliases:
 * SETNAE==SETC (0x92) and SETNC==SETAE (0x93). The reg field (0) is
 * ignored by SETcc. */
#define SETE_sh4r(disp) OP(0x0F); OP(0x94); MODRM_r32_sh4r(0, disp);
#define SETA_sh4r(disp) OP(0x0F); OP(0x97); MODRM_r32_sh4r(0, disp);
#define SETAE_sh4r(disp) OP(0x0F); OP(0x93); MODRM_r32_sh4r(0, disp);
#define SETG_sh4r(disp) OP(0x0F); OP(0x9F); MODRM_r32_sh4r(0, disp);
#define SETGE_sh4r(disp) OP(0x0F); OP(0x9D); MODRM_r32_sh4r(0, disp);
#define SETC_sh4r(disp) OP(0x0F); OP(0x92); MODRM_r32_sh4r(0, disp);
#define SETO_sh4r(disp) OP(0x0F); OP(0x90); MODRM_r32_sh4r(0, disp);
#define SETNE_sh4r(disp) OP(0x0F); OP(0x95); MODRM_r32_sh4r(0, disp);
#define SETNA_sh4r(disp) OP(0x0F); OP(0x96); MODRM_r32_sh4r(0, disp);
#define SETNAE_sh4r(disp) OP(0x0F); OP(0x92); MODRM_r32_sh4r(0, disp);
#define SETNG_sh4r(disp) OP(0x0F); OP(0x9E); MODRM_r32_sh4r(0, disp);
#define SETNGE_sh4r(disp) OP(0x0F); OP(0x9C); MODRM_r32_sh4r(0, disp);
#define SETNC_sh4r(disp) OP(0x0F); OP(0x93); MODRM_r32_sh4r(0, disp);
#define SETNO_sh4r(disp) OP(0x0F); OP(0x91); MODRM_r32_sh4r(0, disp);
/* Convenience forms writing the condition directly to the SH4 T flag
 * (sh4r.t, addressed via R_T). */
#define SETE_t() SETE_sh4r(R_T)
#define SETA_t() SETA_sh4r(R_T)
#define SETAE_t() SETAE_sh4r(R_T)
#define SETG_t() SETG_sh4r(R_T)
#define SETGE_t() SETGE_sh4r(R_T)
#define SETC_t() SETC_sh4r(R_T)
#define SETO_t() SETO_sh4r(R_T)
#define SETNE_t() SETNE_sh4r(R_T)

/* SETC into the low byte of a register (0F 92 /r, mod=11 form). */
#define SETC_r8(r1) OP(0x0F); OP(0x92); MODRM_rm32_r32(r1, 0)
/* Pseudo-op Load carry from T: CMP [EBP+t], #01 ; CMC */
/* CMP sh4r.t,1 sets CF iff t < 1 (i.e. t == 0); CMC then inverts it,
 * leaving CF == t for the boolean values 0/1 that sh4r.t takes. */
#define LDC_t() OP(0x83); MODRM_r32_sh4r(7,R_T); OP(0x01); CMC()

#endif /* !__lxdream_x86op_H */
.