4 * Definitions of x86 opcodes for use by the translator.
6 * Copyright (c) 2007 Nathan Keynes.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
19 #ifndef lxdream_x86op_H
20 #define lxdream_x86op_H 1
/* Jump backpatching: MARK_JMP8/MARK_JMP32 declare a local pointer recording
 * where a jump displacement is about to be written (callers emit a
 * placeholder byte of -1 immediately after MARK_JMP8).  JMP_TARGET(x) then
 * adds the distance from the mark to the current output position; combined
 * with the -1 placeholder this yields a rel8 displacement measured from the
 * end of the jump instruction, as x86 requires.
 * NOTE(review): JMP_TARGET subtracts the mark from xlat_output (a byte
 * pointer); after MARK_JMP32 the mark is a uint32_t*, making that pointer
 * subtraction ill-typed/mis-scaled — JMP_TARGET appears only valid after
 * MARK_JMP8.  Confirm there are no MARK_JMP32 + JMP_TARGET users.
 */
#define MARK_JMP8(x) uint8_t *_mark_jmp_##x = xlat_output
#define MARK_JMP32(x) uint32_t *_mark_jmp_##x = (uint32_t *)xlat_output
#define JMP_TARGET(x) *_mark_jmp_##x += (xlat_output - _mark_jmp_##x)

/* Emit one byte / 32-bit word / 64-bit word of code at xlat_output,
 * post-incrementing the output pointer.  OP32/OP64 perform potentially
 * unaligned stores — fine on an x86 host. */
#define OP(x) *xlat_output++ = (x)
#define OP32(x) *((uint32_t *)xlat_output) = (x); xlat_output+=4
#define OP64(x) *((uint64_t *)xlat_output) = (x); xlat_output+=8
#if SIZEOF_VOID_P == 8
/* 64-bit host: pointer-sized operations need a REX.W prefix (REXW()), and
 * calls need a 16-byte aligned stack (STACK_ALIGN), so the _realigned
 * push/pop variants pad/unpad the stack by an extra 8 bytes around the
 * 32-bit push/pop. */
#define OPPTR(x) OP64((uint64_t)(x))
#define AND_imm8s_rptr(imm, r1) REXW(); AND_imm8s_r32( imm, r1 )
#define LEA_sh4r_rptr(disp, r1) REXW(); LEA_sh4r_r32(disp,r1)
#define MOV_moffptr_EAX(offptr) REXW(); MOV_moff32_EAX( offptr )
#define STACK_ALIGN 16
#define POP_r32(r1) OP(0x58 + r1);
#define POP_realigned_r32(r1) OP(0x58 + r1); REXW(); ADD_imm8s_r32(8,R_ESP)
#define PUSH_r32(r1) OP(0x50 + r1);
#define PUSH_realigned_r32(r1) REXW(); SUB_imm8s_r32(8, R_ESP); OP(0x50 + r1)
#define PUSH_imm32(imm) OP(0x68); OP32(imm);
#define PUSH_imm64(imm) REXW(); OP(0x68); OP64(imm);
#else /* 32-bit system */
/* 32-bit host: pointers are 32-bit and no REX prefixes exist, so the
 * pointer-sized forms collapse to their plain 32-bit equivalents and no
 * realignment padding is needed. */
#define OPPTR(x) OP32((uint32_t)(x))
#define AND_imm8s_rptr(imm, r1) AND_imm8s_r32( imm, r1 )
#define LEA_sh4r_rptr(disp, r1) LEA_sh4r_r32(disp,r1)
#define MOV_moffptr_EAX(offptr) MOV_moff32_EAX( offptr )
#define POP_realigned_r32(r1) POP_r32(r1)
#define PUSH_realigned_r32(r1) PUSH_r32(r1)
/* NOTE(review): POP_r32/PUSH_r32/PUSH_imm32 appear defined multiple times
 * below; gaps in the original line numbering indicate intervening
 * preprocessor conditionals (e.g. a stack-position-tracking variant using
 * sh4_x86.stack_posn) not visible in this excerpt.  Confirm the variants
 * are mutually exclusive before editing this region. */
#define STACK_ALIGN 16
#define POP_r32(r1) OP(0x58 + r1); sh4_x86.stack_posn -= 4;
#define PUSH_r32(r1) OP(0x50 + r1); sh4_x86.stack_posn += 4;
#define PUSH_imm32(imm) OP(0x68); OP32(imm); sh4_x86.stack_posn += 4;
#define POP_r32(r1) OP(0x58 + r1)
#define PUSH_r32(r1) OP(0x50 + r1)
#define PUSH_imm32(imm) OP(0x68); OP32(imm)
#define POP_r32(r1) OP(0x58 + r1)
#define PUSH_r32(r1) OP(0x50 + r1)
/* Offset of a reg relative to the sh4r structure, biased by -128: sh4r
 * fields are addressed via EBP+disp8/disp32 (see MODRM_r32_sh4r), and the
 * bias lets the signed 8-bit displacement reach the first 256 bytes of
 * sh4r.
 * NOTE(review): this assumes the generated code keeps EBP pointing at
 * &sh4r + 128 — confirm against the translator prologue. */
#define REG_OFFSET(reg) (((char *)&sh4r.reg) - ((char *)&sh4r) - 128)

/* Named offsets for the commonly-used sh4r fields. */
#define R_T REG_OFFSET(t)
#define R_Q REG_OFFSET(q)
#define R_S REG_OFFSET(s)
#define R_M REG_OFFSET(m)
#define R_SR REG_OFFSET(sr)
#define R_GBR REG_OFFSET(gbr)
#define R_SSR REG_OFFSET(ssr)
#define R_SPC REG_OFFSET(spc)
#define R_VBR REG_OFFSET(vbr)
/* R_MACH addresses mac+4 — presumably the high half of a 64-bit mac field
 * on a little-endian host (R_MACL being the low half); confirm sh4r
 * layout.  The expansion is parenthesized so it behaves as a single
 * expression under casts and operators (previously `REG_OFFSET(mac)+4`
 * would bind a prefix cast to REG_OFFSET(mac) only). */
#define R_MACH (REG_OFFSET(mac)+4)
#define R_MACL REG_OFFSET(mac)
#define R_PC REG_OFFSET(pc)
#define R_NEW_PC REG_OFFSET(new_pc)
#define R_PR REG_OFFSET(pr)
#define R_SGR REG_OFFSET(sgr)
#define R_FPUL REG_OFFSET(fpul)
#define R_FPSCR REG_OFFSET(fpscr)
#define R_DBR REG_OFFSET(dbr)
112 /**************** Basic X86 operations *********************/
113 /* Note: operands follow SH4 convention (source, dest) rather than x86
114 * conventions (dest, source)
/* Two-reg modrm form - first arg is the r32 reg, second arg is the r/m32
 * reg (mod=11, register-direct).  For single-operand opcode groups the
 * "other" argument carries the /digit opcode extension.  All arguments are
 * parenthesized so the macros remain correct when invoked with
 * expressions. */
#define MODRM_r32_rm32(r1,r2) OP(0xC0 | ((r1)<<3) | (r2))
#define MODRM_rm32_r32(r1,r2) OP(0xC0 | ((r2)<<3) | (r1))

/* ebp+disp8 modrm form */
#define MODRM_r32_ebp8(r1,disp) OP(0x45 | ((r1)<<3)); OP(disp)

/* ebp+disp32 modrm form */
#define MODRM_r32_ebp32(r1,disp) OP(0x85 | ((r1)<<3)); OP32(disp)

/* EBP-relative access to an sh4r field: disp8 when the offset fits,
 * disp32 otherwise.  Parenthesizing disp fixes a precedence bug: with an
 * additive argument such as R_MACH's expansion, `(unsigned char)disp`
 * previously cast only the first operand of the addition. */
#define MODRM_r32_sh4r(r1,disp) if((disp)>127){ MODRM_r32_ebp32(r1,(disp));}else{ MODRM_r32_ebp8(r1,(unsigned char)(disp)); }

/* REX.W prefix (64-bit operand size; 64-bit hosts only) */
#define REXW() OP(0x48)
/* ALU operations.  Operands follow the (source, dest) convention; for
 * immediate-operand groups (0x80/0x81/0x83) the second argument of the
 * modrm macro is the /digit opcode extension (/0=ADD, /4=AND, /7=CMP).
 * "imm8s" denotes an 8-bit immediate sign-extended by the CPU. */
#define ADD_sh4r_r32(disp,r1) OP(0x03); MODRM_r32_sh4r(r1,disp)
#define ADD_r32_sh4r(r1,disp) OP(0x01); MODRM_r32_sh4r(r1,disp)
#define ADD_r32_r32(r1,r2) OP(0x03); MODRM_rm32_r32(r1,r2)
#define ADD_imm8s_r32(imm,r1) OP(0x83); MODRM_rm32_r32(r1, 0); OP(imm)
#define ADD_imm8s_sh4r(imm,disp) OP(0x83); MODRM_r32_sh4r(0,disp); OP(imm)
#define ADD_imm32_r32(imm32,r1) OP(0x81); MODRM_rm32_r32(r1,0); OP32(imm32)
#define ADC_r32_r32(r1,r2) OP(0x13); MODRM_rm32_r32(r1,r2)
#define ADC_sh4r_r32(disp,r1) OP(0x13); MODRM_r32_sh4r(r1,disp)
#define ADC_r32_sh4r(r1,disp) OP(0x11); MODRM_r32_sh4r(r1,disp)
#define AND_r32_r32(r1,r2) OP(0x23); MODRM_rm32_r32(r1,r2)
#define AND_imm8_r8(imm8, r1) OP(0x80); MODRM_rm32_r32(r1,4); OP(imm8)
#define AND_imm8s_r32(imm8,r1) OP(0x83); MODRM_rm32_r32(r1,4); OP(imm8)
#define AND_imm32_r32(imm,r1) OP(0x81); MODRM_rm32_r32(r1,4); OP32(imm)
/* Indirect call through a register (0xFF /2) */
#define CALL_r32(r1) OP(0xFF); MODRM_rm32_r32(r1,2)
/* Carry-flag manipulation: clear / complement */
#define CLC() OP(0xF8)
#define CMC() OP(0xF5)
#define CMP_sh4r_r32(disp,r1) OP(0x3B); MODRM_r32_sh4r(r1,disp)
#define CMP_r32_r32(r1,r2) OP(0x3B); MODRM_rm32_r32(r1,r2)
#define CMP_imm32_r32(imm32, r1) OP(0x81); MODRM_rm32_r32(r1,7); OP32(imm32)
#define CMP_imm8s_r32(imm,r1) OP(0x83); MODRM_rm32_r32(r1,7); OP(imm)
/* CMP [sh4r+disp], imm8s (0x83 /7 ib).  Semicolon added after the modrm
 * macro for consistency with the other imm8s forms (the old expansion was
 * accidentally valid only because MODRM_r32_sh4r ends in a block). */
#define CMP_imm8s_sh4r(imm,disp) OP(0x83); MODRM_r32_sh4r(7,disp); OP(imm)
/* INC/DEC r32.  The one-byte 0x40+r / 0x48+r encodings are REX prefixes in
 * 64-bit mode, so on SIZEOF_VOID_P == 8 hosts emit the two-byte FF /0 and
 * FF /1 r/m32 forms instead (identical semantics); 32-bit hosts keep the
 * original short encodings. */
#if SIZEOF_VOID_P == 8
#define DEC_r32(r1) OP(0xFF); MODRM_rm32_r32(r1,1)
#define INC_r32(r1) OP(0xFF); MODRM_rm32_r32(r1,0)
#else
#define DEC_r32(r1) OP(0x48+r1)
#define INC_r32(r1) OP(0x40+r1)
#endif
/* IMUL EDX:EAX <- EAX * r32 (0xF7 /5, signed) */
#define IMUL_r32(r1) OP(0xF7); MODRM_rm32_r32(r1,5)
/* Short unconditional jump; the -1 placeholder displacement is patched
 * later by JMP_TARGET(label). */
#define JMP_rel8(label) OP(0xEB); MARK_JMP8(label); OP(-1);
#define LEA_sh4r_r32(disp,r1) OP(0x8D); MODRM_r32_sh4r(r1,disp)
/* Moves — (source, dest) argument order per the file convention */
#define MOV_r32_r32(r1,r2) OP(0x89); MODRM_r32_rm32(r1,r2)
#define MOV_r32_sh4r(r1,disp) OP(0x89); MODRM_r32_sh4r(r1,disp)
#define MOV_moff32_EAX(off) OP(0xA1); OPPTR(off)
#define MOV_sh4r_r32(disp, r1) OP(0x8B); MODRM_r32_sh4r(r1,disp)
/* Register-indirect moves: mod=00 modrm byte built inline.
 * NOTE(review): r1 must not be ESP(4)/EBP(5), which have special mod=00
 * encodings (SIB / disp32) — confirm callers never pass them. */
#define MOV_r32_r32ind(r2,r1) OP(0x89); OP(0 + (r2<<3) + r1 )
#define MOV_r32ind_r32(r1,r2) OP(0x8B); OP(0 + (r2<<3) + r1 )
/* Sign/zero extension of 8/16-bit sources into a 32-bit register */
#define MOVSX_r8_r32(r1,r2) OP(0x0F); OP(0xBE); MODRM_rm32_r32(r1,r2)
#define MOVSX_r16_r32(r1,r2) OP(0x0F); OP(0xBF); MODRM_rm32_r32(r1,r2)
#define MOVZX_r8_r32(r1,r2) OP(0x0F); OP(0xB6); MODRM_rm32_r32(r1,r2)
#define MOVZX_r16_r32(r1,r2) OP(0x0F); OP(0xB7); MODRM_rm32_r32(r1,r2)
/* 0xF7 group: /4=MUL (unsigned, EDX:EAX), /3=NEG, /2=NOT */
#define MUL_r32(r1) OP(0xF7); MODRM_rm32_r32(r1,4)
#define NEG_r32(r1) OP(0xF7); MODRM_rm32_r32(r1,3)
#define NOT_r32(r1) OP(0xF7); MODRM_rm32_r32(r1,2)
#define OR_r32_r32(r1,r2) OP(0x0B); MODRM_rm32_r32(r1,r2)
#define OR_imm8_r8(imm,r1) OP(0x80); MODRM_rm32_r32(r1,1); OP(imm)
#define OR_imm32_r32(imm,r1) OP(0x81); MODRM_rm32_r32(r1,1); OP32(imm)
#define OR_sh4r_r32(disp,r1) OP(0x0B); MODRM_r32_sh4r(r1,disp)
/* Rotate/shift group: 0xD1 = by 1, 0xD3 = by CL, 0xC1 = by imm8;
 * /digit selects ROL(0) ROR(1) RCL(2) RCR(3) SHL(4) SHR(5) SAR(7) */
#define RCL1_r32(r1) OP(0xD1); MODRM_rm32_r32(r1,2)
#define RCR1_r32(r1) OP(0xD1); MODRM_rm32_r32(r1,3)
#define RET() OP(0xC3)
#define ROL1_r32(r1) OP(0xD1); MODRM_rm32_r32(r1,0)
#define ROR1_r32(r1) OP(0xD1); MODRM_rm32_r32(r1,1)
#define SAR1_r32(r1) OP(0xD1); MODRM_rm32_r32(r1,7)
#define SAR_imm8_r32(imm,r1) OP(0xC1); MODRM_rm32_r32(r1,7); OP(imm)
#define SAR_r32_CL(r1) OP(0xD3); MODRM_rm32_r32(r1,7)
#define SBB_r32_r32(r1,r2) OP(0x1B); MODRM_rm32_r32(r1,r2)
#define SHL1_r32(r1) OP(0xD1); MODRM_rm32_r32(r1,4)
#define SHL_r32_CL(r1) OP(0xD3); MODRM_rm32_r32(r1,4)
#define SHL_imm8_r32(imm,r1) OP(0xC1); MODRM_rm32_r32(r1,4); OP(imm)
#define SHR1_r32(r1) OP(0xD1); MODRM_rm32_r32(r1,5)
#define SHR_r32_CL(r1) OP(0xD3); MODRM_rm32_r32(r1,5)
#define SHR_imm8_r32(imm,r1) OP(0xC1); MODRM_rm32_r32(r1,5); OP(imm)
/* Set carry flag */
#define STC() OP(0xF9)
#define SUB_r32_r32(r1,r2) OP(0x2B); MODRM_rm32_r32(r1,r2)
#define SUB_sh4r_r32(disp,r1) OP(0x2B); MODRM_r32_sh4r(r1, disp)
/* SUB implemented as ADD of the negated immediate.
 * NOTE(review): imm == -128 would wrap (-(-128) does not fit in imm8s) —
 * TODO confirm no caller passes -128. */
#define SUB_imm8s_r32(imm,r1) ADD_imm8s_r32(-(imm),r1)
#define TEST_r8_r8(r1,r2) OP(0x84); MODRM_r32_rm32(r1,r2)
#define TEST_r32_r32(r1,r2) OP(0x85); MODRM_rm32_r32(r1,r2)
#define TEST_imm8_r8(imm8,r1) OP(0xF6); MODRM_rm32_r32(r1,0); OP(imm8)
#define TEST_imm32_r32(imm,r1) OP(0xF7); MODRM_rm32_r32(r1,0); OP32(imm)
#define XCHG_r8_r8(r1,r2) OP(0x86); MODRM_rm32_r32(r1,r2)
#define XOR_r8_r8(r1,r2) OP(0x32); MODRM_rm32_r32(r1,r2)
#define XOR_imm8s_r32(imm,r1) OP(0x83); MODRM_rm32_r32(r1,6); OP(imm)
#define XOR_r32_r32(r1,r2) OP(0x33); MODRM_rm32_r32(r1,r2)
#define XOR_sh4r_r32(disp,r1) OP(0x33); MODRM_r32_sh4r(r1,disp)
#define XOR_imm32_r32(imm,r1) OP(0x81); MODRM_rm32_r32(r1,6); OP32(imm)
/* Floating point ops (x87).  The _st(st) forms operate on ST(st),ST(0)
 * and pop the stack; _st0 forms operate on ST(0) in place. */
#define FABS_st0() OP(0xD9); OP(0xE1)
#define FADDP_st(st) OP(0xDE); OP(0xC0+st)
#define FCHS_st0() OP(0xD9); OP(0xE0)
#define FCOMIP_st(st) OP(0xDF); OP(0xF0+st)
#define FDIVP_st(st) OP(0xDE); OP(0xF8+st)
#define FILD_r32ind(r32) OP(0xDB); OP(0x00+r32)
/* FLDZ / FLD1: push +0.0 / +1.0 onto the x87 stack */
#define FLD0_st0() OP(0xD9); OP(0xEE);
#define FLD1_st0() OP(0xD9); OP(0xE8);
/* Load/store single (f) and double (d) precision from/to sh4r fields */
#define FLDf_sh4r(disp) OP(0xD9); MODRM_r32_sh4r(0, disp)
#define FLDd_sh4r(disp) OP(0xDD); MODRM_r32_sh4r(0, disp)
/* FPU control word load/store via register-indirect memory operand */
#define FLDCW_r32ind(r32) OP(0xD9); OP(0x28+r32)
#define FMULP_st(st) OP(0xDE); OP(0xC8+st)
#define FNSTCW_r32ind(r32) OP(0xD9); OP(0x38+r32)
/* FFREE ST(0); FINCSTP — discard ST(0) without storing it */
#define FPOP_st() OP(0xDD); OP(0xC0); OP(0xD9); OP(0xF7)
#define FSTPf_sh4r(disp) OP(0xD9); MODRM_r32_sh4r(3, disp)
#define FSTPd_sh4r(disp) OP(0xDD); MODRM_r32_sh4r(3, disp)
#define FSUBP_st(st) OP(0xDE); OP(0xE8+st)
#define FSQRT_st0() OP(0xD9); OP(0xFA)

/* Integer load/store plus upper-case aliases of the FLDf/FLDd/FSTPf/FSTPd
 * forms above (identical encodings).
 * NOTE(review): consider consolidating on one naming convention. */
#define FILD_sh4r(disp) OP(0xDB); MODRM_r32_sh4r(0, disp)
#define FLDF_sh4r(disp) OP(0xD9); MODRM_r32_sh4r(0, disp)
#define FLDD_sh4r(disp) OP(0xDD); MODRM_r32_sh4r(0, disp)
#define FISTP_sh4r(disp) OP(0xDB); MODRM_r32_sh4r(3, disp)
#define FSTPF_sh4r(disp) OP(0xD9); MODRM_r32_sh4r(3,disp)
#define FSTPD_sh4r(disp) OP(0xDD); MODRM_r32_sh4r(3,disp)
/* Conditional branches, short (rel8) form.  Each emits a -1 placeholder
 * displacement to be patched later with JMP_TARGET(label).
 * JC/JNAE share opcode 0x72 and JAE/JNC share 0x73 (aliased conditions). */
#define JE_rel8(label) OP(0x74); MARK_JMP8(label); OP(-1)
#define JA_rel8(label) OP(0x77); MARK_JMP8(label); OP(-1)
#define JAE_rel8(label) OP(0x73); MARK_JMP8(label); OP(-1)
#define JG_rel8(label) OP(0x7F); MARK_JMP8(label); OP(-1)
#define JGE_rel8(label) OP(0x7D); MARK_JMP8(label); OP(-1)
#define JC_rel8(label) OP(0x72); MARK_JMP8(label); OP(-1)
#define JO_rel8(label) OP(0x70); MARK_JMP8(label); OP(-1)
#define JNE_rel8(label) OP(0x75); MARK_JMP8(label); OP(-1)
#define JNA_rel8(label) OP(0x76); MARK_JMP8(label); OP(-1)
#define JNAE_rel8(label) OP(0x72); MARK_JMP8(label); OP(-1)
#define JNG_rel8(label) OP(0x7E); MARK_JMP8(label); OP(-1)
#define JNGE_rel8(label) OP(0x7C); MARK_JMP8(label); OP(-1)
#define JNC_rel8(label) OP(0x73); MARK_JMP8(label); OP(-1)
#define JNO_rel8(label) OP(0x71); MARK_JMP8(label); OP(-1)
#define JNS_rel8(label) OP(0x79); MARK_JMP8(label); OP(-1)
#define JS_rel8(label) OP(0x78); MARK_JMP8(label); OP(-1)
251 /** JMP relative 8 or 32 depending on size of rel. rel offset
252 * from the start of the instruction (not end)
/* rel is measured from the START of this instruction rather than the end
 * (raw x86 encoding): the short form is 2 bytes and the near form 5,
 * hence the -2/-5 displacement adjustments.  The short form is used
 * exactly when rel-2 fits in a signed byte, i.e. rel in [-126,129]. */
#define JMP_rel(rel) if((rel)<-126||(rel)>129) { OP(0xE9); OP32((rel)-5); } else { OP(0xEB); OP((rel)-2); }
/* 32-bit long forms w/ backpatching to an exception routine: emit
 * JMP/Jcc rel32 (0xE9 / 0x0F 0x8x) with a zero displacement and register
 * the displacement location via sh4_x86_add_backpatch(xlat_output, pc,
 * exc) so it can be filled in later.  JNAE aliases JC (0x82) and JNC
 * aliases JAE (0x83). */
#define JMP_exc(exc) OP(0xE9); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
#define JE_exc(exc) OP(0x0F); OP(0x84); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
#define JA_exc(exc) OP(0x0F); OP(0x87); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
#define JAE_exc(exc) OP(0x0F); OP(0x83); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
#define JG_exc(exc) OP(0x0F); OP(0x8F); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
#define JGE_exc(exc) OP(0x0F); OP(0x8D); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
#define JC_exc(exc) OP(0x0F); OP(0x82); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
#define JO_exc(exc) OP(0x0F); OP(0x80); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
#define JNE_exc(exc) OP(0x0F); OP(0x85); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
#define JNA_exc(exc) OP(0x0F); OP(0x86); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
#define JNAE_exc(exc) OP(0x0F);OP(0x82); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
#define JNG_exc(exc) OP(0x0F); OP(0x8E); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
#define JNGE_exc(exc) OP(0x0F);OP(0x8C); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
#define JNC_exc(exc) OP(0x0F); OP(0x83); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
#define JNO_exc(exc) OP(0x0F); OP(0x81); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
/* Conditional moves, register to register (0x0F 0x4x = CMOVcc r32, r/m32),
 * (source, dest) argument order. */
#define CMOVE_r32_r32(r1,r2) OP(0x0F); OP(0x44); MODRM_rm32_r32(r1,r2)
#define CMOVA_r32_r32(r1,r2) OP(0x0F); OP(0x47); MODRM_rm32_r32(r1,r2)
#define CMOVAE_r32_r32(r1,r2) OP(0x0F); OP(0x43); MODRM_rm32_r32(r1,r2)
#define CMOVG_r32_r32(r1,r2) OP(0x0F); OP(0x4F); MODRM_rm32_r32(r1,r2)
#define CMOVGE_r32_r32(r1,r2) OP(0x0F); OP(0x4D); MODRM_rm32_r32(r1,r2)
#define CMOVC_r32_r32(r1,r2) OP(0x0F); OP(0x42); MODRM_rm32_r32(r1,r2)
#define CMOVO_r32_r32(r1,r2) OP(0x0F); OP(0x40); MODRM_rm32_r32(r1,r2)
/* Conditional setcc - writeback to sh4r.t (or any sh4r offset): 0x0F 0x9x
 * = SETcc r/m8; the /0 in the modrm is the required opcode extension.
 * NOTE(review): only a single byte is stored — assumes the target field's
 * upper bytes are already zero (or that it is byte-sized); confirm the
 * sh4r declaration. */
#define SETE_sh4r(disp) OP(0x0F); OP(0x94); MODRM_r32_sh4r(0, disp);
#define SETA_sh4r(disp) OP(0x0F); OP(0x97); MODRM_r32_sh4r(0, disp);
#define SETAE_sh4r(disp) OP(0x0F); OP(0x93); MODRM_r32_sh4r(0, disp);
#define SETG_sh4r(disp) OP(0x0F); OP(0x9F); MODRM_r32_sh4r(0, disp);
#define SETGE_sh4r(disp) OP(0x0F); OP(0x9D); MODRM_r32_sh4r(0, disp);
#define SETC_sh4r(disp) OP(0x0F); OP(0x92); MODRM_r32_sh4r(0, disp);
#define SETO_sh4r(disp) OP(0x0F); OP(0x90); MODRM_r32_sh4r(0, disp);

/* Negated conditions; SETNAE aliases SETC (0x92) and SETNC aliases
 * SETAE (0x93). */
#define SETNE_sh4r(disp) OP(0x0F); OP(0x95); MODRM_r32_sh4r(0, disp);
#define SETNA_sh4r(disp) OP(0x0F); OP(0x96); MODRM_r32_sh4r(0, disp);
#define SETNAE_sh4r(disp) OP(0x0F); OP(0x92); MODRM_r32_sh4r(0, disp);
#define SETNG_sh4r(disp) OP(0x0F); OP(0x9E); MODRM_r32_sh4r(0, disp);
#define SETNGE_sh4r(disp) OP(0x0F); OP(0x9C); MODRM_r32_sh4r(0, disp);
#define SETNC_sh4r(disp) OP(0x0F); OP(0x93); MODRM_r32_sh4r(0, disp);
#define SETNO_sh4r(disp) OP(0x0F); OP(0x91); MODRM_r32_sh4r(0, disp);
/* Convenience forms: write the condition straight to sh4r.t */
#define SETE_t() SETE_sh4r(R_T)
#define SETA_t() SETA_sh4r(R_T)
#define SETAE_t() SETAE_sh4r(R_T)
#define SETG_t() SETG_sh4r(R_T)
#define SETGE_t() SETGE_sh4r(R_T)
#define SETC_t() SETC_sh4r(R_T)
#define SETO_t() SETO_sh4r(R_T)
#define SETNE_t() SETNE_sh4r(R_T)

/* SETC into the low byte of a register (0x0F 0x92 = SETC r/m8) */
#define SETC_r8(r1) OP(0x0F); OP(0x92); MODRM_rm32_r32(r1, 0)

/* Pseudo-op Load carry from T: CMP [EBP+t], #01 ; CMC
 * CMP sets CF iff t < 1 (i.e. t == 0); CMC then inverts it, leaving
 * CF == t for the 0/1 values T takes. */
#define LDC_t() OP(0x83); MODRM_r32_sh4r(7,R_T); OP(0x01); CMC()
/* SSE instructions (MOVSHDUP, MOVSLDUP and HADDPS are SSE3).  xmm register
 * numbers are encoded in the modrm byte exactly like r32 numbers.
 * NOTE(review): MOVAPS requires a 16-byte-aligned memory operand — assumes
 * the sh4r FP banks it is used on are suitably aligned; confirm. */
#define ADDPS_xmm_xmm(xmm1,xmm2) OP(0x0F); OP(0x58); MODRM_rm32_r32(xmm1,xmm2)
#define HADDPS_xmm_xmm(xmm1,xmm2) OP(0xF2); OP(0x0F); OP(0x7C); MODRM_rm32_r32(xmm1,xmm2)
#define MOVHLPS_xmm_xmm(xmm1,xmm2) OP(0x0F); OP(0x12); MODRM_rm32_r32(xmm1,xmm2)
#define MOVLHPS_xmm_xmm(xmm1,xmm2) OP(0x0F); OP(0x16); MODRM_rm32_r32(xmm1,xmm2)
#define MOVSHDUP_sh4r_xmm(disp,xmm) OP(0xF3); OP(0x0F); OP(0x16); MODRM_r32_sh4r(xmm,disp)
#define MOVSLDUP_sh4r_xmm(disp,xmm) OP(0xF3); OP(0x0F); OP(0x12); MODRM_r32_sh4r(xmm,disp)
#define MOVAPS_sh4r_xmm(disp, xmm) OP(0x0F); OP(0x28); MODRM_r32_sh4r(xmm,disp)
#define MOVAPS_xmm_sh4r(xmm,disp) OP(0x0F); OP(0x29); MODRM_r32_sh4r(xmm,disp)
#define MOVAPS_xmm_xmm(xmm1,xmm2) OP(0x0F); OP(0x28); MODRM_rm32_r32(xmm1,xmm2)
#define MOVSS_xmm_sh4r(xmm,disp) OP(0xF3); OP(0x0F); OP(0x11); MODRM_r32_sh4r(xmm,disp)
#define MULPS_sh4r_xmm(disp, xmm) OP(0x0F); OP(0x59); MODRM_r32_sh4r(xmm,disp)
#define MULPS_xmm_xmm(xmm1,xmm2) OP(0x0F); OP(0x59); MODRM_rm32_r32(xmm1,xmm2)
#define SHUFPS_sh4r_xmm(disp,xmm,imm8) OP(0x0F); OP(0xC6); MODRM_r32_sh4r(xmm, disp); OP(imm8)
#define SHUFPS_xmm_xmm(xmm1,xmm2,imm8) OP(0x0F); OP(0xC6); MODRM_rm32_r32(xmm1,xmm2); OP(imm8)
#define UNPCKHPS_xmm_xmm(xmm1,xmm2) OP(0x0F); OP(0x15); MODRM_rm32_r32(xmm1,xmm2)
#define UNPCKLPS_xmm_xmm(xmm1,xmm2) OP(0x0F); OP(0x14); MODRM_rm32_r32(xmm1,xmm2)
337 #endif /* !lxdream_x86op_H */
.