Search
lxdream.org :: lxdream/src/sh4/x86op.h
lxdream 0.9.1
released Jun 29
Download Now
filename src/sh4/x86op.h
changeset 936:f394309c399a
prev 930:07e5b11419db
next 939:6f2302afeb89
author nkeynes
date Sat Dec 27 02:59:35 2008 +0000 (13 years ago)
branch lxdream-mem
permissions -rw-r--r--
last change Replace fpscr_mask/fpscr flags in xlat_cache_block with a single xlat_sh4_mode,
which tracks the field of the same name in sh4r - actually a little faster this way.
Now depends on SR.MD, FPSCR.PR and FPSCR.SZ (although it doesn't benefit from the SR
flag yet).

Also fixed the failure to check the flags in the common case (code address returned
by previous block) which took away the performance benefits, but oh well.
view annotate diff log raw
     1 /**
     2  * $Id$
     3  * 
     4  * Definitions of x86 opcodes for use by the translator.
     5  *
     6  * Copyright (c) 2007 Nathan Keynes.
     7  *
     8  * This program is free software; you can redistribute it and/or modify
     9  * it under the terms of the GNU General Public License as published by
    10  * the Free Software Foundation; either version 2 of the License, or
    11  * (at your option) any later version.
    12  *
    13  * This program is distributed in the hope that it will be useful,
    14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
    15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    16  * GNU General Public License for more details.
    17  */
    19 #ifndef lxdream_x86op_H
    20 #define lxdream_x86op_H 1
    22 #ifdef __cplusplus
    23 extern "C" {
    24 #endif
/* x86 general-purpose register numbers, as encoded in the reg and r/m
 * fields of a ModR/M byte (standard hardware numbering for the 32-bit
 * registers). R_NONE marks "no register". */
#define R_NONE -1
#define R_EAX 0
#define R_ECX 1
#define R_EDX 2
#define R_EBX 3
#define R_ESP 4
#define R_EBP 5
#define R_ESI 6
#define R_EDI 7

/* 8-bit register numbers (legacy byte-register encoding: 4-7 are the
 * high-byte registers AH/CH/DH/BH, which share numbers with ESP..EDI). */
#define R_AL 0
#define R_CL 1
#define R_DL 2
#define R_BL 3
#define R_AH 4
#define R_CH 5
#define R_DH 6
#define R_BH 7
/* Forward-jump fixups. MARK_JMP8/MARK_JMP32 record the current output
 * position (the address where a jump displacement will be written) in a
 * local named after the label; JMP_TARGET(x) later patches that
 * displacement so the jump lands at the current xlat_output.
 *
 * For the 8-bit form, the displacement byte is pre-seeded with -1 by the
 * Jcc_rel8/JMP_rel8 macros, so "*mark += (xlat_output - mark)" yields
 * xlat_output - (mark+1), i.e. the correct rel8 measured from the end of
 * the instruction.
 *
 * NOTE(review): for MARK_JMP32 the mark is a uint32_t*, so the pointer
 * subtraction in JMP_TARGET is measured in 4-byte units rather than
 * bytes, and the -1 pre-seed trick does not apply — this looks incorrect
 * for 32-bit marks. No 32-bit user is visible in this file; verify
 * before using MARK_JMP32 with JMP_TARGET. */
#define MARK_JMP8(x) uint8_t *_mark_jmp_##x = xlat_output
#define MARK_JMP32(x) uint32_t *_mark_jmp_##x = (uint32_t *)xlat_output
#define JMP_TARGET(x) *_mark_jmp_##x += (xlat_output - _mark_jmp_##x)
    49 #define OP(x) *xlat_output++ = (x)
    50 #define OP32(x) *((uint32_t *)xlat_output) = (x); xlat_output+=4
    51 #define OP64(x) *((uint64_t *)xlat_output) = (x); xlat_output+=8
    52 #if SIZEOF_VOID_P == 8
    53 #define OPPTR(x) OP64((uint64_t)(x))
    54 #define AND_imm8s_rptr(imm, r1)  REXW(); AND_imm8s_r32( imm, r1 )
    55 #define LEA_sh4r_rptr(disp, r1) REXW(); LEA_sh4r_r32(disp,r1)
    56 #define MOV_moffptr_EAX(offptr)  REXW(); MOV_moff32_EAX( offptr )
    57 #define load_exc_backpatch( x86reg )  REXW(); OP(0xB8 + x86reg); sh4_x86_add_backpatch( xlat_output, pc, -2 ); OP64( 0 )
    58 #else /* 32-bit system */
    59 #define OPPTR(x) OP32((uint32_t)(x))
    60 #define AND_imm8s_rptr(imm, r1) AND_imm8s_r32( imm, r1 )
    61 #define LEA_sh4r_rptr(disp, r1) LEA_sh4r_r32(disp,r1)
    62 #define MOV_moffptr_EAX(offptr) MOV_moff32_EAX( offptr )
    63 #define load_exc_backpatch( x86reg )  OP(0xB8 + x86reg); sh4_x86_add_backpatch( xlat_output, pc, -2 ); OP32( 0 )
    64 #endif
    65 #define STACK_ALIGN 16
    66 #define POP_r32(r1)           OP(0x58 + r1)
    67 #define PUSH_r32(r1)          OP(0x50 + r1)
    68 #define PUSH_imm32(imm)       OP(0x68); OP32(imm)
    69 #define PUSH_imm64(imm)       REXW(); OP(0x68); OP64(imm);
    71 #ifdef STACK_ALIGN
    72 #else
    73 #define POP_r32(r1)           OP(0x58 + r1)
    74 #define PUSH_r32(r1)          OP(0x50 + r1)
    75 #endif
    78 /* Offset of a reg relative to the sh4r structure */
    79 #define REG_OFFSET(reg)  (((char *)&sh4r.reg) - ((char *)&sh4r) - 128)
    81 #define R_T   REG_OFFSET(t)
    82 #define R_Q   REG_OFFSET(q)
    83 #define R_S   REG_OFFSET(s)
    84 #define R_M   REG_OFFSET(m)
    85 #define R_SR  REG_OFFSET(sr)
    86 #define R_GBR REG_OFFSET(gbr)
    87 #define R_SSR REG_OFFSET(ssr)
    88 #define R_SPC REG_OFFSET(spc)
    89 #define R_VBR REG_OFFSET(vbr)
    90 #define R_MACH REG_OFFSET(mac)+4
    91 #define R_MACL REG_OFFSET(mac)
    92 #define R_PC REG_OFFSET(pc)
    93 #define R_NEW_PC REG_OFFSET(new_pc)
    94 #define R_PR REG_OFFSET(pr)
    95 #define R_SGR REG_OFFSET(sgr)
    96 #define R_FPUL REG_OFFSET(fpul)
    97 #define R_FPSCR REG_OFFSET(fpscr)
    98 #define R_DBR REG_OFFSET(dbr)
   100 /**************** Basic X86 operations *********************/
   101 /* Note: operands follow SH4 convention (source, dest) rather than x86 
   102  * conventions (dest, source)
   103  */
   105 /* Two-reg modrm form - first arg is the r32 reg, second arg is the r/m32 reg */
   106 #define MODRM_r32_rm32(r1,r2) OP(0xC0 | (r1<<3) | r2)
   107 #define MODRM_rm32_r32(r1,r2) OP(0xC0 | (r2<<3) | r1)
   109 /* ebp+disp8 modrm form */
   110 #define MODRM_r32_ebp8(r1,disp) OP(0x45 | (r1<<3)); OP(disp)
   112 /* ebp+disp32 modrm form */
   113 #define MODRM_r32_ebp32(r1,disp) OP(0x85 | (r1<<3)); OP32(disp)
   115 /* esp+disp8 modrm+sib form */
   116 #define MODRM_r32_esp8(r1,disp) OP(0x44 | (r1<<3)); OP(0x24); OP(disp)
   118 #define MODRM_r32_sh4r(r1,disp) if(disp>127){ MODRM_r32_ebp32(r1,disp);}else{ MODRM_r32_ebp8(r1,(unsigned char)disp); }
   120 #define REXW() OP(0x48)
/* Major opcodes */
/* Naming: <op>_<src>_<dst>. "sh4r" operands are EBP-relative slots in
 * the sh4r structure (addressed via MODRM_r32_sh4r), "imm8s" is a
 * sign-extended 8-bit immediate (0x83 opcode group). The small integer
 * constant passed in the reg position of MODRM_* for immediate-group
 * opcodes is the /digit opcode extension. These macros expand to
 * multiple statements — use only as complete statements. */
#define ADD_sh4r_r32(disp,r1) OP(0x03); MODRM_r32_sh4r(r1,disp)
#define ADD_r32_sh4r(r1,disp) OP(0x01); MODRM_r32_sh4r(r1,disp)
#define ADD_r32_r32(r1,r2) OP(0x03); MODRM_rm32_r32(r1,r2)
#define ADD_imm8s_r32(imm,r1) OP(0x83); MODRM_rm32_r32(r1, 0); OP(imm)
#define ADD_imm8s_sh4r(imm,disp) OP(0x83); MODRM_r32_sh4r(0,disp); OP(imm)
#define ADD_imm8s_esp8(imm,disp) OP(0x83); MODRM_r32_esp8(0,disp); OP(imm)
#define ADD_imm32_r32(imm32,r1) OP(0x81); MODRM_rm32_r32(r1,0); OP32(imm32)
#define ADC_r32_r32(r1,r2)    OP(0x13); MODRM_rm32_r32(r1,r2)
#define ADC_sh4r_r32(disp,r1) OP(0x13); MODRM_r32_sh4r(r1,disp)
#define ADC_r32_sh4r(r1,disp) OP(0x11); MODRM_r32_sh4r(r1,disp)
#define AND_r32_r32(r1,r2)    OP(0x23); MODRM_rm32_r32(r1,r2)
#define AND_imm8_r8(imm8, r1) OP(0x80); MODRM_rm32_r32(r1,4); OP(imm8)
#define AND_imm8s_r32(imm8,r1) OP(0x83); MODRM_rm32_r32(r1,4); OP(imm8)
#define AND_imm32_r32(imm,r1) OP(0x81); MODRM_rm32_r32(r1,4); OP32(imm)
#define AND_sh4r_r32(disp,r1)   OP(0x23); MODRM_r32_sh4r(r1, disp)
#define CALL_r32(r1)          OP(0xFF); MODRM_rm32_r32(r1,2)
/* call rel32: displacement computed from the current output position;
 * the -4 accounts for the 4 immediate bytes about to be emitted.
 * NOTE(review): rel32 reach is +/-2GB — on 64-bit hosts this assumes the
 * callee is within range of the translation cache; verify. */
#define CALL_ptr(ptr)         OP(0xE8); OP32( (((char *)ptr) - (char *)xlat_output) - 4)
#define CALL_sh4r(disp)       OP(0xFF); MODRM_r32_sh4r(2, disp)
/* call [reg+disp8] */
#define CALL_r32disp8(r1,disp)  OP(0xFF); OP(0x50 + r1); OP(disp)
#define CLC()                 OP(0xF8)
#define CMC()                 OP(0xF5)
#define CMP_sh4r_r32(disp,r1)  OP(0x3B); MODRM_r32_sh4r(r1,disp)
#define CMP_r32_r32(r1,r2)    OP(0x3B); MODRM_rm32_r32(r1,r2)
#define CMP_imm32_r32(imm32, r1) OP(0x81); MODRM_rm32_r32(r1,7); OP32(imm32)
#define CMP_imm8s_r32(imm,r1) OP(0x83); MODRM_rm32_r32(r1,7); OP(imm)
   148 #define CMP_imm8s_sh4r(imm,disp) OP(0x83); MODRM_r32_sh4r(7,disp) OP(imm)
/* NOTE(review): the single-byte INC/DEC encodings (0x40+r / 0x48+r) are
 * REX prefixes in 64-bit mode — these two macros are only valid when
 * generating 32-bit code; verify they are unused on x86-64 hosts. */
#define DEC_r32(r1)           OP(0x48+r1)
#define IMUL_r32(r1)          OP(0xF7); MODRM_rm32_r32(r1,5)
#define INC_r32(r1)           OP(0x40+r1)
/* jmp rel8 with placeholder displacement (-1), patched by JMP_TARGET */
#define JMP_rel8(label)  OP(0xEB); MARK_JMP8(label); OP(-1); 
#define LEA_sh4r_r32(disp,r1) OP(0x8D); MODRM_r32_sh4r(r1,disp)
#define LEA_r32disp8_r32(r1, disp, r2) OP(0x8D); OP( 0x40 + (r2<<3) + r1); OP(disp)
#define MOV_r32_r32(r1,r2)    OP(0x89); MODRM_r32_rm32(r1,r2)
#define MOV_r32_sh4r(r1,disp) OP(0x89); MODRM_r32_sh4r(r1,disp)
/* mov eax, [moffs] — address is pointer-sized (see OPPTR) */
#define MOV_moff32_EAX(off)   OP(0xA1); OPPTR(off)
#define MOV_sh4r_r32(disp, r1)  OP(0x8B); MODRM_r32_sh4r(r1,disp)
#define MOV_r32_r32ind(r2,r1) OP(0x89); OP(0 + (r2<<3) + r1 )
#define MOV_r32ind_r32(r1,r2) OP(0x8B); OP(0 + (r2<<3) + r1 )
#define MOV_r32_r32disp32(r2,r1,disp)  OP(0x89); OP(0x80 + (r2<<3) + r1); OP32(disp)
/* mov [ebp + r1*1 + disp32], r2 (SIB form) */
#define MOV_r32_ebpr32disp32(r2,r1,disp)  OP(0x89); OP(0x84 + (r2<<3)); OP(0x05 + (r1<<3)); OP32(disp)
#define MOV_r32disp32_r32(r1,disp,r2)  OP(0x8B); OP(0x80 + (r2<<3) + r1); OP32(disp)
/* mov r2, [r1*4 + disp32] (scaled-index table lookup) */
#define MOV_r32disp32x4_r32(r1,disp,r2) OP(0x8B); OP(0x04 + (r2<<3)); OP(0x85+(r1<<3)); OP32(disp)
#define MOV_r32_esp8(r1,disp) OP(0x89); MODRM_r32_esp8(r1,disp)
#define MOV_esp8_r32(disp,r1) OP(0x8B); MODRM_r32_esp8(r1,disp)
#define MOVSX_r8_r32(r1,r2)   OP(0x0F); OP(0xBE); MODRM_rm32_r32(r1,r2)
#define MOVSX_r16_r32(r1,r2)  OP(0x0F); OP(0xBF); MODRM_rm32_r32(r1,r2)
#define MOVZX_r8_r32(r1,r2)   OP(0x0F); OP(0xB6); MODRM_rm32_r32(r1,r2)
#define MOVZX_r16_r32(r1,r2)  OP(0x0F); OP(0xB7); MODRM_rm32_r32(r1,r2)
#define MOVZX_sh4r8_r32(disp,r1) OP(0x0F); OP(0xB6); MODRM_r32_sh4r(r1,disp)
#define MOVZX_sh4r16_r32(disp,r1) OP(0x0F); OP(0xB7); MODRM_r32_sh4r(r1,disp)
#define MUL_r32(r1)           OP(0xF7); MODRM_rm32_r32(r1,4)
#define NEG_r32(r1)           OP(0xF7); MODRM_rm32_r32(r1,3)
#define NOT_r32(r1)           OP(0xF7); MODRM_rm32_r32(r1,2)
#define OR_r32_r32(r1,r2)     OP(0x0B); MODRM_rm32_r32(r1,r2)
#define OR_imm8_r8(imm,r1)    OP(0x80); MODRM_rm32_r32(r1,1); OP(imm)
#define OR_imm32_r32(imm,r1)  OP(0x81); MODRM_rm32_r32(r1,1); OP32(imm)
#define OR_sh4r_r32(disp,r1)  OP(0x0B); MODRM_r32_sh4r(r1,disp)
/* rotate/shift-by-1 (0xD1 group) and shift-by-CL (0xD3 group);
 * the /digit selects the operation */
#define RCL1_r32(r1)          OP(0xD1); MODRM_rm32_r32(r1,2)
#define RCR1_r32(r1)          OP(0xD1); MODRM_rm32_r32(r1,3)
#define RET()                 OP(0xC3)
#define ROL1_r32(r1)          OP(0xD1); MODRM_rm32_r32(r1,0)
#define ROR1_r32(r1)          OP(0xD1); MODRM_rm32_r32(r1,1)
#define SAR1_r32(r1)          OP(0xD1); MODRM_rm32_r32(r1,7)
#define SAR_imm8_r32(imm,r1)  OP(0xC1); MODRM_rm32_r32(r1,7); OP(imm)
#define SAR_r32_CL(r1)        OP(0xD3); MODRM_rm32_r32(r1,7)
#define SBB_r32_r32(r1,r2)    OP(0x1B); MODRM_rm32_r32(r1,r2)
#define SHL1_r32(r1)          OP(0xD1); MODRM_rm32_r32(r1,4)
#define SHL_r32_CL(r1)        OP(0xD3); MODRM_rm32_r32(r1,4)
#define SHL_imm8_r32(imm,r1)  OP(0xC1); MODRM_rm32_r32(r1,4); OP(imm)
#define SHR1_r32(r1)          OP(0xD1); MODRM_rm32_r32(r1,5)
#define SHR_r32_CL(r1)        OP(0xD3); MODRM_rm32_r32(r1,5)
#define SHR_imm8_r32(imm,r1)  OP(0xC1); MODRM_rm32_r32(r1,5); OP(imm)
#define STC()                 OP(0xF9)
#define SUB_r32_r32(r1,r2)    OP(0x2B); MODRM_rm32_r32(r1,r2)
#define SUB_sh4r_r32(disp,r1)  OP(0x2B); MODRM_r32_sh4r(r1, disp)
/* NOTE(review): encoded as ADD of the negated immediate — the result is
 * identical but CF/OF are set differently than a true SUB; verify that
 * no caller reads the flags after this macro. */
#define SUB_imm8s_r32(imm,r1) ADD_imm8s_r32(-(imm),r1)
#define TEST_r8_r8(r1,r2)     OP(0x84); MODRM_r32_rm32(r1,r2)
#define TEST_r32_r32(r1,r2)   OP(0x85); MODRM_rm32_r32(r1,r2)
#define TEST_imm8_r8(imm8,r1) OP(0xF6); MODRM_rm32_r32(r1,0); OP(imm8)
#define TEST_imm32_r32(imm,r1) OP(0xF7); MODRM_rm32_r32(r1,0); OP32(imm)
#define XCHG_r8_r8(r1,r2)     OP(0x86); MODRM_rm32_r32(r1,r2)
#define XOR_r8_r8(r1,r2)      OP(0x32); MODRM_rm32_r32(r1,r2)
#define XOR_imm8s_r32(imm,r1)   OP(0x83); MODRM_rm32_r32(r1,6); OP(imm)
#define XOR_r32_r32(r1,r2)    OP(0x33); MODRM_rm32_r32(r1,r2)
#define XOR_sh4r_r32(disp,r1)    OP(0x33); MODRM_r32_sh4r(r1,disp)
#define XOR_imm32_r32(imm,r1) OP(0x81); MODRM_rm32_r32(r1,6); OP32(imm)
#define XOR_imm32_sh4r(imm,disp) OP(0x81); MODRM_r32_sh4r(6, disp); OP32(imm)
/* Floating point ops */
/* x87 stack operations; "_st(st)" forms take a stack-register index,
 * "_sh4r(disp)" forms load/store an sh4r memory slot. The /digit in the
 * ModR/M reg position selects the memory-operand operation. */
#define FABS_st0() OP(0xD9); OP(0xE1)
#define FADDP_st(st) OP(0xDE); OP(0xC0+st)
#define FCHS_st0() OP(0xD9); OP(0xE0)
#define FCOMIP_st(st) OP(0xDF); OP(0xF0+st)
#define FDIVP_st(st) OP(0xDE); OP(0xF8+st)
#define FILD_r32ind(r32) OP(0xDB); OP(0x00+r32)
/* D9 EE = FLDZ: push +0.0 */
#define FLD0_st0() OP(0xD9); OP(0xEE);
/* D9 E8 = FLD1: push +1.0 */
#define FLD1_st0() OP(0xD9); OP(0xE8);
#define FLDf_sh4r(disp) OP(0xD9); MODRM_r32_sh4r(0, disp)
#define FLDd_sh4r(disp) OP(0xDD); MODRM_r32_sh4r(0, disp)
#define FLDCW_r32ind(r32) OP(0xD9); OP(0x28+r32)
#define FMULP_st(st) OP(0xDE); OP(0xC8+st)
#define FNSTCW_r32ind(r32) OP(0xD9); OP(0x38+r32)
/* pop st(0) without storing: FFREE ST(0) + FINCSTP */
#define FPOP_st()  OP(0xDD); OP(0xC0); OP(0xD9); OP(0xF7)
#define FSTPf_sh4r(disp) OP(0xD9); MODRM_r32_sh4r(3, disp)
#define FSTPd_sh4r(disp) OP(0xDD); MODRM_r32_sh4r(3, disp)
#define FSUBP_st(st) OP(0xDE); OP(0xE8+st)
#define FSQRT_st0() OP(0xD9); OP(0xFA)

/* Duplicate spellings of the load/store macros above (FLDF == FLDf,
 * FSTPF == FSTPf, etc.) — identical encodings. */
#define FILD_sh4r(disp) OP(0xDB); MODRM_r32_sh4r(0, disp)
#define FLDF_sh4r(disp) OP(0xD9); MODRM_r32_sh4r(0, disp)
#define FLDD_sh4r(disp) OP(0xDD); MODRM_r32_sh4r(0, disp)
#define FISTP_sh4r(disp) OP(0xDB); MODRM_r32_sh4r(3, disp)
#define FSTPF_sh4r(disp) OP(0xD9); MODRM_r32_sh4r(3,disp)
#define FSTPD_sh4r(disp) OP(0xDD); MODRM_r32_sh4r(3,disp)
/* Conditional branches */
/* Jcc rel8 with a -1 placeholder displacement; resolve the target later
 * with JMP_TARGET(label). */
#define JE_rel8(label)   OP(0x74); MARK_JMP8(label); OP(-1)
#define JA_rel8(label)   OP(0x77); MARK_JMP8(label); OP(-1)
#define JAE_rel8(label)  OP(0x73); MARK_JMP8(label); OP(-1)
#define JG_rel8(label)   OP(0x7F); MARK_JMP8(label); OP(-1)
#define JGE_rel8(label)  OP(0x7D); MARK_JMP8(label); OP(-1)
#define JC_rel8(label)   OP(0x72); MARK_JMP8(label); OP(-1)
#define JO_rel8(label)   OP(0x70); MARK_JMP8(label); OP(-1)
#define JNE_rel8(label)  OP(0x75); MARK_JMP8(label); OP(-1)
#define JNA_rel8(label)  OP(0x76); MARK_JMP8(label); OP(-1)
#define JNAE_rel8(label) OP(0x72); MARK_JMP8(label); OP(-1)
#define JNG_rel8(label)  OP(0x7E); MARK_JMP8(label); OP(-1)
#define JNGE_rel8(label) OP(0x7C); MARK_JMP8(label); OP(-1)
#define JNC_rel8(label)  OP(0x73); MARK_JMP8(label); OP(-1)
#define JNO_rel8(label)  OP(0x71); MARK_JMP8(label); OP(-1)
#define JNS_rel8(label)  OP(0x79); MARK_JMP8(label); OP(-1)
#define JS_rel8(label)   OP(0x78); MARK_JMP8(label); OP(-1)

/** JMP relative 8 or 32 depending on size of rel. rel offset
 * from the start of the instruction (not end)
 */
/* Short form is 2 bytes (disp = rel-2, valid for rel in [-126,129]),
 * long form is 5 bytes (disp = rel-5). */
#define JMP_rel(rel) if((rel)<-126||(rel)>129) { OP(0xE9); OP32((rel)-5); } else { OP(0xEB); OP((rel)-2); }
/* 32-bit long forms w/ backpatching to an exception routine */
/* Each emits a Jcc rel32 with a zero displacement and registers the
 * location with sh4_x86_add_backpatch, which fills in the exception
 * handler offset later (exc identifies the handler; pc is the SH4
 * program counter in scope at the expansion site). */
#define JMP_exc(exc)  OP(0xE9); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
#define JE_exc(exc)  OP(0x0F); OP(0x84); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
#define JA_exc(exc)  OP(0x0F); OP(0x87); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
#define JAE_exc(exc) OP(0x0F); OP(0x83); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
#define JG_exc(exc)  OP(0x0F); OP(0x8F); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
#define JGE_exc(exc) OP(0x0F); OP(0x8D); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
#define JC_exc(exc)  OP(0x0F); OP(0x82); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
#define JO_exc(exc)  OP(0x0F); OP(0x80); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
#define JNE_exc(exc) OP(0x0F); OP(0x85); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
#define JNA_exc(exc) OP(0x0F); OP(0x86); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
#define JNAE_exc(exc) OP(0x0F);OP(0x82); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
#define JNG_exc(exc) OP(0x0F); OP(0x8E); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
#define JNGE_exc(exc) OP(0x0F);OP(0x8C); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
#define JNC_exc(exc) OP(0x0F); OP(0x83); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)
#define JNO_exc(exc) OP(0x0F); OP(0x81); sh4_x86_add_backpatch(xlat_output, pc, exc); OP32(0)

/* JE rel8 preceded by a 0x3E (DS-segment) prefix — presumably used as a
 * "branch expected taken" hint; confirm the intended microarchitecture. */
#define EXPJE_rel8(label) OP(0x3E); JE_rel8(label)
/* Conditional moves ebp-rel */
/* cmovCC r2, r1 (0F 4x family) */
#define CMOVE_r32_r32(r1,r2)  OP(0x0F); OP(0x44); MODRM_rm32_r32(r1,r2)
#define CMOVA_r32_r32(r1,r2)  OP(0x0F); OP(0x47); MODRM_rm32_r32(r1,r2)
#define CMOVAE_r32_r32(r1,r2) OP(0x0F); OP(0x43); MODRM_rm32_r32(r1,r2)
#define CMOVG_r32_r32(r1,r2)  OP(0x0F); OP(0x4F); MODRM_rm32_r32(r1,r2)
#define CMOVGE_r32_r32(r1,r2)  OP(0x0F); OP(0x4D); MODRM_rm32_r32(r1,r2)
#define CMOVC_r32_r32(r1,r2)  OP(0x0F); OP(0x42); MODRM_rm32_r32(r1,r2)
#define CMOVO_r32_r32(r1,r2)  OP(0x0F); OP(0x40); MODRM_rm32_r32(r1,r2)

/* Conditional setcc - writeback to sh4r.t */
/* setCC byte [sh4r+disp] (0F 9x family; the reg field is /0). */
#define SETE_sh4r(disp)    OP(0x0F); OP(0x94); MODRM_r32_sh4r(0, disp);
#define SETA_sh4r(disp)    OP(0x0F); OP(0x97); MODRM_r32_sh4r(0, disp);
#define SETAE_sh4r(disp)   OP(0x0F); OP(0x93); MODRM_r32_sh4r(0, disp);
#define SETG_sh4r(disp)    OP(0x0F); OP(0x9F); MODRM_r32_sh4r(0, disp);
#define SETGE_sh4r(disp)   OP(0x0F); OP(0x9D); MODRM_r32_sh4r(0, disp);
#define SETC_sh4r(disp)    OP(0x0F); OP(0x92); MODRM_r32_sh4r(0, disp);
#define SETO_sh4r(disp)    OP(0x0F); OP(0x90); MODRM_r32_sh4r(0, disp);

#define SETNE_sh4r(disp)   OP(0x0F); OP(0x95); MODRM_r32_sh4r(0, disp);
#define SETNA_sh4r(disp)   OP(0x0F); OP(0x96); MODRM_r32_sh4r(0, disp);
#define SETNAE_sh4r(disp)  OP(0x0F); OP(0x92); MODRM_r32_sh4r(0, disp);
#define SETNG_sh4r(disp)   OP(0x0F); OP(0x9E); MODRM_r32_sh4r(0, disp);
#define SETNGE_sh4r(disp)  OP(0x0F); OP(0x9C); MODRM_r32_sh4r(0, disp);
#define SETNC_sh4r(disp)   OP(0x0F); OP(0x93); MODRM_r32_sh4r(0, disp);
#define SETNO_sh4r(disp)   OP(0x0F); OP(0x91); MODRM_r32_sh4r(0, disp);

/* Shorthands writing directly to the SH4 T flag slot. */
#define SETE_t() SETE_sh4r(R_T)
#define SETA_t() SETA_sh4r(R_T)
#define SETAE_t() SETAE_sh4r(R_T)
#define SETG_t() SETG_sh4r(R_T)
#define SETGE_t() SETGE_sh4r(R_T)
#define SETC_t() SETC_sh4r(R_T)
#define SETO_t() SETO_sh4r(R_T)
#define SETNE_t() SETNE_sh4r(R_T)

#define SETC_r8(r1)      OP(0x0F); OP(0x92); MODRM_rm32_r32(r1, 0)

/* Pseudo-op Load carry from T: CMP [EBP+t], #01 ; CMC */
/* CMP sets CF iff t < 1 (i.e. t == 0, assuming t holds only 0 or 1 —
 * TODO confirm); CMC then inverts it, leaving CF == t. */
#define LDC_t()     OP(0x83); MODRM_r32_sh4r(7,R_T); OP(0x01); CMC()
/* SSE instructions */
/* Packed single-precision ops on XMM registers, used for the SH4 vector
 * FP instructions. NOTE(review): the MOVAPS memory forms require the
 * sh4r slot to be 16-byte aligned or they fault — verify sh4r's FP bank
 * alignment. HADDPS (SSE3) is not available on all hosts; presumably
 * gated by a CPU-feature check elsewhere — confirm. */
#define ADDPS_xmm_xmm(xmm1,xmm2) OP(0x0F); OP(0x58); MODRM_rm32_r32(xmm1,xmm2)
#define HADDPS_xmm_xmm(xmm1,xmm2) OP(0xF2); OP(0x0F); OP(0x7C); MODRM_rm32_r32(xmm1,xmm2)
#define MOVHLPS_xmm_xmm(xmm1,xmm2) OP(0x0F); OP(0x12); MODRM_rm32_r32(xmm1,xmm2)
#define MOVLHPS_xmm_xmm(xmm1,xmm2) OP(0x0F); OP(0x16); MODRM_rm32_r32(xmm1,xmm2)
#define MOVSHDUP_sh4r_xmm(disp,xmm) OP(0xF3); OP(0x0F); OP(0x16); MODRM_r32_sh4r(xmm,disp)
#define MOVSLDUP_sh4r_xmm(disp,xmm) OP(0xF3); OP(0x0F); OP(0x12); MODRM_r32_sh4r(xmm,disp)
#define MOVAPS_sh4r_xmm(disp, xmm) OP(0x0F); OP(0x28); MODRM_r32_sh4r(xmm,disp)
#define MOVAPS_xmm_sh4r(xmm,disp) OP(0x0F); OP(0x29); MODRM_r32_sh4r(xmm,disp)
#define MOVAPS_xmm_xmm(xmm1,xmm2) OP(0x0F); OP(0x28); MODRM_rm32_r32(xmm1,xmm2)
#define MOVSS_xmm_sh4r(xmm,disp) OP(0xF3); OP(0x0F); OP(0x11); MODRM_r32_sh4r(xmm,disp)
#define MULPS_sh4r_xmm(disp, xmm) OP(0x0F); OP(0x59); MODRM_r32_sh4r(xmm,disp)
#define MULPS_xmm_xmm(xmm1,xmm2) OP(0x0F); OP(0x59); MODRM_rm32_r32(xmm1,xmm2)
#define SHUFPS_sh4r_xmm(disp,xmm,imm8) OP(0x0F); OP(0xC6); MODRM_r32_sh4r(xmm, disp); OP(imm8)
#define SHUFPS_xmm_xmm(xmm1,xmm2,imm8) OP(0x0F); OP(0xC6); MODRM_rm32_r32(xmm1,xmm2); OP(imm8)
#define UNPCKHPS_xmm_xmm(xmm1,xmm2) OP(0x0F); OP(0x15); MODRM_rm32_r32(xmm1,xmm2)
#define UNPCKLPS_xmm_xmm(xmm1,xmm2) OP(0x0F); OP(0x14); MODRM_rm32_r32(xmm1,xmm2)
   340 #ifdef __cplusplus
   341 }
   342 #endif
   344 #endif /* !lxdream_x86op_H */
.