# HG changeset patch
# User nkeynes
# Date 1188895223 0
# Node ID 36fac4c42322a3f038ea90feba4ca52da25b8bcb
# Parent 9c52dcbad3fbe9f54f0f71b3185d3fda53ac9757
More translator WIP - blocks are approaching something sane

--- a/src/sh4/sh4trans.c	Tue Sep 04 08:38:33 2007 +0000
+++ b/src/sh4/sh4trans.c	Tue Sep 04 08:40:23 2007 +0000
@@ -1,5 +1,5 @@
 /**
- * $Id: sh4trans.c,v 1.1 2007-08-23 12:33:27 nkeynes Exp $
+ * $Id: sh4trans.c,v 1.2 2007-09-04 08:40:23 nkeynes Exp $
  *
  * SH4 translation core module. This part handles the non-target-specific
  * section of the translation.
@@ -27,7 +27,7 @@
  */
 uint32_t sh4_xlat_run_slice( uint32_t nanosecs )
 {
-    int i, result = 1;
+    int i;
     sh4r.slice_cycle = 0;
     if( sh4r.sh4_state != SH4_STATE_RUNNING ) {
@@ -37,7 +37,7 @@
         }
     }
-    for( ; sh4r.slice_cycle < nanosecs && result != 0; sh4r.slice_cycle ) {
+    while( sh4r.slice_cycle < nanosecs ) {
         if( SH4_EVENT_PENDING() ) {
             if( sh4r.event_types & PENDING_EVENT ) {
                 event_execute();
@@ -48,12 +48,12 @@
             }
         }
-        int (*code)() = xlat_get_code(sh4r.pc);
+        gboolean (*code)() = xlat_get_code(sh4r.pc);
         if( code == NULL ) {
            code = sh4_translate_basic_block( sh4r.pc );
         }
-        result = code();
-        sh4r.slice_cycle += result;
+        if( !code() )
+            break;
     }
     /* If we aborted early, but the cpu is still technically running,
@@ -97,8 +97,8 @@
         }
         pc += 2;
     }
-    sh4_translate_end_block(done);
-    xlat_commit_block( xlat_output - block->size, pc-start );
+    sh4_translate_end_block(pc);
+    xlat_commit_block( xlat_output - block->code, pc-start );
     return block->code;
 }
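With this change a translated block is entered as a plain function returning a gboolean: TRUE keeps the slice running, FALSE ends it early, and the caller no longer adds a returned cycle count to sh4r.slice_cycle, which suggests the generated code now accounts for its own cycles. A hand-written illustration of that calling convention follows; the real prologue and epilogue come from sh4_translate_begin_block()/sh4_translate_end_block(), so the body and the sh4_cpu_period accounting below are illustrative assumptions, not part of the patch.

/* Illustrative only: the shape of a translated block under the new
 * gboolean contract used by sh4_xlat_run_slice(). */
static gboolean example_translated_block( void )
{
    /* ... translated instruction bodies operate directly on sh4r ... */
    sh4r.pc += 2;                        /* epilogue: advance PC past the block */
    sh4r.slice_cycle += sh4_cpu_period;  /* assumed: the block charges its own cycles */
    return TRUE;                         /* FALSE would end the current time slice */
}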
--- a/src/sh4/sh4x86.c	Tue Sep 04 08:38:33 2007 +0000
+++ b/src/sh4/sh4x86.c	Tue Sep 04 08:40:23 2007 +0000
@@ -1,5 +1,5 @@
 /**
- * $Id: sh4x86.c,v 1.2 2007-08-28 08:46:14 nkeynes Exp $
+ * $Id: sh4x86.c,v 1.3 2007-09-04 08:40:23 nkeynes Exp $
  *
  * SH4 => x86 translation. This version does no real optimization, it just
  * outputs straight-line x86 code - it mainly exists to provide a baseline
@@ -18,9 +18,73 @@
  * GNU General Public License for more details.
  */
-#include "sh4core.h"
-#include "sh4trans.h"
-#include "x86op.h"
+#include
+
+#include "sh4/sh4core.h"
+#include "sh4/sh4trans.h"
+#include "sh4/x86op.h"
+#include "clock.h"
+
+#define DEFAULT_BACKPATCH_SIZE 4096
+
+/**
+ * Struct to manage internal translation state. This state is not saved -
+ * it is only valid between calls to sh4_translate_begin_block() and
+ * sh4_translate_end_block()
+ */
+struct sh4_x86_state {
+    gboolean in_delay_slot;
+    gboolean priv_checked; /* true if we've already checked the cpu mode. */
+    gboolean fpuen_checked; /* true if we've already checked fpu enabled. */
+
+    /* Allocated memory for the (block-wide) back-patch list */
+    uint32_t **backpatch_list;
+    uint32_t backpatch_posn;
+    uint32_t backpatch_size;
+};
+
+#define EXIT_DATA_ADDR_READ 0
+#define EXIT_DATA_ADDR_WRITE 7
+#define EXIT_ILLEGAL 14
+#define EXIT_SLOT_ILLEGAL 21
+#define EXIT_FPU_DISABLED 28
+#define EXIT_SLOT_FPU_DISABLED 35
+
+static struct sh4_x86_state sh4_x86;
+
+void sh4_x86_init()
+{
+    sh4_x86.backpatch_list = malloc(DEFAULT_BACKPATCH_SIZE);
+    sh4_x86.backpatch_size = DEFAULT_BACKPATCH_SIZE / sizeof(uint32_t *);
+}
+
+
+static void sh4_x86_add_backpatch( uint8_t *ptr )
+{
+    if( sh4_x86.backpatch_posn == sh4_x86.backpatch_size ) {
+        sh4_x86.backpatch_size <<= 1;
+        sh4_x86.backpatch_list = realloc( sh4_x86.backpatch_list, sh4_x86.backpatch_size * sizeof(uint32_t *) );
+        assert( sh4_x86.backpatch_list != NULL );
+    }
+    sh4_x86.backpatch_list[sh4_x86.backpatch_posn++] = (uint32_t *)ptr;
+}
+
+static void sh4_x86_do_backpatch( uint8_t *reloc_base )
+{
+    unsigned int i;
+    for( i=0; i
> 12 ) {
         case 0x0:
             switch( ir&0xF ) {
@@ -149,7 +348,8 @@
                 case 0x0:
                     { /* STC SR, Rn */
                     uint32_t Rn = ((ir>>8)&0xF);
-                    /* TODO */
+                    read_sr( R_EAX );
+                    store_reg( R_EAX, Rn );
                     }
                     break;
                 case 0x1:
@@ -566,6 +766,18 @@
             case 0xC:
                 { /* CMP/STR Rm, Rn */
                 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
+                load_reg( R_EAX, Rm );
+                load_reg( R_ECX, Rn );
+                XOR_r32_r32( R_ECX, R_EAX );
+                TEST_r8_r8( R_AL, R_AL );
+                JE_rel8(13);
+                TEST_r8_r8( R_AH, R_AH ); // 2
+                JE_rel8(9);
+                SHR_imm8_r32( 16, R_EAX ); // 3
+                TEST_r8_r8( R_AL, R_AL ); // 2
+                JE_rel8(2);
+                TEST_r8_r8( R_AH, R_AH ); // 2
+                SETE_t();
                 }
                 break;
             case 0xD:
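For reference, SH4's CMP/STR sets T when any one of the four corresponding bytes of Rm and Rn is equal, i.e. when some byte of Rm^Rn is zero; that is what the XOR/TEST/JE ladder above computes before SETE_t() writes the result to T. A plain C restatement of that semantic, added here for clarity and not part of the patch:

/* SH4 CMP/STR Rm,Rn: T = 1 if any byte of rm equals the corresponding byte of rn */
static int sh4_cmp_str( uint32_t rm, uint32_t rn )
{
    uint32_t tmp = rm ^ rn;
    return ((tmp & 0x000000FF) == 0) ||
           ((tmp & 0x0000FF00) == 0) ||
           ((tmp & 0x00FF0000) == 0) ||
           ((tmp & 0xFF000000) == 0);
}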
@@ -880,6 +1092,11 @@
                 { /* STC.L SR, @-Rn */
                 uint32_t Rn = ((ir>>8)&0xF);
                 /* TODO */
+                load_reg( R_ECX, Rn );
+                ADD_imm8s_r32( -4, Rn );
+                store_reg( R_ECX, Rn );
+                read_sr( R_EAX );
+                MEM_WRITE_LONG( R_ECX, R_EAX );
                 }
                 break;
             case 0x1:
@@ -1085,6 +1302,12 @@
             case 0x0:
                 { /* LDC.L @Rm+, SR */
                 uint32_t Rm = ((ir>>8)&0xF);
+                load_reg( R_EAX, Rm );
+                MOV_r32_r32( R_EAX, R_ECX );
+                ADD_imm8s_r32( 4, R_EAX );
+                store_reg( R_EAX, Rm );
+                MEM_READ_LONG( R_ECX, R_EAX );
+                write_sr( R_EAX );
                 }
                 break;
             case 0x1:
@@ -1312,6 +1535,16 @@
             case 0xD:
                 { /* SHLD Rm, Rn */
                 uint32_t Rn = ((ir>>8)&0xF); uint32_t Rm = ((ir>>4)&0xF);
+                load_reg( R_EAX, Rn );
+                load_reg( R_ECX, Rm );
+
+                MOV_r32_r32( R_EAX, R_EDX );
+                SHL_r32_CL( R_EAX );
+                NEG_r32( R_ECX );
+                SHR_r32_CL( R_EDX );
+                CMP_imm8s_r32( 0, R_ECX );
+                CMOVAE_r32_r32( R_EDX, R_EAX );
+                store_reg( R_EAX, Rn );
                 }
                 break;
             case 0xE:
@@ -1321,7 +1554,8 @@
             case 0x0:
                 { /* LDC Rm, SR */
                 uint32_t Rm = ((ir>>8)&0xF);
-                /* We need to be a little careful about SR */
+                load_reg( R_EAX, Rm );
+                write_sr( R_EAX );
                 }
                 break;
             case 0x1:
@@ -1590,6 +1824,10 @@
         case 0xB:
             { /* BF disp */
             int32_t disp = SIGNEXT8(ir&0xFF)<<1;
+            CMP_imm8s_ebp( 0, R_T );
+            JNE_rel8( 1 );
+            exit_block( disp + pc + 4 );
+            return 1;
             }
             break;
         case 0xD:
@@ -1601,6 +1839,10 @@
         case 0xF:
             { /* BF/S disp */
             int32_t disp = SIGNEXT8(ir&0xFF)<<1;
+            CMP_imm8s_ebp( 0, R_T );
+            JNE_rel8( 1 );
+            exit_block( disp + pc + 4 );
+            sh4_x86.in_delay_slot = TRUE;
             }
             break;
         default:
@@ -1619,6 +1861,7 @@
         case 0xA:
             { /* BRA disp */
             int32_t disp = SIGNEXT12(ir&0xFFF)<<1;
+            exit_block( disp + pc + 4 );
             }
             break;
         case 0xB:
@@ -1697,6 +1940,9 @@
         case 0x8:
             { /* TST #imm, R0 */
             uint32_t imm = (ir&0xFF);
+            load_reg( R_EAX, 0 );
+            TEST_imm32_r32( imm, R_EAX );
+            SETE_t();
             }
             break;
         case 0x9:
@@ -1726,6 +1972,12 @@
         case 0xC:
             { /* TST.B #imm, @(R0, GBR) */
             uint32_t imm = (ir&0xFF);
+            load_reg( R_EAX, 0);
+            load_reg( R_ECX, R_GBR);
+            ADD_r32_r32( R_EAX, R_ECX );
+            MEM_READ_BYTE( R_ECX, R_EAX );
+            TEST_imm8_r8( imm, R_EAX );
+            SETE_t();
             }
             break;
         case 0xD:
@@ -1960,6 +2212,7 @@
         break;
     }
+    INC_r32(R_ESI);
     return 0;
 }
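Both sh4x86.c and the sh4x86.in template that follows (from which it appears to be generated) now carry the same back-patch machinery. The EXIT_* constants are byte offsets spaced 7 apart, which suggests each block ends with a table of fixed-size exit stubs; sh4_x86_add_backpatch() records where each 32-bit jump displacement was emitted (see the *_exit macros in x86op.h further down) so that sh4_x86_do_backpatch() can retarget them once the stub area's address is known. The loop body of sh4_x86_do_backpatch() is missing from this copy of the changeset, so the following is only a sketch of that fix-up under the assumptions above, not the lost code:

/* Sketch: point every recorded rel32 displacement at the exit-stub area
 * starting at reloc_base.  A rel32 is relative to the end of the jump,
 * i.e. to the address of the displacement field plus 4. */
static void sh4_x86_do_backpatch_sketch( uint8_t *reloc_base )
{
    unsigned int i;
    for( i = 0; i < sh4_x86.backpatch_posn; i++ ) {
        uint32_t *fixup = sh4_x86.backpatch_list[i];
        *fixup += (uint32_t)(reloc_base - ((uint8_t *)fixup + 4));
    }
}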
--- a/src/sh4/sh4x86.in	Tue Sep 04 08:38:33 2007 +0000
+++ b/src/sh4/sh4x86.in	Tue Sep 04 08:40:23 2007 +0000
@@ -1,5 +1,5 @@
 /**
- * $Id: sh4x86.in,v 1.2 2007-08-28 08:46:14 nkeynes Exp $
+ * $Id: sh4x86.in,v 1.3 2007-09-04 08:40:23 nkeynes Exp $
  *
  * SH4 => x86 translation. This version does no real optimization, it just
  * outputs straight-line x86 code - it mainly exists to provide a baseline
@@ -18,9 +18,73 @@
  * GNU General Public License for more details.
  */
-#include "sh4core.h"
-#include "sh4trans.h"
-#include "x86op.h"
+#include
+
+#include "sh4/sh4core.h"
+#include "sh4/sh4trans.h"
+#include "sh4/x86op.h"
+#include "clock.h"
+
+#define DEFAULT_BACKPATCH_SIZE 4096
+
+/**
+ * Struct to manage internal translation state. This state is not saved -
+ * it is only valid between calls to sh4_translate_begin_block() and
+ * sh4_translate_end_block()
+ */
+struct sh4_x86_state {
+    gboolean in_delay_slot;
+    gboolean priv_checked; /* true if we've already checked the cpu mode. */
+    gboolean fpuen_checked; /* true if we've already checked fpu enabled. */
+
+    /* Allocated memory for the (block-wide) back-patch list */
+    uint32_t **backpatch_list;
+    uint32_t backpatch_posn;
+    uint32_t backpatch_size;
+};
+
+#define EXIT_DATA_ADDR_READ 0
+#define EXIT_DATA_ADDR_WRITE 7
+#define EXIT_ILLEGAL 14
+#define EXIT_SLOT_ILLEGAL 21
+#define EXIT_FPU_DISABLED 28
+#define EXIT_SLOT_FPU_DISABLED 35
+
+static struct sh4_x86_state sh4_x86;
+
+void sh4_x86_init()
+{
+    sh4_x86.backpatch_list = malloc(DEFAULT_BACKPATCH_SIZE);
+    sh4_x86.backpatch_size = DEFAULT_BACKPATCH_SIZE / sizeof(uint32_t *);
+}
+
+
+static void sh4_x86_add_backpatch( uint8_t *ptr )
+{
+    if( sh4_x86.backpatch_posn == sh4_x86.backpatch_size ) {
+        sh4_x86.backpatch_size <<= 1;
+        sh4_x86.backpatch_list = realloc( sh4_x86.backpatch_list, sh4_x86.backpatch_size * sizeof(uint32_t *) );
+        assert( sh4_x86.backpatch_list != NULL );
+    }
+    sh4_x86.backpatch_list[sh4_x86.backpatch_posn++] = (uint32_t *)ptr;
+}
+
+static void sh4_x86_do_backpatch( uint8_t *reloc_base )
+{
+    unsigned int i;
+    for( i=0; i
127){ MODRM_r32_ebp32(r1,disp);}else{ MODRM_r32_ebp8(r1,(unsigned char)disp); }
+
 /* Major opcodes */
 #define ADD_r32_r32(r1,r2) OP(0x03); MODRM_rm32_r32(r1,r2)
 #define ADD_imm8s_r32(imm,r1) OP(0x83); MODRM_rm32_r32(r1, 0); OP(imm)
+#define ADD_imm32_r32(imm32,r1) OP(0x81); MODRM_rm32_r32(r1,0); OP32(imm32)
 #define ADC_r32_r32(r1,r2) OP(0x13); MODRM_rm32_r32(r1,r2)
 #define AND_r32_r32(r1,r2) OP(0x23); MODRM_rm32_r32(r1,r2)
+#define AND_imm8_r8(imm8, r1) OP(0x80); MODRM_rm32_r32(r1,4); OP(imm8)
 #define AND_imm32_r32(imm,r1) OP(0x81); MODRM_rm32_r32(r1,4); OP32(imm)
+#define CALL_r32(r1) OP(0xFF); MODRM_rm32_r32(r1,2)
 #define CMC() OP(0xF5)
 #define CMP_r32_r32(r1,r2) OP(0x3B); MODRM_rm32_r32(r1,r2)
+#define CMP_imm32_r32(imm32, r1) OP(0x81); MODRM_rm32_r32(r1,7); OP32(imm32)
 #define CMP_imm8s_r32(imm,r1) OP(0x83); MODRM_rm32_r32(r1,7); OP(imm)
+#define CMP_imm8s_ebp(imm,disp) OP(0x83); MODRM_r32_ebp(7,disp) OP(imm)
+#define DEC_r32(r1) OP(0x48+r1)
+#define IMUL_r32(r1) OP(0xF7); MODRM_rm32_r32(r1,5)
+#define INC_r32(r1) OP(0x40+r1)
 #define JMP_rel8(rel) OP(0xEB); OP(rel)
-#define MOV_r32_ebp8(r1,disp) OP(0x89); MODRM_r32_ebp8(r1,disp)
+#define MOV_r32_r32(r1,r2) OP(0x89); MODRM_r32_rm32(r1,r2)
+#define MOV_r32_ebp(r1,disp) OP(0x89); MODRM_r32_ebp(r1,disp)
 #define MOV_r32_ebp32(r1,disp) OP(0x89); MODRM_r32_ebp32(r1,disp)
-#define MOV_ebp8_r32(r1,disp) OP(0x8B); MODRM_r32_ebp8(r1,disp)
-#define MOV_ebp32_r32(r1,disp) OP(0x8B); MODRM_r32_ebp32(r1,disp)
+#define MOV_moff32_EAX(off) OP(0xA1); OP32(off)
+#define MOV_ebp_r32(disp, r1) OP(0x8B); MODRM_r32_ebp(r1,disp)
 #define MOVSX_r8_r32(r1,r2) OP(0x0F); OP(0xBE); MODRM_rm32_r32(r1,r2)
 #define MOVSX_r16_r32(r1,r2) OP(0x0F); OP(0xBF); MODRM_rm32_r32(r1,r2)
 #define MOVZX_r8_r32(r1,r2) OP(0x0F); OP(0xB6); MODRM_rm32_r32(r1,r2)
 #define MOVZX_r16_r32(r1,r2) OP(0x0F); OP(0xB7); MODRM_rm32_r32(r1,r2)
+#define MUL_r32(r1) OP(0xF7); MODRM_rm32_r32(r1,4)
 #define NEG_r32(r1) OP(0xF7); MODRM_rm32_r32(r1,3)
 #define NOT_r32(r1) OP(0xF7); MODRM_rm32_r32(r1,2)
 #define OR_r32_r32(r1,r2) OP(0x0B); MODRM_rm32_r32(r1,r2)
+#define OR_imm8_r8(imm,r1) OP(0x80); MODRM_rm32_r32(r1,1)
 #define OR_imm32_r32(imm,r1) OP(0x81); MODRM_rm32_r32(r1,1); OP32(imm)
+#define OR_ebp_r32(disp,r1) OP(0x0B); MODRM_r32_ebp(r1,disp)
+#define POP_r32(r1) OP(0x58 + r1)
 #define PUSH_r32(r1) OP(0x50 + r1)
+#define PUSH_imm32(imm) OP(0x68); OP32(imm)
 #define RCL1_r32(r1) OP(0xD1); MODRM_rm32_r32(r1,2)
 #define RCR1_r32(r1) OP(0xD1); MODRM_rm32_r32(r1,3)
 #define RET() OP(0xC3)
@@ -106,29 +123,23 @@
 #define ROR1_r32(r1) OP(0xD1); MODRM_rm32_r32(r1,1)
 #define SAR1_r32(r1) OP(0xD1); MODRM_rm32_r32(r1,7)
 #define SAR_imm8_r32(imm,r1) OP(0xC1); MODRM_rm32_r32(r1,7); OP(imm)
+#define SAR_r32_CL(r1) OP(0xD3); MODRM_rm32_r32(r1,7)
 #define SBB_r32_r32(r1,r2) OP(0x1B); MODRM_rm32_r32(r1,r2)
 #define SHL1_r32(r1) OP(0xD1); MODRM_rm32_r32(r1,4)
+#define SHL_r32_CL(r1) OP(0xD3); MODRM_rm32_r32(r1,4)
 #define SHL_imm8_r32(imm,r1) OP(0xC1); MODRM_rm32_r32(r1,4); OP(imm)
 #define SHR1_r32(r1) OP(0xD1); MODRM_rm32_r32(r1,5)
+#define SHR_r32_CL(r1) OP(0xD3); MODRM_rm32_r32(r1,5)
 #define SHR_imm8_r32(imm,r1) OP(0xC1); MODRM_rm32_r32(r1,5); OP(imm)
 #define SUB_r32_r32(r1,r2) OP(0x2B); MODRM_rm32_r32(r1,r2)
+#define TEST_r8_r8(r1,r2) OP(0x84); MODRM_r32_rm32(r1,r2)
 #define TEST_r32_r32(r1,r2) OP(0x85); MODRM_rm32_r32(r1,r2)
+#define TEST_imm8_r8(imm8,r1) OP(0xF6); MODRM_rm32_r32(r1,0); OP(imm8)
 #define TEST_imm32_r32(imm,r1) OP(0xF7); MODRM_rm32_r32(r1,0); OP32(imm)
+#define XCHG_r8_r8(r1,r2) OP(0x86); MODRM_rm32_r32(r1,r2)
 #define XOR_r32_r32(r1,r2) OP(0x33); MODRM_rm32_r32(r1,r2)
 #define XOR_imm32_r32(imm,r1) OP(0x81); MODRM_rm32_r32(r1,6); OP32(imm)
-#define ADD_imm32_r32(imm32,r1) OP(0x81); MODRM_rm32_r32(r1,0); OP32(imm32)
-#define AND_imm8_r8(imm8, r1) OP(0x80); MODRM_rm32_r32(r1,4); OP(imm8)
-#define CMP_imm32_r32(imm32, r1) OP(0x81); MODRM_rm32_r32(r1,7); OP32(imm32)
-#define MOV_r32_r32(r1,r2) OP(0x89); MODRM_r32_rm32(r1,r2)
-#define MUL_r32(r1) OP(0xF7); MODRM_rm32_r32(r1,4)
-#define IMUL_r32(r1) OP(0xF7); MODRM_rm32_r32(r1,5)
-#define OR_imm8_r8(imm,r1) OP(0x80); MODRM_rm32_r32(r1,1)
-#define TEST_r8_r8(r1,r2) OP(0x84); MODRM_r32_rm32(r1,r2)
-#define SAR_r32_CL(r1) OP(0xD3); MODRM_rm32_r32(r1,7)
-#define SHR_r32_CL(r1) OP(0xD3); MODRM_rm32_r32(r1,5)
-#define SHL_r32_CL(r1) OP(0xD3); MODRM_rm32_r32(r1,4)
-#define XCHG_r8_r8(r1,r2) OP(0x86); MODRM_rm32_r32(r1,r2)
 
 /* Conditional branches */
 #define JE_rel8(rel) OP(0x74); OP(rel)
@@ -138,8 +149,6 @@
 #define JGE_rel8(rel) OP(0x7D); OP(rel)
 #define JC_rel8(rel) OP(0x72); OP(rel)
 #define JO_rel8(rel) OP(0x70); OP(rel)
-
-/* Negated forms */
 #define JNE_rel8(rel) OP(0x75); OP(rel)
 #define JNA_rel8(rel) OP(0x76); OP(rel)
 #define JNAE_rel8(rel) OP(0x72); OP(rel)
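All of these macros assemble an instruction one byte at a time into the translation buffer. OP(), OP32() and the MODRM_* helpers are defined earlier in x86op.h, outside the hunks visible here, so the definitions below are assumed reconstructions that are merely consistent with the encodings above, not quotations from the header:

/* Assumed helpers (not quoted from x86op.h): OP() appends one byte to the
 * output stream and MODRM_rm32_r32() builds a register-direct ModR/M byte
 * (mod=11, reg, r/m). */
#define OP(x)                ( *xlat_output++ = (uint8_t)(x) )
#define OP32(x)              do { *(uint32_t *)xlat_output = (x); xlat_output += 4; } while(0)
#define MODRM_rm32_r32(rm,r) OP( 0xC0 | ((r)<<3) | (rm) )

/* Worked example: XOR_r32_r32( R_ECX, R_EAX ) from the CMP/STR sequence
 * expands to OP(0x33); MODRM_rm32_r32(R_ECX, R_EAX), emitting the bytes
 * 0x33 0xC1, i.e. "xor %ecx,%eax", with the result left in EAX. */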
@@ -148,24 +157,59 @@
 #define JNC_rel8(rel) OP(0x73); OP(rel)
 #define JNO_rel8(rel) OP(0x71); OP(rel)
 
+/* 32-bit long forms w/ backpatching to an exit routine */
+#define JE_exit(rel) OP(0x0F); OP(0x84); sh4_x86_add_backpatch(xlat_output); OP32(rel)
+#define JA_exit(rel) OP(0x0F); OP(0x87); sh4_x86_add_backpatch(xlat_output); OP32(rel)
+#define JAE_exit(rel) OP(0x0F); OP(0x83); sh4_x86_add_backpatch(xlat_output); OP32(rel)
+#define JG_exit(rel) OP(0x0F); OP(0x8F); sh4_x86_add_backpatch(xlat_output); OP32(rel)
+#define JGE_exit(rel) OP(0x0F); OP(0x8D); sh4_x86_add_backpatch(xlat_output); OP32(rel)
+#define JC_exit(rel) OP(0x0F); OP(0x82); sh4_x86_add_backpatch(xlat_output); OP32(rel)
+#define JO_exit(rel) OP(0x0F); OP(0x80); sh4_x86_add_backpatch(xlat_output); OP32(rel)
+#define JNE_exit(rel) OP(0x0F); OP(0x85); sh4_x86_add_backpatch(xlat_output); OP32(rel)
+#define JNA_exit(rel) OP(0x0F); OP(0x86); sh4_x86_add_backpatch(xlat_output); OP32(rel)
+#define JNAE_exit(rel) OP(0x0F);OP(0x82); sh4_x86_add_backpatch(xlat_output); OP32(rel)
+#define JNG_exit(rel) OP(0x0F); OP(0x8E); sh4_x86_add_backpatch(xlat_output); OP32(rel)
+#define JNGE_exit(rel) OP(0x0F);OP(0x8C); sh4_x86_add_backpatch(xlat_output); OP32(rel)
+#define JNC_exit(rel) OP(0x0F); OP(0x83); sh4_x86_add_backpatch(xlat_output); OP32(rel)
+#define JNO_exit(rel) OP(0x0F); OP(0x81); sh4_x86_add_backpatch(xlat_output); OP32(rel)
+
+
+/* Conditional moves ebp-rel */
+#define CMOVE_r32_r32(r1,r2) OP(0x0F); OP(0x44); MODRM_rm32_r32(r1,r2)
+#define CMOVA_r32_r32(r1,r2) OP(0x0F); OP(0x47); MODRM_rm32_r32(r1,r2)
+#define CMOVAE_r32_r32(r1,r2) OP(0x0F); OP(0x43); MODRM_rm32_r32(r1,r2)
+#define CMOVG_r32_r32(r1,r2) OP(0x0F); OP(0x4F); MODRM_rm32_r32(r1,r2)
+#define CMOVGE_r32_r32(r1,r2) OP(0x0F); OP(0x4D); MODRM_rm32_r32(r1,r2)
+#define CMOVC_r32_r32(r1,r2) OP(0x0F); OP(0x42); MODRM_rm32_r32(r1,r2)
+#define CMOVO_r32_r32(r1,r2) OP(0x0F); OP(0x40); MODRM_rm32_r32(r1,r2)
+
+
 /* Conditional setcc - writeback to sh4r.t */
-#define SETE_t() OP(0x0F); OP(0x94); MODRM_r32_ebp8(0, R_T);
-#define SETA_t() OP(0x0F); OP(0x97); MODRM_r32_ebp8(0, R_T);
-#define SETAE_t() OP(0x0F); OP(0x93); MODRM_r32_ebp8(0, R_T);
-#define SETG_t() OP(0x0F); OP(0x9F); MODRM_r32_ebp8(0, R_T);
-#define SETGE_t() OP(0x0F); OP(0x9D); MODRM_r32_ebp8(0, R_T);
-#define SETC_t() OP(0x0F); OP(0x92); MODRM_r32_ebp8(0, R_T);
-#define SETO_t() OP(0x0F); OP(0x90); MODRM_r32_ebp8(0, R_T);
+#define SETE_ebp(disp) OP(0x0F); OP(0x94); MODRM_r32_ebp(0, disp);
+#define SETA_ebp(disp) OP(0x0F); OP(0x97); MODRM_r32_ebp(0, disp);
+#define SETAE_ebp(disp) OP(0x0F); OP(0x93); MODRM_r32_ebp(0, disp);
+#define SETG_ebp(disp) OP(0x0F); OP(0x9F); MODRM_r32_ebp(0, disp);
+#define SETGE_ebp(disp) OP(0x0F); OP(0x9D); MODRM_r32_ebp(0, disp);
+#define SETC_ebp(disp) OP(0x0F); OP(0x92); MODRM_r32_ebp(0, disp);
+#define SETO_ebp(disp) OP(0x0F); OP(0x90); MODRM_r32_ebp(0, disp);
 
-#define SETNE_t() OP(0x0F); OP(0x95); MODRM_r32_ebp8(0, R_T);
-#define SETNA_t() OP(0x0F); OP(0x96); MODRM_r32_ebp8(0, R_T);
-#define SETNAE_t() OP(0x0F); OP(0x92); MODRM_r32_ebp8(0, R_T);
-#define SETNG_t() OP(0x0F); OP(0x9E); MODRM_r32_ebp8(0, R_T);
-#define SETNGE_t() OP(0x0F); OP(0x9C); MODRM_r32_ebp8(0, R_T);
-#define SETNC_t() OP(0x0F); OP(0x93); MODRM_r32_ebp8(0, R_T);
-#define SETNO_t() OP(0x0F); OP(0x91); MODRM_r32_ebp8(0, R_T);
+#define SETNE_ebp(disp) OP(0x0F); OP(0x95); MODRM_r32_ebp(0, disp);
+#define SETNA_ebp(disp) OP(0x0F); OP(0x96); MODRM_r32_ebp(0, disp);
+#define SETNAE_ebp(disp) OP(0x0F); OP(0x92); MODRM_r32_ebp(0, disp);
+#define SETNG_ebp(disp) OP(0x0F); OP(0x9E); MODRM_r32_ebp(0, disp);
+#define SETNGE_ebp(disp) OP(0x0F); OP(0x9C); MODRM_r32_ebp(0, disp);
+#define SETNC_ebp(disp) OP(0x0F); OP(0x93); MODRM_r32_ebp(0, disp);
+#define SETNO_ebp(disp) OP(0x0F); OP(0x91); MODRM_r32_ebp(0, disp);
+
+#define SETE_t() SETE_ebp(R_T)
+#define SETA_t() SETA_ebp(R_T)
+#define SETAE_t() SETAE_ebp(R_T)
+#define SETG_t() SETG_ebp(R_T)
+#define SETGE_t() SETGE_ebp(R_T)
+#define SETC_t() SETC_ebp(R_T)
+#define SETO_t() SETO_ebp(R_T)
 
 /* Pseudo-op Load carry from T: CMP [EBP+t], #01 ; CMC */
-#define LDC_t() OP(0x83); MODRM_r32_ebp8(7,R_T); OP(0x01); CMC()
+#define LDC_t() OP(0x83); MODRM_r32_ebp(7,R_T); OP(0x01); CMC()
 
 #endif /* !__lxdream_x86op_H */
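The rewritten SETcc/LDC pseudo-ops keep the SH4 T flag as a 0/1 value at [EBP + R_T]; EBP is evidently reserved to point at the sh4r register block while translated code runs. LDC_t() works because CMP [EBP+R_T], 1 leaves the x86 carry clear when T=1 and set when T=0, and the following CMC inverts it, so CF ends up equal to T. An illustrative composition of the two pseudo-ops for an add-with-carry style instruction (ADDC is not among the hunks above, so this is a sketch of how the macros combine, not the translator's actual output):

/* Sketch: ADDC Rm, Rn  =>  Rn = Rn + Rm + T, T = carry out.
 * load_reg/store_reg are the translator helpers used in the hunks above. */
load_reg( R_EAX, Rm );
load_reg( R_ECX, Rn );
LDC_t();                      /* x86 CF := sh4r.t */
ADC_r32_r32( R_EAX, R_ECX );  /* ECX := ECX + EAX + CF */
store_reg( R_ECX, Rn );
SETC_t();                     /* sh4r.t := carry out */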