/**
 * SH4 => x86 translation. This version does no real optimization; it just
 * outputs straight-line x86 code - it mainly exists to provide a baseline
 * against which to test the optimizing versions.
 *
 * Copyright (c) 2007 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include "sh4/sh4core.h"
#include "sh4/sh4dasm.h"
#include "sh4/sh4trans.h"
#include "sh4/sh4stat.h"
#include "sh4/sh4mmio.h"
#include "xlat/xltcache.h"
#include "xlat/x86/x86op.h"
#include "x86dasm/x86dasm.h"
#define DEFAULT_BACKPATCH_SIZE 4096
/* Offset of a reg relative to the sh4r structure, biased by -128: the
 * generated code keeps (&sh4r + 128) in %ebp so that single-byte
 * displacements reach further into the structure. */
#define REG_OFFSET(reg) (((char *)&sh4r.reg) - ((char *)&sh4r) - 128)
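/* Illustrative sketch (not from the original source): with %ebp holding
 * &sh4r + 128, a register access compiles to a single byte-displacement
 * move, e.g.
 *
 *     MOVL_rbpdisp_r32( R_PC, REG_EAX );  // mov disp8(%ebp), %eax
 *
 * where the displacement is REG_OFFSET(pc); its exact value depends on the
 * sh4r layout. */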
#define R_T      REG_OFFSET(t)
#define R_Q      REG_OFFSET(q)
#define R_S      REG_OFFSET(s)
#define R_M      REG_OFFSET(m)
#define R_SR     REG_OFFSET(sr)
#define R_GBR    REG_OFFSET(gbr)
#define R_SSR    REG_OFFSET(ssr)
#define R_SPC    REG_OFFSET(spc)
#define R_VBR    REG_OFFSET(vbr)
#define R_MACH   (REG_OFFSET(mac)+4)
#define R_MACL   REG_OFFSET(mac)
#define R_PC     REG_OFFSET(pc)
#define R_NEW_PC REG_OFFSET(new_pc)
#define R_PR     REG_OFFSET(pr)
#define R_SGR    REG_OFFSET(sgr)
#define R_FPUL   REG_OFFSET(fpul)
#define R_FPSCR  REG_OFFSET(fpscr)
#define R_DBR    REG_OFFSET(dbr)
#define R_R(rn)  REG_OFFSET(r[rn])
#define R_FR(f)  REG_OFFSET(fr[0][(f)^1])
#define R_XF(f)  REG_OFFSET(fr[1][(f)^1])
#define R_DR(f)  REG_OFFSET(fr[(f)&1][(f)&0x0E])
#define R_DRL(f) REG_OFFSET(fr[(f)&1][(f)|0x01])
#define R_DRH(f) REG_OFFSET(fr[(f)&1][(f)&0x0E])
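/* Layout note inferred from the macros above: each pair of single-precision
 * registers is stored word-swapped so that a DR pair reads as one host
 * little-endian double - hence the ^1 in R_FR/R_XF, and R_DRL/R_DRH selecting
 * the odd/even word of the pair (e.g. R_FR(0) == REG_OFFSET(fr[0][1])). */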
#define SH4_MODE_UNKNOWN -1
struct backpatch_record {
    uint32_t fixup_offset;
    uint32_t fixup_icount;
    int32_t exc_code;
};
/**
 * Struct to manage internal translation state. This state is not saved -
 * it is only valid between calls to sh4_translate_begin_block() and
 * sh4_translate_end_block()
 */
struct sh4_x86_state {
    int tstate;             /* Host condition code currently holding SH4 T, or TSTATE_NONE */
    int in_delay_slot;      /* DELAY_NONE unless currently translating a delay slot */
    gboolean fpuen_checked; /* true if we've already checked fpu enabled. */
    gboolean branch_taken;  /* true if we branched unconditionally */
    gboolean double_prec;   /* true if FPU is in double-precision mode */
    gboolean double_size;   /* true if FPU is in double-size mode */
    gboolean sse3_enabled;  /* true if host supports SSE3 instructions */
    uint32_t block_start_pc;
    uint32_t stack_posn;    /* Trace stack height for alignment purposes */
    uint32_t sh4_mode;      /* Mirror of sh4r.xlat_sh4_mode */
    gboolean fastmem;       /* true to allow direct page references (see sh4_translate_set_fastmem) */
    uint8_t *code;          /* Start of the current block's generated code */
    gboolean tlb_on;        /* True if tlb translation is active */
    struct mem_region_fn **priv_address_space;
    struct mem_region_fn **user_address_space;

    /* Instrumentation */
    xlat_block_begin_callback_t begin_callback;
    xlat_block_end_callback_t end_callback;

    /* Allocated memory for the (block-wide) back-patch list */
    struct backpatch_record *backpatch_list;
    uint32_t backpatch_posn;
    uint32_t backpatch_size;
};

static struct sh4_x86_state sh4_x86;
static uint32_t max_int = 0x7FFFFFFF;
static uint32_t min_int = 0x80000000;
static uint32_t save_fcw; /* saved value of the FPU control word */
static uint32_t trunc_fcw = 0x0F7F; /* FCW value for truncation mode */
static void FASTCALL sh4_translate_get_code_and_backpatch( uint32_t pc );
static void sh4_x86_translate_unlink_block( void *use_list );
static struct x86_symbol x86_symbol_table[] = {
    { "sh4r+128", ((char *)&sh4r)+128 },
    { "sh4_cpu_period", &sh4_cpu_period },
    { "sh4_address_space", NULL },
    { "sh4_user_address_space", NULL },
    { "sh4_translate_breakpoint_hit", sh4_translate_breakpoint_hit },
    { "sh4_translate_get_code_and_backpatch", sh4_translate_get_code_and_backpatch },
    { "sh4_write_fpscr", sh4_write_fpscr },
    { "sh4_write_sr", sh4_write_sr },
    { "sh4_read_sr", sh4_read_sr },
    { "sh4_raise_exception", sh4_raise_exception },
    { "sh4_sleep", sh4_sleep },
    { "sh4_fsca", sh4_fsca },
    { "sh4_ftrv", sh4_ftrv },
    { "sh4_switch_fr_banks", sh4_switch_fr_banks },
    { "sh4_execute_instruction", sh4_execute_instruction },
    { "signsat48", signsat48 },
    { "xlat_get_code_by_vma", xlat_get_code_by_vma },
    { "xlat_get_code", xlat_get_code }
};
static struct xlat_target_fns x86_target_fns = {
    sh4_x86_translate_unlink_block
};
gboolean is_sse3_supported()
{
    uint32_t features;

    /* SSE3 support is bit 0 of ECX from CPUID leaf 1 */
    __asm__ __volatile__(
        "mov $0x01, %%eax\n\t"
        "cpuid\n\t" : "=c" (features) : : "eax", "edx", "ebx");
    return (features & 1) ? TRUE : FALSE;
}
void sh4_translate_set_address_space( struct mem_region_fn **priv, struct mem_region_fn **user )
{
    sh4_x86.priv_address_space = priv;
    sh4_x86.user_address_space = user;
    x86_symbol_table[2].ptr = priv;
    x86_symbol_table[3].ptr = user;
}
void sh4_translate_init(void)
{
    sh4_x86.backpatch_list = malloc(DEFAULT_BACKPATCH_SIZE);
    sh4_x86.backpatch_size = DEFAULT_BACKPATCH_SIZE / sizeof(struct backpatch_record);
    sh4_x86.begin_callback = NULL;
    sh4_x86.end_callback = NULL;
    sh4_translate_set_address_space( sh4_address_space, sh4_user_address_space );
    sh4_x86.fastmem = TRUE;
    sh4_x86.sse3_enabled = is_sse3_supported();

    x86_set_symtab( x86_symbol_table, sizeof(x86_symbol_table)/sizeof(struct x86_symbol) );
    xlat_set_target_fns(&x86_target_fns);
}
void sh4_translate_set_callbacks( xlat_block_begin_callback_t begin, xlat_block_end_callback_t end )
{
    sh4_x86.begin_callback = begin;
    sh4_x86.end_callback = end;
}

void sh4_translate_set_fastmem( gboolean flag )
{
    sh4_x86.fastmem = flag;
}
/**
 * Disassemble the given translated code block and its source SH4 code block
 * side-by-side. The current native pc will be marked if non-null.
 */
void sh4_translate_disasm_block( FILE *out, void *code, sh4addr_t source_start, void *native_pc )
{
    char buf[256];
    char op[256];

    uintptr_t target_start = (uintptr_t)code, target_pc;
    uintptr_t target_end = target_start + xlat_get_code_size(code);
    uint32_t source_pc = source_start;
    uint32_t source_end = source_pc;
    xlat_recovery_record_t source_recov_table = XLAT_RECOVERY_TABLE(code);
    xlat_recovery_record_t source_recov_end = source_recov_table + XLAT_BLOCK_FOR_CODE(code)->recover_table_size - 1;
    for( target_pc = target_start; target_pc < target_end; ) {
        uintptr_t pc2 = x86_disasm_instruction( target_pc, buf, sizeof(buf), op );
#if SIZEOF_VOID_P == 8
        fprintf( out, "%c%016lx: %-30s %-40s", (target_pc == (uintptr_t)native_pc ? '*' : ' '),
                 target_pc, op, buf );
#else
        fprintf( out, "%c%08lx: %-30s %-40s", (target_pc == (uintptr_t)native_pc ? '*' : ' '),
                 target_pc, op, buf );
#endif
        if( source_recov_table < source_recov_end &&
            target_pc >= (target_start + source_recov_table->xlat_offset) ) {
            source_recov_table++;
            if( source_end < (source_start + (source_recov_table->sh4_icount)*2) )
                source_end = source_start + (source_recov_table->sh4_icount)*2;
        }

        if( source_pc < source_end ) {
            uint32_t source_pc2 = sh4_disasm_instruction( source_pc, buf, sizeof(buf), op );
            fprintf( out, " %08X: %s %s\n", source_pc, op, buf );
            source_pc = source_pc2;
        } else {
            fprintf( out, "\n" );
        }

        target_pc = pc2;
    }

    while( source_pc < source_end ) {
        uint32_t source_pc2 = sh4_disasm_instruction( source_pc, buf, sizeof(buf), op );
        fprintf( out, "%*c %08X: %s %s\n", 72,' ', source_pc, op, buf );
        source_pc = source_pc2;
    }
}
static void sh4_x86_add_backpatch( uint8_t *fixup_addr, uint32_t fixup_pc, uint32_t exc_code )
{
    uint32_t reloc_size = 4;

    if( exc_code == -2 ) {
        reloc_size = sizeof(void *);
    }

    if( sh4_x86.backpatch_posn == sh4_x86.backpatch_size ) {
        sh4_x86.backpatch_size <<= 1;
        sh4_x86.backpatch_list = realloc( sh4_x86.backpatch_list,
                                          sh4_x86.backpatch_size * sizeof(struct backpatch_record));
        assert( sh4_x86.backpatch_list != NULL );
    }
    if( sh4_x86.in_delay_slot ) {
        fixup_pc -= 2;
    }

    sh4_x86.backpatch_list[sh4_x86.backpatch_posn].fixup_offset =
        (((uint8_t *)fixup_addr) - ((uint8_t *)xlat_current_block->code)) - reloc_size;
    sh4_x86.backpatch_list[sh4_x86.backpatch_posn].fixup_icount = (fixup_pc - sh4_x86.block_start_pc)>>1;
    sh4_x86.backpatch_list[sh4_x86.backpatch_posn].exc_code = exc_code;
    sh4_x86.backpatch_posn++;
}
#define TSTATE_NONE -1
#define TSTATE_O    X86_COND_O
#define TSTATE_C    X86_COND_C
#define TSTATE_E    X86_COND_E
#define TSTATE_NE   X86_COND_NE
#define TSTATE_G    X86_COND_G
#define TSTATE_GE   X86_COND_GE
#define TSTATE_A    X86_COND_A
#define TSTATE_AE   X86_COND_AE
#define MARK_JMP8(x) uint8_t *_mark_jmp_##x = (xlat_output-1)
#define JMP_TARGET(x) *_mark_jmp_##x += (xlat_output - _mark_jmp_##x)
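/* Usage sketch for the forward-jump helpers built on these macros: emit a
 * jcc with a dummy rel8, remember the fixup position, then patch the
 * displacement once the fall-through code has been emitted:
 *
 *     JNE_label(skip);     // jcc rel8(-1); MARK_JMP8 records xlat_output-1
 *     ...code to skip...
 *     JMP_TARGET(skip);    // adds the distance travelled to the rel8 byte
 */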
/* Convenience instructions */
#define LDC_t()          CMPB_imms_rbpdisp(1,R_T); CMC() /* carry = (t < 1) = !t, complemented => carry = t */
#define SETE_t()         SETCCB_cc_rbpdisp(X86_COND_E,R_T)
#define SETA_t()         SETCCB_cc_rbpdisp(X86_COND_A,R_T)
#define SETAE_t()        SETCCB_cc_rbpdisp(X86_COND_AE,R_T)
#define SETG_t()         SETCCB_cc_rbpdisp(X86_COND_G,R_T)
#define SETGE_t()        SETCCB_cc_rbpdisp(X86_COND_GE,R_T)
#define SETC_t()         SETCCB_cc_rbpdisp(X86_COND_C,R_T)
#define SETO_t()         SETCCB_cc_rbpdisp(X86_COND_O,R_T)
#define SETNE_t()        SETCCB_cc_rbpdisp(X86_COND_NE,R_T)
#define SETC_r8(r1)      SETCCB_cc_r8(X86_COND_C, r1)
#define JAE_label(label) JCC_cc_rel8(X86_COND_AE,-1); MARK_JMP8(label)
#define JBE_label(label) JCC_cc_rel8(X86_COND_BE,-1); MARK_JMP8(label)
#define JE_label(label)  JCC_cc_rel8(X86_COND_E,-1); MARK_JMP8(label)
#define JGE_label(label) JCC_cc_rel8(X86_COND_GE,-1); MARK_JMP8(label)
#define JNA_label(label) JCC_cc_rel8(X86_COND_NA,-1); MARK_JMP8(label)
#define JNE_label(label) JCC_cc_rel8(X86_COND_NE,-1); MARK_JMP8(label)
#define JNO_label(label) JCC_cc_rel8(X86_COND_NO,-1); MARK_JMP8(label)
#define JP_label(label)  JCC_cc_rel8(X86_COND_P,-1); MARK_JMP8(label)
#define JS_label(label)  JCC_cc_rel8(X86_COND_S,-1); MARK_JMP8(label)
#define JMP_label(label) JMP_rel8(-1); MARK_JMP8(label)
#define JNE_exc(exc)     JCC_cc_rel32(X86_COND_NE,0); sh4_x86_add_backpatch(xlat_output, pc, exc)
#define LOAD_t() if( sh4_x86.tstate == TSTATE_NONE ) { \
    CMPL_imms_rbpdisp( 1, R_T ); sh4_x86.tstate = TSTATE_E; }

/** Branch if T is set (either in the current cflags, or in sh4r.t) */
#define JT_label(label) LOAD_t() \
    JCC_cc_rel8(sh4_x86.tstate,-1); MARK_JMP8(label)

/** Branch if T is clear (either in the current cflags or in sh4r.t) */
#define JF_label(label) LOAD_t() \
    JCC_cc_rel8(sh4_x86.tstate^1, -1); MARK_JMP8(label)
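/* Example of the tstate cache at work (sketch): CMP/EQ Rm, Rn below leaves
 * sh4_x86.tstate = TSTATE_E, so an immediately following BT compiles to a
 * single jcc on the live flags; only when tstate is TSTATE_NONE does LOAD_t()
 * re-test sh4r.t in memory. */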
#define load_reg(x86reg,sh4reg)  MOVL_rbpdisp_r32( REG_OFFSET(r[sh4reg]), x86reg )
#define store_reg(x86reg,sh4reg) MOVL_r32_rbpdisp( x86reg, REG_OFFSET(r[sh4reg]) )
/**
 * Load an FR register (single-precision floating point) into an integer x86
 * register (eg for register-to-register moves)
 */
#define load_fr(reg,frm)  MOVL_rbpdisp_r32( REG_OFFSET(fr[0][(frm)^1]), reg )
#define load_xf(reg,frm)  MOVL_rbpdisp_r32( REG_OFFSET(fr[1][(frm)^1]), reg )

/**
 * Load the low (load_dr0) or high (load_dr1) half of a DR register (DR or XD)
 * into an integer x86 register
 */
#define load_dr0(reg,frm) MOVL_rbpdisp_r32( REG_OFFSET(fr[frm&1][frm|0x01]), reg )
#define load_dr1(reg,frm) MOVL_rbpdisp_r32( REG_OFFSET(fr[frm&1][frm&0x0E]), reg )

/**
 * Store an FR register (single-precision floating point) from an integer x86
 * register (eg for register-to-register moves)
 */
#define store_fr(reg,frm)  MOVL_r32_rbpdisp( reg, REG_OFFSET(fr[0][(frm)^1]) )
#define store_xf(reg,frm)  MOVL_r32_rbpdisp( reg, REG_OFFSET(fr[1][(frm)^1]) )

#define store_dr0(reg,frm) MOVL_r32_rbpdisp( reg, REG_OFFSET(fr[frm&1][frm|0x01]) )
#define store_dr1(reg,frm) MOVL_r32_rbpdisp( reg, REG_OFFSET(fr[frm&1][frm&0x0E]) )
#define push_fpul()   FLDF_rbpdisp(R_FPUL)
#define pop_fpul()    FSTPF_rbpdisp(R_FPUL)
#define push_fr(frm)  FLDF_rbpdisp( REG_OFFSET(fr[0][(frm)^1]) )
#define pop_fr(frm)   FSTPF_rbpdisp( REG_OFFSET(fr[0][(frm)^1]) )
#define push_xf(frm)  FLDF_rbpdisp( REG_OFFSET(fr[1][(frm)^1]) )
#define pop_xf(frm)   FSTPF_rbpdisp( REG_OFFSET(fr[1][(frm)^1]) )
#define push_dr(frm)  FLDD_rbpdisp( REG_OFFSET(fr[0][(frm)&0x0E]) )
#define pop_dr(frm)   FSTPD_rbpdisp( REG_OFFSET(fr[0][(frm)&0x0E]) )
#define push_xdr(frm) FLDD_rbpdisp( REG_OFFSET(fr[1][(frm)&0x0E]) )
#define pop_xdr(frm)  FSTPD_rbpdisp( REG_OFFSET(fr[1][(frm)&0x0E]) )
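/* A typical single-precision FPU op runs through the x87 stack with these
 * wrappers. Sketch only - FADDP_st is an assumed x86op helper name, not
 * confirmed from this file:
 *
 *     push_fr(FRm);   // FLD fr[0][FRm^1]
 *     push_fr(FRn);   // FLD fr[0][FRn^1]
 *     FADDP_st(1);    // faddp %st, %st(1)
 *     pop_fr(FRn);    // FSTP back to fr[0][FRn^1]
 */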
#ifdef ENABLE_SH4STATS
#define COUNT_INST(id) MOVL_imm32_r32( id, REG_EAX ); CALL1_ptr_r32(sh4_stats_add, REG_EAX); sh4_x86.tstate = TSTATE_NONE
#else
#define COUNT_INST(id)
#endif
/* Exception checks - Note that all exception checks will clobber EAX */
#define check_priv( ) \
    if( (sh4_x86.sh4_mode & SR_MD) == 0 ) { \
        if( sh4_x86.in_delay_slot ) { \
            exit_block_exc(EXC_SLOT_ILLEGAL, (pc-2), 4 ); \
        } else { \
            exit_block_exc(EXC_ILLEGAL, pc, 2); \
        } \
        sh4_x86.branch_taken = TRUE; \
        sh4_x86.in_delay_slot = DELAY_NONE; \
        return 2; \
    }
#define check_fpuen( ) \
    if( !sh4_x86.fpuen_checked ) {\
        sh4_x86.fpuen_checked = TRUE;\
        MOVL_rbpdisp_r32( R_SR, REG_EAX );\
        ANDL_imms_r32( SR_FD, REG_EAX );\
        if( sh4_x86.in_delay_slot ) {\
            JNE_exc(EXC_SLOT_FPU_DISABLED);\
        } else {\
            JNE_exc(EXC_FPU_DISABLED);\
        }\
        sh4_x86.tstate = TSTATE_NONE; \
    }
#define check_ralign16( x86reg ) \
    TESTL_imms_r32( 0x00000001, x86reg ); \
    JNE_exc(EXC_DATA_ADDR_READ)

#define check_walign16( x86reg ) \
    TESTL_imms_r32( 0x00000001, x86reg ); \
    JNE_exc(EXC_DATA_ADDR_WRITE)

#define check_ralign32( x86reg ) \
    TESTL_imms_r32( 0x00000003, x86reg ); \
    JNE_exc(EXC_DATA_ADDR_READ)

#define check_walign32( x86reg ) \
    TESTL_imms_r32( 0x00000003, x86reg ); \
    JNE_exc(EXC_DATA_ADDR_WRITE)

#define check_ralign64( x86reg ) \
    TESTL_imms_r32( 0x00000007, x86reg ); \
    JNE_exc(EXC_DATA_ADDR_READ)

#define check_walign64( x86reg ) \
    TESTL_imms_r32( 0x00000007, x86reg ); \
    JNE_exc(EXC_DATA_ADDR_WRITE)
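/* Each check expands to a TEST of the low address bits plus a conditional
 * branch that is later back-patched into the exception epilogue; e.g.
 * check_ralign32(REG_EAX) emits roughly:
 *
 *     test $3, %eax
 *     jne  <rel32, patched to the EXC_DATA_ADDR_READ stub>
 */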
#define address_space() ((sh4_x86.sh4_mode&SR_MD) ? (uintptr_t)sh4_x86.priv_address_space : (uintptr_t)sh4_x86.user_address_space)
/* Note: For SR.MD == 1 && MMUCR.AT == 0, there are no memory exceptions, so
 * don't waste the cycles expecting them. Otherwise we need to save the exception pointer.
 */
#ifdef HAVE_FRAME_ADDRESS
static void call_read_func(int addr_reg, int value_reg, int offset, int pc)
{
    decode_address(address_space(), addr_reg);
    if( !sh4_x86.tlb_on && (sh4_x86.sh4_mode & SR_MD) ) {
        CALL1_r32disp_r32(REG_ECX, offset, addr_reg);
    } else {
        if( addr_reg != REG_ARG1 ) {
            MOVL_r32_r32( addr_reg, REG_ARG1 );
        }
        MOVP_immptr_rptr( 0, REG_ARG2 );
        sh4_x86_add_backpatch( xlat_output, pc, -2 );
        CALL2_r32disp_r32_r32(REG_ECX, offset, REG_ARG1, REG_ARG2);
    }
    if( value_reg != REG_RESULT1 ) {
        MOVL_r32_r32( REG_RESULT1, value_reg );
    }
}

static void call_write_func(int addr_reg, int value_reg, int offset, int pc)
{
    decode_address(address_space(), addr_reg);
    if( !sh4_x86.tlb_on && (sh4_x86.sh4_mode & SR_MD) ) {
        CALL2_r32disp_r32_r32(REG_ECX, offset, addr_reg, value_reg);
    } else {
        if( value_reg != REG_ARG2 ) {
            MOVL_r32_r32( value_reg, REG_ARG2 );
        }
        if( addr_reg != REG_ARG1 ) {
            MOVL_r32_r32( addr_reg, REG_ARG1 );
        }
#if MAX_REG_ARG > 2
        MOVP_immptr_rptr( 0, REG_ARG3 );
        sh4_x86_add_backpatch( xlat_output, pc, -2 );
        CALL3_r32disp_r32_r32_r32(REG_ECX, offset, REG_ARG1, REG_ARG2, REG_ARG3);
#else
        MOVL_imm32_rspdisp( 0, 0 );
        sh4_x86_add_backpatch( xlat_output, pc, -2 );
        CALL3_r32disp_r32_r32_r32(REG_ECX, offset, REG_ARG1, REG_ARG2, 0);
#endif
    }
}
#else
static void call_read_func(int addr_reg, int value_reg, int offset, int pc)
{
    decode_address(address_space(), addr_reg);
    CALL1_r32disp_r32(REG_ECX, offset, addr_reg);
    if( value_reg != REG_RESULT1 ) {
        MOVL_r32_r32( REG_RESULT1, value_reg );
    }
}

static void call_write_func(int addr_reg, int value_reg, int offset, int pc)
{
    decode_address(address_space(), addr_reg);
    CALL2_r32disp_r32_r32(REG_ECX, offset, addr_reg, value_reg);
}
#endif
#define MEM_REGION_PTR(name) offsetof( struct mem_region_fn, name )
#define MEM_READ_BYTE( addr_reg, value_reg ) call_read_func(addr_reg, value_reg, MEM_REGION_PTR(read_byte), pc)
#define MEM_READ_BYTE_FOR_WRITE( addr_reg, value_reg ) call_read_func( addr_reg, value_reg, MEM_REGION_PTR(read_byte_for_write), pc)
#define MEM_READ_WORD( addr_reg, value_reg ) call_read_func(addr_reg, value_reg, MEM_REGION_PTR(read_word), pc)
#define MEM_READ_LONG( addr_reg, value_reg ) call_read_func(addr_reg, value_reg, MEM_REGION_PTR(read_long), pc)
#define MEM_WRITE_BYTE( addr_reg, value_reg ) call_write_func(addr_reg, value_reg, MEM_REGION_PTR(write_byte), pc)
#define MEM_WRITE_WORD( addr_reg, value_reg ) call_write_func(addr_reg, value_reg, MEM_REGION_PTR(write_word), pc)
#define MEM_WRITE_LONG( addr_reg, value_reg ) call_write_func(addr_reg, value_reg, MEM_REGION_PTR(write_long), pc)
#define MEM_PREFETCH( addr_reg ) call_read_func(addr_reg, REG_RESULT1, MEM_REGION_PTR(prefetch), pc)
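/* A complete read therefore assembles from these pieces; e.g. the body of
 * MOV.L @Rm, Rn later in this file is exactly:
 *
 *     load_reg( REG_EAX, Rm );            // address into EAX
 *     check_ralign32( REG_EAX );          // alignment test + backpatched JNE
 *     MEM_READ_LONG( REG_EAX, REG_EAX );  // decode_address + indirect call
 *     store_reg( REG_EAX, Rn );           // result back to sh4r.r[Rn]
 */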
#define SLOTILLEGAL() exit_block_exc(EXC_SLOT_ILLEGAL, pc-2, 4); sh4_x86.in_delay_slot = DELAY_NONE; return 2;
/** Offset of xlat_sh4_mode field relative to the code pointer */
#define XLAT_SH4_MODE_CODE_OFFSET (int32_t)(offsetof(struct xlat_cache_block, xlat_sh4_mode) - offsetof(struct xlat_cache_block,code) )
#define XLAT_CHAIN_CODE_OFFSET (int32_t)(offsetof(struct xlat_cache_block, chain) - offsetof(struct xlat_cache_block,code) )
#define XLAT_ACTIVE_CODE_OFFSET (int32_t)(offsetof(struct xlat_cache_block, active) - offsetof(struct xlat_cache_block,code) )
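/* With these offsets, block metadata can be reached directly from a code
 * pointer held in a register, without first recovering the xlat_cache_block
 * pointer - e.g. the mode check in jump_next_block():
 *
 *     CMPL_imms_r32disp( mode, REG_EAX, XLAT_SH4_MODE_CODE_OFFSET );
 */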
void sh4_translate_begin_block( sh4addr_t pc )
{
    sh4_x86.code = xlat_output;
    sh4_x86.in_delay_slot = FALSE;
    sh4_x86.fpuen_checked = FALSE;
    sh4_x86.branch_taken = FALSE;
    sh4_x86.backpatch_posn = 0;
    sh4_x86.block_start_pc = pc;
    sh4_x86.tlb_on = IS_TLB_ENABLED();
    sh4_x86.tstate = TSTATE_NONE;
    sh4_x86.double_prec = sh4r.fpscr & FPSCR_PR;
    sh4_x86.double_size = sh4r.fpscr & FPSCR_SZ;
    sh4_x86.sh4_mode = sh4r.xlat_sh4_mode;

    if( sh4_x86.begin_callback ) {
        CALL_ptr( sh4_x86.begin_callback );
    }
    if( sh4_profile_blocks ) {
        MOVP_immptr_rptr( sh4_x86.code + XLAT_ACTIVE_CODE_OFFSET, REG_EAX );
        ADDL_imms_r32disp( 1, REG_EAX, 0 );
    }
}
uint32_t sh4_translate_end_block_size()
{
    uint32_t epilogue_size = EPILOGUE_SIZE;
    if( sh4_x86.end_callback ) {
        epilogue_size += (CALL1_PTR_MIN_SIZE - 1);
    }
    if( sh4_x86.backpatch_posn <= 3 ) {
        epilogue_size += (sh4_x86.backpatch_posn*(12+CALL1_PTR_MIN_SIZE));
    } else {
        epilogue_size += (3*(12+CALL1_PTR_MIN_SIZE)) + (sh4_x86.backpatch_posn-3)*(15+CALL1_PTR_MIN_SIZE);
    }
    return epilogue_size;
}
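/* Worked example of the formula above: with 5 backpatch records the
 * reservation is EPILOGUE_SIZE + 3*(12+CALL1_PTR_MIN_SIZE)
 * + 2*(15+CALL1_PTR_MIN_SIZE) bytes - the first three records use the
 * shorter 12-byte recovery stub, subsequent ones the 15-byte form. */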
/**
 * Embed a breakpoint into the generated code
 */
void sh4_translate_emit_breakpoint( sh4vma_t pc )
{
    MOVL_imm32_r32( pc, REG_EAX );
    CALL1_ptr_r32( sh4_translate_breakpoint_hit, REG_EAX );
    sh4_x86.tstate = TSTATE_NONE;
}
#define UNTRANSLATABLE(pc) !IS_IN_ICACHE(pc)
/**
 * Test if the loaded target code pointer in %eax is valid, and if so jump
 * directly into it, bypassing the normal exit.
 */
static void jump_next_block()
{
    uint8_t *ptr = xlat_output;
    TESTP_rptr_rptr(REG_EAX, REG_EAX);
    JE_label(nocode);
    if( sh4_x86.sh4_mode == SH4_MODE_UNKNOWN ) {
        /* sr/fpscr was changed, possibly updated xlat_sh4_mode, so reload it */
        MOVL_rbpdisp_r32( REG_OFFSET(xlat_sh4_mode), REG_ECX );
        CMPL_r32_r32disp( REG_ECX, REG_EAX, XLAT_SH4_MODE_CODE_OFFSET );
    } else {
        CMPL_imms_r32disp( sh4_x86.sh4_mode, REG_EAX, XLAT_SH4_MODE_CODE_OFFSET );
    }
    JNE_label(wrongmode);
    LEAP_rptrdisp_rptr(REG_EAX, PROLOGUE_SIZE,REG_EAX);
    if( sh4_x86.end_callback ) {
        /* Note this does leave the stack out of alignment, but doesn't matter
         * for what we're currently using it for.
         */
        PUSH_r32(REG_EAX);
        MOVP_immptr_rptr(sh4_x86.end_callback, REG_ECX);
        JMP_rptr(REG_ECX);
    } else {
        JMP_rptr(REG_EAX);
    }
    JMP_TARGET(wrongmode);
    MOVP_rptrdisp_rptr( REG_EAX, XLAT_CHAIN_CODE_OFFSET, REG_EAX );
    int rel = ptr - xlat_output;
    JMP_prerel(rel);
    JMP_TARGET(nocode);
}
static void FASTCALL sh4_translate_get_code_and_backpatch( uint32_t pc )
{
    uint8_t *target = (uint8_t *)xlat_get_code_by_vma(pc);
    while( target != NULL && sh4r.xlat_sh4_mode != XLAT_BLOCK_MODE(target) ) {
        target = XLAT_BLOCK_CHAIN(target);
    }
    if( target == NULL ) {
        target = sh4_translate_basic_block( pc );
    }
    /* Rewrite the call site into a direct jmp, and link it onto the target
     * block's use-list */
    uint8_t *backpatch = ((uint8_t *)__builtin_return_address(0)) - (CALL1_PTR_MIN_SIZE);
    *backpatch = 0xE9; /* jmp rel32 */
    *(uint32_t *)(backpatch+1) = (uint32_t)(target-backpatch)+PROLOGUE_SIZE-5;
    *(void **)(backpatch+5) = XLAT_BLOCK_FOR_CODE(target)->use_list;
    XLAT_BLOCK_FOR_CODE(target)->use_list = backpatch;

    uint8_t * volatile *retptr = ((uint8_t * volatile *)__builtin_frame_address(0))+1;
    assert( *retptr == ((uint8_t *)__builtin_return_address(0)) );
    *retptr = backpatch;
}
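/* Call-site layout after patching (sketch): the CALL1_PTR_MIN_SIZE-byte call
 * emitted by emit_translate_and_backpatch() is overwritten with E9 <rel32>
 * (a 5-byte jmp into the target block, past its prologue), and the
 * sizeof(void*) padding bytes that follow hold the use-list link consumed by
 * sh4_x86_translate_unlink_block(). */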
static void emit_translate_and_backpatch()
{
    /* NB: this is either 7 bytes (i386) or 12 bytes (x86-64) */
    CALL1_ptr_r32(sh4_translate_get_code_and_backpatch, REG_ARG1);

    /* When patched, the jmp instruction will be 5 bytes (either platform) -
     * we need to reserve sizeof(void*) bytes for the use-list
     * pointer
     */
    if( sizeof(void*) == 8 ) {
        NOP();
    } else {
        NOP2();
    }
}
/**
 * If we're jumping to a fixed address (or at least fixed relative to the
 * current PC), then we can do a direct branch. REG_ARG1 should contain
 * the PC at this point.
 */
static void jump_next_block_fixed_pc( sh4addr_t pc )
{
    if( IS_IN_ICACHE(pc) ) {
        if( sh4_x86.sh4_mode != SH4_MODE_UNKNOWN && sh4_x86.end_callback == NULL ) {
            /* Fixed address, in cache, and fixed SH4 mode - generate a call to the
             * fetch-and-backpatch routine, which will replace the call with a branch */
            emit_translate_and_backpatch();
            return;
        } else {
            MOVP_moffptr_rax( xlat_get_lut_entry(GET_ICACHE_PHYS(pc)) );
            ANDP_imms_rptr( -4, REG_EAX );
        }
    } else if( sh4_x86.tlb_on ) {
        CALL1_ptr_r32(xlat_get_code_by_vma, REG_ARG1);
    } else {
        CALL1_ptr_r32(xlat_get_code, REG_ARG1);
    }
    jump_next_block();
}
static void sh4_x86_translate_unlink_block( void *use_list )
{
    uint8_t *tmp = xlat_output; /* In case something is active, which should never happen */
    void *next = use_list;
    while( next != NULL ) {
        xlat_output = (uint8_t *)next;
        next = *(void **)(xlat_output+5);
        emit_translate_and_backpatch();
    }
    xlat_output = tmp;
}
static void exit_block()
{
    if( sh4_x86.end_callback ) {
        MOVP_immptr_rptr(sh4_x86.end_callback, REG_ECX);
        JMP_rptr(REG_ECX);
    } else {
        RET();
    }
}
/**
 * Exit the block with sh4r.pc already written
 */
void exit_block_pcset( sh4addr_t pc )
{
    MOVL_imm32_r32( ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period, REG_ECX );
    ADDL_rbpdisp_r32( REG_OFFSET(slice_cycle), REG_ECX );
    MOVL_r32_rbpdisp( REG_ECX, REG_OFFSET(slice_cycle) );
    CMPL_r32_rbpdisp( REG_ECX, REG_OFFSET(event_pending) );
    JBE_label(exitloop);
    MOVL_rbpdisp_r32( R_PC, REG_ARG1 );
    if( sh4_x86.tlb_on ) {
        CALL1_ptr_r32(xlat_get_code_by_vma,REG_ARG1);
    } else {
        CALL1_ptr_r32(xlat_get_code,REG_ARG1);
    }
    jump_next_block();
    JMP_TARGET(exitloop);
    exit_block();
}
/**
 * Exit the block with sh4r.new_pc written with the target pc
 */
void exit_block_newpcset( sh4addr_t pc )
{
    MOVL_imm32_r32( ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period, REG_ECX );
    ADDL_rbpdisp_r32( REG_OFFSET(slice_cycle), REG_ECX );
    MOVL_r32_rbpdisp( REG_ECX, REG_OFFSET(slice_cycle) );
    MOVL_rbpdisp_r32( R_NEW_PC, REG_ARG1 );
    MOVL_r32_rbpdisp( REG_ARG1, R_PC );
    CMPL_r32_rbpdisp( REG_ECX, REG_OFFSET(event_pending) );
    JBE_label(exitloop);
    if( sh4_x86.tlb_on ) {
        CALL1_ptr_r32(xlat_get_code_by_vma,REG_ARG1);
    } else {
        CALL1_ptr_r32(xlat_get_code,REG_ARG1);
    }
    jump_next_block();
    JMP_TARGET(exitloop);
    exit_block();
}
/**
 * Exit the block to an absolute PC
 */
void exit_block_abs( sh4addr_t pc, sh4addr_t endpc )
{
    MOVL_imm32_r32( ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period, REG_ECX );
    ADDL_rbpdisp_r32( REG_OFFSET(slice_cycle), REG_ECX );
    MOVL_r32_rbpdisp( REG_ECX, REG_OFFSET(slice_cycle) );

    MOVL_imm32_r32( pc, REG_ARG1 );
    MOVL_r32_rbpdisp( REG_ARG1, R_PC );
    CMPL_r32_rbpdisp( REG_ECX, REG_OFFSET(event_pending) );
    JBE_label(exitloop);
    jump_next_block_fixed_pc(pc);
    JMP_TARGET(exitloop);
    exit_block();
}
/**
 * Exit the block to a relative PC
 */
void exit_block_rel( sh4addr_t pc, sh4addr_t endpc )
{
    MOVL_imm32_r32( ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period, REG_ECX );
    ADDL_rbpdisp_r32( REG_OFFSET(slice_cycle), REG_ECX );
    MOVL_r32_rbpdisp( REG_ECX, REG_OFFSET(slice_cycle) );

    if( pc == sh4_x86.block_start_pc && sh4_x86.sh4_mode == sh4r.xlat_sh4_mode ) {
        /* Special case for tight loops - the PC doesn't change, and
         * we already know the target address. Just check events pending before
         * looping.
         */
        CMPL_r32_rbpdisp( REG_ECX, REG_OFFSET(event_pending) );
        uint32_t backdisp = ((uintptr_t)(sh4_x86.code - xlat_output)) + PROLOGUE_SIZE;
        JCC_cc_prerel(X86_COND_A, backdisp);
    } else {
        MOVL_imm32_r32( pc - sh4_x86.block_start_pc, REG_ARG1 );
        ADDL_rbpdisp_r32( R_PC, REG_ARG1 );
        MOVL_r32_rbpdisp( REG_ARG1, R_PC );
        CMPL_r32_rbpdisp( REG_ECX, REG_OFFSET(event_pending) );
        JBE_label(exitloop2);

        jump_next_block_fixed_pc(pc);
        JMP_TARGET(exitloop2);
    }
    exit_block();
}
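/* Cycle accounting common to all of these exits: a block that has executed
 * (endpc - block_start_pc)/2 16-bit instructions adds that count times
 * sh4_cpu_period to slice_cycle (e.g. a 6-instruction block adds
 * 6*sh4_cpu_period), then compares against event_pending to choose between
 * chaining into the next block and returning to the driver loop. */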
/**
 * Exit unconditionally with a general exception
 */
void exit_block_exc( int code, sh4addr_t pc, int inst_adjust )
{
    MOVL_imm32_r32( pc - sh4_x86.block_start_pc, REG_ECX );
    ADDL_r32_rbpdisp( REG_ECX, R_PC );
    MOVL_imm32_r32( ((pc - sh4_x86.block_start_pc + inst_adjust)>>1)*sh4_cpu_period, REG_ECX );
    ADDL_r32_rbpdisp( REG_ECX, REG_OFFSET(slice_cycle) );
    MOVL_imm32_r32( code, REG_ARG1 );
    CALL1_ptr_r32( sh4_raise_exception, REG_ARG1 );
    exit_block();
}
/**
 * Embed a call to sh4_execute_instruction for situations that we
 * can't translate (just page-crossing delay slots at the moment).
 * Caller is responsible for setting new_pc before calling this function.
 *
 * Performs:
 *   Set sh4r.in_delay_slot = sh4_x86.in_delay_slot
 *   Update slice_cycle for endpc+2 (single step doesn't update slice_cycle)
 *   Call sh4_execute_instruction
 *   Call xlat_get_code_by_vma / xlat_get_code as for normal exit
 */
void exit_block_emu( sh4vma_t endpc )
{
    MOVL_imm32_r32( endpc - sh4_x86.block_start_pc, REG_ECX );   // 5
    ADDL_r32_rbpdisp( REG_ECX, R_PC );

    MOVL_imm32_r32( (((endpc - sh4_x86.block_start_pc)>>1)+1)*sh4_cpu_period, REG_ECX ); // 5
    ADDL_r32_rbpdisp( REG_ECX, REG_OFFSET(slice_cycle) );     // 6
    MOVL_imm32_r32( sh4_x86.in_delay_slot ? 1 : 0, REG_ECX );
    MOVL_r32_rbpdisp( REG_ECX, REG_OFFSET(in_delay_slot) );

    CALL_ptr( sh4_execute_instruction );
    exit_block();
}
/**
 * Write the block trailer (exception handling block)
 */
void sh4_translate_end_block( sh4addr_t pc ) {
    if( sh4_x86.branch_taken == FALSE ) {
        // Didn't exit unconditionally already, so write the termination here
        exit_block_rel( pc, pc );
    }
    if( sh4_x86.backpatch_posn != 0 ) {
        unsigned int i;
        // Exception raised - cleanup and exit
        uint8_t *end_ptr = xlat_output;
        MOVL_r32_r32( REG_EDX, REG_ECX );
        ADDL_r32_r32( REG_EDX, REG_ECX );
        ADDL_r32_rbpdisp( REG_ECX, R_SPC );
        MOVL_moffptr_eax( &sh4_cpu_period );
        INC_r32( REG_EDX ); /* Add 1 for the aborting instruction itself */
        MULL_r32( REG_EDX );
        ADDL_r32_rbpdisp( REG_EAX, REG_OFFSET(slice_cycle) );
        exit_block();

        for( i=0; i< sh4_x86.backpatch_posn; i++ ) {
            uint32_t *fixup_addr = (uint32_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset];
            if( sh4_x86.backpatch_list[i].exc_code < 0 ) {
                if( sh4_x86.backpatch_list[i].exc_code == -2 ) {
                    *((uintptr_t *)fixup_addr) = (uintptr_t)xlat_output;
                } else {
                    *fixup_addr += xlat_output - (uint8_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset] - 4;
                }
                MOVL_imm32_r32( sh4_x86.backpatch_list[i].fixup_icount, REG_EDX );
                int rel = end_ptr - xlat_output;
                JMP_prerel(rel);
            } else {
                *fixup_addr += xlat_output - (uint8_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset] - 4;
                MOVL_imm32_r32( sh4_x86.backpatch_list[i].exc_code, REG_ARG1 );
                CALL1_ptr_r32( sh4_raise_exception, REG_ARG1 );
                MOVL_imm32_r32( sh4_x86.backpatch_list[i].fixup_icount, REG_EDX );
                int rel = end_ptr - xlat_output;
                JMP_prerel(rel);
            }
        }
    }
}
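/* Fixup math used above (sketch): fixup_offset points just past the 4-byte
 * displacement of the original jcc rel32, so the repointed displacement is
 * (stub address) - (end of the jcc), i.e. xlat_output - &code[fixup_offset]
 * - 4. Records with exc_code == -2 instead receive an absolute pointer, for
 * the MOVP_immptr_rptr exception-return slots in call_read/write_func. */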
/**
 * Translate a single instruction. Delayed branches are handled specially
 * by translating both branch and delayed instruction as a single unit (as
 * they execute as one).
 *
 * The instruction MUST be in the icache (assert check)
 *
 * @return true if the instruction marks the end of a basic block
 * (eg a branch or an instruction that forces a block exit)
 */
uint32_t sh4_translate_instruction( sh4vma_t pc )
{
    uint32_t ir;
    /* Read instruction from icache */
    assert( IS_IN_ICACHE(pc) );
    ir = *(uint16_t *)GET_ICACHE_PTR(pc);

    if( !sh4_x86.in_delay_slot ) {
        sh4_translate_add_recovery( (pc - sh4_x86.block_start_pc)>>1 );
    }

    /* check for breakpoints at this pc */
    for( int i=0; i<sh4_breakpoint_count; i++ ) {
        if( sh4_breakpoints[i].address == pc ) {
            sh4_translate_emit_breakpoint(pc);
            break;
        }
    }
%%
ADD Rm, Rn {:
    COUNT_INST(I_ADD);
    load_reg( REG_EAX, Rm );
    load_reg( REG_ECX, Rn );
    ADDL_r32_r32( REG_EAX, REG_ECX );
    store_reg( REG_ECX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
:}
ADD #imm, Rn {:
    COUNT_INST(I_ADDI);
    ADDL_imms_rbpdisp( imm, REG_OFFSET(r[Rn]) );
    sh4_x86.tstate = TSTATE_NONE;
:}
ADDC Rm, Rn {:
    COUNT_INST(I_ADDC);
    if( sh4_x86.tstate != TSTATE_C ) {
        LDC_t();
    }
    load_reg( REG_EAX, Rm );
    load_reg( REG_ECX, Rn );
    ADCL_r32_r32( REG_EAX, REG_ECX );
    store_reg( REG_ECX, Rn );
    SETC_t();
    sh4_x86.tstate = TSTATE_C;
:}
ADDV Rm, Rn {:
    COUNT_INST(I_ADDV);
    load_reg( REG_EAX, Rm );
    load_reg( REG_ECX, Rn );
    ADDL_r32_r32( REG_EAX, REG_ECX );
    store_reg( REG_ECX, Rn );
    SETO_t();
    sh4_x86.tstate = TSTATE_O;
:}
AND Rm, Rn {:
    COUNT_INST(I_AND);
    load_reg( REG_EAX, Rm );
    load_reg( REG_ECX, Rn );
    ANDL_r32_r32( REG_EAX, REG_ECX );
    store_reg( REG_ECX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
:}
AND #imm, R0 {:
    COUNT_INST(I_ANDI);
    load_reg( REG_EAX, 0 );
    ANDL_imms_r32(imm, REG_EAX);
    store_reg( REG_EAX, 0 );
    sh4_x86.tstate = TSTATE_NONE;
:}
AND.B #imm, @(R0, GBR) {:
    COUNT_INST(I_ANDB);
    load_reg( REG_EAX, 0 );
    ADDL_rbpdisp_r32( R_GBR, REG_EAX );
    MOVL_r32_rspdisp(REG_EAX, 0);
    MEM_READ_BYTE_FOR_WRITE( REG_EAX, REG_EDX );
    MOVL_rspdisp_r32(0, REG_EAX);
    ANDL_imms_r32(imm, REG_EDX );
    MEM_WRITE_BYTE( REG_EAX, REG_EDX );
    sh4_x86.tstate = TSTATE_NONE;
:}
CMP/EQ Rm, Rn {:
    COUNT_INST(I_CMPEQ);
    load_reg( REG_EAX, Rm );
    load_reg( REG_ECX, Rn );
    CMPL_r32_r32( REG_EAX, REG_ECX );
    SETE_t();
    sh4_x86.tstate = TSTATE_E;
:}
CMP/EQ #imm, R0 {:
    COUNT_INST(I_CMPEQI);
    load_reg( REG_EAX, 0 );
    CMPL_imms_r32(imm, REG_EAX);
    SETE_t();
    sh4_x86.tstate = TSTATE_E;
:}
CMP/GE Rm, Rn {:
    COUNT_INST(I_CMPGE);
    load_reg( REG_EAX, Rm );
    load_reg( REG_ECX, Rn );
    CMPL_r32_r32( REG_EAX, REG_ECX );
    SETGE_t();
    sh4_x86.tstate = TSTATE_GE;
:}
CMP/GT Rm, Rn {:
    COUNT_INST(I_CMPGT);
    load_reg( REG_EAX, Rm );
    load_reg( REG_ECX, Rn );
    CMPL_r32_r32( REG_EAX, REG_ECX );
    SETG_t();
    sh4_x86.tstate = TSTATE_G;
:}
CMP/HI Rm, Rn {:
    COUNT_INST(I_CMPHI);
    load_reg( REG_EAX, Rm );
    load_reg( REG_ECX, Rn );
    CMPL_r32_r32( REG_EAX, REG_ECX );
    SETA_t();
    sh4_x86.tstate = TSTATE_A;
:}
CMP/HS Rm, Rn {:
    COUNT_INST(I_CMPHS);
    load_reg( REG_EAX, Rm );
    load_reg( REG_ECX, Rn );
    CMPL_r32_r32( REG_EAX, REG_ECX );
    SETAE_t();
    sh4_x86.tstate = TSTATE_AE;
:}
CMP/PL Rn {:
    COUNT_INST(I_CMPPL);
    load_reg( REG_EAX, Rn );
    CMPL_imms_r32( 0, REG_EAX );
    SETG_t();
    sh4_x86.tstate = TSTATE_G;
:}
CMP/PZ Rn {:
    COUNT_INST(I_CMPPZ);
    load_reg( REG_EAX, Rn );
    CMPL_imms_r32( 0, REG_EAX );
    SETGE_t();
    sh4_x86.tstate = TSTATE_GE;
:}
CMP/STR Rm, Rn {:
    COUNT_INST(I_CMPSTR);
    load_reg( REG_EAX, Rm );
    load_reg( REG_ECX, Rn );
    XORL_r32_r32( REG_ECX, REG_EAX );
    TESTB_r8_r8( REG_AL, REG_AL );
    JE_label(target1);
    TESTB_r8_r8( REG_AH, REG_AH );
    JE_label(target2);
    SHRL_imm_r32( 16, REG_EAX );
    TESTB_r8_r8( REG_AL, REG_AL );
    JE_label(target3);
    TESTB_r8_r8( REG_AH, REG_AH );
    JMP_TARGET(target1);
    JMP_TARGET(target2);
    JMP_TARGET(target3);
    SETE_t();
    sh4_x86.tstate = TSTATE_E;
:}
DIV0S Rm, Rn {:
    COUNT_INST(I_DIV0S);
    load_reg( REG_EAX, Rm );
    load_reg( REG_ECX, Rn );
    SHRL_imm_r32( 31, REG_EAX );
    SHRL_imm_r32( 31, REG_ECX );
    MOVL_r32_rbpdisp( REG_EAX, R_M );
    MOVL_r32_rbpdisp( REG_ECX, R_Q );
    CMPL_r32_r32( REG_EAX, REG_ECX );
    SETNE_t();
    sh4_x86.tstate = TSTATE_NE;
:}
DIV0U {:
    COUNT_INST(I_DIV0U);
    XORL_r32_r32( REG_EAX, REG_EAX );
    MOVL_r32_rbpdisp( REG_EAX, R_Q );
    MOVL_r32_rbpdisp( REG_EAX, R_M );
    MOVL_r32_rbpdisp( REG_EAX, R_T );
    sh4_x86.tstate = TSTATE_C; // works for DIV1
:}
DIV1 Rm, Rn {:
    COUNT_INST(I_DIV1);
    MOVL_rbpdisp_r32( R_M, REG_ECX );
    load_reg( REG_EAX, Rn );
    if( sh4_x86.tstate != TSTATE_C ) {
        LDC_t();
    }
    RCLL_imm_r32( 1, REG_EAX );
    SETC_r8( REG_DL ); // Q'
    CMPL_rbpdisp_r32( R_Q, REG_ECX );
    JE_label(mqequal);
    ADDL_rbpdisp_r32( REG_OFFSET(r[Rm]), REG_EAX );
    JMP_label(end);
    JMP_TARGET(mqequal);
    SUBL_rbpdisp_r32( REG_OFFSET(r[Rm]), REG_EAX );
    JMP_TARGET(end);
    store_reg( REG_EAX, Rn ); // Done with Rn now
    SETC_r8(REG_AL); // tmp1
    XORB_r8_r8( REG_DL, REG_AL ); // Q' = Q ^ tmp1
    XORB_r8_r8( REG_AL, REG_CL ); // Q'' = Q' ^ M
    MOVL_r32_rbpdisp( REG_ECX, R_Q );
    XORL_imms_r32( 1, REG_AL ); // T = !Q'
    MOVZXL_r8_r32( REG_AL, REG_EAX );
    MOVL_r32_rbpdisp( REG_EAX, R_T );
    sh4_x86.tstate = TSTATE_NONE;
:}
DMULS.L Rm, Rn {:
    COUNT_INST(I_DMULS);
    load_reg( REG_EAX, Rm );
    load_reg( REG_ECX, Rn );
    IMULL_r32(REG_ECX);
    MOVL_r32_rbpdisp( REG_EDX, R_MACH );
    MOVL_r32_rbpdisp( REG_EAX, R_MACL );
    sh4_x86.tstate = TSTATE_NONE;
:}
DMULU.L Rm, Rn {:
    COUNT_INST(I_DMULU);
    load_reg( REG_EAX, Rm );
    load_reg( REG_ECX, Rn );
    MULL_r32(REG_ECX);
    MOVL_r32_rbpdisp( REG_EDX, R_MACH );
    MOVL_r32_rbpdisp( REG_EAX, R_MACL );
    sh4_x86.tstate = TSTATE_NONE;
:}
DT Rn {:
    COUNT_INST(I_DT);
    load_reg( REG_EAX, Rn );
    ADDL_imms_r32( -1, REG_EAX );
    store_reg( REG_EAX, Rn );
    SETE_t();
    sh4_x86.tstate = TSTATE_E;
:}
EXTS.B Rm, Rn {:
    COUNT_INST(I_EXTSB);
    load_reg( REG_EAX, Rm );
    MOVSXL_r8_r32( REG_EAX, REG_EAX );
    store_reg( REG_EAX, Rn );
:}
EXTS.W Rm, Rn {:
    COUNT_INST(I_EXTSW);
    load_reg( REG_EAX, Rm );
    MOVSXL_r16_r32( REG_EAX, REG_EAX );
    store_reg( REG_EAX, Rn );
:}
EXTU.B Rm, Rn {:
    COUNT_INST(I_EXTUB);
    load_reg( REG_EAX, Rm );
    MOVZXL_r8_r32( REG_EAX, REG_EAX );
    store_reg( REG_EAX, Rn );
:}
EXTU.W Rm, Rn {:
    COUNT_INST(I_EXTUW);
    load_reg( REG_EAX, Rm );
    MOVZXL_r16_r32( REG_EAX, REG_EAX );
    store_reg( REG_EAX, Rn );
:}
MAC.L @Rm+, @Rn+ {:
    COUNT_INST(I_MACL);
    if( Rm == Rn ) {
        load_reg( REG_EAX, Rm );
        check_ralign32( REG_EAX );
        MEM_READ_LONG( REG_EAX, REG_EAX );
        MOVL_r32_rspdisp(REG_EAX, 0);
        load_reg( REG_EAX, Rm );
        LEAL_r32disp_r32( REG_EAX, 4, REG_EAX );
        MEM_READ_LONG( REG_EAX, REG_EAX );
        ADDL_imms_rbpdisp( 8, REG_OFFSET(r[Rn]) );
    } else {
        load_reg( REG_EAX, Rm );
        check_ralign32( REG_EAX );
        MEM_READ_LONG( REG_EAX, REG_EAX );
        MOVL_r32_rspdisp( REG_EAX, 0 );
        load_reg( REG_EAX, Rn );
        check_ralign32( REG_EAX );
        MEM_READ_LONG( REG_EAX, REG_EAX );
        ADDL_imms_rbpdisp( 4, REG_OFFSET(r[Rn]) );
        ADDL_imms_rbpdisp( 4, REG_OFFSET(r[Rm]) );
    }

    IMULL_rspdisp( 0 );
    ADDL_r32_rbpdisp( REG_EAX, R_MACL );
    ADCL_r32_rbpdisp( REG_EDX, R_MACH );

    MOVL_rbpdisp_r32( R_S, REG_ECX );
    TESTL_r32_r32(REG_ECX, REG_ECX);
    JE_label( nosat );
    CALL_ptr( signsat48 );
    JMP_TARGET( nosat );
    sh4_x86.tstate = TSTATE_NONE;
:}
MAC.W @Rm+, @Rn+ {:
    COUNT_INST(I_MACW);
    if( Rm == Rn ) {
        load_reg( REG_EAX, Rm );
        check_ralign16( REG_EAX );
        MEM_READ_WORD( REG_EAX, REG_EAX );
        MOVL_r32_rspdisp( REG_EAX, 0 );
        load_reg( REG_EAX, Rm );
        LEAL_r32disp_r32( REG_EAX, 2, REG_EAX );
        MEM_READ_WORD( REG_EAX, REG_EAX );
        ADDL_imms_rbpdisp( 4, REG_OFFSET(r[Rn]) );
        // Note translate twice in case of page boundaries. Maybe worth
        // adding a page-boundary check to skip the second translation
    } else {
        load_reg( REG_EAX, Rn );
        check_ralign16( REG_EAX );
        MEM_READ_WORD( REG_EAX, REG_EAX );
        MOVL_r32_rspdisp( REG_EAX, 0 );
        load_reg( REG_EAX, Rm );
        check_ralign16( REG_EAX );
        MEM_READ_WORD( REG_EAX, REG_EAX );
        ADDL_imms_rbpdisp( 2, REG_OFFSET(r[Rn]) );
        ADDL_imms_rbpdisp( 2, REG_OFFSET(r[Rm]) );
    }
    IMULL_rspdisp( 0 );
    MOVL_rbpdisp_r32( R_S, REG_ECX );
    TESTL_r32_r32( REG_ECX, REG_ECX );
    JE_label( nosat );

    ADDL_r32_rbpdisp( REG_EAX, R_MACL );  // 6
    JNO_label( end );                     // 2
    MOVL_imm32_r32( 1, REG_EDX );         // 5
    MOVL_r32_rbpdisp( REG_EDX, R_MACH );  // 6
    JS_label( positive );                 // 2
    MOVL_imm32_r32( 0x80000000, REG_EAX );// 5
    MOVL_r32_rbpdisp( REG_EAX, R_MACL );  // 6
    JMP_label(end2);                      // 2

    JMP_TARGET(positive);
    MOVL_imm32_r32( 0x7FFFFFFF, REG_EAX );// 5
    MOVL_r32_rbpdisp( REG_EAX, R_MACL );  // 6
    JMP_label(end3);                      // 2

    JMP_TARGET(nosat);
    ADDL_r32_rbpdisp( REG_EAX, R_MACL );  // 6
    ADCL_r32_rbpdisp( REG_EDX, R_MACH );  // 6
    JMP_TARGET(end);
    JMP_TARGET(end2);
    JMP_TARGET(end3);
    sh4_x86.tstate = TSTATE_NONE;
:}
MOVT Rn {:
    COUNT_INST(I_MOVT);
    MOVL_rbpdisp_r32( R_T, REG_EAX );
    store_reg( REG_EAX, Rn );
:}
MUL.L Rm, Rn {:
    COUNT_INST(I_MULL);
    load_reg( REG_EAX, Rm );
    load_reg( REG_ECX, Rn );
    MULL_r32( REG_ECX );
    MOVL_r32_rbpdisp( REG_EAX, R_MACL );
    sh4_x86.tstate = TSTATE_NONE;
:}
MULS.W Rm, Rn {:
    COUNT_INST(I_MULSW);
    MOVSXL_rbpdisp16_r32( R_R(Rm), REG_EAX );
    MOVSXL_rbpdisp16_r32( R_R(Rn), REG_ECX );
    MULL_r32( REG_ECX );
    MOVL_r32_rbpdisp( REG_EAX, R_MACL );
    sh4_x86.tstate = TSTATE_NONE;
:}
MULU.W Rm, Rn {:
    COUNT_INST(I_MULUW);
    MOVZXL_rbpdisp16_r32( R_R(Rm), REG_EAX );
    MOVZXL_rbpdisp16_r32( R_R(Rn), REG_ECX );
    MULL_r32( REG_ECX );
    MOVL_r32_rbpdisp( REG_EAX, R_MACL );
    sh4_x86.tstate = TSTATE_NONE;
:}
NEG Rm, Rn {:
    COUNT_INST(I_NEG);
    load_reg( REG_EAX, Rm );
    NEGL_r32( REG_EAX );
    store_reg( REG_EAX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
:}
NEGC Rm, Rn {:
    COUNT_INST(I_NEGC);
    load_reg( REG_EAX, Rm );
    XORL_r32_r32( REG_ECX, REG_ECX );
    LDC_t();
    SBBL_r32_r32( REG_EAX, REG_ECX );
    store_reg( REG_ECX, Rn );
    SETC_t();
    sh4_x86.tstate = TSTATE_C;
:}
NOT Rm, Rn {:
    COUNT_INST(I_NOT);
    load_reg( REG_EAX, Rm );
    NOTL_r32( REG_EAX );
    store_reg( REG_EAX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
:}
OR Rm, Rn {:
    COUNT_INST(I_OR);
    load_reg( REG_EAX, Rm );
    load_reg( REG_ECX, Rn );
    ORL_r32_r32( REG_EAX, REG_ECX );
    store_reg( REG_ECX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
:}
OR #imm, R0 {:
    COUNT_INST(I_ORI);
    load_reg( REG_EAX, 0 );
    ORL_imms_r32(imm, REG_EAX);
    store_reg( REG_EAX, 0 );
    sh4_x86.tstate = TSTATE_NONE;
:}
OR.B #imm, @(R0, GBR) {:
    COUNT_INST(I_ORB);
    load_reg( REG_EAX, 0 );
    ADDL_rbpdisp_r32( R_GBR, REG_EAX );
    MOVL_r32_rspdisp( REG_EAX, 0 );
    MEM_READ_BYTE_FOR_WRITE( REG_EAX, REG_EDX );
    MOVL_rspdisp_r32( 0, REG_EAX );
    ORL_imms_r32(imm, REG_EDX );
    MEM_WRITE_BYTE( REG_EAX, REG_EDX );
    sh4_x86.tstate = TSTATE_NONE;
:}
ROTCL Rn {:
    COUNT_INST(I_ROTCL);
    load_reg( REG_EAX, Rn );
    if( sh4_x86.tstate != TSTATE_C ) {
        LDC_t();
    }
    RCLL_imm_r32( 1, REG_EAX );
    store_reg( REG_EAX, Rn );
    SETC_t();
    sh4_x86.tstate = TSTATE_C;
:}
ROTCR Rn {:
    COUNT_INST(I_ROTCR);
    load_reg( REG_EAX, Rn );
    if( sh4_x86.tstate != TSTATE_C ) {
        LDC_t();
    }
    RCRL_imm_r32( 1, REG_EAX );
    store_reg( REG_EAX, Rn );
    SETC_t();
    sh4_x86.tstate = TSTATE_C;
:}
ROTL Rn {:
    COUNT_INST(I_ROTL);
    load_reg( REG_EAX, Rn );
    ROLL_imm_r32( 1, REG_EAX );
    store_reg( REG_EAX, Rn );
    SETC_t();
    sh4_x86.tstate = TSTATE_C;
:}
ROTR Rn {:
    COUNT_INST(I_ROTR);
    load_reg( REG_EAX, Rn );
    RORL_imm_r32( 1, REG_EAX );
    store_reg( REG_EAX, Rn );
    SETC_t();
    sh4_x86.tstate = TSTATE_C;
:}
SHAD Rm, Rn {:
    COUNT_INST(I_SHAD);
    /* Annoyingly enough, not directly convertible */
    load_reg( REG_EAX, Rn );
    load_reg( REG_ECX, Rm );
    CMPL_imms_r32( 0, REG_ECX );
    JGE_label(doshl);

    NEGL_r32( REG_ECX );          // 2
    ANDB_imms_r8( 0x1F, REG_CL ); // 3
    JE_label(emptysar);           // 2
    SARL_cl_r32( REG_EAX );       // 2
    JMP_label(end);               // 2

    JMP_TARGET(emptysar);
    SARL_imm_r32(31, REG_EAX );   // 3
    JMP_label(end2);

    JMP_TARGET(doshl);
    ANDB_imms_r8( 0x1F, REG_CL ); // 3
    SHLL_cl_r32( REG_EAX );       // 2
    JMP_TARGET(end);
    JMP_TARGET(end2);
    store_reg( REG_EAX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
:}
SHLD Rm, Rn {:
    COUNT_INST(I_SHLD);
    load_reg( REG_EAX, Rn );
    load_reg( REG_ECX, Rm );
    CMPL_imms_r32( 0, REG_ECX );
    JGE_label(doshl);

    NEGL_r32( REG_ECX );          // 2
    ANDB_imms_r8( 0x1F, REG_CL ); // 3
    JE_label(emptyshr );
    SHRL_cl_r32( REG_EAX );       // 2
    JMP_label(end);               // 2

    JMP_TARGET(emptyshr);
    XORL_r32_r32( REG_EAX, REG_EAX );
    JMP_label(end2);

    JMP_TARGET(doshl);
    ANDB_imms_r8( 0x1F, REG_CL ); // 3
    SHLL_cl_r32( REG_EAX );       // 2
    JMP_TARGET(end);
    JMP_TARGET(end2);
    store_reg( REG_EAX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
:}
SHAL Rn {:
    COUNT_INST(I_SHAL);
    load_reg( REG_EAX, Rn );
    SHLL_imm_r32( 1, REG_EAX );
    SETC_t();
    store_reg( REG_EAX, Rn );
    sh4_x86.tstate = TSTATE_C;
:}
SHAR Rn {:
    COUNT_INST(I_SHAR);
    load_reg( REG_EAX, Rn );
    SARL_imm_r32( 1, REG_EAX );
    SETC_t();
    store_reg( REG_EAX, Rn );
    sh4_x86.tstate = TSTATE_C;
:}
SHLL Rn {:
    COUNT_INST(I_SHLL);
    load_reg( REG_EAX, Rn );
    SHLL_imm_r32( 1, REG_EAX );
    SETC_t();
    store_reg( REG_EAX, Rn );
    sh4_x86.tstate = TSTATE_C;
:}
SHLL2 Rn {:
    COUNT_INST(I_SHLL);
    load_reg( REG_EAX, Rn );
    SHLL_imm_r32( 2, REG_EAX );
    store_reg( REG_EAX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
:}
SHLL8 Rn {:
    COUNT_INST(I_SHLL);
    load_reg( REG_EAX, Rn );
    SHLL_imm_r32( 8, REG_EAX );
    store_reg( REG_EAX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
:}
SHLL16 Rn {:
    COUNT_INST(I_SHLL);
    load_reg( REG_EAX, Rn );
    SHLL_imm_r32( 16, REG_EAX );
    store_reg( REG_EAX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
:}
SHLR Rn {:
    COUNT_INST(I_SHLR);
    load_reg( REG_EAX, Rn );
    SHRL_imm_r32( 1, REG_EAX );
    SETC_t();
    store_reg( REG_EAX, Rn );
    sh4_x86.tstate = TSTATE_C;
:}
SHLR2 Rn {:
    COUNT_INST(I_SHLR);
    load_reg( REG_EAX, Rn );
    SHRL_imm_r32( 2, REG_EAX );
    store_reg( REG_EAX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
:}
SHLR8 Rn {:
    COUNT_INST(I_SHLR);
    load_reg( REG_EAX, Rn );
    SHRL_imm_r32( 8, REG_EAX );
    store_reg( REG_EAX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
:}
SHLR16 Rn {:
    COUNT_INST(I_SHLR);
    load_reg( REG_EAX, Rn );
    SHRL_imm_r32( 16, REG_EAX );
    store_reg( REG_EAX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
:}
SUB Rm, Rn {:
    COUNT_INST(I_SUB);
    load_reg( REG_EAX, Rm );
    load_reg( REG_ECX, Rn );
    SUBL_r32_r32( REG_EAX, REG_ECX );
    store_reg( REG_ECX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
:}
SUBC Rm, Rn {:
    COUNT_INST(I_SUBC);
    load_reg( REG_EAX, Rm );
    load_reg( REG_ECX, Rn );
    if( sh4_x86.tstate != TSTATE_C ) {
        LDC_t();
    }
    SBBL_r32_r32( REG_EAX, REG_ECX );
    store_reg( REG_ECX, Rn );
    SETC_t();
    sh4_x86.tstate = TSTATE_C;
:}
SUBV Rm, Rn {:
    COUNT_INST(I_SUBV);
    load_reg( REG_EAX, Rm );
    load_reg( REG_ECX, Rn );
    SUBL_r32_r32( REG_EAX, REG_ECX );
    store_reg( REG_ECX, Rn );
    SETO_t();
    sh4_x86.tstate = TSTATE_O;
:}
SWAP.B Rm, Rn {:
    COUNT_INST(I_SWAPB);
    load_reg( REG_EAX, Rm );
    XCHGB_r8_r8( REG_AL, REG_AH ); // NB: does not touch EFLAGS
    store_reg( REG_EAX, Rn );
:}
SWAP.W Rm, Rn {:
    COUNT_INST(I_SWAPW);
    load_reg( REG_EAX, Rm );
    MOVL_r32_r32( REG_EAX, REG_ECX );
    SHLL_imm_r32( 16, REG_ECX );
    SHRL_imm_r32( 16, REG_EAX );
    ORL_r32_r32( REG_EAX, REG_ECX );
    store_reg( REG_ECX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
:}
TAS.B @Rn {:
    COUNT_INST(I_TASB);
    load_reg( REG_EAX, Rn );
    MOVL_r32_rspdisp( REG_EAX, 0 );
    MEM_READ_BYTE_FOR_WRITE( REG_EAX, REG_EDX );
    TESTB_r8_r8( REG_DL, REG_DL );
    SETE_t();
    ORB_imms_r8( 0x80, REG_DL );
    MOVL_rspdisp_r32( 0, REG_EAX );
    MEM_WRITE_BYTE( REG_EAX, REG_EDX );
    sh4_x86.tstate = TSTATE_NONE;
:}
TST Rm, Rn {:
    COUNT_INST(I_TST);
    load_reg( REG_EAX, Rm );
    load_reg( REG_ECX, Rn );
    TESTL_r32_r32( REG_EAX, REG_ECX );
    SETE_t();
    sh4_x86.tstate = TSTATE_E;
:}
TST #imm, R0 {:
    COUNT_INST(I_TSTI);
    load_reg( REG_EAX, 0 );
    TESTL_imms_r32( imm, REG_EAX );
    SETE_t();
    sh4_x86.tstate = TSTATE_E;
:}
TST.B #imm, @(R0, GBR) {:
    COUNT_INST(I_TSTB);
    load_reg( REG_EAX, 0);
    ADDL_rbpdisp_r32( R_GBR, REG_EAX );
    MEM_READ_BYTE( REG_EAX, REG_EAX );
    TESTB_imms_r8( imm, REG_AL );
    SETE_t();
    sh4_x86.tstate = TSTATE_E;
:}
XOR Rm, Rn {:
    COUNT_INST(I_XOR);
    load_reg( REG_EAX, Rm );
    load_reg( REG_ECX, Rn );
    XORL_r32_r32( REG_EAX, REG_ECX );
    store_reg( REG_ECX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
:}
XOR #imm, R0 {:
    COUNT_INST(I_XORI);
    load_reg( REG_EAX, 0 );
    XORL_imms_r32( imm, REG_EAX );
    store_reg( REG_EAX, 0 );
    sh4_x86.tstate = TSTATE_NONE;
:}
XOR.B #imm, @(R0, GBR) {:
    COUNT_INST(I_XORB);
    load_reg( REG_EAX, 0 );
    ADDL_rbpdisp_r32( R_GBR, REG_EAX );
    MOVL_r32_rspdisp( REG_EAX, 0 );
    MEM_READ_BYTE_FOR_WRITE(REG_EAX, REG_EDX);
    MOVL_rspdisp_r32( 0, REG_EAX );
    XORL_imms_r32( imm, REG_EDX );
    MEM_WRITE_BYTE( REG_EAX, REG_EDX );
    sh4_x86.tstate = TSTATE_NONE;
:}
XTRCT Rm, Rn {:
    COUNT_INST(I_XTRCT);
    load_reg( REG_EAX, Rm );
    load_reg( REG_ECX, Rn );
    SHLL_imm_r32( 16, REG_EAX );
    SHRL_imm_r32( 16, REG_ECX );
    ORL_r32_r32( REG_EAX, REG_ECX );
    store_reg( REG_ECX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
:}
/* Data move instructions */
MOV Rm, Rn {:
    COUNT_INST(I_MOV);
    load_reg( REG_EAX, Rm );
    store_reg( REG_EAX, Rn );
:}
MOV #imm, Rn {:
    COUNT_INST(I_MOVI);
    MOVL_imm32_r32( imm, REG_EAX );
    store_reg( REG_EAX, Rn );
:}
MOV.B Rm, @Rn {:
    COUNT_INST(I_MOVB);
    load_reg( REG_EAX, Rn );
    load_reg( REG_EDX, Rm );
    MEM_WRITE_BYTE( REG_EAX, REG_EDX );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.B Rm, @-Rn {:
    COUNT_INST(I_MOVB);
    load_reg( REG_EAX, Rn );
    LEAL_r32disp_r32( REG_EAX, -1, REG_EAX );
    load_reg( REG_EDX, Rm );
    MEM_WRITE_BYTE( REG_EAX, REG_EDX );
    ADDL_imms_rbpdisp( -1, REG_OFFSET(r[Rn]) );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.B Rm, @(R0, Rn) {:
    COUNT_INST(I_MOVB);
    load_reg( REG_EAX, 0 );
    ADDL_rbpdisp_r32( REG_OFFSET(r[Rn]), REG_EAX );
    load_reg( REG_EDX, Rm );
    MEM_WRITE_BYTE( REG_EAX, REG_EDX );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.B R0, @(disp, GBR) {:
    COUNT_INST(I_MOVB);
    MOVL_rbpdisp_r32( R_GBR, REG_EAX );
    ADDL_imms_r32( disp, REG_EAX );
    load_reg( REG_EDX, 0 );
    MEM_WRITE_BYTE( REG_EAX, REG_EDX );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.B R0, @(disp, Rn) {:
    COUNT_INST(I_MOVB);
    load_reg( REG_EAX, Rn );
    ADDL_imms_r32( disp, REG_EAX );
    load_reg( REG_EDX, 0 );
    MEM_WRITE_BYTE( REG_EAX, REG_EDX );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.B @Rm, Rn {:
    COUNT_INST(I_MOVB);
    load_reg( REG_EAX, Rm );
    MEM_READ_BYTE( REG_EAX, REG_EAX );
    store_reg( REG_EAX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.B @Rm+, Rn {:
    COUNT_INST(I_MOVB);
    load_reg( REG_EAX, Rm );
    MEM_READ_BYTE( REG_EAX, REG_EAX );
    if( Rm != Rn ) {
        ADDL_imms_rbpdisp( 1, REG_OFFSET(r[Rm]) );
    }
    store_reg( REG_EAX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.B @(R0, Rm), Rn {:
    COUNT_INST(I_MOVB);
    load_reg( REG_EAX, 0 );
    ADDL_rbpdisp_r32( REG_OFFSET(r[Rm]), REG_EAX );
    MEM_READ_BYTE( REG_EAX, REG_EAX );
    store_reg( REG_EAX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.B @(disp, GBR), R0 {:
    COUNT_INST(I_MOVB);
    MOVL_rbpdisp_r32( R_GBR, REG_EAX );
    ADDL_imms_r32( disp, REG_EAX );
    MEM_READ_BYTE( REG_EAX, REG_EAX );
    store_reg( REG_EAX, 0 );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.B @(disp, Rm), R0 {:
    COUNT_INST(I_MOVB);
    load_reg( REG_EAX, Rm );
    ADDL_imms_r32( disp, REG_EAX );
    MEM_READ_BYTE( REG_EAX, REG_EAX );
    store_reg( REG_EAX, 0 );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.L Rm, @Rn {:
    COUNT_INST(I_MOVL);
    load_reg( REG_EAX, Rn );
    check_walign32(REG_EAX);
    MOVL_r32_r32( REG_EAX, REG_ECX );
    ANDL_imms_r32( 0xFC000000, REG_ECX );
    CMPL_imms_r32( 0xE0000000, REG_ECX );
    JNE_label( notsq );
    ANDL_imms_r32( 0x3C, REG_EAX );
    load_reg( REG_EDX, Rm );
    MOVL_r32_sib( REG_EDX, 0, REG_EBP, REG_EAX, REG_OFFSET(store_queue) );
    JMP_label(end);
    JMP_TARGET( notsq );
    load_reg( REG_EDX, Rm );
    MEM_WRITE_LONG( REG_EAX, REG_EDX );
    JMP_TARGET(end);
    sh4_x86.tstate = TSTATE_NONE;
:}
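/* Store-queue fast path above: addresses in the 0xE0000000 region bypass the
 * memory call entirely and write straight into sh4r.store_queue; the 0x3C
 * mask selects the longword slot within the two 32-byte queues. */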
MOV.L Rm, @-Rn {:
    COUNT_INST(I_MOVL);
    load_reg( REG_EAX, Rn );
    ADDL_imms_r32( -4, REG_EAX );
    check_walign32( REG_EAX );
    load_reg( REG_EDX, Rm );
    MEM_WRITE_LONG( REG_EAX, REG_EDX );
    ADDL_imms_rbpdisp( -4, REG_OFFSET(r[Rn]) );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.L Rm, @(R0, Rn) {:
    COUNT_INST(I_MOVL);
    load_reg( REG_EAX, 0 );
    ADDL_rbpdisp_r32( REG_OFFSET(r[Rn]), REG_EAX );
    check_walign32( REG_EAX );
    load_reg( REG_EDX, Rm );
    MEM_WRITE_LONG( REG_EAX, REG_EDX );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.L R0, @(disp, GBR) {:
    COUNT_INST(I_MOVL);
    MOVL_rbpdisp_r32( R_GBR, REG_EAX );
    ADDL_imms_r32( disp, REG_EAX );
    check_walign32( REG_EAX );
    load_reg( REG_EDX, 0 );
    MEM_WRITE_LONG( REG_EAX, REG_EDX );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.L Rm, @(disp, Rn) {:
    COUNT_INST(I_MOVL);
    load_reg( REG_EAX, Rn );
    ADDL_imms_r32( disp, REG_EAX );
    check_walign32( REG_EAX );
    MOVL_r32_r32( REG_EAX, REG_ECX );
    ANDL_imms_r32( 0xFC000000, REG_ECX );
    CMPL_imms_r32( 0xE0000000, REG_ECX );
    JNE_label( notsq );
    ANDL_imms_r32( 0x3C, REG_EAX );
    load_reg( REG_EDX, Rm );
    MOVL_r32_sib( REG_EDX, 0, REG_EBP, REG_EAX, REG_OFFSET(store_queue) );
    JMP_label(end);
    JMP_TARGET( notsq );
    load_reg( REG_EDX, Rm );
    MEM_WRITE_LONG( REG_EAX, REG_EDX );
    JMP_TARGET(end);
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.L @Rm, Rn {:
    COUNT_INST(I_MOVL);
    load_reg( REG_EAX, Rm );
    check_ralign32( REG_EAX );
    MEM_READ_LONG( REG_EAX, REG_EAX );
    store_reg( REG_EAX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.L @Rm+, Rn {:
    COUNT_INST(I_MOVL);
    load_reg( REG_EAX, Rm );
    check_ralign32( REG_EAX );
    MEM_READ_LONG( REG_EAX, REG_EAX );
    if( Rm != Rn ) {
        ADDL_imms_rbpdisp( 4, REG_OFFSET(r[Rm]) );
    }
    store_reg( REG_EAX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.L @(R0, Rm), Rn {:
    COUNT_INST(I_MOVL);
    load_reg( REG_EAX, 0 );
    ADDL_rbpdisp_r32( REG_OFFSET(r[Rm]), REG_EAX );
    check_ralign32( REG_EAX );
    MEM_READ_LONG( REG_EAX, REG_EAX );
    store_reg( REG_EAX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.L @(disp, GBR), R0 {:
    COUNT_INST(I_MOVL);
    MOVL_rbpdisp_r32( R_GBR, REG_EAX );
    ADDL_imms_r32( disp, REG_EAX );
    check_ralign32( REG_EAX );
    MEM_READ_LONG( REG_EAX, REG_EAX );
    store_reg( REG_EAX, 0 );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.L @(disp, PC), Rn {:
    COUNT_INST(I_MOVLPC);
    if( sh4_x86.in_delay_slot ) {
        SLOTILLEGAL();
    } else {
        uint32_t target = (pc & 0xFFFFFFFC) + disp + 4;
        if( sh4_x86.fastmem && IS_IN_ICACHE(target) ) {
            // If the target address is in the same page as the code, it's
            // pretty safe to just ref it directly and circumvent the whole
            // memory subsystem. (this is a big performance win)

            // FIXME: There's a corner-case that's not handled here when
            // the current code-page is in the ITLB but not in the UTLB.
            // (should generate a TLB miss although need to test SH4
            // behaviour to confirm) Unlikely to be anyone depending on this
            // behaviour though.
            sh4ptr_t ptr = GET_ICACHE_PTR(target);
            MOVL_moffptr_eax( ptr );
        } else {
            // Note: we use sh4r.pc for the calc as we could be running at a
            // different virtual address than the translation was done with,
            // but we can safely assume that the low bits are the same.
            MOVL_imm32_r32( (pc-sh4_x86.block_start_pc) + disp + 4 - (pc&0x03), REG_EAX );
            ADDL_rbpdisp_r32( R_PC, REG_EAX );
            MEM_READ_LONG( REG_EAX, REG_EAX );
            sh4_x86.tstate = TSTATE_NONE;
        }
        store_reg( REG_EAX, Rn );
    }
:}
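/* Address math sketch for the non-fastmem path above: the literal lives at
 * (pc & ~3) + disp + 4, so the generated code adds
 * (pc - block_start_pc) + disp + 4 - (pc & 3) to the runtime sh4r.pc - the
 * same target, provided the low bits of the runtime PC match the
 * translation-time PC (which the comment above argues is safe to assume). */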
MOV.L @(disp, Rm), Rn {:
    COUNT_INST(I_MOVL);
    load_reg( REG_EAX, Rm );
    ADDL_imms_r32( disp, REG_EAX );
    check_ralign32( REG_EAX );
    MEM_READ_LONG( REG_EAX, REG_EAX );
    store_reg( REG_EAX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.W Rm, @Rn {:
    COUNT_INST(I_MOVW);
    load_reg( REG_EAX, Rn );
    check_walign16( REG_EAX );
    load_reg( REG_EDX, Rm );
    MEM_WRITE_WORD( REG_EAX, REG_EDX );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.W Rm, @-Rn {:
    COUNT_INST(I_MOVW);
    load_reg( REG_EAX, Rn );
    check_walign16( REG_EAX );
    LEAL_r32disp_r32( REG_EAX, -2, REG_EAX );
    load_reg( REG_EDX, Rm );
    MEM_WRITE_WORD( REG_EAX, REG_EDX );
    ADDL_imms_rbpdisp( -2, REG_OFFSET(r[Rn]) );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.W Rm, @(R0, Rn) {:
    COUNT_INST(I_MOVW);
    load_reg( REG_EAX, 0 );
    ADDL_rbpdisp_r32( REG_OFFSET(r[Rn]), REG_EAX );
    check_walign16( REG_EAX );
    load_reg( REG_EDX, Rm );
    MEM_WRITE_WORD( REG_EAX, REG_EDX );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.W R0, @(disp, GBR) {:
    COUNT_INST(I_MOVW);
    MOVL_rbpdisp_r32( R_GBR, REG_EAX );
    ADDL_imms_r32( disp, REG_EAX );
    check_walign16( REG_EAX );
    load_reg( REG_EDX, 0 );
    MEM_WRITE_WORD( REG_EAX, REG_EDX );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.W R0, @(disp, Rn) {:
    COUNT_INST(I_MOVW);
    load_reg( REG_EAX, Rn );
    ADDL_imms_r32( disp, REG_EAX );
    check_walign16( REG_EAX );
    load_reg( REG_EDX, 0 );
    MEM_WRITE_WORD( REG_EAX, REG_EDX );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.W @Rm, Rn {:
    COUNT_INST(I_MOVW);
    load_reg( REG_EAX, Rm );
    check_ralign16( REG_EAX );
    MEM_READ_WORD( REG_EAX, REG_EAX );
    store_reg( REG_EAX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.W @Rm+, Rn {:
    COUNT_INST(I_MOVW);
    load_reg( REG_EAX, Rm );
    check_ralign16( REG_EAX );
    MEM_READ_WORD( REG_EAX, REG_EAX );
    if( Rm != Rn ) {
        ADDL_imms_rbpdisp( 2, REG_OFFSET(r[Rm]) );
    }
    store_reg( REG_EAX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.W @(R0, Rm), Rn {:
    COUNT_INST(I_MOVW);
    load_reg( REG_EAX, 0 );
    ADDL_rbpdisp_r32( REG_OFFSET(r[Rm]), REG_EAX );
    check_ralign16( REG_EAX );
    MEM_READ_WORD( REG_EAX, REG_EAX );
    store_reg( REG_EAX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.W @(disp, GBR), R0 {:
    COUNT_INST(I_MOVW);
    MOVL_rbpdisp_r32( R_GBR, REG_EAX );
    ADDL_imms_r32( disp, REG_EAX );
    check_ralign16( REG_EAX );
    MEM_READ_WORD( REG_EAX, REG_EAX );
    store_reg( REG_EAX, 0 );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.W @(disp, PC), Rn {:
    COUNT_INST(I_MOVW);
    if( sh4_x86.in_delay_slot ) {
        SLOTILLEGAL();
    } else {
        // See comments for MOV.L @(disp, PC), Rn
        uint32_t target = pc + disp + 4;
        if( sh4_x86.fastmem && IS_IN_ICACHE(target) ) {
            sh4ptr_t ptr = GET_ICACHE_PTR(target);
            MOVL_moffptr_eax( ptr );
            MOVSXL_r16_r32( REG_EAX, REG_EAX );
        } else {
            MOVL_imm32_r32( (pc - sh4_x86.block_start_pc) + disp + 4, REG_EAX );
            ADDL_rbpdisp_r32( R_PC, REG_EAX );
            MEM_READ_WORD( REG_EAX, REG_EAX );
            sh4_x86.tstate = TSTATE_NONE;
        }
        store_reg( REG_EAX, Rn );
    }
:}
MOV.W @(disp, Rm), R0 {:
    COUNT_INST(I_MOVW);
    load_reg( REG_EAX, Rm );
    ADDL_imms_r32( disp, REG_EAX );
    check_ralign16( REG_EAX );
    MEM_READ_WORD( REG_EAX, REG_EAX );
    store_reg( REG_EAX, 0 );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOVA @(disp, PC), R0 {:
    COUNT_INST(I_MOVA);
    if( sh4_x86.in_delay_slot ) {
        SLOTILLEGAL();
    } else {
        MOVL_imm32_r32( (pc - sh4_x86.block_start_pc) + disp + 4 - (pc&0x03), REG_ECX );
        ADDL_rbpdisp_r32( R_PC, REG_ECX );
        store_reg( REG_ECX, 0 );
        sh4_x86.tstate = TSTATE_NONE;
    }
:}
MOVCA.L R0, @Rn {:
    COUNT_INST(I_MOVCA);
    load_reg( REG_EAX, Rn );
    check_walign32( REG_EAX );
    load_reg( REG_EDX, 0 );
    MEM_WRITE_LONG( REG_EAX, REG_EDX );
    sh4_x86.tstate = TSTATE_NONE;
:}
/* Control transfer instructions */
BF disp {:
    COUNT_INST(I_BF);
    if( sh4_x86.in_delay_slot ) {
        SLOTILLEGAL();
    } else {
        sh4vma_t target = disp + pc + 4;
        JT_label( nottaken );
        exit_block_rel(target, pc+2 );
        JMP_TARGET(nottaken);
        return 2;
    }
:}
BF/S disp {:
    COUNT_INST(I_BFS);
    if( sh4_x86.in_delay_slot ) {
        SLOTILLEGAL();
    } else {
        sh4_x86.in_delay_slot = DELAY_PC;
        if( UNTRANSLATABLE(pc+2) ) {
            MOVL_imm32_r32( pc + 4 - sh4_x86.block_start_pc, REG_EAX );
            JT_label(nottaken);
            ADDL_imms_r32( disp, REG_EAX );
            JMP_TARGET(nottaken);
            ADDL_rbpdisp_r32( R_PC, REG_EAX );
            MOVL_r32_rbpdisp( REG_EAX, R_NEW_PC );
            exit_block_emu(pc+2);
            sh4_x86.branch_taken = TRUE;
            return 2;
        } else {
            LOAD_t();
            sh4vma_t target = disp + pc + 4;
            JCC_cc_rel32(sh4_x86.tstate,0);
            uint32_t *patch = ((uint32_t *)xlat_output)-1;
            int save_tstate = sh4_x86.tstate;
            sh4_translate_instruction(pc+2);
            sh4_x86.in_delay_slot = DELAY_PC; /* Cleared by sh4_translate_instruction */
            exit_block_rel( target, pc+4 );

            // not taken
            *patch = (xlat_output - ((uint8_t *)patch)) - 4;
            sh4_x86.tstate = save_tstate;
            sh4_translate_instruction(pc+2);
            return 4;
        }
    }
:}
BRA disp {:
    COUNT_INST(I_BRA);
    if( sh4_x86.in_delay_slot ) {
        SLOTILLEGAL();
    } else {
        sh4_x86.in_delay_slot = DELAY_PC;
        sh4_x86.branch_taken = TRUE;
        if( UNTRANSLATABLE(pc+2) ) {
            MOVL_rbpdisp_r32( R_PC, REG_EAX );
            ADDL_imms_r32( pc + disp + 4 - sh4_x86.block_start_pc, REG_EAX );
            MOVL_r32_rbpdisp( REG_EAX, R_NEW_PC );
            exit_block_emu(pc+2);
            return 2;
        } else {
            sh4_translate_instruction( pc + 2 );
            exit_block_rel( disp + pc + 4, pc+4 );
            return 4;
        }
    }
:}
BRAF Rn {:
    COUNT_INST(I_BRAF);
    if( sh4_x86.in_delay_slot ) {
        SLOTILLEGAL();
    } else {
        MOVL_rbpdisp_r32( R_PC, REG_EAX );
        ADDL_imms_r32( pc + 4 - sh4_x86.block_start_pc, REG_EAX );
        ADDL_rbpdisp_r32( REG_OFFSET(r[Rn]), REG_EAX );
        MOVL_r32_rbpdisp( REG_EAX, R_NEW_PC );
        sh4_x86.in_delay_slot = DELAY_PC;
        sh4_x86.tstate = TSTATE_NONE;
        sh4_x86.branch_taken = TRUE;
        if( UNTRANSLATABLE(pc+2) ) {
            exit_block_emu(pc+2);
            return 2;
        } else {
            sh4_translate_instruction( pc + 2 );
            exit_block_newpcset(pc+4);
            return 4;
        }
    }
:}
BSR disp {:
    COUNT_INST(I_BSR);
    if( sh4_x86.in_delay_slot ) {
        SLOTILLEGAL();
    } else {
        MOVL_rbpdisp_r32( R_PC, REG_EAX );
        ADDL_imms_r32( pc + 4 - sh4_x86.block_start_pc, REG_EAX );
        MOVL_r32_rbpdisp( REG_EAX, R_PR );
        sh4_x86.in_delay_slot = DELAY_PC;
        sh4_x86.branch_taken = TRUE;
        sh4_x86.tstate = TSTATE_NONE;
        if( UNTRANSLATABLE(pc+2) ) {
            ADDL_imms_r32( disp, REG_EAX );
            MOVL_r32_rbpdisp( REG_EAX, R_NEW_PC );
            exit_block_emu(pc+2);
            return 2;
        } else {
            sh4_translate_instruction( pc + 2 );
            exit_block_rel( disp + pc + 4, pc+4 );
            return 4;
        }
    }
:}
BSRF Rn {:
    COUNT_INST(I_BSRF);
    if( sh4_x86.in_delay_slot ) {
        SLOTILLEGAL();
    } else {
        MOVL_rbpdisp_r32( R_PC, REG_EAX );
        ADDL_imms_r32( pc + 4 - sh4_x86.block_start_pc, REG_EAX );
        MOVL_r32_rbpdisp( REG_EAX, R_PR );
        ADDL_rbpdisp_r32( REG_OFFSET(r[Rn]), REG_EAX );
        MOVL_r32_rbpdisp( REG_EAX, R_NEW_PC );

        sh4_x86.in_delay_slot = DELAY_PC;
        sh4_x86.tstate = TSTATE_NONE;
        sh4_x86.branch_taken = TRUE;
        if( UNTRANSLATABLE(pc+2) ) {
            exit_block_emu(pc+2);
            return 2;
        } else {
            sh4_translate_instruction( pc + 2 );
            exit_block_newpcset(pc+4);
            return 4;
        }
    }
:}
BT disp {:
    COUNT_INST(I_BT);
    if( sh4_x86.in_delay_slot ) {
        SLOTILLEGAL();
    } else {
        sh4vma_t target = disp + pc + 4;
        JF_label( nottaken );
        exit_block_rel(target, pc+2 );
        JMP_TARGET(nottaken);
        return 2;
    }
:}
BT/S disp {:
    COUNT_INST(I_BTS);
    if( sh4_x86.in_delay_slot ) {
        SLOTILLEGAL();
    } else {
        sh4_x86.in_delay_slot = DELAY_PC;
        if( UNTRANSLATABLE(pc+2) ) {
            MOVL_imm32_r32( pc + 4 - sh4_x86.block_start_pc, REG_EAX );
            JF_label(nottaken);
            ADDL_imms_r32( disp, REG_EAX );
            JMP_TARGET(nottaken);
            ADDL_rbpdisp_r32( R_PC, REG_EAX );
            MOVL_r32_rbpdisp( REG_EAX, R_NEW_PC );
            exit_block_emu(pc+2);
            sh4_x86.branch_taken = TRUE;
            return 2;
        } else {
            LOAD_t();
            JCC_cc_rel32(sh4_x86.tstate^1,0);
            uint32_t *patch = ((uint32_t *)xlat_output)-1;

            int save_tstate = sh4_x86.tstate;
            sh4_translate_instruction(pc+2);
            sh4_x86.in_delay_slot = DELAY_PC; /* Cleared by sh4_translate_instruction */
            exit_block_rel( disp + pc + 4, pc+4 );
            // not taken
            *patch = (xlat_output - ((uint8_t *)patch)) - 4;
            sh4_x86.tstate = save_tstate;
            sh4_translate_instruction(pc+2);
            return 4;
        }
    }
:}
JMP @Rn {:
    COUNT_INST(I_JMP);
    if( sh4_x86.in_delay_slot ) {
        SLOTILLEGAL();
    } else {
        load_reg( REG_ECX, Rn );
        MOVL_r32_rbpdisp( REG_ECX, R_NEW_PC );
        sh4_x86.in_delay_slot = DELAY_PC;
        sh4_x86.branch_taken = TRUE;
        if( UNTRANSLATABLE(pc+2) ) {
            exit_block_emu(pc+2);
            return 2;
        } else {
            sh4_translate_instruction(pc+2);
            exit_block_newpcset(pc+4);
            return 4;
        }
    }
:}
JSR @Rn {:
    COUNT_INST(I_JSR);
    if( sh4_x86.in_delay_slot ) {
        SLOTILLEGAL();
    } else {
        MOVL_rbpdisp_r32( R_PC, REG_EAX );
        ADDL_imms_r32( pc + 4 - sh4_x86.block_start_pc, REG_EAX );
        MOVL_r32_rbpdisp( REG_EAX, R_PR );
        load_reg( REG_ECX, Rn );
        MOVL_r32_rbpdisp( REG_ECX, R_NEW_PC );
        sh4_x86.in_delay_slot = DELAY_PC;
        sh4_x86.branch_taken = TRUE;
        sh4_x86.tstate = TSTATE_NONE;
        if( UNTRANSLATABLE(pc+2) ) {
            exit_block_emu(pc+2);
            return 2;
        } else {
            sh4_translate_instruction(pc+2);
            exit_block_newpcset(pc+4);
            return 4;
        }
    }
:}
2151 if( sh4_x86.in_delay_slot ) {
2155 MOVL_rbpdisp_r32( R_SPC, REG_ECX );
2156 MOVL_r32_rbpdisp( REG_ECX, R_NEW_PC );
2157 MOVL_rbpdisp_r32( R_SSR, REG_EAX );
2158 CALL1_ptr_r32( sh4_write_sr, REG_EAX );
2159 sh4_x86.in_delay_slot = DELAY_PC;
2160 sh4_x86.fpuen_checked = FALSE;
2161 sh4_x86.tstate = TSTATE_NONE;
2162 sh4_x86.branch_taken = TRUE;
2163 sh4_x86.sh4_mode = SH4_MODE_UNKNOWN;
2164 if( UNTRANSLATABLE(pc+2) ) {
2165 exit_block_emu(pc+2);
2168 sh4_translate_instruction(pc+2);
2169 exit_block_newpcset(pc+4);
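/* Illustrative note: RTE copies SPC to the (delayed) new PC and restores SR
 * from SSR via the same sh4_write_sr helper the generated code calls; since
 * an SR write can flip register banks, privilege level and FPU enable, the
 * cached block state is invalidated above. Rough interpreter-level semantics
 * (sketch, using the sh4r field names the R_* offsets refer to):
 *
 *     sh4r.new_pc = sh4r.spc;
 *     sh4_write_sr( sh4r.ssr );
 *     // ...execute the delay slot at pc+2, then continue at new_pc
 */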
2176 if( sh4_x86.in_delay_slot ) {
2179 MOVL_rbpdisp_r32( R_PR, REG_ECX );
2180 MOVL_r32_rbpdisp( REG_ECX, R_NEW_PC );
2181 sh4_x86.in_delay_slot = DELAY_PC;
2182 sh4_x86.branch_taken = TRUE;
2183 if( UNTRANSLATABLE(pc+2) ) {
2184 exit_block_emu(pc+2);
2187 sh4_translate_instruction(pc+2);
2188 exit_block_newpcset(pc+4);
2194 COUNT_INST(I_TRAPA);
2195 if( sh4_x86.in_delay_slot ) {
2198 MOVL_imm32_r32( pc+2 - sh4_x86.block_start_pc, REG_ECX ); // 5
2199 ADDL_r32_rbpdisp( REG_ECX, R_PC );
2200 MOVL_imm32_r32( imm, REG_EAX );
2201 CALL1_ptr_r32( sh4_raise_trap, REG_EAX );
2202 sh4_x86.tstate = TSTATE_NONE;
2203 exit_block_pcset(pc+2);
2204 sh4_x86.branch_taken = TRUE;
2209 COUNT_INST(I_UNDEF);
2210 if( sh4_x86.in_delay_slot ) {
2211 exit_block_exc(EXC_SLOT_ILLEGAL, pc-2, 4);
2213 exit_block_exc(EXC_ILLEGAL, pc, 2);
2219 COUNT_INST(I_CLRMAC);
2220 XORL_r32_r32(REG_EAX, REG_EAX);
2221 MOVL_r32_rbpdisp( REG_EAX, R_MACL );
2222 MOVL_r32_rbpdisp( REG_EAX, R_MACH );
2223 sh4_x86.tstate = TSTATE_NONE;
2228 SETCCB_cc_rbpdisp(X86_COND_C, R_S);
2229 sh4_x86.tstate = TSTATE_NONE;
2235 sh4_x86.tstate = TSTATE_C;
2240 SETCCB_cc_rbpdisp(X86_COND_C, R_S);
2241 sh4_x86.tstate = TSTATE_NONE;
2247 sh4_x86.tstate = TSTATE_C;
2250 /* Floating point moves */
2252 COUNT_INST(I_FMOV1);
2254 if( sh4_x86.double_size ) {
2255 load_dr0( REG_EAX, FRm );
2256 load_dr1( REG_ECX, FRm );
2257 store_dr0( REG_EAX, FRn );
2258 store_dr1( REG_ECX, FRn );
2260 load_fr( REG_EAX, FRm ); // SZ=0 branch
2261 store_fr( REG_EAX, FRn );
2265 COUNT_INST(I_FMOV2);
2267 load_reg( REG_EAX, Rn );
2268 if( sh4_x86.double_size ) {
2269 check_walign64( REG_EAX );
2270 load_dr0( REG_EDX, FRm );
2271 MEM_WRITE_LONG( REG_EAX, REG_EDX );
2272 load_reg( REG_EAX, Rn );
2273 LEAL_r32disp_r32( REG_EAX, 4, REG_EAX );
2274 load_dr1( REG_EDX, FRm );
2275 MEM_WRITE_LONG( REG_EAX, REG_EDX );
2277 check_walign32( REG_EAX );
2278 load_fr( REG_EDX, FRm );
2279 MEM_WRITE_LONG( REG_EAX, REG_EDX );
2281 sh4_x86.tstate = TSTATE_NONE;
2284 COUNT_INST(I_FMOV5);
2286 load_reg( REG_EAX, Rm );
2287 if( sh4_x86.double_size ) {
2288 check_ralign64( REG_EAX );
2289 MEM_READ_LONG( REG_EAX, REG_EAX );
2290 store_dr0( REG_EAX, FRn );
2291 load_reg( REG_EAX, Rm );
2292 LEAL_r32disp_r32( REG_EAX, 4, REG_EAX );
2293 MEM_READ_LONG( REG_EAX, REG_EAX );
2294 store_dr1( REG_EAX, FRn );
2296 check_ralign32( REG_EAX );
2297 MEM_READ_LONG( REG_EAX, REG_EAX );
2298 store_fr( REG_EAX, FRn );
2300 sh4_x86.tstate = TSTATE_NONE;
2303 COUNT_INST(I_FMOV3);
2305 load_reg( REG_EAX, Rn );
2306 if( sh4_x86.double_size ) {
2307 check_walign64( REG_EAX );
2308 LEAL_r32disp_r32( REG_EAX, -8, REG_EAX );
2309 load_dr0( REG_EDX, FRm );
2310 MEM_WRITE_LONG( REG_EAX, REG_EDX );
2311 load_reg( REG_EAX, Rn );
2312 LEAL_r32disp_r32( REG_EAX, -4, REG_EAX );
2313 load_dr1( REG_EDX, FRm );
2314 MEM_WRITE_LONG( REG_EAX, REG_EDX );
2315 ADDL_imms_rbpdisp(-8,REG_OFFSET(r[Rn]));
2317 check_walign32( REG_EAX );
2318 LEAL_r32disp_r32( REG_EAX, -4, REG_EAX );
2319 load_fr( REG_EDX, FRm );
2320 MEM_WRITE_LONG( REG_EAX, REG_EDX );
2321 ADDL_imms_rbpdisp(-4,REG_OFFSET(r[Rn]));
2323 sh4_x86.tstate = TSTATE_NONE;
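/* Illustrative note: in the pre-decrement forms Rn is only written back (the
 * trailing ADDL_imms_rbpdisp) once every MEM_WRITE_LONG has been issued, so a
 * fault in the middle leaves Rn untouched and the instruction can simply be
 * restarted. Rough semantics for the SZ=1 case (sketch; write_long stands in
 * for the memory region callback):
 *
 *     write_long( Rn - 8, dr_high );   // may fault; Rn still intact
 *     write_long( Rn - 4, dr_low );    // may fault; Rn still intact
 *     Rn -= 8;                         // committed only after both succeed
 */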
2326 COUNT_INST(I_FMOV6);
2328 load_reg( REG_EAX, Rm );
2329 if( sh4_x86.double_size ) {
2330 check_ralign64( REG_EAX );
2331 MEM_READ_LONG( REG_EAX, REG_EAX );
2332 store_dr0( REG_EAX, FRn );
2333 load_reg( REG_EAX, Rm );
2334 LEAL_r32disp_r32( REG_EAX, 4, REG_EAX );
2335 MEM_READ_LONG( REG_EAX, REG_EAX );
2336 store_dr1( REG_EAX, FRn );
2337 ADDL_imms_rbpdisp( 8, REG_OFFSET(r[Rm]) );
2339 check_ralign32( REG_EAX );
2340 MEM_READ_LONG( REG_EAX, REG_EAX );
2341 store_fr( REG_EAX, FRn );
2342 ADDL_imms_rbpdisp( 4, REG_OFFSET(r[Rm]) );
2344 sh4_x86.tstate = TSTATE_NONE;
2346 FMOV FRm, @(R0, Rn) {:
2347 COUNT_INST(I_FMOV4);
2349 load_reg( REG_EAX, Rn );
2350 ADDL_rbpdisp_r32( REG_OFFSET(r[0]), REG_EAX );
2351 if( sh4_x86.double_size ) {
2352 check_walign64( REG_EAX );
2353 load_dr0( REG_EDX, FRm );
2354 MEM_WRITE_LONG( REG_EAX, REG_EDX );
2355 load_reg( REG_EAX, Rn );
2356 ADDL_rbpdisp_r32( REG_OFFSET(r[0]), REG_EAX );
2357 LEAL_r32disp_r32( REG_EAX, 4, REG_EAX );
2358 load_dr1( REG_EDX, FRm );
2359 MEM_WRITE_LONG( REG_EAX, REG_EDX );
2361 check_walign32( REG_EAX );
2362 load_fr( REG_EDX, FRm );
2363 MEM_WRITE_LONG( REG_EAX, REG_EDX ); // 12
2365 sh4_x86.tstate = TSTATE_NONE;
2367 FMOV @(R0, Rm), FRn {:
2368 COUNT_INST(I_FMOV7);
2370 load_reg( REG_EAX, Rm );
2371 ADDL_rbpdisp_r32( REG_OFFSET(r[0]), REG_EAX );
2372 if( sh4_x86.double_size ) {
2373 check_ralign64( REG_EAX );
2374 MEM_READ_LONG( REG_EAX, REG_EAX );
2375 store_dr0( REG_EAX, FRn );
2376 load_reg( REG_EAX, Rm );
2377 ADDL_rbpdisp_r32( REG_OFFSET(r[0]), REG_EAX );
2378 LEAL_r32disp_r32( REG_EAX, 4, REG_EAX );
2379 MEM_READ_LONG( REG_EAX, REG_EAX );
2380 store_dr1( REG_EAX, FRn );
2382 check_ralign32( REG_EAX );
2383 MEM_READ_LONG( REG_EAX, REG_EAX );
2384 store_fr( REG_EAX, FRn );
2386 sh4_x86.tstate = TSTATE_NONE;
2388 FLDI0 FRn {: /* IFF PR=0 */
2389 COUNT_INST(I_FLDI0);
2391 if( sh4_x86.double_prec == 0 ) {
2392 XORL_r32_r32( REG_EAX, REG_EAX );
2393 store_fr( REG_EAX, FRn );
2395 sh4_x86.tstate = TSTATE_NONE;
2397 FLDI1 FRn {: /* IFF PR=0 */
2398 COUNT_INST(I_FLDI1);
2400 if( sh4_x86.double_prec == 0 ) {
2401 MOVL_imm32_r32( 0x3F800000, REG_EAX );
2402 store_fr( REG_EAX, FRn );
2407 COUNT_INST(I_FLOAT);
2409 FILD_rbpdisp(R_FPUL);
2410 if( sh4_x86.double_prec ) {
2419 if( sh4_x86.double_prec ) {
2424 MOVP_immptr_rptr( &min_int, REG_ECX );
2425 FILD_r32disp( REG_ECX, 0 );
2429 MOVP_immptr_rptr( &max_int, REG_ECX );
2430 FILD_r32disp( REG_ECX, 0 );
2433 MOVP_immptr_rptr( &save_fcw, REG_EAX );
2434 FNSTCW_r32disp( REG_EAX, 0 );
2435 MOVP_immptr_rptr( &trunc_fcw, REG_EDX );
2436 FLDCW_r32disp( REG_EDX, 0 );
2437 FISTP_rbpdisp(R_FPUL);
2438 FLDCW_r32disp( REG_EAX, 0 );
2444 MOVL_r32disp_r32( REG_ECX, 0, REG_ECX ); // 2
2445 MOVL_r32_rbpdisp( REG_ECX, R_FPUL );
2448 sh4_x86.tstate = TSTATE_NONE;
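/* Illustrative note: FTRC must truncate toward zero and saturate, so the code
 * above clamps against min_int/max_int and swaps in the truncating control
 * word (trunc_fcw) around FISTP before restoring the saved one. Scalar sketch
 * of the intended result (NaN handling elided):
 *
 *     int32_t ftrc( double d )
 *     {
 *         if( d >= 2147483647.0 )  return 0x7FFFFFFF;          // max_int
 *         if( d <= -2147483648.0 ) return (int32_t)0x80000000; // min_int
 *         return (int32_t)d;        // C conversion truncates toward zero
 *     }
 */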
2453 load_fr( REG_EAX, FRm );
2454 MOVL_r32_rbpdisp( REG_EAX, R_FPUL );
2459 MOVL_rbpdisp_r32( R_FPUL, REG_EAX );
2460 store_fr( REG_EAX, FRn );
2463 COUNT_INST(I_FCNVDS);
2465 if( sh4_x86.double_prec ) {
2471 COUNT_INST(I_FCNVSD);
2473 if( sh4_x86.double_prec ) {
2479 /* Floating point instructions */
2483 if( sh4_x86.double_prec ) {
2496 if( sh4_x86.double_prec ) {
2511 if( sh4_x86.double_prec ) {
2523 FMAC FR0, FRm, FRn {:
2526 if( sh4_x86.double_prec ) {
2546 if( sh4_x86.double_prec ) {
2561 if( sh4_x86.double_prec ) {
2572 COUNT_INST(I_FSRRA);
2574 if( sh4_x86.double_prec == 0 ) {
2583 COUNT_INST(I_FSQRT);
2585 if( sh4_x86.double_prec ) {
2598 if( sh4_x86.double_prec ) {
2612 COUNT_INST(I_FCMPEQ);
2614 if( sh4_x86.double_prec ) {
2621 XORL_r32_r32(REG_EAX, REG_EAX);
2622 XORL_r32_r32(REG_EDX, REG_EDX);
2624 SETCCB_cc_r8(X86_COND_NP, REG_DL);
2625 CMOVCCL_cc_r32_r32(X86_COND_E, REG_EDX, REG_EAX);
2626 MOVL_r32_rbpdisp(REG_EAX, R_T);
2628 sh4_x86.tstate = TSTATE_NONE;
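/* Illustrative note: after an x86 floating-point compare, ZF is set both for
 * "equal" and for "unordered" (NaN operands), with PF=1 distinguishing the
 * unordered case. The SETNP/CMOVE pair above therefore computes
 * T = equal AND ordered, which is exactly IEEE equality:
 *
 *     sh4r.t = ( frm == frn );   // == is already false if either is NaN
 */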
2631 COUNT_INST(I_FCMPGT);
2633 if( sh4_x86.double_prec ) {
2643 sh4_x86.tstate = TSTATE_A;
2649 if( sh4_x86.double_prec == 0 ) {
2650 LEAP_rbpdisp_rptr( REG_OFFSET(fr[0][FRn&0x0E]), REG_EDX );
2651 MOVL_rbpdisp_r32( R_FPUL, REG_EAX );
2652 CALL2_ptr_r32_r32( sh4_fsca, REG_EAX, REG_EDX );
2654 sh4_x86.tstate = TSTATE_NONE;
2659 if( sh4_x86.double_prec == 0 ) {
2660 if( sh4_x86.sse3_enabled ) {
2661 MOVAPS_rbpdisp_xmm( REG_OFFSET(fr[0][FVm<<2]), 4 );
2662 MULPS_rbpdisp_xmm( REG_OFFSET(fr[0][FVn<<2]), 4 );
2663 HADDPS_xmm_xmm( 4, 4 );
2664 HADDPS_xmm_xmm( 4, 4 );
2665 MOVSS_xmm_rbpdisp( 4, REG_OFFSET(fr[0][(FVn<<2)+2]) );
2670 push_fr( (FVm<<2)+1);
2671 push_fr( (FVn<<2)+1);
2674 push_fr( (FVm<<2)+2);
2675 push_fr( (FVn<<2)+2);
2678 push_fr( (FVm<<2)+3);
2679 push_fr( (FVn<<2)+3);
2682 pop_fr( (FVn<<2)+3);
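/* Illustrative note: FIPR stores the inner product of vectors FVm and FVn in
 * the last element of FVn; the SSE3 path uses MULPS plus two HADDPS to sum
 * the four products, while the fallback accumulates on the x87 stack. Scalar
 * sketch (ignoring the host-side pair-swapped storage of fr[]):
 *
 *     fr[(FVn<<2)+3] = fr[(FVm<<2)+0] * fr[(FVn<<2)+0]
 *                    + fr[(FVm<<2)+1] * fr[(FVn<<2)+1]
 *                    + fr[(FVm<<2)+2] * fr[(FVn<<2)+2]
 *                    + fr[(FVm<<2)+3] * fr[(FVn<<2)+3];
 */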
2689 if( sh4_x86.double_prec == 0 ) {
2690 if( sh4_x86.sse3_enabled && sh4_x86.begin_callback == NULL ) {
2691 /* FIXME: For now, disable this inlining when we're running in shadow mode -
2692 * it gives slightly different results from the emu core. Need to
2693 * fix the precision so both give the right results.
2694 */
2695 MOVAPS_rbpdisp_xmm( REG_OFFSET(fr[1][0]), 1 ); // M1 M0 M3 M2
2696 MOVAPS_rbpdisp_xmm( REG_OFFSET(fr[1][4]), 0 ); // M5 M4 M7 M6
2697 MOVAPS_rbpdisp_xmm( REG_OFFSET(fr[1][8]), 3 ); // M9 M8 M11 M10
2698 MOVAPS_rbpdisp_xmm( REG_OFFSET(fr[1][12]), 2 );// M13 M12 M15 M14
2700 MOVSLDUP_rbpdisp_xmm( REG_OFFSET(fr[0][FVn<<2]), 4 ); // V1 V1 V3 V3
2701 MOVSHDUP_rbpdisp_xmm( REG_OFFSET(fr[0][FVn<<2]), 5 ); // V0 V0 V2 V2
2702 MOV_xmm_xmm( 4, 6 );
2703 MOV_xmm_xmm( 5, 7 );
2704 MOVLHPS_xmm_xmm( 4, 4 ); // V1 V1 V1 V1
2705 MOVHLPS_xmm_xmm( 6, 6 ); // V3 V3 V3 V3
2706 MOVLHPS_xmm_xmm( 5, 5 ); // V0 V0 V0 V0
2707 MOVHLPS_xmm_xmm( 7, 7 ); // V2 V2 V2 V2
2708 MULPS_xmm_xmm( 0, 4 );
2709 MULPS_xmm_xmm( 1, 5 );
2710 MULPS_xmm_xmm( 2, 6 );
2711 MULPS_xmm_xmm( 3, 7 );
2712 ADDPS_xmm_xmm( 5, 4 );
2713 ADDPS_xmm_xmm( 7, 6 );
2714 ADDPS_xmm_xmm( 6, 4 );
2715 MOVAPS_xmm_rbpdisp( 4, REG_OFFSET(fr[0][FVn<<2]) );
2717 LEAP_rbpdisp_rptr( REG_OFFSET(fr[0][FVn<<2]), REG_EAX );
2718 CALL1_ptr_r32( sh4_ftrv, REG_EAX );
2721 sh4_x86.tstate = TSTATE_NONE;
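/* Illustrative note: FTRV multiplies the back-bank 4x4 matrix (XMTRX, held
 * column-major in xf[0..15]) by vector FVn in place. The SSE path broadcasts
 * each vector element across an XMM register and accumulates one matrix
 * column per MULPS; the "M1 M0 M3 M2" comments reflect the pair-swapped host
 * layout of fr[1][]. Scalar sketch (assumes <string.h>):
 *
 *     float v[4], r[4];
 *     int i, j;
 *     memcpy( v, &fr[FVn<<2], sizeof(v) );
 *     for( i = 0; i < 4; i++ ) {
 *         r[i] = 0.0f;
 *         for( j = 0; j < 4; j++ )
 *             r[i] += xf[j*4 + i] * v[j];   // column j, row i
 *     }
 *     memcpy( &fr[FVn<<2], r, sizeof(r) );
 */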
2725 COUNT_INST(I_FRCHG);
2727 XORL_imms_rbpdisp( FPSCR_FR, R_FPSCR );
2728 CALL_ptr( sh4_switch_fr_banks );
2729 sh4_x86.tstate = TSTATE_NONE;
2732 COUNT_INST(I_FSCHG);
2734 XORL_imms_rbpdisp( FPSCR_SZ, R_FPSCR);
2735 XORL_imms_rbpdisp( FPSCR_SZ, REG_OFFSET(xlat_sh4_mode) );
2736 sh4_x86.tstate = TSTATE_NONE;
2737 sh4_x86.double_size = !sh4_x86.double_size;
2738 sh4_x86.sh4_mode = sh4_x86.sh4_mode ^ FPSCR_SZ;
2741 /* Processor control instructions */
2743 COUNT_INST(I_LDCSR);
2744 if( sh4_x86.in_delay_slot ) {
2748 load_reg( REG_EAX, Rm );
2749 CALL1_ptr_r32( sh4_write_sr, REG_EAX );
2750 sh4_x86.fpuen_checked = FALSE;
2751 sh4_x86.tstate = TSTATE_NONE;
2752 sh4_x86.sh4_mode = SH4_MODE_UNKNOWN;
2758 load_reg( REG_EAX, Rm );
2759 MOVL_r32_rbpdisp( REG_EAX, R_GBR );
2764 load_reg( REG_EAX, Rm );
2765 MOVL_r32_rbpdisp( REG_EAX, R_VBR );
2766 sh4_x86.tstate = TSTATE_NONE;
2771 load_reg( REG_EAX, Rm );
2772 MOVL_r32_rbpdisp( REG_EAX, R_SSR );
2773 sh4_x86.tstate = TSTATE_NONE;
2778 load_reg( REG_EAX, Rm );
2779 MOVL_r32_rbpdisp( REG_EAX, R_SGR );
2780 sh4_x86.tstate = TSTATE_NONE;
2785 load_reg( REG_EAX, Rm );
2786 MOVL_r32_rbpdisp( REG_EAX, R_SPC );
2787 sh4_x86.tstate = TSTATE_NONE;
2792 load_reg( REG_EAX, Rm );
2793 MOVL_r32_rbpdisp( REG_EAX, R_DBR );
2794 sh4_x86.tstate = TSTATE_NONE;
2799 load_reg( REG_EAX, Rm );
2800 MOVL_r32_rbpdisp( REG_EAX, REG_OFFSET(r_bank[Rn_BANK]) );
2801 sh4_x86.tstate = TSTATE_NONE;
2805 load_reg( REG_EAX, Rm );
2806 check_ralign32( REG_EAX );
2807 MEM_READ_LONG( REG_EAX, REG_EAX );
2808 ADDL_imms_rbpdisp( 4, REG_OFFSET(r[Rm]) );
2809 MOVL_r32_rbpdisp( REG_EAX, R_GBR );
2810 sh4_x86.tstate = TSTATE_NONE;
2813 COUNT_INST(I_LDCSRM);
2814 if( sh4_x86.in_delay_slot ) {
2818 load_reg( REG_EAX, Rm );
2819 check_ralign32( REG_EAX );
2820 MEM_READ_LONG( REG_EAX, REG_EAX );
2821 ADDL_imms_rbpdisp( 4, REG_OFFSET(r[Rm]) );
2822 CALL1_ptr_r32( sh4_write_sr, REG_EAX );
2823 sh4_x86.fpuen_checked = FALSE;
2824 sh4_x86.tstate = TSTATE_NONE;
2825 sh4_x86.sh4_mode = SH4_MODE_UNKNOWN;
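/* Illustrative note: LDC.L @Rm+, SR is load, post-increment, then the
 * sh4_write_sr call; because an SR write may switch register banks and
 * privilege level, the cached sh4_mode/fpuen state is discarded above. Rough
 * semantics (sketch; read_long stands in for the memory region callback):
 *
 *     uint32_t val = read_long( Rm );   // may fault; Rm still intact
 *     Rm += 4;
 *     sh4_write_sr( val );
 */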
2832 load_reg( REG_EAX, Rm );
2833 check_ralign32( REG_EAX );
2834 MEM_READ_LONG( REG_EAX, REG_EAX );
2835 ADDL_imms_rbpdisp( 4, REG_OFFSET(r[Rm]) );
2836 MOVL_r32_rbpdisp( REG_EAX, R_VBR );
2837 sh4_x86.tstate = TSTATE_NONE;
2842 load_reg( REG_EAX, Rm );
2843 check_ralign32( REG_EAX );
2844 MEM_READ_LONG( REG_EAX, REG_EAX );
2845 ADDL_imms_rbpdisp( 4, REG_OFFSET(r[Rm]) );
2846 MOVL_r32_rbpdisp( REG_EAX, R_SSR );
2847 sh4_x86.tstate = TSTATE_NONE;
2852 load_reg( REG_EAX, Rm );
2853 check_ralign32( REG_EAX );
2854 MEM_READ_LONG( REG_EAX, REG_EAX );
2855 ADDL_imms_rbpdisp( 4, REG_OFFSET(r[Rm]) );
2856 MOVL_r32_rbpdisp( REG_EAX, R_SGR );
2857 sh4_x86.tstate = TSTATE_NONE;
2862 load_reg( REG_EAX, Rm );
2863 check_ralign32( REG_EAX );
2864 MEM_READ_LONG( REG_EAX, REG_EAX );
2865 ADDL_imms_rbpdisp( 4, REG_OFFSET(r[Rm]) );
2866 MOVL_r32_rbpdisp( REG_EAX, R_SPC );
2867 sh4_x86.tstate = TSTATE_NONE;
2872 load_reg( REG_EAX, Rm );
2873 check_ralign32( REG_EAX );
2874 MEM_READ_LONG( REG_EAX, REG_EAX );
2875 ADDL_imms_rbpdisp( 4, REG_OFFSET(r[Rm]) );
2876 MOVL_r32_rbpdisp( REG_EAX, R_DBR );
2877 sh4_x86.tstate = TSTATE_NONE;
2879 LDC.L @Rm+, Rn_BANK {:
2882 load_reg( REG_EAX, Rm );
2883 check_ralign32( REG_EAX );
2884 MEM_READ_LONG( REG_EAX, REG_EAX );
2885 ADDL_imms_rbpdisp( 4, REG_OFFSET(r[Rm]) );
2886 MOVL_r32_rbpdisp( REG_EAX, REG_OFFSET(r_bank[Rn_BANK]) );
2887 sh4_x86.tstate = TSTATE_NONE;
2890 COUNT_INST(I_LDSFPSCR);
2892 load_reg( REG_EAX, Rm );
2893 CALL1_ptr_r32( sh4_write_fpscr, REG_EAX );
2894 sh4_x86.tstate = TSTATE_NONE;
2895 sh4_x86.sh4_mode = SH4_MODE_UNKNOWN;
2898 LDS.L @Rm+, FPSCR {:
2899 COUNT_INST(I_LDSFPSCRM);
2901 load_reg( REG_EAX, Rm );
2902 check_ralign32( REG_EAX );
2903 MEM_READ_LONG( REG_EAX, REG_EAX );
2904 ADDL_imms_rbpdisp( 4, REG_OFFSET(r[Rm]) );
2905 CALL1_ptr_r32( sh4_write_fpscr, REG_EAX );
2906 sh4_x86.tstate = TSTATE_NONE;
2907 sh4_x86.sh4_mode = SH4_MODE_UNKNOWN;
2913 load_reg( REG_EAX, Rm );
2914 MOVL_r32_rbpdisp( REG_EAX, R_FPUL );
2919 load_reg( REG_EAX, Rm );
2920 check_ralign32( REG_EAX );
2921 MEM_READ_LONG( REG_EAX, REG_EAX );
2922 ADDL_imms_rbpdisp( 4, REG_OFFSET(r[Rm]) );
2923 MOVL_r32_rbpdisp( REG_EAX, R_FPUL );
2924 sh4_x86.tstate = TSTATE_NONE;
2928 load_reg( REG_EAX, Rm );
2929 MOVL_r32_rbpdisp( REG_EAX, R_MACH );
2933 load_reg( REG_EAX, Rm );
2934 check_ralign32( REG_EAX );
2935 MEM_READ_LONG( REG_EAX, REG_EAX );
2936 ADDL_imms_rbpdisp( 4, REG_OFFSET(r[Rm]) );
2937 MOVL_r32_rbpdisp( REG_EAX, R_MACH );
2938 sh4_x86.tstate = TSTATE_NONE;
2942 load_reg( REG_EAX, Rm );
2943 MOVL_r32_rbpdisp( REG_EAX, R_MACL );
2947 load_reg( REG_EAX, Rm );
2948 check_ralign32( REG_EAX );
2949 MEM_READ_LONG( REG_EAX, REG_EAX );
2950 ADDL_imms_rbpdisp( 4, REG_OFFSET(r[Rm]) );
2951 MOVL_r32_rbpdisp( REG_EAX, R_MACL );
2952 sh4_x86.tstate = TSTATE_NONE;
2956 load_reg( REG_EAX, Rm );
2957 MOVL_r32_rbpdisp( REG_EAX, R_PR );
2961 load_reg( REG_EAX, Rm );
2962 check_ralign32( REG_EAX );
2963 MEM_READ_LONG( REG_EAX, REG_EAX );
2964 ADDL_imms_rbpdisp( 4, REG_OFFSET(r[Rm]) );
2965 MOVL_r32_rbpdisp( REG_EAX, R_PR );
2966 sh4_x86.tstate = TSTATE_NONE;
2969 COUNT_INST(I_LDTLB);
2970 CALL_ptr( MMU_ldtlb );
2971 sh4_x86.tstate = TSTATE_NONE;
2980 COUNT_INST(I_OCBWB);
2984 load_reg( REG_EAX, Rn );
2985 MEM_PREFETCH( REG_EAX );
2986 sh4_x86.tstate = TSTATE_NONE;
2989 COUNT_INST(I_SLEEP);
2991 CALL_ptr( sh4_sleep );
2992 sh4_x86.tstate = TSTATE_NONE;
2993 sh4_x86.in_delay_slot = DELAY_NONE;
2997 COUNT_INST(I_STCSR);
2999 CALL_ptr(sh4_read_sr);
3000 store_reg( REG_EAX, Rn );
3001 sh4_x86.tstate = TSTATE_NONE;
3005 MOVL_rbpdisp_r32( R_GBR, REG_EAX );
3006 store_reg( REG_EAX, Rn );
3011 MOVL_rbpdisp_r32( R_VBR, REG_EAX );
3012 store_reg( REG_EAX, Rn );
3013 sh4_x86.tstate = TSTATE_NONE;
3018 MOVL_rbpdisp_r32( R_SSR, REG_EAX );
3019 store_reg( REG_EAX, Rn );
3020 sh4_x86.tstate = TSTATE_NONE;
3025 MOVL_rbpdisp_r32( R_SPC, REG_EAX );
3026 store_reg( REG_EAX, Rn );
3027 sh4_x86.tstate = TSTATE_NONE;
3032 MOVL_rbpdisp_r32( R_SGR, REG_EAX );
3033 store_reg( REG_EAX, Rn );
3034 sh4_x86.tstate = TSTATE_NONE;
3039 MOVL_rbpdisp_r32( R_DBR, REG_EAX );
3040 store_reg( REG_EAX, Rn );
3041 sh4_x86.tstate = TSTATE_NONE;
3046 MOVL_rbpdisp_r32( REG_OFFSET(r_bank[Rm_BANK]), REG_EAX );
3047 store_reg( REG_EAX, Rn );
3048 sh4_x86.tstate = TSTATE_NONE;
3051 COUNT_INST(I_STCSRM);
3053 CALL_ptr( sh4_read_sr );
3054 MOVL_r32_r32( REG_EAX, REG_EDX );
3055 load_reg( REG_EAX, Rn );
3056 check_walign32( REG_EAX );
3057 LEAL_r32disp_r32( REG_EAX, -4, REG_EAX );
3058 MEM_WRITE_LONG( REG_EAX, REG_EDX );
3059 ADDL_imms_rbpdisp( -4, REG_OFFSET(r[Rn]) );
3060 sh4_x86.tstate = TSTATE_NONE;
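/* Illustrative note: STC.L SR, @-Rn goes through sh4_read_sr, which
 * reassembles the full SR image from the separately-stored T/S/Q/M fields,
 * then performs the usual pre-decrement store. Rough semantics (sketch;
 * write_long stands in for the memory region callback):
 *
 *     uint32_t addr = Rn - 4;
 *     write_long( addr, sh4_read_sr() );   // may fault; Rn still intact
 *     Rn = addr;                           // write back only on success
 */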
3065 load_reg( REG_EAX, Rn );
3066 check_walign32( REG_EAX );
3067 ADDL_imms_r32( -4, REG_EAX );
3068 MOVL_rbpdisp_r32( R_VBR, REG_EDX );
3069 MEM_WRITE_LONG( REG_EAX, REG_EDX );
3070 ADDL_imms_rbpdisp( -4, REG_OFFSET(r[Rn]) );
3071 sh4_x86.tstate = TSTATE_NONE;
3076 load_reg( REG_EAX, Rn );
3077 check_walign32( REG_EAX );
3078 ADDL_imms_r32( -4, REG_EAX );
3079 MOVL_rbpdisp_r32( R_SSR, REG_EDX );
3080 MEM_WRITE_LONG( REG_EAX, REG_EDX );
3081 ADDL_imms_rbpdisp( -4, REG_OFFSET(r[Rn]) );
3082 sh4_x86.tstate = TSTATE_NONE;
3087 load_reg( REG_EAX, Rn );
3088 check_walign32( REG_EAX );
3089 ADDL_imms_r32( -4, REG_EAX );
3090 MOVL_rbpdisp_r32( R_SPC, REG_EDX );
3091 MEM_WRITE_LONG( REG_EAX, REG_EDX );
3092 ADDL_imms_rbpdisp( -4, REG_OFFSET(r[Rn]) );
3093 sh4_x86.tstate = TSTATE_NONE;
3098 load_reg( REG_EAX, Rn );
3099 check_walign32( REG_EAX );
3100 ADDL_imms_r32( -4, REG_EAX );
3101 MOVL_rbpdisp_r32( R_SGR, REG_EDX );
3102 MEM_WRITE_LONG( REG_EAX, REG_EDX );
3103 ADDL_imms_rbpdisp( -4, REG_OFFSET(r[Rn]) );
3104 sh4_x86.tstate = TSTATE_NONE;
3109 load_reg( REG_EAX, Rn );
3110 check_walign32( REG_EAX );
3111 ADDL_imms_r32( -4, REG_EAX );
3112 MOVL_rbpdisp_r32( R_DBR, REG_EDX );
3113 MEM_WRITE_LONG( REG_EAX, REG_EDX );
3114 ADDL_imms_rbpdisp( -4, REG_OFFSET(r[Rn]) );
3115 sh4_x86.tstate = TSTATE_NONE;
3117 STC.L Rm_BANK, @-Rn {:
3120 load_reg( REG_EAX, Rn );
3121 check_walign32( REG_EAX );
3122 ADDL_imms_r32( -4, REG_EAX );
3123 MOVL_rbpdisp_r32( REG_OFFSET(r_bank[Rm_BANK]), REG_EDX );
3124 MEM_WRITE_LONG( REG_EAX, REG_EDX );
3125 ADDL_imms_rbpdisp( -4, REG_OFFSET(r[Rn]) );
3126 sh4_x86.tstate = TSTATE_NONE;
3130 load_reg( REG_EAX, Rn );
3131 check_walign32( REG_EAX );
3132 ADDL_imms_r32( -4, REG_EAX );
3133 MOVL_rbpdisp_r32( R_GBR, REG_EDX );
3134 MEM_WRITE_LONG( REG_EAX, REG_EDX );
3135 ADDL_imms_rbpdisp( -4, REG_OFFSET(r[Rn]) );
3136 sh4_x86.tstate = TSTATE_NONE;
3139 COUNT_INST(I_STSFPSCR);
3141 MOVL_rbpdisp_r32( R_FPSCR, REG_EAX );
3142 store_reg( REG_EAX, Rn );
3144 STS.L FPSCR, @-Rn {:
3145 COUNT_INST(I_STSFPSCRM);
3147 load_reg( REG_EAX, Rn );
3148 check_walign32( REG_EAX );
3149 ADDL_imms_r32( -4, REG_EAX );
3150 MOVL_rbpdisp_r32( R_FPSCR, REG_EDX );
3151 MEM_WRITE_LONG( REG_EAX, REG_EDX );
3152 ADDL_imms_rbpdisp( -4, REG_OFFSET(r[Rn]) );
3153 sh4_x86.tstate = TSTATE_NONE;
3158 MOVL_rbpdisp_r32( R_FPUL, REG_EAX );
3159 store_reg( REG_EAX, Rn );
3164 load_reg( REG_EAX, Rn );
3165 check_walign32( REG_EAX );
3166 ADDL_imms_r32( -4, REG_EAX );
3167 MOVL_rbpdisp_r32( R_FPUL, REG_EDX );
3168 MEM_WRITE_LONG( REG_EAX, REG_EDX );
3169 ADDL_imms_rbpdisp( -4, REG_OFFSET(r[Rn]) );
3170 sh4_x86.tstate = TSTATE_NONE;
3174 MOVL_rbpdisp_r32( R_MACH, REG_EAX );
3175 store_reg( REG_EAX, Rn );
3179 load_reg( REG_EAX, Rn );
3180 check_walign32( REG_EAX );
3181 ADDL_imms_r32( -4, REG_EAX );
3182 MOVL_rbpdisp_r32( R_MACH, REG_EDX );
3183 MEM_WRITE_LONG( REG_EAX, REG_EDX );
3184 ADDL_imms_rbpdisp( -4, REG_OFFSET(r[Rn]) );
3185 sh4_x86.tstate = TSTATE_NONE;
3189 MOVL_rbpdisp_r32( R_MACL, REG_EAX );
3190 store_reg( REG_EAX, Rn );
3194 load_reg( REG_EAX, Rn );
3195 check_walign32( REG_EAX );
3196 ADDL_imms_r32( -4, REG_EAX );
3197 MOVL_rbpdisp_r32( R_MACL, REG_EDX );
3198 MEM_WRITE_LONG( REG_EAX, REG_EDX );
3199 ADDL_imms_rbpdisp( -4, REG_OFFSET(r[Rn]) );
3200 sh4_x86.tstate = TSTATE_NONE;
3204 MOVL_rbpdisp_r32( R_PR, REG_EAX );
3205 store_reg( REG_EAX, Rn );
3209 load_reg( REG_EAX, Rn );
3210 check_walign32( REG_EAX );
3211 ADDL_imms_r32( -4, REG_EAX );
3212 MOVL_rbpdisp_r32( R_PR, REG_EDX );
3213 MEM_WRITE_LONG( REG_EAX, REG_EDX );
3214 ADDL_imms_rbpdisp( -4, REG_OFFSET(r[Rn]) );
3215 sh4_x86.tstate = TSTATE_NONE;
3220 /* Do nothing. Well, we could emit an 0x90, but what would really be the point? */
3223 sh4_x86.in_delay_slot = DELAY_NONE;
3228 /**
3229 * The unwind methods only work if we compiled with DWARF2 frame information
3230 * (i.e. -fexceptions); otherwise we have to use the direct frame scan.
3231 */
3232 #ifdef HAVE_EXCEPTIONS
3233 #include <unwind.h>
3235 struct UnwindInfo {
3236 uintptr_t block_start;
3237 uintptr_t block_end;
3238 void *pc;
3239 };
3241 static _Unwind_Reason_Code xlat_check_frame( struct _Unwind_Context *context, void *arg )
3242 {
3243 struct UnwindInfo *info = arg;
3244 void *pc = (void *)_Unwind_GetIP(context);
3245 if( ((uintptr_t)pc) >= info->block_start && ((uintptr_t)pc) < info->block_end ) {
3246 info->pc = pc;
3247 return _URC_NORMAL_STOP;
3248 }
3249 return _URC_NO_REASON;
3250 }
3252 void *xlat_get_native_pc( void *code, uint32_t code_size )
3253 {
3254 struct _Unwind_Exception exc;
3255 struct UnwindInfo info;
3257 info.pc = NULL;
3258 info.block_start = (uintptr_t)code;
3259 info.block_end = info.block_start + code_size;
3260 void *result = NULL;
3261 _Unwind_Backtrace( xlat_check_frame, &info );
3262 return info.pc;
3263 }
3264 #else
3265 /* Assume this is an ia32 build - amd64 should always have DWARF information */
3266 void *xlat_get_native_pc( void *code, uint32_t code_size )
3267 {
3268 void *result = NULL;
3269 asm(
3270 "mov %%ebp, %%eax\n\t"    /* start from our own frame pointer */
3271 "mov $0x8, %%ecx\n\t"     /* scan at most 8 frames */
3272 "mov %1, %%edx\n"         /* frame pointer used by translated code */
3273 "frame_loop: test %%eax, %%eax\n\t"
3274 "je frame_not_found\n\t"
3275 "cmp (%%eax), %%edx\n\t"  /* saved EBP == translated-code EBP? */
3276 "je frame_found\n\t"
3277 "sub $0x1, %%ecx\n\t"
3278 "je frame_not_found\n\t"
3279 "movl (%%eax), %%eax\n\t" /* walk up to the caller's frame */
3280 "jmp frame_loop\n"
3281 "frame_found: movl 0x4(%%eax), %0\n" /* return address inside the block */
3282 "frame_not_found:"
3283 : "=r" (result)
3284 : "r" (((uint8_t *)&sh4r) + 128 )
3285 : "eax", "ecx", "edx" );
3286 return result;
3287 }
3288 #endif
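/* Illustrative note: the scan above walks up to 8 saved frame pointers; the
 * generated code runs with %ebp fixed at ((uint8_t *)&sh4r)+128, so the frame
 * whose saved EBP equals that value was called directly from translated code,
 * and the word above the saved EBP is the native return address inside the
 * block. Rough C equivalent (sketch; assumes every intervening frame keeps
 * the standard push %ebp / mov %esp,%ebp layout):
 *
 *     void **frame = __builtin_frame_address(0);
 *     void *target = ((uint8_t *)&sh4r) + 128;
 *     int i;
 *     for( i = 0; frame != NULL && i < 8; i++ ) {
 *         if( frame[0] == target )
 *             return frame[1];           // saved return address
 *         frame = (void **)frame[0];     // walk to the caller's frame
 *     }
 *     return NULL;
 */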
.