/**
 * SH4 => x86 translation. This version does no real optimization, it just
 * outputs straight-line x86 code - it mainly exists to provide a baseline
 * to test the optimizing versions against.
 *
 * Copyright (c) 2007 Nathan Keynes.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#include "sh4/sh4core.h"
#include "sh4/sh4dasm.h"
#include "sh4/sh4trans.h"
#include "sh4/sh4stat.h"
#include "sh4/sh4mmio.h"
#include "xlat/xltcache.h"
#include "xlat/x86/x86op.h"
#include "x86dasm/x86dasm.h"

#define DEFAULT_BACKPATCH_SIZE 4096
/* Offset of a reg relative to the sh4r structure */
#define REG_OFFSET(reg)  (((char *)&sh4r.reg) - ((char *)&sh4r) - 128)
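/* The -128 bias here matches the "sh4r+128" base kept in the host base
 * register (see x86_symbol_table below): pointing the base register into
 * the middle of the structure lets most register accesses be encoded with
 * a signed 8-bit displacement instead of a 32-bit one. */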
#define R_T      REG_OFFSET(t)
#define R_Q      REG_OFFSET(q)
#define R_S      REG_OFFSET(s)
#define R_M      REG_OFFSET(m)
#define R_SR     REG_OFFSET(sr)
#define R_GBR    REG_OFFSET(gbr)
#define R_SSR    REG_OFFSET(ssr)
#define R_SPC    REG_OFFSET(spc)
#define R_VBR    REG_OFFSET(vbr)
#define R_MACH   REG_OFFSET(mac)+4
#define R_MACL   REG_OFFSET(mac)
#define R_PC     REG_OFFSET(pc)
#define R_NEW_PC REG_OFFSET(new_pc)
#define R_PR     REG_OFFSET(pr)
#define R_SGR    REG_OFFSET(sgr)
#define R_FPUL   REG_OFFSET(fpul)
#define R_FPSCR  REG_OFFSET(fpscr)
#define R_DBR    REG_OFFSET(dbr)
#define R_R(rn)  REG_OFFSET(r[rn])
#define R_FR(f)  REG_OFFSET(fr[0][(f)^1])
#define R_XF(f)  REG_OFFSET(fr[1][(f)^1])
#define R_DR(f)  REG_OFFSET(fr[(f)&1][(f)&0x0E])
#define R_DRL(f) REG_OFFSET(fr[(f)&1][(f)|0x01])
#define R_DRH(f) REG_OFFSET(fr[(f)&1][(f)&0x0E])
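/* Note the (f)^1 swizzle on the single-precision accessors: each FR pair
 * is stored word-swapped, so the combined 64-bit value at R_DR(f) is in
 * host little-endian order and can be used directly by the x87
 * double-precision load/store macros further below. */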
#define SH4_MODE_UNKNOWN -1

struct backpatch_record {
    uint32_t fixup_offset;
    uint32_t fixup_icount;
    int32_t exc_code;
};
/**
 * Struct to manage internal translation state. This state is not saved -
 * it is only valid between calls to sh4_translate_begin_block() and
 * sh4_translate_end_block()
 */
struct sh4_x86_state {
    uint8_t *code;
    gboolean in_delay_slot;
    gboolean fpuen_checked; /* true if we've already checked fpu enabled. */
    gboolean branch_taken; /* true if we branched unconditionally */
    gboolean double_prec; /* true if FPU is in double-precision mode */
    gboolean double_size; /* true if FPU is in double-size mode */
    gboolean sse3_enabled; /* true if host supports SSE3 instructions */
    uint32_t block_start_pc;
    uint32_t stack_posn; /* Trace stack height for alignment purposes */
    uint32_t sh4_mode;   /* Mirror of sh4r.xlat_sh4_mode */
    int tstate;

    /* Mode settings */
    gboolean tlb_on; /* True if tlb translation is active */
    struct mem_region_fn **priv_address_space;
    struct mem_region_fn **user_address_space;
    gboolean fastmem;

    /* Instrumentation */
    xlat_block_begin_callback_t begin_callback;
    xlat_block_end_callback_t end_callback;
    gboolean profile_blocks;

    /* Allocated memory for the (block-wide) back-patch list */
    struct backpatch_record *backpatch_list;
    uint32_t backpatch_posn;
    uint32_t backpatch_size;
};
static struct sh4_x86_state sh4_x86;

static uint32_t max_int = 0x7FFFFFFF;
static uint32_t min_int = 0x80000000;
static uint32_t save_fcw; /* save value for fpu control word */
static uint32_t trunc_fcw = 0x0F7F; /* fcw value for truncation mode */
static struct x86_symbol x86_symbol_table[] = {
    { "sh4r+128", ((char *)&sh4r)+128 },
    { "sh4_cpu_period", &sh4_cpu_period },
    { "sh4_address_space", NULL },
    { "sh4_user_address_space", NULL },
    { "sh4_translate_breakpoint_hit", sh4_translate_breakpoint_hit },
    { "sh4_write_fpscr", sh4_write_fpscr },
    { "sh4_write_sr", sh4_write_sr },
    { "sh4_read_sr", sh4_read_sr },
    { "sh4_raise_exception", sh4_raise_exception },
    { "sh4_sleep", sh4_sleep },
    { "sh4_fsca", sh4_fsca },
    { "sh4_ftrv", sh4_ftrv },
    { "sh4_switch_fr_banks", sh4_switch_fr_banks },
    { "sh4_execute_instruction", sh4_execute_instruction },
    { "signsat48", signsat48 },
    { "xlat_get_code_by_vma", xlat_get_code_by_vma },
    { "xlat_get_code", xlat_get_code }
};
gboolean is_sse3_supported()
{
    uint32_t features;

    /* CPUID function 1: feature flags are returned in ECX; bit 0 = SSE3 */
    __asm__ __volatile__(
        "mov $0x01, %%eax\n\t"
        "cpuid\n\t" : "=c" (features) : : "eax", "edx", "ebx");
    return (features & 1) ? TRUE : FALSE;
}
void sh4_translate_set_address_space( struct mem_region_fn **priv, struct mem_region_fn **user )
{
    sh4_x86.priv_address_space = priv;
    sh4_x86.user_address_space = user;
    x86_symbol_table[2].ptr = priv;
    x86_symbol_table[3].ptr = user;
}

void sh4_translate_init(void)
{
    sh4_x86.backpatch_list = malloc(DEFAULT_BACKPATCH_SIZE);
    sh4_x86.backpatch_size = DEFAULT_BACKPATCH_SIZE / sizeof(struct backpatch_record);
    sh4_x86.begin_callback = NULL;
    sh4_x86.end_callback = NULL;
    sh4_translate_set_address_space( sh4_address_space, sh4_user_address_space );
    sh4_x86.fastmem = TRUE;
    sh4_x86.profile_blocks = FALSE;
    sh4_x86.sse3_enabled = is_sse3_supported();
    x86_set_symtab( x86_symbol_table, sizeof(x86_symbol_table)/sizeof(struct x86_symbol) );
}

void sh4_translate_set_callbacks( xlat_block_begin_callback_t begin, xlat_block_end_callback_t end )
{
    sh4_x86.begin_callback = begin;
    sh4_x86.end_callback = end;
}

void sh4_translate_set_fastmem( gboolean flag )
{
    sh4_x86.fastmem = flag;
}

void sh4_translate_set_profile_blocks( gboolean flag )
{
    sh4_x86.profile_blocks = flag;
}

gboolean sh4_translate_get_profile_blocks()
{
    return sh4_x86.profile_blocks;
}
/**
 * Disassemble the given translated code block, and its source SH4 code block
 * side-by-side. The current native pc will be marked if non-null.
 */
void sh4_translate_disasm_block( FILE *out, void *code, sh4addr_t source_start, void *native_pc )
{
    char buf[256];
    char op[256];

    uintptr_t target_start = (uintptr_t)code, target_pc;
    uintptr_t target_end = target_start + xlat_get_code_size(code);
    uint32_t source_pc = source_start;
    uint32_t source_end = source_pc;
    xlat_recovery_record_t source_recov_table = XLAT_RECOVERY_TABLE(code);
    xlat_recovery_record_t source_recov_end = source_recov_table + XLAT_BLOCK_FOR_CODE(code)->recover_table_size - 1;

    for( target_pc = target_start; target_pc < target_end; ) {
        uintptr_t pc2 = x86_disasm_instruction( target_pc, buf, sizeof(buf), op );
#if SIZEOF_VOID_P == 8
        fprintf( out, "%c%016lx: %-30s %-40s", (target_pc == (uintptr_t)native_pc ? '*' : ' '),
                 target_pc, op, buf );
#else
        fprintf( out, "%c%08lx: %-30s %-40s", (target_pc == (uintptr_t)native_pc ? '*' : ' '),
                 target_pc, op, buf );
#endif
        if( source_recov_table < source_recov_end &&
            target_pc >= (target_start + source_recov_table->xlat_offset) ) {
            source_recov_table++;
            if( source_end < (source_start + (source_recov_table->sh4_icount)*2) )
                source_end = source_start + (source_recov_table->sh4_icount)*2;
        }

        if( source_pc < source_end ) {
            uint32_t source_pc2 = sh4_disasm_instruction( source_pc, buf, sizeof(buf), op );
            fprintf( out, " %08X: %s %s\n", source_pc, op, buf );
            source_pc = source_pc2;
        } else {
            fprintf( out, "\n" );
        }
        target_pc = pc2;
    }

    while( source_pc < source_end ) {
        uint32_t source_pc2 = sh4_disasm_instruction( source_pc, buf, sizeof(buf), op );
        fprintf( out, "%*c %08X: %s %s\n", 72,' ', source_pc, op, buf );
        source_pc = source_pc2;
    }
}
static void sh4_x86_add_backpatch( uint8_t *fixup_addr, uint32_t fixup_pc, uint32_t exc_code )
{
    int reloc_size = 4;

    if( exc_code == -2 ) {
        reloc_size = sizeof(void *);
    }

    if( sh4_x86.backpatch_posn == sh4_x86.backpatch_size ) {
        sh4_x86.backpatch_size <<= 1;
        sh4_x86.backpatch_list = realloc( sh4_x86.backpatch_list,
                                          sh4_x86.backpatch_size * sizeof(struct backpatch_record));
        assert( sh4_x86.backpatch_list != NULL );
    }
    if( sh4_x86.in_delay_slot ) {
        fixup_pc -= 2;
    }

    sh4_x86.backpatch_list[sh4_x86.backpatch_posn].fixup_offset =
        (((uint8_t *)fixup_addr) - ((uint8_t *)xlat_current_block->code)) - reloc_size;
    sh4_x86.backpatch_list[sh4_x86.backpatch_posn].fixup_icount = (fixup_pc - sh4_x86.block_start_pc)>>1;
    sh4_x86.backpatch_list[sh4_x86.backpatch_posn].exc_code = exc_code;
    sh4_x86.backpatch_posn++;
}
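/* Each record remembers where in the output buffer a rel32 displacement
 * (or, for exc_code == -2, a pointer-sized immediate) needs rewriting once
 * the exception trailer has been emitted; fixup_icount records how far
 * into the block the faulting instruction lies. */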
#define TSTATE_NONE -1
#define TSTATE_O    X86_COND_O
#define TSTATE_C    X86_COND_C
#define TSTATE_E    X86_COND_E
#define TSTATE_NE   X86_COND_NE
#define TSTATE_G    X86_COND_G
#define TSTATE_GE   X86_COND_GE
#define TSTATE_A    X86_COND_A
#define TSTATE_AE   X86_COND_AE
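/* sh4_x86.tstate tracks which x86 condition code (if any) currently holds
 * the value of the SH4 T flag, letting the SETcc/Jcc macros below test
 * EFLAGS directly instead of reloading sh4r.t from memory. */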
#define MARK_JMP8(x)  uint8_t *_mark_jmp_##x = (xlat_output-1)
#define JMP_TARGET(x) *_mark_jmp_##x += (xlat_output - _mark_jmp_##x)
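/* MARK_JMP8 records the address of the rel8 displacement byte just emitted
 * (initialized to -1 by the J*_label macros below); JMP_TARGET then adds
 * the distance from that byte to the current output position, which yields
 * the correct displacement relative to the end of the jump instruction. */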
/* Convenience instructions */
#define LDC_t()          CMPB_imms_rbpdisp(1,R_T); CMC()
#define SETE_t()         SETCCB_cc_rbpdisp(X86_COND_E,R_T)
#define SETA_t()         SETCCB_cc_rbpdisp(X86_COND_A,R_T)
#define SETAE_t()        SETCCB_cc_rbpdisp(X86_COND_AE,R_T)
#define SETG_t()         SETCCB_cc_rbpdisp(X86_COND_G,R_T)
#define SETGE_t()        SETCCB_cc_rbpdisp(X86_COND_GE,R_T)
#define SETC_t()         SETCCB_cc_rbpdisp(X86_COND_C,R_T)
#define SETO_t()         SETCCB_cc_rbpdisp(X86_COND_O,R_T)
#define SETNE_t()        SETCCB_cc_rbpdisp(X86_COND_NE,R_T)
#define SETC_r8(r1)      SETCCB_cc_r8(X86_COND_C, r1)
#define JAE_label(label) JCC_cc_rel8(X86_COND_AE,-1); MARK_JMP8(label)
#define JBE_label(label) JCC_cc_rel8(X86_COND_BE,-1); MARK_JMP8(label)
#define JE_label(label)  JCC_cc_rel8(X86_COND_E,-1); MARK_JMP8(label)
#define JGE_label(label) JCC_cc_rel8(X86_COND_GE,-1); MARK_JMP8(label)
#define JNA_label(label) JCC_cc_rel8(X86_COND_NA,-1); MARK_JMP8(label)
#define JNE_label(label) JCC_cc_rel8(X86_COND_NE,-1); MARK_JMP8(label)
#define JNO_label(label) JCC_cc_rel8(X86_COND_NO,-1); MARK_JMP8(label)
#define JS_label(label)  JCC_cc_rel8(X86_COND_S,-1); MARK_JMP8(label)
#define JMP_label(label) JMP_rel8(-1); MARK_JMP8(label)
#define JNE_exc(exc)     JCC_cc_rel32(X86_COND_NE,0); sh4_x86_add_backpatch(xlat_output, pc, exc)

/** Branch if T is set (either in the current cflags, or in sh4r.t) */
#define JT_label(label) if( sh4_x86.tstate == TSTATE_NONE ) { \
        CMPL_imms_rbpdisp( 1, R_T ); sh4_x86.tstate = TSTATE_E; } \
    JCC_cc_rel8(sh4_x86.tstate,-1); MARK_JMP8(label)

/** Branch if T is clear (either in the current cflags or in sh4r.t) */
#define JF_label(label) if( sh4_x86.tstate == TSTATE_NONE ) { \
        CMPL_imms_rbpdisp( 1, R_T ); sh4_x86.tstate = TSTATE_E; } \
    JCC_cc_rel8(sh4_x86.tstate^1, -1); MARK_JMP8(label)
#define load_reg(x86reg,sh4reg)  MOVL_rbpdisp_r32( REG_OFFSET(r[sh4reg]), x86reg )
#define store_reg(x86reg,sh4reg) MOVL_r32_rbpdisp( x86reg, REG_OFFSET(r[sh4reg]) )

/**
 * Load an FR register (single-precision floating point) into an integer x86
 * register (eg for register-to-register moves)
 */
#define load_fr(reg,frm)  MOVL_rbpdisp_r32( REG_OFFSET(fr[0][(frm)^1]), reg )
#define load_xf(reg,frm)  MOVL_rbpdisp_r32( REG_OFFSET(fr[1][(frm)^1]), reg )

/**
 * Load the low half of a DR register (DR or XD) into an integer x86 register
 */
#define load_dr0(reg,frm) MOVL_rbpdisp_r32( REG_OFFSET(fr[frm&1][frm|0x01]), reg )
#define load_dr1(reg,frm) MOVL_rbpdisp_r32( REG_OFFSET(fr[frm&1][frm&0x0E]), reg )

/**
 * Store an FR register (single-precision floating point) from an integer x86
 * register (eg for register-to-register moves)
 */
#define store_fr(reg,frm) MOVL_r32_rbpdisp( reg, REG_OFFSET(fr[0][(frm)^1]) )
#define store_xf(reg,frm) MOVL_r32_rbpdisp( reg, REG_OFFSET(fr[1][(frm)^1]) )

#define store_dr0(reg,frm) MOVL_r32_rbpdisp( reg, REG_OFFSET(fr[frm&1][frm|0x01]) )
#define store_dr1(reg,frm) MOVL_r32_rbpdisp( reg, REG_OFFSET(fr[frm&1][frm&0x0E]) )
#define push_fpul()   FLDF_rbpdisp(R_FPUL)
#define pop_fpul()    FSTPF_rbpdisp(R_FPUL)
#define push_fr(frm)  FLDF_rbpdisp( REG_OFFSET(fr[0][(frm)^1]) )
#define pop_fr(frm)   FSTPF_rbpdisp( REG_OFFSET(fr[0][(frm)^1]) )
#define push_xf(frm)  FLDF_rbpdisp( REG_OFFSET(fr[1][(frm)^1]) )
#define pop_xf(frm)   FSTPF_rbpdisp( REG_OFFSET(fr[1][(frm)^1]) )
#define push_dr(frm)  FLDD_rbpdisp( REG_OFFSET(fr[0][(frm)&0x0E]) )
#define pop_dr(frm)   FSTPD_rbpdisp( REG_OFFSET(fr[0][(frm)&0x0E]) )
#define push_xdr(frm) FLDD_rbpdisp( REG_OFFSET(fr[1][(frm)&0x0E]) )
#define pop_xdr(frm)  FSTPD_rbpdisp( REG_OFFSET(fr[1][(frm)&0x0E]) )

#ifdef ENABLE_SH4STATS
#define COUNT_INST(id) MOVL_imm32_r32( id, REG_EAX ); CALL1_ptr_r32(sh4_stats_add, REG_EAX); sh4_x86.tstate = TSTATE_NONE
#else
#define COUNT_INST(id)
#endif
/* Exception checks - Note that all exception checks will clobber EAX */

#define check_priv( ) \
    if( (sh4_x86.sh4_mode & SR_MD) == 0 ) { \
        if( sh4_x86.in_delay_slot ) { \
            exit_block_exc(EXC_SLOT_ILLEGAL, (pc-2), 4 ); \
        } else { \
            exit_block_exc(EXC_ILLEGAL, pc, 2); \
        } \
        sh4_x86.branch_taken = TRUE; \
        sh4_x86.in_delay_slot = DELAY_NONE; \
        return 2; \
    }

#define check_fpuen( ) \
    if( !sh4_x86.fpuen_checked ) {\
        sh4_x86.fpuen_checked = TRUE;\
        MOVL_rbpdisp_r32( R_SR, REG_EAX );\
        ANDL_imms_r32( SR_FD, REG_EAX );\
        if( sh4_x86.in_delay_slot ) {\
            JNE_exc(EXC_SLOT_FPU_DISABLED);\
        } else {\
            JNE_exc(EXC_FPU_DISABLED);\
        }\
        sh4_x86.tstate = TSTATE_NONE; \
    }
#define check_ralign16( x86reg ) \
    TESTL_imms_r32( 0x00000001, x86reg ); \
    JNE_exc(EXC_DATA_ADDR_READ)

#define check_walign16( x86reg ) \
    TESTL_imms_r32( 0x00000001, x86reg ); \
    JNE_exc(EXC_DATA_ADDR_WRITE);

#define check_ralign32( x86reg ) \
    TESTL_imms_r32( 0x00000003, x86reg ); \
    JNE_exc(EXC_DATA_ADDR_READ)

#define check_walign32( x86reg ) \
    TESTL_imms_r32( 0x00000003, x86reg ); \
    JNE_exc(EXC_DATA_ADDR_WRITE);

#define check_ralign64( x86reg ) \
    TESTL_imms_r32( 0x00000007, x86reg ); \
    JNE_exc(EXC_DATA_ADDR_READ)

#define check_walign64( x86reg ) \
    TESTL_imms_r32( 0x00000007, x86reg ); \
    JNE_exc(EXC_DATA_ADDR_WRITE);
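/* Each alignment check tests the low address bits and branches to a
 * backpatched exception stub if any are set, so the aligned fast path
 * costs a single test-and-branch. */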
#define address_space() ((sh4_x86.sh4_mode&SR_MD) ? (uintptr_t)sh4_x86.priv_address_space : (uintptr_t)sh4_x86.user_address_space)

/* Note: For SR.MD == 1 && MMUCR.AT == 0, there are no memory exceptions, so
 * don't waste the cycles expecting them. Otherwise we need to save the exception pointer.
 */
#ifdef HAVE_FRAME_ADDRESS
static void call_read_func(int addr_reg, int value_reg, int offset, int pc)
{
    decode_address(address_space(), addr_reg);
    if( !sh4_x86.tlb_on && (sh4_x86.sh4_mode & SR_MD) ) {
        CALL1_r32disp_r32(REG_ECX, offset, addr_reg);
    } else {
        if( addr_reg != REG_ARG1 ) {
            MOVL_r32_r32( addr_reg, REG_ARG1 );
        }
        MOVP_immptr_rptr( 0, REG_ARG2 );
        sh4_x86_add_backpatch( xlat_output, pc, -2 );
        CALL2_r32disp_r32_r32(REG_ECX, offset, REG_ARG1, REG_ARG2);
    }
    if( value_reg != REG_RESULT1 ) {
        MOVL_r32_r32( REG_RESULT1, value_reg );
    }
}

static void call_write_func(int addr_reg, int value_reg, int offset, int pc)
{
    decode_address(address_space(), addr_reg);
    if( !sh4_x86.tlb_on && (sh4_x86.sh4_mode & SR_MD) ) {
        CALL2_r32disp_r32_r32(REG_ECX, offset, addr_reg, value_reg);
    } else {
        if( value_reg != REG_ARG2 ) {
            MOVL_r32_r32( value_reg, REG_ARG2 );
        }
        if( addr_reg != REG_ARG1 ) {
            MOVL_r32_r32( addr_reg, REG_ARG1 );
        }
#if MAX_REG_ARG > 2
        MOVP_immptr_rptr( 0, REG_ARG3 );
        sh4_x86_add_backpatch( xlat_output, pc, -2 );
        CALL3_r32disp_r32_r32_r32(REG_ECX, offset, REG_ARG1, REG_ARG2, REG_ARG3);
#else
        MOVL_imm32_rspdisp( 0, 0 );
        sh4_x86_add_backpatch( xlat_output, pc, -2 );
        CALL3_r32disp_r32_r32_r32(REG_ECX, offset, REG_ARG1, REG_ARG2, 0);
#endif
    }
}
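/* The 0 immediate loaded into the exception-pointer argument above is a
 * placeholder: sh4_x86_add_backpatch() records it with exc_code == -2, and
 * sh4_translate_end_block() later rewrites it with the address of the
 * exception epilogue. */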
#else
static void call_read_func(int addr_reg, int value_reg, int offset, int pc)
{
    decode_address(address_space(), addr_reg);
    CALL1_r32disp_r32(REG_ECX, offset, addr_reg);
    if( value_reg != REG_RESULT1 ) {
        MOVL_r32_r32( REG_RESULT1, value_reg );
    }
}

static void call_write_func(int addr_reg, int value_reg, int offset, int pc)
{
    decode_address(address_space(), addr_reg);
    CALL2_r32disp_r32_r32(REG_ECX, offset, addr_reg, value_reg);
}
#endif
#define MEM_REGION_PTR(name) offsetof( struct mem_region_fn, name )
#define MEM_READ_BYTE( addr_reg, value_reg )  call_read_func(addr_reg, value_reg, MEM_REGION_PTR(read_byte), pc)
#define MEM_READ_BYTE_FOR_WRITE( addr_reg, value_reg ) call_read_func( addr_reg, value_reg, MEM_REGION_PTR(read_byte_for_write), pc)
#define MEM_READ_WORD( addr_reg, value_reg )  call_read_func(addr_reg, value_reg, MEM_REGION_PTR(read_word), pc)
#define MEM_READ_LONG( addr_reg, value_reg )  call_read_func(addr_reg, value_reg, MEM_REGION_PTR(read_long), pc)
#define MEM_WRITE_BYTE( addr_reg, value_reg ) call_write_func(addr_reg, value_reg, MEM_REGION_PTR(write_byte), pc)
#define MEM_WRITE_WORD( addr_reg, value_reg ) call_write_func(addr_reg, value_reg, MEM_REGION_PTR(write_word), pc)
#define MEM_WRITE_LONG( addr_reg, value_reg ) call_write_func(addr_reg, value_reg, MEM_REGION_PTR(write_long), pc)
#define MEM_PREFETCH( addr_reg )  call_read_func(addr_reg, REG_RESULT1, MEM_REGION_PTR(prefetch), pc)

#define SLOTILLEGAL() exit_block_exc(EXC_SLOT_ILLEGAL, pc-2, 4); sh4_x86.in_delay_slot = DELAY_NONE; return 2;
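/* Note that these macros (and SLOTILLEGAL) reference the local 'pc' of the
 * instruction being translated, so every memory access records enough
 * information to back-patch an exception exit. */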
/** Offset of xlat_sh4_mode field relative to the code pointer */
#define XLAT_SH4_MODE_CODE_OFFSET  (int32_t)(offsetof(struct xlat_cache_block, xlat_sh4_mode) - offsetof(struct xlat_cache_block,code) )
#define XLAT_CHAIN_CODE_OFFSET  (int32_t)(offsetof(struct xlat_cache_block, chain) - offsetof(struct xlat_cache_block,code) )
#define XLAT_ACTIVE_CODE_OFFSET  (int32_t)(offsetof(struct xlat_cache_block, active) - offsetof(struct xlat_cache_block,code) )
void sh4_translate_begin_block( sh4addr_t pc )
{
    sh4_x86.code = xlat_output;
    sh4_x86.in_delay_slot = FALSE;
    sh4_x86.fpuen_checked = FALSE;
    sh4_x86.branch_taken = FALSE;
    sh4_x86.backpatch_posn = 0;
    sh4_x86.block_start_pc = pc;
    sh4_x86.tlb_on = IS_TLB_ENABLED();
    sh4_x86.tstate = TSTATE_NONE;
    sh4_x86.double_prec = sh4r.fpscr & FPSCR_PR;
    sh4_x86.double_size = sh4r.fpscr & FPSCR_SZ;
    sh4_x86.sh4_mode = sh4r.xlat_sh4_mode;

    if( sh4_x86.begin_callback ) {
        CALL_ptr( sh4_x86.begin_callback );
    }
    if( sh4_x86.profile_blocks ) {
        MOVP_immptr_rptr( sh4_x86.code + XLAT_ACTIVE_CODE_OFFSET, REG_EAX );
        ADDL_imms_r32disp( 1, REG_EAX, 0 );
    }
}
uint32_t sh4_translate_end_block_size()
{
    if( sh4_x86.backpatch_posn <= 3 ) {
        return EPILOGUE_SIZE + (sh4_x86.backpatch_posn*(12+CALL1_PTR_MIN_SIZE));
    } else {
        return EPILOGUE_SIZE + (3*(12+CALL1_PTR_MIN_SIZE)) + (sh4_x86.backpatch_posn-3)*(15+CALL1_PTR_MIN_SIZE);
    }
}
/**
 * Embed a breakpoint into the generated code
 */
void sh4_translate_emit_breakpoint( sh4vma_t pc )
{
    MOVL_imm32_r32( pc, REG_EAX );
    CALL1_ptr_r32( sh4_translate_breakpoint_hit, REG_EAX );
    sh4_x86.tstate = TSTATE_NONE;
}
#define UNTRANSLATABLE(pc) !IS_IN_ICACHE(pc)

/**
 * Test if the loaded target code pointer in %eax is valid, and if so jump
 * directly into it, bypassing the normal exit.
 */
static void jump_next_block()
{
    uint8_t *ptr = xlat_output;
    TESTP_rptr_rptr(REG_EAX, REG_EAX);
    JE_label(nullptr);
    if( sh4_x86.sh4_mode == SH4_MODE_UNKNOWN ) {
        /* sr/fpscr was changed, possibly updated xlat_sh4_mode, so reload it */
        MOVL_rbpdisp_r32( REG_OFFSET(xlat_sh4_mode), REG_ECX );
        CMPL_r32_r32disp( REG_ECX, REG_EAX, XLAT_SH4_MODE_CODE_OFFSET );
    } else {
        CMPL_imms_r32disp( sh4_x86.sh4_mode, REG_EAX, XLAT_SH4_MODE_CODE_OFFSET );
    }
    JNE_label(wrongmode);
    LEAP_rptrdisp_rptr(REG_EAX, PROLOGUE_SIZE,REG_EAX);
    if( sh4_x86.end_callback ) {
        /* Note this does leave the stack out of alignment, but that doesn't
         * matter for what we're currently using it for.
         */
        PUSH_r32(REG_EAX);
        MOVP_immptr_rptr(sh4_x86.end_callback, REG_ECX);
        JMP_rptr(REG_ECX);
    } else {
        JMP_rptr(REG_EAX);
    }
    JMP_TARGET(wrongmode);
    MOVP_rptrdisp_rptr( REG_EAX, XLAT_CHAIN_CODE_OFFSET, REG_EAX );
    int rel = ptr - xlat_output;
    JMP_prerel(rel);
    JMP_TARGET(nullptr);
}
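/* The wrongmode path loads the next block in the mode chain and jumps back
 * to the test at the top, so the generated code walks the chain until it
 * finds a block compiled for the current SH4 mode; a NULL pointer falls
 * through to whatever exit code the caller emits next. */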
static void FASTCALL sh4_translate_get_code_and_backpatch( uint32_t pc )
{
    uint8_t *target = (uint8_t *)xlat_get_code_by_vma(pc);
    while( target != NULL && sh4r.xlat_sh4_mode != XLAT_BLOCK_MODE(target) ) {
        target = XLAT_BLOCK_CHAIN(target);
    }
    if( target == NULL ) {
        target = sh4_translate_basic_block( pc );
    }
    uint8_t *backpatch = ((uint8_t *)__builtin_return_address(0)) - (CALL1_PTR_MIN_SIZE);
    *backpatch = 0xE9; /* opcode for jmp rel32 */
    *(uint32_t *)(backpatch+1) = (uint32_t)(target-backpatch)+PROLOGUE_SIZE-5;
    *(void **)(backpatch+5) = XLAT_BLOCK_FOR_CODE(target)->use_list;
    XLAT_BLOCK_FOR_CODE(target)->use_list = backpatch;

    uint8_t **retptr = ((uint8_t **)__builtin_frame_address(0))+1;
    assert( *retptr == ((uint8_t *)__builtin_return_address(0)) );
    *retptr = backpatch;
}
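/* In effect this rewrites the call that got us here into a direct jmp-rel32
 * to the target block, stashes the use-list link in the bytes after the
 * jump, and then adjusts the saved return address so that "returning" lands
 * on the freshly written jump. */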
static void emit_translate_and_backpatch()
{
    /* NB: this is either 7 bytes (i386) or 12 bytes (x86-64) */
    CALL1_ptr_r32(sh4_translate_get_code_and_backpatch, REG_ARG1);

    /* When patched, the jmp instruction will be 5 bytes (either platform) -
     * we need to reserve sizeof(void*) bytes for the use-list pointer
     */
    if( sizeof(void*) == 8 ) {
        NOP();
    } else {
        NOP2();
    }
}
/**
 * If we're jumping to a fixed address (or at least fixed relative to the
 * current PC), then we can do a direct branch. REG_ARG1 should contain
 * the PC at this point.
 */
static void jump_next_block_fixed_pc( sh4addr_t pc )
{
    if( IS_IN_ICACHE(pc) ) {
        if( sh4_x86.sh4_mode != SH4_MODE_UNKNOWN && sh4_x86.end_callback == NULL ) {
            /* Fixed address, in cache, and fixed SH4 mode - generate a call to the
             * fetch-and-backpatch routine, which will replace the call with a branch */
            emit_translate_and_backpatch();
            return;
        } else {
            MOVP_moffptr_rax( xlat_get_lut_entry(GET_ICACHE_PHYS(pc)) );
            ANDP_imms_rptr( -4, REG_EAX );
        }
    } else if( sh4_x86.tlb_on ) {
        CALL1_ptr_r32(xlat_get_code_by_vma, REG_ARG1);
    } else {
        CALL1_ptr_r32(xlat_get_code, REG_ARG1);
    }
    jump_next_block();
}
void sh4_translate_unlink_block( void *use_list )
{
    uint8_t *tmp = xlat_output; /* In case something is active, which should never happen */
    void *next = use_list;
    while( next != NULL ) {
        xlat_output = (uint8_t *)next;
        next = *(void **)(xlat_output+5);
        emit_translate_and_backpatch();
    }
    xlat_output = tmp;
}
static void exit_block()
{
    if( sh4_x86.end_callback ) {
        MOVP_immptr_rptr(sh4_x86.end_callback, REG_ECX);
        JMP_rptr(REG_ECX);
    } else {
        RET();
    }
}
/**
 * Exit the block with sh4r.pc already written
 */
void exit_block_pcset( sh4addr_t pc )
{
    MOVL_imm32_r32( ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period, REG_ECX );
    ADDL_rbpdisp_r32( REG_OFFSET(slice_cycle), REG_ECX );
    MOVL_r32_rbpdisp( REG_ECX, REG_OFFSET(slice_cycle) );
    CMPL_r32_rbpdisp( REG_ECX, REG_OFFSET(event_pending) );
    JBE_label(exitloop);
    MOVL_rbpdisp_r32( R_PC, REG_ARG1 );
    if( sh4_x86.tlb_on ) {
        CALL1_ptr_r32(xlat_get_code_by_vma,REG_ARG1);
    } else {
        CALL1_ptr_r32(xlat_get_code,REG_ARG1);
    }
    jump_next_block();
    JMP_TARGET(exitloop);
    exit_block();
}
/**
 * Exit the block with sh4r.new_pc written with the target pc
 */
void exit_block_newpcset( sh4addr_t pc )
{
    MOVL_imm32_r32( ((pc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period, REG_ECX );
    ADDL_rbpdisp_r32( REG_OFFSET(slice_cycle), REG_ECX );
    MOVL_r32_rbpdisp( REG_ECX, REG_OFFSET(slice_cycle) );
    MOVL_rbpdisp_r32( R_NEW_PC, REG_ARG1 );
    MOVL_r32_rbpdisp( REG_ARG1, R_PC );
    CMPL_r32_rbpdisp( REG_ECX, REG_OFFSET(event_pending) );
    JBE_label(exitloop);
    if( sh4_x86.tlb_on ) {
        CALL1_ptr_r32(xlat_get_code_by_vma,REG_ARG1);
    } else {
        CALL1_ptr_r32(xlat_get_code,REG_ARG1);
    }
    jump_next_block();
    JMP_TARGET(exitloop);
    exit_block();
}
/**
 * Exit the block to an absolute PC
 */
void exit_block_abs( sh4addr_t pc, sh4addr_t endpc )
{
    MOVL_imm32_r32( ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period, REG_ECX );
    ADDL_rbpdisp_r32( REG_OFFSET(slice_cycle), REG_ECX );
    MOVL_r32_rbpdisp( REG_ECX, REG_OFFSET(slice_cycle) );

    MOVL_imm32_r32( pc, REG_ARG1 );
    MOVL_r32_rbpdisp( REG_ARG1, R_PC );
    CMPL_r32_rbpdisp( REG_ECX, REG_OFFSET(event_pending) );
    JBE_label(exitloop);
    jump_next_block_fixed_pc(pc);
    JMP_TARGET(exitloop);
    exit_block();
}
/**
 * Exit the block to a relative PC
 */
void exit_block_rel( sh4addr_t pc, sh4addr_t endpc )
{
    MOVL_imm32_r32( ((endpc - sh4_x86.block_start_pc)>>1)*sh4_cpu_period, REG_ECX );
    ADDL_rbpdisp_r32( REG_OFFSET(slice_cycle), REG_ECX );
    MOVL_r32_rbpdisp( REG_ECX, REG_OFFSET(slice_cycle) );

    if( pc == sh4_x86.block_start_pc && sh4_x86.sh4_mode == sh4r.xlat_sh4_mode ) {
        /* Special case for tight loops - the PC doesn't change, and
         * we already know the target address. Just check events pending before
         * looping.
         */
        CMPL_r32_rbpdisp( REG_ECX, REG_OFFSET(event_pending) );
        uint32_t backdisp = ((uintptr_t)(sh4_x86.code - xlat_output)) + PROLOGUE_SIZE;
        JCC_cc_prerel(X86_COND_A, backdisp);
    } else {
        MOVL_imm32_r32( pc - sh4_x86.block_start_pc, REG_ARG1 );
        ADDL_rbpdisp_r32( R_PC, REG_ARG1 );
        MOVL_r32_rbpdisp( REG_ARG1, R_PC );
        CMPL_r32_rbpdisp( REG_ECX, REG_OFFSET(event_pending) );
        JBE_label(exitloop2);

        jump_next_block_fixed_pc(pc);
        JMP_TARGET(exitloop2);
    }
    exit_block();
}
/**
 * Exit unconditionally with a general exception
 */
void exit_block_exc( int code, sh4addr_t pc, int inst_adjust )
{
    MOVL_imm32_r32( pc - sh4_x86.block_start_pc, REG_ECX );
    ADDL_r32_rbpdisp( REG_ECX, R_PC );
    MOVL_imm32_r32( ((pc - sh4_x86.block_start_pc + inst_adjust)>>1)*sh4_cpu_period, REG_ECX );
    ADDL_r32_rbpdisp( REG_ECX, REG_OFFSET(slice_cycle) );
    MOVL_imm32_r32( code, REG_ARG1 );
    CALL1_ptr_r32( sh4_raise_exception, REG_ARG1 );
    exit_block();
}
/**
 * Embed a call to sh4_execute_instruction for situations that we
 * can't translate (just page-crossing delay slots at the moment).
 * Caller is responsible for setting new_pc before calling this function.
 *
 * Performs:
 *   Set PC = endpc
 *   Set sh4r.in_delay_slot = sh4_x86.in_delay_slot
 *   Update slice_cycle for endpc+2 (single step doesn't update slice_cycle)
 *   Call sh4_execute_instruction
 *   Call xlat_get_code_by_vma / xlat_get_code as for normal exit
 */
void exit_block_emu( sh4vma_t endpc )
{
    MOVL_imm32_r32( endpc - sh4_x86.block_start_pc, REG_ECX );   // 5
    ADDL_r32_rbpdisp( REG_ECX, R_PC );

    MOVL_imm32_r32( (((endpc - sh4_x86.block_start_pc)>>1)+1)*sh4_cpu_period, REG_ECX ); // 5
    ADDL_r32_rbpdisp( REG_ECX, REG_OFFSET(slice_cycle) );     // 6
    MOVL_imm32_r32( sh4_x86.in_delay_slot ? 1 : 0, REG_ECX );
    MOVL_r32_rbpdisp( REG_ECX, REG_OFFSET(in_delay_slot) );

    CALL_ptr( sh4_execute_instruction );
    exit_block();
}
/**
 * Write the block trailer (exception handling block)
 */
void sh4_translate_end_block( sh4addr_t pc ) {
    if( sh4_x86.branch_taken == FALSE ) {
        // Didn't exit unconditionally already, so write the termination here
        exit_block_rel( pc, pc );
    }
    if( sh4_x86.backpatch_posn != 0 ) {
        unsigned int i;
        // Exception raised - cleanup and exit
        uint8_t *end_ptr = xlat_output;
        MOVL_r32_r32( REG_EDX, REG_ECX );
        ADDL_r32_r32( REG_EDX, REG_ECX );
        ADDL_r32_rbpdisp( REG_ECX, R_SPC );
        MOVL_moffptr_eax( &sh4_cpu_period );
        INC_r32( REG_EDX );  /* Add 1 for the aborting instruction itself */
        MULL_r32( REG_EDX );
        ADDL_r32_rbpdisp( REG_EAX, REG_OFFSET(slice_cycle) );
        exit_block();

        for( i=0; i< sh4_x86.backpatch_posn; i++ ) {
            uint32_t *fixup_addr = (uint32_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset];
            if( sh4_x86.backpatch_list[i].exc_code < 0 ) {
                if( sh4_x86.backpatch_list[i].exc_code == -2 ) {
                    *((uintptr_t *)fixup_addr) = (uintptr_t)xlat_output;
                } else {
                    *fixup_addr += xlat_output - (uint8_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset] - 4;
                }
                MOVL_imm32_r32( sh4_x86.backpatch_list[i].fixup_icount, REG_EDX );
                int rel = end_ptr - xlat_output;
                JMP_prerel(rel);
            } else {
                *fixup_addr += xlat_output - (uint8_t *)&xlat_current_block->code[sh4_x86.backpatch_list[i].fixup_offset] - 4;
                MOVL_imm32_r32( sh4_x86.backpatch_list[i].exc_code, REG_ARG1 );
                CALL1_ptr_r32( sh4_raise_exception, REG_ARG1 );
                MOVL_imm32_r32( sh4_x86.backpatch_list[i].fixup_icount, REG_EDX );
                int rel = end_ptr - xlat_output;
                JMP_prerel(rel);
            }
        }
    }
}
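/* Each backpatch stub loads its instruction count into EDX and jumps to
 * the common trailer above, which advances SPC by icount*2 bytes and
 * charges (icount+1) instruction periods to slice_cycle before exiting. */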
/**
 * Translate a single instruction. Delayed branches are handled specially
 * by translating both the branch and the delayed instruction as a single
 * unit.
 *
 * The instruction MUST be in the icache (assert check)
 *
 * @return true if the instruction marks the end of a basic block
 * (eg a branch or an untranslatable instruction).
 */
uint32_t sh4_translate_instruction( sh4vma_t pc )
{
    uint32_t ir;
    /* Read instruction from icache */
    assert( IS_IN_ICACHE(pc) );
    ir = *(uint16_t *)GET_ICACHE_PTR(pc);

    if( !sh4_x86.in_delay_slot ) {
        sh4_translate_add_recovery( (pc - sh4_x86.block_start_pc)>>1 );
    }

    /* check for breakpoints at this pc */
    for( int i=0; i<sh4_breakpoint_count; i++ ) {
        if( sh4_breakpoints[i].address == pc ) {
            sh4_translate_emit_breakpoint(pc);
            break;
        }
    }
%%
ADD Rm, Rn {:
    COUNT_INST(I_ADD);
    load_reg( REG_EAX, Rm );
    load_reg( REG_ECX, Rn );
    ADDL_r32_r32( REG_EAX, REG_ECX );
    store_reg( REG_ECX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
:}
ADD #imm, Rn {:
    COUNT_INST(I_ADDI);
    ADDL_imms_rbpdisp( imm, REG_OFFSET(r[Rn]) );
    sh4_x86.tstate = TSTATE_NONE;
:}
ADDC Rm, Rn {:
    COUNT_INST(I_ADDC);
    if( sh4_x86.tstate != TSTATE_C ) {
        LDC_t();
    }
    load_reg( REG_EAX, Rm );
    load_reg( REG_ECX, Rn );
    ADCL_r32_r32( REG_EAX, REG_ECX );
    store_reg( REG_ECX, Rn );
    SETC_t();
    sh4_x86.tstate = TSTATE_C;
:}
ADDV Rm, Rn {:
    COUNT_INST(I_ADDV);
    load_reg( REG_EAX, Rm );
    load_reg( REG_ECX, Rn );
    ADDL_r32_r32( REG_EAX, REG_ECX );
    store_reg( REG_ECX, Rn );
    SETO_t();
    sh4_x86.tstate = TSTATE_O;
:}
AND Rm, Rn {:
    COUNT_INST(I_AND);
    load_reg( REG_EAX, Rm );
    load_reg( REG_ECX, Rn );
    ANDL_r32_r32( REG_EAX, REG_ECX );
    store_reg( REG_ECX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
:}
AND #imm, R0 {:
    COUNT_INST(I_ANDI);
    load_reg( REG_EAX, 0 );
    ANDL_imms_r32(imm, REG_EAX);
    store_reg( REG_EAX, 0 );
    sh4_x86.tstate = TSTATE_NONE;
:}
AND.B #imm, @(R0, GBR) {:
    COUNT_INST(I_ANDB);
    load_reg( REG_EAX, 0 );
    ADDL_rbpdisp_r32( R_GBR, REG_EAX );
    MOVL_r32_rspdisp(REG_EAX, 0);
    MEM_READ_BYTE_FOR_WRITE( REG_EAX, REG_EDX );
    MOVL_rspdisp_r32(0, REG_EAX);
    ANDL_imms_r32(imm, REG_EDX );
    MEM_WRITE_BYTE( REG_EAX, REG_EDX );
    sh4_x86.tstate = TSTATE_NONE;
:}
CMP/EQ Rm, Rn {:
    COUNT_INST(I_CMPEQ);
    load_reg( REG_EAX, Rm );
    load_reg( REG_ECX, Rn );
    CMPL_r32_r32( REG_EAX, REG_ECX );
    SETE_t();
    sh4_x86.tstate = TSTATE_E;
:}
CMP/EQ #imm, R0 {:
    COUNT_INST(I_CMPEQI);
    load_reg( REG_EAX, 0 );
    CMPL_imms_r32(imm, REG_EAX);
    SETE_t();
    sh4_x86.tstate = TSTATE_E;
:}
CMP/GE Rm, Rn {:
    COUNT_INST(I_CMPGE);
    load_reg( REG_EAX, Rm );
    load_reg( REG_ECX, Rn );
    CMPL_r32_r32( REG_EAX, REG_ECX );
    SETGE_t();
    sh4_x86.tstate = TSTATE_GE;
:}
CMP/GT Rm, Rn {:
    COUNT_INST(I_CMPGT);
    load_reg( REG_EAX, Rm );
    load_reg( REG_ECX, Rn );
    CMPL_r32_r32( REG_EAX, REG_ECX );
    SETG_t();
    sh4_x86.tstate = TSTATE_G;
:}
CMP/HI Rm, Rn {:
    COUNT_INST(I_CMPHI);
    load_reg( REG_EAX, Rm );
    load_reg( REG_ECX, Rn );
    CMPL_r32_r32( REG_EAX, REG_ECX );
    SETA_t();
    sh4_x86.tstate = TSTATE_A;
:}
CMP/HS Rm, Rn {:
    COUNT_INST(I_CMPHS);
    load_reg( REG_EAX, Rm );
    load_reg( REG_ECX, Rn );
    CMPL_r32_r32( REG_EAX, REG_ECX );
    SETAE_t();
    sh4_x86.tstate = TSTATE_AE;
:}
CMP/PL Rn {:
    COUNT_INST(I_CMPPL);
    load_reg( REG_EAX, Rn );
    CMPL_imms_r32( 0, REG_EAX );
    SETG_t();
    sh4_x86.tstate = TSTATE_G;
:}
CMP/PZ Rn {:
    COUNT_INST(I_CMPPZ);
    load_reg( REG_EAX, Rn );
    CMPL_imms_r32( 0, REG_EAX );
    SETGE_t();
    sh4_x86.tstate = TSTATE_GE;
:}
CMP/STR Rm, Rn {:
    COUNT_INST(I_CMPSTR);
    load_reg( REG_EAX, Rm );
    load_reg( REG_ECX, Rn );
    XORL_r32_r32( REG_ECX, REG_EAX );
    TESTB_r8_r8( REG_AL, REG_AL );
    JE_label(target1);
    TESTB_r8_r8( REG_AH, REG_AH );
    JE_label(target2);
    SHRL_imm_r32( 16, REG_EAX );
    TESTB_r8_r8( REG_AL, REG_AL );
    JE_label(target3);
    TESTB_r8_r8( REG_AH, REG_AH );
    JMP_TARGET(target1);
    JMP_TARGET(target2);
    JMP_TARGET(target3);
    SETE_t();
    sh4_x86.tstate = TSTATE_E;
:}
DIV0S Rm, Rn {:
    COUNT_INST(I_DIV0S);
    load_reg( REG_EAX, Rm );
    load_reg( REG_ECX, Rn );
    SHRL_imm_r32( 31, REG_EAX );
    SHRL_imm_r32( 31, REG_ECX );
    MOVL_r32_rbpdisp( REG_EAX, R_M );
    MOVL_r32_rbpdisp( REG_ECX, R_Q );
    CMPL_r32_r32( REG_EAX, REG_ECX );
    SETNE_t();
    sh4_x86.tstate = TSTATE_NE;
:}
DIV0U {:
    COUNT_INST(I_DIV0U);
    XORL_r32_r32( REG_EAX, REG_EAX );
    MOVL_r32_rbpdisp( REG_EAX, R_Q );
    MOVL_r32_rbpdisp( REG_EAX, R_M );
    MOVL_r32_rbpdisp( REG_EAX, R_T );
    sh4_x86.tstate = TSTATE_C; // works for DIV1
:}
DIV1 Rm, Rn {:
    COUNT_INST(I_DIV1);
    MOVL_rbpdisp_r32( R_M, REG_ECX );
    load_reg( REG_EAX, Rn );
    if( sh4_x86.tstate != TSTATE_C ) {
        LDC_t();
    }
    RCLL_imm_r32( 1, REG_EAX );
    SETC_r8( REG_DL ); // Q'
    CMPL_rbpdisp_r32( R_Q, REG_ECX );
    JE_label(mqequal);
    ADDL_rbpdisp_r32( REG_OFFSET(r[Rm]), REG_EAX );
    JMP_label(end);
    JMP_TARGET(mqequal);
    SUBL_rbpdisp_r32( REG_OFFSET(r[Rm]), REG_EAX );
    JMP_TARGET(end);
    store_reg( REG_EAX, Rn ); // Done with Rn now
    SETC_r8(REG_AL); // tmp1
    XORB_r8_r8( REG_DL, REG_AL ); // Q' = Q ^ tmp1
    XORB_r8_r8( REG_AL, REG_CL ); // Q'' = Q' ^ M
    MOVL_r32_rbpdisp( REG_ECX, R_Q );
    XORL_imms_r32( 1, REG_AL );   // T = !Q'
    MOVZXL_r8_r32( REG_AL, REG_EAX );
    MOVL_r32_rbpdisp( REG_EAX, R_T );
    sh4_x86.tstate = TSTATE_NONE;
:}
DMULS.L Rm, Rn {:
    COUNT_INST(I_DMULS);
    load_reg( REG_EAX, Rm );
    load_reg( REG_ECX, Rn );
    IMULL_r32(REG_ECX);
    MOVL_r32_rbpdisp( REG_EDX, R_MACH );
    MOVL_r32_rbpdisp( REG_EAX, R_MACL );
    sh4_x86.tstate = TSTATE_NONE;
:}
DMULU.L Rm, Rn {:
    COUNT_INST(I_DMULU);
    load_reg( REG_EAX, Rm );
    load_reg( REG_ECX, Rn );
    MULL_r32(REG_ECX);
    MOVL_r32_rbpdisp( REG_EDX, R_MACH );
    MOVL_r32_rbpdisp( REG_EAX, R_MACL );
    sh4_x86.tstate = TSTATE_NONE;
:}
DT Rn {:
    COUNT_INST(I_DT);
    load_reg( REG_EAX, Rn );
    ADDL_imms_r32( -1, REG_EAX );
    store_reg( REG_EAX, Rn );
    SETE_t();
    sh4_x86.tstate = TSTATE_E;
:}
EXTS.B Rm, Rn {:
    COUNT_INST(I_EXTSB);
    load_reg( REG_EAX, Rm );
    MOVSXL_r8_r32( REG_EAX, REG_EAX );
    store_reg( REG_EAX, Rn );
:}
EXTS.W Rm, Rn {:
    COUNT_INST(I_EXTSW);
    load_reg( REG_EAX, Rm );
    MOVSXL_r16_r32( REG_EAX, REG_EAX );
    store_reg( REG_EAX, Rn );
:}
EXTU.B Rm, Rn {:
    COUNT_INST(I_EXTUB);
    load_reg( REG_EAX, Rm );
    MOVZXL_r8_r32( REG_EAX, REG_EAX );
    store_reg( REG_EAX, Rn );
:}
EXTU.W Rm, Rn {:
    COUNT_INST(I_EXTUW);
    load_reg( REG_EAX, Rm );
    MOVZXL_r16_r32( REG_EAX, REG_EAX );
    store_reg( REG_EAX, Rn );
:}
MAC.L @Rm+, @Rn+ {:
    COUNT_INST(I_MACL);
    if( Rm == Rn ) {
        load_reg( REG_EAX, Rm );
        check_ralign32( REG_EAX );
        MEM_READ_LONG( REG_EAX, REG_EAX );
        MOVL_r32_rspdisp(REG_EAX, 0);
        load_reg( REG_EAX, Rm );
        LEAL_r32disp_r32( REG_EAX, 4, REG_EAX );
        MEM_READ_LONG( REG_EAX, REG_EAX );
        ADDL_imms_rbpdisp( 8, REG_OFFSET(r[Rn]) );
    } else {
        load_reg( REG_EAX, Rm );
        check_ralign32( REG_EAX );
        MEM_READ_LONG( REG_EAX, REG_EAX );
        MOVL_r32_rspdisp( REG_EAX, 0 );
        load_reg( REG_EAX, Rn );
        check_ralign32( REG_EAX );
        MEM_READ_LONG( REG_EAX, REG_EAX );
        ADDL_imms_rbpdisp( 4, REG_OFFSET(r[Rn]) );
        ADDL_imms_rbpdisp( 4, REG_OFFSET(r[Rm]) );
    }

    IMULL_rspdisp( 0 );
    ADDL_r32_rbpdisp( REG_EAX, R_MACL );
    ADCL_r32_rbpdisp( REG_EDX, R_MACH );

    MOVL_rbpdisp_r32( R_S, REG_ECX );
    TESTL_r32_r32(REG_ECX, REG_ECX);
    JE_label( nosat );
    CALL_ptr( signsat48 );
    JMP_TARGET( nosat );
    sh4_x86.tstate = TSTATE_NONE;
:}
MAC.W @Rm+, @Rn+ {:
    COUNT_INST(I_MACW);
    if( Rm == Rn ) {
        load_reg( REG_EAX, Rm );
        check_ralign16( REG_EAX );
        MEM_READ_WORD( REG_EAX, REG_EAX );
        MOVL_r32_rspdisp( REG_EAX, 0 );
        load_reg( REG_EAX, Rm );
        LEAL_r32disp_r32( REG_EAX, 2, REG_EAX );
        MEM_READ_WORD( REG_EAX, REG_EAX );
        ADDL_imms_rbpdisp( 4, REG_OFFSET(r[Rn]) );
        // Note: translate twice in case of page boundaries. Maybe worth
        // adding a page-boundary check to skip the second translation
    } else {
        load_reg( REG_EAX, Rn );
        check_ralign16( REG_EAX );
        MEM_READ_WORD( REG_EAX, REG_EAX );
        MOVL_r32_rspdisp( REG_EAX, 0 );
        load_reg( REG_EAX, Rm );
        check_ralign16( REG_EAX );
        MEM_READ_WORD( REG_EAX, REG_EAX );
        ADDL_imms_rbpdisp( 2, REG_OFFSET(r[Rn]) );
        ADDL_imms_rbpdisp( 2, REG_OFFSET(r[Rm]) );
    }
    IMULL_rspdisp( 0 );
    MOVL_rbpdisp_r32( R_S, REG_ECX );
    TESTL_r32_r32( REG_ECX, REG_ECX );
    JE_label( nosat );

    ADDL_r32_rbpdisp( REG_EAX, R_MACL );  // 6
    JNO_label( end );                     // 2
    MOVL_imm32_r32( 1, REG_EDX );         // 5
    MOVL_r32_rbpdisp( REG_EDX, R_MACH );  // 6
    JS_label( positive );                 // 2
    MOVL_imm32_r32( 0x80000000, REG_EAX );// 5
    MOVL_r32_rbpdisp( REG_EAX, R_MACL );  // 6
    JMP_label(end2);                      // 2

    JMP_TARGET(positive);
    MOVL_imm32_r32( 0x7FFFFFFF, REG_EAX );// 5
    MOVL_r32_rbpdisp( REG_EAX, R_MACL );  // 6
    JMP_label(end3);                      // 2

    JMP_TARGET(nosat);
    ADDL_r32_rbpdisp( REG_EAX, R_MACL );  // 6
    ADCL_r32_rbpdisp( REG_EDX, R_MACH );  // 6
    JMP_TARGET(end);
    JMP_TARGET(end2);
    JMP_TARGET(end3);
    sh4_x86.tstate = TSTATE_NONE;
:}
MOVT Rn {:
    COUNT_INST(I_MOVT);
    MOVL_rbpdisp_r32( R_T, REG_EAX );
    store_reg( REG_EAX, Rn );
:}
MUL.L Rm, Rn {:
    COUNT_INST(I_MULL);
    load_reg( REG_EAX, Rm );
    load_reg( REG_ECX, Rn );
    MULL_r32( REG_ECX );
    MOVL_r32_rbpdisp( REG_EAX, R_MACL );
    sh4_x86.tstate = TSTATE_NONE;
:}
MULS.W Rm, Rn {:
    COUNT_INST(I_MULSW);
    MOVSXL_rbpdisp16_r32( R_R(Rm), REG_EAX );
    MOVSXL_rbpdisp16_r32( R_R(Rn), REG_ECX );
    MULL_r32( REG_ECX );
    MOVL_r32_rbpdisp( REG_EAX, R_MACL );
    sh4_x86.tstate = TSTATE_NONE;
:}
MULU.W Rm, Rn {:
    COUNT_INST(I_MULUW);
    MOVZXL_rbpdisp16_r32( R_R(Rm), REG_EAX );
    MOVZXL_rbpdisp16_r32( R_R(Rn), REG_ECX );
    MULL_r32( REG_ECX );
    MOVL_r32_rbpdisp( REG_EAX, R_MACL );
    sh4_x86.tstate = TSTATE_NONE;
:}
NEG Rm, Rn {:
    COUNT_INST(I_NEG);
    load_reg( REG_EAX, Rm );
    NEGL_r32( REG_EAX );
    store_reg( REG_EAX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
:}
NEGC Rm, Rn {:
    COUNT_INST(I_NEGC);
    load_reg( REG_EAX, Rm );
    XORL_r32_r32( REG_ECX, REG_ECX );
    LDC_t();
    SBBL_r32_r32( REG_EAX, REG_ECX );
    store_reg( REG_ECX, Rn );
    SETC_t();
    sh4_x86.tstate = TSTATE_C;
:}
NOT Rm, Rn {:
    COUNT_INST(I_NOT);
    load_reg( REG_EAX, Rm );
    NOTL_r32( REG_EAX );
    store_reg( REG_EAX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
:}
OR Rm, Rn {:
    COUNT_INST(I_OR);
    load_reg( REG_EAX, Rm );
    load_reg( REG_ECX, Rn );
    ORL_r32_r32( REG_EAX, REG_ECX );
    store_reg( REG_ECX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
:}
OR #imm, R0 {:
    COUNT_INST(I_ORI);
    load_reg( REG_EAX, 0 );
    ORL_imms_r32(imm, REG_EAX);
    store_reg( REG_EAX, 0 );
    sh4_x86.tstate = TSTATE_NONE;
:}
OR.B #imm, @(R0, GBR) {:
    COUNT_INST(I_ORB);
    load_reg( REG_EAX, 0 );
    ADDL_rbpdisp_r32( R_GBR, REG_EAX );
    MOVL_r32_rspdisp( REG_EAX, 0 );
    MEM_READ_BYTE_FOR_WRITE( REG_EAX, REG_EDX );
    MOVL_rspdisp_r32( 0, REG_EAX );
    ORL_imms_r32(imm, REG_EDX );
    MEM_WRITE_BYTE( REG_EAX, REG_EDX );
    sh4_x86.tstate = TSTATE_NONE;
:}
ROTCL Rn {:
    COUNT_INST(I_ROTCL);
    load_reg( REG_EAX, Rn );
    if( sh4_x86.tstate != TSTATE_C ) {
        LDC_t();
    }
    RCLL_imm_r32( 1, REG_EAX );
    store_reg( REG_EAX, Rn );
    SETC_t();
    sh4_x86.tstate = TSTATE_C;
:}
ROTCR Rn {:
    COUNT_INST(I_ROTCR);
    load_reg( REG_EAX, Rn );
    if( sh4_x86.tstate != TSTATE_C ) {
        LDC_t();
    }
    RCRL_imm_r32( 1, REG_EAX );
    store_reg( REG_EAX, Rn );
    SETC_t();
    sh4_x86.tstate = TSTATE_C;
:}
ROTL Rn {:
    COUNT_INST(I_ROTL);
    load_reg( REG_EAX, Rn );
    ROLL_imm_r32( 1, REG_EAX );
    store_reg( REG_EAX, Rn );
    SETC_t();
    sh4_x86.tstate = TSTATE_C;
:}
ROTR Rn {:
    COUNT_INST(I_ROTR);
    load_reg( REG_EAX, Rn );
    RORL_imm_r32( 1, REG_EAX );
    store_reg( REG_EAX, Rn );
    SETC_t();
    sh4_x86.tstate = TSTATE_C;
:}
SHAD Rm, Rn {:
    COUNT_INST(I_SHAD);
    /* Annoyingly enough, not directly convertible */
    load_reg( REG_EAX, Rn );
    load_reg( REG_ECX, Rm );
    CMPL_imms_r32( 0, REG_ECX );
    JGE_label(doshl);

    NEGL_r32( REG_ECX );          // 2
    ANDB_imms_r8( 0x1F, REG_CL ); // 3
    JE_label(emptysar);           // 2
    SARL_cl_r32( REG_EAX );       // 2
    JMP_label(end);               // 2

    JMP_TARGET(emptysar);
    SARL_imm_r32(31, REG_EAX );   // 3
    JMP_label(end2);

    JMP_TARGET(doshl);
    ANDB_imms_r8( 0x1F, REG_CL ); // 3
    SHLL_cl_r32( REG_EAX );       // 2
    JMP_TARGET(end);
    JMP_TARGET(end2);
    store_reg( REG_EAX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
:}
SHLD Rm, Rn {:
    COUNT_INST(I_SHLD);
    load_reg( REG_EAX, Rn );
    load_reg( REG_ECX, Rm );
    CMPL_imms_r32( 0, REG_ECX );
    JGE_label(doshl);

    NEGL_r32( REG_ECX );          // 2
    ANDB_imms_r8( 0x1F, REG_CL ); // 3
    JE_label(emptyshr );
    SHRL_cl_r32( REG_EAX );       // 2
    JMP_label(end);               // 2

    JMP_TARGET(emptyshr);
    XORL_r32_r32( REG_EAX, REG_EAX );
    JMP_label(end2);

    JMP_TARGET(doshl);
    ANDB_imms_r8( 0x1F, REG_CL ); // 3
    SHLL_cl_r32( REG_EAX );       // 2
    JMP_TARGET(end);
    JMP_TARGET(end2);
    store_reg( REG_EAX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
:}
SHAL Rn {:
    COUNT_INST(I_SHAL);
    load_reg( REG_EAX, Rn );
    SHLL_imm_r32( 1, REG_EAX );
    SETC_t();
    store_reg( REG_EAX, Rn );
    sh4_x86.tstate = TSTATE_C;
:}
SHAR Rn {:
    COUNT_INST(I_SHAR);
    load_reg( REG_EAX, Rn );
    SARL_imm_r32( 1, REG_EAX );
    SETC_t();
    store_reg( REG_EAX, Rn );
    sh4_x86.tstate = TSTATE_C;
:}
SHLL Rn {:
    COUNT_INST(I_SHLL);
    load_reg( REG_EAX, Rn );
    SHLL_imm_r32( 1, REG_EAX );
    SETC_t();
    store_reg( REG_EAX, Rn );
    sh4_x86.tstate = TSTATE_C;
:}
SHLL2 Rn {:
    COUNT_INST(I_SHLL);
    load_reg( REG_EAX, Rn );
    SHLL_imm_r32( 2, REG_EAX );
    store_reg( REG_EAX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
:}
SHLL8 Rn {:
    COUNT_INST(I_SHLL);
    load_reg( REG_EAX, Rn );
    SHLL_imm_r32( 8, REG_EAX );
    store_reg( REG_EAX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
:}
SHLL16 Rn {:
    COUNT_INST(I_SHLL);
    load_reg( REG_EAX, Rn );
    SHLL_imm_r32( 16, REG_EAX );
    store_reg( REG_EAX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
:}
SHLR Rn {:
    COUNT_INST(I_SHLR);
    load_reg( REG_EAX, Rn );
    SHRL_imm_r32( 1, REG_EAX );
    SETC_t();
    store_reg( REG_EAX, Rn );
    sh4_x86.tstate = TSTATE_C;
:}
SHLR2 Rn {:
    COUNT_INST(I_SHLR);
    load_reg( REG_EAX, Rn );
    SHRL_imm_r32( 2, REG_EAX );
    store_reg( REG_EAX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
:}
SHLR8 Rn {:
    COUNT_INST(I_SHLR);
    load_reg( REG_EAX, Rn );
    SHRL_imm_r32( 8, REG_EAX );
    store_reg( REG_EAX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
:}
SHLR16 Rn {:
    COUNT_INST(I_SHLR);
    load_reg( REG_EAX, Rn );
    SHRL_imm_r32( 16, REG_EAX );
    store_reg( REG_EAX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
:}
SUB Rm, Rn {:
    COUNT_INST(I_SUB);
    load_reg( REG_EAX, Rm );
    load_reg( REG_ECX, Rn );
    SUBL_r32_r32( REG_EAX, REG_ECX );
    store_reg( REG_ECX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
:}
SUBC Rm, Rn {:
    COUNT_INST(I_SUBC);
    load_reg( REG_EAX, Rm );
    load_reg( REG_ECX, Rn );
    if( sh4_x86.tstate != TSTATE_C ) {
        LDC_t();
    }
    SBBL_r32_r32( REG_EAX, REG_ECX );
    store_reg( REG_ECX, Rn );
    SETC_t();
    sh4_x86.tstate = TSTATE_C;
:}
SUBV Rm, Rn {:
    COUNT_INST(I_SUBV);
    load_reg( REG_EAX, Rm );
    load_reg( REG_ECX, Rn );
    SUBL_r32_r32( REG_EAX, REG_ECX );
    store_reg( REG_ECX, Rn );
    SETO_t();
    sh4_x86.tstate = TSTATE_O;
:}
SWAP.B Rm, Rn {:
    COUNT_INST(I_SWAPB);
    load_reg( REG_EAX, Rm );
    XCHGB_r8_r8( REG_AL, REG_AH ); // NB: does not touch EFLAGS
    store_reg( REG_EAX, Rn );
:}
SWAP.W Rm, Rn {:
    COUNT_INST(I_SWAPB);
    load_reg( REG_EAX, Rm );
    MOVL_r32_r32( REG_EAX, REG_ECX );
    SHLL_imm_r32( 16, REG_ECX );
    SHRL_imm_r32( 16, REG_EAX );
    ORL_r32_r32( REG_EAX, REG_ECX );
    store_reg( REG_ECX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
:}
TAS.B @Rn {:
    COUNT_INST(I_TASB);
    load_reg( REG_EAX, Rn );
    MOVL_r32_rspdisp( REG_EAX, 0 );
    MEM_READ_BYTE_FOR_WRITE( REG_EAX, REG_EDX );
    TESTB_r8_r8( REG_DL, REG_DL );
    SETE_t();
    ORB_imms_r8( 0x80, REG_DL );
    MOVL_rspdisp_r32( 0, REG_EAX );
    MEM_WRITE_BYTE( REG_EAX, REG_EDX );
    sh4_x86.tstate = TSTATE_NONE;
:}
TST Rm, Rn {:
    COUNT_INST(I_TST);
    load_reg( REG_EAX, Rm );
    load_reg( REG_ECX, Rn );
    TESTL_r32_r32( REG_EAX, REG_ECX );
    SETE_t();
    sh4_x86.tstate = TSTATE_E;
:}
TST #imm, R0 {:
    COUNT_INST(I_TSTI);
    load_reg( REG_EAX, 0 );
    TESTL_imms_r32( imm, REG_EAX );
    SETE_t();
    sh4_x86.tstate = TSTATE_E;
:}
TST.B #imm, @(R0, GBR) {:
    COUNT_INST(I_TSTB);
    load_reg( REG_EAX, 0);
    ADDL_rbpdisp_r32( R_GBR, REG_EAX );
    MEM_READ_BYTE( REG_EAX, REG_EAX );
    TESTB_imms_r8( imm, REG_AL );
    SETE_t();
    sh4_x86.tstate = TSTATE_E;
:}
XOR Rm, Rn {:
    COUNT_INST(I_XOR);
    load_reg( REG_EAX, Rm );
    load_reg( REG_ECX, Rn );
    XORL_r32_r32( REG_EAX, REG_ECX );
    store_reg( REG_ECX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
:}
XOR #imm, R0 {:
    COUNT_INST(I_XORI);
    load_reg( REG_EAX, 0 );
    XORL_imms_r32( imm, REG_EAX );
    store_reg( REG_EAX, 0 );
    sh4_x86.tstate = TSTATE_NONE;
:}
XOR.B #imm, @(R0, GBR) {:
    COUNT_INST(I_XORB);
    load_reg( REG_EAX, 0 );
    ADDL_rbpdisp_r32( R_GBR, REG_EAX );
    MOVL_r32_rspdisp( REG_EAX, 0 );
    MEM_READ_BYTE_FOR_WRITE(REG_EAX, REG_EDX);
    MOVL_rspdisp_r32( 0, REG_EAX );
    XORL_imms_r32( imm, REG_EDX );
    MEM_WRITE_BYTE( REG_EAX, REG_EDX );
    sh4_x86.tstate = TSTATE_NONE;
:}
XTRCT Rm, Rn {:
    COUNT_INST(I_XTRCT);
    load_reg( REG_EAX, Rm );
    load_reg( REG_ECX, Rn );
    SHLL_imm_r32( 16, REG_EAX );
    SHRL_imm_r32( 16, REG_ECX );
    ORL_r32_r32( REG_EAX, REG_ECX );
    store_reg( REG_ECX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
:}
/* Data move instructions */
MOV Rm, Rn {:
    COUNT_INST(I_MOV);
    load_reg( REG_EAX, Rm );
    store_reg( REG_EAX, Rn );
:}
MOV #imm, Rn {:
    COUNT_INST(I_MOVI);
    MOVL_imm32_r32( imm, REG_EAX );
    store_reg( REG_EAX, Rn );
:}
MOV.B Rm, @Rn {:
    COUNT_INST(I_MOVB);
    load_reg( REG_EAX, Rn );
    load_reg( REG_EDX, Rm );
    MEM_WRITE_BYTE( REG_EAX, REG_EDX );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.B Rm, @-Rn {:
    COUNT_INST(I_MOVB);
    load_reg( REG_EAX, Rn );
    LEAL_r32disp_r32( REG_EAX, -1, REG_EAX );
    load_reg( REG_EDX, Rm );
    MEM_WRITE_BYTE( REG_EAX, REG_EDX );
    ADDL_imms_rbpdisp( -1, REG_OFFSET(r[Rn]) );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.B Rm, @(R0, Rn) {:
    COUNT_INST(I_MOVB);
    load_reg( REG_EAX, 0 );
    ADDL_rbpdisp_r32( REG_OFFSET(r[Rn]), REG_EAX );
    load_reg( REG_EDX, Rm );
    MEM_WRITE_BYTE( REG_EAX, REG_EDX );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.B R0, @(disp, GBR) {:
    COUNT_INST(I_MOVB);
    MOVL_rbpdisp_r32( R_GBR, REG_EAX );
    ADDL_imms_r32( disp, REG_EAX );
    load_reg( REG_EDX, 0 );
    MEM_WRITE_BYTE( REG_EAX, REG_EDX );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.B R0, @(disp, Rn) {:
    COUNT_INST(I_MOVB);
    load_reg( REG_EAX, Rn );
    ADDL_imms_r32( disp, REG_EAX );
    load_reg( REG_EDX, 0 );
    MEM_WRITE_BYTE( REG_EAX, REG_EDX );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.B @Rm, Rn {:
    COUNT_INST(I_MOVB);
    load_reg( REG_EAX, Rm );
    MEM_READ_BYTE( REG_EAX, REG_EAX );
    store_reg( REG_EAX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.B @Rm+, Rn {:
    COUNT_INST(I_MOVB);
    load_reg( REG_EAX, Rm );
    MEM_READ_BYTE( REG_EAX, REG_EAX );
    if( Rm != Rn ) {
        ADDL_imms_rbpdisp( 1, REG_OFFSET(r[Rm]) );
    }
    store_reg( REG_EAX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.B @(R0, Rm), Rn {:
    COUNT_INST(I_MOVB);
    load_reg( REG_EAX, 0 );
    ADDL_rbpdisp_r32( REG_OFFSET(r[Rm]), REG_EAX );
    MEM_READ_BYTE( REG_EAX, REG_EAX );
    store_reg( REG_EAX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.B @(disp, GBR), R0 {:
    COUNT_INST(I_MOVB);
    MOVL_rbpdisp_r32( R_GBR, REG_EAX );
    ADDL_imms_r32( disp, REG_EAX );
    MEM_READ_BYTE( REG_EAX, REG_EAX );
    store_reg( REG_EAX, 0 );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.B @(disp, Rm), R0 {:
    COUNT_INST(I_MOVB);
    load_reg( REG_EAX, Rm );
    ADDL_imms_r32( disp, REG_EAX );
    MEM_READ_BYTE( REG_EAX, REG_EAX );
    store_reg( REG_EAX, 0 );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.L Rm, @Rn {:
    COUNT_INST(I_MOVL);
    load_reg( REG_EAX, Rn );
    check_walign32(REG_EAX);
    MOVL_r32_r32( REG_EAX, REG_ECX );
    ANDL_imms_r32( 0xFC000000, REG_ECX );
    CMPL_imms_r32( 0xE0000000, REG_ECX );
    JNE_label( notsq );
    ANDL_imms_r32( 0x3C, REG_EAX );
    load_reg( REG_EDX, Rm );
    MOVL_r32_sib( REG_EDX, 0, REG_EBP, REG_EAX, REG_OFFSET(store_queue) );
    JMP_label(end);
    JMP_TARGET(notsq);
    load_reg( REG_EDX, Rm );
    MEM_WRITE_LONG( REG_EAX, REG_EDX );
    JMP_TARGET(end);
    sh4_x86.tstate = TSTATE_NONE;
:}
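/* Addresses in the 0xE0000000 region are the SH4 store queues, which are
 * emulated as a small buffer inside sh4r: the fast path above masks the
 * address down to an offset within that buffer (REG_OFFSET(store_queue))
 * instead of going through the memory subsystem. */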
MOV.L Rm, @-Rn {:
    COUNT_INST(I_MOVL);
    load_reg( REG_EAX, Rn );
    ADDL_imms_r32( -4, REG_EAX );
    check_walign32( REG_EAX );
    load_reg( REG_EDX, Rm );
    MEM_WRITE_LONG( REG_EAX, REG_EDX );
    ADDL_imms_rbpdisp( -4, REG_OFFSET(r[Rn]) );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.L Rm, @(R0, Rn) {:
    COUNT_INST(I_MOVL);
    load_reg( REG_EAX, 0 );
    ADDL_rbpdisp_r32( REG_OFFSET(r[Rn]), REG_EAX );
    check_walign32( REG_EAX );
    load_reg( REG_EDX, Rm );
    MEM_WRITE_LONG( REG_EAX, REG_EDX );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.L R0, @(disp, GBR) {:
    COUNT_INST(I_MOVL);
    MOVL_rbpdisp_r32( R_GBR, REG_EAX );
    ADDL_imms_r32( disp, REG_EAX );
    check_walign32( REG_EAX );
    load_reg( REG_EDX, 0 );
    MEM_WRITE_LONG( REG_EAX, REG_EDX );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.L Rm, @(disp, Rn) {:
    COUNT_INST(I_MOVL);
    load_reg( REG_EAX, Rn );
    ADDL_imms_r32( disp, REG_EAX );
    check_walign32( REG_EAX );
    MOVL_r32_r32( REG_EAX, REG_ECX );
    ANDL_imms_r32( 0xFC000000, REG_ECX );
    CMPL_imms_r32( 0xE0000000, REG_ECX );
    JNE_label( notsq );
    ANDL_imms_r32( 0x3C, REG_EAX );
    load_reg( REG_EDX, Rm );
    MOVL_r32_sib( REG_EDX, 0, REG_EBP, REG_EAX, REG_OFFSET(store_queue) );
    JMP_label(end);
    JMP_TARGET(notsq);
    load_reg( REG_EDX, Rm );
    MEM_WRITE_LONG( REG_EAX, REG_EDX );
    JMP_TARGET(end);
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.L @Rm, Rn {:
    COUNT_INST(I_MOVL);
    load_reg( REG_EAX, Rm );
    check_ralign32( REG_EAX );
    MEM_READ_LONG( REG_EAX, REG_EAX );
    store_reg( REG_EAX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.L @Rm+, Rn {:
    COUNT_INST(I_MOVL);
    load_reg( REG_EAX, Rm );
    check_ralign32( REG_EAX );
    MEM_READ_LONG( REG_EAX, REG_EAX );
    if( Rm != Rn ) {
        ADDL_imms_rbpdisp( 4, REG_OFFSET(r[Rm]) );
    }
    store_reg( REG_EAX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.L @(R0, Rm), Rn {:
    COUNT_INST(I_MOVL);
    load_reg( REG_EAX, 0 );
    ADDL_rbpdisp_r32( REG_OFFSET(r[Rm]), REG_EAX );
    check_ralign32( REG_EAX );
    MEM_READ_LONG( REG_EAX, REG_EAX );
    store_reg( REG_EAX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.L @(disp, GBR), R0 {:
    COUNT_INST(I_MOVL);
    MOVL_rbpdisp_r32( R_GBR, REG_EAX );
    ADDL_imms_r32( disp, REG_EAX );
    check_ralign32( REG_EAX );
    MEM_READ_LONG( REG_EAX, REG_EAX );
    store_reg( REG_EAX, 0 );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.L @(disp, PC), Rn {:
    COUNT_INST(I_MOVLPC);
    if( sh4_x86.in_delay_slot ) {
        SLOTILLEGAL();
    } else {
        uint32_t target = (pc & 0xFFFFFFFC) + disp + 4;
        if( sh4_x86.fastmem && IS_IN_ICACHE(target) ) {
            // If the target address is in the same page as the code, it's
            // pretty safe to just ref it directly and circumvent the whole
            // memory subsystem. (this is a big performance win)

            // FIXME: There's a corner-case that's not handled here when
            // the current code-page is in the ITLB but not in the UTLB.
            // (should generate a TLB miss although need to test SH4
            // behaviour to confirm) Unlikely to be anyone depending on this
            // behaviour though.
            sh4ptr_t ptr = GET_ICACHE_PTR(target);
            MOVL_moffptr_eax( ptr );
        } else {
            // Note: we use sh4r.pc for the calc as we could be running at a
            // different virtual address than the translation was done with,
            // but we can safely assume that the low bits are the same.
            MOVL_imm32_r32( (pc-sh4_x86.block_start_pc) + disp + 4 - (pc&0x03), REG_EAX );
            ADDL_rbpdisp_r32( R_PC, REG_EAX );
            MEM_READ_LONG( REG_EAX, REG_EAX );
            sh4_x86.tstate = TSTATE_NONE;
        }
        store_reg( REG_EAX, Rn );
    }
:}
MOV.L @(disp, Rm), Rn {:
    COUNT_INST(I_MOVL);
    load_reg( REG_EAX, Rm );
    ADDL_imms_r32( disp, REG_EAX );
    check_ralign32( REG_EAX );
    MEM_READ_LONG( REG_EAX, REG_EAX );
    store_reg( REG_EAX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.W Rm, @Rn {:
    COUNT_INST(I_MOVW);
    load_reg( REG_EAX, Rn );
    check_walign16( REG_EAX );
    load_reg( REG_EDX, Rm );
    MEM_WRITE_WORD( REG_EAX, REG_EDX );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.W Rm, @-Rn {:
    COUNT_INST(I_MOVW);
    load_reg( REG_EAX, Rn );
    check_walign16( REG_EAX );
    LEAL_r32disp_r32( REG_EAX, -2, REG_EAX );
    load_reg( REG_EDX, Rm );
    MEM_WRITE_WORD( REG_EAX, REG_EDX );
    ADDL_imms_rbpdisp( -2, REG_OFFSET(r[Rn]) );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.W Rm, @(R0, Rn) {:
    COUNT_INST(I_MOVW);
    load_reg( REG_EAX, 0 );
    ADDL_rbpdisp_r32( REG_OFFSET(r[Rn]), REG_EAX );
    check_walign16( REG_EAX );
    load_reg( REG_EDX, Rm );
    MEM_WRITE_WORD( REG_EAX, REG_EDX );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.W R0, @(disp, GBR) {:
    COUNT_INST(I_MOVW);
    MOVL_rbpdisp_r32( R_GBR, REG_EAX );
    ADDL_imms_r32( disp, REG_EAX );
    check_walign16( REG_EAX );
    load_reg( REG_EDX, 0 );
    MEM_WRITE_WORD( REG_EAX, REG_EDX );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.W R0, @(disp, Rn) {:
    COUNT_INST(I_MOVW);
    load_reg( REG_EAX, Rn );
    ADDL_imms_r32( disp, REG_EAX );
    check_walign16( REG_EAX );
    load_reg( REG_EDX, 0 );
    MEM_WRITE_WORD( REG_EAX, REG_EDX );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.W @Rm, Rn {:
    COUNT_INST(I_MOVW);
    load_reg( REG_EAX, Rm );
    check_ralign16( REG_EAX );
    MEM_READ_WORD( REG_EAX, REG_EAX );
    store_reg( REG_EAX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.W @Rm+, Rn {:
    COUNT_INST(I_MOVW);
    load_reg( REG_EAX, Rm );
    check_ralign16( REG_EAX );
    MEM_READ_WORD( REG_EAX, REG_EAX );
    if( Rm != Rn ) {
        ADDL_imms_rbpdisp( 2, REG_OFFSET(r[Rm]) );
    }
    store_reg( REG_EAX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.W @(R0, Rm), Rn {:
    COUNT_INST(I_MOVW);
    load_reg( REG_EAX, 0 );
    ADDL_rbpdisp_r32( REG_OFFSET(r[Rm]), REG_EAX );
    check_ralign16( REG_EAX );
    MEM_READ_WORD( REG_EAX, REG_EAX );
    store_reg( REG_EAX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.W @(disp, GBR), R0 {:
    COUNT_INST(I_MOVW);
    MOVL_rbpdisp_r32( R_GBR, REG_EAX );
    ADDL_imms_r32( disp, REG_EAX );
    check_ralign16( REG_EAX );
    MEM_READ_WORD( REG_EAX, REG_EAX );
    store_reg( REG_EAX, 0 );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOV.W @(disp, PC), Rn {:
    COUNT_INST(I_MOVW);
    if( sh4_x86.in_delay_slot ) {
        SLOTILLEGAL();
    } else {
        // See comments for MOV.L @(disp, PC), Rn
        uint32_t target = pc + disp + 4;
        if( sh4_x86.fastmem && IS_IN_ICACHE(target) ) {
            sh4ptr_t ptr = GET_ICACHE_PTR(target);
            MOVL_moffptr_eax( ptr );
            MOVSXL_r16_r32( REG_EAX, REG_EAX );
        } else {
            MOVL_imm32_r32( (pc - sh4_x86.block_start_pc) + disp + 4, REG_EAX );
            ADDL_rbpdisp_r32( R_PC, REG_EAX );
            MEM_READ_WORD( REG_EAX, REG_EAX );
            sh4_x86.tstate = TSTATE_NONE;
        }
        store_reg( REG_EAX, Rn );
    }
:}
MOV.W @(disp, Rm), R0 {:
    COUNT_INST(I_MOVW);
    load_reg( REG_EAX, Rm );
    ADDL_imms_r32( disp, REG_EAX );
    check_ralign16( REG_EAX );
    MEM_READ_WORD( REG_EAX, REG_EAX );
    store_reg( REG_EAX, 0 );
    sh4_x86.tstate = TSTATE_NONE;
:}
MOVA @(disp, PC), R0 {:
    COUNT_INST(I_MOVA);
    if( sh4_x86.in_delay_slot ) {
        SLOTILLEGAL();
    } else {
        MOVL_imm32_r32( (pc - sh4_x86.block_start_pc) + disp + 4 - (pc&0x03), REG_ECX );
        ADDL_rbpdisp_r32( R_PC, REG_ECX );
        store_reg( REG_ECX, 0 );
        sh4_x86.tstate = TSTATE_NONE;
    }
:}
MOVCA.L R0, @Rn {:
    COUNT_INST(I_MOVCA);
    load_reg( REG_EAX, Rn );
    check_walign32( REG_EAX );
    load_reg( REG_EDX, 0 );
    MEM_WRITE_LONG( REG_EAX, REG_EDX );
    sh4_x86.tstate = TSTATE_NONE;
:}
/* Control transfer instructions */
BF disp {:
    COUNT_INST(I_BF);
    if( sh4_x86.in_delay_slot ) {
        SLOTILLEGAL();
    } else {
        sh4vma_t target = disp + pc + 4;
        JT_label( nottaken );
        exit_block_rel(target, pc+2 );
        JMP_TARGET(nottaken);
        return 2;
    }
:}
BF/S disp {:
    COUNT_INST(I_BFS);
    if( sh4_x86.in_delay_slot ) {
        SLOTILLEGAL();
    } else {
        sh4_x86.in_delay_slot = DELAY_PC;
        if( UNTRANSLATABLE(pc+2) ) {
            MOVL_imm32_r32( pc + 4 - sh4_x86.block_start_pc, REG_EAX );
            JT_label(nottaken);
            ADDL_imms_r32( disp, REG_EAX );
            JMP_TARGET(nottaken);
            ADDL_rbpdisp_r32( R_PC, REG_EAX );
            MOVL_r32_rbpdisp( REG_EAX, R_NEW_PC );
            exit_block_emu(pc+2);
            sh4_x86.branch_taken = TRUE;
            return 2;
        } else {
            if( sh4_x86.tstate == TSTATE_NONE ) {
                CMPL_imms_rbpdisp( 1, R_T );
                sh4_x86.tstate = TSTATE_E;
            }
            sh4vma_t target = disp + pc + 4;
            JCC_cc_rel32(sh4_x86.tstate,0);
            uint32_t *patch = ((uint32_t *)xlat_output)-1;
            int save_tstate = sh4_x86.tstate;
            sh4_translate_instruction(pc+2);
            sh4_x86.in_delay_slot = DELAY_PC; /* Cleared by sh4_translate_instruction */
            exit_block_rel( target, pc+4 );

            // not taken
            *patch = (xlat_output - ((uint8_t *)patch)) - 4;
            sh4_x86.tstate = save_tstate;
            sh4_translate_instruction(pc+2);
            return 4;
        }
    }
:}
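/* Note that conditional delayed branches translate the delay-slot
 * instruction twice (once on the taken path and once on the fall-through
 * path), since its side effects must be visible in both continuations. */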
BRA disp {:
    COUNT_INST(I_BRA);
    if( sh4_x86.in_delay_slot ) {
        SLOTILLEGAL();
    } else {
        sh4_x86.in_delay_slot = DELAY_PC;
        sh4_x86.branch_taken = TRUE;
        if( UNTRANSLATABLE(pc+2) ) {
            MOVL_rbpdisp_r32( R_PC, REG_EAX );
            ADDL_imms_r32( pc + disp + 4 - sh4_x86.block_start_pc, REG_EAX );
            MOVL_r32_rbpdisp( REG_EAX, R_NEW_PC );
            exit_block_emu(pc+2);
            return 2;
        } else {
            sh4_translate_instruction( pc + 2 );
            exit_block_rel( disp + pc + 4, pc+4 );
            return 4;
        }
    }
:}
BRAF Rn {:
    COUNT_INST(I_BRAF);
    if( sh4_x86.in_delay_slot ) {
        SLOTILLEGAL();
    } else {
        MOVL_rbpdisp_r32( R_PC, REG_EAX );
        ADDL_imms_r32( pc + 4 - sh4_x86.block_start_pc, REG_EAX );
        ADDL_rbpdisp_r32( REG_OFFSET(r[Rn]), REG_EAX );
        MOVL_r32_rbpdisp( REG_EAX, R_NEW_PC );
        sh4_x86.in_delay_slot = DELAY_PC;
        sh4_x86.tstate = TSTATE_NONE;
        sh4_x86.branch_taken = TRUE;
        if( UNTRANSLATABLE(pc+2) ) {
            exit_block_emu(pc+2);
            return 2;
        } else {
            sh4_translate_instruction( pc + 2 );
            exit_block_newpcset(pc+4);
            return 4;
        }
    }
:}
BSR disp {:
    COUNT_INST(I_BSR);
    if( sh4_x86.in_delay_slot ) {
        SLOTILLEGAL();
    } else {
        MOVL_rbpdisp_r32( R_PC, REG_EAX );
        ADDL_imms_r32( pc + 4 - sh4_x86.block_start_pc, REG_EAX );
        MOVL_r32_rbpdisp( REG_EAX, R_PR );
        sh4_x86.in_delay_slot = DELAY_PC;
        sh4_x86.branch_taken = TRUE;
        sh4_x86.tstate = TSTATE_NONE;
        if( UNTRANSLATABLE(pc+2) ) {
            ADDL_imms_r32( disp, REG_EAX );
            MOVL_r32_rbpdisp( REG_EAX, R_NEW_PC );
            exit_block_emu(pc+2);
            return 2;
        } else {
            sh4_translate_instruction( pc + 2 );
            exit_block_rel( disp + pc + 4, pc+4 );
            return 4;
        }
    }
:}
BSRF Rn {:
    COUNT_INST(I_BSRF);
    if( sh4_x86.in_delay_slot ) {
        SLOTILLEGAL();
    } else {
        MOVL_rbpdisp_r32( R_PC, REG_EAX );
        ADDL_imms_r32( pc + 4 - sh4_x86.block_start_pc, REG_EAX );
        MOVL_r32_rbpdisp( REG_EAX, R_PR );
        ADDL_rbpdisp_r32( REG_OFFSET(r[Rn]), REG_EAX );
        MOVL_r32_rbpdisp( REG_EAX, R_NEW_PC );

        sh4_x86.in_delay_slot = DELAY_PC;
        sh4_x86.tstate = TSTATE_NONE;
        sh4_x86.branch_taken = TRUE;
        if( UNTRANSLATABLE(pc+2) ) {
            exit_block_emu(pc+2);
            return 2;
        } else {
            sh4_translate_instruction( pc + 2 );
            exit_block_newpcset(pc+4);
            return 4;
        }
    }
:}
BT disp {:
    COUNT_INST(I_BT);
    if( sh4_x86.in_delay_slot ) {
        SLOTILLEGAL();
    } else {
        sh4vma_t target = disp + pc + 4;
        JF_label( nottaken );
        exit_block_rel(target, pc+2 );
        JMP_TARGET(nottaken);
        return 2;
    }
:}
BT/S disp {:
    COUNT_INST(I_BTS);
    if( sh4_x86.in_delay_slot ) {
        SLOTILLEGAL();
    } else {
        sh4_x86.in_delay_slot = DELAY_PC;
        if( UNTRANSLATABLE(pc+2) ) {
            MOVL_imm32_r32( pc + 4 - sh4_x86.block_start_pc, REG_EAX );
            JF_label(nottaken);
            ADDL_imms_r32( disp, REG_EAX );
            JMP_TARGET(nottaken);
            ADDL_rbpdisp_r32( R_PC, REG_EAX );
            MOVL_r32_rbpdisp( REG_EAX, R_NEW_PC );
            exit_block_emu(pc+2);
            sh4_x86.branch_taken = TRUE;
            return 2;
        } else {
            if( sh4_x86.tstate == TSTATE_NONE ) {
                CMPL_imms_rbpdisp( 1, R_T );
                sh4_x86.tstate = TSTATE_E;
            }
            JCC_cc_rel32(sh4_x86.tstate^1,0);
            uint32_t *patch = ((uint32_t *)xlat_output)-1;

            int save_tstate = sh4_x86.tstate;
            sh4_translate_instruction(pc+2);
            sh4_x86.in_delay_slot = DELAY_PC; /* Cleared by sh4_translate_instruction */
            exit_block_rel( disp + pc + 4, pc+4 );
            // not taken
            *patch = (xlat_output - ((uint8_t *)patch)) - 4;
            sh4_x86.tstate = save_tstate;
            sh4_translate_instruction(pc+2);
            return 4;
        }
    }
:}
JMP @Rn {:
    COUNT_INST(I_JMP);
    if( sh4_x86.in_delay_slot ) {
        SLOTILLEGAL();
    } else {
        load_reg( REG_ECX, Rn );
        MOVL_r32_rbpdisp( REG_ECX, R_NEW_PC );
        sh4_x86.in_delay_slot = DELAY_PC;
        sh4_x86.branch_taken = TRUE;
        if( UNTRANSLATABLE(pc+2) ) {
            exit_block_emu(pc+2);
            return 2;
        } else {
            sh4_translate_instruction(pc+2);
            exit_block_newpcset(pc+4);
            return 4;
        }
    }
:}
JSR @Rn {:
    COUNT_INST(I_JSR);
    if( sh4_x86.in_delay_slot ) {
        SLOTILLEGAL();
    } else {
        MOVL_rbpdisp_r32( R_PC, REG_EAX );
        ADDL_imms_r32( pc + 4 - sh4_x86.block_start_pc, REG_EAX );
        MOVL_r32_rbpdisp( REG_EAX, R_PR );
        load_reg( REG_ECX, Rn );
        MOVL_r32_rbpdisp( REG_ECX, R_NEW_PC );
        sh4_x86.in_delay_slot = DELAY_PC;
        sh4_x86.branch_taken = TRUE;
        sh4_x86.tstate = TSTATE_NONE;
        if( UNTRANSLATABLE(pc+2) ) {
            exit_block_emu(pc+2);
            return 2;
        } else {
            sh4_translate_instruction(pc+2);
            exit_block_newpcset(pc+4);
            return 4;
        }
    }
:}
2153 if( sh4_x86.in_delay_slot ) {
2157 MOVL_rbpdisp_r32( R_SPC, REG_ECX );
2158 MOVL_r32_rbpdisp( REG_ECX, R_NEW_PC );
2159 MOVL_rbpdisp_r32( R_SSR, REG_EAX );
2160 CALL1_ptr_r32( sh4_write_sr, REG_EAX );
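        /* SR has just been reloaded from SSR: the privilege/bank state is no
         * longer known at translation time, and the FPU-enable check must be
         * redone before the next FP instruction.
         */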
        sh4_x86.in_delay_slot = DELAY_PC;
        sh4_x86.fpuen_checked = FALSE;
        sh4_x86.tstate = TSTATE_NONE;
        sh4_x86.branch_taken = TRUE;
        sh4_x86.sh4_mode = SH4_MODE_UNKNOWN;
        if( UNTRANSLATABLE(pc+2) ) {
            exit_block_emu(pc+2);
            sh4_translate_instruction(pc+2);
            exit_block_newpcset(pc+4);
    if( sh4_x86.in_delay_slot ) {
        MOVL_rbpdisp_r32( R_PR, REG_ECX );
        MOVL_r32_rbpdisp( REG_ECX, R_NEW_PC );
        sh4_x86.in_delay_slot = DELAY_PC;
        sh4_x86.branch_taken = TRUE;
        if( UNTRANSLATABLE(pc+2) ) {
            exit_block_emu(pc+2);
            sh4_translate_instruction(pc+2);
            exit_block_newpcset(pc+4);
    COUNT_INST(I_TRAPA);
    if( sh4_x86.in_delay_slot ) {
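        /* Set PC to the instruction after the TRAPA before raising the trap;
         * the block then exits with PC already up to date.
         */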
        MOVL_imm32_r32( pc+2 - sh4_x86.block_start_pc, REG_ECX ); // 5
        ADDL_r32_rbpdisp( REG_ECX, R_PC );
        MOVL_imm32_r32( imm, REG_EAX );
        CALL1_ptr_r32( sh4_raise_trap, REG_EAX );
        sh4_x86.tstate = TSTATE_NONE;
        exit_block_pcset(pc+2);
        sh4_x86.branch_taken = TRUE;
    COUNT_INST(I_UNDEF);
    if( sh4_x86.in_delay_slot ) {
        exit_block_exc(EXC_SLOT_ILLEGAL, pc-2, 4);
        exit_block_exc(EXC_ILLEGAL, pc, 2);
    COUNT_INST(I_CLRMAC);
    XORL_r32_r32(REG_EAX, REG_EAX);
    MOVL_r32_rbpdisp( REG_EAX, R_MACL );
    MOVL_r32_rbpdisp( REG_EAX, R_MACH );
    sh4_x86.tstate = TSTATE_NONE;
    SETCCB_cc_rbpdisp(X86_COND_C, R_S);
    sh4_x86.tstate = TSTATE_NONE;
    sh4_x86.tstate = TSTATE_C;
    SETCCB_cc_rbpdisp(X86_COND_C, R_S);
    sh4_x86.tstate = TSTATE_NONE;
    sh4_x86.tstate = TSTATE_C;

/* Floating point moves */
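/* Note: when FPSCR.SZ=1 (tracked in sh4_x86.double_size) FMOV transfers
 * 64-bit register pairs; otherwise it transfers single 32-bit registers.
 */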
    COUNT_INST(I_FMOV1);
    if( sh4_x86.double_size ) {
        load_dr0( REG_EAX, FRm );
        load_dr1( REG_ECX, FRm );
        store_dr0( REG_EAX, FRn );
        store_dr1( REG_ECX, FRn );
        load_fr( REG_EAX, FRm ); // SZ=0 branch
        store_fr( REG_EAX, FRn );
    COUNT_INST(I_FMOV2);
    load_reg( REG_EAX, Rn );
    if( sh4_x86.double_size ) {
        check_walign64( REG_EAX );
        load_dr0( REG_EDX, FRm );
        MEM_WRITE_LONG( REG_EAX, REG_EDX );
        load_reg( REG_EAX, Rn );
        LEAL_r32disp_r32( REG_EAX, 4, REG_EAX );
        load_dr1( REG_EDX, FRm );
        MEM_WRITE_LONG( REG_EAX, REG_EDX );
        check_walign32( REG_EAX );
        load_fr( REG_EDX, FRm );
        MEM_WRITE_LONG( REG_EAX, REG_EDX );
    sh4_x86.tstate = TSTATE_NONE;
    COUNT_INST(I_FMOV5);
    load_reg( REG_EAX, Rm );
    if( sh4_x86.double_size ) {
        check_ralign64( REG_EAX );
        MEM_READ_LONG( REG_EAX, REG_EAX );
        store_dr0( REG_EAX, FRn );
        load_reg( REG_EAX, Rm );
        LEAL_r32disp_r32( REG_EAX, 4, REG_EAX );
        MEM_READ_LONG( REG_EAX, REG_EAX );
        store_dr1( REG_EAX, FRn );
        check_ralign32( REG_EAX );
        MEM_READ_LONG( REG_EAX, REG_EAX );
        store_fr( REG_EAX, FRn );
    sh4_x86.tstate = TSTATE_NONE;
    COUNT_INST(I_FMOV3);
    load_reg( REG_EAX, Rn );
    if( sh4_x86.double_size ) {
        check_walign64( REG_EAX );
        LEAL_r32disp_r32( REG_EAX, -8, REG_EAX );
        load_dr0( REG_EDX, FRm );
        MEM_WRITE_LONG( REG_EAX, REG_EDX );
        load_reg( REG_EAX, Rn );
        LEAL_r32disp_r32( REG_EAX, -4, REG_EAX );
        load_dr1( REG_EDX, FRm );
        MEM_WRITE_LONG( REG_EAX, REG_EDX );
        ADDL_imms_rbpdisp( -8, REG_OFFSET(r[Rn]) );
        check_walign32( REG_EAX );
        LEAL_r32disp_r32( REG_EAX, -4, REG_EAX );
        load_fr( REG_EDX, FRm );
        MEM_WRITE_LONG( REG_EAX, REG_EDX );
        ADDL_imms_rbpdisp( -4, REG_OFFSET(r[Rn]) );
    sh4_x86.tstate = TSTATE_NONE;
    COUNT_INST(I_FMOV6);
    load_reg( REG_EAX, Rm );
    if( sh4_x86.double_size ) {
        check_ralign64( REG_EAX );
        MEM_READ_LONG( REG_EAX, REG_EAX );
        store_dr0( REG_EAX, FRn );
        load_reg( REG_EAX, Rm );
        LEAL_r32disp_r32( REG_EAX, 4, REG_EAX );
        MEM_READ_LONG( REG_EAX, REG_EAX );
        store_dr1( REG_EAX, FRn );
        ADDL_imms_rbpdisp( 8, REG_OFFSET(r[Rm]) );
        check_ralign32( REG_EAX );
        MEM_READ_LONG( REG_EAX, REG_EAX );
        store_fr( REG_EAX, FRn );
        ADDL_imms_rbpdisp( 4, REG_OFFSET(r[Rm]) );
    sh4_x86.tstate = TSTATE_NONE;
FMOV FRm, @(R0, Rn) {:
    COUNT_INST(I_FMOV4);
    load_reg( REG_EAX, Rn );
    ADDL_rbpdisp_r32( REG_OFFSET(r[0]), REG_EAX );
    if( sh4_x86.double_size ) {
        check_walign64( REG_EAX );
        load_dr0( REG_EDX, FRm );
        MEM_WRITE_LONG( REG_EAX, REG_EDX );
        load_reg( REG_EAX, Rn );
        ADDL_rbpdisp_r32( REG_OFFSET(r[0]), REG_EAX );
        LEAL_r32disp_r32( REG_EAX, 4, REG_EAX );
        load_dr1( REG_EDX, FRm );
        MEM_WRITE_LONG( REG_EAX, REG_EDX );
        check_walign32( REG_EAX );
        load_fr( REG_EDX, FRm );
        MEM_WRITE_LONG( REG_EAX, REG_EDX ); // 12
    sh4_x86.tstate = TSTATE_NONE;
FMOV @(R0, Rm), FRn {:
    COUNT_INST(I_FMOV7);
    load_reg( REG_EAX, Rm );
    ADDL_rbpdisp_r32( REG_OFFSET(r[0]), REG_EAX );
    if( sh4_x86.double_size ) {
        check_ralign64( REG_EAX );
        MEM_READ_LONG( REG_EAX, REG_EAX );
        store_dr0( REG_EAX, FRn );
        load_reg( REG_EAX, Rm );
        ADDL_rbpdisp_r32( REG_OFFSET(r[0]), REG_EAX );
        LEAL_r32disp_r32( REG_EAX, 4, REG_EAX );
        MEM_READ_LONG( REG_EAX, REG_EAX );
        store_dr1( REG_EAX, FRn );
        check_ralign32( REG_EAX );
        MEM_READ_LONG( REG_EAX, REG_EAX );
        store_fr( REG_EAX, FRn );
    sh4_x86.tstate = TSTATE_NONE;
FLDI0 FRn {: /* IFF PR=0 */
    COUNT_INST(I_FLDI0);
    if( sh4_x86.double_prec == 0 ) {
        XORL_r32_r32( REG_EAX, REG_EAX );
        store_fr( REG_EAX, FRn );
    sh4_x86.tstate = TSTATE_NONE;
FLDI1 FRn {: /* IFF PR=0 */
    COUNT_INST(I_FLDI1);
    if( sh4_x86.double_prec == 0 ) {
        MOVL_imm32_r32( 0x3F800000, REG_EAX );
        store_fr( REG_EAX, FRn );
    COUNT_INST(I_FLOAT);
    FILD_rbpdisp(R_FPUL);
    if( sh4_x86.double_prec ) {
    if( sh4_x86.double_prec ) {
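        /* max_int/min_int feed the comparisons that clamp the operand first:
         * SH4's FTRC saturates out-of-range values, whereas a raw FISTP
         * would produce the x87 integer-indefinite value (0x80000000).
         */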
        MOVP_immptr_rptr( &max_int, REG_ECX );
        FILD_r32disp( REG_ECX, 0 );
        MOVP_immptr_rptr( &min_int, REG_ECX );
        FILD_r32disp( REG_ECX, 0 );
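        /* The x87 unit defaults to round-to-nearest but FTRC truncates:
         * save the current control word, switch to trunc_fcw (RC=11),
         * convert with FISTP, then restore the saved control word.
         */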
        MOVP_immptr_rptr( &save_fcw, REG_EAX );
        FNSTCW_r32disp( REG_EAX, 0 );
        MOVP_immptr_rptr( &trunc_fcw, REG_EDX );
        FLDCW_r32disp( REG_EDX, 0 );
        FISTP_rbpdisp(R_FPUL);
        FLDCW_r32disp( REG_EAX, 0 );
        MOVL_r32disp_r32( REG_ECX, 0, REG_ECX ); // 2
        MOVL_r32_rbpdisp( REG_ECX, R_FPUL );
    sh4_x86.tstate = TSTATE_NONE;
    load_fr( REG_EAX, FRm );
    MOVL_r32_rbpdisp( REG_EAX, R_FPUL );
    MOVL_rbpdisp_r32( R_FPUL, REG_EAX );
    store_fr( REG_EAX, FRn );
    COUNT_INST(I_FCNVDS);
    if( sh4_x86.double_prec ) {
    COUNT_INST(I_FCNVSD);
    if( sh4_x86.double_prec ) {

/* Floating point instructions */
    if( sh4_x86.double_prec ) {
    if( sh4_x86.double_prec ) {
    if( sh4_x86.double_prec ) {
FMAC FR0, FRm, FRn {:
    if( sh4_x86.double_prec ) {
    if( sh4_x86.double_prec ) {
    if( sh4_x86.double_prec ) {
    COUNT_INST(I_FSRRA);
    if( sh4_x86.double_prec == 0 ) {
    COUNT_INST(I_FSQRT);
    if( sh4_x86.double_prec ) {
    if( sh4_x86.double_prec ) {
    COUNT_INST(I_FCMPEQ);
    if( sh4_x86.double_prec ) {
    sh4_x86.tstate = TSTATE_E;
    COUNT_INST(I_FCMPGT);
    if( sh4_x86.double_prec ) {
    sh4_x86.tstate = TSTATE_A;
    if( sh4_x86.double_prec == 0 ) {
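        /* FSCA is delegated to the sh4_fsca helper: FPUL holds the angle as
         * 16.16 fixed point (0x10000 == one full turn), and EDX points at
         * the destination FR pair.
         */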
        LEAP_rbpdisp_rptr( REG_OFFSET(fr[0][FRn&0x0E]), REG_EDX );
        MOVL_rbpdisp_r32( R_FPUL, REG_EAX );
        CALL2_ptr_r32_r32( sh4_fsca, REG_EAX, REG_EDX );
    sh4_x86.tstate = TSTATE_NONE;
    if( sh4_x86.double_prec == 0 ) {
        if( sh4_x86.sse3_enabled ) {
            MOVAPS_rbpdisp_xmm( REG_OFFSET(fr[0][FVm<<2]), 4 );
            MULPS_rbpdisp_xmm( REG_OFFSET(fr[0][FVn<<2]), 4 );
            HADDPS_xmm_xmm( 4, 4 );
            HADDPS_xmm_xmm( 4, 4 );
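            /* Two HADDPS passes fold the four products into a single sum.
             * FR registers are stored pair-swapped (see R_FR), so the result
             * for FR[(FVn<<2)+3] lands at array index (FVn<<2)+2.
             */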
            MOVSS_xmm_rbpdisp( 4, REG_OFFSET(fr[0][(FVn<<2)+2]) );
            push_fr( (FVm<<2)+1 );
            push_fr( (FVn<<2)+1 );
            push_fr( (FVm<<2)+2 );
            push_fr( (FVn<<2)+2 );
            push_fr( (FVm<<2)+3 );
            push_fr( (FVn<<2)+3 );
            pop_fr( (FVn<<2)+3 );
    if( sh4_x86.double_prec == 0 ) {
        if( sh4_x86.sse3_enabled && sh4_x86.begin_callback == NULL ) {
            /* FIXME: For now, disable this inlining when we're running in shadow mode -
             * it gives slightly different results from the emu core. Need to
             * fix the precision so both give the right results.
             */
            MOVAPS_rbpdisp_xmm( REG_OFFSET(fr[1][0]), 1 );  // M1 M0 M3 M2
            MOVAPS_rbpdisp_xmm( REG_OFFSET(fr[1][4]), 0 );  // M5 M4 M7 M6
            MOVAPS_rbpdisp_xmm( REG_OFFSET(fr[1][8]), 3 );  // M9 M8 M11 M10
            MOVAPS_rbpdisp_xmm( REG_OFFSET(fr[1][12]), 2 ); // M13 M12 M15 M14
            MOVSLDUP_rbpdisp_xmm( REG_OFFSET(fr[0][FVn<<2]), 4 ); // V1 V1 V3 V3
            MOVSHDUP_rbpdisp_xmm( REG_OFFSET(fr[0][FVn<<2]), 5 ); // V0 V0 V2 V2
            MOV_xmm_xmm( 4, 6 );
            MOV_xmm_xmm( 5, 7 );
            MOVLHPS_xmm_xmm( 4, 4 );  // V1 V1 V1 V1
            MOVHLPS_xmm_xmm( 6, 6 );  // V3 V3 V3 V3
            MOVLHPS_xmm_xmm( 5, 5 );  // V0 V0 V0 V0
            MOVHLPS_xmm_xmm( 7, 7 );  // V2 V2 V2 V2
            MULPS_xmm_xmm( 0, 4 );
            MULPS_xmm_xmm( 1, 5 );
            MULPS_xmm_xmm( 2, 6 );
            MULPS_xmm_xmm( 3, 7 );
            ADDPS_xmm_xmm( 5, 4 );
            ADDPS_xmm_xmm( 7, 6 );
            ADDPS_xmm_xmm( 6, 4 );
            MOVAPS_xmm_rbpdisp( 4, REG_OFFSET(fr[0][FVn<<2]) );
            LEAP_rbpdisp_rptr( REG_OFFSET(fr[0][FVn<<2]), REG_EAX );
            CALL1_ptr_r32( sh4_ftrv, REG_EAX );
    sh4_x86.tstate = TSTATE_NONE;
    COUNT_INST(I_FRCHG);
    XORL_imms_rbpdisp( FPSCR_FR, R_FPSCR );
    CALL_ptr( sh4_switch_fr_banks );
    sh4_x86.tstate = TSTATE_NONE;
    COUNT_INST(I_FSCHG);
    XORL_imms_rbpdisp( FPSCR_SZ, R_FPSCR );
    XORL_imms_rbpdisp( FPSCR_SZ, REG_OFFSET(xlat_sh4_mode) );
    sh4_x86.tstate = TSTATE_NONE;
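    /* Track the SZ flip at translation time so the rest of the block is
     * translated with the correct FMOV transfer size.
     */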
    sh4_x86.double_size = !sh4_x86.double_size;
    sh4_x86.sh4_mode = sh4_x86.sh4_mode ^ FPSCR_SZ;

/* Processor control instructions */
    COUNT_INST(I_LDCSR);
    if( sh4_x86.in_delay_slot ) {
        load_reg( REG_EAX, Rm );
        CALL1_ptr_r32( sh4_write_sr, REG_EAX );
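        /* sh4_write_sr decomposes the new SR value and may switch register
         * banks, so the cached mode and FPU-enable state are invalidated.
         */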
        sh4_x86.fpuen_checked = FALSE;
        sh4_x86.tstate = TSTATE_NONE;
        sh4_x86.sh4_mode = SH4_MODE_UNKNOWN;
    load_reg( REG_EAX, Rm );
    MOVL_r32_rbpdisp( REG_EAX, R_GBR );
    load_reg( REG_EAX, Rm );
    MOVL_r32_rbpdisp( REG_EAX, R_VBR );
    sh4_x86.tstate = TSTATE_NONE;
    load_reg( REG_EAX, Rm );
    MOVL_r32_rbpdisp( REG_EAX, R_SSR );
    sh4_x86.tstate = TSTATE_NONE;
    load_reg( REG_EAX, Rm );
    MOVL_r32_rbpdisp( REG_EAX, R_SGR );
    sh4_x86.tstate = TSTATE_NONE;
    load_reg( REG_EAX, Rm );
    MOVL_r32_rbpdisp( REG_EAX, R_SPC );
    sh4_x86.tstate = TSTATE_NONE;
    load_reg( REG_EAX, Rm );
    MOVL_r32_rbpdisp( REG_EAX, R_DBR );
    sh4_x86.tstate = TSTATE_NONE;
    load_reg( REG_EAX, Rm );
    MOVL_r32_rbpdisp( REG_EAX, REG_OFFSET(r_bank[Rn_BANK]) );
    sh4_x86.tstate = TSTATE_NONE;
    load_reg( REG_EAX, Rm );
    check_ralign32( REG_EAX );
    MEM_READ_LONG( REG_EAX, REG_EAX );
    ADDL_imms_rbpdisp( 4, REG_OFFSET(r[Rm]) );
    MOVL_r32_rbpdisp( REG_EAX, R_GBR );
    sh4_x86.tstate = TSTATE_NONE;
    COUNT_INST(I_LDCSRM);
    if( sh4_x86.in_delay_slot ) {
        load_reg( REG_EAX, Rm );
        check_ralign32( REG_EAX );
        MEM_READ_LONG( REG_EAX, REG_EAX );
        ADDL_imms_rbpdisp( 4, REG_OFFSET(r[Rm]) );
        CALL1_ptr_r32( sh4_write_sr, REG_EAX );
        sh4_x86.fpuen_checked = FALSE;
        sh4_x86.tstate = TSTATE_NONE;
        sh4_x86.sh4_mode = SH4_MODE_UNKNOWN;
    load_reg( REG_EAX, Rm );
    check_ralign32( REG_EAX );
    MEM_READ_LONG( REG_EAX, REG_EAX );
    ADDL_imms_rbpdisp( 4, REG_OFFSET(r[Rm]) );
    MOVL_r32_rbpdisp( REG_EAX, R_VBR );
    sh4_x86.tstate = TSTATE_NONE;
    load_reg( REG_EAX, Rm );
    check_ralign32( REG_EAX );
    MEM_READ_LONG( REG_EAX, REG_EAX );
    ADDL_imms_rbpdisp( 4, REG_OFFSET(r[Rm]) );
    MOVL_r32_rbpdisp( REG_EAX, R_SSR );
    sh4_x86.tstate = TSTATE_NONE;
    load_reg( REG_EAX, Rm );
    check_ralign32( REG_EAX );
    MEM_READ_LONG( REG_EAX, REG_EAX );
    ADDL_imms_rbpdisp( 4, REG_OFFSET(r[Rm]) );
    MOVL_r32_rbpdisp( REG_EAX, R_SGR );
    sh4_x86.tstate = TSTATE_NONE;
    load_reg( REG_EAX, Rm );
    check_ralign32( REG_EAX );
    MEM_READ_LONG( REG_EAX, REG_EAX );
    ADDL_imms_rbpdisp( 4, REG_OFFSET(r[Rm]) );
    MOVL_r32_rbpdisp( REG_EAX, R_SPC );
    sh4_x86.tstate = TSTATE_NONE;
    load_reg( REG_EAX, Rm );
    check_ralign32( REG_EAX );
    MEM_READ_LONG( REG_EAX, REG_EAX );
    ADDL_imms_rbpdisp( 4, REG_OFFSET(r[Rm]) );
    MOVL_r32_rbpdisp( REG_EAX, R_DBR );
    sh4_x86.tstate = TSTATE_NONE;
LDC.L @Rm+, Rn_BANK {:
    load_reg( REG_EAX, Rm );
    check_ralign32( REG_EAX );
    MEM_READ_LONG( REG_EAX, REG_EAX );
    ADDL_imms_rbpdisp( 4, REG_OFFSET(r[Rm]) );
    MOVL_r32_rbpdisp( REG_EAX, REG_OFFSET(r_bank[Rn_BANK]) );
    sh4_x86.tstate = TSTATE_NONE;
    COUNT_INST(I_LDSFPSCR);
    load_reg( REG_EAX, Rm );
    CALL1_ptr_r32( sh4_write_fpscr, REG_EAX );
    sh4_x86.tstate = TSTATE_NONE;
    sh4_x86.sh4_mode = SH4_MODE_UNKNOWN;
LDS.L @Rm+, FPSCR {:
    COUNT_INST(I_LDSFPSCRM);
    load_reg( REG_EAX, Rm );
    check_ralign32( REG_EAX );
    MEM_READ_LONG( REG_EAX, REG_EAX );
    ADDL_imms_rbpdisp( 4, REG_OFFSET(r[Rm]) );
    CALL1_ptr_r32( sh4_write_fpscr, REG_EAX );
    sh4_x86.tstate = TSTATE_NONE;
    sh4_x86.sh4_mode = SH4_MODE_UNKNOWN;
    load_reg( REG_EAX, Rm );
    MOVL_r32_rbpdisp( REG_EAX, R_FPUL );
    load_reg( REG_EAX, Rm );
    check_ralign32( REG_EAX );
    MEM_READ_LONG( REG_EAX, REG_EAX );
    ADDL_imms_rbpdisp( 4, REG_OFFSET(r[Rm]) );
    MOVL_r32_rbpdisp( REG_EAX, R_FPUL );
    sh4_x86.tstate = TSTATE_NONE;
    load_reg( REG_EAX, Rm );
    MOVL_r32_rbpdisp( REG_EAX, R_MACH );
    load_reg( REG_EAX, Rm );
    check_ralign32( REG_EAX );
    MEM_READ_LONG( REG_EAX, REG_EAX );
    ADDL_imms_rbpdisp( 4, REG_OFFSET(r[Rm]) );
    MOVL_r32_rbpdisp( REG_EAX, R_MACH );
    sh4_x86.tstate = TSTATE_NONE;
    load_reg( REG_EAX, Rm );
    MOVL_r32_rbpdisp( REG_EAX, R_MACL );
    load_reg( REG_EAX, Rm );
    check_ralign32( REG_EAX );
    MEM_READ_LONG( REG_EAX, REG_EAX );
    ADDL_imms_rbpdisp( 4, REG_OFFSET(r[Rm]) );
    MOVL_r32_rbpdisp( REG_EAX, R_MACL );
    sh4_x86.tstate = TSTATE_NONE;
    load_reg( REG_EAX, Rm );
    MOVL_r32_rbpdisp( REG_EAX, R_PR );
    load_reg( REG_EAX, Rm );
    check_ralign32( REG_EAX );
    MEM_READ_LONG( REG_EAX, REG_EAX );
    ADDL_imms_rbpdisp( 4, REG_OFFSET(r[Rm]) );
    MOVL_r32_rbpdisp( REG_EAX, R_PR );
    sh4_x86.tstate = TSTATE_NONE;
    COUNT_INST(I_LDTLB);
    CALL_ptr( MMU_ldtlb );
    sh4_x86.tstate = TSTATE_NONE;
    COUNT_INST(I_OCBWB);
    load_reg( REG_EAX, Rn );
    MEM_PREFETCH( REG_EAX );
    sh4_x86.tstate = TSTATE_NONE;
    COUNT_INST(I_SLEEP);
    CALL_ptr( sh4_sleep );
    sh4_x86.tstate = TSTATE_NONE;
    sh4_x86.in_delay_slot = DELAY_NONE;
    COUNT_INST(I_STCSR);
    CALL_ptr(sh4_read_sr);
    store_reg( REG_EAX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
    MOVL_rbpdisp_r32( R_GBR, REG_EAX );
    store_reg( REG_EAX, Rn );
    MOVL_rbpdisp_r32( R_VBR, REG_EAX );
    store_reg( REG_EAX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
    MOVL_rbpdisp_r32( R_SSR, REG_EAX );
    store_reg( REG_EAX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
    MOVL_rbpdisp_r32( R_SPC, REG_EAX );
    store_reg( REG_EAX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
    MOVL_rbpdisp_r32( R_SGR, REG_EAX );
    store_reg( REG_EAX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
    MOVL_rbpdisp_r32( R_DBR, REG_EAX );
    store_reg( REG_EAX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
    MOVL_rbpdisp_r32( REG_OFFSET(r_bank[Rm_BANK]), REG_EAX );
    store_reg( REG_EAX, Rn );
    sh4_x86.tstate = TSTATE_NONE;
    COUNT_INST(I_STCSRM);
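    /* SR is kept as separate t/s/q/m fields (see R_T and friends), so it
     * must be reassembled via sh4_read_sr before it can be stored.
     */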
    CALL_ptr( sh4_read_sr );
    MOVL_r32_r32( REG_EAX, REG_EDX );
    load_reg( REG_EAX, Rn );
    check_walign32( REG_EAX );
    LEAL_r32disp_r32( REG_EAX, -4, REG_EAX );
    MEM_WRITE_LONG( REG_EAX, REG_EDX );
    ADDL_imms_rbpdisp( -4, REG_OFFSET(r[Rn]) );
    sh4_x86.tstate = TSTATE_NONE;
    load_reg( REG_EAX, Rn );
    check_walign32( REG_EAX );
    ADDL_imms_r32( -4, REG_EAX );
    MOVL_rbpdisp_r32( R_VBR, REG_EDX );
    MEM_WRITE_LONG( REG_EAX, REG_EDX );
    ADDL_imms_rbpdisp( -4, REG_OFFSET(r[Rn]) );
    sh4_x86.tstate = TSTATE_NONE;
    load_reg( REG_EAX, Rn );
    check_walign32( REG_EAX );
    ADDL_imms_r32( -4, REG_EAX );
    MOVL_rbpdisp_r32( R_SSR, REG_EDX );
    MEM_WRITE_LONG( REG_EAX, REG_EDX );
    ADDL_imms_rbpdisp( -4, REG_OFFSET(r[Rn]) );
    sh4_x86.tstate = TSTATE_NONE;
    load_reg( REG_EAX, Rn );
    check_walign32( REG_EAX );
    ADDL_imms_r32( -4, REG_EAX );
    MOVL_rbpdisp_r32( R_SPC, REG_EDX );
    MEM_WRITE_LONG( REG_EAX, REG_EDX );
    ADDL_imms_rbpdisp( -4, REG_OFFSET(r[Rn]) );
    sh4_x86.tstate = TSTATE_NONE;
    load_reg( REG_EAX, Rn );
    check_walign32( REG_EAX );
    ADDL_imms_r32( -4, REG_EAX );
    MOVL_rbpdisp_r32( R_SGR, REG_EDX );
    MEM_WRITE_LONG( REG_EAX, REG_EDX );
    ADDL_imms_rbpdisp( -4, REG_OFFSET(r[Rn]) );
    sh4_x86.tstate = TSTATE_NONE;
    load_reg( REG_EAX, Rn );
    check_walign32( REG_EAX );
    ADDL_imms_r32( -4, REG_EAX );
    MOVL_rbpdisp_r32( R_DBR, REG_EDX );
    MEM_WRITE_LONG( REG_EAX, REG_EDX );
    ADDL_imms_rbpdisp( -4, REG_OFFSET(r[Rn]) );
    sh4_x86.tstate = TSTATE_NONE;
STC.L Rm_BANK, @-Rn {:
    load_reg( REG_EAX, Rn );
    check_walign32( REG_EAX );
    ADDL_imms_r32( -4, REG_EAX );
    MOVL_rbpdisp_r32( REG_OFFSET(r_bank[Rm_BANK]), REG_EDX );
    MEM_WRITE_LONG( REG_EAX, REG_EDX );
    ADDL_imms_rbpdisp( -4, REG_OFFSET(r[Rn]) );
    sh4_x86.tstate = TSTATE_NONE;
    load_reg( REG_EAX, Rn );
    check_walign32( REG_EAX );
    ADDL_imms_r32( -4, REG_EAX );
    MOVL_rbpdisp_r32( R_GBR, REG_EDX );
    MEM_WRITE_LONG( REG_EAX, REG_EDX );
    ADDL_imms_rbpdisp( -4, REG_OFFSET(r[Rn]) );
    sh4_x86.tstate = TSTATE_NONE;
    COUNT_INST(I_STSFPSCR);
    MOVL_rbpdisp_r32( R_FPSCR, REG_EAX );
    store_reg( REG_EAX, Rn );
STS.L FPSCR, @-Rn {:
    COUNT_INST(I_STSFPSCRM);
    load_reg( REG_EAX, Rn );
    check_walign32( REG_EAX );
    ADDL_imms_r32( -4, REG_EAX );
    MOVL_rbpdisp_r32( R_FPSCR, REG_EDX );
    MEM_WRITE_LONG( REG_EAX, REG_EDX );
    ADDL_imms_rbpdisp( -4, REG_OFFSET(r[Rn]) );
    sh4_x86.tstate = TSTATE_NONE;
    MOVL_rbpdisp_r32( R_FPUL, REG_EAX );
    store_reg( REG_EAX, Rn );
    load_reg( REG_EAX, Rn );
    check_walign32( REG_EAX );
    ADDL_imms_r32( -4, REG_EAX );
    MOVL_rbpdisp_r32( R_FPUL, REG_EDX );
    MEM_WRITE_LONG( REG_EAX, REG_EDX );
    ADDL_imms_rbpdisp( -4, REG_OFFSET(r[Rn]) );
    sh4_x86.tstate = TSTATE_NONE;
    MOVL_rbpdisp_r32( R_MACH, REG_EAX );
    store_reg( REG_EAX, Rn );
    load_reg( REG_EAX, Rn );
    check_walign32( REG_EAX );
    ADDL_imms_r32( -4, REG_EAX );
    MOVL_rbpdisp_r32( R_MACH, REG_EDX );
    MEM_WRITE_LONG( REG_EAX, REG_EDX );
    ADDL_imms_rbpdisp( -4, REG_OFFSET(r[Rn]) );
    sh4_x86.tstate = TSTATE_NONE;
    MOVL_rbpdisp_r32( R_MACL, REG_EAX );
    store_reg( REG_EAX, Rn );
    load_reg( REG_EAX, Rn );
    check_walign32( REG_EAX );
    ADDL_imms_r32( -4, REG_EAX );
    MOVL_rbpdisp_r32( R_MACL, REG_EDX );
    MEM_WRITE_LONG( REG_EAX, REG_EDX );
    ADDL_imms_rbpdisp( -4, REG_OFFSET(r[Rn]) );
    sh4_x86.tstate = TSTATE_NONE;
    MOVL_rbpdisp_r32( R_PR, REG_EAX );
    store_reg( REG_EAX, Rn );
    load_reg( REG_EAX, Rn );
    check_walign32( REG_EAX );
    ADDL_imms_r32( -4, REG_EAX );
    MOVL_rbpdisp_r32( R_PR, REG_EDX );
    MEM_WRITE_LONG( REG_EAX, REG_EDX );
    ADDL_imms_rbpdisp( -4, REG_OFFSET(r[Rn]) );
    sh4_x86.tstate = TSTATE_NONE;
    /* Do nothing. Well, we could emit a NOP (0x90), but what would really be the point? */
    sh4_x86.in_delay_slot = DELAY_NONE;

 * The unwind methods only work if we compiled with DWARF2 frame information
 * (i.e. -fexceptions); otherwise we have to use the direct frame scan.
#ifdef HAVE_EXCEPTIONS
    uintptr_t block_start;
    uintptr_t block_end;

static _Unwind_Reason_Code xlat_check_frame( struct _Unwind_Context *context, void *arg )
{
    struct UnwindInfo *info = arg;
    void *pc = (void *)_Unwind_GetIP(context);
    if( ((uintptr_t)pc) >= info->block_start && ((uintptr_t)pc) < info->block_end ) {
        return _URC_NORMAL_STOP;
    return _URC_NO_REASON;

void *xlat_get_native_pc( void *code, uint32_t code_size )
{
    struct _Unwind_Exception exc;
    struct UnwindInfo info;
    info.block_start = (uintptr_t)code;
    info.block_end = info.block_start + code_size;
    void *result = NULL;
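    /* Walk our own call stack; xlat_check_frame stops the backtrace at the
     * first frame that falls inside the translated block.
     */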
    _Unwind_Backtrace( xlat_check_frame, &info );

/* Assume this is an ia32 build - amd64 should always have DWARF information */
void *xlat_get_native_pc( void *code, uint32_t code_size )
{
    void *result = NULL;
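    /* Direct frame scan: follow the saved-EBP chain for up to 8 frames,
     * looking for the frame whose saved EBP equals the translator's fixed
     * frame pointer (&sh4r + 128); the word above that slot is the return
     * address within the translated block.
     */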
3266 "mov %%ebp, %%eax\n\t"
3267 "mov $0x8, %%ecx\n\t"
3269 "frame_loop: test %%eax, %%eax\n\t"
3270 "je frame_not_found\n\t"
3271 "cmp (%%eax), %%edx\n\t"
3272 "je frame_found\n\t"
3273 "sub $0x1, %%ecx\n\t"
3274 "je frame_not_found\n\t"
3275 "movl (%%eax), %%eax\n\t"
3277 "frame_found: movl 0x4(%%eax), %0\n"
3280 : "r" (((uint8_t *)&sh4r) + 128 )
3281 : "eax", "ecx", "edx" );