4 * SH4 => x86 translation. This version does no real optimization; it just
5 * outputs straight-line x86 code - it mainly exists to provide a baseline
6 * to test the optimizing versions against.
8 * Copyright (c) 2007 Nathan Keynes.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
29 #include "sh4/xltcache.h"
30 #include "sh4/sh4core.h"
31 #include "sh4/sh4trans.h"
32 #include "sh4/sh4stat.h"
33 #include "sh4/sh4mmio.h"
34 #include "sh4/x86op.h"
37 #define DEFAULT_BACKPATCH_SIZE 4096
39 struct backpatch_record {
40 uint32_t fixup_offset;
41 uint32_t fixup_icount;
42 uint32_t exc_code;
43 };
50 * Struct to manage internal translation state. This state is not saved -
51 * it is only valid between calls to sh4_translate_begin_block() and
52 * sh4_translate_end_block()
54 struct sh4_x86_state {
56 gboolean priv_checked; /* true if we've already checked the cpu mode. */
57 gboolean fpuen_checked; /* true if we've already checked fpu enabled. */
58 gboolean branch_taken; /* true if we branched unconditionally */
59 gboolean double_prec; /* true if FPU is in double-precision mode */
60 gboolean double_size; /* true if FPU is in double-size mode */
61 gboolean sse3_enabled; /* true if host supports SSE3 instructions */
62 uint32_t block_start_pc;
63 uint32_t stack_posn; /* Trace stack height for alignment purposes */
67 gboolean tlb_on; /* True if tlb translation is active */
69 /* Allocated memory for the (block-wide) back-patch list */
70 struct backpatch_record *backpatch_list;
71 uint32_t backpatch_posn;
72 uint32_t backpatch_size;
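/* Illustrative lifecycle (a sketch only -- the real driver loop lives in
 * sh4trans.c, which is an assumption here, as is the exact end-of-block
 * call signature):
 *
 *     sh4_translate_begin_block( pc );
 *     do {
 *         done = sh4_translate_instruction( pc );
 *         pc += 2;
 *     } while( !done );
 *     sh4_translate_end_block( pc );
 *
 * Every field above is reset by sh4_translate_begin_block() and is only
 * meaningful until the matching end-of-block call.
 */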
75 #define TSTATE_NONE -1
85 #ifdef ENABLE_SH4STATS
86 #define COUNT_INST(id) load_imm32(R_EAX,id); call_func1(sh4_stats_add, R_EAX); sh4_x86.tstate = TSTATE_NONE
87 #else
88 #define COUNT_INST(id)
89 #endif
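/* e.g. with ENABLE_SH4STATS defined, COUNT_INST(I_CMPSTR) expands to
 *     load_imm32( R_EAX, I_CMPSTR ); call_func1( sh4_stats_add, R_EAX );
 * and resets the cached tstate (the call clobbers the x86 flags); without
 * it the macro compiles away entirely. */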
91 /** Branch if T is set (either in the current cflags, or in sh4r.t) */
92 #define JT_rel8(label) if( sh4_x86.tstate == TSTATE_NONE ) { \
93 CMP_imm8s_sh4r( 1, R_T ); sh4_x86.tstate = TSTATE_E; } \
94 OP(0x70+sh4_x86.tstate); MARK_JMP8(label); OP(-1)
96 /** Branch if T is clear (either in the current cflags or in sh4r.t) */
97 #define JF_rel8(label) if( sh4_x86.tstate == TSTATE_NONE ) { \
98 CMP_imm8s_sh4r( 1, R_T ); sh4_x86.tstate = TSTATE_E; } \
99 OP(0x70+ (sh4_x86.tstate^1)); MARK_JMP8(label); OP(-1)
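/* e.g. the CMP/EQ translation below leaves sh4_x86.tstate == TSTATE_E, so
 * a following JT_rel8(label) emits a single OP(0x70+TSTATE_E) -- a JE rel8,
 * assuming the TSTATE_* values mirror the x86 condition-code encodings --
 * without re-reading sh4r.t. Only when tstate is TSTATE_NONE does the
 * branch pay for the extra CMP_imm8s_sh4r( 1, R_T ). */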
101 static struct sh4_x86_state sh4_x86;
103 static uint32_t max_int = 0x7FFFFFFF;
104 static uint32_t min_int = 0x80000000;
105 static uint32_t save_fcw; /* save value for fpu control word */
106 static uint32_t trunc_fcw = 0x0F7F; /* fcw value for truncation mode: RC=11 (round toward zero), all exceptions masked */
108 gboolean is_sse3_supported()
109 {
110 uint32_t features;
112 // Note: Include the push/pop ebx sequence in case of PIC builds. This
113 // isn't exactly on a critical path anyway.
114 __asm__ __volatile__(
115 "pushl %%ebx\n\t"
116 "mov $0x01, %%eax\n\t"
117 "cpuid\n\t"
118 "popl %%ebx" : "=c" (features) : : "eax", "edx");
119 return (features & 1) ? TRUE : FALSE;
120 }
122 void sh4_translate_init(void)
124 sh4_x86.backpatch_list = malloc(DEFAULT_BACKPATCH_SIZE);
125 sh4_x86.backpatch_size = DEFAULT_BACKPATCH_SIZE / sizeof(struct backpatch_record);
126 sh4_x86.sse3_enabled = is_sse3_supported();
130 static void sh4_x86_add_backpatch( uint8_t *fixup_addr, uint32_t fixup_pc, uint32_t exc_code )
132 if( sh4_x86.backpatch_posn == sh4_x86.backpatch_size ) {
133 sh4_x86.backpatch_size <<= 1;
134 sh4_x86.backpatch_list = realloc( sh4_x86.backpatch_list,
135 sh4_x86.backpatch_size * sizeof(struct backpatch_record));
136 assert( sh4_x86.backpatch_list != NULL );
138 if( sh4_x86.in_delay_slot ) {
139 fixup_pc -= 2;
140 }
141 sh4_x86.backpatch_list[sh4_x86.backpatch_posn].fixup_offset =
142 ((uint8_t *)fixup_addr) - ((uint8_t *)xlat_current_block->code);
143 sh4_x86.backpatch_list[sh4_x86.backpatch_posn].fixup_icount = (fixup_pc - sh4_x86.block_start_pc)>>1;
144 sh4_x86.backpatch_list[sh4_x86.backpatch_posn].exc_code = exc_code;
145 sh4_x86.backpatch_posn++;
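/* Consumption sketch (an assumption -- the actual walk happens in the
 * end-of-block epilogue, which is not shown in this file): for each record,
 * the rel32 at code + fixup_offset is patched to reach a stub that rewinds
 * sh4r.pc by fixup_icount instructions and raises exc_code; records queued
 * with exc_code == -1 (see MMU_TRANSLATE_READ below) exit through the
 * exception that the MMU lookup has already posted. */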
149 * Emit an instruction to load an SH4 reg into a real register
151 static inline void load_reg( int x86reg, int sh4reg )
152 {
153 /* mov [bp+n], reg */
154 OP(0x8B);
155 OP(0x45 + (x86reg<<3));
156 OP(REG_OFFSET(r[sh4reg]));
157 }
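/* e.g. load_reg( R_EAX, 3 ) emits 8B 45 xx -- mov xx(%ebp), %eax, with
 * xx == REG_OFFSET(r[3]) -- assuming R_EAX encodes as x86 register 0. */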
159 static inline void load_reg16s( int x86reg, int sh4reg )
163 MODRM_r32_sh4r(x86reg, REG_OFFSET(r[sh4reg]));
166 static inline void load_reg16u( int x86reg, int sh4reg )
170 MODRM_r32_sh4r(x86reg, REG_OFFSET(r[sh4reg]));
174 #define load_spreg( x86reg, regoff ) MOV_sh4r_r32( regoff, x86reg )
175 #define store_spreg( x86reg, regoff ) MOV_r32_sh4r( x86reg, regoff )
177 * Emit an instruction to load an immediate value into a register
179 static inline void load_imm32( int x86reg, uint32_t value ) {
180 /* mov #value, reg */
186 * Load an immediate 64-bit quantity (note: x86-64 only)
188 static inline void load_imm64( int x86reg, uint64_t value ) {
189 /* mov #value, reg */
196 * Emit an instruction to store an SH4 reg (RN)
198 static inline void store_reg( int x86reg, int sh4reg ) {
199 /* mov reg, [bp+n] */
200 OP(0x89);
201 OP(0x45 + (x86reg<<3));
202 OP(REG_OFFSET(r[sh4reg]));
203 }
206 * Load an FR register (single-precision floating point) into an integer x86
207 * register (e.g. for register-to-register moves)
209 #define load_fr(reg,frm) OP(0x8B); MODRM_r32_ebp32(reg, REG_OFFSET(fr[0][(frm)^1]) )
210 #define load_xf(reg,frm) OP(0x8B); MODRM_r32_ebp32(reg, REG_OFFSET(fr[1][(frm)^1]) )
213 * Load the low half of a DR register (DR or XD) into an integer x86 register
215 #define load_dr0(reg,frm) OP(0x8B); MODRM_r32_ebp32(reg, REG_OFFSET(fr[frm&1][frm|0x01]) )
216 #define load_dr1(reg,frm) OP(0x8B); MODRM_r32_ebp32(reg, REG_OFFSET(fr[frm&1][frm&0x0E]) )
219 * Store an FR register (single-precision floating point) from an integer x86
220 * register (e.g. for register-to-register moves)
222 #define store_fr(reg,frm) OP(0x89); MODRM_r32_ebp32( reg, REG_OFFSET(fr[0][(frm)^1]) )
223 #define store_xf(reg,frm) OP(0x89); MODRM_r32_ebp32( reg, REG_OFFSET(fr[1][(frm)^1]) )
225 #define store_dr0(reg,frm) OP(0x89); MODRM_r32_ebp32( reg, REG_OFFSET(fr[frm&1][frm|0x01]) )
226 #define store_dr1(reg,frm) OP(0x89); MODRM_r32_ebp32( reg, REG_OFFSET(fr[frm&1][frm&0x0E]) )
229 #define push_fpul() FLDF_sh4r(R_FPUL)
230 #define pop_fpul() FSTPF_sh4r(R_FPUL)
231 #define push_fr(frm) FLDF_sh4r( REG_OFFSET(fr[0][(frm)^1]) )
232 #define pop_fr(frm) FSTPF_sh4r( REG_OFFSET(fr[0][(frm)^1]) )
233 #define push_xf(frm) FLDF_sh4r( REG_OFFSET(fr[1][(frm)^1]) )
234 #define pop_xf(frm) FSTPF_sh4r( REG_OFFSET(fr[1][(frm)^1]) )
235 #define push_dr(frm) FLDD_sh4r( REG_OFFSET(fr[0][(frm)&0x0E]) )
236 #define pop_dr(frm) FSTPD_sh4r( REG_OFFSET(fr[0][(frm)&0x0E]) )
237 #define push_xdr(frm) FLDD_sh4r( REG_OFFSET(fr[1][(frm)&0x0E]) )
238 #define pop_xdr(frm) FSTPD_sh4r( REG_OFFSET(fr[1][(frm)&0x0E]) )
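/* Layout note: the two singles inside each double are stored word-swapped,
 * hence the (frm)^1 in the single-precision accessors above -- e.g.
 * load_fr( reg, 3 ) reads fr[0][2] -- while push_dr/pop_dr address the
 * whole double at the even element via (frm)&0x0E. */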
242 /* Exception checks - Note that all exception checks will clobber EAX */
244 #define check_priv( ) \
245 if( !sh4_x86.priv_checked ) { \
246 sh4_x86.priv_checked = TRUE;\
247 load_spreg( R_EAX, R_SR );\
248 AND_imm32_r32( SR_MD, R_EAX );\
249 if( sh4_x86.in_delay_slot ) {\
250 JE_exc( EXC_SLOT_ILLEGAL );\
252 JE_exc( EXC_ILLEGAL );\
254 sh4_x86.tstate = TSTATE_NONE; \
257 #define check_fpuen( ) \
258 if( !sh4_x86.fpuen_checked ) {\
259 sh4_x86.fpuen_checked = TRUE;\
260 load_spreg( R_EAX, R_SR );\
261 AND_imm32_r32( SR_FD, R_EAX );\
262 if( sh4_x86.in_delay_slot ) {\
263 JNE_exc(EXC_SLOT_FPU_DISABLED);\
265 JNE_exc(EXC_FPU_DISABLED);\
267 sh4_x86.tstate = TSTATE_NONE; \
270 #define check_ralign16( x86reg ) \
271 TEST_imm32_r32( 0x00000001, x86reg ); \
272 JNE_exc(EXC_DATA_ADDR_READ)
274 #define check_walign16( x86reg ) \
275 TEST_imm32_r32( 0x00000001, x86reg ); \
276 JNE_exc(EXC_DATA_ADDR_WRITE);
278 #define check_ralign32( x86reg ) \
279 TEST_imm32_r32( 0x00000003, x86reg ); \
280 JNE_exc(EXC_DATA_ADDR_READ)
282 #define check_walign32( x86reg ) \
283 TEST_imm32_r32( 0x00000003, x86reg ); \
284 JNE_exc(EXC_DATA_ADDR_WRITE);
286 #define check_ralign64( x86reg ) \
287 TEST_imm32_r32( 0x00000007, x86reg ); \
288 JNE_exc(EXC_DATA_ADDR_READ)
290 #define check_walign64( x86reg ) \
291 TEST_imm32_r32( 0x00000007, x86reg ); \
292 JNE_exc(EXC_DATA_ADDR_WRITE);
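/* e.g. check_ralign32( R_ECX ) emits TEST $3, %ecx plus a conditional
 * branch that JNE_exc backpatches to raise EXC_DATA_ADDR_READ, so a
 * misaligned address never reaches the sh4_read_* helpers below. */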
295 #define MEM_RESULT(value_reg) if(value_reg != R_EAX) { MOV_r32_r32(R_EAX,value_reg); }
296 #define MEM_READ_BYTE( addr_reg, value_reg ) call_func1(sh4_read_byte, addr_reg ); MEM_RESULT(value_reg)
297 #define MEM_READ_WORD( addr_reg, value_reg ) call_func1(sh4_read_word, addr_reg ); MEM_RESULT(value_reg)
298 #define MEM_READ_LONG( addr_reg, value_reg ) call_func1(sh4_read_long, addr_reg ); MEM_RESULT(value_reg)
299 #define MEM_WRITE_BYTE( addr_reg, value_reg ) call_func2(sh4_write_byte, addr_reg, value_reg)
300 #define MEM_WRITE_WORD( addr_reg, value_reg ) call_func2(sh4_write_word, addr_reg, value_reg)
301 #define MEM_WRITE_LONG( addr_reg, value_reg ) call_func2(sh4_write_long, addr_reg, value_reg)
304 * Perform MMU translation on the address in addr_reg for a read operation, iff the TLB is turned
305 * on, otherwise do nothing. Clobbers EAX, ECX and EDX. May raise a TLB exception or address error.
307 #define MMU_TRANSLATE_READ( addr_reg ) if( sh4_x86.tlb_on ) { call_func1(mmu_vma_to_phys_read, addr_reg); CMP_imm32_r32(MMU_VMA_ERROR, R_EAX); JE_exc(-1); MEM_RESULT(addr_reg); }
309 #define MMU_TRANSLATE_READ_EXC( addr_reg, exc_code ) if( sh4_x86.tlb_on ) { call_func1(mmu_vma_to_phys_read, addr_reg); CMP_imm32_r32(MMU_VMA_ERROR, R_EAX); JE_exc(exc_code); MEM_RESULT(addr_reg); }
311 * Perform MMU translation on the address in addr_reg for a write operation, iff the TLB is turned
312 * on, otherwise do nothing. Clobbers EAX, ECX and EDX. May raise a TLB exception or address error.
314 #define MMU_TRANSLATE_WRITE( addr_reg ) if( sh4_x86.tlb_on ) { call_func1(mmu_vma_to_phys_write, addr_reg); CMP_imm32_r32(MMU_VMA_ERROR, R_EAX); JE_exc(-1); MEM_RESULT(addr_reg); }
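/* Typical composition (this mirrors the MOV.L @Rm, Rn translation below):
 *     load_reg( R_EAX, Rm );
 *     check_ralign32( R_EAX );
 *     MMU_TRANSLATE_READ( R_EAX );
 *     MEM_READ_LONG( R_EAX, R_EAX );
 *     store_reg( R_EAX, Rn );
 * i.e. load the VMA, alignment-check it, translate it when the TLB is on,
 * then dereference through the memory helpers. */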
316 #define SLOTILLEGAL() JMP_exc(EXC_SLOT_ILLEGAL); sh4_x86.in_delay_slot = DELAY_NONE; return 1;
318 /****** Import appropriate calling conventions ******/
319 #if SIZEOF_VOID_P == 8
320 #include "sh4/ia64abi.h"
321 #else /* 32-bit system */
323 #include "sh4/ia32mac.h"
325 #include "sh4/ia32abi.h"
329 void sh4_translate_begin_block( sh4addr_t pc )
332 sh4_x86.in_delay_slot = FALSE;
333 sh4_x86.priv_checked = FALSE;
334 sh4_x86.fpuen_checked = FALSE;
335 sh4_x86.branch_taken = FALSE;
336 sh4_x86.backpatch_posn = 0;
337 sh4_x86.block_start_pc = pc;
338 sh4_x86.tlb_on = IS_MMU_ENABLED();
339 sh4_x86.tstate = TSTATE_NONE;
340 sh4_x86.double_prec = sh4r.fpscr & FPSCR_PR;
341 sh4_x86.double_size = sh4r.fpscr & FPSCR_SZ;
345 uint32_t sh4_translate_end_block_size()
347 if( sh4_x86.backpatch_posn <= 3 ) {
348 return EPILOGUE_SIZE + (sh4_x86.backpatch_posn*12);
349 } else {
350 return EPILOGUE_SIZE + 48 + (sh4_x86.backpatch_posn-3)*15;
351 }
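/* e.g. 2 outstanding records reserve EPILOGUE_SIZE + 24 bytes, while 5
 * records reserve EPILOGUE_SIZE + 48 + 2*15 = EPILOGUE_SIZE + 78 bytes. */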
356 * Embed a breakpoint into the generated code
358 void sh4_translate_emit_breakpoint( sh4vma_t pc )
360 load_imm32( R_EAX, pc );
361 call_func1( sh4_translate_breakpoint_hit, R_EAX );
362 sh4_x86.tstate = TSTATE_NONE;
366 #define UNTRANSLATABLE(pc) (!IS_IN_ICACHE(pc))
369 * Embed a call to sh4_execute_instruction for situations that we
370 * can't translate (just page-crossing delay slots at the moment).
371 * Caller is responsible for setting new_pc before calling this function.
375 * Set sh4r.in_delay_slot = sh4_x86.in_delay_slot
376 * Update slice_cycle for endpc+2 (single step doesn't update slice_cycle)
377 * Call sh4_execute_instruction
378 * Call xlat_get_code_by_vma / xlat_get_code as for normal exit
380 void exit_block_emu( sh4vma_t endpc )
382 load_imm32( R_ECX, endpc - sh4_x86.block_start_pc ); // 5
383 ADD_r32_sh4r( R_ECX, R_PC );
385 load_imm32( R_ECX, (((endpc - sh4_x86.block_start_pc)>>1)+1)*sh4_cpu_period ); // 5
386 ADD_r32_sh4r( R_ECX, REG_OFFSET(slice_cycle) ); // 6
387 load_imm32( R_ECX, sh4_x86.in_delay_slot ? 1 : 0 );
388 store_spreg( R_ECX, REG_OFFSET(in_delay_slot) );
390 call_func0( sh4_execute_instruction );
391 load_spreg( R_EAX, R_PC );
392 if( sh4_x86.tlb_on ) {
393 call_func1(xlat_get_code_by_vma,R_EAX);
395 call_func1(xlat_get_code,R_EAX);
397 AND_imm8s_rptr( 0xFC, R_EAX );
403 * Translate a single instruction. Delayed branches are handled specially
404 * by translating both branch and delayed instruction as a single unit (as
406 * The instruction MUST be in the icache (assert check)
408 * @return true if the instruction marks the end of a basic block
411 uint32_t sh4_translate_instruction( sh4vma_t pc )
414 /* Read instruction from icache */
415 assert( IS_IN_ICACHE(pc) );
416 ir = *(uint16_t *)GET_ICACHE_PTR(pc);
418 /* PC is not in the current icache - this usually means we're running
419 * with MMU on, and we've gone past the end of the page. And since
420 * sh4_translate_block is pretty careful about this, it means we're
421 * almost certainly in a delay slot.
423 * Since we can't assume the page is present (and we can't fault it in
424 * at this point), inline a call to sh4_execute_instruction (with a few
425 * small repairs to cope with the different environment).
428 if( !sh4_x86.in_delay_slot ) {
429 sh4_translate_add_recovery( (pc - sh4_x86.block_start_pc)>>1 );
435 load_reg( R_EAX, Rm );
436 load_reg( R_ECX, Rn );
437 ADD_r32_r32( R_EAX, R_ECX );
438 store_reg( R_ECX, Rn );
439 sh4_x86.tstate = TSTATE_NONE;
443 load_reg( R_EAX, Rn );
444 ADD_imm8s_r32( imm, R_EAX );
445 store_reg( R_EAX, Rn );
446 sh4_x86.tstate = TSTATE_NONE;
450 if( sh4_x86.tstate != TSTATE_C ) {
453 load_reg( R_EAX, Rm );
454 load_reg( R_ECX, Rn );
455 ADC_r32_r32( R_EAX, R_ECX );
456 store_reg( R_ECX, Rn );
458 sh4_x86.tstate = TSTATE_C;
462 load_reg( R_EAX, Rm );
463 load_reg( R_ECX, Rn );
464 ADD_r32_r32( R_EAX, R_ECX );
465 store_reg( R_ECX, Rn );
467 sh4_x86.tstate = TSTATE_O;
471 load_reg( R_EAX, Rm );
472 load_reg( R_ECX, Rn );
473 AND_r32_r32( R_EAX, R_ECX );
474 store_reg( R_ECX, Rn );
475 sh4_x86.tstate = TSTATE_NONE;
479 load_reg( R_EAX, 0 );
480 AND_imm32_r32(imm, R_EAX);
481 store_reg( R_EAX, 0 );
482 sh4_x86.tstate = TSTATE_NONE;
484 AND.B #imm, @(R0, GBR) {:
486 load_reg( R_EAX, 0 );
487 load_spreg( R_ECX, R_GBR );
488 ADD_r32_r32( R_ECX, R_EAX );
489 MMU_TRANSLATE_WRITE( R_EAX );
490 PUSH_realigned_r32(R_EAX);
491 MEM_READ_BYTE( R_EAX, R_EDX );
492 POP_realigned_r32(R_EAX);
493 AND_imm32_r32(imm, R_EDX );
494 MEM_WRITE_BYTE( R_EAX, R_EDX );
495 sh4_x86.tstate = TSTATE_NONE;
499 load_reg( R_EAX, Rm );
500 load_reg( R_ECX, Rn );
501 CMP_r32_r32( R_EAX, R_ECX );
503 sh4_x86.tstate = TSTATE_E;
506 COUNT_INST(I_CMPEQI);
507 load_reg( R_EAX, 0 );
508 CMP_imm8s_r32(imm, R_EAX);
510 sh4_x86.tstate = TSTATE_E;
514 load_reg( R_EAX, Rm );
515 load_reg( R_ECX, Rn );
516 CMP_r32_r32( R_EAX, R_ECX );
518 sh4_x86.tstate = TSTATE_GE;
522 load_reg( R_EAX, Rm );
523 load_reg( R_ECX, Rn );
524 CMP_r32_r32( R_EAX, R_ECX );
526 sh4_x86.tstate = TSTATE_G;
530 load_reg( R_EAX, Rm );
531 load_reg( R_ECX, Rn );
532 CMP_r32_r32( R_EAX, R_ECX );
534 sh4_x86.tstate = TSTATE_A;
538 load_reg( R_EAX, Rm );
539 load_reg( R_ECX, Rn );
540 CMP_r32_r32( R_EAX, R_ECX );
542 sh4_x86.tstate = TSTATE_AE;
546 load_reg( R_EAX, Rn );
547 CMP_imm8s_r32( 0, R_EAX );
549 sh4_x86.tstate = TSTATE_G;
553 load_reg( R_EAX, Rn );
554 CMP_imm8s_r32( 0, R_EAX );
556 sh4_x86.tstate = TSTATE_GE;
559 COUNT_INST(I_CMPSTR);
560 load_reg( R_EAX, Rm );
561 load_reg( R_ECX, Rn );
562 XOR_r32_r32( R_ECX, R_EAX );
563 TEST_r8_r8( R_AL, R_AL );
565 TEST_r8_r8( R_AH, R_AH );
567 SHR_imm8_r32( 16, R_EAX );
568 TEST_r8_r8( R_AL, R_AL );
570 TEST_r8_r8( R_AH, R_AH );
575 sh4_x86.tstate = TSTATE_E;
579 load_reg( R_EAX, Rm );
580 load_reg( R_ECX, Rn );
581 SHR_imm8_r32( 31, R_EAX );
582 SHR_imm8_r32( 31, R_ECX );
583 store_spreg( R_EAX, R_M );
584 store_spreg( R_ECX, R_Q );
585 CMP_r32_r32( R_EAX, R_ECX );
587 sh4_x86.tstate = TSTATE_NE;
591 XOR_r32_r32( R_EAX, R_EAX );
592 store_spreg( R_EAX, R_Q );
593 store_spreg( R_EAX, R_M );
594 store_spreg( R_EAX, R_T );
595 sh4_x86.tstate = TSTATE_C; // XOR clears carry to match T=0, so the following DIV1 can reuse it
599 load_spreg( R_ECX, R_M );
600 load_reg( R_EAX, Rn );
601 if( sh4_x86.tstate != TSTATE_C ) {
605 SETC_r8( R_DL ); // Q'
606 CMP_sh4r_r32( R_Q, R_ECX );
608 ADD_sh4r_r32( REG_OFFSET(r[Rm]), R_EAX );
611 SUB_sh4r_r32( REG_OFFSET(r[Rm]), R_EAX );
613 store_reg( R_EAX, Rn ); // Done with Rn now
614 SETC_r8(R_AL); // tmp1
615 XOR_r8_r8( R_DL, R_AL ); // Q' = Q ^ tmp1
616 XOR_r8_r8( R_AL, R_CL ); // Q'' = Q' ^ M
617 store_spreg( R_ECX, R_Q );
618 XOR_imm8s_r32( 1, R_AL ); // T = !Q'
619 MOVZX_r8_r32( R_AL, R_EAX );
620 store_spreg( R_EAX, R_T );
621 sh4_x86.tstate = TSTATE_NONE;
625 load_reg( R_EAX, Rm );
626 load_reg( R_ECX, Rn );
628 store_spreg( R_EDX, R_MACH );
629 store_spreg( R_EAX, R_MACL );
630 sh4_x86.tstate = TSTATE_NONE;
634 load_reg( R_EAX, Rm );
635 load_reg( R_ECX, Rn );
637 store_spreg( R_EDX, R_MACH );
638 store_spreg( R_EAX, R_MACL );
639 sh4_x86.tstate = TSTATE_NONE;
643 load_reg( R_EAX, Rn );
644 ADD_imm8s_r32( -1, R_EAX );
645 store_reg( R_EAX, Rn );
647 sh4_x86.tstate = TSTATE_E;
651 load_reg( R_EAX, Rm );
652 MOVSX_r8_r32( R_EAX, R_EAX );
653 store_reg( R_EAX, Rn );
657 load_reg( R_EAX, Rm );
658 MOVSX_r16_r32( R_EAX, R_EAX );
659 store_reg( R_EAX, Rn );
663 load_reg( R_EAX, Rm );
664 MOVZX_r8_r32( R_EAX, R_EAX );
665 store_reg( R_EAX, Rn );
669 load_reg( R_EAX, Rm );
670 MOVZX_r16_r32( R_EAX, R_EAX );
671 store_reg( R_EAX, Rn );
676 load_reg( R_EAX, Rm );
677 check_ralign32( R_EAX );
678 MMU_TRANSLATE_READ( R_EAX );
679 PUSH_realigned_r32( R_EAX );
680 load_reg( R_EAX, Rn );
681 ADD_imm8s_r32( 4, R_EAX );
682 MMU_TRANSLATE_READ_EXC( R_EAX, -5 );
683 ADD_imm8s_sh4r( 8, REG_OFFSET(r[Rn]) );
684 // Note: we translate twice in case of page boundaries. It may be worth
685 // adding a page-boundary check to skip the second translation; a sketch follows.
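// A hypothetical guard (sketch only, untested): the first VMA is known to
// be 4-byte aligned here, so addr+4 can only cross a 4K page when
// (addr & 0xFFF) == 0xFFC, e.g.
//     MOV_r32_r32( R_EAX, R_ECX );
//     AND_imm32_r32( 0x00000FFF, R_ECX );
//     CMP_imm32_r32( 0x00000FFC, R_ECX );
// taking the second MMU_TRANSLATE_READ_EXC only on equality.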
687 load_reg( R_EAX, Rm );
688 check_ralign32( R_EAX );
689 MMU_TRANSLATE_READ( R_EAX );
690 load_reg( R_ECX, Rn );
691 check_ralign32( R_ECX );
692 PUSH_realigned_r32( R_EAX );
693 MMU_TRANSLATE_READ_EXC( R_ECX, -5 );
694 MOV_r32_r32( R_ECX, R_EAX );
695 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rn]) );
696 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
698 MEM_READ_LONG( R_EAX, R_EAX );
701 MEM_READ_LONG( R_ECX, R_EAX );
702 POP_realigned_r32( R_ECX );
705 ADD_r32_sh4r( R_EAX, R_MACL );
706 ADC_r32_sh4r( R_EDX, R_MACH );
708 load_spreg( R_ECX, R_S );
709 TEST_r32_r32(R_ECX, R_ECX);
711 call_func0( signsat48 );
713 sh4_x86.tstate = TSTATE_NONE;
718 load_reg( R_EAX, Rm );
719 check_ralign16( R_EAX );
720 MMU_TRANSLATE_READ( R_EAX );
721 PUSH_realigned_r32( R_EAX );
722 load_reg( R_EAX, Rn );
723 ADD_imm8s_r32( 2, R_EAX );
724 MMU_TRANSLATE_READ_EXC( R_EAX, -5 );
725 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rn]) );
726 // Note: we translate twice in case of page boundaries. It may be worth
727 // adding a page-boundary check to skip the second translation (see MAC.L above).
729 load_reg( R_EAX, Rm );
730 check_ralign16( R_EAX );
731 MMU_TRANSLATE_READ( R_EAX );
732 load_reg( R_ECX, Rn );
733 check_ralign16( R_ECX );
734 PUSH_realigned_r32( R_EAX );
735 MMU_TRANSLATE_READ_EXC( R_ECX, -5 );
736 MOV_r32_r32( R_ECX, R_EAX );
737 ADD_imm8s_sh4r( 2, REG_OFFSET(r[Rn]) );
738 ADD_imm8s_sh4r( 2, REG_OFFSET(r[Rm]) );
740 MEM_READ_WORD( R_EAX, R_EAX );
743 MEM_READ_WORD( R_ECX, R_EAX );
744 POP_realigned_r32( R_ECX );
747 load_spreg( R_ECX, R_S );
748 TEST_r32_r32( R_ECX, R_ECX );
751 ADD_r32_sh4r( R_EAX, R_MACL ); // 6
752 JNO_rel8( end ); // 2
753 load_imm32( R_EDX, 1 ); // 5
754 store_spreg( R_EDX, R_MACH ); // 6
755 JS_rel8( positive ); // 2
756 load_imm32( R_EAX, 0x80000000 );// 5
757 store_spreg( R_EAX, R_MACL ); // 6
760 JMP_TARGET(positive);
761 load_imm32( R_EAX, 0x7FFFFFFF );// 5
762 store_spreg( R_EAX, R_MACL ); // 6
766 ADD_r32_sh4r( R_EAX, R_MACL ); // 6
767 ADC_r32_sh4r( R_EDX, R_MACH ); // 6
771 sh4_x86.tstate = TSTATE_NONE;
775 load_spreg( R_EAX, R_T );
776 store_reg( R_EAX, Rn );
780 load_reg( R_EAX, Rm );
781 load_reg( R_ECX, Rn );
783 store_spreg( R_EAX, R_MACL );
784 sh4_x86.tstate = TSTATE_NONE;
788 load_reg16s( R_EAX, Rm );
789 load_reg16s( R_ECX, Rn );
791 store_spreg( R_EAX, R_MACL );
792 sh4_x86.tstate = TSTATE_NONE;
796 load_reg16u( R_EAX, Rm );
797 load_reg16u( R_ECX, Rn );
799 store_spreg( R_EAX, R_MACL );
800 sh4_x86.tstate = TSTATE_NONE;
804 load_reg( R_EAX, Rm );
806 store_reg( R_EAX, Rn );
807 sh4_x86.tstate = TSTATE_NONE;
811 load_reg( R_EAX, Rm );
812 XOR_r32_r32( R_ECX, R_ECX );
814 SBB_r32_r32( R_EAX, R_ECX );
815 store_reg( R_ECX, Rn );
817 sh4_x86.tstate = TSTATE_C;
821 load_reg( R_EAX, Rm );
823 store_reg( R_EAX, Rn );
824 sh4_x86.tstate = TSTATE_NONE;
828 load_reg( R_EAX, Rm );
829 load_reg( R_ECX, Rn );
830 OR_r32_r32( R_EAX, R_ECX );
831 store_reg( R_ECX, Rn );
832 sh4_x86.tstate = TSTATE_NONE;
836 load_reg( R_EAX, 0 );
837 OR_imm32_r32(imm, R_EAX);
838 store_reg( R_EAX, 0 );
839 sh4_x86.tstate = TSTATE_NONE;
841 OR.B #imm, @(R0, GBR) {:
843 load_reg( R_EAX, 0 );
844 load_spreg( R_ECX, R_GBR );
845 ADD_r32_r32( R_ECX, R_EAX );
846 MMU_TRANSLATE_WRITE( R_EAX );
847 PUSH_realigned_r32(R_EAX);
848 MEM_READ_BYTE( R_EAX, R_EDX );
849 POP_realigned_r32(R_EAX);
850 OR_imm32_r32(imm, R_EDX );
851 MEM_WRITE_BYTE( R_EAX, R_EDX );
852 sh4_x86.tstate = TSTATE_NONE;
856 load_reg( R_EAX, Rn );
857 if( sh4_x86.tstate != TSTATE_C ) {
861 store_reg( R_EAX, Rn );
863 sh4_x86.tstate = TSTATE_C;
867 load_reg( R_EAX, Rn );
868 if( sh4_x86.tstate != TSTATE_C ) {
872 store_reg( R_EAX, Rn );
874 sh4_x86.tstate = TSTATE_C;
878 load_reg( R_EAX, Rn );
880 store_reg( R_EAX, Rn );
882 sh4_x86.tstate = TSTATE_C;
886 load_reg( R_EAX, Rn );
888 store_reg( R_EAX, Rn );
890 sh4_x86.tstate = TSTATE_C;
894 /* Annoyingly enough, not directly convertible */
895 load_reg( R_EAX, Rn );
896 load_reg( R_ECX, Rm );
897 CMP_imm32_r32( 0, R_ECX );
900 NEG_r32( R_ECX ); // 2
901 AND_imm8_r8( 0x1F, R_CL ); // 3
902 JE_rel8(emptysar); // 2
903 SAR_r32_CL( R_EAX ); // 2
906 JMP_TARGET(emptysar);
907 SAR_imm8_r32(31, R_EAX ); // 3
911 AND_imm8_r8( 0x1F, R_CL ); // 3
912 SHL_r32_CL( R_EAX ); // 2
915 store_reg( R_EAX, Rn );
916 sh4_x86.tstate = TSTATE_NONE;
920 load_reg( R_EAX, Rn );
921 load_reg( R_ECX, Rm );
922 CMP_imm32_r32( 0, R_ECX );
925 NEG_r32( R_ECX ); // 2
926 AND_imm8_r8( 0x1F, R_CL ); // 3
928 SHR_r32_CL( R_EAX ); // 2
931 JMP_TARGET(emptyshr);
932 XOR_r32_r32( R_EAX, R_EAX );
936 AND_imm8_r8( 0x1F, R_CL ); // 3
937 SHL_r32_CL( R_EAX ); // 2
940 store_reg( R_EAX, Rn );
941 sh4_x86.tstate = TSTATE_NONE;
945 load_reg( R_EAX, Rn );
948 store_reg( R_EAX, Rn );
949 sh4_x86.tstate = TSTATE_C;
953 load_reg( R_EAX, Rn );
956 store_reg( R_EAX, Rn );
957 sh4_x86.tstate = TSTATE_C;
961 load_reg( R_EAX, Rn );
964 store_reg( R_EAX, Rn );
965 sh4_x86.tstate = TSTATE_C;
969 load_reg( R_EAX, Rn );
970 SHL_imm8_r32( 2, R_EAX );
971 store_reg( R_EAX, Rn );
972 sh4_x86.tstate = TSTATE_NONE;
976 load_reg( R_EAX, Rn );
977 SHL_imm8_r32( 8, R_EAX );
978 store_reg( R_EAX, Rn );
979 sh4_x86.tstate = TSTATE_NONE;
983 load_reg( R_EAX, Rn );
984 SHL_imm8_r32( 16, R_EAX );
985 store_reg( R_EAX, Rn );
986 sh4_x86.tstate = TSTATE_NONE;
990 load_reg( R_EAX, Rn );
993 store_reg( R_EAX, Rn );
994 sh4_x86.tstate = TSTATE_C;
998 load_reg( R_EAX, Rn );
999 SHR_imm8_r32( 2, R_EAX );
1000 store_reg( R_EAX, Rn );
1001 sh4_x86.tstate = TSTATE_NONE;
1005 load_reg( R_EAX, Rn );
1006 SHR_imm8_r32( 8, R_EAX );
1007 store_reg( R_EAX, Rn );
1008 sh4_x86.tstate = TSTATE_NONE;
1012 load_reg( R_EAX, Rn );
1013 SHR_imm8_r32( 16, R_EAX );
1014 store_reg( R_EAX, Rn );
1015 sh4_x86.tstate = TSTATE_NONE;
1019 load_reg( R_EAX, Rm );
1020 load_reg( R_ECX, Rn );
1021 SUB_r32_r32( R_EAX, R_ECX );
1022 store_reg( R_ECX, Rn );
1023 sh4_x86.tstate = TSTATE_NONE;
1027 load_reg( R_EAX, Rm );
1028 load_reg( R_ECX, Rn );
1029 if( sh4_x86.tstate != TSTATE_C ) {
1032 SBB_r32_r32( R_EAX, R_ECX );
1033 store_reg( R_ECX, Rn );
1035 sh4_x86.tstate = TSTATE_C;
1039 load_reg( R_EAX, Rm );
1040 load_reg( R_ECX, Rn );
1041 SUB_r32_r32( R_EAX, R_ECX );
1042 store_reg( R_ECX, Rn );
1044 sh4_x86.tstate = TSTATE_O;
1047 COUNT_INST(I_SWAPB);
1048 load_reg( R_EAX, Rm );
1049 XCHG_r8_r8( R_AL, R_AH ); // NB: does not touch EFLAGS
1050 store_reg( R_EAX, Rn );
1053 COUNT_INST(I_SWAPB);
1054 load_reg( R_EAX, Rm );
1055 MOV_r32_r32( R_EAX, R_ECX );
1056 SHL_imm8_r32( 16, R_ECX );
1057 SHR_imm8_r32( 16, R_EAX );
1058 OR_r32_r32( R_EAX, R_ECX );
1059 store_reg( R_ECX, Rn );
1060 sh4_x86.tstate = TSTATE_NONE;
1064 load_reg( R_EAX, Rn );
1065 MMU_TRANSLATE_WRITE( R_EAX );
1066 PUSH_realigned_r32( R_EAX );
1067 MEM_READ_BYTE( R_EAX, R_EDX );
1068 TEST_r8_r8( R_DL, R_DL );
1070 OR_imm8_r8( 0x80, R_DL );
1071 POP_realigned_r32( R_EAX );
1072 MEM_WRITE_BYTE( R_EAX, R_EDX );
1073 sh4_x86.tstate = TSTATE_NONE;
1077 load_reg( R_EAX, Rm );
1078 load_reg( R_ECX, Rn );
1079 TEST_r32_r32( R_EAX, R_ECX );
1081 sh4_x86.tstate = TSTATE_E;
1085 load_reg( R_EAX, 0 );
1086 TEST_imm32_r32( imm, R_EAX );
1088 sh4_x86.tstate = TSTATE_E;
1090 TST.B #imm, @(R0, GBR) {:
1092 load_reg( R_EAX, 0);
1093 load_reg( R_ECX, R_GBR);
1094 ADD_r32_r32( R_ECX, R_EAX );
1095 MMU_TRANSLATE_READ( R_EAX );
1096 MEM_READ_BYTE( R_EAX, R_EAX );
1097 TEST_imm8_r8( imm, R_AL );
1099 sh4_x86.tstate = TSTATE_E;
1103 load_reg( R_EAX, Rm );
1104 load_reg( R_ECX, Rn );
1105 XOR_r32_r32( R_EAX, R_ECX );
1106 store_reg( R_ECX, Rn );
1107 sh4_x86.tstate = TSTATE_NONE;
1111 load_reg( R_EAX, 0 );
1112 XOR_imm32_r32( imm, R_EAX );
1113 store_reg( R_EAX, 0 );
1114 sh4_x86.tstate = TSTATE_NONE;
1116 XOR.B #imm, @(R0, GBR) {:
1118 load_reg( R_EAX, 0 );
1119 load_spreg( R_ECX, R_GBR );
1120 ADD_r32_r32( R_ECX, R_EAX );
1121 MMU_TRANSLATE_WRITE( R_EAX );
1122 PUSH_realigned_r32(R_EAX);
1123 MEM_READ_BYTE(R_EAX, R_EDX);
1124 POP_realigned_r32(R_EAX);
1125 XOR_imm32_r32( imm, R_EDX );
1126 MEM_WRITE_BYTE( R_EAX, R_EDX );
1127 sh4_x86.tstate = TSTATE_NONE;
1130 COUNT_INST(I_XTRCT);
1131 load_reg( R_EAX, Rm );
1132 load_reg( R_ECX, Rn );
1133 SHL_imm8_r32( 16, R_EAX );
1134 SHR_imm8_r32( 16, R_ECX );
1135 OR_r32_r32( R_EAX, R_ECX );
1136 store_reg( R_ECX, Rn );
1137 sh4_x86.tstate = TSTATE_NONE;
1140 /* Data move instructions */
1143 load_reg( R_EAX, Rm );
1144 store_reg( R_EAX, Rn );
1148 load_imm32( R_EAX, imm );
1149 store_reg( R_EAX, Rn );
1153 load_reg( R_EAX, Rn );
1154 MMU_TRANSLATE_WRITE( R_EAX );
1155 load_reg( R_EDX, Rm );
1156 MEM_WRITE_BYTE( R_EAX, R_EDX );
1157 sh4_x86.tstate = TSTATE_NONE;
1161 load_reg( R_EAX, Rn );
1162 ADD_imm8s_r32( -1, R_EAX );
1163 MMU_TRANSLATE_WRITE( R_EAX );
1164 load_reg( R_EDX, Rm );
1165 ADD_imm8s_sh4r( -1, REG_OFFSET(r[Rn]) );
1166 MEM_WRITE_BYTE( R_EAX, R_EDX );
1167 sh4_x86.tstate = TSTATE_NONE;
1169 MOV.B Rm, @(R0, Rn) {:
1171 load_reg( R_EAX, 0 );
1172 load_reg( R_ECX, Rn );
1173 ADD_r32_r32( R_ECX, R_EAX );
1174 MMU_TRANSLATE_WRITE( R_EAX );
1175 load_reg( R_EDX, Rm );
1176 MEM_WRITE_BYTE( R_EAX, R_EDX );
1177 sh4_x86.tstate = TSTATE_NONE;
1179 MOV.B R0, @(disp, GBR) {:
1181 load_spreg( R_EAX, R_GBR );
1182 ADD_imm32_r32( disp, R_EAX );
1183 MMU_TRANSLATE_WRITE( R_EAX );
1184 load_reg( R_EDX, 0 );
1185 MEM_WRITE_BYTE( R_EAX, R_EDX );
1186 sh4_x86.tstate = TSTATE_NONE;
1188 MOV.B R0, @(disp, Rn) {:
1190 load_reg( R_EAX, Rn );
1191 ADD_imm32_r32( disp, R_EAX );
1192 MMU_TRANSLATE_WRITE( R_EAX );
1193 load_reg( R_EDX, 0 );
1194 MEM_WRITE_BYTE( R_EAX, R_EDX );
1195 sh4_x86.tstate = TSTATE_NONE;
1199 load_reg( R_EAX, Rm );
1200 MMU_TRANSLATE_READ( R_EAX );
1201 MEM_READ_BYTE( R_EAX, R_EAX );
1202 store_reg( R_EAX, Rn );
1203 sh4_x86.tstate = TSTATE_NONE;
1207 load_reg( R_EAX, Rm );
1208 MMU_TRANSLATE_READ( R_EAX );
1209 ADD_imm8s_sh4r( 1, REG_OFFSET(r[Rm]) );
1210 MEM_READ_BYTE( R_EAX, R_EAX );
1211 store_reg( R_EAX, Rn );
1212 sh4_x86.tstate = TSTATE_NONE;
1214 MOV.B @(R0, Rm), Rn {:
1216 load_reg( R_EAX, 0 );
1217 load_reg( R_ECX, Rm );
1218 ADD_r32_r32( R_ECX, R_EAX );
1219 MMU_TRANSLATE_READ( R_EAX )
1220 MEM_READ_BYTE( R_EAX, R_EAX );
1221 store_reg( R_EAX, Rn );
1222 sh4_x86.tstate = TSTATE_NONE;
1224 MOV.B @(disp, GBR), R0 {:
1226 load_spreg( R_EAX, R_GBR );
1227 ADD_imm32_r32( disp, R_EAX );
1228 MMU_TRANSLATE_READ( R_EAX );
1229 MEM_READ_BYTE( R_EAX, R_EAX );
1230 store_reg( R_EAX, 0 );
1231 sh4_x86.tstate = TSTATE_NONE;
1233 MOV.B @(disp, Rm), R0 {:
1235 load_reg( R_EAX, Rm );
1236 ADD_imm32_r32( disp, R_EAX );
1237 MMU_TRANSLATE_READ( R_EAX );
1238 MEM_READ_BYTE( R_EAX, R_EAX );
1239 store_reg( R_EAX, 0 );
1240 sh4_x86.tstate = TSTATE_NONE;
1244 load_reg( R_EAX, Rn );
1245 check_walign32(R_EAX);
1246 MMU_TRANSLATE_WRITE( R_EAX );
1247 load_reg( R_EDX, Rm );
1248 MEM_WRITE_LONG( R_EAX, R_EDX );
1249 sh4_x86.tstate = TSTATE_NONE;
1253 load_reg( R_EAX, Rn );
1254 ADD_imm8s_r32( -4, R_EAX );
1255 check_walign32( R_EAX );
1256 MMU_TRANSLATE_WRITE( R_EAX );
1257 load_reg( R_EDX, Rm );
1258 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
1259 MEM_WRITE_LONG( R_EAX, R_EDX );
1260 sh4_x86.tstate = TSTATE_NONE;
1262 MOV.L Rm, @(R0, Rn) {:
1264 load_reg( R_EAX, 0 );
1265 load_reg( R_ECX, Rn );
1266 ADD_r32_r32( R_ECX, R_EAX );
1267 check_walign32( R_EAX );
1268 MMU_TRANSLATE_WRITE( R_EAX );
1269 load_reg( R_EDX, Rm );
1270 MEM_WRITE_LONG( R_EAX, R_EDX );
1271 sh4_x86.tstate = TSTATE_NONE;
1273 MOV.L R0, @(disp, GBR) {:
1275 load_spreg( R_EAX, R_GBR );
1276 ADD_imm32_r32( disp, R_EAX );
1277 check_walign32( R_EAX );
1278 MMU_TRANSLATE_WRITE( R_EAX );
1279 load_reg( R_EDX, 0 );
1280 MEM_WRITE_LONG( R_EAX, R_EDX );
1281 sh4_x86.tstate = TSTATE_NONE;
1283 MOV.L Rm, @(disp, Rn) {:
1285 load_reg( R_EAX, Rn );
1286 ADD_imm32_r32( disp, R_EAX );
1287 check_walign32( R_EAX );
1288 MMU_TRANSLATE_WRITE( R_EAX );
1289 load_reg( R_EDX, Rm );
1290 MEM_WRITE_LONG( R_EAX, R_EDX );
1291 sh4_x86.tstate = TSTATE_NONE;
1295 load_reg( R_EAX, Rm );
1296 check_ralign32( R_EAX );
1297 MMU_TRANSLATE_READ( R_EAX );
1298 MEM_READ_LONG( R_EAX, R_EAX );
1299 store_reg( R_EAX, Rn );
1300 sh4_x86.tstate = TSTATE_NONE;
1304 load_reg( R_EAX, Rm );
1305 check_ralign32( R_EAX );
1306 MMU_TRANSLATE_READ( R_EAX );
1307 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
1308 MEM_READ_LONG( R_EAX, R_EAX );
1309 store_reg( R_EAX, Rn );
1310 sh4_x86.tstate = TSTATE_NONE;
1312 MOV.L @(R0, Rm), Rn {:
1314 load_reg( R_EAX, 0 );
1315 load_reg( R_ECX, Rm );
1316 ADD_r32_r32( R_ECX, R_EAX );
1317 check_ralign32( R_EAX );
1318 MMU_TRANSLATE_READ( R_EAX );
1319 MEM_READ_LONG( R_EAX, R_EAX );
1320 store_reg( R_EAX, Rn );
1321 sh4_x86.tstate = TSTATE_NONE;
1323 MOV.L @(disp, GBR), R0 {:
1325 load_spreg( R_EAX, R_GBR );
1326 ADD_imm32_r32( disp, R_EAX );
1327 check_ralign32( R_EAX );
1328 MMU_TRANSLATE_READ( R_EAX );
1329 MEM_READ_LONG( R_EAX, R_EAX );
1330 store_reg( R_EAX, 0 );
1331 sh4_x86.tstate = TSTATE_NONE;
1333 MOV.L @(disp, PC), Rn {:
1334 COUNT_INST(I_MOVLPC);
1335 if( sh4_x86.in_delay_slot ) {
1338 uint32_t target = (pc & 0xFFFFFFFC) + disp + 4;
1339 if( IS_IN_ICACHE(target) ) {
1340 // If the target address is in the same page as the code, it's
1341 // pretty safe to just ref it directly and circumvent the whole
1342 // memory subsystem. (This is a big performance win.)
1344 // FIXME: There's a corner-case that's not handled here when
1345 // the current code-page is in the ITLB but not in the UTLB.
1346 // (should generate a TLB miss, although we'd need to test SH4
1347 // behaviour to confirm.) It's unlikely anyone is depending on this
1348 // behaviour, though.
1349 sh4ptr_t ptr = GET_ICACHE_PTR(target);
1350 MOV_moff32_EAX( ptr );
1352 // Note: we use sh4r.pc for the calc as we could be running at a
1353 // different virtual address than the translation was done with,
1354 // but we can safely assume that the low bits are the same.
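// e.g. (illustrative) if this instruction executes at 0x8C001006 with
// disp == 0x18, EAX ends up as (0x8C001006 & ~3) + 0x18 + 4 = 0x8C001020,
// matching the (pc & 0xFFFFFFFC) + disp + 4 calculation used for the
// icache case above.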
1355 load_imm32( R_EAX, (pc-sh4_x86.block_start_pc) + disp + 4 - (pc&0x03) );
1356 ADD_sh4r_r32( R_PC, R_EAX );
1357 MMU_TRANSLATE_READ( R_EAX );
1358 MEM_READ_LONG( R_EAX, R_EAX );
1359 sh4_x86.tstate = TSTATE_NONE;
1361 store_reg( R_EAX, Rn );
1364 MOV.L @(disp, Rm), Rn {:
1366 load_reg( R_EAX, Rm );
1367 ADD_imm8s_r32( disp, R_EAX );
1368 check_ralign32( R_EAX );
1369 MMU_TRANSLATE_READ( R_EAX );
1370 MEM_READ_LONG( R_EAX, R_EAX );
1371 store_reg( R_EAX, Rn );
1372 sh4_x86.tstate = TSTATE_NONE;
1376 load_reg( R_EAX, Rn );
1377 check_walign16( R_EAX );
1378 MMU_TRANSLATE_WRITE( R_EAX )
1379 load_reg( R_EDX, Rm );
1380 MEM_WRITE_WORD( R_EAX, R_EDX );
1381 sh4_x86.tstate = TSTATE_NONE;
1385 load_reg( R_EAX, Rn );
1386 ADD_imm8s_r32( -2, R_EAX );
1387 check_walign16( R_EAX );
1388 MMU_TRANSLATE_WRITE( R_EAX );
1389 load_reg( R_EDX, Rm );
1390 ADD_imm8s_sh4r( -2, REG_OFFSET(r[Rn]) );
1391 MEM_WRITE_WORD( R_EAX, R_EDX );
1392 sh4_x86.tstate = TSTATE_NONE;
1394 MOV.W Rm, @(R0, Rn) {:
1396 load_reg( R_EAX, 0 );
1397 load_reg( R_ECX, Rn );
1398 ADD_r32_r32( R_ECX, R_EAX );
1399 check_walign16( R_EAX );
1400 MMU_TRANSLATE_WRITE( R_EAX );
1401 load_reg( R_EDX, Rm );
1402 MEM_WRITE_WORD( R_EAX, R_EDX );
1403 sh4_x86.tstate = TSTATE_NONE;
1405 MOV.W R0, @(disp, GBR) {:
1407 load_spreg( R_EAX, R_GBR );
1408 ADD_imm32_r32( disp, R_EAX );
1409 check_walign16( R_EAX );
1410 MMU_TRANSLATE_WRITE( R_EAX );
1411 load_reg( R_EDX, 0 );
1412 MEM_WRITE_WORD( R_EAX, R_EDX );
1413 sh4_x86.tstate = TSTATE_NONE;
1415 MOV.W R0, @(disp, Rn) {:
1417 load_reg( R_EAX, Rn );
1418 ADD_imm32_r32( disp, R_EAX );
1419 check_walign16( R_EAX );
1420 MMU_TRANSLATE_WRITE( R_EAX );
1421 load_reg( R_EDX, 0 );
1422 MEM_WRITE_WORD( R_EAX, R_EDX );
1423 sh4_x86.tstate = TSTATE_NONE;
1427 load_reg( R_EAX, Rm );
1428 check_ralign16( R_EAX );
1429 MMU_TRANSLATE_READ( R_EAX );
1430 MEM_READ_WORD( R_EAX, R_EAX );
1431 store_reg( R_EAX, Rn );
1432 sh4_x86.tstate = TSTATE_NONE;
1436 load_reg( R_EAX, Rm );
1437 check_ralign16( R_EAX );
1438 MMU_TRANSLATE_READ( R_EAX );
1439 ADD_imm8s_sh4r( 2, REG_OFFSET(r[Rm]) );
1440 MEM_READ_WORD( R_EAX, R_EAX );
1441 store_reg( R_EAX, Rn );
1442 sh4_x86.tstate = TSTATE_NONE;
1444 MOV.W @(R0, Rm), Rn {:
1446 load_reg( R_EAX, 0 );
1447 load_reg( R_ECX, Rm );
1448 ADD_r32_r32( R_ECX, R_EAX );
1449 check_ralign16( R_EAX );
1450 MMU_TRANSLATE_READ( R_EAX );
1451 MEM_READ_WORD( R_EAX, R_EAX );
1452 store_reg( R_EAX, Rn );
1453 sh4_x86.tstate = TSTATE_NONE;
1455 MOV.W @(disp, GBR), R0 {:
1457 load_spreg( R_EAX, R_GBR );
1458 ADD_imm32_r32( disp, R_EAX );
1459 check_ralign16( R_EAX );
1460 MMU_TRANSLATE_READ( R_EAX );
1461 MEM_READ_WORD( R_EAX, R_EAX );
1462 store_reg( R_EAX, 0 );
1463 sh4_x86.tstate = TSTATE_NONE;
1465 MOV.W @(disp, PC), Rn {:
1467 if( sh4_x86.in_delay_slot ) {
1470 // See comments for MOV.L @(disp, PC), Rn
1471 uint32_t target = pc + disp + 4;
1472 if( IS_IN_ICACHE(target) ) {
1473 sh4ptr_t ptr = GET_ICACHE_PTR(target);
1474 MOV_moff32_EAX( ptr );
1475 MOVSX_r16_r32( R_EAX, R_EAX );
1477 load_imm32( R_EAX, (pc - sh4_x86.block_start_pc) + disp + 4 );
1478 ADD_sh4r_r32( R_PC, R_EAX );
1479 MMU_TRANSLATE_READ( R_EAX );
1480 MEM_READ_WORD( R_EAX, R_EAX );
1481 sh4_x86.tstate = TSTATE_NONE;
1483 store_reg( R_EAX, Rn );
1486 MOV.W @(disp, Rm), R0 {:
1488 load_reg( R_EAX, Rm );
1489 ADD_imm32_r32( disp, R_EAX );
1490 check_ralign16( R_EAX );
1491 MMU_TRANSLATE_READ( R_EAX );
1492 MEM_READ_WORD( R_EAX, R_EAX );
1493 store_reg( R_EAX, 0 );
1494 sh4_x86.tstate = TSTATE_NONE;
1496 MOVA @(disp, PC), R0 {:
1498 if( sh4_x86.in_delay_slot ) {
1501 load_imm32( R_ECX, (pc - sh4_x86.block_start_pc) + disp + 4 - (pc&0x03) );
1502 ADD_sh4r_r32( R_PC, R_ECX );
1503 store_reg( R_ECX, 0 );
1504 sh4_x86.tstate = TSTATE_NONE;
1508 COUNT_INST(I_MOVCA);
1509 load_reg( R_EAX, Rn );
1510 check_walign32( R_EAX );
1511 MMU_TRANSLATE_WRITE( R_EAX );
1512 load_reg( R_EDX, 0 );
1513 MEM_WRITE_LONG( R_EAX, R_EDX );
1514 sh4_x86.tstate = TSTATE_NONE;
1517 /* Control transfer instructions */
1520 if( sh4_x86.in_delay_slot ) {
1523 sh4vma_t target = disp + pc + 4;
1524 JT_rel8( nottaken );
1525 exit_block_rel(target, pc+2 );
1526 JMP_TARGET(nottaken);
1532 if( sh4_x86.in_delay_slot ) {
1535 sh4_x86.in_delay_slot = DELAY_PC;
1536 if( UNTRANSLATABLE(pc+2) ) {
1537 load_imm32( R_EAX, pc + 4 - sh4_x86.block_start_pc );
1539 ADD_imm32_r32( disp, R_EAX );
1540 JMP_TARGET(nottaken);
1541 ADD_sh4r_r32( R_PC, R_EAX );
1542 store_spreg( R_EAX, R_NEW_PC );
1543 exit_block_emu(pc+2);
1544 sh4_x86.branch_taken = TRUE;
1547 if( sh4_x86.tstate == TSTATE_NONE ) {
1548 CMP_imm8s_sh4r( 1, R_T );
1549 sh4_x86.tstate = TSTATE_E;
1551 sh4vma_t target = disp + pc + 4;
1552 OP(0x0F); OP(0x80+sh4_x86.tstate); uint32_t *patch = (uint32_t *)xlat_output; OP32(0); // JT rel32
1553 int save_tstate = sh4_x86.tstate;
1554 sh4_translate_instruction(pc+2);
1555 exit_block_rel( target, pc+4 );
1558 *patch = (xlat_output - ((uint8_t *)patch)) - 4; // rel32 = target - end of the 4-byte displacement field
1559 sh4_x86.tstate = save_tstate;
1560 sh4_translate_instruction(pc+2);
1567 if( sh4_x86.in_delay_slot ) {
1570 sh4_x86.in_delay_slot = DELAY_PC;
1571 sh4_x86.branch_taken = TRUE;
1572 if( UNTRANSLATABLE(pc+2) ) {
1573 load_spreg( R_EAX, R_PC );
1574 ADD_imm32_r32( pc + disp + 4 - sh4_x86.block_start_pc, R_EAX );
1575 store_spreg( R_EAX, R_NEW_PC );
1576 exit_block_emu(pc+2);
1579 sh4_translate_instruction( pc + 2 );
1580 exit_block_rel( disp + pc + 4, pc+4 );
1587 if( sh4_x86.in_delay_slot ) {
1590 load_spreg( R_EAX, R_PC );
1591 ADD_imm32_r32( pc + 4 - sh4_x86.block_start_pc, R_EAX );
1592 ADD_sh4r_r32( REG_OFFSET(r[Rn]), R_EAX );
1593 store_spreg( R_EAX, R_NEW_PC );
1594 sh4_x86.in_delay_slot = DELAY_PC;
1595 sh4_x86.tstate = TSTATE_NONE;
1596 sh4_x86.branch_taken = TRUE;
1597 if( UNTRANSLATABLE(pc+2) ) {
1598 exit_block_emu(pc+2);
1601 sh4_translate_instruction( pc + 2 );
1602 exit_block_newpcset(pc+2);
1609 if( sh4_x86.in_delay_slot ) {
1612 load_spreg( R_EAX, R_PC );
1613 ADD_imm32_r32( pc + 4 - sh4_x86.block_start_pc, R_EAX );
1614 store_spreg( R_EAX, R_PR );
1615 sh4_x86.in_delay_slot = DELAY_PC;
1616 sh4_x86.branch_taken = TRUE;
1617 sh4_x86.tstate = TSTATE_NONE;
1618 if( UNTRANSLATABLE(pc+2) ) {
1619 ADD_imm32_r32( disp, R_EAX );
1620 store_spreg( R_EAX, R_NEW_PC );
1621 exit_block_emu(pc+2);
1624 sh4_translate_instruction( pc + 2 );
1625 exit_block_rel( disp + pc + 4, pc+4 );
1632 if( sh4_x86.in_delay_slot ) {
1635 load_spreg( R_EAX, R_PC );
1636 ADD_imm32_r32( pc + 4 - sh4_x86.block_start_pc, R_EAX );
1637 store_spreg( R_EAX, R_PR );
1638 ADD_sh4r_r32( REG_OFFSET(r[Rn]), R_EAX );
1639 store_spreg( R_EAX, R_NEW_PC );
1641 sh4_x86.in_delay_slot = DELAY_PC;
1642 sh4_x86.tstate = TSTATE_NONE;
1643 sh4_x86.branch_taken = TRUE;
1644 if( UNTRANSLATABLE(pc+2) ) {
1645 exit_block_emu(pc+2);
1648 sh4_translate_instruction( pc + 2 );
1649 exit_block_newpcset(pc+2);
1656 if( sh4_x86.in_delay_slot ) {
1659 sh4vma_t target = disp + pc + 4;
1660 JF_rel8( nottaken );
1661 exit_block_rel(target, pc+2 );
1662 JMP_TARGET(nottaken);
1668 if( sh4_x86.in_delay_slot ) {
1671 sh4_x86.in_delay_slot = DELAY_PC;
1672 if( UNTRANSLATABLE(pc+2) ) {
1673 load_imm32( R_EAX, pc + 4 - sh4_x86.block_start_pc );
1675 ADD_imm32_r32( disp, R_EAX );
1676 JMP_TARGET(nottaken);
1677 ADD_sh4r_r32( R_PC, R_EAX );
1678 store_spreg( R_EAX, R_NEW_PC );
1679 exit_block_emu(pc+2);
1680 sh4_x86.branch_taken = TRUE;
1683 if( sh4_x86.tstate == TSTATE_NONE ) {
1684 CMP_imm8s_sh4r( 1, R_T );
1685 sh4_x86.tstate = TSTATE_E;
1687 OP(0x0F); OP(0x80+(sh4_x86.tstate^1)); uint32_t *patch = (uint32_t *)xlat_output; OP32(0); // JF rel32
1688 int save_tstate = sh4_x86.tstate;
1689 sh4_translate_instruction(pc+2);
1690 exit_block_rel( disp + pc + 4, pc+4 );
1692 *patch = (xlat_output - ((uint8_t *)patch)) - 4;
1693 sh4_x86.tstate = save_tstate;
1694 sh4_translate_instruction(pc+2);
1701 if( sh4_x86.in_delay_slot ) {
1704 load_reg( R_ECX, Rn );
1705 store_spreg( R_ECX, R_NEW_PC );
1706 sh4_x86.in_delay_slot = DELAY_PC;
1707 sh4_x86.branch_taken = TRUE;
1708 if( UNTRANSLATABLE(pc+2) ) {
1709 exit_block_emu(pc+2);
1712 sh4_translate_instruction(pc+2);
1713 exit_block_newpcset(pc+2);
1720 if( sh4_x86.in_delay_slot ) {
1723 load_spreg( R_EAX, R_PC );
1724 ADD_imm32_r32( pc + 4 - sh4_x86.block_start_pc, R_EAX );
1725 store_spreg( R_EAX, R_PR );
1726 load_reg( R_ECX, Rn );
1727 store_spreg( R_ECX, R_NEW_PC );
1728 sh4_x86.in_delay_slot = DELAY_PC;
1729 sh4_x86.branch_taken = TRUE;
1730 sh4_x86.tstate = TSTATE_NONE;
1731 if( UNTRANSLATABLE(pc+2) ) {
1732 exit_block_emu(pc+2);
1735 sh4_translate_instruction(pc+2);
1736 exit_block_newpcset(pc+2);
1743 if( sh4_x86.in_delay_slot ) {
1747 load_spreg( R_ECX, R_SPC );
1748 store_spreg( R_ECX, R_NEW_PC );
1749 load_spreg( R_EAX, R_SSR );
1750 call_func1( sh4_write_sr, R_EAX );
1751 sh4_x86.in_delay_slot = DELAY_PC;
1752 sh4_x86.priv_checked = FALSE;
1753 sh4_x86.fpuen_checked = FALSE;
1754 sh4_x86.tstate = TSTATE_NONE;
1755 sh4_x86.branch_taken = TRUE;
1756 if( UNTRANSLATABLE(pc+2) ) {
1757 exit_block_emu(pc+2);
1760 sh4_translate_instruction(pc+2);
1761 exit_block_newpcset(pc+2);
1768 if( sh4_x86.in_delay_slot ) {
1771 load_spreg( R_ECX, R_PR );
1772 store_spreg( R_ECX, R_NEW_PC );
1773 sh4_x86.in_delay_slot = DELAY_PC;
1774 sh4_x86.branch_taken = TRUE;
1775 if( UNTRANSLATABLE(pc+2) ) {
1776 exit_block_emu(pc+2);
1779 sh4_translate_instruction(pc+2);
1780 exit_block_newpcset(pc+2);
1786 COUNT_INST(I_TRAPA);
1787 if( sh4_x86.in_delay_slot ) {
1790 load_imm32( R_ECX, pc+2 - sh4_x86.block_start_pc ); // 5
1791 ADD_r32_sh4r( R_ECX, R_PC );
1792 load_imm32( R_EAX, imm );
1793 call_func1( sh4_raise_trap, R_EAX );
1794 sh4_x86.tstate = TSTATE_NONE;
1795 exit_block_pcset(pc);
1796 sh4_x86.branch_taken = TRUE;
1801 COUNT_INST(I_UNDEF);
1802 if( sh4_x86.in_delay_slot ) {
1805 JMP_exc(EXC_ILLEGAL);
1811 COUNT_INST(I_CLRMAC);
1812 XOR_r32_r32(R_EAX, R_EAX);
1813 store_spreg( R_EAX, R_MACL );
1814 store_spreg( R_EAX, R_MACH );
1815 sh4_x86.tstate = TSTATE_NONE;
1821 sh4_x86.tstate = TSTATE_NONE;
1827 sh4_x86.tstate = TSTATE_C;
1833 sh4_x86.tstate = TSTATE_NONE;
1839 sh4_x86.tstate = TSTATE_C;
1842 /* Floating point moves */
1844 COUNT_INST(I_FMOV1);
1846 if( sh4_x86.double_size ) {
1847 load_dr0( R_EAX, FRm );
1848 load_dr1( R_ECX, FRm );
1849 store_dr0( R_EAX, FRn );
1850 store_dr1( R_ECX, FRn );
1852 load_fr( R_EAX, FRm ); // SZ=0 branch
1853 store_fr( R_EAX, FRn );
1857 COUNT_INST(I_FMOV2);
1859 load_reg( R_EAX, Rn );
1860 if( sh4_x86.double_size ) {
1861 check_walign64( R_EAX );
1862 MMU_TRANSLATE_WRITE( R_EAX );
1863 load_dr0( R_EDX, FRm );
1864 load_dr1( R_ECX, FRm );
1865 MEM_WRITE_DOUBLE( R_EAX, R_EDX, R_ECX );
1867 check_walign32( R_EAX );
1868 MMU_TRANSLATE_WRITE( R_EAX );
1869 load_fr( R_EDX, FRm );
1870 MEM_WRITE_LONG( R_EAX, R_EDX );
1872 sh4_x86.tstate = TSTATE_NONE;
1875 COUNT_INST(I_FMOV5);
1877 load_reg( R_EAX, Rm );
1878 if( sh4_x86.double_size ) {
1879 check_ralign64( R_EAX );
1880 MMU_TRANSLATE_READ( R_EAX );
1881 MEM_READ_DOUBLE( R_EAX, R_EDX, R_EAX );
1882 store_dr0( R_EDX, FRn );
1883 store_dr1( R_EAX, FRn );
1885 check_ralign32( R_EAX );
1886 MMU_TRANSLATE_READ( R_EAX );
1887 MEM_READ_LONG( R_EAX, R_EAX );
1888 store_fr( R_EAX, FRn );
1890 sh4_x86.tstate = TSTATE_NONE;
1893 COUNT_INST(I_FMOV3);
1895 load_reg( R_EAX, Rn );
1896 if( sh4_x86.double_size ) {
1897 check_walign64( R_EAX );
1898 ADD_imm8s_r32(-8,R_EAX);
1899 MMU_TRANSLATE_WRITE( R_EAX );
1900 load_dr0( R_EDX, FRm );
1901 load_dr1( R_ECX, FRm );
1902 ADD_imm8s_sh4r(-8,REG_OFFSET(r[Rn]));
1903 MEM_WRITE_DOUBLE( R_EAX, R_EDX, R_ECX );
1905 check_walign32( R_EAX );
1906 ADD_imm8s_r32( -4, R_EAX );
1907 MMU_TRANSLATE_WRITE( R_EAX );
1908 load_fr( R_EDX, FRm );
1909 ADD_imm8s_sh4r(-4,REG_OFFSET(r[Rn]));
1910 MEM_WRITE_LONG( R_EAX, R_EDX );
1912 sh4_x86.tstate = TSTATE_NONE;
1915 COUNT_INST(I_FMOV6);
1917 load_reg( R_EAX, Rm );
1918 if( sh4_x86.double_size ) {
1919 check_ralign64( R_EAX );
1920 MMU_TRANSLATE_READ( R_EAX );
1921 ADD_imm8s_sh4r( 8, REG_OFFSET(r[Rm]) );
1922 MEM_READ_DOUBLE( R_EAX, R_EDX, R_EAX );
1923 store_dr0( R_EDX, FRn );
1924 store_dr1( R_EAX, FRn );
1926 check_ralign32( R_EAX );
1927 MMU_TRANSLATE_READ( R_EAX );
1928 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
1929 MEM_READ_LONG( R_EAX, R_EAX );
1930 store_fr( R_EAX, FRn );
1932 sh4_x86.tstate = TSTATE_NONE;
1934 FMOV FRm, @(R0, Rn) {:
1935 COUNT_INST(I_FMOV4);
1937 load_reg( R_EAX, Rn );
1938 ADD_sh4r_r32( REG_OFFSET(r[0]), R_EAX );
1939 if( sh4_x86.double_size ) {
1940 check_walign64( R_EAX );
1941 MMU_TRANSLATE_WRITE( R_EAX );
1942 load_dr0( R_EDX, FRm );
1943 load_dr1( R_ECX, FRm );
1944 MEM_WRITE_DOUBLE( R_EAX, R_EDX, R_ECX );
1946 check_walign32( R_EAX );
1947 MMU_TRANSLATE_WRITE( R_EAX );
1948 load_fr( R_EDX, FRm );
1949 MEM_WRITE_LONG( R_EAX, R_EDX ); // 12
1951 sh4_x86.tstate = TSTATE_NONE;
1953 FMOV @(R0, Rm), FRn {:
1954 COUNT_INST(I_FMOV7);
1956 load_reg( R_EAX, Rm );
1957 ADD_sh4r_r32( REG_OFFSET(r[0]), R_EAX );
1958 if( sh4_x86.double_size ) {
1959 check_ralign64( R_EAX );
1960 MMU_TRANSLATE_READ( R_EAX );
1961 MEM_READ_DOUBLE( R_EAX, R_ECX, R_EAX );
1962 store_dr0( R_ECX, FRn );
1963 store_dr1( R_EAX, FRn );
1965 check_ralign32( R_EAX );
1966 MMU_TRANSLATE_READ( R_EAX );
1967 MEM_READ_LONG( R_EAX, R_EAX );
1968 store_fr( R_EAX, FRn );
1970 sh4_x86.tstate = TSTATE_NONE;
1972 FLDI0 FRn {: /* IFF PR=0 */
1973 COUNT_INST(I_FLDI0);
1975 if( sh4_x86.double_prec == 0 ) {
1976 XOR_r32_r32( R_EAX, R_EAX );
1977 store_fr( R_EAX, FRn );
1979 sh4_x86.tstate = TSTATE_NONE;
1981 FLDI1 FRn {: /* IFF PR=0 */
1982 COUNT_INST(I_FLDI1);
1984 if( sh4_x86.double_prec == 0 ) {
1985 load_imm32(R_EAX, 0x3F800000);
1986 store_fr( R_EAX, FRn );
1991 COUNT_INST(I_FLOAT);
1994 if( sh4_x86.double_prec ) {
2003 if( sh4_x86.double_prec ) {
2008 load_ptr( R_ECX, &max_int );
2009 FILD_r32ind( R_ECX );
2012 load_ptr( R_ECX, &min_int ); // 5
2013 FILD_r32ind( R_ECX ); // 2
2015 JAE_rel8( sat2 ); // 2
2016 load_ptr( R_EAX, &save_fcw );
2017 FNSTCW_r32ind( R_EAX );
2018 load_ptr( R_EDX, &trunc_fcw );
2019 FLDCW_r32ind( R_EDX );
2020 FISTP_sh4r(R_FPUL); // 3
2021 FLDCW_r32ind( R_EAX );
2026 MOV_r32ind_r32( R_ECX, R_ECX ); // 2
2027 store_spreg( R_ECX, R_FPUL );
2030 sh4_x86.tstate = TSTATE_NONE;
2035 load_fr( R_EAX, FRm );
2036 store_spreg( R_EAX, R_FPUL );
2041 load_spreg( R_EAX, R_FPUL );
2042 store_fr( R_EAX, FRn );
2045 COUNT_INST(I_FCNVDS);
2047 if( sh4_x86.double_prec ) {
2053 COUNT_INST(I_FCNVSD);
2055 if( sh4_x86.double_prec ) {
2061 /* Floating point instructions */
2065 if( sh4_x86.double_prec ) {
2078 if( sh4_x86.double_prec ) {
2093 if( sh4_x86.double_prec ) {
2105 FMAC FR0, FRm, FRn {:
2108 if( sh4_x86.double_prec ) {
2128 if( sh4_x86.double_prec ) {
2143 if( sh4_x86.double_prec ) {
2154 COUNT_INST(I_FSRRA);
2156 if( sh4_x86.double_prec == 0 ) {
2165 COUNT_INST(I_FSQRT);
2167 if( sh4_x86.double_prec ) {
2180 if( sh4_x86.double_prec ) {
2194 COUNT_INST(I_FCMPEQ);
2196 if( sh4_x86.double_prec ) {
2206 sh4_x86.tstate = TSTATE_E;
2209 COUNT_INST(I_FCMPGT);
2211 if( sh4_x86.double_prec ) {
2221 sh4_x86.tstate = TSTATE_A;
2227 if( sh4_x86.double_prec == 0 ) {
2228 LEA_sh4r_rptr( REG_OFFSET(fr[0][FRn&0x0E]), R_EDX );
2229 load_spreg( R_EAX, R_FPUL );
2230 call_func2( sh4_fsca, R_EAX, R_EDX );
2232 sh4_x86.tstate = TSTATE_NONE;
2237 if( sh4_x86.double_prec == 0 ) {
2238 if( sh4_x86.sse3_enabled ) {
2239 MOVAPS_sh4r_xmm( REG_OFFSET(fr[0][FVm<<2]), 4 );
2240 MULPS_sh4r_xmm( REG_OFFSET(fr[0][FVn<<2]), 4 );
2241 HADDPS_xmm_xmm( 4, 4 );
2242 HADDPS_xmm_xmm( 4, 4 );
2243 MOVSS_xmm_sh4r( 4, REG_OFFSET(fr[0][(FVn<<2)+2]) );
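/* The two HADDPS passes fold the four products in xmm4 down to the full
 * dot product in every lane; the single MOVSS then stores it to
 * FR[(FVn<<2)+3], which lives at array index (FVn<<2)+2 because of the
 * word-swapped layout noted earlier. */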
2248 push_fr( (FVm<<2)+1);
2249 push_fr( (FVn<<2)+1);
2252 push_fr( (FVm<<2)+2);
2253 push_fr( (FVn<<2)+2);
2256 push_fr( (FVm<<2)+3);
2257 push_fr( (FVn<<2)+3);
2260 pop_fr( (FVn<<2)+3);
2267 if( sh4_x86.double_prec == 0 ) {
2268 if( sh4_x86.sse3_enabled ) {
2269 MOVAPS_sh4r_xmm( REG_OFFSET(fr[1][0]), 1 ); // M1 M0 M3 M2
2270 MOVAPS_sh4r_xmm( REG_OFFSET(fr[1][4]), 0 ); // M5 M4 M7 M6
2271 MOVAPS_sh4r_xmm( REG_OFFSET(fr[1][8]), 3 ); // M9 M8 M11 M10
2272 MOVAPS_sh4r_xmm( REG_OFFSET(fr[1][12]), 2 );// M13 M12 M15 M14
2274 MOVSLDUP_sh4r_xmm( REG_OFFSET(fr[0][FVn<<2]), 4 ); // V1 V1 V3 V3
2275 MOVSHDUP_sh4r_xmm( REG_OFFSET(fr[0][FVn<<2]), 5 ); // V0 V0 V2 V2
2276 MOVAPS_xmm_xmm( 4, 6 );
2277 MOVAPS_xmm_xmm( 5, 7 );
2278 MOVLHPS_xmm_xmm( 4, 4 ); // V1 V1 V1 V1
2279 MOVHLPS_xmm_xmm( 6, 6 ); // V3 V3 V3 V3
2280 MOVLHPS_xmm_xmm( 5, 5 ); // V0 V0 V0 V0
2281 MOVHLPS_xmm_xmm( 7, 7 ); // V2 V2 V2 V2
2282 MULPS_xmm_xmm( 0, 4 );
2283 MULPS_xmm_xmm( 1, 5 );
2284 MULPS_xmm_xmm( 2, 6 );
2285 MULPS_xmm_xmm( 3, 7 );
2286 ADDPS_xmm_xmm( 5, 4 );
2287 ADDPS_xmm_xmm( 7, 6 );
2288 ADDPS_xmm_xmm( 6, 4 );
2289 MOVAPS_xmm_sh4r( 4, REG_OFFSET(fr[0][FVn<<2]) );
2291 LEA_sh4r_rptr( REG_OFFSET(fr[0][FVn<<2]), R_EAX );
2292 call_func1( sh4_ftrv, R_EAX );
2295 sh4_x86.tstate = TSTATE_NONE;
2299 COUNT_INST(I_FRCHG);
2301 load_spreg( R_ECX, R_FPSCR );
2302 XOR_imm32_r32( FPSCR_FR, R_ECX );
2303 store_spreg( R_ECX, R_FPSCR );
2304 call_func0( sh4_switch_fr_banks );
2305 sh4_x86.tstate = TSTATE_NONE;
2308 COUNT_INST(I_FSCHG);
2310 load_spreg( R_ECX, R_FPSCR );
2311 XOR_imm32_r32( FPSCR_SZ, R_ECX );
2312 store_spreg( R_ECX, R_FPSCR );
2313 sh4_x86.tstate = TSTATE_NONE;
2314 sh4_x86.double_size = !sh4_x86.double_size;
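/* Translate-time tracking: FSCHG toggles FPSCR.SZ above, and mirroring it
 * in sh4_x86.double_size lets the FMOV translations pick the single- vs
 * double-size code path statically instead of testing FPSCR at runtime. */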
2317 /* Processor control instructions */
2319 COUNT_INST(I_LDCSR);
2320 if( sh4_x86.in_delay_slot ) {
2324 load_reg( R_EAX, Rm );
2325 call_func1( sh4_write_sr, R_EAX );
2326 sh4_x86.priv_checked = FALSE;
2327 sh4_x86.fpuen_checked = FALSE;
2328 sh4_x86.tstate = TSTATE_NONE;
2333 load_reg( R_EAX, Rm );
2334 store_spreg( R_EAX, R_GBR );
2339 load_reg( R_EAX, Rm );
2340 store_spreg( R_EAX, R_VBR );
2341 sh4_x86.tstate = TSTATE_NONE;
2346 load_reg( R_EAX, Rm );
2347 store_spreg( R_EAX, R_SSR );
2348 sh4_x86.tstate = TSTATE_NONE;
2353 load_reg( R_EAX, Rm );
2354 store_spreg( R_EAX, R_SGR );
2355 sh4_x86.tstate = TSTATE_NONE;
2360 load_reg( R_EAX, Rm );
2361 store_spreg( R_EAX, R_SPC );
2362 sh4_x86.tstate = TSTATE_NONE;
2367 load_reg( R_EAX, Rm );
2368 store_spreg( R_EAX, R_DBR );
2369 sh4_x86.tstate = TSTATE_NONE;
2374 load_reg( R_EAX, Rm );
2375 store_spreg( R_EAX, REG_OFFSET(r_bank[Rn_BANK]) );
2376 sh4_x86.tstate = TSTATE_NONE;
2380 load_reg( R_EAX, Rm );
2381 check_ralign32( R_EAX );
2382 MMU_TRANSLATE_READ( R_EAX );
2383 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
2384 MEM_READ_LONG( R_EAX, R_EAX );
2385 store_spreg( R_EAX, R_GBR );
2386 sh4_x86.tstate = TSTATE_NONE;
2389 COUNT_INST(I_LDCSRM);
2390 if( sh4_x86.in_delay_slot ) {
2394 load_reg( R_EAX, Rm );
2395 check_ralign32( R_EAX );
2396 MMU_TRANSLATE_READ( R_EAX );
2397 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
2398 MEM_READ_LONG( R_EAX, R_EAX );
2399 call_func1( sh4_write_sr, R_EAX );
2400 sh4_x86.priv_checked = FALSE;
2401 sh4_x86.fpuen_checked = FALSE;
2402 sh4_x86.tstate = TSTATE_NONE;
2408 load_reg( R_EAX, Rm );
2409 check_ralign32( R_EAX );
2410 MMU_TRANSLATE_READ( R_EAX );
2411 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
2412 MEM_READ_LONG( R_EAX, R_EAX );
2413 store_spreg( R_EAX, R_VBR );
2414 sh4_x86.tstate = TSTATE_NONE;
2419 load_reg( R_EAX, Rm );
2420 check_ralign32( R_EAX );
2421 MMU_TRANSLATE_READ( R_EAX );
2422 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
2423 MEM_READ_LONG( R_EAX, R_EAX );
2424 store_spreg( R_EAX, R_SSR );
2425 sh4_x86.tstate = TSTATE_NONE;
2430 load_reg( R_EAX, Rm );
2431 check_ralign32( R_EAX );
2432 MMU_TRANSLATE_READ( R_EAX );
2433 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
2434 MEM_READ_LONG( R_EAX, R_EAX );
2435 store_spreg( R_EAX, R_SGR );
2436 sh4_x86.tstate = TSTATE_NONE;
2441 load_reg( R_EAX, Rm );
2442 check_ralign32( R_EAX );
2443 MMU_TRANSLATE_READ( R_EAX );
2444 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
2445 MEM_READ_LONG( R_EAX, R_EAX );
2446 store_spreg( R_EAX, R_SPC );
2447 sh4_x86.tstate = TSTATE_NONE;
2452 load_reg( R_EAX, Rm );
2453 check_ralign32( R_EAX );
2454 MMU_TRANSLATE_READ( R_EAX );
2455 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
2456 MEM_READ_LONG( R_EAX, R_EAX );
2457 store_spreg( R_EAX, R_DBR );
2458 sh4_x86.tstate = TSTATE_NONE;
2460 LDC.L @Rm+, Rn_BANK {:
2463 load_reg( R_EAX, Rm );
2464 check_ralign32( R_EAX );
2465 MMU_TRANSLATE_READ( R_EAX );
2466 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
2467 MEM_READ_LONG( R_EAX, R_EAX );
2468 store_spreg( R_EAX, REG_OFFSET(r_bank[Rn_BANK]) );
2469 sh4_x86.tstate = TSTATE_NONE;
2472 COUNT_INST(I_LDSFPSCR);
2474 load_reg( R_EAX, Rm );
2475 call_func1( sh4_write_fpscr, R_EAX );
2476 sh4_x86.tstate = TSTATE_NONE;
2479 LDS.L @Rm+, FPSCR {:
2480 COUNT_INST(I_LDSFPSCRM);
2482 load_reg( R_EAX, Rm );
2483 check_ralign32( R_EAX );
2484 MMU_TRANSLATE_READ( R_EAX );
2485 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
2486 MEM_READ_LONG( R_EAX, R_EAX );
2487 call_func1( sh4_write_fpscr, R_EAX );
2488 sh4_x86.tstate = TSTATE_NONE;
2494 load_reg( R_EAX, Rm );
2495 store_spreg( R_EAX, R_FPUL );
2500 load_reg( R_EAX, Rm );
2501 check_ralign32( R_EAX );
2502 MMU_TRANSLATE_READ( R_EAX );
2503 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
2504 MEM_READ_LONG( R_EAX, R_EAX );
2505 store_spreg( R_EAX, R_FPUL );
2506 sh4_x86.tstate = TSTATE_NONE;
2510 load_reg( R_EAX, Rm );
2511 store_spreg( R_EAX, R_MACH );
2515 load_reg( R_EAX, Rm );
2516 check_ralign32( R_EAX );
2517 MMU_TRANSLATE_READ( R_EAX );
2518 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
2519 MEM_READ_LONG( R_EAX, R_EAX );
2520 store_spreg( R_EAX, R_MACH );
2521 sh4_x86.tstate = TSTATE_NONE;
2525 load_reg( R_EAX, Rm );
2526 store_spreg( R_EAX, R_MACL );
2530 load_reg( R_EAX, Rm );
2531 check_ralign32( R_EAX );
2532 MMU_TRANSLATE_READ( R_EAX );
2533 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
2534 MEM_READ_LONG( R_EAX, R_EAX );
2535 store_spreg( R_EAX, R_MACL );
2536 sh4_x86.tstate = TSTATE_NONE;
2540 load_reg( R_EAX, Rm );
2541 store_spreg( R_EAX, R_PR );
2545 load_reg( R_EAX, Rm );
2546 check_ralign32( R_EAX );
2547 MMU_TRANSLATE_READ( R_EAX );
2548 ADD_imm8s_sh4r( 4, REG_OFFSET(r[Rm]) );
2549 MEM_READ_LONG( R_EAX, R_EAX );
2550 store_spreg( R_EAX, R_PR );
2551 sh4_x86.tstate = TSTATE_NONE;
2554 COUNT_INST(I_LDTLB);
2555 call_func0( MMU_ldtlb );
2556 sh4_x86.tstate = TSTATE_NONE;
2565 COUNT_INST(I_OCBWB);
2569 load_reg( R_EAX, Rn );
2570 MOV_r32_r32( R_EAX, R_ECX );
2571 AND_imm32_r32( 0xFC000000, R_ECX );
2572 CMP_imm32_r32( 0xE0000000, R_ECX );
2574 call_func1( sh4_flush_store_queue, R_EAX );
2575 TEST_r32_r32( R_EAX, R_EAX );
2578 sh4_x86.tstate = TSTATE_NONE;
2581 COUNT_INST(I_SLEEP);
2583 call_func0( sh4_sleep );
2584 sh4_x86.tstate = TSTATE_NONE;
2585 sh4_x86.in_delay_slot = DELAY_NONE;
2589 COUNT_INST(I_STCSR);
2591 call_func0(sh4_read_sr);
2592 store_reg( R_EAX, Rn );
2593 sh4_x86.tstate = TSTATE_NONE;
2597 load_spreg( R_EAX, R_GBR );
2598 store_reg( R_EAX, Rn );
2603 load_spreg( R_EAX, R_VBR );
2604 store_reg( R_EAX, Rn );
2605 sh4_x86.tstate = TSTATE_NONE;
2610 load_spreg( R_EAX, R_SSR );
2611 store_reg( R_EAX, Rn );
2612 sh4_x86.tstate = TSTATE_NONE;
2617 load_spreg( R_EAX, R_SPC );
2618 store_reg( R_EAX, Rn );
2619 sh4_x86.tstate = TSTATE_NONE;
2624 load_spreg( R_EAX, R_SGR );
2625 store_reg( R_EAX, Rn );
2626 sh4_x86.tstate = TSTATE_NONE;
2631 load_spreg( R_EAX, R_DBR );
2632 store_reg( R_EAX, Rn );
2633 sh4_x86.tstate = TSTATE_NONE;
2638 load_spreg( R_EAX, REG_OFFSET(r_bank[Rm_BANK]) );
2639 store_reg( R_EAX, Rn );
2640 sh4_x86.tstate = TSTATE_NONE;
2643 COUNT_INST(I_STCSRM);
2645 load_reg( R_EAX, Rn );
2646 check_walign32( R_EAX );
2647 ADD_imm8s_r32( -4, R_EAX );
2648 MMU_TRANSLATE_WRITE( R_EAX );
2649 PUSH_realigned_r32( R_EAX );
2650 call_func0( sh4_read_sr );
2651 POP_realigned_r32( R_ECX );
2652 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
2653 MEM_WRITE_LONG( R_ECX, R_EAX );
2654 sh4_x86.tstate = TSTATE_NONE;
2659 load_reg( R_EAX, Rn );
2660 check_walign32( R_EAX );
2661 ADD_imm8s_r32( -4, R_EAX );
2662 MMU_TRANSLATE_WRITE( R_EAX );
2663 load_spreg( R_EDX, R_VBR );
2664 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
2665 MEM_WRITE_LONG( R_EAX, R_EDX );
2666 sh4_x86.tstate = TSTATE_NONE;
2671 load_reg( R_EAX, Rn );
2672 check_walign32( R_EAX );
2673 ADD_imm8s_r32( -4, R_EAX );
2674 MMU_TRANSLATE_WRITE( R_EAX );
2675 load_spreg( R_EDX, R_SSR );
2676 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
2677 MEM_WRITE_LONG( R_EAX, R_EDX );
2678 sh4_x86.tstate = TSTATE_NONE;
2683 load_reg( R_EAX, Rn );
2684 check_walign32( R_EAX );
2685 ADD_imm8s_r32( -4, R_EAX );
2686 MMU_TRANSLATE_WRITE( R_EAX );
2687 load_spreg( R_EDX, R_SPC );
2688 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
2689 MEM_WRITE_LONG( R_EAX, R_EDX );
2690 sh4_x86.tstate = TSTATE_NONE;
2695 load_reg( R_EAX, Rn );
2696 check_walign32( R_EAX );
2697 ADD_imm8s_r32( -4, R_EAX );
2698 MMU_TRANSLATE_WRITE( R_EAX );
2699 load_spreg( R_EDX, R_SGR );
2700 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
2701 MEM_WRITE_LONG( R_EAX, R_EDX );
2702 sh4_x86.tstate = TSTATE_NONE;
2707 load_reg( R_EAX, Rn );
2708 check_walign32( R_EAX );
2709 ADD_imm8s_r32( -4, R_EAX );
2710 MMU_TRANSLATE_WRITE( R_EAX );
2711 load_spreg( R_EDX, R_DBR );
2712 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
2713 MEM_WRITE_LONG( R_EAX, R_EDX );
2714 sh4_x86.tstate = TSTATE_NONE;
2716 STC.L Rm_BANK, @-Rn {:
2719 load_reg( R_EAX, Rn );
2720 check_walign32( R_EAX );
2721 ADD_imm8s_r32( -4, R_EAX );
2722 MMU_TRANSLATE_WRITE( R_EAX );
2723 load_spreg( R_EDX, REG_OFFSET(r_bank[Rm_BANK]) );
2724 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
2725 MEM_WRITE_LONG( R_EAX, R_EDX );
2726 sh4_x86.tstate = TSTATE_NONE;
2730 load_reg( R_EAX, Rn );
2731 check_walign32( R_EAX );
2732 ADD_imm8s_r32( -4, R_EAX );
2733 MMU_TRANSLATE_WRITE( R_EAX );
2734 load_spreg( R_EDX, R_GBR );
2735 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
2736 MEM_WRITE_LONG( R_EAX, R_EDX );
2737 sh4_x86.tstate = TSTATE_NONE;
2740 COUNT_INST(I_STSFPSCR);
2742 load_spreg( R_EAX, R_FPSCR );
2743 store_reg( R_EAX, Rn );
2745 STS.L FPSCR, @-Rn {:
2746 COUNT_INST(I_STSFPSCRM);
2748 load_reg( R_EAX, Rn );
2749 check_walign32( R_EAX );
2750 ADD_imm8s_r32( -4, R_EAX );
2751 MMU_TRANSLATE_WRITE( R_EAX );
2752 load_spreg( R_EDX, R_FPSCR );
2753 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
2754 MEM_WRITE_LONG( R_EAX, R_EDX );
2755 sh4_x86.tstate = TSTATE_NONE;
2760 load_spreg( R_EAX, R_FPUL );
2761 store_reg( R_EAX, Rn );
2766 load_reg( R_EAX, Rn );
2767 check_walign32( R_EAX );
2768 ADD_imm8s_r32( -4, R_EAX );
2769 MMU_TRANSLATE_WRITE( R_EAX );
2770 load_spreg( R_EDX, R_FPUL );
2771 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
2772 MEM_WRITE_LONG( R_EAX, R_EDX );
2773 sh4_x86.tstate = TSTATE_NONE;
2777 load_spreg( R_EAX, R_MACH );
2778 store_reg( R_EAX, Rn );
2782 load_reg( R_EAX, Rn );
2783 check_walign32( R_EAX );
2784 ADD_imm8s_r32( -4, R_EAX );
2785 MMU_TRANSLATE_WRITE( R_EAX );
2786 load_spreg( R_EDX, R_MACH );
2787 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
2788 MEM_WRITE_LONG( R_EAX, R_EDX );
2789 sh4_x86.tstate = TSTATE_NONE;
2793 load_spreg( R_EAX, R_MACL );
2794 store_reg( R_EAX, Rn );
2798 load_reg( R_EAX, Rn );
2799 check_walign32( R_EAX );
2800 ADD_imm8s_r32( -4, R_EAX );
2801 MMU_TRANSLATE_WRITE( R_EAX );
2802 load_spreg( R_EDX, R_MACL );
2803 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
2804 MEM_WRITE_LONG( R_EAX, R_EDX );
2805 sh4_x86.tstate = TSTATE_NONE;
2809 load_spreg( R_EAX, R_PR );
2810 store_reg( R_EAX, Rn );
2814 load_reg( R_EAX, Rn );
2815 check_walign32( R_EAX );
2816 ADD_imm8s_r32( -4, R_EAX );
2817 MMU_TRANSLATE_WRITE( R_EAX );
2818 load_spreg( R_EDX, R_PR );
2819 ADD_imm8s_sh4r( -4, REG_OFFSET(r[Rn]) );
2820 MEM_WRITE_LONG( R_EAX, R_EDX );
2821 sh4_x86.tstate = TSTATE_NONE;
2826 /* Do nothing. Well, we could emit a NOP (0x90), but what would really be the point? */
2829 sh4_x86.in_delay_slot = DELAY_NONE;
.